Dataset schema, as reported by the dataset viewer (field name, type, and observed size range):

    query            string     length 9 to 3.4k
    document         string     length 9 to 87.4k
    metadata         dict
    negatives        sequence   4 to 101 items
    negative_scores  sequence   4 to 101 items
    document_score   string     length 3 to 10
    document_rank    string     102 distinct values

Each example row below lists its fields in this order.
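Below is a minimal sketch of reading rows with this schema through the Hugging Face datasets library. The repository id and split name are placeholders for illustration, not the dataset's actual identifiers.

    from datasets import load_dataset

    # "org/code-search-triplets" and "train" are assumed placeholders;
    # substitute the real repository id and split.
    ds = load_dataset("org/code-search-triplets", split="train")

    row = ds[0]
    print(row["query"])           # natural-language description of the code
    print(row["document"])        # the matching (positive) code snippet
    print(len(row["negatives"]))  # 4 to 101 hard-negative snippets per row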
Row 1

query:
Setter for the number at tile position pos
document:
def set_number(self, row, col, value):
    self._grid[row][col] = value
metadata:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives:
[ "def set_number(self, row, col, value):\r\n self._grid[row][col] = value", "def set_number(self, row, col, value):\r\n self._grid[row][col] = value", "def set_number(self, row, col, value):\r\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._grid_tile[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid_2048[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;", "def set_tile(self, row, col, value):\n # replace with your code\n pass", "def set_tile(self, row, col, value):\r\n self.grid[row][col] = value", "def changeTile (self, posY, posX, tile=\"t\"):\r\n self.grid[posY][posX] = tile", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n if col < self.grid_height and row < self.grid_width:\n self.board[row][col] = value", "def set_tile(self, row, col, value):\r\n self._grid[row][col]=value", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\n self._grid[row][col] = value", "def set_tile(self, row, column, tile_number):\n \n current_tile = self.get_tile(row, column)\n \n bits_to_shift = tile_offsets[row][column]\n new_mask = tile_number << bits_to_shift\n old_mask = current_tile << bits_to_shift\n self.tiles = self.tiles ^ old_mask\n self.tiles = self.tiles ^ new_mask", "def set_tile(self, row, col, value):\r\n self._board[row][col] = value", "def set_tile(self, row, col, value):\r\n self._cells[row][col] = value", "def pos_number(self):\n return self._pos_number.zfill(2)", "def set_tile(self, row, col, value):\n if row >= 0 and row < self.get_grid_height():\n if col >= 0 and col < self.get_grid_width():\n # Only set if the row and column are ok\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n self._cells[row][col] = value", "def set_number(self, num, row, col, where=\"\"):\n print(where, num, \"(\", row, \",\", col, \")\", self.possibles[row][col])\n self.content[row][col] = num\n self.possibles[row][col] = set()\n self.board.set_num(num, row, col)\n self.reset_possible(num, row, col)\n self.change = True", "def getTilePos(self, pos = None):\n\n if not pos:\n pos = self.actor.getPos()\n \n for i in range(len(pos)):\n pos[i] = int(math.floor( (pos[i] + self.dimensions[i]) / 2.0))\n #pos[i] = int(math.floor( pos[i] / 2.0))\n\n return pos", "def write_num(x,y,t,pos):\n moveturtle(x+0.5,y+0.5,t)\n t.write(pos)\n moveturtle(x,y,t)", "def set_xy(self, x, y, val):\r\n\t\tself.grid[y, x] = val", "def set_our_tile(self, x, y, value):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\tself.our_tiles[x][y] = value", "def set_number(self, col, row, num):\n if col > 8 or row > 8 or num > 9 or num < 0:\n return \"Invalid input, try again!\"\n elif self.new_input_does_not_overlap_original_board(col, row):\n if num == 0:\n self.board[row][col] = 0\n else:\n 
self.board[row][col] = num\n return self.update_game_state()\n # return alg.check_solution(self.board)\n else:\n return \"Cannot change this number, try again!\"", "def _updatePos(self, newTile):\n self.pos = newTile\n self.rect = (newTile.x * TILE_W, newTile.y * TILE_H)", "def number(cls, tileName):\n return TILENAMEMAP[tileName]['Number'] if tileName in TILENAMEMAP else None", "def set_move(self, position: Point, mark: Mark) -> None:\n\t\tif mark == Mark.X:\n\t\t\tself.tiles[position.x][position.y] = 1\n\t\telse:\n\t\t\tself.tiles[position.x][position.y] = -1", "def setInteger(self, value):", "def setInteger(self, value):", "def set_cell(self, x, y, val):\n pass", "def set_cell(self, x, y, val):\n pass", "def set_cell(self, pos, value):\n\t\tpos = Point(pos)\n\t\tif not self.valid(pos):\n\t\t\traise KeyError('Invalid cell position: {0}'.format(pos))\n\t\tself.data[pos.x + pos.y * self.dims.width] = value", "def __setitem__(self, index, value):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # let my tile do the rest\n self.data[self.tile.offset(index)] = value\n # otherwise\n else:\n # set the item directly in my container\n self.data[index] = value\n # all done\n return", "def tile_change(direction, tile):\n lower_direction = direction.lower()\n if lower_direction == \"n\":\n tile += 1\n elif lower_direction == \"s\":\n tile -= 1\n elif lower_direction == \"e\":\n tile += 10\n else:\n tile -= 10\n return tile", "def position(self, pos: int):\n self.__pos = pos", "def position_to_tile(self, position):\r\n return position[1] + self.width * position[0]", "def update_grid_pos(self):\n self.grid_pos = self.get_tile_of_position(self.tank.body.position)", "def set_cells(self, val=None):\t\r\n self._cells = \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def setNumber(self, number):\n prevNumber = self.number\n self.number = number\n return prevNumber", "def setPosition(position):", "def set_num(self, num):\n self.cmd_num = num", "def set_number(self, number):\n self.number = number", "def setNumber(self, n):\n VisualElement.setNumber(self, n)\n\n # the brush color is dependent of number\n self.element_region.setBrush(self.brush)", "def set_tile(self, row, col, value):\r\n del self.board[row][col]\r\n self.board[row].insert(col,value)\r\n return self.board", "def positions(self, tileID, numSamples):", "def set_px(self, value):\n pass", "def tile_index_at(self, position: TilePosition) -> int:\r\n tile_index: int = pyxel.tilemap(self.tilemap_id).get(\r\n self.rect_uv.x + position.tile_x, self.rect_uv.y + position.tile_y)\r\n return tile_index", "def change(self, num):\n if self.manhattan(self[0], self[num]) != 1:\n raise RuntimeError(\n \"Numbers {} {} in positions {} {} can't change\".format(0, num, self[0], self[num]))\n\n z_i, z_j = self[0]\n n_i, n_j = self[num]\n\n self.pos_to_num[(z_i, z_j)] = num\n self.pos_to_num[(n_i, n_j)] = 0\n\n self.num_to_pos[0] = (n_i, n_j)\n self.num_to_pos[num] = (z_i, z_j)", "def new_tile(self):\n rowm, colm = self.get_ava_index()\n value = 2 if random() <= 0.90 else 4\n self.set_tile(rowm, colm, value)\n print rowm,colm,value", "def set_value(self, m: int, n: int, value: int) -> None:\n\t\tself.matrix[m][n] = value", "def __setitem__(self, pos, val):\n self._coords[pos] = val", "def set_item(self, row, col, value):\n self.board[row][col] = value", "def set(self,argument):\n if argument == \"X\" or \"O\":\n 
self.tile=argument", "def get_number(self, row, col):\r\n return self._grid[row][col]", "def get_number(self, row, col):\r\n return self._grid[row][col]", "def get_number(self, row, col):\r\n return self._grid[row][col]", "def set(self, coord, value):\n layer, row, column = tuple(coord)\n self.validPosition(layer, row, column)\n self._state['visible']['board'][layer][row][column] = value", "def set_layer(self, n):\n self.layernum = n\n self.update()", "def set_tile(self, point, glyph=\".\"):\n self.matrix[point.y][point.x] = glyph", "def setX(self, value):\n self.position[0] = value", "def setCoordinateValue(self,coordVal):\n gridkey = self.__createkey__()\n self.grid[gridkey] = coordVal\n return self.getCoordinate()", "def setTile(self, cell, tile):\n assert isinstance(cell, tuple)\n cellx, celly = cell\n\n if cellx < 0 or cellx > self.map_array.shape[0]-1 or celly < 0 or celly > self.map_array.shape[1]-1:\n return\n\n if self.tile_dict.get((cellx, celly)):\n self.canvas.delete(self.tile_dict[(cellx, celly)])\n\n if tile:\n self.map_array[cellx, celly] = tile.tid\n if tile.tid == 0.0:\n return\n map_posx, map_posy = iso(cellx * self.cell_width, celly * self.cell_height)\n image = self.main.main_tilelist.images[tile.tid]\n self.tile_dict[(cellx, celly)] = self.canvas.create_image(map_posx, map_posy, image=image, anchor=tk.N)", "def updateNumerosity(self, num):\n self.numerosity += num", "def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})", "def initialize_position(self):\n self.x = self.cell_xl + self.cell_dx * np.random.rand(1)[0]", "def change_value(value, loc):\r\n (application.ui.__getattribute__(f'cell{loc.column+1}{loc.row+1}')).setText(str(value))\r\n sudoku_grid[loc.row, loc.column] = value\r\n global cnt_free_cells\r\n cnt_free_cells -= 1\r\n # print(f'Placed {value} at {loc}')\r", "def define_number_positions(self):\n self.number_positions = np.array([\n [(\n int((j + 0.5) * SCREEN_WIDTH // 9),\n int((i + 0.5) * SCREEN_WIDTH // 9)\n ) for j in range(9)]\n for i in range(9)\n ])", "def set_pos(self, x):\n self._pos = x", "def get_number(self, row, col):\n return self._grid[row][col]", "def get_number(self, row, col):\n return self._grid[row][col]", "def get_number(self, row, col):\n return self._grid[row][col]", "def get_number(self, row, col):\n return self._grid[row][col]", "def set(self,row,col,value):\r\n self.puzzle[row][col] = value\r\n print(\"Entered value \",value)\r\n if self.puzzle[row][col] == self.rows[row][col]:\r\n self.score = self.score+5\r\n else:\r\n self.score = self.score-5", "def GetTileIndex(self, pos):\r\n #pixel = rpg_image.GetPixel(self.image, pos)\r\n try:\r\n pixel = self.image_buffer[pos[0]][pos[1]]\r\n except IndexError, e:\r\n pixel = -1\r\n \r\n return pixel", "def set_mark( self, mark, index ):\n\n try:\n int(self.__grid[index-1])\n\n if mark.lower() == 'x' or mark.lower() == 'o': \n self.__grid[index-1] = mark\n\n return 1\n\n except ValueError:\n return 0", "def setInt(self, addr: ghidra.program.model.address.Address, value: int) -> None:\n ...", "def set_pos(self, p: tuple) -> None:\n self.pos = p", "def set_cell(self, x, y, tile_index):\n data_index = x + y * self._size[0] # type: int\n # self._data[data_index] = tile_index\n #\n # if self._sprites[data_index]:\n # self._sprites[data_index].delete()\n # self._sprites[data_index] = None\n\n # Release resources\n if self._tiles[data_index]:\n self._tiles[data_index].delete()\n self._tiles[data_index] = None\n\n # Only create sprite when not zero\n if tile_index:\n 
tile_prototype = self._tile_set.get(tile_index, None) # type: Optional[Tile]\n if not tile_prototype:\n raise TileSetError(\"tile set does not contain tile for index %s\" % tile_index)\n\n tile_w, tile_h = self._tile_size_2d\n i, j, _k = cart_to_iso(x, y, 0)\n ax, ay = tile_prototype.anchor\n tile_x, tile_y = i * tile_w - ax, j * tile_h - ay\n\n tile = deepcopy(tile_prototype)\n tile.sprite = pyglet.sprite.Sprite(tile.image, tile_x, tile_y)\n tile.aabb3d.pos = float(x), float(y), 0.0\n tile.aabb2d.pos = tile_x, tile_y\n self._tiles[data_index] = tile\n # self._sprites[data_index] = pyglet.sprite.Sprite(tile.image, tile_x, tile_y)\n\n # Currently only supports a single level, so everything is on z-level 0\n # self._aabb3d[data_index] = AABB3D(float(x), float(y), 0.0, tile.size[0], tile.size[1], tile.size[2])\n # self._aabb2d[data_index] = AABB2D(tile_x, tile_y, tile_w, tile_h)", "def __getpos__(self, num):\n return self.num_to_pos[num]", "def add_number(self):\n # take one of the free positions in the grid at random\n x, y = random.choice(self.free_positions)\n # with the probability of Game.proba_four, put a 4 in the box. Else\n # put a 2\n if random.random() < Game.proba_four:\n self.grid[x][y] = 4\n else:\n self.grid[x][y] = 2", "def pain(self, int):\n self.vel[1] = int", "def set_position(self, pos, debug=False):\n pos = max(pos, 0)\n pos = min(pos, 1)\n posrange = pos * self.range\n pos = posrange + self.min\n if debug:\n print('Setting Dynamixel {} with posrange {} to position {}'.format(self.id, posrange, pos))\n self.motor.set_position(int(pos))", "def set_data(self, data, *pos):\n r, c = pos\n self._grid[r][c] = data", "def put_number(self, *args):\n return _ida_hexrays.cexpr_t_put_number(self, *args)", "def mark_pos(self, position, marker):\n i, j = self.board[position]\n self.grid[i][j] = marker", "def setMidiNumber(self, new_nbr):\n\n self.nbr = limiter(new_nbr)", "def setInt(self, address: ghidra.program.model.address.Address, value: int) -> None:\n ...", "def get_tile(self, row, col):\n # replace with your code\n return 0", "def set_position(self, position):\n self.position = tuple(position)", "def cleanTileAtPosition(self, pos):\n #Return the floor of x as a float, the largest integer value less than\n #or equal to x\n posx = pos.getX()\n posy = pos.getY()\n posx = math.floor(posx)\n posy = math.floor(posy)\n self.tiles[(posx, posy)] = 1 # using 0 as dirty value, 1 as clean value, of key tuple pos(x,y)\n #self.printTiles()\n #raise NotImplementedError" ]
negative_scores:
[ "0.7453924", "0.7453924", "0.7453924", "0.69439274", "0.690476", "0.690476", "0.6867873", "0.6846585", "0.6842772", "0.68179584", "0.67959946", "0.67660105", "0.67594844", "0.67594844", "0.66707844", "0.66694415", "0.66609603", "0.66590446", "0.66233623", "0.6552477", "0.64449495", "0.6417769", "0.6408263", "0.63783526", "0.63635814", "0.63210785", "0.6303899", "0.616876", "0.61539376", "0.6150407", "0.6081049", "0.6077612", "0.6025042", "0.6004493", "0.5997587", "0.5997587", "0.5971016", "0.5971016", "0.5958486", "0.5954103", "0.59496", "0.59490573", "0.5934247", "0.59326804", "0.59195715", "0.5910315", "0.5907727", "0.58921516", "0.58888066", "0.5871539", "0.58564883", "0.5851937", "0.58509755", "0.58352005", "0.58265406", "0.58259034", "0.58250237", "0.5822704", "0.58179474", "0.57953036", "0.57425976", "0.57425976", "0.57425976", "0.57375914", "0.5736133", "0.5732317", "0.5726408", "0.57145864", "0.57097334", "0.5706727", "0.5699241", "0.5681423", "0.5681107", "0.5671433", "0.5667123", "0.56668466", "0.56668466", "0.56668466", "0.56668466", "0.5664133", "0.5649288", "0.56476045", "0.56420517", "0.561314", "0.55891895", "0.5588425", "0.55859417", "0.55760807", "0.5562108", "0.55618566", "0.55608255", "0.55582386", "0.55549335", "0.5543871", "0.5543309", "0.5538038", "0.55375224" ]
document_score: 0.74432904
document_rank: 6
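The metadata above marks each row for a triplet objective over (query, document, negatives). A minimal sketch of expanding one row into (anchor, positive, negative) training triples; the helper name is ours, not part of the dataset:

    def triplets_from_row(row):
        # One triple per hard negative: the query is the anchor, the
        # document is the positive, and each negative is a contrast example.
        for negative in row["negatives"]:
            yield (row["query"], row["document"], negative)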
Row 2

query:
Make a copy of the puzzle to update during solving Returns a Puzzle object
document:
def clone(self):
    new_puzzle = Puzzle(self._height, self._width, self._grid)
    return new_puzzle
metadata:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives:
[ "def solve(self):\n new_puzzle = self._puzzle.clone()\n self._solution = new_puzzle.solve_puzzle()\n del new_puzzle\n pass", "def clone(self):\r\n new_puzzle = Puzzle(self._height, self._width, self._grid)\r\n return new_puzzle", "def clone(self):\r\n new_puzzle = Puzzle(self._height, self._width, self._grid)\r\n return new_puzzle", "def clone(self):\r\n new_puzzle = Puzzle(self._height, self._width, self._grid)\r\n return new_puzzle", "def copy(self):\r\n board = []\r\n for row in self.board:\r\n board.append([x for x in row])\r\n return Puzzle(board)", "def buildpuzzle(self):\r\n self.puzzle = copy.deepcopy(self.rows)\r\n if self.difficulty == 1:\r\n self.removedigits(1)\r\n if self.difficulty == 2:\r\n self.removedigits(2)\r\n if self.difficulty == 3:\r\n self.removedigits(3)", "def copy(self):\n\t\t\n\t\t\taCopy = LpSolver.copy()\n\t\t\taCopy.presolve = self.presolve\n\t\t\treturn aCopy", "def __init__(self, puzzle):\n self.puzzle = puzzle", "def clone_puzzle(self) -> List[int]:\n self.clone = self._puzzle\n return self.clone", "def generate_new_puzzle():\n new_puzzle = pb() \n\n # only generate solvable puzzles\n while not new_puzzle.is_solvable():\n new_puzzle = pb()\n\n return new_puzzle", "def copy(self):\n\t\t\n\t\t\taCopy = LpSolver.copy()\n\t\t\taCopy.cuts = self.cuts\n\t\t\taCopy.presolve = self.presolve\n\t\t\taCopy.dual = self.dual\n\t\t\taCopy.crash = self.crash\n\t\t\taCopy.scale = self.scale\n\t\t\taCopy.rounding = self.rounding\n\t\t\taCopy.integerPresolve = self.integerPresolve\n\t\t\taCopy.strong = self.strong\n\t\t\treturn aCopy", "def solution_copy(self):\n to_return = DepAlgoSolution(self.packages_in_solution[:], self.visited_packages[:], set(self.visited_names))\n to_return.is_valid = self.is_valid\n to_return.not_to_delete_deps = set(self.not_to_delete_deps)\n for key, value in self.dict_to_way.items():\n to_return.dict_to_way[key] = value[:]\n for key, value in self.dict_to_deps.items():\n to_return.dict_to_deps[key] = set(value)\n for key, value in self.dict_call_as_needed.items():\n to_return.dict_call_as_needed[key] = value\n to_return.installed_solution_packages = set(self.installed_solution_packages)\n return to_return", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def copy(self):\n return SolverPars(learn=self.learn, num_steps=self.num_steps,\n grad_tol=self.grad_tol, diff_tol=self.diff_tol, updater=self.updater)", "def get_puzzle(self):\n return self._puzzle", "def copy(self):\n\t\t\n\t\taCopy = LpSolver_CMD.copy(self)\n\t\taCopy.cuts = self.cuts\n\t\taCopy.presolve = self.presolve\n\t\taCopy.dual = self.dual\n\t\taCopy.strong = self.strong\n\t\treturn aCopy", "def copy(self):\n\t\t\n\t\taCopy = LpSolver.copy(self)\n\t\taCopy.path = self.path\n\t\taCopy.keepFiles = 
self.keepFiles\n\t\taCopy.tmpDir = self.tmpDir\n\t\treturn aCopy", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def copy(self):\r\n\t\tnewBoard = BoardClass()\r\n\r\n\t\tfor row in self.board:\r\n\t\t\tnewBoard.board.append(row[:])\r\n\t\tnewBoard.x = self.x\r\n\t\tnewBoard.y = self.y\r\n\t\tnewBoard.heuristic = self.heuristic\r\n\t\tnewBoard.n = self.n\r\n\t\tnewBoard.hType = self.hType\r\n\t\tnewBoard.steps = self.steps\r\n\r\n\t\treturn newBoard", "def copy(self):\n\t\tlpcopy = LpProblem(name = self.name, sense = self.sense)\n\t\tlpcopy.objective = self.objective\n\t\tlpcopy.constraints = self.constraints.copy()\n\t\tlpcopy.sos1 = self.sos1.copy()\n\t\tlpcopy.sos2 = self.sos2.copy()\n\t\treturn lpcopy", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def clone(self):\n copy = Board(self.game)\n for old_piece in self.game_pieces:\n copy.game_pieces.append(old_piece.clone())\n \n return copy", "def solved(self):\r\n return self.puzzle.solved", "def copy(self):\n 
return type(self)(self.game_board.copy(), self.current_piece)", "def test_copy(self):\n p = hw.create_tile_puzzle(3, 3)\n p2 = p.copy()\n self.assertTrue(p.get_board() == p2.get_board())\n p2.perform_move('up')\n self.assertFalse(p.get_board() == p2.get_board())", "def deepcopy(self):\n\t\tlpcopy = LpProblem(name = self.name, sense = self.sense)\n\t\tif lpcopy.objective != None:\n\t\t\tlpcopy.objective = self.objective.copy()\n\t\tlpcopy.constraints = {}\n\t\tfor k,v in self.constraints.iteritems():\n\t\t\tlpcopy.constraints[k] = v.copy()\n\t\tlpcopy.sos1 = self.sos1.copy()\n\t\tlpcopy.sos2 = self.sos2.copy()\n\t\treturn lpcopy", "def solve(puzzle, verbose=False):\n sol = puzzle.extensions()\n s = puzzle\n for i in sol:\n if verbose == True:\n print(i)\n if not i.is_solved():\n s = solve(i)\n if s != puzzle:\n break\n else:\n s = i\n break\n return s", "def get_solution(self):\r\n return self.solution", "def copy_state(self):\n new_state = SudokuState(np.ndarray.copy(self.final_values)) # Copy final values using numpy's built-in copy\n for (row, col), values in np.ndenumerate(self.possible_values):\n new_state.possible_values[row][col] = values[:] # Copy over each list of possible values\n return new_state # Return a SudokuState with the copied values", "def clone(self):\n copy = GamePiece((self.x, self.y), self.player)\n return copy", "def solve(self):\n\n self.queue.add(*self.moved.items)\n self.solving = True\n self.moved.items = []", "def solute(self, puzzle):\r\n \"\"\"suppose that ax = c, where a is a matrix, c and x are vectors.\"\"\"\r\n \"\"\"The aim is to figure out x, which indicates the solution.\"\"\"\r\n A, a, c = [], [], []\r\n for i in range(puzzle.row):\r\n for j in range(puzzle.column):\r\n # create a puzzle.row * puzzle.column by puzzle.row * puzzle.column matrix.\r\n # each column represents a cell in the puzzle.\r\n # each row represents the changed cell if column c is selected.\r\n if puzzle.lights[i][j] == -1:\r\n c.append(1)\r\n else:\r\n c.append(0)\r\n for m in range(puzzle.row):\r\n for n in range(puzzle.column):\r\n if self.is_adjecent([m, n], [i, j]):\r\n # if [m, n] is adjecent to [i, j], then a[ij][mn] should be 1.\r\n a.append(1)\r\n else:\r\n a.append(0)\r\n a.append(c[i * puzzle.column + j])\r\n A.append(a)\r\n a = []\r\n\r\n self.eliminate(A)\r\n x = [item[len(item) - 1] for item in A]\r\n # x is the last column of A.\r\n # if x[i] is 1, cell i should be selected.\r\n i = 0\r\n for m in range(puzzle.row):\r\n for n in range(puzzle.column):\r\n if x[i] == 1:\r\n puzzle.selection.add((m, n))\r\n i += 1\r\n\r\n return puzzle.selection", "def get_approx_solution(self, solver):\n tour = solver.solve(self)\n print('The cost is {}.'.format(get_cost(tour,self)))\n self.tours[solver.__class__.__name__] = tour\n return tour", "def copy(self):\n cpy = deepcopy(self)\n # usually we use copy to perform transformations on the board\n # so it's good to reset memoized values\n cpy._memoized_compact = None \n return cpy", "def copy(self):\r\n copy_board = Board(self._squareCount, self._pebbleCount)\r\n copy_board.squares = [list(row) for row in self.squares]\r\n return copy_board", "def get_solution(self):\n return self._generate_solution()", "def test_is_solved(self):\n p = hw.TilePuzzle([[1, 2], [3, 0]])\n self.assertTrue(p.is_solved())\n p = hw.TilePuzzle([[0, 1], [3, 2]])\n self.assertFalse(p.is_solved())", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) 
+ \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve(self):\n if not self.solvable:\n print('Suduko not Solvable')\n return False\n res=self.back(0, 0)\n # if self.a[0][0]!=0:\n # res=self.back(0, 1)\n # else:\n # for i in range(1, 10):\n # self.a[0][0]=i\n # res=self.back(0, 1)\n # if res:\n # break\n if res:\n self.check_if_solvable()\n print(\"Sudoku Solved!\")\n print(self.a)\n return self.a\n else: print(\"Not Solvable\")\n return False", "def __init__(self, puzzle):\n # Split the given string input and find the side length and block size of the puzzle\n puz = [int(i) for i in puzzle.split(' ') if i]\n self.sl = int(math.sqrt(len(puz))) \n self.bs = int(math.sqrt(self.sl))\n\n # If side length squared not the same length as total puzzle, or if side lengths\n # not a square length, raise error\n if not (self.sl**2 == len(puz)) or not (self.bs**2 == self.sl):\n raise Sudoku_Errors.InvalidPuzzleException(puzzle, \"Puzzle side lengths not a perfect square\")\n\n # For each value in the puzzle, if not in correct range, raise error\n for ind in range(len(puz)):\n row = ind // self.sl\n col = ind % self.sl\n if not (0 <= puz[ind] <= self.sl):\n raise Sudoku_Errors.InvalidPuzzleException(puzzle,\n \"Puzzle value at ({}, {}) is out of range in puzzle \\n{}\".format(row, col, puzzle))\n\n # Split string by spaces into single list\n self.puzzle = [[j for j in puz[(i*self.sl):(i*self.sl)+self.sl]] for i in range(self.sl)]\n\n # For each value in the puzzle, check that it is a valid value for that square\n for row in range(self.sl):\n for col in range(self.sl):\n # This temporary replacing of each value with 0 is a trick so that\n # the valid_square method can be used on every square\n val = self.puzzle[row][col]\n self.puzzle[row][col] = 0\n\n if not self.valid_square(row, col, val):\n # If not a valid puzzle, reset self.puzzle and raise error\n self.puzzle = None\n raise Sudoku_Errors.InvalidPuzzleException(puzzle,\n \"Puzzle value at ({}, {}) is incorrect in puzzle \\n{}\".format(row, col, puzzle))\n\n # If value is valid, replace that square with prior value that was input\n self.puzzle[row][col] = val", "def is_solvable(self):\n self_copy = deepcopy(self)\n return self_copy.solve()", "def Clone(self):\n st = PunterGameState()\n st.fullGraph = self.fullGraph\n st.score = self.score\n st.playerJustMoved = self.playerJustMoved\n st.pathes = copy.deepcopy(self.pathes)\n st.scores = copy.deepcopy(self.scores)\n st.endpoints = self.endpoints[:]\n return st", "def _copy(self, p):\n p._.d = self._.d\n p._.n = self._.n\n if self._has(\"p\"):\n p._.p = copy(self._.p)\n if self._has(\"q\"):\n p._.q = copy(self._.q)\n if self._has(\"P\"):\n p._.P = copy(self._.P)\n 
if self._has(\"Q\"):\n p._.Q = copy(self._.Q)\n if self._has(\"k\"):\n p._.k = self._.k\n if self._has(\"m\"):\n p._.m = self._.m\n if self._has(\"fsd\"):\n p._.fsd = self._.fsd\n if self._has(\"pPolynomial_ordering\"):\n p._.pPolynomial_ordering = self._.pPolynomial_ordering\n if self._has(\"qPolynomial_ordering\"):\n p._.qPolynomial_ordering = self._.qPolynomial_ordering\n if self._has(\"complement\"):\n p._.complement = self._.complement\n p._.fusion_schemes.update(self._.fusion_schemes)\n p._.subschemes.update(self._.subschemes)\n p._.subconstituents = list(self._.subconstituents)\n p._.triple.update(self._.triple)\n p._.triple_solution.update(self._.triple_solution)\n p._.triple_solution_generator.update(self._.triple_solution_generator)\n p._.quadruple.update(self._.quadruple)", "def clone(self):\n new_board = utils.copy_matrix(self.board)\n return Sudoku(new_board, self.block_size)", "def copy(self):\n\t\tb = Board(self.size, self.end_count)\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tb.tiles[x][y] = self.tiles[x][y]\n\t\treturn b", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in 
tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def update(self, solution):\n self.heuristic_path = [i for i in self.initial_path if i in solution]\n self.heuristic_cost = self.pathCost(self.heuristic_path)", "def copy_grid (grid):\r\n return copy.deepcopy(grid)", "def clone(self):\n # Run the constructor.\n other = BoardPath()\n # Copy the object variables\n other._current_cost = self._current_cost\n other._path = self._path[:]\n other._current_loc = self._current_loc\n return other", "def sudoku(puzzle):\n positions = all_pos(puzzle)\n if solve(puzzle, positions, 0):\n return puzzle\n return None", "def build_game_board(self):\n # retrieves new sudoku puzzle from dataset\n sudoku_set = self.data.get_sudoku_set()\n sudoku_problem, sudoku_solution = sudoku_set[0], sudoku_set[1]\n\n # removes old game boards\n self.board = []\n self.puzzle = []\n self.alg_solution = []\n self.data_solution = []\n\n # sets up sudoku puzzle to array format\n segment = []\n for num in sudoku_problem:\n segment.append(int(num))\n if len(segment) == 9:\n self.board.append(segment)\n self.puzzle.append(segment[:])\n segment = []\n\n self.alg_solution = alg.solve_sudoku(self.puzzle) # uses sudoku backtracking algorithm to solve puzzle\n\n # sets up the provided sudoku puzzle solution from dataset to array format\n for num in sudoku_solution:\n segment.append(int(num))\n if len(segment) == 9:\n 
self.data_solution.append(segment)\n segment = []\n\n self.game_state = \"Not Solved, Keep Trying!\"", "def solve(self):\n # If board is filled, board is trivially solved\n if self.check_full_board():\n return self.done\n\n # Iterate over every square in the board\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n\n # If square is empty, begin plugging in possible values\n if self.check_empty_space(row, col):\n for val in range(1, 10):\n if not self.check_row(val, row) and \\\n not self.check_column(val, col) and \\\n not self.check_box(val, self.what_box(row, col)):\n self.board[row][col] = val\n \n if self.solve():\n return self.done()\n \n # Didn't work; undo assigment\n self.board[row][col] = ' '\n\n # Bad path; backtrack\n return False", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def create(self, show=False):\n # First create empty Sudoku object, and set of indices of empty squares\n puzzle = Sudoku(\"0 \"*(self.sl**2))\n indices = [i for i in range(self.sl**2)]\n deleted = []\n\n # First add pseudorandom squares into puzzle, try 1/2 of total squares\n num_squares_to_add = (self.sl**2) // 2\n self.random_insertion(puzzle, num_squares_to_add, indices, deleted)\n\n # Repeat steps of deleting/inserting until one solution puzzle created\n while True:\n if show:\n print(render(puzzle.get_puzzle()))\n # Now check if one solution exists, and return Sudoku object if it does\n s = time.time()\n if puzzle.is_one_sol():\n return puzzle\n t = time.time()\n\n # If solving takes too much time, \"revamp\" process by deleting and inserting \n # multiple squares\n if t-s > 0.5:\n dels, ins = 1, 0\n while dels > ins:\n dels = self.random_deletion(puzzle, self.sl*2, indices, deleted)\n ins = self.random_insertion(puzzle, self.sl*10, indices, deleted) \n\n # If not one solution exists and it's solvable, more than one solution exists\n elif puzzle.is_solvable():\n dels, ins = 1, 0\n while dels > ins:\n dels = self.random_deletion(puzzle, self.sl*2, indices, deleted)\n ins = self.random_insertion(puzzle, self.sl*10, indices, deleted)\n\n # Else, there are no solutions, so must delete a square\n else:\n self.random_deletion(puzzle, 1, indices, deleted)\n\n return puzzle", "def copy(self):\n return self._new_rep(self._func(self.rep))", "def get_solution(self):\n objective_value = self.solver.objective.value\n status = self.solver.status\n variables = pd.Series(data=self.solver.primal_values)\n\n fluxes = empty(len(self.reactions))\n rxn_index = list()\n var_primals = self.solver.primal_values\n\n for (i, rxn) in enumerate(self.reactions):\n rxn_index.append(rxn.id)\n fluxes[i] = var_primals[rxn.id] - var_primals[rxn.reverse_id]\n\n fluxes = pd.Series(index=rxn_index, data=fluxes, name=\"fluxes\")\n\n solution = Solution(objective_value=objective_value, status=status,\n fluxes=fluxes)\n\n self.solution = solution\n\n self.solution.raw = variables\n\n self.\\\n solution.values = pd.DataFrame.from_dict({k:v.unscaled\n for k,v in self._var_dict.items()},\n orient = 'index')\n\n return solution", "def get_solution(self):\n solution = self.raw_solution\n if solution is not None:\n return {\n \"solution\": self.raw_solution\n }", "def setPuzzle():\n matrix = tuple() # 
This will be a tuple of tuples to hold the original puzzle set\n\n matrix += ((0, 25, 0, 21, 0, 4, 0, 8, 0, 17, 0),)\n matrix += ((12, 22, 13, 8, 18, 8, 0, 18, 2, 13, 8),)\n matrix += ((0, 14, 0, 24, 0, 21, 0, 22, 0, 22, 0),)\n matrix += ((5, 13, 26, 20, 0, 16, 20, 9, 13, 7, 13),)\n matrix += ((0, 7, 0, 5, 0, 20, 0, 3, 0, 0, 9),)\n matrix += ((20, 16, 22, 0, 0, 0, 0, 0, 21, 17, 3),)\n matrix += ((17, 0, 0, 8, 0, 23, 0, 1, 0, 21, 0),)\n matrix += ((9, 21, 10, 11, 4, 20, 0, 10, 21, 3, 18),)\n matrix += ((0, 18, 0, 4, 0, 8, 0, 13, 0, 3, 0),)\n matrix += ((7, 22, 6, 21, 0, 18, 21, 25, 17, 20, 18),)\n matrix += ((0, 9, 0, 18, 0, 19, 0, 8, 0, 15, 0),)\n\n return matrix", "def solve(self):\n ...", "def get_board_copy(self):\n board_copy = Board()\n board_copy._current_side_color = self._current_side_color\n board_copy._other_side_color = self._other_side_color\n board_copy._rubrics = copy.deepcopy(self._rubrics)\n\n # populate the dict with the copies of the objects:\n for x in range(8):\n for y in range(8):\n piece = board_copy._rubrics[x][y]\n if piece.piece_type != PieceType.PLACEHOLDER:\n board_copy._pieces[piece.color][piece.name] = piece\n\n return board_copy", "def solve(self) -> List[Board]:\n # greedy search\n for seq in permutations([i for i in range(self.n)]):\n b = Board(n=self.n)\n for j in range(self.n):\n b.set_queen(at=(j, seq[j]))\n if validate(board=b):\n self.results.append(b)\n # return early if requires taking a solution\n if self.take_one_solution:\n break\n return self.results", "def __init__(self, puzzle, movesSoFar, blocking, thedeep):\n\t\tself.puzzle = puzzle\n\t\tself.movesSoFar = movesSoFar\n\t\tself.blocking = blocking\n\t\tself.thedeep = thedeep", "def sketch_of_solution(self,sol=None):\n raise NotImplementedError", "def __deepcopy__(self, memodict={}):\n dp = Board()\n dp.board = copy.deepcopy(self.board)\n dp.moves = copy.deepcopy(self.moves)\n dp.num_white_pieces = copy.deepcopy(self.num_white_pieces)\n dp.num_black_pieces = copy.deepcopy(self.num_black_pieces)\n dp.num_white_kings = copy.deepcopy(self.num_white_kings)\n dp.num_black_kings = copy.deepcopy(self.num_black_kings)\n return dp", "def copy(self):\n piece_type = type(self)\n new_piece = piece_type(self.player_name, self.coords, True)\n new_piece.id = self.id\n return new_piece", "def solve(self):\n pass", "def solve(self):\n pass", "def clone(self):", "def give_puzzle(self) -> List[int]:\n return self._puzzle", "def solve(self):", "def __deepcopy__(self, memodict={}) -> 'Board':\r\n squares: Dict[Pos2D, Square] = deepcopy(self.squares)\r\n round_num: int = self.round_num\r\n phase: GamePhase = self.phase\r\n winner: PlayerColor = self.winner\r\n\r\n return Board(squares, round_num, phase, winner)", "def __swap(self, x1, y1, x2, y2):\n temp = self.puzzle.copy()\n temp[x1, y1] = temp[x2, y2]\n temp[x2, y2] = self.puzzle[x1, y1]\n return temp", "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in 
range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def redefineProblem(self):\n self.formulation = cp.Problem(self.obj, self.constraints)", "def redefineProblem(self):\n self.formulation = cp.Problem(self.obj, self.constraints)", "def solution(self) -> State:", "def _swap(self, x1, y1, x2, y2):\n puzzle_copy = [list(row) for row in self.position] # copy the puzzle\n puzzle_copy[x1][y1], puzzle_copy[x2][y2] = puzzle_copy[x2][y2], puzzle_copy[x1][y1]\n\n return puzzle_copy", "def copy(self):\n p = Project()\n p.name = self.name\n p.path = self.path\n p._plugin = self._plugin\n p.stage = self.stage.copy()\n p.stage.project = p\n\n for sprite in self.sprites:\n s = sprite.copy()\n s.project = p\n p.sprites.append(s)\n\n for actor in self.actors:\n if isinstance(actor, Sprite):\n p.actors.append(p.get_sprite(actor.name))\n else:\n a = actor.copy()\n if isinstance(a, Watcher):\n if isinstance(a.target, Project):\n a.target = p\n elif isinstance(a.target, Stage):\n a.target = p.stage\n else:\n a.target = p.get_sprite(a.target.name)\n p.actors.append(a)\n\n p.variables = dict((n, v.copy()) for (n, v) in self.variables.items())\n p.lists = dict((n, l.copy()) for (n, l) in self.lists.items())\n p.thumbnail = self.thumbnail\n p.tempo = self.tempo\n p.notes = self.notes\n p.author = self.author\n return p", "def _load(self):\n if ((self._selection_rate + self._random_selection_rate) / 2) * self._nb_children != 1:\n raise Exception(\"Either the selection rate, random selection rate or the number of children is not \"\n \"well adapted to fit the population\")\n\n values_to_set = fileloader.load_file_as_values(self._model_to_solve)\n zeros_to_count = '0' if len(values_to_set) < 82 else '00'\n print(\"The solution we have to solve is: (nb values to find = {})\".format(values_to_set.count(zeros_to_count)))\n\n self._start_time = time()\n s = Sudoku(values_to_set)\n s.display()\n\n self._run_pencil_mark(s)\n return s", "def solve(self):\n\n # Create the square the represents the beginning of the game when no-one is on the board\n # and add it to the queue\n #\n zero_square = _BoardSquare(square_number=0,\n die_rolls=0,\n previous_square=None,\n previous_roll=0)\n self._board_square_queue.put(zero_square)\n\n while not self._board_square_queue.empty():\n next_node = self._board_square_queue.get()\n square_number = next_node.square_number\n for die_value in range(0, DIE_FACES):\n roll = die_value + 1\n next_square_number = square_number + roll\n\n # when we are near the end, we will get\n # rolls that put us past the last square\n # we can skip those\n if next_square_number > 100:\n continue\n\n # if the board square we land on is a chute or ladder\n # we jump to the target board square instead\n if next_square_number in self._shortcuts:\n next_square_number = self._shortcuts[next_square_number]\n\n # Look up the square in the board list\n # which acts as a hash lookup on board square number\n try:\n existing_square = self._board[next_square_number - 1]\n except Exception:\n # this would happen if we tried to access the\n # hash list oustside of its bounds\n print(\"next_square_number: %d\" % next_square_number)\n raise\n\n if existing_square == None:\n # The square is not in the list yet, so this is\n # the first time we have landed on it\n # create a BoardSquare to represent it\n # and add it to the hash-list\n # and add it to the queue\n square = 
_BoardSquare(square_number=next_square_number,\n die_rolls=next_node.die_rolls + 1,\n previous_square=next_node,\n previous_roll=roll)\n\n self._board[next_square_number - 1] = square\n self._board_square_queue.put(square)\n elif existing_square.die_rolls > next_node.die_rolls + 1:\n existing_square.die_rolls = next_node.die_rolls + 1\n existing_square.previous_square = next_node\n existing_square.previous_roll = roll\n self._board_square_queue.put(existing_square)\n\n #print(self)\n #self.print_solution()", "def checkPuzzle(self):\n print('Got to checkPuzzle')", "def copy_grid (grid):\r\n import copy\r\n g=copy.deepcopy(grid)\r\n return g", "def create_neighbor():\n copy = np.copy(puzzle)\n pair1, pair2 = same_box_pair()\n\n temp = copy[pair1[0], pair1[1]]\n copy[pair1[0], pair1[1]] = copy[pair2[0], pair2[1]]\n copy[pair2[0], pair2[1]] = temp\n\n return copy", "def _exploit(self, trial_executor, trial, trial_to_clone):\n\n trial_state = self._trial_state[trial]\n new_state = self._trial_state[trial_to_clone]\n \n if not new_state.last_checkpoint:\n logger.info(\"[pbt]: no checkpoint for trial.\"\n \" Skip exploit for Trial {}\".format(trial))\n return\n \n # if we are at a new timestep, we dont want to penalise for trials still going\n if self.data['T'].max() > self.latest:\n self.current = None\n \n print(\"\\n\\n\\n\\n Copying: \\n{} \\n with:{} \\n\\n\".format(str(trial), str(trial_to_clone)))\n new_config, lengthscale, mindist, meandist, data = explore(self.data, self.bounds,\n self.current,\n trial_to_clone,\n trial,\n trial_to_clone.config,\n self._hyperparam_mutations,\n self._resample_probability)\n \n # important to replace the old values, since we are copying across\n self.data = data.copy()\n \n # if the current guy youre selecting is at a point youve already done, \n # then append the data to the \"current\" which is the points in the current batch\n \n new = []\n for key in self._hyperparam_mutations.keys():\n new.append(new_config[key])\n \n new = np.array(new)\n new = new.reshape(1, new.size)\n if self.data['T'].max() > self.latest:\n self.latest = self.data['T'].max()\n self.current = new.copy()\n else:\n self.current = np.concatenate((self.current, new), axis=0)\n print(\"\\n\\n\\n\\n\\n Currently Evaluating \\n\\n\\n\\n\\n\")\n print(self.current)\n print(\"\\n\\n\\n\\n\\n\")\n \n # log the lengthscale\n self.meta['timesteps'].append(self.data['T'].values[-1])\n self.meta['lengthscales'].append(lengthscale)\n self.meta['closest'].append(mindist)\n self.meta['meandist'].append(meandist)\n meta = pd.DataFrame({'timesteps': self.meta['timesteps'], \n 'lengthscales': self.meta['lengthscales'],\n 'closest': self.meta['closest'],\n 'meandist': self.meta['meandist']})\n meta.to_csv('meta_data.csv')\n \n logger.info(\"[exploit] transferring weights from trial \"\n \"{} (score {}) -> {} (score {})\".format(\n trial_to_clone, new_state.last_score, trial,\n trial_state.last_score))\n\n if self._log_config:\n self._log_config_on_step(trial_state, new_state, trial,\n trial_to_clone, new_config)\n\n new_tag = make_experiment_tag(trial_state.orig_tag, new_config,\n self._hyperparam_mutations)\n reset_successful = trial_executor.reset_trial(trial, new_config,\n new_tag)\n if reset_successful:\n trial_executor.restore(\n trial, Checkpoint.from_object(new_state.last_checkpoint))\n else:\n trial_executor.stop_trial(trial, stop_logger=False)\n trial.config = new_config\n trial.experiment_tag = new_tag\n trial_executor.start_trial(\n trial, 
Checkpoint.from_object(new_state.last_checkpoint))\n\n self._num_perturbations += 1\n # Transfer over the last perturbation time as well\n trial_state.last_perturbation_time = new_state.last_perturbation_time", "def __init__(self, ar):\n self.a=ar #copy of ar\n self.b=ar #copy of ar\n self.check_if_solvable() #checks if sudoku is solvable or not", "def shuffle(self):\r\n puzzle = self\r\n for _ in range(1000):\r\n puzzle = random.choice(puzzle.actions)[0]()\r\n return puzzle", "def solve(self, problem, warm_start, verbose, solver_opts):\n data, inv_data = self.apply(problem)\n solution = self.solve_via_data(data, warm_start, verbose, solver_opts)\n return self.invert(solution, inv_data)", "def solve_puzzle(self):\n moves = self.moves\n peg_pos = self.peg_pos\n move_z = self.move_to_height\n \n print('Solving Tower of Hanoi:')\n for i, move in enumerate(moves):\n des_peg = move[0]\n des_peg_pos = peg_pos[des_peg]\n \n #move to peg\n print(' Moving to peg: '+str(des_peg)+' at: '+str(des_peg_pos))\n self.move_to(des_peg_pos[0], des_peg_pos[1], move_z)\n \n #if index is even, pickup disk, else drop disk\n if i % 2 == 0:\n print(' Picking up disk at height: '+str(move[1]))\n self.pick(move[1])\n else:\n print(' Dropping disk')\n self.drop()\n print('Finished solving puzzle')", "def copy(self):\n return Node(deepcopy(self.board), self.location, self.stack_size, self.target_location, path=deepcopy(self.path))", "def __init__(self, puzzle, g, path, goal):\n self.puzzle = puzzle\n self.puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n self.g = g\n self.h = self.__cost_to_goal(goal)\n self.path = path", "def get_solution(self):\n start_time = time.clock()\n frontier = [Node(self, None, 0, None)]\n explored = []\n visited = 0\n\n while True:\n visited += 1\n # pop the lowest value from the frontier (sorted using bisect, so pop(0) is the lowest)\n node = frontier.pop(0)\n\n # if the current node is at the goal state, we're done! 
\n if node.board.h() == 0:\n # recursively compile a list of all the moves\n moves = []\n while node.parent:\n moves.append(node.action)\n node = node.parent\n moves.reverse()\n\n print(\"Time:\", time.clock() - start_time)\n return calcal(moves, self.original)\n # print(\"Solution found!\")\n # print(\"Moves:\", len(moves))\n # print(\"Nodes visited:\", visited)\n # print(\"All moves:\", \", \".join(str(move) for move in moves))\n # break\n else:\n # we're not done yet:\n # expand the node, and add the new nodes to the frontier, as long\n # as they're not in the frontier or explored list already\n for new_node in node.expand():\n if new_node not in frontier and new_node not in explored:\n # use bisect to insert the node at the proper place in the frontier\n bisect.insort(frontier, new_node)\n \n explored.append(node)", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def __init__(self, puzzle):\n self._puzzle = puzzle\n self._puzzle_height = puzzle.get_height()\n self._puzzle_width = puzzle.get_width()\n self._root = Tk()\n self._root.title(\"Fifteen Puzzle\")\n self._frame = Canvas(self._root, bg='#FFFFFF',\n width=self._puzzle_width * TILE_SIZE,\n height=self._puzzle_height * TILE_SIZE)\n self._frame.grid(columnspan=4)\n self._solution = \"\"\n self._current_moves = \"\"\n\n Button(self._root, text='Solve', command=self.solve).grid(row=1, column=0)\n Button(self._root, text='Print moves', \n command=self.print_moves).grid(row=1, column=3)\n\n self.input_move = Entry(self._root)\n self.input_move.bind(\"<Return>\", self.enter_moves)\n self.input_move.grid(row=1, column=1)\n\n self._root.bind(sequence='<KeyPress-Up>', func=self.keydown)\n self._root.bind(sequence='<KeyPress-Down>', func=self.keydown)\n self._root.bind(sequence='<KeyPress-Left>', func=self.keydown)\n self._root.bind(sequence='<KeyPress-Right>', func=self.keydown)\n self._root.after(300, self.tick)\n\n self.draw()\n self._root.mainloop()", "def simplify(phi):\n\n # 1. 
only manipulate the copy\n #phic = copy.deepcopy(phi)\n #return phic\n pass", "def solve(grid):\n\n if is_grid_solved(grid):\n return grid\n\n new_grid = copy.deepcopy(grid)\n\n for x_element in range(len(new_grid)):\n for y_element in range(len(new_grid[x_element])):\n if new_grid[x_element][y_element] == 0:\n answers = ExactCover(new_grid, x_element, y_element)\n for answer in answers:\n new_grid[x_element][y_element] = answer\n new_grid = solve(new_grid)\n if not is_grid_solved(new_grid):\n new_grid[x_element][y_element] = 0\n else:\n break\n return new_grid\n\n return new_grid" ]
negative_scores:
[ "0.8077919", "0.7812692", "0.7812692", "0.7812692", "0.7204438", "0.70470625", "0.68246186", "0.6803633", "0.67532885", "0.6707239", "0.6599824", "0.65672135", "0.65080994", "0.65023094", "0.64462394", "0.6381703", "0.63737535", "0.62523", "0.6221241", "0.62164736", "0.61948055", "0.6171669", "0.61620843", "0.6161145", "0.6160078", "0.61448497", "0.61207384", "0.60753185", "0.6046616", "0.60175276", "0.5992016", "0.59481", "0.5929717", "0.5926811", "0.5923679", "0.5862595", "0.5858307", "0.5848717", "0.58473575", "0.583283", "0.5808717", "0.5795809", "0.57906204", "0.5788745", "0.57762283", "0.57299715", "0.5718435", "0.5711042", "0.5709037", "0.56885475", "0.5683948", "0.56702447", "0.56685764", "0.56526446", "0.5651035", "0.56357026", "0.5623204", "0.56169623", "0.56161636", "0.56160104", "0.56094855", "0.56091356", "0.56057715", "0.5603482", "0.5603395", "0.5599816", "0.55827", "0.5575471", "0.5575471", "0.5574253", "0.5568302", "0.55654955", "0.5554837", "0.5538684", "0.5538673", "0.55355495", "0.55355495", "0.55320895", "0.55250317", "0.5518305", "0.5518236", "0.5516992", "0.5505534", "0.5505127", "0.5486867", "0.5472634", "0.5452967", "0.5443996", "0.5440313", "0.543406", "0.5430078", "0.5428885", "0.54218364", "0.54145604", "0.54116315", "0.54115134", "0.5402857" ]
0.7704075
7
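Note: the negatives and negative_scores fields of a row appear to be parallel lists. Below is a minimal sketch, under that pairing assumption, of how one might surface the hardest (highest-scoring) negatives for inspection; the list literals are truncated placeholders, not values copied from the row.

# Sketch only: `negatives` holds candidate code snippets, `negative_scores`
# their retrieval scores as strings. Pairing and sorting them descending
# puts the hardest negatives first. Both lists are truncated stand-ins.
negatives = [
    'def current_position(self, solved_row, solved_col): ...',
    'def solve_puzzle(self): ...',
]
negative_scores = ["0.8077919", "0.7812692"]

ranked = sorted(zip(map(float, negative_scores), negatives), reverse=True)
for score, snippet in ranked:
    print(f"{score:.4f}  {snippet[:60]}")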
Locate the current position of the tile that will be at position (solved_row, solved_col) when the puzzle is solved. Returns a tuple of two integers.
def current_position(self, solved_row, solved_col):
    solved_value = (solved_col + self._width * solved_row)

    for row in range(self._height):
        for col in range(self._width):
            if self._grid[row][col] == solved_value:
                return (row, col)
    assert False, "Value " + str(solved_value) + " not found"
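A minimal, self-contained usage sketch of the method above, assuming it lives on a puzzle class that stores its board row-major in `self._grid` with `self._height` rows and `self._width` columns (names taken directly from the code). The `MiniPuzzle` wrapper is hypothetical scaffolding for illustration.

# Assumed scaffolding: a tiny class exposing the grid fields the method reads.
class MiniPuzzle:
    def __init__(self, grid):
        self._grid = grid
        self._height = len(grid)
        self._width = len(grid[0])

    def current_position(self, solved_row, solved_col):
        # Tile value that belongs at (solved_row, solved_col) in a solved board.
        solved_value = solved_col + self._width * solved_row
        for row in range(self._height):
            for col in range(self._width):
                if self._grid[row][col] == solved_value:
                    return (row, col)
        assert False, "Value " + str(solved_value) + " not found"

# Tile 4 belongs at (1, 1) in a solved 2x3 puzzle; here it sits at (0, 0).
puzzle = MiniPuzzle([[4, 1, 2],
                     [3, 0, 5]])
print(puzzle.current_position(1, 1))  # -> (0, 0)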
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_position(self, solved_row, solved_col):\r\n solved_value = (solved_col + self._width * solved_row)\r\n\r\n for row in range(self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] == solved_value:\r\n return (row, col)\r\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def current_position(self, solved_row, solved_col):\r\n solved_value = (solved_col + self._width * solved_row)\r\n\r\n for row in range(self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] == solved_value:\r\n return (row, col)\r\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def current_position(self, solved_row, solved_col):\r\n solved_value = (solved_col + self._width * solved_row)\r\n\r\n for row in range(self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] == solved_value:\r\n return (row, col)\r\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_interior_tile(self, target_row, target_col):\r\n 
whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, 
target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n 
else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def _get_coordinates(self, tile, position=None):\n if not position:\n position = self.position\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if position[i][j] == tile:\n return i, j\n\n return RuntimeError('Invalid tile value')", "def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += 
self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def get_current_position(self) -> Tuple[int, int]:\n return self.__row_position, self.__col_position", "def solve_interior_tile(self, target_row, target_col):\r\n assert self.lower_row_invariant(target_row, target_col)\r\n row, col = self.current_position(target_row, target_col)\r\n # use move-helper function to get to target tile\r\n move_to_target = self.move_to_target(target_row, target_col, row, col)\r\n \r\n # update the grid\r\n self.update_puzzle(move_to_target)\r\n assert self.lower_row_invariant(target_row, target_col - 1)\r\n return move_to_target", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n 
self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # 
cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn", "def solve_interior_tile(self, target_row, target_col):\n movements = self.move_tile(target_row, target_col,\n target_row * self.get_width() + target_col)\n self.update_puzzle(movements)\n return movements", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def __get_index_pair__(self, target_tile:Union[StaticTile, DynamicTile]) -> tuple:\n for colNum, col in enumerate(self.map):\n for rowNum, tile in enumerate(col):\n if tile == target_tile:\n return (colNum, rowNum)", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', 
self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += 
len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def get_position(self, cell) -> tuple:\n for i, row in enumerate(self.cells):\n if cell in row:\n return row.index(cell), i\n if not isinstance(cell, Cell):\n raise TypeError(f\"Argument should be of type 'Cell', not '{cell.__class__.__name__}'.\")\n raise ValueError(\"The given cell is not a part of the grid.\")", "def find_index(self):\n current = self.from_grid\n #find index of \"*\"\n for x in range(len(current)):\n for y in range(len(current[x])):\n if current[x][y] == \"*\":\n index = (x,y)\n return index", "def find(self, value):\n for row in range(self.getHeight()):\n for column in range(self.getWidth()):\n if self[row][column] == value:\n return (row, column)\n return None", "def position(self) -> Tuple[int, int]:\n return self.row, self.col", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def 
solve_step(self,puzzle_grid,x,y):\n self.puzzleGrid = puzzle_grid\n if(self.foundStep == False):\n self.targetCell = self.puzzleGrid.grid[x][y]\n if(self.targetCell.isSolved == False):\n self.calculate_possibilities()\n if len(self.targetCell.possibilities) == 1: #README method 1\n self.targetCell.solve()\n return True\n else:\n return self.check_neighbours() #README method 2", "def position(square):\n first = square[0]\n second = square[1]\n col = parseCol(first)\n row = parseRow(second)\n return (row, col)", "def get_0_pos(grid):\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == 0:\n return i, j\n return -1, -1", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), \"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n 
counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def GetPosition(board):\n\tfor i in range(len(board.matrix)):\n\t\tfor j in range(len(board.matrix[i])):\n\t\t\tif board.matrix[i][j]==\"X\":\n\t\t\t\treturn i,j", "def solve_puzzle(self):\n # replace with your 
code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def guess(self, row, col) -> Tuple[int, Optional[ship.Ship]]:\n my_ship: ship.Ship = self._board_matrix[row][col]\n\n # if my_ship is None the guess is a miss, otherwise its a hit\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.guess, just copy the code over\n\n # --------- END YOUR CODE ----------", "def find_element(grid, target):\n \n # First, iterate over the row indices\n for row_number in range(len(grid)):\n \n# print(\"Checking row\", row_number)\n \n for col_number in range(len(grid[row_number])):\n \n# print(\"Checking column\", col_number)\n \n if grid[row_number][col_number] == target:\n return (row_number, col_number)\n \n return None", "def __get_random_player_position(self) -> Tuple[int, int]:\n no_player_position = True\n while no_player_position:\n for row in range(0, self.__labyrinth.labyrinth_height):\n for col in range(0, self.__labyrinth.labyrinth_width):\n if self.__labyrinth[row][col] == Labyrinth.FLOOR and no_player_position:\n self.__row_position = row\n self.__col_position = col\n\n if len(self.__path_to_end()) > self.__labyrinth.labyrinth_width and \\\n len(self.__path_to_end()) > self.__labyrinth.labyrinth_height:\n self.__labyrinth[row][col] = Labyrinth.START\n no_player_position = False\n\n return self.__row_position, self.__col_position", "def get_piece_jumping_position(self, captured_piece):\n row_diff = captured_piece.row - self.row # Compares the row/column numbers of the two pieces\n col_diff = captured_piece.col - self.col\n opp_row = row_diff + captured_piece.row\n opp_col = col_diff + captured_piece.col\n return {'opp_row': opp_row, 'opp_col': opp_col}", "def check_cols(self):\r\n for i in range(3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+3][-1] and self.grid[i+3][-1] == self.grid[i+6][-1]:\r\n return (i, (self.grid[i], self.grid[i+6]))\r\n return (-1, None)", "def find_coords_of_selected_sq(self, 
evt):\n # saves row and col tuple into two variables\n column, row = self.get_row_col(evt)\n # normalize for all square size by keeping the floor\n column_floor, row_floor = self.floor_of_row_col(column, row)\n\n corner_column = (column_floor * self.sq_size) + self.sq_size\n corner_row = (row_floor * self.sq_size) + self.sq_size\n return corner_column, corner_row", "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def get_next_empty_cell(self):\n for row in range(len(self.grid)):\n for col in range(len(self.grid[0])):\n if self.grid[row][col] == 0:\n return (row, col)\n return None", "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def find_next_empty_cell(grid):\n for i, row in enumerate(grid):\n for j, col in enumerate(row):\n if col == 0:\n return (i, j)\n return None", "def getGameState(self):\n row1 = [0, 0, 0]\n row2 = [0, 0, 0]\n row3 = [0, 0, 0]\n tilePosStatement = Statement()\n posTerm1 = Term('?x')\n posTerm2 = Term('?y')\n posTerm3 = Term('?tile')\n tilePosStatement.terms = (posTerm1, posTerm2, posTerm3)\n tilePosStatement.predicate = 'tilePos'\n for fact in self.kb.facts:\n if match(fact.statement, tilePosStatement):\n if fact.statement.terms[2] == Term(Constant('tile1')):\n term = 1\n if fact.statement.terms[2] == Term(Constant('tile2')):\n term = 2\n if fact.statement.terms[2] == Term(Constant('tile3')):\n term = 3\n if fact.statement.terms[2] == Term(Constant('tile4')):\n term = 4\n if fact.statement.terms[2] == Term(Constant('tile5')):\n term = 5\n if fact.statement.terms[2] == Term(Constant('tile6')):\n term = 6\n if fact.statement.terms[2] == Term(Constant('tile7')):\n term = 
7\n if fact.statement.terms[2] == Term(Constant('tile8')):\n term = 8\n if fact.statement.terms[2] == Term(Constant('empty')):\n term = -1\n if fact.statement.terms[0] == Term(Constant('pos1')):\n col = 0\n elif fact.statement.terms[0] == Term(Constant('pos2')):\n col = 1\n elif fact.statement.terms[0] == Term(Constant('pos3')):\n col = 2\n if fact.statement.terms[1] == Term(Constant('pos1')):\n row1[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos2')):\n row2[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos3')):\n row3[col] = term\n\n row1 = tuple(row1)\n row2 = tuple(row2)\n row3 = tuple(row3)\n result = (row1, row2, row3)\n return result\n\n ### Student code goes here", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def find_cell( self , x , y , z , start_ijk = None):\n\n if start_ijk:\n start_index = self.__global_index( ijk = start_ijk )\n else:\n start_index = 0\n global_index = self._get_ijk_xyz( x , y , z , start_index)\n if global_index >= 0:\n 
i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n self._get_ijk1( global_index , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k)) \n return (i.value , j.value , k.value)\n else:\n return None", "def part2():\n grid[(0, 0)] = 1\n coordinates_value = 0\n layer = 1\n x = 0; y = 0\n done = False\n while not done:\n # print(\"Layer: \", layer)\n # go right one step\n layer += 1; x += 1\n grid[(x,y)] = check_neighbours((x,y))\n\n # go up to the boundary of layer\n for y_up in range(y+1, layer):\n coord = (x, y_up)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_up\n\n # go left till the boundary of layer\n for x_left in range(x-1, -layer, -1):\n coord = (x_left, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_left\n\n # go down till the boundary of layer\n for y_down in range(y-1, -layer, -1):\n coord = (x, y_down)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_down\n\n # go right till the boundary of layer\n for x_right in range(x+1, layer):\n coord = (x_right, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_right", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). 
\r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def location_of(self, c: str) -> tuple:\n\n c = c.upper()\n if c == 'J': c = 'I'\n\n row = 0\n while row < 5:\n col = self.key[row].find(c)\n\n if col != -1:\n return (row, col)\n\n row += 1\n\n raise ValueError(\"couldn't find letter %r in matrix %r\" % (c, self.key))", "def find_player_position(labyrinth: Labyrinth) -> Tuple[int, int]:\n for row in range(0, len(labyrinth)):\n for col in range(0, len(labyrinth[0])):\n if labyrinth[row][col] == Labyrinth.START:\n return row, col\n\n # todo: handle exception, if there is no field holding 'S' then something is wrong\n return -1, -1", "def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top_y + row * spacing_y", "def get_position(self, number):\n for rowidx, row in enumerate(self.numbers):\n for colidx, num in enumerate(row):\n if num == number:\n return rowidx, colidx", "def findNextMove(curHVal):\n minHCalc = curHVal; #Initializing to curHVal\n minNewPosition = (0,0); #Initializing it\n\n tempBoardPositions = [x for x in positions]\n \n for move in findPlacesToMove():\n #Move number to blank\n tempBoardPositions[curBlank[0]][curBlank[1]] = tempBoardPositions[move[0]][move[1]];\n #Move blank to new move position\n tempBoardPositions[move[0]][move[1]] = 0;\n \n hVal = heuristicValueOfPosition(tempBoardPositions);\n if(hVal < minHCalc):\n minHCalc = hVal;\n minNewPosition = move;\n \n #Resetting board to what it was before this move\n tempBoardPositions[move[0]][move[1]] = tempBoardPositions[curBlank[0]][curBlank[1]]\n #tempBoardPositions[curBlank[0]][curBlank[1]] = 0;\n\n print(\"After calc -- {}\".format(positions))\n \n if minHCalc < curHVal:\n return (minNewPosition, minHCalc)\n else:\n return -1; #That we have reached local minima", "def find_position(self, val):\n edges = np.array(self.cell_edges)\n if val in edges:\n index = np.searchsorted(edges, val)\n return index, index\n else:\n edges -= val\n if edges[0] > 0:\n return -1, 0\n if edges[-1] < 0:\n return 0, -1\n index = 0\n for i, e in enumerate(edges):\n if e > 0:\n index = i\n break\n return index - 1, index", "def __get_cell_state(self, y, x):\n\t\tif 0 <= y <= self.__height - 1:\n\t\t\tif 0 <= x <= self.__width - 1:\n\t\t\t\treturn self.__board[y][x]\n\t\treturn 0", "def get_tile_location(self):\n if self.rect.x == 0:\n tile_x = 0\n elif self.rect.x % 32 == 0:\n tile_x = 
(self.rect.x / 32)\n else:\n tile_x = 0\n\n if self.rect.y == 0:\n tile_y = 0\n elif self.rect.y % 32 == 0:\n tile_y = (self.rect.y / 32)\n else:\n tile_y = 0\n\n return [tile_x, tile_y]", "def get_move(board, player):\n row, col = 0, 0\n return row, col", "def get_move(self, board):\n while True:\n col = random.randint(0, board.width)\n row = board.try_move(col)\n\n if row >= 0:\n break\n\n return row, col", "def get(self,row,col):\r\n return self.puzzle[row][col]", "def check_rows(self):\r\n for i in range(0, len(self.grid),3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+1][-1] and self.grid[i+1][-1] == self.grid[i+2][-1]:\r\n return (i, (self.grid[i], self.grid[i+2]))\r\n return (-1, None)", "def solve_row0_tile(self, target_col):\r\n assert self.row0_invariant(target_col)\r\n move = \"ld\"\r\n self.update_puzzle(move)\r\n \r\n row, col = self.current_position(0, target_col)\r\n if row == 0 and col == target_col:\r\n return move\r\n else:\r\n move_to_target = self.move_to_target(1, target_col - 1, row, col)\r\n # 2x3 puzzle solver\r\n move_to_target += \"urdlurrdluldrruld\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n return move", "def _get_position_grid_column(position, grid_row):\n \n for (box, grid_col_index) in zip(grid_row, range(len(grid_row))):\n if box.contains_point((position.x, position.y)):\n return grid_col_index\n return None", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position (0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def getGameState(self):\n ### Student code goes here\n row1 = ()\n row2 = ()\n row3 = ()\n for currRow in range(1,4):\n for currCol in range(1,4):\n tileFound = False\n for fact in self.kb.facts:\n if fact.statement.predicate == \"located\":\n tile = fact.statement.terms[0].term.element\n column = fact.statement.terms[1].term.element\n row = fact.statement.terms[2].term.element\n\n tileNumber = int(tile[-1])\n columnNumber = int(column[-1])\n rowNumber = int(row[-1])\n\n if rowNumber == currRow and columnNumber == currCol:\n tileFound = True\n if rowNumber == 1:\n row1 += tuple([tileNumber])\n elif rowNumber == 2:\n row2 += tuple([tileNumber])\n elif rowNumber == 3:\n row3 += tuple([tileNumber])\n \n break\n\n if not tileFound:\n if currRow == 1:\n row1 += tuple([-1])\n elif currRow == 2:\n row2 += tuple([-1])\n elif currRow == 3:\n row3 += tuple([-1])\n\n\n return (row1, row2, row3)", "def __find_start(puzzle):\n for i in range(len(puzzle)):\n for j in range(len(puzzle[0])):\n if puzzle[i][j] == 0:\n return i\n return 0", "def _get_coordinates(x,y,z):\n\t\ttemp = Board.board\n\t\ttemp1 = temp=='M'\n\t\tfor i in range(6,x):\n\t\t\tfor j in 
range(y,z):\n\t\t\t\tif(temp1[i][j]==True):\n\t\t\t\t\tcurrent_x = i\n\t\t\t\t\tcurrent_y = j\n\n\t\treturn current_x,current_y", "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(grid):\n puzzle_dict = grid_values(grid)\n return search(puzzle_dict)", "def get_neighbour_squares_idx(self, pos):\n if pos:\n possible_values = {0, 1, 2}\n col_variation = zip( [pos[0], pos[0]], possible_values - {pos[1]} )\n row_variation = zip( possible_values - {pos[0]}, [pos[1], pos[1]] )\n return list(col_variation), list(row_variation)", "def get_tile(self, row, col):\n #print 'The value of tile at position: (',row,',',col,') is: ',self.grid[row][col]\n return self.grid[row][col]", "def get_current_edges(self) -> Tuple[int, int, int, int]:\n top = int(self.tile_rows[0], 2)\n bottom = int(self.tile_rows[-1], 2)\n left = int(''.join([r[0] for r in self.tile_rows]), 2)\n right = int(''.join([r[-1] for r in self.tile_rows]), 2)\n\n return (top, bottom, left, right)", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n self.update_puzzle(move)\r\n return move", "def get_tile(self, row, col):\n # replace with your code\n return 0", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def random_position(self):\n while True:\n h = random.randrange(0, self.height)\n w = random.randrange(0, self.width)\n if self.grid[h, w] == 0:\n return (h, w)", "def sudoku(puzzle):\n positions = all_pos(puzzle)\n if solve(puzzle, positions, 0):\n return puzzle\n return None", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = 
self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def solve_row1_tile(self, target_col):\n cur_row, cur_col = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)\n self.update_puzzle(move_str)\n return move_str", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def outcome(self):\n if self.grid[0][0] == self.grid[1][0] == self.grid[2][0] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[0][1] == self.grid[1][1] == self.grid[2][1] and self.grid[0][1] != 0:\n return self.grid[0][1]\n if self.grid[0][2] == self.grid[1][2] == self.grid[2][2] and self.grid[0][2] != 0:\n return self.grid[0][2]\n if self.grid[0][0] == self.grid[0][1] == self.grid[0][2] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[1][0] == self.grid[1][1] == self.grid[1][2] and self.grid[1][0] != 0:\n return self.grid[1][0]\n if self.grid[2][0] == self.grid[2][1] == self.grid[2][2] and self.grid[2][0] != 0:\n return self.grid[2][0]\n if self.grid[0][0] == self.grid[1][1] == self.grid[2][2] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[0][2] == self.grid[1][1] == self.grid[2][0] and self.grid[0][2] != 0:\n return self.grid[0][2]\n return 0", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def get_grid_position(self):\n tile_size_x = constants.WINDOW_WIDTH / constants.GRID_TILE_LENGTH\n tile_size_y = constants.WINDOW_HEIGHT / constants.GRID_TILE_LENGTH\n grid_x = tile_size_x / self.host.x\n grid_y = tile_size_y / self.host.y\n return grid_x, grid_y", "def get_position(self): # maybe encoded in filepath at some point\n result = (self.iter * self.row_step)% self.row_size, self.iter // (self.row_size * self.row_step)* self.col_step\n self.iter += 1\n return result", "def solve_row0_tile(self, target_col):\n move_str = 'ld'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(0, target_col)\n if cur_row == 0 and cur_col == target_col:\n return move_str\n else:\n move_str += self.position_tile(1, target_col-1, cur_row, cur_col)\n move_str += 'urdlurrdluldrruld'\n self.update_puzzle(move_str[2:])\n return move_str", "def GetPlayerXY(level):\n for row, line in enumerate(level):\n for column, square in enumerate(line):\n if square in \"SQ\":\n return (column, row, square)", "def get_clicked_tile(self, x: int, y: int) -> Optional[Point]:\n\t\ttile_x = x//(self.canvas_width//self.board_size)\n\t\ttile_y = y//(self.canvas_height//self.board_size)\n\n\t\tif tile_x < 0 or tile_x >= self.board_size or tile_y < 0 or tile_y >= self.board_size:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn Point(tile_x, tile_y)", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]", "def find_player(self):\n for y, line in enumerate(self.maze):\n for x, character in enumerate(line):\n if character == \"m\":\n return y, x\n return None", "def solve(self, board) -> None:\n coords = []\n board_len = len(board)\n row_len = len(board[0]) - 1\n # top\n # coords.append([[0, i] for i, q in enumerate(board[0]) if q == 
\"O\"])\n        #  # bottom\n        # coords.append(\n        #     [[board_len, i] for i, q in enumerate(board[board_len]) if q == \"O\"]\n        # )\n        for i in range(board_len):\n            row_coord = [[i,indx] for indx, q in enumerate(board[i]) if q == \"O\"]\n            # import pdb; pdb.set_trace()\n            for x in row_coord:\n                coords.append(x)\n        for x in coords:\n            if len(x) == 0:\n                continue\n            if x[0] == 0:\n                print(\"top border\")\n            elif x[0] == board_len - 1:\n                print(\"bottom border\")\n            elif x[1] == 0:\n                print(\"left border\")\n            elif x[1] == row_len:\n                print(\"right border\")" ]
[ "0.84899926", "0.84899926", "0.84899926", "0.7324289", "0.707103", "0.69124496", "0.6876786", "0.68116784", "0.67074156", "0.6705732", "0.6690645", "0.66753584", "0.6621211", "0.66135", "0.66128737", "0.66079515", "0.6600574", "0.65944666", "0.65453476", "0.65348464", "0.6522486", "0.65102416", "0.65102416", "0.6489102", "0.645828", "0.64574015", "0.64445126", "0.6443785", "0.6411395", "0.64072406", "0.6382274", "0.6377268", "0.63769966", "0.63656807", "0.6327355", "0.63239557", "0.62989044", "0.62954086", "0.62826407", "0.6271351", "0.62484497", "0.62467784", "0.62355995", "0.62331283", "0.6213876", "0.6209451", "0.62038743", "0.61942494", "0.6186904", "0.6185476", "0.6177184", "0.61714035", "0.6170142", "0.61696064", "0.6169341", "0.6168965", "0.61454374", "0.6145199", "0.61434644", "0.6142808", "0.6141101", "0.6138776", "0.6135638", "0.6131897", "0.6124412", "0.6121957", "0.61059844", "0.61049956", "0.608759", "0.6077859", "0.6075795", "0.6069958", "0.60688484", "0.6063666", "0.605925", "0.6046759", "0.6045205", "0.6032613", "0.603238", "0.60165644", "0.6014458", "0.60119766", "0.6002617", "0.59988046", "0.5976635", "0.59749943", "0.5963143", "0.5960357", "0.5957209", "0.5942898", "0.5941709", "0.59376544", "0.59345776", "0.5930893", "0.5929189", "0.5921001", "0.5918666" ]
0.8510771
3
Updates the puzzle state based on the provided move string
def update_puzzle(self, move_string): zero_row, zero_col = self.current_position(0, 0) for direction in move_string: if direction == "l": assert zero_col > 0, "move off grid: " + direction self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1] self._grid[zero_row][zero_col - 1] = 0 zero_col -= 1 elif direction == "r": assert zero_col < self._width - 1, "move off grid: " + direction self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1] self._grid[zero_row][zero_col + 1] = 0 zero_col += 1 elif direction == "u": assert zero_row > 0, "move off grid: " + direction self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col] self._grid[zero_row - 1][zero_col] = 0 zero_row -= 1 elif direction == "d": assert zero_row < self._height - 1, "move off grid: " + direction self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col] self._grid[zero_row + 1][zero_col] = 0 zero_row += 1 else: assert False, "invalid direction: " + direction
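A minimal usage sketch of the update_puzzle document above. The small Puzzle class below is a hypothetical stand-in written for illustration: its constructor signature and the current_position helper are assumptions modeled on the snippets in this record, and the dict-driven move loop is a condensed but behaviorally equivalent rewrite of the document's if/elif chain, not the dataset's actual source class.

# Sketch only: a tiny sliding-tile puzzle that applies "l"/"r"/"u"/"d" move
# strings by swapping the zero tile with its neighbour, one step per character.
class Puzzle:
    def __init__(self, height, width, grid=None):
        self._height = height
        self._width = width
        # Default to the solved grid, numbering tiles row by row (assumption).
        self._grid = grid if grid is not None else [
            [col + width * row for col in range(width)] for row in range(height)]

    def current_position(self, solved_row, solved_col):
        # Locate the tile whose solved position is (solved_row, solved_col);
        # current_position(0, 0) finds the zero tile.
        value = solved_col + self._width * solved_row
        for row in range(self._height):
            for col in range(self._width):
                if self._grid[row][col] == value:
                    return row, col
        raise AssertionError("value not found: " + str(value))

    def update_puzzle(self, move_string):
        # Same behavior as the document above: step the zero tile once per
        # character, asserting every step stays on the grid.
        zero_row, zero_col = self.current_position(0, 0)
        deltas = {"l": (0, -1), "r": (0, 1), "u": (-1, 0), "d": (1, 0)}
        for direction in move_string:
            assert direction in deltas, "invalid direction: " + direction
            drow, dcol = deltas[direction]
            new_row, new_col = zero_row + drow, zero_col + dcol
            assert 0 <= new_row < self._height, "move off grid: " + direction
            assert 0 <= new_col < self._width, "move off grid: " + direction
            self._grid[zero_row][zero_col] = self._grid[new_row][new_col]
            self._grid[new_row][new_col] = 0
            zero_row, zero_col = new_row, new_col

puzzle = Puzzle(2, 2, [[0, 1], [2, 3]])
puzzle.update_puzzle("dr")  # slide the zero tile down, then right
assert puzzle._grid == [[2, 1], [3, 0]]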
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = 
self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def handle_move(self, move_string):\n def map_move(move):\n col = int(ascii_lowercase.find(move[0])) + 1 # dummy col\n row = int(move[1:])\n # if not 0 < col <= game[\"board_width\"]:\n # raise ValueError('bad coord; invalid col in ' + coord)\n # if not 0 < row <= game[\"board_height\"]:\n # raise ValueError('bad coord; invalid row in ' + coord)\n return row*(self.rules[\"row_len\"]) + col\n move = list(map(map_move,move_string.split(' ')))\n self.turn[\"board\"][move[0]].make_move(*move[1:])\n self.turn[\"half_move_clock\"] += 1\n if self.turn[\"active_player\"] == 1:\n self.turn[\"full_move_clock\"] += 1\n self.turn[\"active_player\"] = (self.turn[\"active_player\"] + 1) % 2\n # self.turn[\"board\"][move_start].make_move(move_end)", "def move(self, move):\n out = ''\n for val in self.moves[move]:\n out += self.state[val]\n self.state = out", "def make_move(state: str, section_num: int, move: str) -> str:\n if move == wf.CHECK:\n check_result = wf.check_section(state, section_num)\n if check_result:\n print('The section is correct')\n else:\n print('The section is incorrect')\n else:\n state = wf.change_state(state, section_num, move) \n return state", "def setBoard( self, moveString ): \n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.__width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'", "def set_board(self, move_string):\n next_side = \"X\"\n for col_string in move_string:\n col = int(col_string)\n if col >= 0 and col <= self.width:\n self.add_move(col, next_side)\n if next_side == \"X\":\n next_side = 
\"O\"\n else:\n next_side = \"X\"", "def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'", "def setBoard(self, moveString):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X':\n nextCh = 'O'\n else:\n nextCh = 'X'", "def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.__width:\n self.addMove(col, nextCh)\n if nextCh == 'X': \n nextCh = 'O'\n else: nextCh = 'X'", "def respond_to_move(self, move):\n\n # this will get the piece at the queried position,\n # will notify user if there is no piece there\n current_algebraic, new_algebraic = move\n row, column = self.algebraic_mapped_to_position[current_algebraic]\n if self.board[row][column] == empty_square:\n print(\"There is no piece at %s\" % (current_algebraic,))\n return\n piece, location = self.board[row][column]\n\n # this will get all possible moves from this position\n # and will make the move if the new position is a\n # valid move\n piece_name = self.piece_names[piece]\n moves = self.moves[piece_name]((row, column))\n \n new_row, new_column = self.algebraic_mapped_to_position[new_algebraic]\n print(\"old position %s, %s\" % (row, column))\n print(\"new algebraic %s\" % new_algebraic)\n print(\"new position %s, %s\" % (new_row, new_column))\n print(\"moves %s\" % moves)\n if (new_row, new_column) in moves:\n # this will change the game board to reflect the move\n self.board[row][column] = empty_square\n self.board[new_row][new_column] = piece+location", "def sim_move(self, state, move):\n out = ''\n for val in self.moves[move]:\n out += state[val]\n return out", "def _ai_move(self):\n move = self.AI_MOVES[self.game_board.get_string_board()][0]\n self.game_board.move_pieces(start=move[\"start\"], end=move[\"end\"])\n\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = f\"N/A\"\n self.selected_move = -1\n\n self._sync_gui()", "def move(state=None, actual_move=None):\n copy = state.copy()\n copy.push(chess.Move.from_uci(uci=actual_move))\n return copy", "def update_game_state(self):\n # if board is not filled out, returns a valid move message\n for row in self.board:\n if 0 in row:\n return \"Valid input\"\n\n # if board is filled out, verifies if solution is valid and updates game state\n self.game_state = alg.check_solution(self.board)\n return self.game_state", "def apply_move(self, move):\r\n next_board = copy.deepcopy(self.board)\r\n next_board.place(self.next_player, move)\r\n return GameState(next_board, self.next_player.other, move)", "def apply_move(self, move):\n if self.check_move(move=move):\n self.board_list[move] = self.current_player.marker # changes value in the board to player which is either X or O\n self.moves_made += str(move) # keeps track of all moves\n return True\n else:\n return False", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # 
check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def execute_move(self, game_state):\n game_state.pacs_pos[self.pac_id] = self.next_move", "def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun", "def change_move_state(self, new_state):\n\n if new_state != self.move_state:\n print(\"Changing move state from \", states[self.move_state],\n \" to \", states[new_state])\n self.move_state = new_state\n print(\"move_state is now\", self.move_state)", "def apply_move(self, move, state):\n x, y , heading, grid_data = state\n map_data = [row[:] for row in grid_data]\n if move == self.MOVE_FORWARD:\n # get coordinates for next cell\n if heading == self.UP:\n next_y = y - 1\n next_x = x\n elif heading == self.DOWN:\n next_y = y + 1\n next_x = x\n elif heading == self.LEFT:\n next_y = y\n next_x = x - 1\n else:\n next_y = y\n next_x = x + 1\n\n # handle special tile types\n if map_data[next_y][next_x] == self.ICE_SYMBOL:\n # handle ice tile - slide until first non-ice tile or blocked\n if heading == self.UP:\n for i in range(next_y, -1, -1):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i + 1\n break\n else:\n next_y = i\n break\n elif heading == self.DOWN:\n for i in range(next_y, self.y_size):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i - 1\n break\n else:\n next_y = i\n break\n elif heading == self.LEFT:\n for i in range(next_x, -1, -1):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif 
self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i + 1\n break\n else:\n next_x = i\n break\n else:\n for i in range(next_x, self.x_size):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i - 1\n break\n else:\n next_x = i\n break\n if map_data[next_y][next_x] == self.TELEPORT_SYMBOL:\n # handle teleport - find the other teleporter\n tpy, tpx = (None, None)\n for i in range(self.y_size):\n for j in range(self.x_size):\n if map_data[i][j] == self.TELEPORT_SYMBOL and (i != next_y or j != next_x):\n tpy, tpx = (i, j)\n break\n if tpy is not None:\n break\n if tpy is None:\n raise Exception(\"LaserTank Map Error: Unmatched teleport symbol\")\n next_y, next_x = (tpy, tpx)\n else:\n # if not ice or teleport, perform collision check\n if self.cell_is_blocked(next_y, next_x, map_data):\n return self.COLLISION\n\n # check for game over conditions\n if self.cell_is_game_over(next_y, next_x, map_data):\n return self.GAME_OVER\n\n # no collision and no game over - update player position\n y = next_y\n x = next_x\n return (x, y, heading, map_data)\n\n elif move == self.TURN_LEFT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.LEFT\n elif heading == self.DOWN:\n heading = self.RIGHT\n elif heading == self.LEFT:\n heading = self.DOWN\n else:\n heading = self.UP\n return (x, y, heading, map_data)\n\n elif move == self.TURN_RIGHT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.RIGHT\n elif heading == self.DOWN:\n heading = self.LEFT\n elif heading == self.LEFT:\n heading = self.UP\n else:\n heading = self.DOWN\n return (x, y, heading, map_data)\n\n elif move == self.SHOOT_LASER:\n # set laser direction\n if heading == self.UP:\n laserheading = self.UP\n dy, dx = (-1, 0)\n elif heading == self.DOWN:\n laserheading = self.DOWN\n dy, dx = (1, 0)\n elif heading == self.LEFT:\n laserheading = self.LEFT\n dy, dx = (0, -1)\n else:\n laserheading = self.RIGHT\n dy, dx = (0, 1)\n\n # loop until laser blocking object reached\n ly, lx = (y, x)\n while True:\n ly += dy\n lx += dx\n\n # handle boundary and immovable obstacles\n if ly < 0 or ly >= self.y_size or \\\n lx < 0 or lx >= self.x_size or \\\n map_data[ly][lx] == self.OBSTACLE_SYMBOL:\n # laser stopped without effect\n return self.COLLISION\n\n # handle movable objects\n elif self.cell_is_laser_movable(ly, lx, laserheading, map_data):\n # check if tile can be moved without collision\n if self.cell_is_blocked(ly + dy, lx + dx, map_data) or \\\n map_data[ly + dy][lx + dx] == self.ICE_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.TELEPORT_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.FLAG_SYMBOL or \\\n (ly + dy == y and lx + dx == x):\n # tile cannot be moved\n return self.COLLISION\n else:\n old_symbol = map_data[ly][lx]\n map_data[ly][lx] = self.LAND_SYMBOL\n if map_data[ly + dy][lx + dx] == self.WATER_SYMBOL:\n # if new bridge position is water, convert to land tile\n if old_symbol == self.BRIDGE_SYMBOL:\n map_data[ly + dy][lx + dx] = self.LAND_SYMBOL\n # otherwise, do not replace the old symbol\n else:\n # otherwise, move the tile forward\n map_data[ly + dy][lx + dx] = old_symbol\n break\n\n # handle bricks\n elif map_data[ly][lx] == self.BRICK_SYMBOL:\n # remove brick, replace with land\n map_data[ly][lx] = self.LAND_SYMBOL\n break\n\n # handle 
facing anti-tanks\n elif (map_data[ly][lx] == self.ANTI_TANK_UP_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.ANTI_TANK_DOWN_SYMBOL and laserheading == self.UP) or \\\n (map_data[ly][lx] == self.ANTI_TANK_LEFT_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.ANTI_TANK_RIGHT_SYMBOL and laserheading == self.LEFT):\n # mark anti-tank as destroyed\n map_data[ly][lx] = self.ANTI_TANK_DESTROYED_SYMBOL\n break\n\n # handle player laser collision\n elif ly == y and lx == x:\n return self.GAME_OVER\n\n # handle facing mirrors\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.LEFT):\n # new direction is up\n dy, dx = (-1, 0)\n laserheading = self.UP\n elif (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.RIGHT) or \\\n (self.grid_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.LEFT):\n # new direction is down\n dy, dx = (1, 0)\n laserheading = self.DOWN\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.UP):\n # new direction is left\n dy, dx = (0, -1)\n laserheading = self.LEFT\n elif (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.UP):\n # new direction is right\n dy, dx = (0, 1)\n laserheading = self.RIGHT\n # do not terminate laser on facing mirror - keep looping\n\n # check for game over condition after effect of laser\n if self.cell_is_game_over(y, x, map_data):\n return self.GAME_OVER\n return (x, y, heading, map_data)\n return self.SUCCESS", "def update_game_states(player_move, values):\n moves = ['a', 's', 'd']\n ordered = sorted([n for n in game_states[values].values()\n if type(n) != str])[::-1]\n for i in range(3):\n opt_a_letter = moves[i]\n opt_a_number = game_states[values][opt_a_letter]\n\n opt_b_letter = moves[i-2]\n opt_b_number = game_states[values][opt_b_letter]\n\n if player_move == moves[i]:\n if type(opt_b_number) == int:\n if not (opt_b_number == ordered[0] and\n (opt_b_number-ordered[1]) >= 10):\n game_states[values][opt_b_letter] += 1\n\n elif type(opt_a_number) == int:\n if not (opt_a_number == ordered[0] and\n (opt_a_number-ordered[1]) >= 10):\n game_states[values][opt_a_letter] += 1", "def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n for lst in self.hori_lst:\n temp = []\n for item in lst:\n temp.append(item)\n hori_lst_copy.append(temp)\n left_lst_copy = []\n for lst in self.left_lst:\n temp = []\n for item in lst:\n temp.append(item)\n left_lst_copy.append(temp)\n right_lst_copy = []\n for lst in self.right_lst:\n temp = []\n for item in lst:\n temp.append(item)\n right_lst_copy.append(temp)\n\n hori_result_copy = []\n for item in self.hori_result:\n hori_result_copy.append(item)\n left_result_copy = []\n for item in self.left_result:\n left_result_copy.append(item)\n right_result_copy = []\n for item in self.right_result:\n right_result_copy.append(item)\n\n new_state.hori_lst = hori_lst_copy\n new_state.hori_result = hori_result_copy\n new_state.left_lst = left_lst_copy\n new_state.left_result = left_result_copy\n new_state.right_lst = right_lst_copy\n 
new_state.right_result = right_result_copy\n # update the new state with str move\n # parallel nested list data structure\n lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]\n result = [new_state.hori_result, new_state.left_result, new_state.right_result]\n # update the cell\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(len(lst[i][j])):\n if lst[i][j][k] == move:\n # should use the player name of last state, so opposite names\n if new_state.p1_turn:\n lst[i][j][k] = \"2\"\n else:\n lst[i][j][k] = \"1\"\n # update ley-line marks\n # the ley-line may belong to a player after this move\n p1_taken = 0\n p2_taken = 0\n if result[i][j] != \"@\":\n continue\n for item in lst[i][j]:\n if item == \"1\":\n p1_taken += 1\n if item == \"2\":\n p2_taken += 1\n if float(p1_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"1\"\n if float(p2_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"2\"\n ###### CHECK FOR SHALLOW COPY PROBLEM, IF ATTRIBUTE IS UPDATE IN NEW STATE\n return new_state", "def make_move(self, move):\n if type(move) == str:\n move = int(move)\n\n new_state = SubtractSquareState(not self.p1_turn,\n self.current_total - move)\n return new_state", "def move(self, state):\n raise NotImplementedError(\"Need to implement this method\")", "def make_move(self, board):\n user_input = self.get_user_input(\n 'coordinates of next move (x,y): '\n )\n move = self.transform_user_input(user_input)\n\n valid = board.move_is_valid(move)\n while not valid:\n user_input = self.get_user_input(\n 'Invalid move, coordinate of next move: '\n )\n move = self.transform_user_input(user_input)\n valid = board.move_is_valid(move)\n board.set_piece(move, color=self.color)", "def loop1(self, move, new_state, i):\n if move == i[0]:\n # analyze another cell\n if new_state.letters[i[1]].isalpha():\n new_state.claim[i[2]] \\\n = new_state.get_current_player_name()[1]\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[3]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[4]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[5]]) \\\n and new_state.claim[i[6]] == \"@\":\n new_state.claim[i[6]] \\\n = new_state.get_current_player_name()[1]\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[7]],\n new_state.letters[i[8]],\n new_state.letters[i[9]],\n new_state.letters[i[10]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[11]] == \"@\":\n new_state.claim[i[11]] = \\\n new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def _move_and_update(self, move):\n unlocked_before_move = self.game_state.is_current_level_unlocked()\n self.rule_checker.is_valid_move(self.current_turn.entity, move, self.game_state.current_level)\n self.game_state.move(self.current_turn.entity, move)\n result = self._get_move_result(unlocked_before_move)\n if result != Moveresult.EJECT and result != Moveresult.EXIT:\n self.current_turn.notify(self._format_move_result_notification(move, result))\n self._update_scoreboard(result)", "def update_pos(self, move):\n change = Maze.moves[move]\n self.current_pos[0] += change[0]\n self.current_pos[1] += change[1]", "def loop54(self, move, new_state, i):\n if move == i[0]:\n # analyze the other 2 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or 
new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]) \\\n and new_state.claim[i[3]] == \"@\":\n new_state.claim[i[3]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 5 cells\n count = 0\n for x in [new_state.letters[i[4]],\n new_state.letters[i[5]],\n new_state.letters[i[6]],\n new_state.letters[i[7]],\n new_state.letters[i[8]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[9]] == \"@\":\n new_state.claim[i[9]] = \\\n new_state.get_current_player_name()[1]\n\n # analyze the other 5 cells\n count = 0\n for x in [new_state.letters[i[10]],\n new_state.letters[i[11]],\n new_state.letters[i[12]],\n new_state.letters[i[13]],\n new_state.letters[i[14]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[15]] == \"@\":\n new_state.claim[i[15]] = \\\n new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def play_move(self,state):\n #Keep asking for the next move until a valid move.\n while(True):\n childList = state.get_successors()\n print(\"Your possible moves:\")\n i = 0\n for c in childList:\n if i > 0 and i%4 == 0:\n print()\n print(c.get_action().ljust(10),end=\"\\t\");\n i += 1\n print()\n nextMove = input(\"What is your next move? \\ne.g.'F2-E3' or 'Quit'\\n\")\n #Check if the move is valid\n if nextMove.lower() == 'Quit'.lower():\n return None\n for c in childList:\n if c.get_action().upper() == nextMove.upper():\n return c\n # Move not possible \n print(\"Invalid move!! Please try again...\\n\")", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state", "def loop56(self, move, new_state, i):\n if move == i[0]:\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[1]],\n new_state.letters[i[2]],\n new_state.letters[i[3]],\n new_state.letters[i[4]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[5]] == \"@\":\n new_state.claim[i[5]] = \\\n new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[6]],\n new_state.letters[i[7]],\n new_state.letters[i[8]],\n 
new_state.letters[i[9]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[10]] == \"@\":\n new_state.claim[i[10]] = \\\n new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[11]],\n new_state.letters[i[12]],\n new_state.letters[i[13]],\n new_state.letters[i[14]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[15]] == \"@\":\n new_state.claim[i[15]] = \\\n new_state.get_current_player_name()[1]\n\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def execute_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n board.set_player_perspective(player)\n \n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n dest_spike_index = fields_to_move - 1\n board.remove_checker_from_bar()\n else:\n dest_spike_index = spike_index + fields_to_move\n board.pop_player_checker(spike_index)\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board\n\n board.push_player_checker(dest_spike_index)\n\n return board", "def move(self, state, move_cmd, i, j):\r\n new_state = self.clone_state(state)\r\n coordinate_change = self.action_dic[self.reflection_dic[move_cmd]]\r\n new_state[i][j], new_state[i + coordinate_change[0]][j + coordinate_change[1]] = \\\r\n new_state[i + coordinate_change[0]][j + coordinate_change[1]]\\\r\n , new_state[i][j]\r\n return new_state", "def makeMove(self, moveStr):\r\n\t\tmoveStr = str(moveStr)\r\n\r\n\t\tmoveUci = self._userParseSanToUci(moveStr)\r\n\t\t# print(moveUci)\r\n\r\n\t\tif moveUci is None:\r\n\t\t\treturn\r\n\r\n\t\tresponse = requests.post(f'https://lichess.org/api/board/game/{self.gameId}/move/{moveUci}', headers=self.authHeader)\r\n\r\n\t\tif response.status_code == 200:\r\n\t\t\tlog.debug('Move Successfully Sent')\r\n\r\n\t\telse:\r\n\t\t\tlog.warning(f'Move Unsuccessfully Sent. Status Code: {response.status_code}')", "def move_length_52(self, move, new_state):\n # Then consider the internal1 move\n new1_state = new_state\n if move in [\"D\", \"P\", \"S\"]:\n for i in [[\"D\", 2, 4, 4, 1, 6, 10, 15, 20, 1,\n 0, 7, 12, 18, 24, 17],\n [\"P\", 9, 21, 14, 14, 16, 17, 18, 19,\n 0, 1, 3, 6, 10, 20, 1],\n [\"S\", 13, 23, 7, 14, 15, 16, 17, 19,\n 10, 0, 3, 7, 12, 24, 17]]:\n new1_state = self.loop54(move, new_state, i)\n\n # Then consider the internal2 move\n internal2 = \\\n [[\"G\", 5, 7, 8, 6, 2, 11, 17, 23, 16, 1, 3, 10, 15, 20, 1],\n [\"H\", 5, 6, 8, 6, 4, 11, 16, 21, 3, 0, 3, 12, 18, 24, 17],\n [\"K\", 5, 16, 22, 15, 9, 11, 12, 13, 8, 1, 3, 6, 15, 20, 1],\n [\"M\", 8, 17, 22, 5, 9, 10, 11, 13, 8, 0, 3, 7, 18, 24, 17],\n [\"Q\", 5, 10, 22, 15, 4, 7, 11, 21, 3, 14, 15, 17, 18, 19, 10],\n [\"R\", 8, 12, 22, 5, 2, 6, 11, 23, 16, 14, 15, 16, 18, 19, 10]]\n if move in [\"G\", \"H\", \"K\", \"M\", \"Q\", \"R\"]:\n for i in internal2:\n new1_state = self.loop55(move, new_state, i)\n\n # Finally consider the move \"L\"\n internal3 = [[\"L\", 9, 10, 12, 13, 8, 2, 6,\n 17, 23, 16, 4, 7, 16, 21, 3]]\n if move in [\"L\"]:\n for i in internal3:\n new1_state = self.loop56(move, new_state, i)\n return StonehengeState(not self.p1_turn, new1_state.length,\n new1_state.letters, new1_state.claim)", "def apply_move(b,player,move):\n move = move.strip().lower()\n if len(move)!=2:\n raise Exception(\"Valid move is two characters (e.g. 
A2 or B3)\")\n if move[0] not in COLS:\n move = move[::-1]\n if move[0] not in COLS:\n raise Exception(\"No column spec found\")\n j = COLS.index(move[0])\n i = int(move[1])-1\n if b[i][j] != \" \":\n raise Exception(\"Another move already filled that position\")\n b[i][j] = player", "def loop53(self, move, new_state, i):\n if move == i[0]:\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[3]]) \\\n and new_state.claim[i[4]] == \"@\":\n new_state.claim[i[4]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[5]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[6]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[7]]) \\\n and new_state.claim[i[8]] == \"@\":\n new_state.claim[i[8]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[9]],\n new_state.letters[i[10]],\n new_state.letters[i[11]],\n new_state.letters[i[12]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[13]] == \"@\":\n new_state.claim[i[13]] = \\\n new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def loop52(self, move, new_state, i):\n if move == i[0]:\n # analyze the other 2 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]) \\\n and new_state.claim[i[3]] == \"@\":\n new_state.claim[i[3]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[4]],\n new_state.letters[i[5]],\n new_state.letters[i[6]],\n new_state.letters[i[7]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[8]] == \"@\":\n new_state.claim[i[8]] = \\\n new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[9]],\n new_state.letters[i[10]],\n new_state.letters[i[11]],\n new_state.letters[i[12]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[13]] == \"@\":\n new_state.claim[i[13]] = \\\n new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def update(self):\n if not self._move:\n self.get_next_move()\n if self._move:\n self._move.update()", "def loop51(self, move, new_state, i):\n if move == i[0]:\n # analyze another cell\n if new_state.letters[i[1]].isalpha():\n new_state.claim[i[2]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[3]],\n new_state.letters[i[4]],\n new_state.letters[i[5]],\n new_state.letters[i[6]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[7]] == \"@\":\n new_state.claim[i[7]] = \\\n new_state.get_current_player_name()[1]\n\n # analyze the other 5 cells\n count = 0\n for x in [new_state.letters[i[8]],\n new_state.letters[i[9]],\n new_state.letters[i[10]],\n new_state.letters[i[11]],\n 
new_state.letters[i[12]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[13]] == \"@\":\n new_state.claim[i[13]] = \\\n new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def _move(self, move: Tile):\n if not self.game_state:\n raise RuntimeError(\"Cannot call move when the game has not started!\")\n # Adversaries are supposed to be notified of new info right before they move.\n if issubclass(type(self.current_turn), Enemy):\n current_enemy = next(enemy for enemy in self.enemy_list if enemy.entity.name == self.current_turn.entity.name)\n self._notify_adversary(current_enemy)\n # Players that are alive before this move\n pre_players = self.game_state.get_current_characters()\n completed_before_turn = self.game_state.get_completed_characters()\n if move != None:\n self._move_and_update(move)\n else:\n self.current_turn.notify(self._format_move_result_notification(None, Moveresult.OK))\n # Notify players and adversaries of changes to the gamestate, including players who were killed\n # on this turn. Note that this usually results in one rendering per move to all entities.\n self._update_players()\n self._update_adversaries()\n self._handle_completed_characters(completed_before_turn)\n self._handle_killed_players(pre_players)\n self.current_turn = self.turn_order.next()", "def apply_move(self, start_move, move):\n\t\t# check that the start move and the move are Move objects\n\t\tif not isinstance(move, Move):\n\t\t\tmove = Move(move)\n\t\tif not isinstance(start_move, Move):\n\t\t\tstart_move = Move(start_move)\n\t\t# copy the board\n\t\tnext_board = copy.deepcopy(self.board)\n\t\t# place the move on the next board\n\t\tnext_board.place(self.next_player, start_move.point, move.point)\n\t\treturn GameState(next_board, self.next_player.other, move)", "def loop2(self, move, new_state, i):\n if move == i[0]:\n # analyze the other 2 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]) \\\n and new_state.claim[i[3]] == \"@\":\n new_state.claim[i[3]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[4]],\n new_state.letters[i[5]],\n new_state.letters[i[6]],\n new_state.letters[i[7]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[8]] == \"@\":\n new_state.claim[i[8]] = \\\n new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[9]],\n new_state.letters[i[10]],\n new_state.letters[i[11]],\n new_state.letters[i[12]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[13]] == \"@\":\n new_state.claim[i[13]] = \\\n new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def update_board(board: Board, move: Move) -> Board:\n old_position = move[0]\n new_position = move[1]\n character = board[old_position[0]][old_position[1]]\n board = change_position(board, new_position, character)\n board = clear_position(board, old_position)\n \n return board", "def make_move(self, move, player):\n if not self.test_valid_move( move):\n return False\n self.game_state[move[0]][move[1]] = player", "def move(puzzle: str, 
direction: str):\r\n position_index = puzzle.index(EMPTY)\r\n position = position_index + 1\r\n grid_width = get_grid_width(puzzle)\r\n\r\n # What direction to moved the tile if it's a valid move\r\n if direction == UP:\r\n if (position) > grid_width:\r\n return swap_position(puzzle, position_index, position_index - grid_width)\r\n\r\n elif direction == DOWN:\r\n if (len(puzzle) - position) >= grid_width:\r\n return swap_position(puzzle, position_index, position_index + grid_width)\r\n\r\n elif direction == LEFT:\r\n if (position - 1) % grid_width != 0:\r\n return swap_position(puzzle, position_index, position_index - 1)\r\n\r\n elif direction == RIGHT:\r\n if position % grid_width != 0:\r\n return swap_position(puzzle, position_index, position_index + 1)\r\n\r\n return None", "def loop3(self, move, new_state, i):\n if move == i[0]:\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[3]]) \\\n and new_state.claim[i[4]] == \"@\":\n new_state.claim[i[4]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[5]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[6]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[7]]) \\\n and new_state.claim[i[8]] == \"@\":\n new_state.claim[i[8]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[9]],\n new_state.letters[i[10]],\n new_state.letters[i[11]],\n new_state.letters[i[12]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[13]] == \"@\":\n new_state.claim[i[13]] = \\\n new_state.get_current_player_name()[1]\n\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def move_length_2(self, move, new_state):\n for i in [[\"A\", 1, 2, 2, 0, 3, 6, 8],\n [\"B\", 0, 2, 4, 6, 3, 5, 1],\n [\"C\", 0, 0, 5, 7, 3, 4, 4],\n [\"E\", 1, 6, 6, 3, 2, 3, 4],\n [\"F\", 2, 7, 6, 5, 1, 3, 1],\n [\"G\", 4, 3, 5, 5, 0, 3, 8]]:\n if move == i[0]:\n if new_state.letters[i[1]].isalpha():\n new_state.claim[i[2]] \\\n = new_state.get_current_player_name()[1]\n if new_state.letters[i[3]].isalpha():\n new_state.claim[i[4]] \\\n = new_state.get_current_player_name()[1]\n if new_state.get_current_player_name()[1] \\\n == new_state.letters[i[5]] \\\n or new_state.get_current_player_name()[1] \\\n == new_state.letters[i[6]]:\n new_state.claim[i[7]] \\\n = new_state.get_current_player_name()[1]\n if move == \"D\":\n if new_state.get_current_player_name()[1] == \\\n (new_state.letters[0] or new_state.letters[6]):\n new_state.claim[8] = new_state.get_current_player_name()[1]\n if new_state.get_current_player_name()[1] == \\\n (new_state.letters[1] or new_state.letters[5]):\n new_state.claim[1] = new_state.get_current_player_name()[1]\n if new_state.get_current_player_name()[1] == \\\n (new_state.letters[2] or new_state.letters[4]):\n new_state.claim[4] = new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n\n return StonehengeState(not self.p1_turn, new_state.length,\n new_state.letters, new_state.claim)", "def move(self, movement):\n index = self.state.index(0)\n\n new_state = 
self.state.copy()\n\n if movement == 'up':\n new_state[index], new_state[index - 3] = new_state[index - 3], new_state[index]\n elif movement == 'down':\n new_state[index], new_state[index + 3] = new_state[index + 3], new_state[index]\n elif movement == 'left':\n new_state[index], new_state[index - 1] = new_state[index - 1], new_state[index]\n else:\n # movement == 'right'\n new_state[index], new_state[index + 1] = new_state[index + 1], new_state[index]\n \n return new_state", "def make_move(self, state):\n emptySpaces = 0\n for row in state:\n emptySpaces = emptySpaces + row.count(' ')\n if emptySpaces > 17:\n drop_phase = True\n else:\n drop_phase = False\n\n move = []\n if not drop_phase:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, False, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]!= ' ' and best_state[i][j]== ' ':\n move.append((i,j))\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n # TODO: choose a piece to move and remove it from the board\n # (You may move this condition anywhere, just be sure to handle it)\n #\n # Until this part is implemented and the move list is updated\n # accordingly, the AI will not follow the rules after the drop phase!\n \n\n # select an unoccupied space randomly\n # TODO: implement a minimax algorithm to play better\n \n else:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, True, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n return move", "def move(self, action):\n \n currentState = self.state\n\n if action == \"up\":\n newState = (self.state[0] - 1, self.state[1])\n elif action == \"down\":\n newState = (self.state[0] + 1, self.state[1])\n elif action == \"right\":\n newState = (self.state[0], self.state[1] + 1)\n elif action == \"left\":\n newState = (self.state[0], self.state[1] - 1)\n else:\n raise NameError(action, 'This is not a valid action!')\n\n # Need to check if the new state is a legal move\n if (newState[0] >= 0) & (newState[0] <= 1) & (newState[1] >= 0) & (newState[1] <= 2):\n return newState\n else:\n print('This move takes you off the board, you have not moved!')\n return currentState", "def loop55(self, move, new_state, i):\n if move == i[0]:\n\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[3]]) \\\n and new_state.claim[i[4]] == \"@\":\n new_state.claim[i[4]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[5]],\n new_state.letters[i[6]],\n new_state.letters[i[7]],\n new_state.letters[i[8]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[9]] == \"@\":\n new_state.claim[i[9]] = \\\n new_state.get_current_player_name()[1]\n\n # analyze the other 5 cells\n count = 0\n for x in 
[new_state.letters[i[10]],\n new_state.letters[i[11]],\n new_state.letters[i[12]],\n new_state.letters[i[13]],\n new_state.letters[i[14]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[15]] == \"@\":\n new_state.claim[i[15]] = \\\n new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def decide_move(self, game_state):\n # Get all possible moves\n valid_pos = game_state.get_valid_positions(game_state.pacs_pos[self.pac_id], 'pac')\n # Get the value of the expression tree for each possible move.\n # Feed the calculator the values of G, P, W, F, M instead of\n # recalculating those values each time we hit them in the tree.\n valid_pos_vals = [ self.tree.root.calc([game_state.G(pos),\n game_state.P(pos),\n game_state.W(pos),\n game_state.F(pos),\n game_state.M(pos, pac_id = self.pac_id)]) \\\n for pos in valid_pos ]\n # Find the index of the highest-valued move\n new_pos_idx = valid_pos_vals.index(max(valid_pos_vals))\n # Set the next move\n self.next_move = valid_pos[new_pos_idx]", "def get_move(state):\n entry = game_states[get_values(state)]\n options = list()\n\n for move in entry:\n move_result = entry[move]\n if move_result == 'Y':\n return move\n elif move_result == 'N':\n continue\n options.extend([move]*move_result)\n return choice(options)", "def update_board(self,move, _testing : bool = True ) -> bool :\r\n\r\n temp = self.board.copy()\r\n self.count = 0\r\n\r\n for direction in DIRECTIONS:\r\n self.update_direction(move,direction)\r\n\r\n if self.count == 0:\r\n self.board = temp\r\n return False\r\n else:\r\n if _testing:\r\n self.board = temp\r\n else:\r\n self.board[move[0],move[1]] = self.turn\r\n return True", "def play_move(self,state):\n raise AIError(\"Must be implemented for child class!\")", "def fix_move(self, invalid_move: QMove):\n\n # TODO: reduce time_per_game second by second\n ERROR_MSG = f\"INVALID_MOVE {invalid_move.to_string()}\"\n\n if self.is_ai and self.proc is not None:\n self.proc.stdin.write(str.encode(ERROR_MSG + '\\n'))\n self.proc.stdin.flush()\n new_move = QMove(os.read(self.proc.stdout.fileno(), 100))\n else:\n new_move = QMove(\n input(\"Move was invalid, enter a valid move:\\n\\t>> \"))\n\n return new_move", "def move(state, pos, rel_pos):\n new_state = state.copy()\n return swap(new_state, pos, pos + rel_pos)", "def move(self, board):\n\n # We record all game positions to feed them into the NN for training with the corresponding updated Q\n # values.\n self.board_position_log.append(board.getState().copy())\n\n nn_input = self.board_state_to_nn_input(board.getState())\n probs, _ = self.get_valid_probs([nn_input], self.q_net, [board])\n probs = probs[0]\n # print(probs)\n # print(type(probs))\n # print(probs.shape)\n # input()\n # print(probs)\n # Most of the time our next move is the one with the highest probability after removing all illegal ones.\n # Occasionally, however we randomly chose a random move to encourage exploration\n if (self.training is True) and \\\n ((self.game_counter < self.pre_training_games) or (np.random.rand(1) < self.random_move_prob)):\n available = []\n for index in range(6):\n if probs[index] != -1.0:\n available.append(index)\n randomOne = random.randint(0,len(available)-1)\n move = available[randomOne]\n else:\n move = np.argmax(probs)\n # We record the action we selected as well as the Q values of the current state for later use when\n # adjusting NN 
weights.\n self.action_log.append(move)\n\n # We execute the move and return the result\n board.makeMove(move)\n return board.getState(), board.isOver()", "def update_board(self, move):\n #new_move equals the gird with selection(Which is the players input)\n new_move = self.grid[move]\n\n # check if column selected by player is full if the first index (top) has a game piece\n if new_move[0] != \" \" :\n return True\n\n # this will get the correct column and add the player's move\n # subtract player column selection by 1 to select correct column\n adjustment = -1\n while new_move[adjustment] != \" \":\n adjustment -= 1\n\n # update the grid with the selected column by the player\n new_move[adjustment] = self.playing_player[1]\n return False", "def move_length_3(self, move, new_state):\n # First consider the move on 6 corners\n for i in [[\"A\", 1, 2, 2, 5, 0, 3, 7, 11, 11],\n [\"B\", 0, 2, 4, 8, 8, 3, 6, 9, 1],\n [\"F\", 9, 9, 2, 0, 0, 6, 7, 8, 6],\n [\"J\", 5, 9, 10, 11, 7, 6, 3, 1, 1],\n [\"I\", 11, 5, 4, 1, 8, 5, 6, 7, 6],\n [\"L\", 8, 5, 9, 10, 7, 3, 0, 7, 11]]:\n if move == i[0]:\n if new_state.letters[i[1]].isalpha():\n new_state.claim[i[2]] \\\n = new_state.get_current_player_name()[1]\n if new_state.get_current_player_name()[1] \\\n == new_state.letters[i[3]] \\\n or new_state.get_current_player_name()[1] \\\n == new_state.letters[i[4]]:\n new_state.claim[i[5]] \\\n = new_state.get_current_player_name()[1]\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[6]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[7]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[8]]) \\\n and new_state.claim[i[9]] == \"@\":\n new_state.claim[i[9]] \\\n = new_state.get_current_player_name()[1]\n\n # then consider the move on the middle of each side\n for i in [[\"C\", 0, 5, 0, 3, 4, 4, 6, 10, 10],\n [\"E\", 1, 8, 8, 2, 3, 4, 7, 10, 3],\n [\"K\", 9, 11, 7, 2, 6, 10, 7, 4, 3]]:\n if move == i[0]:\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]) \\\n and new_state.claim[i[3]] == \"@\":\n new_state.claim[i[3]] \\\n = new_state.get_current_player_name()[1]\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[4]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[5]]) \\\n and new_state.claim[i[6]] == \"@\":\n new_state.claim[i[6]] \\\n = new_state.get_current_player_name()[1]\n\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[7]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[8]]) \\\n and new_state.claim[i[9]] == \"@\":\n new_state.claim[i[9]] \\\n = new_state.get_current_player_name()[1]\n # Finally consider the internal move\n internal = [[\"D\", 2, 4, 4, 1, 6, 9, 1, 0, 7, 11, 11],\n [\"G\", 2, 10, 10, 9, 3, 1, 1, 5, 7, 8, 6],\n [\"H\", 4, 10, 3, 0, 3, 11, 11, 5, 6, 8, 6]]\n for i in internal:\n if move == i[0]:\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]) \\\n and new_state.claim[i[3]] == \"@\":\n new_state.claim[i[3]] \\\n = new_state.get_current_player_name()[1]\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[4]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[5]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[6]]) \\\n and new_state.claim[i[7]] == \"@\":\n new_state.claim[i[7]] \\\n = 
new_state.get_current_player_name()[1]\n\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[8]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[9]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[10]]) \\\n and new_state.claim[i[11]] == \"@\":\n new_state.claim[i[11]] \\\n = new_state.get_current_player_name()[1]\n\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n\n return StonehengeState(not self.p1_turn, new_state.length,\n new_state.letters, new_state.claim)", "def interactive_strategy(game: Game) -> str:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)", "def select_move(self, game_state):\n raise NotImplementedError()", "def play_move(board, move):\n\tboard_copy = list(board)\n\n\tboard_copy[move] = 'o'\n\treturn ''.join(board_copy)", "def is_valid_move(state, move):\n row, col = move\n if row not in [1, 2, 3] or col not in [1, 2, 3]:\n print(\"Invalid move! Specify correct game square!\")\n return False\n if state[row-1][col-1] != '_':\n print('Invalid move! Place your marker on a free square!')\n return False\n return True", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_state = StonehengeState(self.p1_turn, self.length,\n self.letters[:], self.claim[:])\n state = new_state\n if new_state.length == 1:\n state = self.move_length_1(move, new_state)\n if new_state.length == 2:\n state = self.move_length_2(move, new_state)\n if new_state.length == 3:\n state = self.move_length_3(move, new_state)\n if new_state.length == 4:\n if move in [\"A\", \"B\", \"J\", \"O\", \"N\", \"R\",\n \"C\", \"F\", \"E\", \"I\", \"P\", \"Q\"]:\n state = self.move_length_4(move, new_state)\n else:\n state = self.move_length_41(move, new_state)\n if new_state.length == 5:\n if move in [\"A\", \"B\", \"U\", \"O\", \"T\", \"Y\",\n \"C\", \"J\", \"E\", \"N\", \"V\", \"X\"]:\n state = self.move_length_5(move, new_state)\n elif move in [\"F\", \"I\", \"W\"]:\n state = self.move_length_51(move, new_state)\n else:\n state = self.move_length_52(move, new_state)\n return state", "def move(self, board):\n # first, make your turn:\n currentState = board[self.x,self.y]\n turnDir = self.rule[(currentState + 1) % len(self.rule)]\n self.turn( int(turnDir) )\n # next, let's change this cell's state:\n if currentState >= len(self.rule) - 1:\n board[self.x,self.y] = 0\n else:\n board[self.x,self.y] = currentState + 1\n # and let's move:\n offsets = self.nextPositionOffset() # based on x, y, and dir\n self.x, self.y = board.move(self.x, self.y, offsets[0], offsets[1])", "def step(self, move):\r\n self.board.push_uci(move)\r\n self.num_halfmoves += 1", "def move(self, piece):\n\n if list(piece) in self.find_moves():\n self.block[tuple( self.find_free() )] = self.block[tuple(piece)]\n self.block[tuple(piece)] = 0\n return \"success\"\n else:\n return \"error\"", "def move_length_1(self, move, new_state):\n for i in [[\"A\", 1, 2, 2, 5, 0],\n [\"B\", 0, 2, 2, 1, 4],\n [\"C\", 0, 5, 1, 1, 3]]:\n if move == i[0]:\n if new_state.letters[i[1]].isalpha():\n new_state.claim[i[2]] \\\n = new_state.get_current_player_name()[1]\n if new_state.letters[i[3]].isalpha():\n new_state.claim[i[4]] \\\n = new_state.get_current_player_name()[1]\n new_state.claim[i[5]] = \\\n new_state.get_current_player_name()[1]\n\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n\n return StonehengeState(not self.p1_turn, new_state.length,\n new_state.letters, 
new_state.claim)", "def update_after_move(self, move):\n self.toggle_next_player()\n self.status = self.get_status(move)", "def place_piece(self, move, piece):\n self.totMoves+=1\n print(move)\n if len(move) > 1:\n self.board[move[1][0]][move[1][1]] = ' '\n self.board[move[0][0]][move[0][1]] = piece", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def execute_move(self, move, color):\n\n #Much like move generation, start at the new piece's square and\n #follow it on all 8 directions to look for a piece allowing flipping.\n\n # Add the piece to the empty square.\n # print(move)\n flips = [flip for direction in self.__directions\n for flip in self._get_flips(move, direction, color)]\n assert len(list(flips))>0\n for x, y in flips:\n #print(self[x][y],color)\n self[x][y] = color", "def play_move(self, move):\n color, move_tuple = convert_from_GTP(move, self.size)\n result = self.game.move_stone(move_tuple[0], move_tuple[1], color)\n if result.status != gogame.VALID:\n raise PlayError, result.status\n self.board.PlaceMove(move_tuple, color)\n self.board.ErasePieces(result.prisoners)\n self.board.SwitchCurColor()\n\n self.curWatcher.play(move)\n\n #if there are consecutive passes, end the game, else reset pass count\n if move == \"pass\":\n self.isPass += 1\n if self.isPass == 2:\n self.end_game()\n else:\n self.isPass = 0\n\n #switch the players\n temp = self.curPlayer\n self.curPlayer = self.curWatcher\n self.curWatcher = temp\n \n if self.isPass < 2:\n self.call_for_move()\n else:\n self.end_game()", "def attempt_move(self, move_input):\n # handle undo move\n if move_input == ['UN', 0, 'UN']:\n self.undo_move()\n return True\n\n # handle stock draw Special Action first\n if move_input == ['S0', 0, 'S0']:\n self.save_board_state()\n self.stock.deal_to_wp(self.wp)\n self.moves += 1\n return True\n\n # handle basic cases\n if len(move_input) != 3:\n return False\n if move_input[0] not in self.move_dict or move_input[2] not in self.move_dict:\n return False\n if type(move_input[1]) is not int:\n return False\n if move_input[2] == \"W0\":\n return False\n\n orig_pile = self.move_dict[move_input[0]]\n orig_ind = move_input[1]\n dest_pile = self.move_dict[move_input[2]]\n if orig_ind >= orig_pile.get_length():\n return False\n\n # handle flip tableau card Special Action\n if move_input[0][0] == 'T' and orig_pile == dest_pile and orig_ind == 0:\n orig_pile.reveal_top_card()\n\n # basic conditions have been met\n adj_ind = orig_pile.get_length() - orig_ind - 1\n if orig_pile.is_valid_retrieval(orig_ind):\n self.save_board_state()\n move_pile = orig_pile.remove_cards(orig_ind + 1)\n if dest_pile.is_valid_placement(move_pile):\n dest_pile.merge_pile(move_pile)\n if move_input[0][0] == 'T' and self.auto_flip_tab:\n orig_pile.reveal_top_card()\n self.moves += 1\n return True\n else:\n orig_pile.merge_pile(move_pile)\n self.board_states.pop()\n return False\n return False", "def result(self, board_state, move):\n # Create a copy of the current board state\n output_state = BoardState(other_state=board_state)\n # Swap pieces\n output_state.move_piece(move)\n # Eliminate pieces\n output_state.eliminate_piece()\n return output_state", "def enter_moves(self, event):\n self._solution = self.input_move.get()", "def update_state(self, new_state):\n position_mapping = {STATE_OPEN: 100, STATE_CLOSED: 0}\n hk_position = position_mapping.get(new_state.state)\n if hk_position is not None:\n self.char_current_position.set_value(hk_position)\n self.char_target_position.set_value(hk_position)\n 
if new_state.state == STATE_OPENING:\n self.char_position_state.set_value(1)\n elif new_state.state == STATE_CLOSING:\n self.char_position_state.set_value(0)\n else:\n self.char_position_state.set_value(2)", "def move_length_4(self, move, new_state):\n # First consider the move on 6 corners\n if move in [\"A\", \"B\", \"J\", \"O\", \"N\", \"R\"]:\n corners = [[\"A\", 1, 2, 2, 5, 9, 0, 3, 7, 12, 17, 14],\n [\"B\", 0, 2, 4, 8, 13, 10, 3, 6, 10, 14, 1],\n [\"J\", 14, 11, 5, 2, 0, 0, 10, 11, 12, 13, 8],\n [\"O\", 9, 11, 15, 16, 17, 9, 10, 6, 3, 1, 1],\n [\"N\", 17, 7, 1, 4, 8, 10, 9, 10, 11, 12, 8],\n [\"R\", 13, 7, 14, 15, 16, 9, 0, 3, 7, 12, 14]]\n for i in corners:\n new_state = self.loop1(move, new_state, i)\n return StonehengeState(not self.p1_turn, new_state.length,\n new_state.letters, new_state.claim)\n\n # then consider the move on the middle of each side\n middle = [[\"C\", 3, 4, 4, 6, 11, 16, 13, 0, 5, 9, 0],\n [\"F\", 10, 15, 12, 9, 2, 0, 0, 6, 7, 8, 6],\n [\"E\", 2, 3, 4, 1, 8, 13, 10, 7, 11, 15, 3],\n [\"I\", 12, 16, 5, 5, 6, 7, 6, 1, 4, 13, 10],\n [\"P\", 5, 10, 12, 14, 16, 17, 9, 11, 7, 4, 3],\n [\"Q\", 12, 8, 5, 14, 15, 17, 9, 2, 6, 11, 13]]\n for i in middle:\n if move == i[0]:\n # analyze the other 2 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]) \\\n and new_state.claim[i[3]] == \"@\":\n new_state.claim[i[3]] \\\n = new_state.get_current_player_name()[1]\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[4]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[5]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[6]]) \\\n and new_state.claim[i[7]] == \"@\":\n new_state.claim[i[7]] \\\n = new_state.get_current_player_name()[1]\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[8]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[9]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[10]]) \\\n and new_state.claim[i[11]] == \"@\":\n new_state.claim[i[11]] \\\n = new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n\n return StonehengeState(not self.p1_turn, new_state.length,\n new_state.letters, new_state.claim)", "def apply_move(board_state, move, side):\n move_x, move_y = move\n\n def get_tuples():\n for x in range(len(board_state)):\n if move_x == x:\n temp = list(board_state[x])\n temp[move_y] = side\n yield tuple(temp)\n else:\n yield board_state[x]\n\n return tuple(get_tuples())", "def play_move(self,state):\n self.__engine.set_state(state)\n result = self.__engine.getNextState()\n time_elapsed = self.__engine.get_time_elapsed()\n num_nodes = self.__engine.get_num_explored()\n if self.moves == 0:\n self.average_time = time_elapsed\n self.average_nodes = num_nodes\n else:\n self.average_time = ( (self.average_time * self.moves) + time_elapsed ) / (self.moves+1)\n self.average_nodes = ( (self.average_nodes * self.moves) + num_nodes ) / (self.moves+1)\n self.moves += 1\n return result", "def move_1_piece(context: GUI, old_coordinate, new_coordinate):\n\n old_tile = context.board.board_dict[old_coordinate]\n new_tile = context.board.board_dict[new_coordinate]\n\n new_tile.piece = old_tile.piece\n old_tile.piece = None\n\n context.update_move_printer(old_coordinate + \" \" + new_coordinate)", "def update(self, 
move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def move_step(self, move):\n # Check that the move is valid\n steps = self.mgr.obj.steps\n if len(steps) == 0:\n return\n idx = self.stepsListWidget.currentRow()\n idx_max = len(steps) - 1\n if (idx+move < 0) or (idx+move > idx_max):\n return\n \n # Insert the step at its new location, then delete it at the old location\n steps.insert(idx+move+(move>0), steps[idx])\n del steps[idx if move>0 else idx+1]\n \n self.load_steps()\n self.stepsListWidget.setCurrentRow(idx+move)\n self.mgr.changed = True", "def get_move() -> str:\n msg = 'Enter a move for that section (C to check, S to swap, R to rotate): '\n move = input(msg)\n while not wf.is_valid_move(move):\n print('Invalid move!')\n move = input(msg) \n return move", "def move_simplifier(move_input) -> str:\n short_input = move_input.strip().lower()\n short_input = short_input.replace(\"in rage\", \"\")\n\n for old, new in const.REPLACE.items():\n short_input = short_input.replace(old, new)\n\n # cd works, ewgf doesn't, for some reason\n if short_input[:2].lower() == 'cd' and short_input[:3].lower() != 'cds':\n short_input = short_input.lower().replace('cd', 'fnddf')\n if short_input[:2].lower() == 'wr':\n short_input = short_input.lower().replace('wr', 'fff')\n return short_input", "def api_make_move(self, move_input):\n return self.board.attempt_move(move_input)", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def update_to_state(self, game_state):\n pass", "def _update_board(self, move: int) -> None:\n row = self._column_to_row[move] # Find what row to place the disk in\n if self._is_red_active:\n self.board_array[row][move] = 1\n self.hash = self.hash ^ int(self._red_hash_keys[row][move]) # Update hash\n else:\n self.board_array[row][move] = -1\n self.hash = self.hash ^ int(self._yellow_hash_keys[row][move]) # # Update hash\n\n self._column_to_row[move] += 1\n if self._column_to_row[move] == 6:\n self._valid_moves.remove(move)", "def run_match(player_move, comp_move, state):\n #Player/computer is vulnerable if they chose to reload\n player_vuln = False\n comp_vuln = False\n state['player_prev'] = player_move\n state['comp_prev'] = comp_move\n\n #Update game variables\n if player_move == 'd':\n state['player_ammo'] += 1\n player_vuln = True\n if comp_move == 'd':\n state['comp_ammo'] += 1\n comp_vuln = True\n if player_move == 'a':\n if comp_vuln:\n return 'N'\n state['player_ammo'] -= 1\n if comp_move == 'a':\n if player_vuln:\n return 'Y'\n state['comp_ammo'] -= 1\n\n return game_states[get_values(state)]" ]
[ "0.8120185", "0.8120185", "0.8120185", "0.8101825", "0.8077781", "0.7200039", "0.70454454", "0.6661715", "0.6583404", "0.6582128", "0.65785265", "0.65703756", "0.6560121", "0.6289467", "0.6268087", "0.61335135", "0.61134064", "0.60638404", "0.605805", "0.6051816", "0.60436565", "0.6034461", "0.60166997", "0.6004741", "0.59758765", "0.5964111", "0.5961281", "0.5947658", "0.5941391", "0.59357363", "0.59317714", "0.5905455", "0.5897025", "0.5879351", "0.5859501", "0.58204925", "0.581324", "0.5805421", "0.5804164", "0.5803279", "0.57996786", "0.5798522", "0.5790429", "0.57878107", "0.5779558", "0.57773286", "0.5769633", "0.5760063", "0.5751905", "0.5744024", "0.5743781", "0.57430613", "0.57360244", "0.5734451", "0.5733622", "0.57306594", "0.57163316", "0.57151425", "0.5715027", "0.5712829", "0.57100564", "0.57039243", "0.5701308", "0.5678244", "0.56681985", "0.56593823", "0.56590444", "0.5619049", "0.5618691", "0.5610248", "0.5609572", "0.5606038", "0.5603069", "0.5602733", "0.56023943", "0.56018597", "0.5599381", "0.55993253", "0.5589339", "0.5586523", "0.5581515", "0.5574841", "0.55716753", "0.55696976", "0.55677015", "0.5563588", "0.55635047", "0.5557694", "0.55453694", "0.55382967", "0.553101", "0.55224824", "0.55192405", "0.5513172", "0.5506038", "0.5506038", "0.54960865", "0.5490323", "0.54889756" ]
0.81526875
1
Check whether the puzzle satisfies the specified invariant at the given position in the bottom rows of the puzzle (target_row > 1). Returns a boolean
def lower_row_invariant(self, target_row, target_col):
    assert target_row > 1, 'target_row invalid'
    result = True
    if self._grid[target_row][target_col] != 0:
        result = False
    for row in range(target_row + 1, self._height):
        for col in range(self._width):
            solved_value = (col + self._width * row)
            if solved_value != self._grid[row][col]:
                result = False
    for col in range(target_col + 1, self._width):
        solved_value = (col + self._width * target_row)
        if solved_value != self._grid[target_row][col]:
            result = False
    return result
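A minimal usage sketch (not part of the original record, and the 3x3 grid below is a hypothetical example): the method only assumes an object exposing _grid, _height and _width, where the solved value of tile (row, col) is col + width * row and 0 marks the blank tile, so a types.SimpleNamespace can stand in for the puzzle class when the function above is defined at module level.

import types

# Hypothetical 3x3 puzzle: blank at (2, 0); the tile to its right in row 2
# holds its solved value (col + 3 * row gives 7 and 8), and no rows lie below.
puzzle = types.SimpleNamespace(
    _grid=[[4, 1, 2],   # rows above target_row may still be scrambled
           [3, 5, 6],
           [0, 7, 8]],
    _height=3,
    _width=3,
)

print(lower_row_invariant(puzzle, 2, 0))  # True: zero at (2, 0), rest of row solved
print(lower_row_invariant(puzzle, 2, 1))  # False: tile at (2, 1) is 7, not 0

Passing the namespace as self works because the method touches only those three attributes; the scrambled upper rows do not affect the check, which is exactly what the invariant is meant to guarantee during bottom-up solving.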
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def row1_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[1][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row1_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 2:\r\n # print 'All conditions are correct!'\r\n return True", "def row0_invariant(self, target_col):\r\n # asserts that curr_tile is in target_col\r\n if self.get_number(0, target_col) != 0:\r\n return False\r\n # asserts that tile (0,j) is solved, the grid below (0,j) and to the right is solved \r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(0, self.get_height()):\r\n if dummy_i > 1 or (dummy_i == 0 and dummy_j > target_col) or (dummy_i == 1 and dummy_j >= target_col):\r\n if (dummy_i, dummy_j) != self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True", "def row1_invariant(self, target_col):\r\n # assert that row 1 is solved\r\n if not self.lower_row_invariant(1, target_col):\r\n return False\r\n # asserts that tile proceeded to (1,j), the grid below (1,j) and to the right is solved\r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(2, self.get_height()):\r\n if not (dummy_i, dummy_j) == self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True", "def row0_invariant(self, target_col):\n result = True\n if self._grid[0][target_col] != 0:\n result = False\n if self._grid[1][target_col] != (target_col + self._width * 1):\n result = False\n for row in range(2, self._height):\n for col in range(self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n for row in (0, 1):\n for col in range(target_col+1, self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n return result", "def row0_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[0][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n \r\n\r\n for ind in range(len(self._grid[1][target_col:])):\r\n if self.current_position(1, ind+target_col) != (1, ind+target_col):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 3:\r\n # print 'All conditions are cprrect!'\r\n return True", "def row1_invariant(self, target_col):\n result = True\n if self._grid[1][target_col] != 0:\n result = False\n for row in range(2, self._height):\n for col in range(self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n for row in (0, 1):\n for col in range(target_col+1, self._width):\n solved_value = (col + self._width * row)\n if 
solved_value != self._grid[row][col]:\n result = False\n return result", "def row0_invariant(self, target_col):\n \n # Returns False if zero tile is NOT in target position (0, target_col).\n if self.get_number(0, target_col) != 0:\n return False\n \n # Returns False if tiles to the right of target_col are NOT positioned correctly.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(0, col) != col:\n return False\n \n # Returns False if tiles to the right of target_col in row 1 are NOT positioned correctly.\n for col in range(target_col, self.get_width()):\n if self.get_number(1, col) != col + self.get_width():\n return False\n\n # Returns False if tiles in rows 2 and below are NOT positioned correctly.\n if 1 < self.get_height():\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True", "def row1_invariant(self, target_col):\n \n # Returns False if zero tile is NOT in target position (1, target_col).\n if self.get_number(1, target_col) != 0:\n return False\n \n # Returns False if tiles to the right of target_col are NOT positioned correctly.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(1, col) != col + (1 * self.get_width()):\n return False\n\n # Returns False if tiles in rows 2 and below are NOT positioned correctly.\n if 1 < self.get_height():\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True", "def lower_row_invariant(self, target_row, target_col):\r\n conditions = 0\r\n curent = self._grid[target_row][target_col] == 0\r\n if curent:\r\n conditions +=1\r\n else:\r\n print 'Tile ZERO is not at current position'\r\n return False\r\n\r\n last_row_ind = self._height - 1\r\n if target_row != last_row_ind:\r\n lower_row = target_row + 1\r\n for ind in range(len(self._grid[lower_row])):\r\n if self.current_position(lower_row, ind) != (lower_row, ind):\r\n print 'Some tile in the lower row does not in correct place' \r\n return False\r\n conditions += 1\r\n # print len(self._grid[target_row])\r\n # print self._grid[target_row]\r\n # print self._grid[target_row][target_col+1:]\r\n right_part = self._grid[target_row][target_col+1:]\r\n \r\n for tile in range(1,len(right_part)+1):\r\n # print right_part.index(self._grid[target_col+1])\r\n # print tile\r\n # print self.current_position(target_row, target_col + tile)\r\n # print (target_row, target_col+tile)\r\n if self.current_position(target_row, target_col+tile) != (target_row, target_col+tile):\r\n print 'Right part tile does not in correct place'\r\n return False\r\n conditions +=1\r\n if conditions == 3:\r\n print 'All conditions are correct!'\r\n return True", "def row0_invariant(self, target_col):\n # replace with your code\n if self.get_number(0, target_col) != 0:\n return False\n current = 0\n for row in range(2, self.get_height()):\n if target_col == self.get_width() - 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 4'\n return False\n current += 1\n current = self._grid[1][target_col]\n for grid in self._grid[1][target_col:]:\n if grid != current:\n print 'Error 5'\n return False\n current += 1\n return True", "def lower_row_invariant(self, target_row, 
target_col):\r\n \r\n if self.get_number(target_row, target_col) != 0:\r\n return False\r\n for tile in range(target_col + 1, self._width):\r\n if self.current_position(target_row, tile) != (target_row, tile):\r\n return False\r\n for dummy_j in range(target_row + 1, self._height):\r\n for dummy_i in range(0, self._width):\r\n if self.current_position(dummy_j, dummy_i) != (dummy_j, dummy_i):\r\n return False\r\n return True", "def row0_invariant(self, target_col):\r\n \r\n solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[0][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n if self._grid[1][target_col] != solved_grid[1][target_col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right", "def lower_row_invariant(self, target_row, target_col):\n # Tile zero is positioned at (i,j).\n if self.get_number(target_row, target_col) != 0:\n return False\n # All tiles in rows i+1 or below are positioned at their solved location.\n for row in range(target_row + 1, self.get_height()):\n start_col = (target_col if row <= 1 else 0)\n for col in range(start_col, self.get_width()):\n if not self.right_number(row, col):\n return False\n # All tiles in row i to the right of position (i,j) are positioned at\n # their solved location.\n for col in range(target_col + 1, self.get_width()):\n if not self.right_number(target_row, col):\n return False\n return True", "def lower_row_invariant(self, target_row, target_col):\n \n # Returns False if target cell is NOT the zero tile.\n if self.get_number(target_row, target_col) != 0:\n return False\n \n # Returns False if cells to the right of target_col \n # are NOT positioned in their solved locations.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(target_row, col) != col + (target_row * self.get_width()):\n return False\n\n # Returns False if cells in rows target_row + 1 and below \n # are NOT positioned at their solved locations.\n if target_row < self.get_height():\n for row in range(target_row + 1, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True", "def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])", "def lower_row_invariant(self, target_row, target_col):\n # replace with your code\n if self.get_number(target_row, target_col) != 0:\n print 'Error 1: Current number is not 0'\n return False\n current = 0\n for row in range(target_row + 1, self.get_height()):\n if target_col == self.get_width() - 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 2'\n return False\n current += 1\n if target_col != self.get_width() - 1:\n current = self._grid[target_row][target_col + 1]\n for grid in self._grid[target_row][target_col + 1:]:\n if grid != current:\n print 'Error 3'\n return False\n current += 1\n return True", "def row1_invariant(self, target_col):\r\n 
solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[1][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right", "def lower_row_invariant(self, target_row, target_col):\r\n # Tile zero is positioned at (i,j).\r\n # All tiles in rows i+1 or below are positioned at their solved location.\r\n # All tiles in row i to the right of position (i,j) are positioned at their solved location.\r\n solved_lower = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[target_row][target_col] == 0:\r\n solved_lower = True\r\n \r\n for row in range(target_row + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower = False\r\n \r\n for col in range(target_col + 1, self._width):\r\n if self._grid[target_row][col] != solved_grid[target_row][col]:\r\n solved_lower = False\r\n \r\n return solved_lower", "def row1_invariant(self, target_col):\n # replace with your code\n if self.lower_row_invariant(1, target_col):\n return True\n return False", "def row_constraint(board: Board, row: int, col: int) -> bool:\n for i in range(col):\n if board.is_queen(row=row, col=i):\n return False\n return True", "def check_constraints(board: Board, row: int, col: int) -> bool:\n if not row_constraint(board=board, row=row, col=col):\n return False\n if not upper_diagonal_constraint(board=board, row=row, col=col):\n return False\n if not lower_diagonal_constraint(board, row, col):\n return False\n return True", "def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )", "def on_board(hexe):\n\n cube = axial_to_cube(hexe)\n\n # check each bound\n for axis in cube:\n if abs(axis) > BOARD_BOUND:\n return False\n return True", "def checkSolution(self):\n movesToEndblock = self.gridSize - self.changeable[0] - 2\n if self.checkMove(0,movesToEndblock) == 0:\n return 0\n return 1", "def checkSafe(Board, rows, column):\n for x in range(rows):\n if (Board[x] == column or\n Board[x] + rows - x == column or\n Board[x] + x - rows == column):\n return False\n return True", "def check(self) -> bool:\n\n\t\treturn all([all(row) for row in self.board])", "def checkvalid(self,borad,row,col,n):\n # check the above column has 'Q'\n i=0\n while i!=row:\n if borad[i][col]=='Q':\n return False\n i+=1\n # check the left-top 135 and right-top 45\n i,j=row-1,col-1\n while i>=0 and j>=0:\n if borad[i][j]=='Q':\n return False\n i-=1\n j-=1\n \n i,j=row-1,col+1\n while i>=0 and j<n:\n if borad[i][j]=='Q':\n return False\n i-=1\n j+=1\n \n return True", "def check_bounds(self, row: int, col: int) -> bool:\n return 0 <= row < self.row and 0 <= col < self.col", "def goal_test(state): \n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] != i*size + j:\n return False \n return True", "def is_solved(self, grid: list):\n # Iterates over rows\n for i in range(9):\n\n if 0 in grid[i]: # Looks for 0s\n return False\n for j in 
range(9):\n if not self.validate_cell(grid, i, j): # validates each cell\n return False\n return True", "def is_possible_grid(self,row,col,user_value):\n start_row = row - (row % 3)\n start_col = col - (col % 3)\n for x in range(3):\n for y in range(3):\n if self.arr[x+start_row][y+start_col] == user_value:\n logging.debug(f\"is_posssible_grid(): (False) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_grid(): (True) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} != {user_value}\")\n return True", "def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)", "def is_upper_triangular(self):\n self.check_square()\n\n for i in range(self.rows):\n for j in range(i):\n if self[i, j] != 0:\n return False\n return True", "def check(chessboard, row, col, n):\n for i in range(col):\n if chessboard[row][i] == 1:\n return False\n\n for j, i in zip(range(row, -1, -1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n \n for j, i in zip(range(row, n, 1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n\n return True", "def __can_enter(self, position, traversed):\n row, col = position\n # Check index values\n if row < 0 or col < 0:\n return False\n if row >= self.__row_count or col >= self.__col_count:\n return False\n # Check if already traversed\n if traversed[row][col]:\n return False\n # Check if blocked\n if self.__grid[row][col].blocked:\n return False\n return True", "def check_pivot_row(self, row):\r\n all_zeros = True\r\n for i in range(self.SIZE):\r\n if self.matrix[row][i] != 0:\r\n all_zeros = False\r\n break\r\n\r\n if all_zeros:\r\n self.check_solvability(0, self.matrix[row][-1])", "def valid(game_board, value, row, col):\n if len(value) > 1:\n value = \"X\"\n # Check row of new position\n for i in range(len(game_board[row])):\n if game_board[row][i] == value and i != col:\n return False\n\n # Check column of new position\n for i in range(len(game_board)):\n if game_board[i][col] == value and i != row:\n return False\n\n # Check the 3x3 square area\n start_row = 3 * (row // 3)\n start_col = 3 * (col // 3)\n for i in range(start_row, start_row+3):\n for j in range(start_col, start_col+3):\n if game_board[i][j] == value and i != row and j != col:\n return False\n\n return True", "def check_rows(self):\n\t\tfor i in range(len(self.board)):\n\t\t\tpts = 0\n\t\t\tfor j in range(len(self.board[i])):\n\t\t\t\tif self.board[i][j] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('YOU WON')\n\t\t\t\treturn True", "def is_solvable(self, row=0, col=0):\n if row == self.sl-1 and col == self.sl: \n return True\n\n # If column is the side length, mvoe indices to next row\n if col == self.sl:\n return self.is_solvable(row+1, 0)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_solvable(row, col + 1)\n\n # If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n solved = self.is_solvable(row, col + 1) \n self.puzzle[row][col] = 0\n\n # If value solves puzzle, return solved\n if solved:\n return solved\n\n 
return False", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", "def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True", "def check_quadline(self, row: int, col: int, drow: int, dcol: int) -> bool:\n count = 1\n token = self.get_token(row, col)\n count_token = 1\n while self.check_bounds(row+drow, col+dcol) and count <= 3:\n if self.grid[row+drow][col+dcol] == token:\n row += drow\n col += dcol\n count_token += 1\n if count_token == 4:\n return True\n count += 1\n return False", "def is_solvable(self) -> bool:\r\n inv_count = 0\r\n arr = self.current_state.flatten()\r\n for i in range(0, 9):\r\n for j in range(i + 1, 9):\r\n if arr[j] and arr[i] and arr[i] > arr[j]:\r\n inv_count += 1\r\n return inv_count % 2 == 0", "def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False", "def upper_diagonal_constraint(board: Board, row: int, col: int) -> bool:\n #move to first tile in diagonal\n row -= 1\n col -= 1\n #while still in board, test and move to next position return false if fails\n while row >= 0 and col >= 0:\n if board.is_queen(row=row, col=col):\n return False\n row -= 1\n col -= 1\n return True", "def valid(self):\n # Verify correct vertex values\n self.verify_vertex_values()\n # Check for duplicate values in lines\n for line in range(9):\n seen = []\n for row in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in rows\n for row in range(9):\n seen = []\n for line in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in subgrids\n for (subgrid_line, subgrid_row) in [(subg_ln, subg_rw) for subg_ln in range(3) for subg_rw in range(3)]:\n seen = []\n for (line, row) in [(ln, rw) for ln in range(3) for rw in range(3)]:\n if self.grid[3*subgrid_line + line][3*subgrid_row + row] is None:\n pass\n elif self.grid[3*subgrid_line + 
line][3*subgrid_row + row] in seen:\n return False\n else:\n seen.append(self.grid[3*subgrid_line + line][3*subgrid_row + row])\n # No duplicates found\n return True", "def is_solvable(board: list) -> bool:\n inv_count = invserion_count(board)\n return inv_count%2 == 0", "def __is_valid_value(self, target_row, target_col, value):\n if value == 0:\n return True # 0's are always a valid value since they are a placeholder (signify empty position)\n\n # Check row and column:\n for i in range(9):\n if self.final_values[i][target_col] == value and i != target_row: # Check column\n return False # Value appears on the same column twice\n if self.final_values[target_row][i] == value and i != target_col: # Check row\n return False # Value appears on the same row twice\n\n # Find start of 3x3 block:\n block_row = target_row - (target_row % 3)\n block_col = target_col - (target_col % 3)\n\n # Check each element in the 3x3 block:\n for row in range(3):\n for col in range(3):\n if value == self.final_values[block_row + row][block_col + col] and block_row + row != target_row and block_col + col != target_col:\n return False # Value appears in the same block twice\n\n return True # Value does not appear in the same row, col or block", "def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False", "def is_valid_number(self):\n for condition in [self.game.getRow(self.pos), self.game.getCol(self.pos), self.game.getSquare(self.pos)]:\n if not self.check_alignement_condition(condition):\n return False\n return True", "def test_cell_existence(board: list, i: int, j: int) -> bool:\n return not (i < 0 or i > len(board)-1 or j < 0 or j > len(board)-1)", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def is_in_the_grid(self, row: int, col: int) -> bool:\n return 0 <= row < self.n_row and 0 <= col < self.n_col", "def valid_goal(map, goal):\n row, col = goal\n height_, width_ = map.shape\n\n # if the end point is out of the map, return infinity\n if col < 0 or row < 0 or col > width_ or row > height_:\n return False\n\n # bottom edge\n if row == 0:\n if col == 0:\n return map[0, 0] == 0\n if col == width_:\n return map[0, width_ - 1] == 0\n return map[0, col - 1] == 0 or map[0, col] == 0\n\n # top edge\n if row == height_:\n if col == 0:\n return map[height_ - 1, 0] == 0\n if col == width_:\n return map[height_ - 1, width_ - 1] == 0\n return map[height_ - 1, col - 1] == 0 or map[height_ - 1, col] == 0\n\n # left edge\n if col == 0:\n return map[row - 1, 0] == 0 or map[row, 0] == 0\n\n # right edge\n if col == width_:\n return map[row - 1, width_ - 1] == 0 or map[row, width_ - 1] == 0\n\n # internal\n return map[row - 1, col - 1] == 0 or map[row, col - 1] == 0 \\\n or map[row - 1, col] == 0 or map[row, col] == 0", "def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True", "def square_is(position, expected, world):\n\n result = False\n size = len(world) - 1\n\n if size >= position[0] >= 0 and size >= position[1] >= 0:\n result = world[position[0]][position[1]] == expected\n\n return result", "def isSafe(board, row, col, n):\n\n \"\"\" veriying the row on left 
side \"\"\"\n for i in range(col):\n if board[row][i] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row,-1,-1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row, n, 1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n return True", "def is_four_in_row(board, row, column):\n sequence = [board[row][column] for j in range(4)]\n if is_subset(sequence, board[row]):\n return True\n else:\n return False", "def can_attack(self, aq: object) -> bool:\n if self.row == aq.row and self.column == aq.column:\n raise ValueError(\"Same queen\")\n return (self.row == aq.row\n or self.column == aq.column\n or self.row - self.column == aq.row - aq.column\n or self.row + self.column == aq.row + aq.column)", "def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False", "def check_correctness(sol_list, board, pents):\n # All tiles used\n if len(sol_list) != len(pents):\n return False\n # Construct board\n sol_board = np.zeros(board.shape)\n seen_pents = [0]*len(pents)\n for pent, coord in sol_list:\n pidx = get_pent_idx(pent)\n if seen_pents[pidx] != 0:\n return False\n else:\n seen_pents[pidx] = 1\n if not add_pentomino(sol_board, pent, coord, True, pents): \n return False\n \n # Check same number of squares occupied\n if np.count_nonzero(board) != np.count_nonzero(sol_board):\n return False\n # Check overlap\n if np.count_nonzero(board) != np.count_nonzero(np.multiply(board, sol_board)):\n return False\n \n return True", "def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols", "def is_winning(self):\n\n current_board = self.current_board\n\n # check rows\n for row in current_board:\n row = set(row)\n if (\"X\" not in row and \"-\" not in row) or (\"O\" not in row and \"-\" not in row):\n return True\n\n # check columns\n for i in range(len(current_board)):\n column_to_check = set()\n \n for j in range(len(current_board)):\n column_to_check.add(current_board[j][i])\n\n if (\"X\" not in column_to_check and \"-\" not in column_to_check) or (\"O\" not in column_to_check and \"-\" not in column_to_check):\n return True\n \n # check diagonals\n forward_diagonal_check = set()\n backward_diagonal_check = set()\n \n for i in range(len(current_board)):\n forward_diagonal_check.add(current_board[i][i])\n backward_diagonal_check.add(current_board[i][len(current_board)-1-i])\n\n if forward_diagonal_check == {\"X\"} or forward_diagonal_check == {\"O\"}:\n return True\n\n if backward_diagonal_check == {\"X\"} or backward_diagonal_check == {\"O\"}:\n return True", "def _check_if_position_on_board(coord: tuple, board_size: int):\n in_row = coord[0] in range(board_size)\n in_col = coord[1] in range(board_size)\n return in_row and in_col", "def is_posssible_row(self,row,user_value):\n for col in range(9):\n if self.arr[row][col] == user_value:\n logging.debug(f\"is_posssible_row(): (False) row: {row} col: {col} arr{self.arr[row][col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_row(): (True) row: {row} col: {col} arr{self.arr[row][col]} != {user_value}\")\n return True", "def _pre_check(self) -> bool:\n if self._fuse_row:\n rows = (\n self._tiling.cells_in_row(self._row_idx),\n self._tiling.cells_in_row(self._row_idx + 1),\n )\n else:\n rows = (\n self._tiling.cells_in_col(self._col_idx),\n self._tiling.cells_in_col(self._col_idx + 
1),\n )\n has_a_long_row = any(len(row) > 1 for row in rows)\n if has_a_long_row:\n return False\n first_cell = next(iter(rows[0]))\n second_cell = next(iter(rows[1]))\n cells_are_adjacent = (\n first_cell[0] == second_cell[0] or first_cell[1] == second_cell[1]\n )\n if not cells_are_adjacent:\n return False\n same_basis = (\n self._tiling.cell_basis()[first_cell][0]\n == self._tiling.cell_basis()[second_cell][0]\n )\n if not same_basis:\n return False\n self._first_cell = first_cell\n self._second_cell = second_cell\n return True", "def valid(n, board, row, col):\n for i in range(col):\n if board[row][i] == 1:\n return False\n x = row\n y = col\n while x >= 0 and y >= 0:\n if board[x][y] == 1:\n return False\n x -= 1\n y -= 1\n x = row\n y = col\n while x < n and y >= 0:\n if board[x][y] == 1:\n return False\n x += 1\n y -= 1\n return True", "def goal_test(self, state):\n return state.board==range(self.N*self.N)", "def win_condition(self, player):\n\n row_list = []\n column_list = []\n constant_condition = False\n row_sequential_condition = False\n column_sequential_condition = False\n\n # Loop through positions on board for player\n for position_key, position_obj in sorted(self.board.positions.items()):\n if position_obj.value == player.value:\n row_list.append(position_obj.row)\n column_list.append(position_obj.column)\n\n # Either row keys or column keys must stay constant\n row_set = set(row_list)\n column_set = set(column_list)\n if len(row_set) == 1 or len(column_set) == 1:\n constant_condition = True\n\n # The other row keys or column keys must be sequential for number of row or columns\n row_seq_list = [n for n in range(1, self.board.rows + 1)]\n column_seq_list = [n for n in range(1, self.board.columns + 1)]\n if row_list == row_seq_list:\n row_sequential_condition = True\n if column_list == column_seq_list:\n column_sequential_condition = True\n\n if constant_condition and (row_sequential_condition or column_sequential_condition):\n return True\n elif row_sequential_condition and column_sequential_condition:\n return True\n else:\n return False", "def _is_valid_land(x, y, grid):\n return (x >= 0) and (x < len(grid)) and (y >= 0) and (y < len(grid[0])) and grid[x][y]", "def is_solved(self):\n # Iterate through each square of the puzzle\n for row in range(self.sl):\n for col in range(self.sl):\n val = self.puzzle[row][col]\n\n # If any square value is blank (0), not solved, return False\n if val == 0:\n return False\n\n # Trick to keep DRY code: replace each value temporarily with a\n # 0, and use valid_square method with original value to determine\n # if every square is valid\n self.puzzle[row][col] = 0\n valid = self.valid_square(row, col, val)\n self.puzzle[row][col] = val\n \n # If not a valid value for square, return False\n if not valid:\n return False\n return True", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def 
solve_one(board: Board, col: int) -> bool:\n #Completed board found\n if col >= board.size:\n return True\n for row in range(board.size):\n #check if position is valid\n if check_constraints(board=board, row=row, col=col):\n #update board and continue BFS\n board.mark_tile(row=row, col=col)\n if solve_one(col=col+1, board=board):\n return True\n board.unmark_tile(row=row, col=col)\n #no valid solutions for current board position\n return False", "def is_goal(self):\r\n\t\tfor row in range(self.n):\r\n\t\t\tfor col in range(self.n):\r\n\t\t\t\tif self.board[row][col] != BoardClass.goal[row][col]:\r\n\t\t\t\t\treturn False\r\n\r\n\t\treturn True", "def check_position_is_legal(grid, num, i, j):\n args = (grid, num, i, j)\n return (not check_row(*args)) and (not check_col(*args)) and (not check_local_square(*args))", "def in_row(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x != col and n == grid[row][x]:\n return True\n return False", "def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n return False\n return True", "def is_bishop_move_valid(self, from_row, from_col, to_row, to_col):\n # if not on same colored diagonal exit.\n if abs(from_row - to_row) != abs(from_col - to_col):\n return False\n\n # check if any pieces are in the way of destination\n dr = 1 if to_row - from_row > 0 else -1\n dc = 1 if to_col - from_col > 0 else -1\n dm = abs(to_row - from_row)\n return self._any_piece_in_way(from_row, from_col, dr, dc, dm)", "def is_in_chessboard(row_or_col):\n\n nonzero, = row_or_col.nonzero()\n\n # compute the approximate number of crossed squares\n squares = 0\n for i, j in zip(nonzero, nonzero[1:]):\n if j - i >= min_square_dim:\n squares += 1\n\n return squares >= 8", "def is_cell_row_valid(board, r, c):\n return all(board[r][i] <= board[r][c] for i in xrange(len(board[r])))", "def is_winning(self, curr_state):\n rows = [[0,1,2], [3,4,5], [6,7,8]]\n columns = [[0,3,6], [1,4,7], [2,5,8]]\n diagonal = [[0,4,8], [2,4,6]]\n total_checks = rows + columns + diagonal\n for row in total_checks:\n sum = 0\n count = 0\n for pos in row:\n if np.isnan(curr_state[pos]):\n break\n else:\n sum = sum + curr_state[pos]\n count = count + 1\n if sum == 15 and count == 3:\n return True\n return False", "def valid_guess(self, row, col):\n # if row nor col is at an edge space, returns False\n if not isinstance(row, int) or not isinstance(col, int):\n return False\n # ensures no corner spaces have been selected\n if row < 1 or row > 8:\n return False\n if col < 1 or col > 8:\n return False\n return True", "def is_complete(sudoku_board):\n BoardArray = sudoku_board.CurrentGameBoard\n size = len(BoardArray)\n subsquare = int(math.sqrt(size))\n\n #check each cell on the board for a 0, or if the value of the cell\n #is present elsewhere within the same row, column, or square\n for row in range(size):\n for col in range(size):\n if BoardArray[row][col]==0:\n return False\n for i in range(size):\n if ((BoardArray[row][i] == BoardArray[row][col]) and i != col):\n return False\n if ((BoardArray[i][col] == BoardArray[row][col]) and i != row):\n return False\n #determine which square the cell is in\n SquareRow = row // subsquare\n SquareCol = col // subsquare\n for i in range(subsquare):\n for j in range(subsquare):\n if((BoardArray[SquareRow*subsquare+i][SquareCol*subsquare+j]\n == BoardArray[row][col])\n and (SquareRow*subsquare + i != row)\n and (SquareCol*subsquare + j != col)):\n return False\n return True", "def check_row(self, num, num_row):\n row = 
self.return_row(num_row)\n for board_num in row:\n if num == board_num:\n return True\n return False", "def solve(grid):\n find = find_empty(grid)\n if not find:\n return True\n\n row, col = find\n for i in range(1, 10):\n if valid(grid, i, (row, col)):\n grid[row][col] = i\n if solve(grid):\n return True\n grid[row][col] = 0\n return False", "def checkWithinBound(rowWithinBound,colWithinBound):\n if(rowWithinBound == 0 and colWithinBound == 0):\n return True\n else:\n return False", "def row_win(board, player):\n for row in board:\n if check_row(row, player):\n return True\n return False", "def feasible(self):\n return self.lowBound <= self._value <= self.upBound", "def is_valid_board(self):\n total = sum(range(1, self.n+1))\n d = {x : [set([]), set([])] for x in range(1, self.n+1)}\n for row_index in range(self.n):\n for col_index in range(self.n):\n num = self.board[row_index][col_index]\n try:\n if row_index in d[num][0] or col_index in d[num][1]:\n print(\"Invalid solution.\")\n return\n except KeyError:\n print(\"Unsolved solution.\") # d[0]\n return\n\n d[num][0].add(row_index)\n d[num][1].add(col_index)\n print(\"Valid solution!\")", "def square_check(self):\n return len(self.matrix) == len(self.matrix[0])", "def is_legal_move(player, row_from, col_from, row_to, col_to):\r\n illegal_moves = [(0, 0), (2, 0), (0, 4), (2, 4)]\r\n\r\n \"\"\"special moves that are move available according to diagram\r\n List of tuples to and from values that are not possible\"\"\"\r\n moves_not_permitted = [[(0, 2), (1, 1)], [(0, 2), (1, 3)], [(1, 1), (2, 2)], [(1, 3), (2, 2)]]\r\n row_diff = abs(row_from - row_to)\r\n col_diff = abs(col_from - col_to)\r\n\r\n if player == 'hounds':\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\r\n \"\"\"\r\n if (col_to - col_from) < 0: # no moves to the left of the board\r\n return False\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n \"\"\"When player is a hare\"\"\"\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\"\"\"\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n\r\n else:\r\n return False", "def isComplete(grid):\n for row in 
range(0,9):\n for col in range(0,9):\n if grid[row][col]==0:\n return False\n return True", "def is_at_target_position(self, position, tolerance=0.0):\n x, _ = position\n return x > self.corridor_length - tolerance", "def is_queen_move_valid(self, from_row, from_col, to_row, to_col):\n # if not on same colored diagonal\n if abs(from_row - to_row) != abs(from_col - to_col):\n # if on same col? (like rook)\n if from_row != to_row and (from_col == to_col):\n dc = 0\n dr = 1 if to_row - from_row > 0 else -1\n # elif on same row?\n elif from_col != to_col and (from_row == to_row):\n dr = 0\n dc = 1 if to_col - from_col > 0 else -1\n else:\n # if not on same col or row\n return False\n else:\n # on same colored diagonal (moves like bishop)\n dr = 1 if to_row - from_row > 0 else -1\n dc = 1 if to_col - from_col > 0 else -1\n\n # check if any pieces are in the way of destination\n dm = abs(to_row - from_row)\n return self._any_piece_in_way(from_row, from_col, dr, dc, dm, toRow=to_row, toCol=to_col)", "def check_for_win(self, row, col, player): \n\n count = 0\n for i in range(0, len(self.board[0])):\n # Check vertical\n if self.board[row][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n\n count = 0\n for i in range(0, len(self.board)):\n # Check horisontal\n if self.board[:, col][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n \n count = 0\n totoffset = col - row\n for i in np.diagonal(self.board, offset=totoffset):\n # Check diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True\n\n count = 0\n mirrorboard = np.fliplr(self.board)\n col = self.colswitch[col]\n totoffset = col - row\n for i in np.diagonal(mirrorboard, offset=totoffset):\n # Check other diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True", "def _inblock(row, column, init, end):\n return all([row[column][0] >= init[0],\n row[column][1] >= init[1],\n row[column][0] <= end[0],\n row[column][1] <= end[1]])", "def is_valid(array, index):\n row, column = index\n return 0 <= row < len(array) and 0 <= column < len(array[row])", "def contains_row(matrix, row):\n return (matrix == row).all(axis=1).any()", "def valid_point(self, row, col):\n return self.topdown_view[row][col] == 1.0" ]
[ "0.71912694", "0.7152272", "0.70571184", "0.70388037", "0.70372164", "0.6987461", "0.6711312", "0.67097217", "0.6679228", "0.6654584", "0.66055614", "0.6557503", "0.6554002", "0.65482295", "0.64755046", "0.6440227", "0.6397061", "0.6329067", "0.6262952", "0.62180895", "0.60973376", "0.5991759", "0.59884995", "0.59483486", "0.59350926", "0.59034485", "0.58998644", "0.5893326", "0.5884642", "0.5861342", "0.5839313", "0.5832668", "0.5810527", "0.58020765", "0.5800204", "0.5781294", "0.57588506", "0.5752683", "0.5739059", "0.5738724", "0.57384276", "0.572673", "0.5705036", "0.57013637", "0.5693901", "0.5681043", "0.56704265", "0.5668898", "0.56675965", "0.5665535", "0.56593466", "0.5659289", "0.5657299", "0.56565887", "0.5651469", "0.5632057", "0.56273806", "0.5627153", "0.56190616", "0.56187874", "0.56113505", "0.558728", "0.5580809", "0.55789673", "0.5575264", "0.5560581", "0.55526197", "0.55522764", "0.5548167", "0.55468494", "0.5546145", "0.55368805", "0.55368805", "0.55363643", "0.5530476", "0.5527971", "0.552789", "0.5527222", "0.55188495", "0.5517624", "0.5516268", "0.55142725", "0.5510354", "0.55080986", "0.55063397", "0.5499501", "0.5497838", "0.5494929", "0.549425", "0.5477817", "0.54764235", "0.5475334", "0.54746616", "0.54735756", "0.54622006", "0.54574335", "0.5455222", "0.5443213", "0.5436616", "0.543226" ]
0.6862375
6
helper function for solve_interior_tile and solve_col0_tile
def position_tile(self, target_row, target_col, cur_row, cur_col, need_ld=True):
    move_str = ''
    if cur_row == target_row:
        if cur_col < target_col:
            move_str += 'l' * (target_col - cur_col)
            if target_col - cur_col > 1:
                move_str += 'ur'
                move_str += 'druldru' * (target_col - cur_col - 1)
            else:
                move_str += 'ur' if not need_ld else ''
                need_ld = False
        else:
            move_str += 'r' * (cur_col - target_col)
            if cur_col - target_col > 1:
                move_str += 'ul'
                move_str += 'dlurdlu' * (cur_col - target_col - 1)
            else:
                need_ld = False
    else:
        move_str += 'u' * (target_row - cur_row)
        if cur_col < target_col:
            move_str += ('l' * (target_col - cur_col) + 'dru')
            move_str += 'druldru' * (target_col - cur_col - 1)
            move_str += 'lddru' * (target_row - cur_row - 1)
        elif cur_col > target_col:
            move_str += ('r' * (cur_col - target_col) + 'dlu')
            move_str += 'dlurdlu' * (cur_col - target_col - 1)
            move_str += 'lddru' * (target_row - cur_row - 1)
        else:
            move_str += 'lddru' * (target_row - cur_row - 1)
    if need_ld:
        move_str += 'ld'
    return move_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if 
zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * 
row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_interior_tile(self, target_row, target_col):\n movements = self.move_tile(target_row, target_col,\n target_row * self.get_width() + target_col)\n self.update_puzzle(movements)\n return movements", "def get_tile(self, row, col):\n # replace with your code\n return 0", "def find_tile(loc, dir):\n #returns the integer tile number\n \n # should be looking in the directory with supergrid data (probably \"fix\" directory)\n filename_pattern = '*grid.tile*.nc'\n \n #find all supergrid files in the directory\n grid_fnames = []\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n grid_fnames.append(f_name)\n if not grid_fnames:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n #non-polar tiles can use traditional 2D point-in-polygon methods; if a point is not in a non-polar tile,\n #it is in one of the polar tiles, and the tile can be distinguished by the sign of latitude of the point\n polar_tile_filenames = []\n found_tile = False\n for f_name in grid_fnames:\n if not found_tile:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n longitude = np.array(nc_file['x']).swapaxes(0,1)\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n \n #get lon/lat pairs for all edges of the tiles\n \n edge_1_lon = longitude[0,:]\n edge_1_lat = latitude[0,:]\n edge_1 = list(zip(edge_1_lon, edge_1_lat))\n \n edge_2_lon = longitude[:,-1]\n edge_2_lat = latitude[:,-1]\n edge_2 = list(zip(edge_2_lon, edge_2_lat))\n \n edge_3_lon = longitude[-1,:]\n edge_3_lat = latitude[-1,:]\n edge_3 = list(zip(edge_3_lon, edge_3_lat))\n edge_3.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n edge_4_lon = longitude[:,0]\n edge_4_lat = latitude[:,0]\n edge_4 = list(zip(edge_4_lon, edge_4_lat))\n edge_4.reverse() #need to reverse the direction of this edge to form a 
regular polygon\n \n polygon_points = edge_1 + edge_2 + edge_3 + edge_4\n \n tile_polygon = Polygon(polygon_points)\n tile_polygon = tile_polygon.simplify(0)\n \n if tile_polygon.is_valid: #this will be True unless the tile is a polar tile, which will not form a regular polygon in Cartesian space using lon/lat data\n temp_loc = copy.deepcopy(loc)\n if adj_long:\n if loc[0] < 180:\n temp_loc[0] += 360\n loc_point = Point(temp_loc)\n if tile_polygon.contains(loc_point):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n else:\n polar_tile_filenames.append(f_name)\n \n #if the tile hasn't been found by this point, it must be contained within a polar tile\n for f_name in polar_tile_filenames:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n #if the sign of the mean latitude of the tile is the same as that of the point, the tile has been found\n if np.sign(np.mean(latitude)) == np.sign(loc[1]):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n return -1", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\r\n assert self.lower_row_invariant(target_row, target_col)\r\n row, col = self.current_position(target_row, target_col)\r\n # use move-helper function to get to target tile\r\n move_to_target = self.move_to_target(target_row, target_col, row, col)\r\n \r\n # update the grid\r\n self.update_puzzle(move_to_target)\r\n assert self.lower_row_invariant(target_row, target_col - 1)\r\n return move_to_target", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use 
solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, 
target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: 
x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def _get_tile_info(img_shape, tile_shape, ambiguous_size=128):\n # * get normal tiling set\n tile_grid_top_left, _ = _get_patch_top_left_info(img_shape, tile_shape, tile_shape)\n tile_grid_bot_right = []\n for idx in list(range(tile_grid_top_left.shape[0])):\n tile_tl = tile_grid_top_left[idx][:2]\n tile_br = tile_tl + tile_shape\n axis_sel = tile_br > img_shape\n tile_br[axis_sel] = img_shape[axis_sel]\n tile_grid_bot_right.append(tile_br)\n tile_grid_bot_right = np.array(tile_grid_bot_right)\n tile_grid = np.stack([tile_grid_top_left, tile_grid_bot_right], axis=1)\n tile_grid_x = np.unique(tile_grid_top_left[:, 1])\n tile_grid_y = np.unique(tile_grid_top_left[:, 0])\n # * get tiling set to fix vertical and horizontal boundary between tiles\n # for sanity, expand at boundary `ambiguous_size` to both side vertical and horizontal\n stack_coord = lambda x: np.stack([x[0].flatten(), x[1].flatten()], axis=-1)\n tile_boundary_x_top_left = np.meshgrid(\n tile_grid_y, tile_grid_x[1:] - ambiguous_size\n )\n tile_boundary_x_bot_right = np.meshgrid(\n tile_grid_y + tile_shape[0], tile_grid_x[1:] + ambiguous_size\n )\n tile_boundary_x_top_left = stack_coord(tile_boundary_x_top_left)\n tile_boundary_x_bot_right = stack_coord(tile_boundary_x_bot_right)\n tile_boundary_x = np.stack(\n [tile_boundary_x_top_left, tile_boundary_x_bot_right], axis=1\n )\n #\n tile_boundary_y_top_left = np.meshgrid(\n tile_grid_y[1:] - ambiguous_size, tile_grid_x\n )\n tile_boundary_y_bot_right = np.meshgrid(\n tile_grid_y[1:] + ambiguous_size, tile_grid_x + tile_shape[1]\n )\n tile_boundary_y_top_left = stack_coord(tile_boundary_y_top_left)\n tile_boundary_y_bot_right = stack_coord(tile_boundary_y_bot_right)\n tile_boundary_y = np.stack(\n [tile_boundary_y_top_left, tile_boundary_y_bot_right], axis=1\n )\n tile_boundary = np.concatenate([tile_boundary_x, tile_boundary_y], axis=0)\n # * get tiling set to fix the intersection of 4 tiles\n tile_cross_top_left = np.meshgrid(\n tile_grid_y[1:] - 2 * ambiguous_size, tile_grid_x[1:] - 2 * ambiguous_size\n )\n tile_cross_bot_right = np.meshgrid(\n tile_grid_y[1:] + 2 * ambiguous_size, tile_grid_x[1:] + 2 * ambiguous_size\n )\n tile_cross_top_left = stack_coord(tile_cross_top_left)\n tile_cross_bot_right = stack_coord(tile_cross_bot_right)\n tile_cross = np.stack([tile_cross_top_left, tile_cross_bot_right], axis=1)\n return tile_grid, tile_boundary, tile_cross", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), 
\"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def part2():\n grid[(0, 0)] = 1\n coordinates_value = 0\n layer = 1\n x = 0; y = 0\n done = False\n while not done:\n # print(\"Layer: \", layer)\n # go right one step\n layer += 1; x += 1\n grid[(x,y)] = check_neighbours((x,y))\n\n # go up to the boundary of layer\n for y_up in range(y+1, layer):\n coord = (x, y_up)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_up\n\n # go left till the boundary of layer\n for x_left in range(x-1, -layer, -1):\n coord = (x_left, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_left\n\n # go down till the boundary of layer\n for y_down in range(y-1, -layer, -1):\n coord = (x, y_down)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_down\n\n # go right till the boundary of layer\n for x_right in range(x+1, layer):\n coord = (x_right, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_right", "def adjacent_tiles(self,tile,pattern):\n\n # Initialize the list of tiles to return\n adj_tiles = []\n\n # Find the row and column of the input tile\n for i in self.tilelist:\n for j in i:\n if j == tile:\n row = self.tilelist.index(i)\n column = self.tilelist[row].index(j)\n\n # Define functions for the 2 distinct patterns\n def plus_sign(self,row,column):\n nonlocal adj_tiles\n if row - 1 >= 0:\n adj_tiles += [self.tilelist[row - 1][column]]\n if row + 1 != len(self.tilelist):\n adj_tiles += [self.tilelist[row + 1][column]]\n if column - 1 >= 0 :\n adj_tiles += [self.tilelist[row][column - 1]]\n if column + 1 != len(self.tilelist[row]):\n adj_tiles += [self.tilelist[row][column + 1]]\n\n def diagonal(self,row,column):\n nonlocal adj_tiles\n if column - 1 >= 0:\n if row - 1 >= 0:\n adj_tiles += [self.tilelist[row - 1][column - 1]]\n if row + 1 != len(self.tilelist):\n adj_tiles += [self.tilelist[row + 1][column - 1]]\n if column + 1 != len(self.tilelist[row]):\n if row - 1 >= 0:\n adj_tiles += [self.tilelist[row - 1][column + 1]]\n if row + 1 != len(self.tilelist):\n adj_tiles += [self.tilelist[row + 1][column + 1]]\n\n # Return the tiles that form a plus sign with the given input tile\n if pattern == 'p':\n plus_sign(self,row,column)\n\n # Return the tiles touching the 
four corners of the input tile\n elif pattern == 'x':\n diagonal(self,row,column)\n\n # Return all of the tiles surrounding the input tile\n elif pattern == 'b':\n plus_sign(self,row,column)\n diagonal(self,row,column)\n\n return adj_tiles", "def tettile(board: Board, tiles: List[Tile]) -> Union[List[Tuple[Tuple[int, int], Tile]], bool]:\n solution = []\n tiles_used = set()\n for i, tile in enumerate(tiles):\n if tile.type in tiles_used: # Prevent us from trying the same failed piece over and over\n continue\n tiles_used |= set(tile.type) # Add the current tile to the set of used tiles\n for j in range(tile.num_orientations):\n # Find the most northwestern possible tile position (or False if there isn't one)\n position = board.tile_can_be_placed(tile)\n if type(position) == np.ndarray:\n # If there's a position, place the tile\n board.place_tile(tile, position)\n # It might be the case that the placed tile partitioned the board such that\n # there is at least one partition that doesn't have a multiple of 4 cells\n if not board.is_valid():\n # If that's the case, short circuit the search of this branch\n board.remove_tile(tile, position)\n tile.rotate()\n continue\n # Otherwise, append the tile to the list of possible solutions\n solution.append((position, tile))\n if board.is_solved():\n # If the board is now solved, return the solution so it can bubble up\n return solution\n # If the board is not solved, call the function recursively, slicing out the current\n # tile from the list of tiles passed into the recursive call\n result = tettile(board, tiles[:i] + tiles[i + 1:])\n if board.is_solved():\n # If the recursion found a solution, append it to the solution that contains\n # the current tile and return that to bubble it up\n return solution + result\n else:\n # If the recursion did not find a solution, remove the current tile from the list\n # of solutions and from the board.\n board.remove_tile(tile, position)\n solution.pop()\n # Make sure to try all orientations of a piece at the given location\n tile.rotate()\n # Return the empty list if no solution was found\n return solution", "def calculate_min_max_tiles(self):", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we 
position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # 
print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def find_i_j(x, y, z, nchannels=3, maxdimsize=4096):\n\tz = ceil(z/nchannels) # take account having different layers of tiles\n\n\tif x*y*z > maxdimsize**2:\n\t\traise ValueError(\"Tiled array range not big enough\")\n\n\tmax_n = int(ceil(log(maxdimsize, 2))) # n value required if max images in i direction\n\tmax_m = int(ceil(log(maxdimsize, 2))) # m value required if max images in j direction\n\n\tsolutions = [] # hold n, m and the number of wasted pixels for a solution\n\tsol_num = 0\n\n\tfor n in range(1, max_n + 1):\n\t\tfor m in range(1, max_m + 1):\n\t\t\tif(trunc(2**n / x) * trunc(2**m / y) >= z):\n\t\t\t\t# determines if a tile of with dimesions of n and m can contain\n\t\t\t\t# all the images\n\n\t\t\t\tsol_info = []\n\t\t\t\tsol_info.append(n)\n\t\t\t\tsol_info.append(m)\n\t\t\t\tsol_info.append(waste_det(x, y, z, n, m))\n\n\t\t\t\tsolutions.append(sol_info)\n\n\t\t\t\tsol_num += 1\n\n\tsol_total = sol_num # records total number of solutions\n\n\topt = find_waste_min(sol_total, solutions) # gets properties of optimal solution\n\n\ti = 2**opt[1]\n\tj = 2**opt[2]\n\n\ttile_dim = [i, j]\n\n\treturn tile_dim", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), \"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += 
recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif 
current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def part2():\r\n my_input = 368078\r\n coords = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]\r\n x = y = dx = 0\r\n dy = -1\r\n grid = {}\r\n\r\n while True:\r\n total = 0\r\n for offset in coords:\r\n ox, oy = offset\r\n if (x+ox, y+oy) in grid:\r\n total += grid[(x+ox, y+oy)]\r\n if total > int(my_input):\r\n return total\r\n if (x, y) == (0, 0):\r\n grid[(0, 0)] = 1\r\n else:\r\n grid[(x, y)] = total\r\n if (x == y) or (x < 0 and x == -y) or (x > 0 and x == 1-y):\r\n dx, dy = -dy, dx\r\n x, y = x+dx, y+dy", "def fn(i, j):\n if i == 0 and j == 0: return grid[0][0], grid[0][0]\n if i < 0 or j < 0: return -inf, inf\n if grid[i][j] == 0: return 0, 0\n mx1, mn1 = fn(i-1, j) # from top\n mx2, mn2 = fn(i, j-1) # from left \n mx, mn = max(mx1, mx2)*grid[i][j], min(mn1, mn2)*grid[i][j]\n return (mx, mn) if grid[i][j] > 0 else (mn, mx)", "def transport_map(img):\n row, col = img.shape[:2]\n I = [None] * col # To store column number of images\n T = np.zeros((row,col), dtype=float) #Transport map\n C = np.zeros((row,col), dtype=int) #Map with path chosen\n for i in range(row):\n print \"row number Transport map:\",i\n for j in range(col):\n if i == 0 and j == 0:\n T[i, j] = 0\n I[j] = img \n continue\n if j==0 and i > 0: \n img, e = seam_removal_horizontal(I[j]) \n T[i,j], I[j], C[i,j] = e + T[i-1, j], img, 0 \n elif i == 0 and j > 0:\n img, e = seam_removal_vertical(I[j-1],[]) \n T[i,j], I[j], C[i,j] = e + T[i, j-1], img, 1\n else:\n img_h, eh = seam_removal_horizontal(I[j]) \n img_v, ev = seam_removal_vertical(I[j-1],[])\n 
T[i,j] = min(eh + T[i-1, j], ev + T[i, j-1]) \n C[i,j] = np.argmin((eh + T[i-1, j], ev + T[i, j-1]))\n if C[i,j] == 0:\n I[j] = img_h \n else:\n I[j] = img_v\n \n return T,C", "def innerCells(w, h):\n A = createBoard(w, h)\n\n for row in range(1, h-1):\n for col in range(1, w-1):\n if row == h-1:\n A[row][col] = 0\n elif col == w-1:\n A[row][col] = 0\n else:\n A[row][col] = 1\n return A", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]", "def inner_cells(w, h):\n a = create_board(w, h)\n\n for row in range(h):\n for col in range(w):\n if 0 < row < h - 1 and 0 < col < w - 1:\n a[row][col] = 1\n else:\n a[row][col] = 0\n\n return a", "def solve_part2(start):\n inputs = load_inputs(False)\n all_matches = []\n tiles = inputs.keys()\n for elem in tiles:\n matches = defaultdict(list)\n for elem2 in tiles:\n if elem != elem2 and compare_tile(inputs[elem], inputs[elem2]):\n l = matches[elem]\n l.append(elem2)\n if matches[elem]:\n all_matches.append(matches[elem])\n\n # start frmo an aribtrary corner\n # find a match, rotate me so that the match is along the right side\n # fill in properly oriented match\n # repeat, for row = 1+, consider top-match and left-match\n\n # for eery rotations / orientation, look fot the pattern", "def solution(n, m, r, c, k) -> int:\n xs = []\n # Add all the non-zero room widths to xs\n last_column_wall = None\n for col in c:\n if last_column_wall is not None and col - last_column_wall - 1 > 0:\n xs.append(col - last_column_wall - 1)\n last_column_wall = col\n ys = []\n # Add all the non-zero room heights to ys\n last_row_wall = None\n for row in r:\n if last_row_wall is not None and row - last_row_wall - 1 > 0:\n ys.append(row - last_row_wall - 1)\n last_row_wall = row\n return aux(xs, ys, k)", "def get_tile(self, row, col):\n # replace with your code\n return self.grid[row][col]", "def calc(s,tnx,i0s,ies):\n\n # round down\n tile0 = s.start // tnx\n # round up\n tilee = -(-s.stop // tnx)\n\n tiles = []\n srcslices = []\n tgtslices = []\n for tile in range(tile0,tilee):\n ii0 = max(0, -((s.start - i0s[tile]) // s.step))\n iie = -((s.start - min(s.stop,ies[tile])) // s.step)\n if iie > ii0:\n tiles.append(tile)\n myi0 = s.start + ii0*s.step - i0s[tile]\n myie = s.start + iie*s.step - i0s[tile]\n srcslices.append(slice(myi0,myie,s.step))\n tgtslices.append(slice(ii0,iie))\n\n return tiles, srcslices, tgtslices", "def get_others(map_, r, c):\n nums = 0\n # your code here\n if r == 0 and c == 0: #top left corder\n nums += 2\n if len(map_[0]) > 1:\n if map_[r][c+1] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n elif r == 0 and c == len(map_[0])-1: #top right corner\n nums += 2\n if len(map_[0]) > 1:\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n elif r == len(map_)-1 and c == 0: #bottom left corder\n nums += 2\n if len(map_[0]) > 1:\n if map_[r][c+1] == 0:\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n elif r == len(map_)-1 and c == len(map_[0])-1: #bottom right corner\n nums += 2\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n elif r == 0: # top edge, excluding corner\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r][c+1] == 0:\n nums += 1\n if len(map_) > r and map_[r+1][c] == 0:\n nums += 1\n elif r == len(map_)-1: # bottom edge, excluding corner\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r][c+1] == 0:\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n elif c == 0: # left edge, excluding corner\n nums += 1\n if map_[r-1][c] == 0:\n nums += 
1\n if map_[r+1][c] == 0:\n nums += 1\n if len(map_[0]) > c and map_[r][c+1] == 0:\n nums += 1\n elif c == len(map_[0])-1: # right edge. excluding corner\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n else: # the rest, excluding edge and corner\n if map_[r-1][c] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r][c+1] == 0:\n nums += 1\n return nums", "def test_multigrid_calculates_neighbours_correctly():\n\n # create a grid which will result in 9 cells\n h = 64\n img_dim = (3 * h + 1, 3 * h + 1)\n amg = mg.MultiGrid(img_dim, h, WS=127)\n\n # check that each cell has the expected neighbours\n print(amg.n_cells)\n\n # expected neieghbours left to right, bottom to top\n cells = [{\"north\": amg.cells[3], \"east\": amg.cells[1], \"south\": None, \"west\": None}, # bl\n {\"north\": amg.cells[4], \"east\": amg.cells[2],\n \"south\": None, \"west\": amg.cells[0]}, # bm\n {\"north\": amg.cells[5], \"east\": None,\n \"south\": None, \"west\": amg.cells[1]}, # br\n {\"north\": amg.cells[6], \"east\": amg.cells[4],\n \"south\": amg.cells[0], \"west\": None}, # ml\n {\"north\": amg.cells[7], \"east\": amg.cells[5],\n \"south\": amg.cells[1], \"west\": amg.cells[3]}, # mm\n {\"north\": amg.cells[8], \"east\": None,\n \"south\": amg.cells[2], \"west\": amg.cells[4]}, # mr\n # tl\n {\"north\": None, \"east\": amg.cells[7],\n \"south\": amg.cells[3], \"west\": None},\n # tm\n {\"north\": None,\n \"east\": amg.cells[8], \"south\": amg.cells[4], \"west\": amg.cells[6]},\n {\"north\": None, \"east\": None,\n \"south\": amg.cells[5], \"west\": amg.cells[7]}, # tr\n ]\n\n for ii, (gc, cell) in enumerate(zip(amg.cells, cells)):\n print(ii)\n assert gc.north == cell['north']\n assert gc.east == cell['east']\n assert gc.south == cell['south']\n assert gc.west == cell['west']", "def get_tile(left, up, right, down):\n tile = 0\n if left:\n tile += 1\n if up:\n tile += 2\n if right:\n tile += 4\n if down:\n tile += 8\n return tile", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self._grid_tile[row][col]", "def solve(board):\r\n \r\n #An O(2mn) time solution; the first O(mn) traversal is to preform a bfs on all tiles attached to edge 'O' tiles (can't convert to 'X's); the second is to convert all remaining 'O's into 'X's\r\n \r\n def bfs(curr,r,c):\r\n if not curr: return\r\n prev = len(curr)\r\n for n in range(0,prev):\r\n i,j = curr[n][0],curr[n][1]\r\n board[i][j] = 'A'\r\n for x,y in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n x_n = i+x\r\n y_n = j+y\r\n if x_n >= 0 and x_n < r and y_n >= 0 and y_n < c and board[x_n][y_n] == \"O\":\r\n curr += [(x_n,y_n)]\r\n bfs(curr[prev:],r,c)\r\n\r\n \r\n q,r,c = [],len(board),len(board[0])\r\n if not r or q: return\r\n\r\n for i in range(r):\r\n for j in range(c):\r\n if (i==0 or j==0 or i==r-1 or j==c-1) and board[i][j] == \"O\":\r\n q += [(i,j)]\r\n \r\n bfs(q,r,c)\r\n\r\n for i in range(r):\r\n for j in range(c): \r\n if board[i][j] == \"O\": \r\n board[i][j] = \"X\"\r\n elif board[i][j] == \"A\":\r\n board[i][j] = \"O\"\r\n \r\n return", "def get_tile(self, row, col):\n # replace with your code\n if row < self._grid_height and col < self._grid_width:\n return self._grid_2048[row][col]", "def create_tile(self, mines, row, col):\n if row * self.cols + col in mines:\n return Tiles.mine\n return Tiles.zero", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n 
# replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def find_loc_indices(loc, dir, tile):\n #returns the indices of the nearest neighbor point in the given tile, the lon/lat of the nearest neighbor, \n #and the distance (m) from the given point to the nearest neighbor grid cell\n \n filename_pattern = '*grid.tile{0}.nc'.format(tile)\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n filename = f_name\n if not filename:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n nc_file = Dataset('{0}/{1}'.format(dir,filename))\n #read in supergrid longitude and latitude\n lon_super = np.array(nc_file['x']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n lat_super = np.array(nc_file['y']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n #get the longitude and latitude data for the grid centers by slicing the supergrid \n #and taking only odd-indexed values\n longitude = lon_super[1::2,1::2]\n latitude = lat_super[1::2,1::2]\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n temp_loc = copy.deepcopy(loc)\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n if loc[0] < 180:\n temp_loc[0] += 360\n \n #set up an array to hold the euclidean distance between the given point and every grid cell\n eucl_dist = np.zeros((longitude.shape[0],longitude.shape[1]))\n \n #get the Cartesian location of the given point\n cart_loc = np.array(sph2cart(math.radians(temp_loc[0]), math.radians(temp_loc[1]), earth_radius))\n \n for i in range(len(longitude)):\n for j in range(len(longitude[i])):\n #get the Cartesian 
location of all grid points\n cart_cell = np.array(sph2cart(math.radians(longitude[i,j]), math.radians(latitude[i,j]), earth_radius))\n \n #calculate the euclidean distance from the given point to the current grid cell\n eucl_dist[i,j] = np.linalg.norm(cart_loc - cart_cell)\n \n #get the indices of the grid point with the minimum euclidean distance to the given point\n i,j = np.unravel_index(eucl_dist.argmin(), eucl_dist.shape)\n \n return (i,j,longitude[i,j]%360.0, latitude[i,j], eucl_dist[i,j])", "def _find_connected_tiles(self, row, col, non_empty_tiles_not_visited: set) -> None:\n\n non_empty_tiles_not_visited.remove((row, col))\n\n if (row > 0) and (self.board[row - 1][col] is not None) and ((row - 1, col) in non_empty_tiles_not_visited):\n self._find_connected_tiles(row - 1, col, non_empty_tiles_not_visited)\n if (\n (row < self.board_size - 1)\n and (self.board[row + 1][col] is not None)\n and ((row + 1, col) in non_empty_tiles_not_visited)\n ):\n self._find_connected_tiles(row + 1, col, non_empty_tiles_not_visited)\n if (col > 0) and (self.board[row][col - 1] is not None) and ((row, col - 1) in non_empty_tiles_not_visited):\n self._find_connected_tiles(row, col - 1, non_empty_tiles_not_visited)\n if (\n (col < self.board_size - 1)\n and (self.board[row][col + 1] is not None)\n and ((row, col + 1) in non_empty_tiles_not_visited)\n ):\n self._find_connected_tiles(row, col + 1, non_empty_tiles_not_visited)", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self._cells[row][col]", "def solve(raster):\n cells_changed = True\n while cells_changed:\n cells_changed = False\n for meta in raster.row_meta:\n mask = raster.get_row(meta.idx)\n orig_meta = copy.deepcopy(meta)\n\n linesolve(mask, meta)\n\n if raster.update_row(mask=mask, idx=meta.idx) or meta != orig_meta:\n cells_changed = True\n\n for meta in raster.col_meta:\n mask = raster.get_col(meta.idx)\n orig_meta = copy.deepcopy(meta)\n\n linesolve(mask, meta)\n\n if raster.update_col(mask=mask, idx=meta.idx) or meta != orig_meta:\n cells_changed = True\n\n if raster.is_solved():\n return Solution(raster.table)\n\n return None", "def isSolvable(self):\n tiles = []\n for i in range(len(self.tiles)):\n for j in range(len(self.tiles)):\n if self.tiles[j][1] * 3 + self.tiles[j][0] + 1 == i + 1:\n tiles.append(j + 1)\n count = 0\n for i in range(len(tiles) - 1):\n for j in range(i + 1, len(tiles)):\n if tiles[i] > tiles[j] and tiles[i] != 9:\n count += 1\n return count % 2 == 0 and count != 0", "def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0", "def _mergeTiles(self, base, tile, x, y):\n # Replace non blank pixels, aggregating opacity appropriately\n x = int(round(x))\n y = int(round(y))\n if base is None and not x and not y:\n return tile\n if base is None:\n base = np.zeros((0, 0, tile.shape[2]), dtype=tile.dtype)\n base, tile = _makeSameChannelDepth(base, tile)\n if base.shape[0] < tile.shape[0] + y:\n vfill = np.zeros(\n (tile.shape[0] + y - base.shape[0], base.shape[1], base.shape[2]),\n dtype=base.dtype)\n if base.shape[2] == 2 or base.shape[2] == 4:\n vfill[:, :, -1] = 1\n base = np.vstack((base, vfill))\n if base.shape[1] < tile.shape[1] + x:\n hfill = np.zeros(\n (base.shape[0], tile.shape[1] 
+ x - base.shape[1], base.shape[2]),\n dtype=base.dtype)\n if base.shape[2] == 2 or base.shape[2] == 4:\n hfill[:, :, -1] = 1\n base = np.hstack((base, hfill))\n if base.flags.writeable is False:\n base = base.copy()\n base[y:y + tile.shape[0], x:x + tile.shape[1], :] = tile\n return base", "def voronoi_sub_mask_1d_index_to_pixeliztion_1d_index_from_grids_and_geometry(\n grid,\n mask_1d_index_to_nearest_pixelization_1d_index,\n sub_mask_1d_index_to_mask_1d_index,\n pixel_centres,\n pixel_neighbors,\n pixel_neighbors_size,\n):\n\n sub_mask_1d_index_to_pixeliztion_1d_index = np.zeros((grid.shape[0]))\n\n for sub_mask_1d_index in range(grid.shape[0]):\n\n nearest_pixelization_1d_index = mask_1d_index_to_nearest_pixelization_1d_index[\n sub_mask_1d_index_to_mask_1d_index[sub_mask_1d_index]\n ]\n\n while True:\n\n nearest_pixelization_pixel_center = pixel_centres[\n nearest_pixelization_1d_index\n ]\n\n sub_pixel_to_nearest_pixelization_distance = (\n (grid[sub_mask_1d_index, 0] - nearest_pixelization_pixel_center[0]) ** 2\n + (grid[sub_mask_1d_index, 1] - nearest_pixelization_pixel_center[1])\n ** 2\n )\n\n closest_separation_from_pixelization_to_neighbor = 1.0e8\n\n for neighbor_pixelization_1d_index in range(\n pixel_neighbors_size[nearest_pixelization_1d_index]\n ):\n\n neighbor = pixel_neighbors[\n nearest_pixelization_1d_index, neighbor_pixelization_1d_index\n ]\n\n separation_from_neighbor = (\n grid[sub_mask_1d_index, 0] - pixel_centres[neighbor, 0]\n ) ** 2 + (grid[sub_mask_1d_index, 1] - pixel_centres[neighbor, 1]) ** 2\n\n if (\n separation_from_neighbor\n < closest_separation_from_pixelization_to_neighbor\n ):\n closest_separation_from_pixelization_to_neighbor = (\n separation_from_neighbor\n )\n closest_neighbor_pixelization_1d_index = (\n neighbor_pixelization_1d_index\n )\n\n neighboring_pixelization_1d_index = pixel_neighbors[\n nearest_pixelization_1d_index, closest_neighbor_pixelization_1d_index\n ]\n sub_pixel_to_neighboring_pixelization_distance = (\n closest_separation_from_pixelization_to_neighbor\n )\n\n if (\n sub_pixel_to_nearest_pixelization_distance\n <= sub_pixel_to_neighboring_pixelization_distance\n ):\n sub_mask_1d_index_to_pixeliztion_1d_index[\n sub_mask_1d_index\n ] = nearest_pixelization_1d_index\n break\n else:\n nearest_pixelization_1d_index = neighboring_pixelization_1d_index\n\n return sub_mask_1d_index_to_pixeliztion_1d_index", "def test_cover_geometry_poly_w_hole2(tiler, poly_w_hole):\n tiles = [tile for tile in cover_geometry(tiler, poly_w_hole, 9)]\n assert len(tiles) == 77\n assert set(tiles) == set([(297, 82, 9), (301, 87, 9), (294, 87, 9), (299, 88, 9), (300, 85, 9), (292, 83, 9), (296, 83, 9), (298, 89, 9), (295, 82, 9), (290, 86, 9), (291, 87, 9), (297, 88, 9), (292, 87, 9), (298, 86, 9), (298, 84, 9), (294, 84, 9), (294, 88, 9), (299, 89, 9), (292, 85, 9), (300, 86, 9), (294, 82, 9), (290, 85, 9), (298, 82, 9), (295, 84, 9), (296, 87, 9), (293, 84, 9), (299, 85, 9), (291, 85, 9), (299, 86, 9), (296, 85, 9), (297, 85, 9), (296, 89, 9), (293, 89, 9), (292, 86, 9), (293, 87, 9), (291, 88, 9), (298, 88, 9), (298, 87, 9), (295, 87, 9), (296, 88, 9), (293, 83, 9), (301, 86, 9), (291, 86, 9), (297, 86, 9), (297, 89, 9), (292, 88, 9), (294, 86, 9), (294, 85, 9), (292, 82, 9), (300, 87, 9), (295, 89, 9), (290, 87, 9), (296, 82, 9), (298, 85, 9), (297, 83, 9), (291, 83, 9), (295, 83, 9), (300, 88, 9), (293, 86, 9), (299, 83, 9), (299, 84, 9), (297, 87, 9), (294, 83, 9), (297, 84, 9), (298, 83, 9), (293, 82, 9), (294, 89, 9), (296, 84, 9), (290, 84, 9), 
(293, 88, 9), (290, 83, 9), (295, 86, 9), (293, 85, 9), (295, 88, 9), (292, 84, 9), (291, 84, 9), (299, 87, 9)])", "def get_tile(board):\n t = [[0,0]]\n for i in range(board.shape[0] -1):\n for j in range(board.shape[1] -1):\n if board[i, j] == board[i +1, j]:\n t.append([i +1, j])\n if board[i, j] == board[i, j+1]:\n t.append([i, j+1])\n if board[i, j] == board[i+1, j+1]:\n t.append([i+1, j+1])\n # print(t)\n t = list(np.unique(t, axis=0))\n return t", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * (col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def GetCoastGrids(LandMask):\n \n \"\"\"\n Define a coastline map. This map will be set to 1 on all coast cells.\n \"\"\"\n \n \n CoastlineMap = np.zeros((LandMask.shape[0], LandMask.shape[1]))\n \n \"\"\"\n We will use a nested loop to loop through all cells of the Landmask cell. What this loop basically does is,\n when a cell has a value of 1 (land), it will make all surrounding cells 1, so we create kind of an extra line of \n grids around the landmask. In the end we will substract the landmask from the mask which is created by the nested loop, \n which result in only a mask with the coast grids. Notice, that when we're in the corner, upper, side, or lower row, and we\n meet a land cell, we should not make all surrounding cells 1. For example, we the lower left corner is a land grid, you should only make the inner cells 1. \n \"\"\"\n \n for i in range(LandMask.shape[0]-1):\n for j in range(LandMask.shape[1]-1):\n \n\n \"\"\"\n We have nine if statements, four for the corners, four for the sides and one for the middle\n of the landmask. 
\n \"\"\"\n\n if i == 0 and j == 0: #upper left corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i+1,j] = 1 \n CoastlineMap[i+1, j+1] = 1\n \n \n elif i == 0 and j != 0 and j != LandMask.shape[1]-1: #upper row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i+1, j] = 1\n CoastlineMap[i+1,j-1] = 1\n CoastlineMap[i+1,j+1] = 1\n \n \n elif i == 0 and j == LandMask.shape[1]-1: #upper right corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n \n CoastlineMap[i+1,j] = 1 \n CoastlineMap[i+1, j-1] = 1\n \n elif i != 0 and i != LandMask.shape[0]-1 and j == LandMask.shape[1]-1: #right row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i+1,j] = 1\n CoastlineMap[i-1,j] = 1\n \n CoastlineMap[i, j-1] = 1\n CoastlineMap[i+1,j-1] = 1\n CoastlineMap[i-1,j-1] = 1\n \n elif i == LandMask.shape[0]-1 and j == LandMask.shape[1]-1: #lower right corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n \n CoastlineMap[i-1,j] = 1 \n CoastlineMap[i-1, j-1] = 1\n \n elif i == LandMask.shape[0]-1 and j != 0 and j != LandMask.shape[1]-1: #lower row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i-1, j] = 1\n CoastlineMap[i-1,j-1] = 1\n CoastlineMap[i-1,j+1] = 1\n \n \n elif i == LandMask.shape[0]-1 and j == 0: #lower left corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i+1,j] = 1 \n CoastlineMap[i+1, j+1] = 1\n \n elif i != 0 and i != LandMask.shape[0]-1 and j == 0: #left row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i+1,j] = 1\n CoastlineMap[i-1,j] = 1\n \n CoastlineMap[i, j+1] = 1\n CoastlineMap[i+1,j+1] = 1\n CoastlineMap[i-1,j+1] = 1\n \n else:\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1 #middle\n CoastlineMap[i+1,j] = 1#lowermiddle\n CoastlineMap[i-1,j] = 1#uppermiddle\n \n CoastlineMap[i+1, j-1] = 1\n CoastlineMap[i-1, j-1] = 1\n CoastlineMap[i, j-1] =1\n \n CoastlineMap[i+1, j+1] = 1\n CoastlineMap[i-1, j+1] = 1\n CoastlineMap[i, j+1] = 1\n \n \n \n \"\"\"\n Here we substract the landmaks from the coastline mask, resulting in only\n the coastline. 
\n \"\"\"\n \n \n Coastgrids = CoastlineMap - LandMask\n \n return Coastgrids, CoastlineMap", "def _get_tiles_and_coords(\n self, tensor: torch.Tensor\n ) -> Tuple[torch.Tensor, List[List[int]], List[List[List[int]]]]:\n assert tensor.dim() == 4 and tensor.shape[0] == 1\n\n y_coords, y_overlaps = self._calc_tile_coords(\n tensor.shape[2], self._tile_size[0], self._tile_overlap[0]\n )\n x_coords, x_overlaps = self._calc_tile_coords(\n tensor.shape[3], self._tile_size[1], self._tile_overlap[1]\n )\n tile_coords = torch.jit.annotate(List[Tuple[int, int, int, int]], [])\n [\n [\n tile_coords.append(\n (y, y + self._tile_size[0], x, x + self._tile_size[1])\n )\n for x in x_coords\n ]\n for y in y_coords\n ]\n tiles = torch.cat([tensor[..., c[0] : c[1], c[2] : c[3]] for c in tile_coords])\n return tiles, [y_coords, x_coords], [y_overlaps, x_overlaps]", "def swipeBase (self) :\n grid = self.grid\n\n #we start by putting every tile up\n for columnNbr in range(4) :\n nbrZeros = 4 - np.count_nonzero(grid[:,columnNbr])\n\n for lineNbr in range(4) :\n counter = 0\n while (grid[lineNbr, columnNbr] == 0) and (counter < 4):\n counter += 1\n if np.count_nonzero(grid[lineNbr:4, columnNbr]) != 0 :\n for remainingLine in range (lineNbr, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n #now we do the additions\n for lineNbr in range(3) :\n if grid[lineNbr, columnNbr] == grid[lineNbr+1, columnNbr] :\n grid[lineNbr, columnNbr] *= 2\n for remainingLine in range (lineNbr+1, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n return (grid)", "def footprint_corner_indices():", "def maxAreaOfIsland(self, grid):\n \n def helper(x, y):\n if x < 0 or x >= len(grid) or y < 0 or y >= len(grid[0]) or grid[x][y] == 'X':\n return 0\n if grid[x][y] == 1:\n grid[x][y] = 'X'\n return 1 + helper(x - 1, y) + helper(x + 1, y) + helper(x, y + 1) + helper(x, y - 1)\n else:\n grid[x][y] = 'X'\n return 0\n \n max_area = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n max_area = max(max_area, helper(i, j)) \n return max_area\n\n \"\"\"\n - depth-first search, recursive, mark visited in a set\n - O(n), O(n)\n \"\"\"\n \n visited = set() # can use global variable instead of passing into stack\n \n def helper(x, y):\n if x < 0 or x >= len(grid) or y < 0 or y >= len(grid[0]) or (x, y) in visited:\n return 0\n visited.add((x, y))\n if grid[x][y] == 1:\n return 1 + helper(x - 1, y) + helper(x + 1, y) + helper(x, y + 1) + helper(x, y - 1)\n else:\n return 0\n \n max_area = 0\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n max_area = max(max_area, helper(i, j)) \n return max_area\n\n \"\"\"\n - depth-first search, iterative, mark visited in a set\n - O(n), O(n)\n \"\"\"\n max_area = 0\n visited = set()\n row, col = len(grid), len(grid[0])\n for i in range(row):\n for j in range(col):\n area = 0\n n = grid[i][j]\n stack = [(i, j)] # use stack to track all neighbors (all need to be searched) \n while stack:\n x, y = stack.pop()\n if 0 <= x < row and 0 <= y < col and (x, y) not in visited:\n visited.add((x, y))\n if grid[x][y] == 1:\n area += 1\n stack += [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]\n max_area = max(max_area, area)\n return max_area", "def compute_mesh(nrow, ncol, nele):\n tri_index = np.zeros((nele, 3))\n for i in range(nrow-1):\n for j in range(NUM):\n if j == 0:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)\n tri_index[i*4*NUM+j*4, 2] = (i+2)\n\n 
tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)\n tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n else:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)+(2*j-1)*nrow\n tri_index[i*4*NUM+j*4, 2] = (i+2)+(2*j-1)*nrow\n\n tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)+(2*j-1)*nrow\n tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n \n tri_index[i*4*NUM+j*4+2, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+2, 1] = (i+1)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+2, 2] = (i+2)+2*(j+1)*nrow\n\n tri_index[i*4*NUM+j*4+3, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+3, 1] = (i+2)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+3, 2] = (i+2)+2*j*nrow\n return tri_index", "def no_of_misplaced_tiles(state):\r\n h1 = 0\r\n goal_state = [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n for y in range(len(goal_state)):\r\n for x in range(len(goal_state[y])):\r\n if state[y][x] != goal_state[y][x]:\r\n h1 += 1\r\n return h1", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def _pixel_to_tile(x: float, y: float) -> tuple[float, float] | None:\n if not lib.TCOD_ctx.engine:\n return None\n xy = ffi.new(\"double[2]\", (x, y))\n lib.TCOD_sys_pixel_to_tile(xy, xy + 1)\n return xy[0], xy[1]", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def solve_part1(start):\n inputs = load_inputs(False)\n two_matches = []\n tiles = inputs.keys()\n for elem in tiles:\n matches = defaultdict(list)\n for elem2 in tiles:\n if elem != elem2 and compare_tile(inputs[elem], inputs[elem2]):\n l = matches[elem]\n l.append(elem2)\n\n if len(matches[elem]) == 2:\n print matches\n two_matches.append(elem)\n\n return reduce((lambda x, y: int(x) * int(y)), two_matches)", "def bruteforce(self):\n import time\n t1 = time.time()\n for i in range(self.td.shape[0]):\n #Get the latitude at the start of the row, this is used for the entire row\n\n if i % config.LATITUDE_STEP == 0:\n startlat = i + config.LATITUDE_STEP #move to the center of the step\n startlat += self.start #Offset for parallel segmentation\n\n # This is the latitude at the center of the tile defined by\n # the image width, and the latitude_step\n x = int(self.td.shape[1] / 2)\n y = int((startlat + config.LATITUDE_STEP) / 2)\n latitude, _ = self.temperature.pixel_to_latlon(x,y)\n\n lat_f = PchipInterpolator(self.latitudenodes, self.lookup, extrapolate=False, axis=0)\n #The reshape corresponds to the dimensions of the OLAP cube\n # 5 elevations, 5 slope azimuths, 3 slopes, 3 opacities, 3 albedos, and finally 20 TI\n data = lat_f(latitude)\n compressedlookup = data.reshape(6,5,3,3,3,20)\n # Compute the PChip interpolation 
function for elevation\n elevation_interp_f = PchipInterpolator(np.array([-5.0, -2.0, -1.0, 1.0, 6.0, 8.0]), compressedlookup, extrapolate=False, axis=0)\n \n for j in range(self.td.shape[1]):\n # Each interpolation is composed in 2 parts.\n # 1. The interpolation function is computed.\n # 2. The interpolation function is applied.\n #print(self.reference[i,j], self.r_ndv)\n # If either the reference or the input THEMIS have no data\n if (self.td[i,j] == self.ndv) or (self.reference[i,j] == self.r_ndv):\n #The pixel is no data in the input, propagate to the output\n self.resultdata[i,j] = self.ndv\n continue\n\n #Interpolate elevation\n try:\n new_elevation = elevation_interp_f(self.ed[i,j])\n except:\n # The elevation is bad.\n self.resultdata[i,j] = self.ndv\n self.log[i,j] = self.error_codes['elevation_out_of_bounds']\n continue\n #Interpolate Slope Azimuth\n slopeaz_f = self.compute_interpolation_function(sorted(self.slopeaz_lookup.keys()),\n new_elevation,\n config.SLOPEAZ_INTERPOLATION)\n new_slopeaz = slopeaz_f(self.sz[i,j])\n #Interpolate Slope\n slope_f = self.compute_interpolation_function(sorted(self.slope_lookup.keys()),\n new_slopeaz,\n config.SLOPE_INTERPOLATION)\n capped_slope = self.sd[i,j]\n if capped_slope > 60.0:\n capped_slope = 60.0\n new_slope = slope_f(capped_slope)\n # I am having problems here with pulling TAU properly - check montabone!\n #Interpolate Tau\n tau_f = PchipInterpolator(sorted(self.tau_lookup.keys()),\n new_slope,\n extrapolate=False,\n axis=0)\n new_tau = tau_f(self.od[i,j])\n #Interpolate Albedo\n albedo_f = self.compute_interpolation_function(sorted(self.albedo_lookup.keys()),\n new_tau,\n config.ALBEDO_INTERPOLATION)\n new_albedo = albedo_f(self.ad[i,j])\n #Interpolate Inertia\n self.resultdata[i,j] = self.extract_monotonic(self.td[i,j],\n new_albedo)", "def get_tile(self, row, col):\n # replace with your code\n return self._cells[row][col]", "def get_UFS_grid_area(dir, tile, i, j):\n #this information is in the supergrid files\n \n filename_pattern = '*grid.tile{0}.nc'.format(tile)\n \n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n filename = f_name\n if not filename:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n nc_file = Dataset('{0}/{1}'.format(dir,filename))\n \n # extract out area of grid cell\n \n #calculate supergrid indices from regular grid indices\n jpt2 = j*2+1\n ipt2 = i*2+1\n \n #from Phil Pegion: the area is calculated by adding up the 4 components of the contained supergrid cells\n area_in=nc_file['area'][jpt2-1:jpt2+1,ipt2-1:ipt2+1]\n \n return area_in.sum()", "def test_combine():\n # Create 4 square arrays:\n # 0 1 2 3\n # -----------\n # 00 11 22 33\n # 00 11 22 33\n tiles = [np.array(_square(i)) for i in range(4)]\n\n with pytest.raises(ValueError):\n _combine_tiles(tiles[0], tiles[1], tiles[2]) # Too few values.\n\n with pytest.raises(ValueError):\n _combine_tiles(tiles[0], None, None, None, None) # Too many values.\n\n # Combine them the 4 major ways:\n\n # case1: corner\n # 0X\n # XX\n case1 = _combine_tiles(tiles[0], None, None, None)\n assert case1.shape == (2, 2)\n assert (case1 == tiles[0]).all()\n\n # case2: bottom edge\n # 01\n # XX\n case2 = _combine_tiles(tiles[0], tiles[1], None, None)\n assert case2.shape == (2, 4)\n assert (case2[0:2, 0:2] == tiles[0]).all()\n assert (case2[0:2, 3:5] == tiles[1]).all()\n\n # case3: right edge\n # 0X\n # 2X\n case3 = _combine_tiles(tiles[0], 
None, tiles[2], None)\n assert case3.shape == (4, 2)\n assert (case3[0:2, 0:2] == tiles[0]).all()\n assert (case3[3:5, 0:2] == tiles[2]).all()\n\n # case4: interior\n # 01\n # 23\n case4 = _combine_tiles(tiles[0], tiles[1], tiles[2], tiles[3])\n assert case4.shape == (4, 4)\n assert (case4[0:2, 0:2] == tiles[0]).all()\n assert (case4[0:2, 3:5] == tiles[1]).all()\n assert (case4[3:5, 0:2] == tiles[2]).all()\n assert (case4[3:5, 3:5] == tiles[3]).all()", "def misplaced_heuristic(state):\n msp_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n elif state[i][j] != i*size + j:\n msp_h += 1\n return msp_h", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def query_image_tile(self, coord):", "def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))", "def get_nr_of_misplaced_tiles(board):\n result = 0\n\n for idx, val in enumerate(board):\n if idx != val:\n result += 1\n\n return result", "def calc_tile_overlap(ra_ctr, dec_ctr, pad=0.0, min_ra=0., max_ra=180., min_dec=-90., max_dec=90.):\n overlap = ((min_dec - pad) < dec_ctr) & ((max_dec + pad) > dec_ctr)\n\n #TRAP HIGH LATITUDE CASE AND (I GUESS) TOSS BACK ALL TILES. 
DO BETTER LATER\n mean_dec = (min_dec + max_dec) * 0.5\n if np.abs(dec_ctr) + pad > 88.0:\n return overlap\n\n ra_pad = pad / np.cos(np.radians(mean_dec))\n\n # MERIDIAN CASES\n merid = np.where(max_ra < min_ra)\n overlap[merid] = overlap[merid] & ( ((min_ra-ra_pad) < ra_ctr) | ((max_ra+ra_pad) > ra_ctr) )[merid]\n\n # BORING CASE\n normal = np.where(max_ra > min_ra)\n overlap[normal] = overlap[normal] & ((((min_ra-ra_pad) < ra_ctr) & ((max_ra+ra_pad) > ra_ctr)))[normal]\n\n return overlap", "def choose_cell_to_assign(self):\r\n min_domain = 10\r\n max_degree = -1\r\n chosen_row = None\r\n chosen_col = None\r\n for row in range(9):\r\n for col in range(9):\r\n if self.puzzle[row][col] == 0:\r\n domain_size = len(self.grid[row][col].domain)\r\n if domain_size < min_domain:\r\n min_domain = domain_size\r\n chosen_row = row\r\n chosen_col = col\r\n elif domain_size == min_domain:\r\n degree = len(self.grid[row][col].neighbors)\r\n if degree > max_degree:\r\n max_degree = degree\r\n chosen_row = row\r\n chosen_col = col\r\n return self.grid[chosen_row][chosen_col]", "def solve(grid):\n find = find_empty(grid)\n if not find:\n return True\n\n row, col = find\n for i in range(1, 10):\n if valid(grid, i, (row, col)):\n grid[row][col] = i\n if solve(grid):\n return True\n grid[row][col] = 0\n return False", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def reducer(sudoku_grid):\n for i in range(9):\n sudoku_grid = reduce_row(i,sudoku_grid)\n sudoku_grid = reduce_col(i,sudoku_grid)\n sudoku_grid = reduce_sub(i,sudoku_grid)\n return sudoku_grid", "def fn(i, j):\n if i < 0 or j < 0 or obstacleGrid[i][j]: return 0\n if i == 0 and j == 0: return 1 \n return fn(i-1, j) + fn(i, j-1)", "def index_by_border(tiles):\n by_border = {\n \"north\": defaultdict(lambda: []),\n \"east\": defaultdict(lambda: []),\n \"south\": defaultdict(lambda: []),\n \"west\": defaultdict(lambda: []),\n }\n borders = {}\n inners = {}\n for tile_id, tile_rows in tiles.items():\n north, east, south, west, inner = get_borders(tile_rows)\n by_border[\"north\"][north].append(tile_id)\n by_border[\"east\"][east].append(tile_id)\n by_border[\"south\"][south].append(tile_id)\n by_border[\"west\"][west].append(tile_id)\n inners[tile_id] = inner\n borders[tile_id] = (north, east, south, west)\n return by_border, borders, inners", "def select_cells_in_trenches(props_all, trenchLocs = TrenchLocs.MIDDLE, below_trench_quantile = 90, above_trench_quantile = 100, mother_cell_y_offset=10,inversion_mult = 1):\n def indcs_in_trenches(centy,cell_indcs, invert):\n \n cy = centy[cell_indcs]\n above_trench_cut = np.percentile(invert*cy,100 - above_trench_quantile)\n below_trench_cut = np.percentile(invert*cy,below_trench_quantile)\n idx_above = (invert*centy) > above_trench_cut - mother_cell_y_offset\n idx_below = (invert*centy) < below_trench_cut\n idx_select = np.all(np.vstack((cell_indcs,idx_above,idx_below)),axis=0)\n return idx_select\n \n \n img_height = props_all.img_height\n centy = np.array(props_all.centy)\n props_all['trench_inversion_mult'] = 1*inversion_mult\n ### Note that the y-indx is flipped in 
image compared to matrix coords\n if trenchLocs == TrenchLocs.MIDDLE:\n idx_select = np.zeros(centy.shape,dtype=bool)\n for pos in np.unique(props_all.pos_num):\n idx_pos = np.array(props_all.pos_num == pos)\n idx_cell_pos = indcs_in_trenches(centy,idx_pos, 1*inversion_mult)\n idx_select = np.any(np.vstack((idx_select,idx_cell_pos)),axis=0)\n \n props_clean = props_all[idx_select]\n else:\n # top position in actual picture (smallest y value in matrix)\n idx_top = centy < (img_height/2) \n idx_bottom = centy > (img_height/2)\n props_all.loc[idx_bottom,'trench_inversion_mult'] = -1*inversion_mult\n \n idx_select_top = np.zeros(idx_top.shape,dtype=bool)\n idx_select_bottom = np.zeros(idx_bottom.shape,dtype=bool)\n\n for pos in np.unique(props_all.pos_num): \n idx_pos = (props_all.pos_num == pos)\n idx_top_pos = np.all(np.vstack((idx_top,idx_pos)),axis=0)\n idx_bottom_pos = np.all(np.vstack((idx_bottom,idx_pos)),axis=0)\n\n idx_select_top_pos = indcs_in_trenches(centy,idx_top_pos, 1*inversion_mult)\n idx_select_bottom_pos = indcs_in_trenches(centy,idx_bottom_pos, -1*inversion_mult)\n idx_select_top = np.any(np.vstack((idx_select_top,idx_select_top_pos)),axis=0)\n idx_select_bottom = np.any(np.vstack((idx_select_bottom,idx_select_bottom_pos)),axis=0)\n\n \n if trenchLocs == TrenchLocs.TOP:\n idx_reasonable_cells = idx_select_top\n elif trenchLocs == TrenchLocs.BOTTOM:\n idx_reasonable_cells = idx_select_bottom\n elif trenchLocs == TrenchLocs.TOP_AND_BOTTOM:\n idx_reasonable_cells = np.any(np.vstack((idx_select_top,idx_select_bottom)),axis=0) \n \n props_clean = props_all.loc[idx_reasonable_cells,:]\n\n return props_clean", "def get_candidate_tiles(self) -> List[Point]:\n\t\tempty_tiles = set()\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif not self.tiles[x][y] == 0:\n\t\t\t\t\tfor d in [[0,1], [1,1], [1,0], [1,-1], [0,-1], [-1,-1], [-1,0], [-1,1]]:\n\t\t\t\t\t\tif x+d[0] >= 0 and y+d[1] >= 0 and x+d[0] < self.size and y+d[1] < self.size and self.tiles[x+d[0]][y+d[1]] == 0:\n\t\t\t\t\t\t\tempty_tiles.add(Point(x+d[0],y+d[1]))\n\t\treturn list(empty_tiles)", "def infill_small_regions(I):\n n_tiles = 4 # ntiles horizontally.\n assert I.shape[0] == I.shape[1]\n tile_size = I.shape[0] // (n_tiles - 1)\n tile_delta = tile_size // 2\n\n k = 0\n I_stack = np.ones(I.shape + (2, 2)) * np.nan\n for j in range(n_tiles * 2 - 1):\n for i in range(n_tiles * 2 - 1):\n dy = slice(tile_delta * j, tile_delta * (j + 2))\n dx = slice(tile_delta * i, tile_delta * (i + 2))\n S = I[dy, dx]\n M = ndimage.binary_dilation(np.isnan(S), iterations=2)\n image_inpainted = inpaint.inpaint_biharmonic(S, M, multichannel=False)\n I_stack[dy, dx, j % 2, i % 2] = image_inpainted\n k += 1\n return np.nanmean(np.nanmean(I_stack, axis=2), axis=2)", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def fn(i, j):\n if 0 <= i < m and 0 <= j < n and grid[i][j] == \"1\": \n grid[i][j] = \"0\"\n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j):\n fn(ii, jj)\n return 1\n return 0", "def get_tile_indices(rows, cols, row_tile_size, col_tile_size):\n indices = list()\n num_row_tiles, num_col_tiles = get_num_tiles(rows, cols, 
row_tile_size, col_tile_size)\n for r in range(0, num_row_tiles):\n start_r = r * row_tile_size\n end_r = ((r + 1) * row_tile_size) if (r < num_row_tiles - 1) else rows\n for c in range(0, num_col_tiles):\n start_c = c * col_tile_size\n end_c = ((c + 1) * col_tile_size) if (c < num_col_tiles - 1) else cols\n indices.append((start_r, end_r, start_c, end_c, r + 1, c + 1))\n return indices", "def get_others(map_, r, c):\n \n map_[r][c] = '#'\n nums = 0\n\n for n_row,n_col in ((r +1,c),(r -1,c),(r,c +1),(r,c-1)):\n in_bounds = 0 <= n_row < len(map_) and 0 <= n_col < len(map_[0])\n if in_bounds and map_[n_row][n_col] == 1:\n nums += get_others(map_,n_row,n_col)\n elif (in_bounds and map_[n_row][n_col] == 0) or (not in_bounds):\n nums += 1\n\n\n\n\n\n\n\n\n\n return nums", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def calcul_xy_array(img_x, img_y, tile_x, tile_y):\n array = []\n\n modu_x = img_x % tile_x\n modu_y = img_y % tile_y\n div_x = img_x // tile_x\n div_y = img_y // tile_y\n current_x = 0\n current_y = 0\n\n for i in range(div_y):\n for j in range(div_x):\n array.append((current_x, current_y))\n current_x += tile_x\n if modu_x:\n array.append((img_x - tile_x, current_y))\n current_y += tile_y\n current_x = 0\n\n if modu_y:\n current_y = img_y - tile_y\n for j in range(div_x):\n array.append((current_x, current_y))\n current_x += tile_x\n if modu_x:\n array.append((img_x - tile_x, current_y))\n\n return array", "def update_floor(tiles):\n new_tiles = defaultdict(int)\n\n # add all neighbors, because they may have to flip\n new_tiles.update({\n **dict.fromkeys(get_all_neighbor_coords(tiles), 0),\n **tiles})\n\n # check update conditions\n for tile, color 
in new_tiles.items():\n new_tiles[tile] = update_tile(tile, color, tiles)\n\n return new_tiles", "def generate_base_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Base Tiles:\"\n if self.options.verbose:\n #mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY\n #px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)\n #print \"Pixel coordinates:\", px, py, (mx, my)\n print\n print \"Tiles generated from the max zoom level:\"\n print \"----------------------------------------\"\n print\n\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n querysize = self.querysize\n\n # Just the center tile\n #tminx = tminx+ (tmaxx - tminx)/2\n #tminy = tminy+ (tmaxy - tminy)/2\n #tmaxx = tminx\n #tmaxy = tminy\n\n #print tminx, tminy, tmaxx, tmaxy\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n #print tcount\n ti = 0\n i_y_column_count=((tmaxy-tminy)+1)\n ds = self.out_ds\n tz = self.tmaxz\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 281596 ] tmaxx[ 281744 ] ; ((tmaxx-tmaxy)+1) x_tiles[ 23393 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tmaxy)+1) x_tiles[\",tcount,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 352409 ] tminy[ 352253 ] ; ((tmaxy-tminy)) y_tiles[ 157 ] 352409-(352253-1)\n print \"\\ttz=[\",tz,\"] : ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy+1)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] y_tiles[\",tcount,\"]\"\n return\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n break\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; x =\",tx,\" ; y_tms =\",ty_tms, \"; y_osm =\",ty_osm\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n if self.options.profile in ('mercator','geodetic'):\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:900913\n b = self.mercator.TileBounds(tx, ty_tms, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty_tms, tz)\n\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1])\n nativesize = wb[0]+wb[2] # Pixel size in the raster covering query geo 
extent\n if self.options.verbose:\n print \"\\tNative Extent (querysize\",nativesize,\"): \", rb, wb\n\n querysize = self.querysize\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n else: # 'raster' or 'gearth' or 'garmin' profile:\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.out_ds.RasterXSize # size of the raster in pixels\n ysize = self.out_ds.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty_tms == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty_tms * tsize) - rysize\n\n wx, wy = 0, 0\n\n wxsize, wysize = int(rxsize/float(tsize) * querysize), int(rysize/float(tsize) * querysize)\n if wysize != querysize:\n wy = querysize - wysize\n xyzzy = Xyzzy(querysize, rx, ry, rxsize, rysize, wx, wy, wxsize, wysize)\n try:\n if self.options.verbose:\n print ti,'/',tcount,' total ; z =',tz,' ; x =',tx,' ; y_tms =',ty_tms,' ; y_osm =',ty_osm\n print \"\\tReadRaster Extent: \", (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)\n self.write_base_tile(tx, ty, tz, xyzzy)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()", "def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def cell(x, y):\n try:\n if cells[y][x]['filled'] == 1:\n return # this has already been processed\n except IndexError:\n return\n cells[y][x]['filled'] = 1 # this cell is now filled\n\n nn = []\n for nx, ny in neighbours(x, y):\n try:\n if 
cells[ny][nx]['filled']:\n nn.append(cells[ny][nx])\n except IndexError:\n continue\n \n c = 0 # colour weighting\n \n #------ Flippedness\n flipped = sum([i['inverted'] for i in nn if i['inverted']])\n cells[y][x]['inverted'] = (randint(0, 3) + flipped) % 4\n \n #------- Colour calculation\n avg_colour = sum([i['colour'][0] for i in nn]) / len(nn)\n avg_sat = sum([i['colour'][1] for i in nn]) / len(nn)\n avg_bri = sum([i['colour'][2] for i in nn]) / len(nn)\n \n # small chance of going totally random otherwise small variation from neighbours\n if random(100) > 90:\n h = randint(0, 100)\n s = randint(0, 100)\n b = randint(0, 100)\n else:\n h = (avg_colour + randint(-15, 15)) % 100\n s = (avg_sat + randint(-15, 15)) % 100\n b = (avg_bri + randint(-15, 15)) % 100\n cells[y][x]['colour'] = (h, s, b)\n \n #------- Alpha calculation\n d = sqrt((x*cell_size - rx)**2 + (y*cell_size - ry)**2) # distance from epicenter\n mx = sqrt((w-rx*cell_size)**2 + (h-ry*cell_size)**2)\n a = d/sqrt(w**2+h**2)*255\n cells[y][x]['alpha'] = a\n \n for cx,cy in neighbours(x, y):\n cell(cx, cy)", "def solve(grid):\n\n if is_grid_solved(grid):\n return grid\n\n new_grid = copy.deepcopy(grid)\n\n for x_element in range(len(new_grid)):\n for y_element in range(len(new_grid[x_element])):\n if new_grid[x_element][y_element] == 0:\n answers = ExactCover(new_grid, x_element, y_element)\n for answer in answers:\n new_grid[x_element][y_element] = answer\n new_grid = solve(new_grid)\n if not is_grid_solved(new_grid):\n new_grid[x_element][y_element] = 0\n else:\n break\n return new_grid\n\n return new_grid", "def get_all_potential_edges(self) -> Dict[str,\n Tuple[int, int, int, int]]:\n orig_rows = self.tile_rows\n\n ret = dict()\n\n for i in range(0, 4):\n self.rotate_right(i)\n for j in range(0, 2):\n self.flip_l_r(j)\n for k in range(0, 2):\n self.flip_t_b(k)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'rr{i}_lr{j}_tb{k}'] = edges\n\n self.tile_rows = orig_rows\n\n for j in range(0, 2):\n self.flip_l_r(j)\n for i in range(0, 4):\n self.rotate_right(i)\n for k in range(0, 2):\n self.flip_t_b(k)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'lr{j}_rr{i}_tb{k}'] = edges\n\n self.tile_rows = orig_rows\n\n for j in range(0, 2):\n self.flip_l_r(j)\n for k in range(0, 2):\n self.flip_t_b(k)\n for i in range(0, 4):\n self.rotate_right(i)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'lr{j}_tb{k}_rr{i}'] = edges\n\n self.tile_rows = orig_rows\n\n for k in range(0, 2):\n self.flip_t_b(k)\n for j in range(0, 2):\n self.flip_l_r(j)\n for i in range(0, 4):\n self.rotate_right(i)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'tb{k}_lr{j}_rr{i}'] = edges\n\n self.tile_rows = orig_rows\n\n for k in range(0, 2):\n self.flip_t_b(k)\n for i in range(0, 4):\n self.rotate_right(i)\n for j in range(0, 2):\n self.flip_l_r(j)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'tb{k}_rr{i}_lr{j}'] = edges\n\n self.tile_rows = orig_rows\n\n for i in range(0, 4):\n self.rotate_right(i)\n for k in range(0, 2):\n self.flip_t_b(k)\n for j in range(0, 2):\n self.flip_l_r(j)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'rr{i}_tb{k}_lr{j}'] = edges\n\n self.tile_rows = orig_rows\n\n return ret", "def get_tile(self, row, col):\n # replace with your code\n return self.board[row][col]", "def overlay(grid, hitmap, structure, x, y):\n\n width, height, bt, hm, portals = structure\n for row_num in 
range(height):\n for tile in range(width):\n grid[y + row_num][x + tile] = bt[row_num][tile]\n hitmap[y + row_num][x + tile] = hm[row_num][tile]\n\n return grid, hitmap, portals" ]
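Aside: two of the negatives in the list just closed count the exposed sides of a grid cell, once with verbose per-corner and per-edge branching (the fragment that opens this segment of the list) and once with a compact bounds check (the get_others entry). The sketch below restates that technique in the compact form; the function name and the convention that a side counts when it faces a 0 cell or the map edge are inferred from those two entries, not copied verbatim from either.

def count_exposed_sides(map_, r, c):
    """Count sides of cell (r, c) that face a 0 cell or the map edge.

    Illustrative sketch only; mirrors the bounds-checked loop of the
    get_others negative above.
    """
    exposed = 0
    for n_row, n_col in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
        in_bounds = 0 <= n_row < len(map_) and 0 <= n_col < len(map_[0])
        if not in_bounds or map_[n_row][n_col] == 0:
            exposed += 1
    return exposed

On a one-cell map [[1]], every side faces the edge, so the count is 4.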
[ "0.6987945", "0.69417804", "0.65950215", "0.657609", "0.64476514", "0.6431955", "0.6417518", "0.6369899", "0.6368723", "0.6354411", "0.6283423", "0.6279517", "0.625173", "0.61543477", "0.61364645", "0.612347", "0.606173", "0.60402596", "0.6014027", "0.6010626", "0.6002269", "0.59506464", "0.5935589", "0.59238213", "0.5920119", "0.5913409", "0.58445865", "0.58415246", "0.58071923", "0.57960045", "0.57952887", "0.57606316", "0.5742454", "0.57101405", "0.5705186", "0.56812817", "0.56785774", "0.56656516", "0.5657321", "0.5657321", "0.5657321", "0.5654447", "0.5653395", "0.5647223", "0.5643784", "0.5640146", "0.56227213", "0.561851", "0.5615256", "0.5590947", "0.5581412", "0.5575736", "0.55744237", "0.5568169", "0.5566179", "0.55613047", "0.5554675", "0.5552436", "0.55494946", "0.5547484", "0.55457455", "0.55449086", "0.5543262", "0.55426204", "0.5540977", "0.55323124", "0.55318475", "0.5503056", "0.5501635", "0.549288", "0.5491309", "0.5489345", "0.5485308", "0.548433", "0.5480658", "0.54796165", "0.5471705", "0.5466684", "0.54653084", "0.5463249", "0.54630184", "0.546144", "0.54546136", "0.5449865", "0.54479885", "0.5443373", "0.54405767", "0.5440512", "0.54394233", "0.54292315", "0.5427189", "0.5420871", "0.54207194", "0.5417932", "0.54169446", "0.5396716", "0.53883904", "0.53786933", "0.5376172", "0.53748816", "0.53735965" ]
0.0
-1
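Aside: several negatives of the record above (move_tile, solve_col0_tile, solve_puzzle) steer a sliding-tile puzzle by composing move strings from the characters "u", "d", "l", and "r" and handing them to an update_puzzle method that never appears in this dump. The sketch below is a minimal interpreter for such strings; the list-of-lists grid, the convention that 0 marks the blank tile, and the standalone function signature are assumptions rather than code taken from the dataset.

def update_puzzle(grid, moves):
    """Apply a move string to a sliding-puzzle grid in place.

    Illustrative sketch only: assumes 0 marks the blank tile and that each
    character names the direction the blank travels.
    """
    # locate the blank tile
    zero_row, zero_col = next((row, col)
                              for row, vals in enumerate(grid)
                              for col, val in enumerate(vals) if val == 0)
    deltas = {"u": (-1, 0), "d": (1, 0), "l": (0, -1), "r": (0, 1)}
    for move in moves:
        d_row, d_col = deltas[move]
        new_row, new_col = zero_row + d_row, zero_col + d_col
        assert 0 <= new_row < len(grid) and 0 <= new_col < len(grid[0]), \
            "move string walks off the board"
        # swap the blank with the tile it moves onto
        grid[zero_row][zero_col], grid[new_row][new_col] = \
            grid[new_row][new_col], grid[zero_row][zero_col]
        zero_row, zero_col = new_row, new_col
    return grid

For example, applying "ul" to [[1, 2], [3, 0]] moves the blank up and then left, leaving [[0, 1], [3, 2]].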
Place correct tile at target position
Updates puzzle and returns a move string
def solve_interior_tile(self, target_row, target_col):
    cur_row, cur_col = self.current_position(target_row, target_col)
    move_str = self.position_tile(target_row, target_col, cur_row, cur_col)
    self.update_puzzle(move_str)
    return move_str
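Aside: the document above leans on two helpers this record does not include, current_position and position_tile (a position_tile variant does appear among the negatives below). A minimal current_position compatible with the call above might look like the sketch; the convention that the tile belonging at (row, col) stores the value row * width + col is an assumption, as is the standalone signature.

def current_position(grid, target_row, target_col):
    """Return the (row, col) where the tile that belongs at
    (target_row, target_col) currently sits.

    Illustrative sketch only: assumes the solved grid stores
    row * width + col at position (row, col).
    """
    width = len(grid[0])
    solved_value = target_row * width + target_col
    for row, vals in enumerate(grid):
        for col, val in enumerate(vals):
            if val == solved_value:
                return row, col
    raise ValueError("tile %d is missing from the grid" % solved_value)

With a 4-wide grid, for instance, the tile that belongs at (1, 2) is the one currently holding the value 6.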
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n 
ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"ulld\"\r\n return moves_str", "def 
solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def move_to_target(self, target_row, target_col, row, col):\r\n move = \"\"\r\n # typical move to move target tile to target pos.\r\n solver_move = \"druld\"\r\n # move up first\r\n move = (target_row - row) * \"u\"\r\n # conditional statements for moving the tile:\r\n # 1. case curr_pos of tile and target_tile are in same col\r\n if (target_col - col) == 0:\r\n move += \"ld\" + ((target_row - row) - 1) * solver_move\r\n else:\r\n # 2. curr_pos of tile is on the left of target pos\r\n if (target_col - col) > 0:\r\n move += (target_col - col) * \"l\"\r\n if row == 0:\r\n move += (abs(target_col - col) - 1) * \"drrul\"\r\n else:\r\n move += (abs(target_col - col) - 1) * \"urrdl\"\r\n # 3. curr_pos of tile is on the right of target pos:\r\n elif (target_col - col) < 0:\r\n move += (abs(target_col - col) - 1) * \"r\"\r\n if row == 0:\r\n move += abs(target_col - col) * \"rdllu\"\r\n else:\r\n move += abs(target_col - col) * \"rulld\"\r\n move += (target_row - row) * solver_move\r\n return move", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). \n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def solve_row1_tile(self, target_col):\n cur_row, cur_col = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)\n self.update_puzzle(move_str)\n return move_str", "def solve_row0_tile(self, target_col):\n move_str = 'ld'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(0, target_col)\n if cur_row == 0 and cur_col == target_col:\n return move_str\n else:\n move_str += self.position_tile(1, target_col-1, cur_row, cur_col)\n move_str += 'urdlurrdluldrruld'\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position (0,j) to (1,j−1) \r\n # using the move string 
\"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha 
in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, 
current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\n move_str = 'ur'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(target_row, 0)\n if cur_row == target_row and cur_col == 0:\n move_str += 'r' * (self._width - 2)\n else:\n move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)\n move_str += 'ruldrdlurdluurddlur'\n move_str += 'r' * (self._width - 2)\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def position_tile(self, target_row, target_col, cur_row, cur_col, need_ld=True):\n move_str = ''\n if cur_row == target_row:\n if cur_col < target_col:\n move_str += 'l' * (target_col - cur_col)\n if target_col - cur_col > 1:\n move_str += 'ur'\n move_str += 'druldru' * (target_col - cur_col - 1)\n else:\n move_str += 'ur' if not need_ld else ''\n need_ld = False\n else:\n move_str += 'r' * (cur_col - target_col)\n if cur_col - target_col > 1:\n move_str += 'ul'\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n else:\n need_ld = False\n else:\n move_str += 'u' * (target_row - cur_row)\n if cur_col < target_col:\n move_str += ('l' * (target_col - cur_col) + 'dru')\n move_str += 'druldru' * (target_col - cur_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n elif cur_col > target_col:\n move_str += ('r' * (cur_col - 
target_col) + 'dlu')\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n else:\n move_str += 'lddru' * (target_row - cur_row - 1)\n if need_ld:\n move_str += 'ld'\n return move_str", "def solve_row0_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row0_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n \n # Move zero tile from target position (0, target_col) to (1, target_col - 1) \n self.update_puzzle(\"ld\")\n \n move_str = \"\"\n\n # correct_tile's position is determined after moving zero tile \"ld\"\n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(0, target_col) \n \n # Moves to reposition correct_tile to target position, and\n # the zero tile to (1, target_col - 1).\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(0, target_col):\n move_str += str(self.position_tile(1, target_col - 1, correct_tile))\n move_str += str(\"urdlurrdluldrruld\")\n\n self.update_puzzle(move_str)\n\n assert self.row1_invariant(target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"ld\" + move_str\n return move_str", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', 
path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded 
move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_col0_tile(self, target_row):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, 0), \"tiles to right and below incorrectly ordered\"\n\n # Move zero tile from target position (target_row, 0) to (target_row - 1, 1).\n self.update_puzzle(\"ur\")\n\n move_str = \"\"\n \n # correct_tile's position is determined after moving zero tile \"ur\" \n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(target_row, 0)\n \n # Moves to reposition correct_tile to target position.\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(target_row, 0):\n move_str += str(self.position_tile(target_row - 1, 1, correct_tile))\n move_str += str(\"ruldrdlurdluurddlur\")\n\n # Moves to reposition zero tile to end of column of target_row + 1.\n move_str += str(\"r\" * (self.get_width() - 2)) \n \n self.update_puzzle(move_str)\n\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1) \n \n move_str = \"ur\" + move_str\n return move_str", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + 
direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n 
self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up 
+ path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, 
target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def test_perform_move(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertFalse(p.perform_move(\"taco\"))\n self.assertTrue(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,0],[7,8,6]])\n self.assertFalse(p.perform_move('right'))\n p = hw.create_tile_puzzle(2, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('up'))\n self.assertFalse(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,0,4],[5,6,3,7]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertFalse(p.perform_move('down'))\n self.assertFalse(p.perform_move('left'))\n self.assertEqual(p.get_board(), [[0,1,2,3]])", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), \"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = 
self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right 
side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * 
(col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def solve_row0_tile(self, target_col):\r\n assert self.row0_invariant(target_col)\r\n move = \"ld\"\r\n self.update_puzzle(move)\r\n \r\n row, col = self.current_position(0, target_col)\r\n if row == 0 and col == target_col:\r\n return move\r\n else:\r\n move_to_target = self.move_to_target(1, target_col - 1, row, col)\r\n # 2x3 puzzle solver\r\n move_to_target += \"urdlurrdluldrruld\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n return move", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += 
self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def move_1_piece(context: GUI, old_coordinate, new_coordinate):\n\n old_tile = context.board.board_dict[old_coordinate]\n new_tile = context.board.board_dict[new_coordinate]\n\n new_tile.piece = old_tile.piece\n old_tile.piece = None\n\n context.update_move_printer(old_coordinate + \" \" + new_coordinate)", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def move(puzzle: str, direction: str):\r\n position_index = puzzle.index(EMPTY)\r\n position = position_index + 1\r\n grid_width = get_grid_width(puzzle)\r\n\r\n # What direction to moved the tile if it's a valid move\r\n if direction == UP:\r\n if (position) > grid_width:\r\n return swap_position(puzzle, position_index, position_index - grid_width)\r\n\r\n elif direction == DOWN:\r\n if (len(puzzle) - position) >= grid_width:\r\n return swap_position(puzzle, position_index, position_index + grid_width)\r\n\r\n elif direction == LEFT:\r\n if (position - 1) % grid_width != 0:\r\n return swap_position(puzzle, position_index, position_index - 1)\r\n\r\n elif direction == RIGHT:\r\n if position % grid_width != 0:\r\n return swap_position(puzzle, position_index, position_index + 1)\r\n\r\n return None", "def move(argument, player):\n current_tile = world.tile_exists(player.location_x, player.location_y)\n if argument == \"north\":\n if world.tile_exists(player.location_x, player.location_y-1):\n new_tile = world.tile_exists(player.location_x, player.location_y-1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y-1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"south\":\n if world.tile_exists(player.location_x, player.location_y+1):\n new_tile = world.tile_exists(player.location_x, player.location_y+1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y+1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"east\":\n if world.tile_exists(player.location_x+1, 
player.location_y):\n new_tile = world.tile_exists(player.location_x + 1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x+1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"west\":\n if world.tile_exists(player.location_x-1, player.location_y):\n new_tile = world.tile_exists(player.location_x-1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x-1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"Movement not recognized. Specify a cardinal direction.\")\n return", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def makeMove(self, movable_statement):\n ### Student code goes here\n tile = str(movable_statement.terms[0])\n fromx = str(movable_statement.terms[1])\n fromy = str(movable_statement.terms[2])\n tox = str(movable_statement.terms[3])\n toy = str(movable_statement.terms[4])\n self.kb.kb_retract(parse_input('fact: (pos ' + tile + ' ' + fromx + ' ' + fromy + ')'))\n self.kb.kb_retract(parse_input('fact: (pos empty ' + tox + ' ' + toy + ')'))\n self.kb.kb_assert(parse_input('fact: (pos ' + tile + ' ' + tox + ' ' + toy + ')'))\n self.kb.kb_assert(parse_input('fact: (pos empty ' + fromx + ' ' + fromy + ')'))", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def test_7_replay_4(self):\n self._execute_replay_nr(4)\n\n self.grid.add_pawn(5, 'H')\n self.grid.add_pawn(3, 'B')\n self.grid.add_pawn(2, 'H')\n self.grid.add_pawn(1, 'B')\n self.grid.add_pawn(1, 'H')\n\n # self.grid.print_grid()\n # print(self.minmaxBot_7.choose_move(self.grid))", "def place_piece(self, move, piece):\n self.totMoves+=1\n print(move)\n if len(move) > 1:\n self.board[move[1][0]][move[1][1]] = ' '\n self.board[move[0][0]][move[0][1]] = piece", "def move_pieces(self, starting_loc, ending_loc):\n\n self._game_board[ending_loc[0]][ending_loc[1]] = \\\n self._game_board[starting_loc[0]][starting_loc[1]]\n self._game_board[starting_loc[0]][starting_loc[1]] = \"____\"", "def make_move(self, row, column):\n\t\tif self.board[int(row)][int(column)] == '-':\n\t\t\tself.board[int(row)][int(column)] = self.marker\n\t\telse:\n\t\t\tprint(\"That spot is occupied, you messed up, you lose your turn for doing bad things\")", "def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n 
move_str += 'rdlu'\n self.update_puzzle('rdlu')\n return move_str", "def test_valid_moves(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 0\n move_choice = 0\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . . x . . . .\n . . . x K . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 5\n move_choice = 5\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((3, 5), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S x K . .\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 6\n move_choice = 6\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((2, 7), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . K\n . . . S x S x x\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 3\n move_choice = 3\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((1, 5), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . K . .\n . . . . . x x S\n . . . S x S x x\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 2\n move_choice = 2\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((2, 3), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . x x S . .\n . . . K . x x S\n . . . S x S x x\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . 
.\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # reset board\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 7\n move_choice = 7\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((4, 5), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S x x . .\n . . . . . K . .\n . . . . . . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 1\n move_choice = 1\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((6, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S x x . .\n . . . . . S . .\n . . . . . x . .\n . . . . K x . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 4\n move_choice = 4\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((4, 3), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S x x . .\n . . . K x S . .\n . . . . x x . .\n . . . . S x . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def makeMove(self, move, player):", "def test_move_onto_past(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 0\n move_choice = 0\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # try to go back in strict mode (fail)\n move_choice = 4\n k1.execute_move(move_choice, strict=True)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . . x . . . .\n . . . x K . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n # try to go back without strict mode\n move_choice = 4\n k1.execute_move(move_choice)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((3, 3), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . K x . . .\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . 
.\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def make_move(move):\n global manatee_pos\n global hyacinths\n global hyacinth_pos\n\n # Ends the program if movement is out of bounds\n if move == (0, 0):\n return None\n new_pos = (manatee_pos[0] + move[0], manatee_pos[1] + move[1])\n if new_pos[0] < 0 or new_pos[0] >= len(map):\n return None\n if new_pos[1] < 0 or new_pos[1] >= len(map[new_pos[0]]):\n return None\n\n entity = map[new_pos[0]][new_pos[1]]\n if entity == \"#\" or entity == \"G\":\n # Runs if movement is impossible\n return None\n if entity == \" \" or entity == \".\":\n # Runs if normal movement is possible\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n if entity == \"O\":\n # Runs if manatee wins game\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return \"win\"\n if entity == \"\\\\\":\n # Runs if manatee eats hyacinth\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n hyacinths += 1\n if len(hyacinth_pos) == hyacinths:\n map[grate_pos[0]][grate_pos[1]] = \"O\"\n return None\n if entity == \"*\":\n # Checks if manatee can push boat\n if move[0] == 0:\n new_boat_pos = (new_pos[0] + move[0], new_pos[1] + move[1])\n if new_boat_pos[0] < 0 or new_boat_pos[0] >= len(map):\n return None\n if new_boat_pos[1] < 0 \\\n or new_boat_pos[1] >= len(map[new_boat_pos[0]]):\n return None\n if map[new_boat_pos[0]][new_boat_pos[1]] == \" \":\n map[new_boat_pos[0]][new_boat_pos[1]] = \"*\"\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n return None", "def place_piece(self, move, piece):\r\n if len(move) > 1:\r\n self.board[move[1][0]][move[1][1]] = ' '\r\n self.board[move[0][0]][move[0][1]] = piece", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n 
solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def place_piece(self, move, piece):\n if len(move) > 1:\n self.board[move[1][0]][move[1][1]] = ' '\n self.board[move[0][0]][move[0][1]] = piece", "def make_move(self): \n if self.counter == 0:\n #AI makes a random move to start\n ai_move = random.randrange(0,((self.size[0] * self.size[1]) - 1)) \n \n #Number to coordinate conversion\n row = ai_move % self.size[0]\n column = ai_move % self.size[0]\n self.start_game((row, column))\n self.counter = 1\n\n if (self.board[(row, column)] == 'm'):\n #print() \"\\n\", \"First move RIP!, what are the odds...\"\n self.found_mine()\n self.gameover = 1\n \n else:\n row, column = self.find_move()\n \n #0.25 second wait \n #time.sleep(0.25)\n\n #Prints out to the terminal the move and type of move\n print(row, \",\", column)\n\n #Updates the GUI\n root.update()\n \n if (self.board[(row, column)] == 'm'):\n 
print(\"RIP!\") \n self.found_mine() \n self.gameover = 1\n \n elif self.board[(row, column)] == '0':\n print(\"No mines in sight\") \n self.found_space((row, column))\n\n elif self.board[(row, column)] == '1':\n print(\"There is 1 mine next to this spot\") \n self.found_border((row, column))\n else:\n print(\"There are\", self.board[(row, column)], \"mines next to this spot\") \n self.found_border((row, column))", "def move(self, direction):\r\n direc = list(OFFSETS[direction])\r\n line = []\r\n dummy_board = self.board[:]\r\n if direction == 3:\r\n for i in range(self.height):\r\n self.board[i] = merge(self.board[i])\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n elif direction == 4:\r\n for i in range(self.height):\r\n line = self.board[i][::-1]\r\n self.board[i] = merge(line)\r\n self.board[i] = self.board[i][::-1]\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n \r\n elif direction == 1 or 2:\r\n dummy_board = str(self.board[:])\r\n if direction == 1:\r\n tile = [0,0]\r\n elif direction == 2:\r\n tile = [self.height - 1, 0]\r\n for i in range(self.width):\r\n tile2 = tile[:]\r\n while len(line) < self.height:\r\n line.append(self.get_tile(*tile2))\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n line = merge(line)\r\n tile2 = tile[:]\r\n for i in range(self.height):\r\n self.set_tile(*(tile2+[line[0]]))\r\n line.remove(line[0])\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n tile = [x+y for x,y in zip(tile, [0,1])]\r\n if dummy_board != self.__str__():\r\n self.new_tile()\r\n return self.board", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. 
\r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). \r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def play_move(board, move):\n\tboard_copy = list(board)\n\n\tboard_copy[move] = 'o'\n\treturn ''.join(board_copy)", "def test_move_over_past(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 0\n move_choice = 0\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # try to go back in strict mode (fail)\n move_choice = 3\n k1.execute_move(move_choice, strict=True)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . . x . . . .\n . . . x K . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n # try to go back without strict mode\n move_choice = 3\n k1.execute_move(move_choice, strict=False)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((4, 2), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . K x . . . .\n . . x x S . . .\n . . . . . . . .\n . . . . . . . 
.\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def make_move(board, player_num, row, col):\n board[row][col] = 'X' if player_num == 1 else 'O'", "def execute_solution(offset_x, offset_y, moves):\n\n\t# Offsets for approximately where everything is given 1600x900 game window size\n\tbase_x = 46\n\tbase_y = 238\n\tfreecell_x = 314\n\tfreecell_y = 24\n\twidth = 128\n\theight = 30\n\tmodifier_x = 40\n\tmodifier_y = 19\n\n\t# Correct for retina display (change to 1 on conventional monitor)\n\tres_scale = 0.5\n\n\t# First, click the window\n\tpyautogui.mouseDown((offset_x + 100) * res_scale, (offset_y + 100) * res_scale, button=\"left\")\n\ttime.sleep(0.5)\n\tpyautogui.mouseUp()\n\ttime.sleep(1)\n\n\t# Now, replay the moves one by one\n\tfor move in moves:\n\t\t# which stack, how many cards down -> which stack, how many cards down\n\t\tx_pre, y_pre, x_post, y_post = move\n\n\t\t# If it's a regular stack, move to the offset\n\t\tif x_pre < 8:\n\t\t\tx_pre_final = offset_x + base_x + (width * x_pre) + modifier_x\n\t\t\ty_pre_final = offset_y + base_y + (height * y_pre) + modifier_y\n\t\t# Separate offsets for freecell\n\t\telse:\n\t\t\tx_pre_final = offset_x + freecell_x + (width * (x_pre - 8)) + modifier_x\n\t\t\ty_pre_final = offset_y + freecell_y + modifier_y\n\n\t\tif x_post < 8:\n\t\t\tx_post_final = offset_x + base_x + (width * x_post) + modifier_x\n\t\t\ty_post_final = offset_y + base_y + (height * y_post) + modifier_y\n\t\telse:\n\t\t\tx_post_final = offset_x + freecell_x + (width * (x_post - 8)) + modifier_x\n\t\t\ty_post_final = offset_y + freecell_y + modifier_y\n\n\t\tprint(\"Mouse to %d, %d -> drag to %d, %d\" % (x_pre_final, y_pre_final, x_post_final, y_post_final))\n\n\t\t# Move the mouse to the beginning place\n\t\tpyautogui.moveTo(x_pre_final * res_scale, y_pre_final * res_scale, duration = 0.25)\n\n\t\t# Click and drag to the end\n\t\tpyautogui.dragTo(x_post_final * res_scale, y_post_final * res_scale, duration = 0.25, button = \"left\")\n\n\t\t# Wait for a while\n\t\ttime.sleep(0.25)", "def move(self, direction):\n newx = self.x\n newy = self.y\n newy += random.randint(-1, 1)\n newx += random.randint(-1, 1)\n if self.tmap.contents[newy][newx] != '#':\n self.x = newx\n self.y = newy", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = 
self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = 
corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def move(self, direction):\n\n # Check if there are empty tiles available\n for row in self._grid:\n if row.count(0) != 0:\n self._game_over = False\n break\n else:\n self._game_over = True\n\n # If empty tiles are not available, game over\n if self._game_over == True:\n print \"Sorry Game Over, Board Full\"\n print self.__str__()\n return None\n\n # New tiles won't be needed for illegal moves\n new_tiles_needed = False\n\n for tile in self._initial_tiles[direction]:\n old_tiles = self.traverse_grid(tile, OFFSETS[direction], self._steps[direction])\n tiles = merge(old_tiles)\n if old_tiles != tiles:\n # The old row and the new row are different after the merge\n # New tile will be needed\n new_tiles_needed = True\n self.set_grid(tile, OFFSETS[direction], tiles)\n\n if new_tiles_needed == True:\n self.new_tile()", "def makeMove(self, movable_statement):\n ### Student code goes here\n movingTile = movable_statement.terms[0].term.element\n oldColumn = movable_statement.terms[1].term.element\n oldRow = movable_statement.terms[2].term.element\n newColumn = movable_statement.terms[3].term.element\n newRow = movable_statement.terms[4].term.element\n\n empty = parse_input(\"fact: (empty ?x ?y)\")\n emptyFact = self.kb.kb_ask(empty).list_of_bindings[0][1][0]\n\n oldEmptyColumn = emptyFact.statement.terms[0].term.element #should equal newColumn\n oldEmptyRow = emptyFact.statement.terms[1].term.element #should equal newRow\n newEmptyRow = oldRow\n newEmptyColumn = oldColumn\n\n oldOn = parse_input(\"fact: (located \" + movingTile + \" \" + oldColumn + \" \" + oldRow + \")\")\n oldEmpty = parse_input(\"fact: (empty \" + oldEmptyColumn + \" \" + oldEmptyRow + 
\")\")\n newOn = parse_input(\"fact: (located \" + movingTile + \" \" + newColumn + \" \" + newRow + \")\")\n newEmpty = parse_input(\"fact: (empty \" + newEmptyColumn + \" \" + newEmptyRow + \")\")\n \n self.kb.kb_retract(oldOn)\n self.kb.kb_retract(oldEmpty)\n\n self.kb.kb_assert(newOn)\n self.kb.kb_assert(newEmpty)\n\n #assert all new movable statements\n # for fact in self.kb.facts:\n # if fact.statement.predicate == \"located\":\n # tile = fact.statement.terms[0].term.element\n # column = fact.statement.terms[1].term.element\n # row = fact.statement.terms[2].term.element\n\n # tileNumber = int(tile[-1])\n # columnNumber = int(column[-1])\n # rowNumber = int(row[-1])\n\n # if (columnNumber + 1 == newEmptyColumn) or (columnNumber - 1 == newEmptyColumn):\n # if (rowNumber + 1 == newEmptyRow) or (rowNumber - 1 == newEmptyRow):\n # #tile found is adjacent to empty spot so can move there\n # newMovable = parse_input(\"fact: (movable \" + tile + \" \" + columnNumber + \" \" + rowNumber + \" \" + newEmptyColumn + \" \" + newEmptyRow + \")\")\n # self.kb.kb_assert(newMovable)", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] 
== \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def test_game_move_positive():\n\n\n file1=\"/home/unit_test_grids/test_game_grid.txt\"\n file2=\"/home/unit_test_grids/test_game_grid2.txt\"\n file3=\"/home/unit_test_grids/test_game_grid3.txt\"\n file4=\"/home/unit_test_grids/test_game_grid4.txt\"\n\n # Testing for move 's'.\n my_game=Game(file1)\n \n \n result=my_game.move('s')\n \n \n assert my_game.listOfMoves==['s'],\"The list of moves is not updated \"\\\n \"correctly after the player moves.\"\n \n \n assert my_game.numberOfMoves==1,\"The number of moves is not updated \"\\\n \"correctly after the player moves.\" \n \n \n assert result[0]==\"*X**\\n*A *\\n**Y*\\n\\nYou have 0 water buckets.\\n\",\"The move function is not returning\"\\\n \"the correct list. The first element of \"\\\n \"list is not the correct representation \"\\\n \"of grid configuration and player.\"\n \n \n assert result[1]==\"\",\"The move function is not returning\"\\\n \"the correct list. The second element of \"\\\n \"list is not the correct additional move \"\\\n \"message\"\n \n \n assert result[2]==\"continue\",\"The move function is not returning\"\\\n \"the correct list. The third element of \"\\\n \"list is not telling correctly whether \"\\\n \"the player can continue playing or whether\"\\\n \"the player has won or lost.\"\n\n \n \n # Testing for move 'd'.\n my_game=Game(file2)\n \n \n result=my_game.move('d')\n \n \n assert my_game.listOfMoves==['d'],\"The list of moves is not updated \"\\\n \"correctly after the player moves.\"\n \n \n assert my_game.numberOfMoves==1,\"The number of moves is not updated \"\\\n \"correctly after the player moves.\" \n \n \n assert result[0]==\"****\\nXA *\\n**Y*\\n\\nYou have 0 water buckets.\\n\",\"The move function is not returning\"\\\n \"the correct list. The first element of \"\\\n \"list is not the correct representation \"\\\n \"of grid configuration and player.\"\n \n \n assert result[1]==\"\",\"The move function is not returning\"\\\n \"the correct list. The second element of \"\\\n \"list is not the correct additional move \"\\\n \"message\"\n \n \n assert result[2]==\"continue\",\"The move function is not returning\"\\\n \"the correct list. 
The third element of \"\\\n \"list is not telling correctly whether \"\\\n \"the player can continue playing or whether\"\\\n \"the player has won or lost.\"\n\n\n \n \n # Testing for move 'a'.\n my_game=Game(file3)\n \n \n result=my_game.move('w')\n \n \n assert my_game.listOfMoves==['w'],\"The list of moves is not updated \"\\\n \"correctly after the player moves.\"\n \n \n assert my_game.numberOfMoves==1,\"The number of moves is not updated \"\\\n \"correctly after the player moves.\" \n \n \n assert result[0]==\"****\\n* AY\\n**X*\\n\\nYou have 0 water buckets.\\n\",\"The move function is not returning\"\\\n \"the correct list. The first element of \"\\\n \"list is not the correct representation \"\\\n \"of grid configuration and player.\"\n \n \n assert result[1]==\"\",\"The move function is not returning\"\\\n \"the correct list. The second element of \"\\\n \"list is not the correct additional move \"\\\n \"message\"\n \n \n assert result[2]==\"continue\",\"The move function is not returning\"\\\n \"the correct list. The third element of \"\\\n \"list is not telling correctly whether \"\\\n \"the player can continue playing or whether\"\\\n \"the player has won or lost.\"\n\n \n \n # Testing for move 'a'.\n my_game=Game(file4)\n \n \n result=my_game.move('a')\n \n \n assert my_game.listOfMoves==['a'],\"The list of moves is not updated \"\\\n \"correctly after the player moves.\"\n \n \n assert my_game.numberOfMoves==1,\"The number of moves is not updated \"\\\n \"correctly after the player moves.\" \n \n \n assert result[0]==\"****\\n* AX\\n*Y**\\n\\nYou have 0 water buckets.\\n\",\"The move function is not returning\"\\\n \"the correct list. The first element of \"\\\n \"list is not the correct representation \"\\\n \"of grid configuration and player.\"\n \n \n assert result[1]==\"\",\"The move function is not returning\"\\\n \"the correct list. The second element of \"\\\n \"list is not the correct additional move \"\\\n \"message\"\n \n \n assert result[2]==\"continue\",\"The move function is not returning\"\\\n \"the correct list. The third element of \"\\\n \"list is not telling correctly whether \"\\\n \"the player can continue playing or whether\"\\\n \"the player has won or lost.\"\n\n\n\n # Testing for move 'e'.\n my_game=Game(file2)\n \n result=my_game.move('e')\n \n \n assert my_game.listOfMoves==['e'],\"The list of moves is not updated \"\\\n \"correctly after the player moves.\"\n \n \n assert my_game.numberOfMoves==1,\"The number of moves is not updated \"\\\n \"correctly after the player moves.\" \n \n \n assert result[0]==\"****\\nA *\\n**Y*\\n\\nYou have 0 water buckets.\\n\",\"The move function is not returning\"\\\n \"the correct list. The first element of \"\\\n \"list is not the correct representation \"\\\n \"of grid configuration and player.\"\n \n \n assert result[1]==\"\",\"The move function is not returning\"\\\n \"the correct list. The second element of \"\\\n \"list is not the correct additional move \"\\\n \"message\"\n \n \n assert result[2]==\"continue\",\"The move function is not returning\"\\\n \"the correct list. 
The third element of \"\\\n \"list is not telling correctly whether \"\\\n \"the player can continue playing or whether\"\\\n \"the player has won or lost.\"", "def action(self):\r\n\r\n\r\n #have we just started?\r\n if self.player_information[\"us\"][\"nTokens\"] == 0:\r\n move = generate_starting_move(self.player_information[\"us\"][\"player_side\"], self.board_array)\r\n return move\r\n\r\n #otherwise do minimax \r\n \r\n #start off with some shallow depth:\r\n if self.turn_no < 5:\r\n depth = 3\r\n else:\r\n depth = 2\r\n \r\n #set a constraint for search depth\r\n if self.total_tokens_on_board < 6:\r\n depth = 3\r\n elif self.total_tokens_on_board < 10:\r\n depth = 2\r\n else:\r\n depth = 1\r\n \r\n #have a time reference\r\n print(f'nthrows: {self.player_information[\"us\"][\"nThrowsRemaining\"]}')\r\n starting_time = int(round(time.time(), 0))\r\n #salvage result from minimax\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, depth, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, True, self.turn_no)\r\n\r\n #clean it up a bit \r\n print(self.board_dict)\r\n #tidy it up\r\n result = result[0]\r\n print(f'pre: {result}')\r\n #in case we get a bad move redo but make it very shallow\r\n if len(result) == 1 or result == (-5, -5):\r\n #force it to return a usable move\r\n counter = 0\r\n while (len(result) == 1) or (result == (-5, -5)):\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, 1, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, False, self.turn_no)\r\n result = result[0]\r\n counter += 1\r\n \r\n #if its taking too long\r\n if counter > 2: \r\n #generate one random possible move to use \r\n allied_tokens = [token for token in self.player_tokens if self.player_tokens[token] == \"us\"]\r\n move_list = generate_moves(self.board_dict, self.player_tokens, self.co_existance_dict, allied_tokens,\r\n self.player_information, self.board_array, True, \"all\")\r\n \r\n \r\n #if there are no moves\r\n if len(move_list) == 0:\r\n if self.player_information['us']['nThrowsRemaining'] > 0:\r\n throws = generate_possible_throws(self.board_dict, self.player_tokens, self.co_existance_dict, self.player_information, \"us\",\r\n self.player_information[\"us\"][\"player_side\"], self.board_array, \"all\" )\r\n result = random.choice(throws)\r\n \r\n else:\r\n result = random.choice(move_list)\r\n print(f'random: {result}')\r\n break\r\n\r\n print(f' inside: {result}')\r\n\r\n print(result)\r\n #otherwise clean it up\r\n if result[0] == 'throw':\r\n final_result = (result[0].upper(), result[1], result[2])\r\n else:\r\n final_result = (result[0].upper(), result[2], result[3])\r\n # return final result \r\n return final_result", "def make_move(self):\n self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell made at ({self.row}, {self.col}) by {self.owner}')\n\n # Set the current turn for the other player if game is not over\n # Check if find winner\n if self.game.check_win(cell=self) or\\\n self.game.get_all_game_cells().filter(status='EMPTY').count() == 0:\n print(\"Winnnnnnnn\")\n 
self.game.mark_complete(winner=self.owner)\n\n # Switch player turn\n self.game.switch_turn()\n\n # Let the game know about the move and result\n self.send_game_update()", "def move(self):\n\n x, y = self.position\n\n if self.in_spawn_area:\n if 0 <= x < MAP_SIZE and 0 <= y < MAP_SIZE:\n self.in_spawn_area = False\n\n preferred_direction = self.get_preferred_direction()\n\n if preferred_direction == (0, 0):\n return\n\n new_tiles = self.calculate_tiles_ahead(preferred_direction)\n\n if self.can_advance(new_tiles, preferred_direction):\n self.position = self.position[0] + preferred_direction[0] * 2, self.position[1] + preferred_direction[1] * 2\n self.update_cache_after_move(preferred_direction, new_tiles)\n self.previous_direction = preferred_direction[:]", "def apply_move(army_id, tiles_by_idx, player_info, move, dbg=False):\n def mverr(msg):\n DBGPRINT(\"ERROR! \"+msg)\n def update_tile_with_dict(tile, update_dict):\n tiles_by_idx[tile['xyidx']].update(update_dict)\n def update_tile_with_unit(tile, unit):\n update_tile_with_dict(tile, dict( (key,val) for key,val in unit.items()\n if key in UNIT_DICT_KEYS))\n def del_unit(tile):\n for key in list(tile.keys()): # copy to avoid chging during iter\n if key in UNIT_DICT_KEYS:\n del tile[key]\n def del_loaded_unit(tile):\n del tile['slot1_deployed_unit_id']\n del tile['slot1_deployed_unit_name']\n del tile['slot1_deployed_unit_health']\n def move_unit(src_tile, dest_tile):\n src_tile['moved'] = '1'\n if src_tile['xyidx'] == dest_tile['xyidx']: return\n update_tile_with_unit(dest_tile, src_tile)\n del_unit(src_tile)\n if can_capture(dest_tile, src_tile, dest_tile['unit_army_id']):\n # undo partial capture\n src_tile['capture_remaining'] = \"20\"\n\n #DBGPRINT(\"move={}\".format(move))\n if move.get('stop_worker_num', '') != '':\n return False\n data = move['data']\n if data['end_turn']: return False\n if data['purchase']:\n unit_name = data['purchase']['unit_name']\n castle = tiles_by_idx[movedict_xyidx(data['purchase'])]\n if castle['terrain_name'] != 'Castle':\n return mverr('cannot purchase new unit at {} which is not a castle'.format(\n tilestr(castle)))\n if castle.get('unit_name') not in [None, '']:\n return mverr('cannot purchase new unit at {} which is occupied'.format(\n tilestr(castle)))\n if int(player_info['funds']) < UNIT_TYPES[unit_name]['cost']:\n return mverr('insufficient funds {} to purchase a {} (cost={}) at {}'.format(\n player_info['funds'], unit_name, UNIT_TYPES[unit_name]['cost'], tilestr(castle)))\n player_info['funds'] = str(int(player_info['funds']) - UNIT_TYPES[unit_name]['cost'])\n new_unit_id = 100 + \\\n max([int(ifdictnone(tile, 'unit_id', 0)) for tile in tiles_by_idx.values()])\n update_tile_with_dict(castle, {\n 'unit_army_id': str(army_id), 'unit_army_name': 'TODO:armyname',\n 'unit_id': new_unit_id, 'unit_name': unit_name,\n 'unit_team_name': 'foo', 'health': \"100\", 'fuel': '100',\n 'primary_ammo': '100', 'secondary_ammo': '100', 'moved': '1'\n })\n if dbg: DBGPRINT('army_id={} purchase, resulting in {}'.format(\n army_id, tilestr(castle)))\n return True\n # data['move'] == True\n movemove = data['move']\n src_xyidx = dest_xyidx = movedict_xyidx(movemove)\n src_tile = dest_tile = tiles_by_idx[src_xyidx]\n if len(movemove['movements']) > 0:\n dest_xyidx = movedict_xyidx(movemove['movements'][-1])\n dest_tile = tiles_by_idx[dest_xyidx]\n\n if movemove.get('unit_action', 'simplemove') == 'unloadSlot1':\n if not is_loaded_unicorn(src_tile) and not is_loaded_skateboard(src_tile):\n return mverr(\"attempted to 
unload a tile that isn't a loaded {}: {}\".format(\n tile['unit_name'], tilestr(src_tile, True)))\n src_unit_name = src_tile['unit_name']\n move_unit(src_tile, dest_tile)\n unload_xyidx = int(movemove['y_coord_action'])*1000 + int(movemove['x_coord_action'])\n update_tile_with_dict(tiles_by_idx[unload_xyidx], {\n 'unit_army_id': str(army_id), 'unit_army_name': 'TODO:armyname',\n 'unit_id': src_tile.get('slot1_deployed_unit_id', 999),\n 'unit_name': src_tile['slot1_deployed_unit_name'],\n 'health': src_tile['slot1_deployed_unit_health'],\n 'unit_team_name': 'TODOunit_team_name', 'fuel': '100',\n 'primary_ammo': '100', 'secondary_ammo': '100', 'moved': '1'\n })\n del_loaded_unit(src_tile)\n if dbg: DBGPRINT('army_id={} moved & unloaded {} {} onto {}'. format(\n army_id, src_unit_name, tilestr(dest_tile),\n tilestr(tiles_by_idx[unload_xyidx])))\n return True\n\n if movemove.get('unit_action', 'simplemove') == 'load':\n if len(movemove['movements']) == 0:\n return mverr(\"can't load without movement: {}\".format(tilestr(src_tile, True)))\n if is_loaded_unicorn(dest_tile) or is_loaded_skateboard(dest_tile):\n return mverr(\"can't load {} that's already loaded: {}\".format(\n dest_tile['unit_name'], tilestr(dest_tile, True)))\n if src_tile['unit_name'] not in LOADABLE_UNITS:\n return mverr(\"can't load this type of unit: {}\".format(\n tilestr(src_tile, True)))\n # note: loading does NOT set 'moved' on the carrier\n update_tile_with_dict(dest_tile, {\n 'slot1_deployed_unit_id': src_tile['unit_id'],\n 'slot1_deployed_unit_name': src_tile['unit_name'],\n 'slot1_deployed_unit_health': src_tile['health'] })\n if dbg: DBGPRINT('army_id={} load {} {} from {}'. format(\n army_id, dest_tile['unit_name'], tilestr(dest_tile), tilestr(src_tile)))\n del_unit(src_tile)\n return True\n\n if movemove.get('unit_action', 'simplemove') == 'join':\n if src_tile['unit_name'] != dest_tile['unit_name']:\n return mverr(\"attempted to join incompatible types: {} ==> {}\".format(\n tilestr(src_tile), tilestr(dest_tile)))\n if is_loaded_unicorn(src_tile) or is_loaded_skateboard(src_tile):\n return mverr(\"attempted to join a Unicorn that's already loaded: {}\".format(\n src_tile['unit_name'], tilestr(src_tile, True)))\n if is_loaded_unicorn(dest_tile) or is_loaded_skateboard(dest_tile):\n return mverr(\"attempted to join to Unicorn that's already loaded: {}\".format(\n dest_tile['unit_name'], tilestr(dest_tile, True)))\n dest_tile['health'] = min(unit_health(dest_tile) + unit_health(src_tile), 100)\n del_unit(src_tile)\n return True\n\n if movemove.get('unit_action', 'simplemove') == 'capture':\n if src_tile.get('unit_name') not in CAPTURING_UNITS:\n return mverr(\"unit can't capture: {}\".format(tilestr(src_tile, True)))\n if dest_tile['terrain_name'] not in CAPTURABLE_TERRAIN:\n return mverr(\"terrain can't be captured: {}\".format(tilestr(dest_tile, True)))\n if is_my_building(dest_tile, army_id):\n return mverr(\"tile already captured: {}\".format(tilestr(dest_tile, True)))\n capture_remaining = get_capture_remaining(dest_tile)\n capture_remaining = max(0, int(capture_remaining) - int(10.0 * unit_health(src_tile) / 100.0))\n move_unit(src_tile, dest_tile)\n dest_tile.update({ 'capture_remaining': str(capture_remaining) })\n if capture_remaining == 0:\n if dbg: DBGPRINT('army_id={} completed capture of {}'.format(army_id, tilestr(dest_tile)))\n dest_tile.update({ 'building_army_id': army_id,\n 'capture_remaining': '20', # reset\n 'building_army_name': \"TODOarmy_name\",\n 'building_team_name': \"TODOteam_name\" })\n 
else:\n if dbg: DBGPRINT('army_id={} capturing {}, capture_remaining={}'.format(\n army_id, tilestr(dest_tile), capture_remaining))\n return True\n\n if 'x_coord_attack' in movemove:\n if src_tile['unit_name'] not in ATTACKING_UNITS:\n return mverr(\"unit can't attack: {}\".format(tilestr(src_tile, True)))\n defender_xyidx = int(movemove['y_coord_attack'])*1000 + int(movemove['x_coord_attack'])\n defender = tiles_by_idx[defender_xyidx]\n unit_info = UNIT_TYPES[src_tile['unit_name']]\n atkdist = dist(dest_tile, defender)\n if defender.get('unit_name') not in UNIT_TYPES.keys():\n return mverr(\"can't attack, no unit at dest: {}\".format(tilestr(defender, True)))\n if not (unit_info['atkmin'] <= dist(dest_tile, defender) <= unit_info['atkmax']):\n return mverr(\"attacker {}->{} not in range [{}-{}] from defender {}\".format(\n tilestr(src_tile), tilestr(dest_tile), unit_info['atkmin'], unit_info['atkmax'],\n tilestr(defender)))\n \n move_unit(src_tile, dest_tile)\n attacker = dest_tile\n damage = compute_damage(attacker, defender)\n if dbg: DBGPRINT('army_id={} attack {} vs {} dhealth={} dmg={}'.format(\n army_id, tilestr(attacker), tilestr(defender), defender['health'], damage))\n move['__attack'] = {'attacker':tilestr(attacker), 'attacker_health': attacker['health'],\n 'defender': tilestr(defender), 'defender_health': defender['health'],\n 'damage': damage }\n defender['health'] = str(unit_health(defender) - damage)\n rdamage = 0\n if unit_health(defender) <= 0:\n del_unit(defender)\n move['__attack']['return_damage'] = ATTACK_DEFENDER_KILLED\n if dbg: DBGPRINT('=> defender killed')\n elif defender['unit_name'] in RETURNS_FIRE_UNITS:\n rdamage = compute_damage(defender, attacker)\n move['__attack']['return_damage'] = rdamage\n if dbg: DBGPRINT('=> return dmg={} vs attacker health={}'.format(\n rdamage, attacker['health']))\n attacker['health'] = str(unit_health(defender) - rdamage)\n if unit_health(attacker) <= 0:\n move['__killed_atk'] = copy.deepcopy(attacker)\n del_unit(attacker)\n move['__attack']['return_damage'] = ATTACK_ATTACKER_KILLED\n if dbg: DBGPRINT('=> attacker killed')\n return True\n\n # simple movement\n if len(movemove['movements']) > 0:\n if dbg: DBGPRINT('army_id={} moved {} -> {}'. format(\n army_id, tilestr(src_tile), tilestr(dest_tile)))\n move_unit(src_tile, dest_tile)\n else:\n src_tile['moved'] = '1'\n return True", "def make_play(board, your_team, last_move):\n \"\"\"\n # a list containing all the entities from all the teams (either Monkeys or Queens)\n entities = board.get_entities()\n\n # just like entities, but into a map (dictionary). The key is a Vec2I object containing the position where you\n # want to get the entity. Use entity_map.get(Vec2I(x, y)) instead of entity_map[Vec2I(x, y)] if you want to avoid\n # raising a KeyError. 
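The make_play snippet here hands the actual decision to an alpha_beta_search routine that is not included in the excerpt, and the earlier make_move snippet similarly calls MinimaxTree.alphabeta with an initial window of -10000 to 10001. A generic sketch of the pruning pattern both appear to rely on follows; the legal_moves, apply, and evaluate hooks are illustrative placeholders, not the original projects' APIs.

import math

def alpha_beta(state, depth, alpha, beta, maximizing, game):
    # Generic alpha-beta skeleton. The game object supplies three
    # game-specific callbacks (placeholder names, not the original API):
    # game.legal_moves(state), game.apply(state, move), game.evaluate(state).
    moves = game.legal_moves(state)
    if depth == 0 or not moves:
        return game.evaluate(state), None
    best_move = None
    if maximizing:
        value = -math.inf
        for move in moves:
            score, _ = alpha_beta(game.apply(state, move), depth - 1,
                                  alpha, beta, False, game)
            if score > value:
                value, best_move = score, move
            alpha = max(alpha, value)
            if alpha >= beta:  # beta cut-off: the minimizer avoids this branch
                break
    else:
        value = math.inf
        for move in moves:
            score, _ = alpha_beta(game.apply(state, move), depth - 1,
                                  alpha, beta, True, game)
            if score < value:
                value, best_move = score, move
            beta = min(beta, value)
            if beta <= alpha:  # alpha cut-off: the maximizer avoids this branch
                break
    return value, best_move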
Vec2I is used for the positions\n entity_map = board.get_entity_map()\n\n # List all the possible legal moves\n all_possible_moves = board.get_legal_moves(your_team)\n\n # You can iterate over all the entities like so:\n for entity in entities:\n position = entity.get_position()\n team = entity.get_team()\n print('Entity at position {}, is from team {}'.format(position, team))\n\n # You can get other information from the board functions.\n your_queen = board.search_queen(your_team)\n\n # There are only two teams, either Team.WHITE or Team.BLACK\n enemy_team = None\n if your_team == Team.WHITE:\n enemy_team = Team.BLACK\n else:\n enemy_team = Team.WHITE\n\n # you can do the same with this one liner\n enemy_team = Team.WHITE if your_team == Team.BLACK else Team.BLACK\n\n # get the enemy queen info from the board\n enemy_queen = board.search_queen(enemy_team)\n\n # Get the position of an entity, for example, with this queen\n # This can also work with Monkeys\n your_queen_position = enemy_queen.get_position()\n\n # Get the queen stack (number of remaining monkeys)\n your_queen_stack = your_queen.get_stack()\n\n # Print the position information, positions use the object Vec2I, defined in the file src/game/geo.py\n print(your_queen_position.x, your_queen_position.y)\n\n # Get all the possible moves for your queen\n possible_moves = your_queen.get_legal_moves()\n\n # We want to move our queen one cell down\n your_queen_x = your_queen_position.x\n your_queen_y = your_queen_position.y\n\n # Again, the game uses the Vec2I object for the positions\n new_position = Vec2I(your_queen_x, your_queen_y + 1)\n\n # As the board is a DEEP COPY of the real board, you can use it to forecast the future, for example, if you\n # want to list all your enemy moves after the move you want to select\n\n # As said, you have to return a tuple of Vec2I from this function, but to make a play you have to put those\n # two Vec2I in a Command object\n move_command = Command(your_queen_position, new_position)\n\n # Make a copy of the current game state\n current_board = board.copy_state()\n\n # Plays the command, now the board is just like you have played your decised move\n board.make_play(move_command)\n\n # Forecast all the legal moves from your opponent\n opponent_possible_responses = board.get_legal_moves()\n\n # We check if the new position is a legal move\n if new_position in possible_moves:\n # We make this play by returning the new_position\n return your_queen_position, new_position\n else:\n new_position = random.choice(possible_moves)\n return your_queen_position, new_position\n \"\"\"\n begin = time()\n np_board = board_translate(board,your_team)\n move = alpha_beta_search(np_board, your_team)\n print(\"Execution time: \" + str(time() - begin))\n move = (Vec2I(move[0][0], move[0][1]),Vec2I(move[1][0],move[1][1]))\n return move", "def perform_turn(self, arena, units):\r\n # Verify that unit hasn't died\r\n if self.dead:\r\n return 'dead', {}\r\n\r\n # Verify that enemies are still present\r\n targets = [u for u in units if u.race == self.enemy_race() and not u.dead]\r\n if len(targets) == 0:\r\n return 'no-targets', {}\r\n\r\n # Check for in-range targets\r\n targets = self.find_adjacent_targets(arena, units)\r\n if len(targets) > 0:\r\n data = self.perform_attack(arena, targets)\r\n return 'attack', data\r\n\r\n # Find reachable tiles\r\n in_range = self.find_in_range_tiles(arena, units)\r\n target, paths = find_target_tile(self.x, self.y, in_range, arena, units)\r\n\r\n if target is None:\r\n return 
'no-reachable', {}\r\n\r\n # If multiple paths exist, pick the starting point using reading order\r\n optimal_paths = find_optimal_paths((self.x, self.y), target, paths)\r\n choices = sorted([op[0] for op in optimal_paths])\r\n x, y = choices[0]\r\n\r\n # Update position\r\n self.x = x\r\n self.y = y\r\n\r\n # Check for in-range targets after moving\r\n targets = self.find_adjacent_targets(arena, units)\r\n if len(targets) > 0:\r\n data = self.perform_attack(arena, targets)\r\n return 'move-attack', data\r\n else:\r\n return 'moved', {'pos': (x, y)}", "def make_move(self):\n\n # If the agent is starting a game, make an \n # initial move\n if self.get_play_status() == False: \n self.initial_move()\n return\n\n # For speed's sake, allow the reflex agent to respond to manual\n # input; comment this out for automatic running.\n x = int(input('hotwire x:'))\n y = int(input('hotwire y:'))\n return self.get_game_space().set_tile(x,y,self.get_affinity())\n\n # Check whether the agent's side is going to \n # win by making one move, and make that move,\n # OR\n # check if the opponent has a compromising move \n best_move = self.victory_check()\n if best_move is None: best_move = self.counter_opponent_win()\n if best_move is None: best_move = self.counter_opponent_adv()\n if best_move is None: best_move = self.best_last_option()\n if best_move != None: \n x = best_move[0]\n y = best_move[1]\n return self.get_game_space().set_tile(x,y,self.get_affinity())", "def play_move(self, move_data, all_english_words):\r\n \r\n #first, make copy of board and try to apply move there.\r\n board_cpy = copy.deepcopy(self.myboard)\r\n \r\n rack_cpy = copy.deepcopy(self.rack) #***need to remove appropriate tiles from rack after we've made a move. \r\n\r\n i = 0 \r\n for cur_char in move_data[0]:\r\n new_tile = Tile(cur_char, self.game_bag.letter_freq_and_val[cur_char][1]) #create a new tile.\r\n if move_data[3] == \"horizontal\":\r\n print(\"adding cur_char {} at {} {}\".format(cur_char, move_data[1], move_data[2]+i))\r\n board_cpy.place_tile(move_data[1], move_data[2]+ i, new_tile)\r\n elif move_data[3] == \"vertical\":\r\n board_cpy.place_tile(move_data[1] + i, move_data[2], new_tile)\r\n\r\n i = i + 1\r\n #print(\"New i value is {}\".format(i))\r\n board_cpy.print_board()\r\n \r\n\r\n #once we're done placing the tiles, check for validity of entire board.\r\n cur_board_words = board_cpy.find_words_on_board()\r\n move_valid = True #assume move is valid, until proven otherwise.\r\n\r\n for word_data in cur_board_words:\r\n word = word_data[0]\r\n print(word)\r\n if word not in all_english_words:\r\n return False #do nothing else\r\n \r\n #print(\"Getting here; all words valid\")\r\n \r\n #getting here means that the move is actually valid, with no conflicts.\r\n main_board = self.myboard\r\n #In this case, add to the real board. 
\r\n i = 0 \r\n for cur_char in move_data[0]:\r\n new_tile = Tile(cur_char, self.game_bag.letter_freq_and_val[cur_char][1]) #create a new tile.\r\n if move_data[3] == \"horizontal\":\r\n main_board.place_tile(move_data[1], move_data[2]+ i, new_tile)\r\n elif move_data[3] == \"vertical\":\r\n main_board.place_tile(move_data[1] + i, move_data[2], new_tile)\r\n\r\n i = i + 1\r\n return True", "def winningMove():\r\n\tglobal turn, tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9\r\n\r\n\tnoWin=True\r\n\tmove=False\r\n\tif turn==\"Player1\":\r\n\t\tif validMove(1):\r\n\t\t\ttile1+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove=1\t\r\n\t\t\ttile1+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(2):\r\n\t\t\ttile2+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 2\r\n\t\t\ttile2+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(3):\r\n\t\t\ttile3+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 3\r\n\t\t\ttile3+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\t\t\r\n\t\tif validMove(4):\r\n\t\t\ttile4+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 4\t\r\n\t\t\ttile4+=-1\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\t\r\n\t\tif validMove(5):\r\n\t\t\ttile5+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 5\t\t\r\n\t\t\ttile5+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(6):\r\n\t\t\ttile6+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 6\t\r\n\t\t\ttile6+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(7):\r\n\t\t\ttile7+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 7\t\r\n\t\t\ttile7+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(8):\r\n\t\t\ttile8+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 8\t\r\n\t\t\ttile8+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(9):\r\n\t\t\ttile9+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 9\t\t\r\n\t\t\ttile9+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\r\n\telif turn==\"Player2\":\r\n\t\tif validMove(1):\r\n\t\t\ttile1+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 1\t\t\t\t\r\n\t\t\ttile1+=-2\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(2):\r\n\t\t\ttile2+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 2\r\n\t\t\ttile2+=-2\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(3):\r\n\t\t\ttile3+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 3\r\n\t\t\ttile3+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(4):\r\n\t\t\ttile4+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 4\t\r\n\t\t\ttile4+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(5):\r\n\t\t\ttile5+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 5\t\r\n\t\t\ttile5+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(6):\r\n\t\t\ttile6+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 6\t\r\n\t\t\ttile6+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(7):\r\n\t\t\ttile7+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 7\t\r\n\t\t\ttile7+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(8):\r\n\t\t\ttile8+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 8\t\r\n\t\t\ttile8+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif 
validMove(9):\r\n\t\t\ttile9+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 9\r\n\t\t\ttile9+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\tif noWin:\r\n\t\treturn False", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def execute_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n board.set_player_perspective(player)\n \n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n dest_spike_index = fields_to_move - 1\n board.remove_checker_from_bar()\n else:\n dest_spike_index = spike_index + fields_to_move\n board.pop_player_checker(spike_index)\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board\n\n board.push_player_checker(dest_spike_index)\n\n return board", "def test_move():\n human = Human()\n coordinates = [2, 1]\n dimensions = [3, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n\n possible_new_coordinates = [[2, 0], [3, 0], [3, 1], [3, 2], [2, 2], [1, 2], [1, 1], [1, 0]]\n\n assert new_coordinates in possible_new_coordinates", "def respond_to_move(self, move):\n\n # this will get the piece at the queried position,\n # will notify user if there is no piece there\n current_algebraic, new_algebraic = move\n row, column = self.algebraic_mapped_to_position[current_algebraic]\n if self.board[row][column] == empty_square:\n print(\"There is no piece at %s\" % (current_algebraic,))\n return\n piece, location = self.board[row][column]\n\n # this will get all possible moves from this position\n # and will make the move if the new position is a\n # valid move\n piece_name = self.piece_names[piece]\n moves = self.moves[piece_name]((row, column))\n \n new_row, new_column = self.algebraic_mapped_to_position[new_algebraic]\n print(\"old position %s, %s\" % (row, column))\n print(\"new algebraic %s\" % new_algebraic)\n print(\"new position %s, %s\" % (new_row, new_column))\n print(\"moves %s\" % moves)\n if (new_row, new_column) in moves:\n # this will change the game board to reflect the move\n self.board[row][column] = empty_square\n self.board[new_row][new_column] = piece+location", "def movePlayerTo(self, target):\n if self.player:\n row = 1\n if not self.player.first: # player 1 or 2\n row = -1\n\n if self.player.king:\n if abs(target.row - self.row) == 1 and abs(target.col - self.col) == 1: # move\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 1\n if abs(target.row - self.row) == 2 and abs(target.col - self.col) == 2: # eat\n mid = getBlockBetween(self, target)\n debugBoard()\n if mid.player and mid.player.first != self.player.first: # can eat\n mid.player = None\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 2\n pass\n else:\n if target.row == self.row + row and abs(target.col - self.col) == 1: # move\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 1\n if target.row == self.row + row * 2 and abs(target.col - self.col) == 2: # eat\n mid = getBlockBetween(self, target)\n debugBoard()\n if mid.player and mid.player.first != self.player.first: # can eat\n mid.player = None\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n getGame().board.checkWin()\n return 2\n return 0", "def move(self, row, col, player):", "def move(self, direction):\r\n # replace with your code\r\n row_increment = OFFSETS[direction][0]\r\n 
col_increment = OFFSETS[direction][1]\r\n changed = False\r\n for header in self._grid_headers[direction]:\r\n row_header = header[0]\r\n col_header = header[1]\r\n source_line = []\r\n # get the source line first\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n source_line.append(self.get_tile(row_header, col_header))\r\n row_header += row_increment\r\n col_header += col_increment\r\n # merge\r\n result_line = merge(source_line)\r\n # write the result back\r\n row_header = header[0]\r\n col_header = header[1]\r\n result_line_index = 0\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n self.set_tile(row_header, col_header, result_line[result_line_index])\r\n if result_line[result_line_index] != source_line[result_line_index]:\r\n changed = True\r\n result_line_index += 1\r\n row_header += row_increment\r\n col_header += col_increment\r\n if changed:\r\n self.new_tile()", "def test_hard_bot_map1(self):\n game = self.bot_test_map1(Difficulty.hard)\n self.assertEqual(game.first_player.ask_for_move(), (0, 11))", "def computer_move(board,move,player):\r\n com_execution(board, move, player)", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" * rotation_num) \n return move_str", "def test_easy_bot_map1(self):\n game = self.bot_test_map1(Difficulty.easy)\n self.assertEqual(game.first_player.ask_for_move(), (0, 7))", "def make_move(self, start, end):\r\n start_pos = self.parse_pos(start) # Start and end position are lists that contain column and row\r\n end_pos = self.parse_pos(end)\r\n\r\n start_row = start_pos[0] # Position of row and columns are assigned to variables\r\n start_col = start_pos[1]\r\n end_row = end_pos[0]\r\n end_col = end_pos[1]\r\n\r\n board = self._board.get_board()\r\n start_piece = board[start_row][start_col].get_piece()\r\n end_piece = board[end_row][end_col].get_piece()\r\n\r\n\r\n # If there is no piece to be moved or game is over or piece is to be moved to its original location\r\n if start_piece is None or self._game_state != \"UNFINISHED\"\\\r\n or (start_row == end_row and start_col == end_col):\r\n return False\r\n\r\n start_piece_id = start_piece.get_player_id() # Contains the player id associated with the piece\r\n end_piece_player_id = None\r\n if end_piece is not None: # Executes if end piece contains a piece object\r\n end_piece_player_id = end_piece.get_player_id()\r\n\r\n # If Red's turn\r\n if self._player_turn == 1:\r\n if start_piece_id != 'r': # If red moves a black piece\r\n return False\r\n if start_piece.is_legal_move(start, end, start_piece, end_piece_player_id, board) : # Checks the legal move conditions\r\n if self.move_piece(start, end): # Returns False if move is invalid\r\n # Checks if move violates flying general and puts self in check\r\n if self.is_not_flying_general() is True and 
self.is_in_check(\"red\") is False:\r\n self.change_player_turn()\r\n self.is_in_checkmate()\r\n return True\r\n else: # Reverses the move if violates flying general rule\r\n self.reverse_move(start, end, board,end_piece_player_id, end_piece)\r\n return False\r\n\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n # If Black's turn\r\n elif self._player_turn == -1:\r\n if start_piece_id != 'b': # If black moves a red piece\r\n return False\r\n if start_piece.is_legal_move(start, end, start_piece, end_piece_player_id, board): # Checks the legal move conditions\r\n if self.move_piece(start, end): # Returns False if move is invalid\r\n if self.is_not_flying_general() is True and self.is_in_check(\"black\") is False:\r\n self.change_player_turn()\r\n self.is_in_checkmate()\r\n return True\r\n else: # Reverses the move if violates flying general rule\r\n self.reverse_move(start, end, board, end_piece_player_id, end_piece)\r\n return False\r\n else:\r\n return False\r\n else:\r\n return False" ]
[ "0.74114114", "0.7296464", "0.7176235", "0.7139627", "0.7137439", "0.7097358", "0.7088615", "0.7081109", "0.7047905", "0.703492", "0.69906497", "0.6971217", "0.6968847", "0.6915314", "0.6911569", "0.68654275", "0.6853786", "0.68512785", "0.68391025", "0.6836436", "0.68200254", "0.6801807", "0.6784249", "0.6759126", "0.6756842", "0.6756842", "0.67421097", "0.67421097", "0.67421097", "0.6738949", "0.6720588", "0.67201746", "0.66812444", "0.6649198", "0.6639424", "0.6619017", "0.655315", "0.6529669", "0.651919", "0.65017986", "0.64769703", "0.6440429", "0.62960416", "0.6292039", "0.62652576", "0.61838126", "0.614309", "0.6129003", "0.61128145", "0.611111", "0.6108179", "0.61038464", "0.60573334", "0.60466397", "0.60355204", "0.603203", "0.60315156", "0.59916687", "0.5990266", "0.59858024", "0.5978685", "0.59716046", "0.5962989", "0.59622306", "0.59460694", "0.59304774", "0.592232", "0.58944666", "0.58909386", "0.58769304", "0.58753586", "0.58677334", "0.5864872", "0.58607024", "0.58437437", "0.58358383", "0.5835579", "0.5835146", "0.58329105", "0.5832432", "0.5828771", "0.58221734", "0.5782033", "0.57682216", "0.5768212", "0.57672966", "0.57630503", "0.5761595", "0.5747655", "0.5745494", "0.5744847", "0.57349193", "0.57274896", "0.57030755", "0.57029676", "0.5702819", "0.5702717", "0.57008946", "0.5698975", "0.56952935" ]
0.69137377
14
Solve tile in column zero on specified row (> 1). Updates puzzle and returns a move string
def solve_col0_tile(self, target_row):
    # Move the zero tile from (target_row, 0) up and right to (target_row - 1, 1)
    move_str = 'ur'
    self.update_puzzle(move_str)
    cur_row, cur_col = self.current_position(target_row, 0)
    if cur_row == target_row and cur_col == 0:
        # Target tile landed in place; just park the zero tile at the row end
        move_str += 'r' * (self._width - 2)
    else:
        # Reposition the target tile to (target_row - 1, 1), apply the fixed
        # 3x2 solver sequence, then move the zero tile to the end of the row above
        move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)
        move_str += 'ruldrdlurdluurddlur'
        move_str += 'r' * (self._width - 2)
    self.update_puzzle(move_str[2:])
    return move_str
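The move strings throughout these entries encode slides of the zero tile ('u', 'd', 'l', 'r'). The following standalone sketch (not part of the dataset; apply_moves is a hypothetical helper standing in for the Puzzle class's update_puzzle) illustrates how such a string updates a grid:

# Hypothetical helper -- not part of the dataset's Puzzle class. It mimics
# update_puzzle: each character slides the tile adjacent to the zero tile
# into the zero tile's cell, moving the zero tile in the named direction.
def apply_moves(grid, moves):
    offsets = {'u': (-1, 0), 'd': (1, 0), 'l': (0, -1), 'r': (0, 1)}
    # Locate the zero tile.
    zero_row, zero_col = next((row, col) for row, vals in enumerate(grid)
                              for col, val in enumerate(vals) if val == 0)
    for move in moves:
        d_row, d_col = offsets[move]
        new_row, new_col = zero_row + d_row, zero_col + d_col
        # Slide the neighboring tile into the zero cell.
        grid[zero_row][zero_col] = grid[new_row][new_col]
        grid[new_row][new_col] = 0
        zero_row, zero_col = new_row, new_col
    return grid

# 'rd' slides the zero tile right, then down.
print(apply_moves([[0, 1], [2, 3]], 'rd'))  # [[1, 3], [2, 0]]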
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n 
curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n 
current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * (col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 
0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), \"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += 
cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', 
self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += 
len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row0_tile(self, target_col):\n move_str = 'ld'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(0, target_col)\n if cur_row == 0 and cur_col == target_col:\n return move_str\n else:\n move_str 
+= self.position_tile(1, target_col-1, cur_row, cur_col)\n move_str += 'urdlurrdluldrruld'\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position (0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, 0), \"tiles to right and below incorrectly ordered\"\n\n # Move zero tile from target position (target_row, 0) to (target_row - 1, 1).\n self.update_puzzle(\"ur\")\n\n move_str = \"\"\n \n # correct_tile's position is determined after moving zero tile \"ur\" \n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(target_row, 0)\n \n # Moves to reposition correct_tile to target position.\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(target_row, 0):\n move_str += str(self.position_tile(target_row - 1, 1, correct_tile))\n move_str += str(\"ruldrdlurdluurddlur\")\n\n # Moves to reposition zero tile to end of column of target_row + 1.\n move_str += str(\"r\" * (self.get_width() - 2)) \n \n self.update_puzzle(move_str)\n\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1) \n \n move_str = \"ur\" + move_str\n return move_str", "def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = 
target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def solve_row1_tile(self, target_col):\n cur_row, cur_col = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)\n self.update_puzzle(move_str)\n return move_str", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). 
\r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row0_tile(self, target_col):\r\n assert self.row0_invariant(target_col)\r\n move = \"ld\"\r\n self.update_puzzle(move)\r\n \r\n row, col = self.current_position(0, target_col)\r\n if row == 0 and col == target_col:\r\n return move\r\n else:\r\n move_to_target = self.move_to_target(1, target_col - 1, row, col)\r\n # 2x3 puzzle solver\r\n move_to_target += \"urdlurrdluldrruld\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n return move", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right 
move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n 
zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def solve_row0_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row0_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n \n # Move zero tile from target position (0, target_col) to (1, target_col - 1) \n self.update_puzzle(\"ld\")\n \n move_str = \"\"\n\n # correct_tile's position is determined after moving zero tile \"ld\"\n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(0, target_col) \n \n # Moves to reposition correct_tile to target position, and\n # the zero tile to (1, target_col - 1).\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(0, target_col):\n move_str += str(self.position_tile(1, target_col - 1, correct_tile))\n move_str += str(\"urdlurrdluldrruld\")\n\n self.update_puzzle(move_str)\n\n assert self.row1_invariant(target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"ld\" + move_str\n return move_str", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). 
\n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else 
statement used when correct_tile is 1+ rows above target position.\n            else:\n                ans += str(\"dllur\" * (horiz_dist - 1))\n                ans += str(\"dlu\")\n                ans += str(\"lddru\" * (vert_dist - 1))\n                ans += str(\"ld\")\n          \n        return ans", "def move_tile(self, target_row, target_col, val):\n        # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n        solved_row, solved_col = self.current_position(0, val)\n        movements = \"\"\n        if solved_row == target_row and solved_col == target_col:\n            return \"\"\n        if solved_row == target_row:\n            if target_col > solved_col:\n                movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n                    target_col - solved_col - 1)\n            else:\n                movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n                    solved_col - target_col - 1) + \"ulld\"\n        elif solved_col == target_col:\n            movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n                target_row - solved_row - 1) + \"ld\"\n        elif solved_col < target_col:\n            if solved_col == 0:\n                movements = \"l\" * (target_col - solved_col) + \"u\" * (\n                    target_row - solved_row) + \"rddlu\" * (\n                        target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n                            target_col - solved_col - 1)\n            else:\n                movements = \"l\" * (target_col - solved_col) + \"u\" * (\n                    target_row - solved_row) + \"lddru\" * (\n                        target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n                            target_col - solved_col - 1)\n        elif solved_col > target_col:\n            if solved_row == 0:\n                movements = \"u\" * (target_row - solved_row) + \"r\" * (\n                    solved_col - target_col) + \"dllur\" * (\n                        solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n                            target_row - solved_row - 1) + \"ld\"\n            else:\n                movements = \"u\" * (target_row - solved_row) + \"r\" * (\n                    solved_col - target_col) + \"ulldr\" * (\n                        solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n                            target_row - solved_row - 1) + \"ld\"\n        return movements", "def solve_puzzle(self):\r\n        \r\n        counter = 0\r\n        rows = self._height-1\r\n        cols = self._width-1\r\n        # print rows, cols\r\n        # print 'The grid has %s rows and %s column indexes' %(rows, cols) \r\n        solution_move = ''\r\n        if self.get_number(0,0) == 0 and \\\r\n        self.get_number(0,1) == 1:\r\n            # print 'Congrats, puzzle is solved at start!!!!!'\r\n            return ''\r\n        #appropriate_number = (self._height * self._width) - 1\r\n        appropriate_number = (rows+1) * (cols+1) -1\r\n        # print 'First appropriate_number=',appropriate_number\r\n        # print \"Grid first tile that we will be solving has value =\", self._grid[rows][cols]\r\n        \r\n        while counter < 300:\r\n            counter +=1\r\n            # print self\r\n            #appropriate_number = (rows+1) * (cols+1) -1\r\n            # print 'Appropriate number in loop=',appropriate_number\r\n            # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n            ####Case when we use solve_interior_tile\r\n            if rows > 1 and cols > 0:\r\n                if self._grid[rows][cols] == appropriate_number:\r\n                    # print 'This tile is already solved!!!'\r\n                    cols -= 1\r\n                    appropriate_number -=1\r\n                else:\r\n                    # print 'We are solving interior tile', (rows, cols)\r\n                    solution_move += self.solve_interior_tile(rows, cols)\r\n                    # print 'Solution move=', solution_move\r\n                    cols -= 1\r\n            #### Case when we use solve_col0_tile\r\n            elif rows > 1 and cols == 0:\r\n                if self._grid[rows][cols] == appropriate_number:\r\n                    # print 'This tile is already solved!!!'\r\n                    rows -= 1\r\n                    cols = self._width-1\r\n                    appropriate_number -=1\r\n                else:\r\n                    # print 'We are solving tile 0 in row', rows\r\n                    # print 'Appropriate number here ='\r\n                    solution_move += self.solve_col0_tile(rows)\r\n                    # print 'Solution move=', solution_move\r\n                    rows -=1\r\n                    
cols = self._width-1\r\n\r\n\r\n            #### Cases when we use solve_row1_tile\r\n            elif rows == 1 and cols > 1:\r\n                if self._grid[rows][cols] == appropriate_number:\r\n                    # print 'This tile is already solved!!!'\r\n                    rows -= 1\r\n                    #cols = self._width-1\r\n                    appropriate_number -= self._width\r\n\r\n                else:\r\n                    # print 'Solving upper 2 rows right side'\r\n                    solution_move += self.solve_row1_tile(cols)\r\n                    rows -=1\r\n                    appropriate_number -= self._width\r\n            #### Cases when we use solve_row0_tile \r\n            if rows < 1 and cols > 1:\r\n                if self._grid[rows][cols] == appropriate_number:\r\n                    # print 'This tile is already solved!!!'\r\n                    rows += 1\r\n                    cols -= 1\r\n                    appropriate_number +=self._width-1\r\n                else:\r\n                    # print '(1,J) tile solved, lets solve tile (0,j) in tile',(rows,cols)\r\n                    # print 'Grid after move solve_row1_tile'\r\n                    # print self\r\n                    solution_move += self.solve_row0_tile(cols)\r\n                    rows +=1\r\n                    cols -=1\r\n                    appropriate_number +=self._width-1\r\n\r\n\r\n            #### Case when we use solve_2x2\r\n            elif rows <= 1 and cols <= 1:\r\n                # print 'We are solving 2x2 puzzle'\r\n                solution_move += self.solve_2x2()\r\n                if self._grid[0][0] == 0 and \\\r\n                self._grid[0][1] == 1:\r\n                    # print 'Congrats, puzzle is SOLVED!!!!!'\r\n                    break\r\n\r\n\r\n\r\n\r\n            if counter > 100:\r\n                # print 'COUNTER BREAK'\r\n                break\r\n        # print solution_move, len(solution_move)\r\n        return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n        # for row in solution_greed._grid[::-1]:\r\n        #     print solution_greed._grid\r\n        #     print 'Row =',row\r\n            \r\n        #     if solution_greed._grid.index(row) > 1:\r\n        #         print \"Case when we are solving Interior and Tile0 part\"\r\n                \r\n\r\n        #         for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n        #             print 'Column value=', col\r\n                    #print row[0]\r\n        #             if col !=row[0]:\r\n        #                 print 'Case when we use just Interior tile solution'\r\n        #                 print solution_greed._grid.index(row)\r\n        #                 print row.index(col)\r\n                        \r\n        #                 solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n        #                 print 'Solution =', solution\r\n        #                 print self \r\n        #                 print solution_greed._grid\r\n        #             elif col ==row[0]:\r\n        #                 print 'Case when we use just Col0 solution'\r\n\r\n        #     else:\r\n        #         print 'Case when we are solving first two rows'\r\n\r\n        #return \"\"\r", "def solve_row1_tile(self, target_col):\n        movements = self.solve_interior_tile(1, target_col)\n        self.update_puzzle(\"ur\")\n        return movements + \"ur\"", "def solve_interior_tile(self, target_row, target_col):\r\n        assert self._grid[target_row][target_col] == 0\r\n        moves_str = \"\"\r\n        target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n        \r\n        moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n        \r\n        self.update_puzzle(moves_str)\r\n        print \"solve_interior_tile\"\r\n        print moves_str\r\n        print self._grid\r\n        return moves_str", "def solve_2x2(self):\r\n        # replace with your code\r\n        assert self.row1_invariant(1), '2x2 does not pass row1_invariant(1)'\r\n        whole_move = ''\r\n        current_position = self.current_position(0, 0)\r\n        # print 'Zero position =', current_position\r\n        counter = 0\r\n        \r\n\r\n         \r\n        # if current_position == (0,0):\r\n        #     print (0,0)\r\n        #     move_to_00 = 'rdlu' \r\n        if current_position == (0,1):\r\n            # print (0,1)\r\n            move_to_00 = 'l'\r\n        if current_position == (1,0):\r\n            # print (1,0)\r\n            move_to_00 = 'u'\r\n        if current_position == (1,1):\r\n            # print (1,1)\r\n            move_to_00 = 'ul'\r\n        whole_move += move_to_00\r\n        self.update_puzzle(move_to_00)\r\n        # print self\r\n        # print self.get_number(1,1) < 
self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n move_str += 'rdlu'\n self.update_puzzle('rdlu')\n return move_str", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" * rotation_num) \n return move_str", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n 
self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n 
self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n 
self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n self.update_puzzle(move)\r\n return move", "def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]", "def solve(self, board) -> None:\n if board == [[]] or board == []:\n return\n\n r, c = len(board), len(board[0])\n\n from collections import deque\n queue = deque()\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n queue.append([i, j])\n board[i][j] = 'M'\n\n while queue:\n i, j = queue.popleft()\n for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):\n if 0 <= x <= r - 1 and 0 <= y <= c - 1 and board[x][y] == 'O':\n board[x][y] = 'M'\n queue.append([x, y])\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def 
solve(self, board: List[List[str]]) -> None:\n if board == [] or board == [[]]: # corner case\n return\n\n r, c = len(board), len(board[0])\n\n def dfs(i, j): # visited i, j neighbors and change o to M\n if i < 0 or i > r - 1 or j < 0 or j > c - 1 or board[i][j] == 'X' or board[i][j] == 'M':\n return\n\n board[i][j] = 'M'\n dfs(i - 1, j)\n dfs(i + 1, j)\n dfs(i, j - 1)\n dfs(i, j + 1)\n\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n dfs(i, j)\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n rows = len(board)\n cols = len(board[0])\n\n def explore(i, j):\n if i < 0 or i >= rows:\n return\n if j < 0 or j >= cols:\n return\n if board[i][j] != \"O\":\n return\n board[i][j] = \"Y\"\n explore(i + 1, j)\n explore(i - 1, j)\n explore(i, j + 1)\n explore(i, j - 1)\n\n for i in [0, rows - 1]:\n for j in range(cols):\n explore(i, j)\n\n for j in [0, cols - 1]:\n for i in range(rows):\n explore(i, j)\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == \"Y\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"", "def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n if board is None or len(board) == 0:\n return \n row, col = len(board), len(board[0])\n for i in range(row):\n self.dfs(board, i, 0)\n self.dfs(board, i, col - 1)\n for j in range(col):\n self.dfs(board, 0, j)\n self.dfs(board, row-1, j)\n for i in range(row):\n for j in range(col):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '-':\n board[i][j] = 'O'", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += 
CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"ulld\"\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n m, n = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < m or not 0 <= y < n or board[x][y] != \"O\":\n return\n direction = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n board[x][y] = \"Y\"\n for d in direction:\n dfs(x+d[0], y+d[1])\n\n for i in range(m):\n dfs(i, 0)\n dfs(i, n-1)\n for i in range(1, n-1):\n dfs(0, i)\n dfs(m-1, i)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"Y\":\n board[i][j] = \"O\"", "def human_go(self, board):\r\n coord_pattern = re.compile(\"[0-{}]$\".format(board.shape[1]))\r\n print(\"Enter Column and press enter.\")\r\n input_str = input(\"(from 0-6)\\n\")\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n col = int(input_str)\r\n if board[0][col] != 0:\r\n print(\"That column is already full, please try again\")\r\n self.human_go()\r\n else:\r\n for row in board[::-1]:\r\n if row[col] == 0:\r\n row[col] = -1\r\n return board", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is 
\"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def solve(self, board: List[List[str]]) -> None:\r\n if not board:\r\n return\r\n m, n = len(board), len(board[0])\r\n board_filter = lambda p: 0 <= p[0] < m and 0 <= p[1] < n and board[p[0]][p[1]] == 'O'\r\n queue = list(filter(board_filter, [(x, y) for r in range(max(m, n)) \r\n for x, y in ((r, 0), (r, n-1), (0, r), (m-1, r))]))\r\n while queue:\r\n x, y = queue.pop()\r\n board[x][y] = 'W'\r\n queue.extend(list(filter(board_filter, ((x-1, y), (x+1, y), (x, y-1), (x, y+1)))))\r\n for row in board:\r\n for i, c in enumerate(row):\r\n row[i] = 'XO'[c=='W']", "def solve(self):\n # If board is filled, board is trivially solved\n if self.check_full_board():\n return self.done\n\n # Iterate over every square in the board\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n\n # If square is empty, begin plugging in possible values\n if self.check_empty_space(row, col):\n for val in range(1, 10):\n if not self.check_row(val, row) and \\\n not self.check_column(val, col) and \\\n not self.check_box(val, self.what_box(row, col)):\n self.board[row][col] = val\n \n if self.solve():\n return self.done()\n \n # Didn't work; undo assigment\n self.board[row][col] = ' '\n\n # Bad path; backtrack\n return False", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def move(self, row, col, player):\r\n if player == 1:\r\n self.mat[row][col] = 1\r\n else:\r\n self.mat[row][col] = -1\r\n if self.checkrow(player,row) or self.checkcol(player,col):\r\n return player\r\n if row == col or row + col == self.size-1:\r\n if self.checkdiag(player):\r\n return player\r\n return 0", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, 
None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def solve(self, board: List[List[str]]) -> None:", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = 1 if player == 1 else -1\n rowsum = sum(self.board[row])\n colsum = sum([self.board[r][col] for r in range(self.n)])\n diagsum1 = sum([self.board[i][i] for i in range(self.n)])\n diagsum2 = sum([self.board[i][-i-1] for i in range(self.n)])\n if player == 1:\n if rowsum == self.n or colsum == self.n or diagsum1 == self.n or diagsum2 == self.n:\n return 1\n else:\n if rowsum == -self.n or colsum == -self.n or diagsum1 == -self.n or diagsum2 == -self.n:\n return 2\n return 0", "def solve(self, board):\n def dfs(board, r, c):\n if r < 0 or c < 0 or r > rows - 1 or c > cols - 1 or board[r][c] == 'X' or board[r][c] == '#':\n return\n board[r][c] = '#'\n dfs(board, r - 1, c)\n dfs(board, r + 1, c)\n dfs(board, r, c - 1)\n dfs(board, r, c + 1)\n\n if len(board) == 0:\n return;\n rows = len(board)\n cols = len(board[0])\n for i in range(rows):\n for j in range(cols):\n if (i == 0 or j == 0 or i == rows - 1 or j == cols - 1) and board[i][j] == 'O':\n dfs(board, i, j)\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0:\n return\n self.h = len(board)\n self.w = len(board[0])\n self.board = board\n for i in range(self.h):\n for j in range(self.w):\n if i == 0 or i == self.h-1 or j == 0 or j == self.w-1:\n #print (i,j)\n self.dfs((i,j))\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"O\":\n self.board[i][j]=\"X\"\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"#\":\n self.board[i][j]=\"O\"", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return\n\n m = len(board)\n n = len(board[0])\n\n uf = UnionFind(m * n + 1)\n dummy = m * n\n\n # connect 'O' at first and last col with dummy\n for i in range(m):\n if board[i][0] == 'O':\n uf.union(dummy, i * n)\n if board[i][-1] == 'O':\n uf.union(dummy, i * n + n - 1)\n\n # connect 'O' at first and last row with dummy\n for j in range(n):\n if board[0][j] == 'O':\n uf.union(dummy, j)\n if board[-1][j] == 'O':\n uf.union(dummy, n * (m-1) + j)\n\n d = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n\n for i in range(1, m-1):\n for j in range(1, n-1):\n if board[i][j] == 'O':\n for di, dj in d:\n x = i+di\n y = j+dj\n if board[x][y] == 'O':\n uf.union(x*n+y, i*n+j)\n\n # change not connected 'O' with 'X'\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i * n + j):\n board[i][j] = 'X'", "def solve(self, board: 'List[List[str]]') -> 'None':\n if not board:\n return\n m, n = len(board), len(board[0])\n save = [ij for k in range(m + n) for ij in ((0, k), (m - 1, k), (k, 0), (k, n - 1))]\n while save:\n i, j = save.pop()\n if -1 < i < m and -1 < j < n and board[i][j] == 'O':\n board[i][j] = 'S'\n save += (i + 1, j), 
(i - 1, j), (i, j + 1), (i, j - 1)\n for row in board:\n for i, c in enumerate(row):\n row[i] = 'XO'[c == 'S']", "def solve(self, board: List[List[str]]) -> None:\n def dfs(board, i, j):\n m = len(board)\n n = len(board[0])\n if i < 0 or i >= m or j < 0 or j >= n: return\n\n if board[i][j] != 'O': return\n\n board[i][j] = '#'\n [dfs(board, i+di, j+dj) for di, dj in [(0, 1), (1, 0), (0, -1), (-1, 0)]]\n\n if len(board) == 0: return\n m = len(board)\n n = len(board[0])\n\n for i in range(m):\n dfs(board, i, 0)\n dfs(board, i, n-1)\n\n for j in range(n):\n dfs(board, 0, j)\n dfs(board, m-1, j)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def move(self, row: int, col: int, player: int) -> int:\n if player == 1:\n self.newList[row][col] = 1\n self.colSum[col] += 1\n self.rowSum[row] += 1\n if row == col:\n self.diag += 1\n if row + col == (self.n - 1):\n self.revDiag += 1\n if self.rowSum[row] == self.n or self.colSum[col] == self.n or self.diag == self.n or self.revDiag == self.n:\n return 1\n if player == 2:\n self.newList[row][col] = -1\n self.colSum[col] -= 1\n self.rowSum[row] -= 1\n if row == col:\n self.diag -= 1\n if row + col == (self.n - 1):\n self.revDiag -= 1\n if self.rowSum[row] == -self.n or self.colSum[col] == -self.n or self.diag == -self.n or self.revDiag == -self.n:\n return 2\n \n return 0", "def move_to_target(self, target_row, target_col, row, col):\r\n move = \"\"\r\n # typical move to move target tile to target pos.\r\n solver_move = \"druld\"\r\n # move up first\r\n move = (target_row - row) * \"u\"\r\n # conditional statements for moving the tile:\r\n # 1. case curr_pos of tile and target_tile are in same col\r\n if (target_col - col) == 0:\r\n move += \"ld\" + ((target_row - row) - 1) * solver_move\r\n else:\r\n # 2. curr_pos of tile is on the left of target pos\r\n if (target_col - col) > 0:\r\n move += (target_col - col) * \"l\"\r\n if row == 0:\r\n move += (abs(target_col - col) - 1) * \"drrul\"\r\n else:\r\n move += (abs(target_col - col) - 1) * \"urrdl\"\r\n # 3. 
curr_pos of tile is on the right of target pos:\r\n elif (target_col - col) < 0:\r\n move += (abs(target_col - col) - 1) * \"r\"\r\n if row == 0:\r\n move += abs(target_col - col) * \"rdllu\"\r\n else:\r\n move += abs(target_col - col) * \"rulld\"\r\n move += (target_row - row) * solver_move\r\n return move", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] 
== new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def solve(self, board: List[List[str]]) -> None:\n def DFS(board, i, j):\n q = []\n q.append([i, j])\n \n while q:\n x, y = q.pop()\n board[x][y] = \"*\"\n neighbors = ((0, 1), (0, -1), (1, 0), (-1, 0))\n for dx, dy in neighbors:\n if 0 <= x + dx <= len(board) - 1 and 0 <= y + dy <= len(board[0]) - 1 and board[x + dx][y + dy] == \"O\":\n q.append([x + dx, y + dy])\n \n \n # first row\n i = 0\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last row\n i = len(board) - 1\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # first column\n j = 0\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last column\n j = len(board[0]) - 1\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n \n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"*\":\n board[i][j] = \"O\"", "def solve_util(self, board, col):\n try:\n if col == self.N:\n self.print_sol(board)\n return True\n\n # Trying to place this queen in all rows one by one\n res = False\n for i in range(self.N):\n if self.is_safe(board, i, col):\n board[i][col] = 1\n res = self.solve_util(board, col + 1) or res\n if type(res) == dict:\n return res\n board[i][col] = 0 # Backtracking...\n\n # if queen cannot be placed in any row in this col, then alas\n # we return false..\n return res\n except KeyboardInterrupt:\n print('Keyboard Interrupted!')\n return self.Outputs", "def solve(self, board):\r\n if not board or not board[0]:\r\n return\r\n \r\n self.m = len(board)\r\n self.n = len(board[0])\r\n boarder = []\r\n \r\n # Collecting all the 'O' on the boarder\r\n for i in range(self.m):\r\n if board[i][0] == 'O':\r\n boarder.append([i, 0])\r\n if board[i][self.n-1] == 'O':\r\n boarder.append([i, self.n-1])\r\n for j in range(self.n):\r\n if board[0][j] == 'O':\r\n boarder.append([0, j])\r\n if board[self.m-1][j] == 'O':\r\n boarder.append([self.m-1, j])\r\n \r\n for row, col in boarder:\r\n self.BFS(board, row, col)\r\n \r\n for row in range(self.m):\r\n for col in range(self.n):\r\n if board[row][col] == 'O':\r\n board[row][col] = 'X'\r\n elif board[row][col] == 'E':\r\n board[row][col] = 'O'\r\n print(board)", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if 
not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def solve(board) -> None:\n rows = len(board)\n if rows==0:\n return board\n cols = len(board[0])\n \n def is_border(rc):\n (rr, cc) =rc\n if rr<rows and rr< cols and rr>=0 and cc>=0 and board[rr][cc]=='O' and (rr==0 or rr==rows-1 or cc==0 or cc==cols-1):\n return True\n return False\n \n transf = []\n for r in range(rows):\n for c in range(cols):\n if board[r][c]=='O' and not is_border((r,c)) and not any(map(is_border, [(r-1, c), (r+1, c), (r, c-1), (r, c+1)])):\n transf.append((r,c))\n if transf:\n for r,c in transf:\n board[r][c]='X'\n return board", "def move(self, row, col, player):\n offset = player * 2 - 3 # 1 or -1\n self.row[row] += offset\n self.col[col] += offset\n if row == col:\n self.diag += offset\n if row + col == self.n - 1:\n self.anti_diag += offset\n if self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 2\n if -self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 1\n return 0", "def move(self, row: int, col: int, player: int) -> int:\n n = self.n\n if player == 1:\n self.rows_1[row] += 1\n self.cols_1[col] += 1\n if player == 2:\n self.rows_2[row] += 1\n self.cols_2[col] += 1\n if row == col:\n self.diag1[row] = player\n if row + col + 1 == n:\n self.diag2[row] = player\n f = 0\n g = 0\n for i in range(n):\n if self.rows_1[row] == n or self.cols_1[col] == n:\n return 1\n if self.rows_2[row] == n or self.cols_2[col] == n:\n return 2 \n if self.diag1[i] != self.diag1[0]:\n f = 1\n if self.diag2[i] != self.diag2[0]:\n g = 1\n if f == 0:\n return self.diag1[0]\n if g == 0:\n return self.diag2[0]\n return 0", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n self.nRow, self.nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < self.nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < self.nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, self.nRow - 1]:\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, self.nCol - 1]:\n for kr in range(self.nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(self.nRow):\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def minimax(state, depth, player):\n if depth == 9:\n row = choice([0, 1, 2])\n col = choice([0, 1, 2])\n return row, col, ''\n\n if player == COMP:\n best = [-1, -1, float(\"-inf\")]\n else:\n best = [-1, -1, float(\"inf\")]\n\n if depth == 0 or state.has_tic_tac_toe(COMP) or state.has_tic_tac_toe(HUMAN):\n score = heuristic(state, depth)\n return [-1, -1, score]\n \"\"\"\n Checks if any of the player is one away from winning in any board and make the appropriate move.\n \"\"\"\n if player==COMP:\n empty_cells=get_empty_cells(state)\n dangerous_cells=state.is_one_away_from_tic_tac_toe((player%2)+1)\n if dangerous_cells:\n found_dangerous_cells=True\n else:\n found_dangerous_cells=False\n print \"no dangerous local boards\"\n favoring_cells=state.is_one_away_from_tic_tac_toe(player)\n if favoring_cells:\n found_favoring_cells=True\n else:\n found_favoring_cells=False\n print \"no favoring local 
boards\"\n if found_dangerous_cells==False and found_favoring_cells==False:\n pass\n if found_dangerous_cells==False and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in favoring_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==False:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n else:\n empty_cells=get_empty_cells(state)\n for cell in empty_cells:\n row, col = cell[0], cell[1]\n state.board[row][col] = player\n score = minimax(state, depth - 1, (player % 2) + 1)\n state.board[row][col] = 0\n score[0], score[1] = row, col\n if player == COMP:\n if score[2] >= best[2]:\n if score[2]==best[2]:\n \"\"\"\n Favors middle positions over sides or corners\n MIDDLE > CORNERS > SIDES\n \"\"\"\n if (best[0]==0 and best[1]==0) or (best[0]==0 and best[1]==2) or (best[0]==2 and best[1]==0) or (best[0]==2 and best[1]==2):\n if score[0]==0 and score[1]==0: #favoring centre position over diagonal position\n best=score\n print(\"centre position chosen over diagonal positions\")\n else:\n if ((score[0]==0 and score[1]==1) or (score[0]==1 and score[1]==0) or (score[0]==1 and score[1]==2) or (score[0]==2 and score[1]==1))==0:\n best=score #favoring any position over side position as long as the new position is not a side position too\n print(\"diagonal and centre positions chosen over side positions\")\n else:\n best = score\n else:\n bestMoves=[]\n if score[2] < best[2]:\n best=score\n return best", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def solve_(self, x, y, board, path):\n if self.SOLVED:\n return\n if self.is_done(board):\n self.print_path(path)\n self.SOLVED = True\n return\n for new_x, new_y in self.next_click(x, y, board):\n if new_x is None or new_y is None:\n return\n new_board = self.click(new_x, new_y, board)\n self.solve_(\n x=0, y=0,\n board=new_board,\n path=path + [((new_x, new_y), new_board)]\n )", "def position_tile(self, target_row, target_col, cur_row, cur_col, need_ld=True):\n move_str = ''\n if cur_row == target_row:\n if cur_col < target_col:\n move_str += 'l' * (target_col - cur_col)\n if target_col - cur_col > 1:\n move_str += 'ur'\n move_str += 'druldru' * (target_col - cur_col - 1)\n else:\n move_str += 'ur' if not need_ld else ''\n need_ld = False\n else:\n move_str += 'r' * (cur_col - target_col)\n if cur_col - target_col > 1:\n move_str += 'ul'\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n else:\n need_ld = False\n else:\n move_str += 'u' * (target_row - cur_row)\n if cur_col < target_col:\n move_str += ('l' * (target_col - cur_col) + 'dru')\n move_str += 'druldru' * (target_col - cur_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n elif cur_col > target_col:\n move_str += ('r' * (cur_col - target_col) + 'dlu')\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n else:\n move_str += 'lddru' * (target_row - cur_row - 1)\n if need_ld:\n move_str += 'ld'\n return move_str", "def solve(self, board: 'List[List[str]]') -> 'None':\n\n def dfs(i, j, tmp):\n nonlocal flag\n if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):\n flag = False\n return\n if board[i][j] != 'O' or [i, j] in tmp:\n return\n tmp.append([i, j])\n dfs(i - 1, j, tmp)\n dfs(i + 1, j, tmp)\n 
dfs(i, j + 1, tmp)\n dfs(i, j - 1, tmp)\n return tmp\n\n change = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 'O' and [i, j] not in change:\n tmp = []\n flag = True\n tmp = dfs(i, j, tmp[:])\n if flag:\n for loc in tmp:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'\n\n for loc in change:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'", "def test_perform_move(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertFalse(p.perform_move(\"taco\"))\n self.assertTrue(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,0],[7,8,6]])\n self.assertFalse(p.perform_move('right'))\n p = hw.create_tile_puzzle(2, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('up'))\n self.assertFalse(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,0,4],[5,6,3,7]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertFalse(p.perform_move('down'))\n self.assertFalse(p.perform_move('left'))\n self.assertEqual(p.get_board(), [[0,1,2,3]])", "def move(self, row, col, player):\n if self.winning == True:\n return\n if player == 1:\n val = 1\n else:\n val = -1\n self.row[row] += val\n self.col[col] += val\n if row == col:\n self.diagonal += val\n n = len(self.row)\n if row + col == n - 1:\n self.antidiagonal += val\n if abs(self.row[row]) == n or abs(self.col[col]) == n or abs(self.diagonal) == n or abs(self.antidiagonal) == n:\n self.winning = True\n return player\n return 0", "def move(self, row, col, player):\n toadd = 1 if player == 1 else -1\n \n self.row[row] += toadd\n self.col[col] += toadd\n if row == col: self.diagonal += toadd\n if col == self.n - row -1 : self.antidiag += toadd\n \n if abs(self.row[row]) == self.n or abs(self.col[col]) == self.n or abs(self.diagonal) == self.n or abs(self.antidiag) == self.n:\n return player\n else:\n return 0", "def move(self, row, col, player):\n if self.winning == True:\n return\n self.matrix[row][col] = player\n n = len(self.matrix)\n indicator = True\n for i in range(n):\n if self.matrix[row][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n indicator = True\n for i in range(n):\n if self.matrix[i][col] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n if row == col:\n indicator = True\n for i in range(n):\n if self.matrix[i][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n if row + col == n - 1:\n indicator = True\n for i in range(n):\n if self.matrix[i][n - 1 - i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n return 0", "def solve_step(self,puzzle_grid,x,y):\n self.puzzleGrid = puzzle_grid\n if(self.foundStep == False):\n self.targetCell = self.puzzleGrid.grid[x][y]\n if(self.targetCell.isSolved == False):\n self.calculate_possibilities()\n if len(self.targetCell.possibilities) == 1: #README method 1\n self.targetCell.solve()\n return True\n else:\n return self.check_neighbours() #README method 2", "def solve(board):\r\n \r\n #An O(2mn) time solution; the first O(mn) traversal is to preform a bfs on all tiles attached to edge 'O' tiles (can't convert to 'X's); the second is to convert all remaining 'O's into 'X's\r\n \r\n def bfs(curr,r,c):\r\n if not curr: return\r\n prev = len(curr)\r\n for n in range(0,prev):\r\n i,j = 
curr[n][0],curr[n][1]\r\n            board[i][j] = 'A'\r\n            for x,y in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n                x_n = i+x\r\n                y_n = j+y\r\n                if x_n >= 0 and x_n < r and y_n >= 0 and y_n < c and board[x_n][y_n] == \"O\":\r\n                    curr += [(x_n,y_n)]\r\n        bfs(curr[prev:],r,c)\r\n\r\n    \r\n    q,r,c = [],len(board),len(board[0])\r\n    if not r or q: return\r\n\r\n    for i in range(r):\r\n        for j in range(c):\r\n            if (i==0 or j==0 or i==r-1 or j==c-1) and board[i][j] == \"O\":\r\n                q += [(i,j)]\r\n    \r\n    bfs(q,r,c)\r\n\r\n    for i in range(r):\r\n        for j in range(c): \r\n            if board[i][j] == \"O\": \r\n                board[i][j] = \"X\"\r\n            elif board[i][j] == \"A\":\r\n                board[i][j] = \"O\"\r\n    \r\n    return", "def move(self, row: int, col: int, player: int) -> int:\n        s = -1 if player == 1 else 1\n\n        self.rows[row] += s\n        if abs(self.rows[row]) == self.n:\n            return player\n\n        self.cols[col] += s\n        if abs(self.cols[col]) == self.n:\n            return player\n\n        if row == col:\n            self.diagonals[0] += s\n            if abs(self.diagonals[0]) == self.n:\n                return player\n\n        if (row + col) == self.n - 1:\n            self.diagonals[1] += s\n            if abs(self.diagonals[1]) == self.n:\n                return player\n\n        return 0", "def solve(self, board) -> None:\n        for index in range (1, len(board)-1):\n            arr = board[index]\n            for ch in range(1, len(arr)-1):\n                if arr[ch] is 'O':\n                    safe = True\n                    if ch-1 == 0 and arr[ch-1] is 'O':\n                        safe = False\n                    if ch +1 == len(arr)-1 and arr[ch+1] is 'O':\n                        safe = False\n                    if index -1 == 0 and board[index-1][ch] is 'O':\n                        safe = False\n                    if index + 1 == len(board)-1 and board[index + 1][ch] is 'O':\n                        safe = False\n                    if safe:\n                        arr[ch] = 'X'", "def solve(self, board) -> None:\n        coords = []\n        board_len = len(board)\n        row_len = len(board[0]) - 1\n        # top\n        # coords.append([[0, i] for i, q in enumerate(board[0]) if q == \"O\"])\n        # # bottom\n        # coords.append(\n        #     [[board_len, i] for i, q in enumerate(board[board_len]) if q == \"O\"]\n        # )\n        for i in range(board_len):\n            row_coord = [[i,indx] for indx, q in enumerate(board[i]) if q == \"O\"]\n            # import pdb; pdb.set_trace()\n            for x in row_coord:\n                coords.append(x)\n        for x in coords:\n            if len(x) == 0:\n                continue\n            if x[0] == 0:\n                print(\"top border\")\n            elif x[0] == board_len - 1:\n                print(\"bottom border\")\n            elif x[1] == 0:\n                print(\"left border\")\n            elif x[1] == row_len:\n                print(\"right border\")", "def make_move(self, column):\r\n        trans_board = numpy.transpose(self.__board[::1])  # transpose the\r\n        # board so that columns are now arrays\r\n        if 0 not in trans_board[column] or self.get_winner() or column >= \\\r\n                self.BOARD_COLUMNS or column < 0:\r\n            # column is full, illegal or the game is already finished\r\n            return self.ILLEGAL_MOVE  # exception?\r\n        else:\r\n            reversed_col = list(reversed(trans_board[column]))\r\n            for hole in reversed_col:\r\n                if hole == 0:\r\n                    row_i = self.BOARD_ROWS - 1 - reversed_col.index(hole)\r\n                    self.__board[row_i][column] = self.__cur_player\r\n                    winner = self.get_winner()\r\n                    if winner:  # is not none\r\n                        return winner\r\n                    self.__switch_player()", "def solve_soduku(sudoku, screen):\n\n    myfont = pygame.font.SysFont('Times New Roman', 30)\n\n    # Creates a copy of the sudoku board so that we don't mess up the original board\n    solved_board = sudoku.board\n\n    # Stores the index of the next number that should be tried (the index will be used with the possible_nums list)\n    try_new_nums = [[0] * 9 for y in range(9)]\n\n    # Creates a list that will act like a stack for the depth first search (stores tuples (row, col) for each unsolved square)\n    nodes = [sudoku.find_next_empty_node((0, -1))]\n\n    done = False\n\n    # Keeps 
running until the puzzle is either solved or runs out of possible combinations\n while len(nodes) != 0:\n\n time.sleep(.001)\n\n if not done:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n\n pygame.display.update()\n\n # finds all possible numbers that can go into the current unsolved square\n one = set(sudoku.check_vertically(nodes[len(nodes) - 1], solved_board))\n two = set(sudoku.check_horizontally(nodes[len(nodes) - 1], solved_board))\n three = set(sudoku.check_box(nodes[len(nodes) - 1], solved_board))\n possible_nums = list(one.intersection(two).intersection(three))\n\n # Determines if there is a number that can be put into the current unsolved square\n if len(possible_nums) > 0:\n\n # Stores the current number in the current unsolved square\n curr_num = solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]]\n\n # Stores the next number that will be tried in the current unsolved square\n possible_next_num = possible_nums[\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] % len(possible_nums)]\n\n # Makes sure that the code doesn't get stuck trying the same combos\n if try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] == len(possible_nums):\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Makes sure that the code doesn't get stuck on trying the same number\n if possible_next_num == curr_num:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Sets the unsolved square to the next number that is to be tried\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = possible_next_num\n\n # Changes which index will be used to find a different number if the new number does not work\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] += 1\n\n # if there are no possible numbers for the current square, it backtracks to the last number that can change\n else:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Determines if there is still an empty unsolved square left\n if sudoku.has_next_emtpy_node(nodes[len(nodes) - 1]):\n nodes.append(sudoku.find_next_empty_node(nodes[len(nodes) - 1]))\n else:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n done = True" ]
[ "0.80090946", "0.8002715", "0.79543203", "0.7940283", "0.7870043", "0.7847743", "0.7818097", "0.77486104", "0.774018", "0.7731879", "0.77268267", "0.7707487", "0.77027786", "0.76879483", "0.7634697", "0.76258874", "0.7590558", "0.7560754", "0.7522215", "0.75079787", "0.75023305", "0.74233866", "0.7398959", "0.73937315", "0.738067", "0.736368", "0.7357382", "0.7326966", "0.731067", "0.7291728", "0.71680576", "0.71333486", "0.7080731", "0.7066218", "0.70588666", "0.7035129", "0.6974126", "0.69386816", "0.68907726", "0.6720271", "0.6720271", "0.6720271", "0.6713385", "0.6713165", "0.6713165", "0.667646", "0.665483", "0.6640822", "0.65920466", "0.6521621", "0.6511639", "0.6478555", "0.6446402", "0.644615", "0.6430197", "0.6418134", "0.63970447", "0.63651913", "0.635803", "0.6357214", "0.6356915", "0.6327227", "0.630201", "0.6282386", "0.6250894", "0.62454027", "0.6231285", "0.62211096", "0.6216771", "0.6215996", "0.62067497", "0.6201949", "0.61826205", "0.6179529", "0.617567", "0.61739886", "0.6143129", "0.6139901", "0.6138184", "0.61225945", "0.61218303", "0.61206347", "0.6099728", "0.60626864", "0.6058793", "0.60554326", "0.6046122", "0.6045858", "0.6039182", "0.6038449", "0.6036707", "0.6032714", "0.6029427", "0.602873", "0.602484", "0.60149074", "0.5987895", "0.5981511", "0.5981155", "0.59749615" ]
0.777891
7
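Several negatives scored above implement the border flood-fill for the "surrounded regions" board problem, one with a recursive dfs and one with a hand-rolled bfs queue. For reference, a compact iterative sketch of the same pattern; the function name is illustrative and not taken from the dataset:

from collections import deque

def capture_surrounded(board):
    """Flip every 'O' region not connected to the border to 'X'."""
    if not board or not board[0]:
        return
    rows, cols = len(board), len(board[0])
    # Seed the queue with every 'O' on the border of the board.
    queue = deque((i, j)
                  for i in range(rows) for j in range(cols)
                  if (i in (0, rows - 1) or j in (0, cols - 1))
                  and board[i][j] == 'O')
    while queue:
        i, j = queue.popleft()
        if board[i][j] != 'O':
            continue                      # already visited
        board[i][j] = 'A'                 # mark as border-connected
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            ni, nj = i + di, j + dj
            if 0 <= ni < rows and 0 <= nj < cols and board[ni][nj] == 'O':
                queue.append((ni, nj))
    for i in range(rows):
        for j in range(cols):
            if board[i][j] == 'O':        # trapped region: capture it
                board[i][j] = 'X'
            elif board[i][j] == 'A':      # border-connected: restore
                board[i][j] = 'O'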
Check whether the puzzle satisfies the row zero invariant at the given column (col > 1). Returns a boolean.
def row0_invariant(self, target_col):
    result = True
    if self._grid[0][target_col] != 0:
        result = False
    if self._grid[1][target_col] != (target_col + self._width * 1):
        result = False
    for row in range(2, self._height):
        for col in range(self._width):
            solved_value = (col + self._width * row)
            if solved_value != self._grid[row][col]:
                result = False
    for row in (0, 1):
        for col in range(target_col+1, self._width):
            solved_value = (col + self._width * row)
            if solved_value != self._grid[row][col]:
                result = False
    return result
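A minimal way to exercise this check, assuming only the _grid, _width, and _height fields the method reads; PuzzleStub and the example boards are illustrative, not part of the record:

class PuzzleStub:
    def __init__(self, grid):
        self._grid = [list(row) for row in grid]  # puzzle contents
        self._height = len(grid)                  # number of rows
        self._width = len(grid[0])                # number of columns

PuzzleStub.row0_invariant = row0_invariant  # attach the function above

# Zero tile at (0, 0) and tile 3 (= target_col + width) already placed
# at (1, 0), with everything to the right solved: the invariant holds.
assert PuzzleStub([[0, 1, 2], [3, 4, 5]]).row0_invariant(0)

# Swapping tiles 3 and 4 violates the check on position (1, target_col).
assert not PuzzleStub([[0, 1, 2], [4, 3, 5]]).row0_invariant(0)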
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def row1_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[1][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row1_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 2:\r\n # print 'All conditions are correct!'\r\n return True", "def row0_invariant(self, target_col):\r\n # asserts that curr_tile is in target_col\r\n if self.get_number(0, target_col) != 0:\r\n return False\r\n # asserts that tile (0,j) is solved, the grid below (0,j) and to the right is solved \r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(0, self.get_height()):\r\n if dummy_i > 1 or (dummy_i == 0 and dummy_j > target_col) or (dummy_i == 1 and dummy_j >= target_col):\r\n if (dummy_i, dummy_j) != self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True", "def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False", "def checkSafe(Board, rows, column):\n for x in range(rows):\n if (Board[x] == column or\n Board[x] + rows - x == column or\n Board[x] + x - rows == column):\n return False\n return True", "def row0_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[0][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n \r\n\r\n for ind in range(len(self._grid[1][target_col:])):\r\n if self.current_position(1, ind+target_col) != (1, ind+target_col):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 3:\r\n # print 'All conditions are cprrect!'\r\n return True", "def row1_invariant(self, target_col):\n result = True\n if self._grid[1][target_col] != 0:\n result = False\n for row in range(2, self._height):\n for col in range(self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n for row in (0, 1):\n for col in range(target_col+1, self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n return result", "def check(chessboard, row, col, n):\n for i in range(col):\n if chessboard[row][i] == 1:\n return False\n\n for j, i in zip(range(row, -1, -1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n \n for j, i in zip(range(row, n, 1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n\n return True", "def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def 
square_empty(column, row):\n if np.flipud(STATE)[row][column] == '-':\n return True\n else:\n return False", "def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True", "def row1_invariant(self, target_col):\r\n # assert that row 1 is solved\r\n if not self.lower_row_invariant(1, target_col):\r\n return False\r\n # asserts that tile proceeded to (1,j), the grid below (1,j) and to the right is solved\r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(2, self.get_height()):\r\n if not (dummy_i, dummy_j) == self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True", "def check_pivot_row(self, row):\r\n all_zeros = True\r\n for i in range(self.SIZE):\r\n if self.matrix[row][i] != 0:\r\n all_zeros = False\r\n break\r\n\r\n if all_zeros:\r\n self.check_solvability(0, self.matrix[row][-1])", "def isComplete(grid):\n for row in range(0,9):\n for col in range(0,9):\n if grid[row][col]==0:\n return False\n return True", "def row0_invariant(self, target_col):\n # replace with your code\n if self.get_number(0, target_col) != 0:\n return False\n current = 0\n for row in range(2, self.get_height()):\n if target_col == self.get_width() - 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 4'\n return False\n current += 1\n current = self._grid[1][target_col]\n for grid in self._grid[1][target_col:]:\n if grid != current:\n print 'Error 5'\n return False\n current += 1\n return True", "def check_if_column_full(self, board, x):\n for y in reversed(range(self.height // 80)):\n if board[x, 0] != 0:\n return True\n elif board[(x, y)] == 0:\n return False\n else:\n y -= y\n continue", "def solve_one(board: Board, col: int) -> bool:\n #Completed board found\n if col >= board.size:\n return True\n for row in range(board.size):\n #check if position is valid\n if check_constraints(board=board, row=row, col=col):\n #update board and continue BFS\n board.mark_tile(row=row, col=col)\n if solve_one(col=col+1, board=board):\n return True\n board.unmark_tile(row=row, col=col)\n #no valid solutions for current board position\n return False", "def check_grid(grid: List):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return False\n return True", "def valid(n, board, row, col):\n for i in range(col):\n if board[row][i] == 1:\n return False\n x = row\n y = col\n while x >= 0 and y >= 0:\n if board[x][y] == 1:\n return False\n x -= 1\n y -= 1\n x = row\n y = col\n while x < n and y >= 0:\n if board[x][y] == 1:\n return False\n x += 1\n y -= 1\n return True", "def row0_invariant(self, target_col):\n \n # Returns False if zero tile is NOT in target position (0, target_col).\n if self.get_number(0, target_col) != 0:\n return False\n \n # Returns False if tiles to the right of target_col are NOT positioned correctly.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(0, col) != col:\n return False\n \n # Returns False if tiles to the right of target_col in row 1 are NOT positioned correctly.\n for col in range(target_col, self.get_width()):\n if self.get_number(1, col) != col + self.get_width():\n return False\n\n # Returns False if tiles in rows 2 and below are NOT positioned correctly.\n if 1 
< self.get_height():\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True", "def is_solvable(self, row=0, col=0):\n if row == self.sl-1 and col == self.sl: \n return True\n\n # If column is the side length, mvoe indices to next row\n if col == self.sl:\n return self.is_solvable(row+1, 0)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_solvable(row, col + 1)\n\n # If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n solved = self.is_solvable(row, col + 1) \n self.puzzle[row][col] = 0\n\n # If value solves puzzle, return solved\n if solved:\n return solved\n\n return False", "def is_solved(self, grid: list):\n # Iterates over rows\n for i in range(9):\n\n if 0 in grid[i]: # Looks for 0s\n return False\n for j in range(9):\n if not self.validate_cell(grid, i, j): # validates each cell\n return False\n return True", "def check_tile_availability(self, row, col):\n return self.board[row][col] == 0", "def isSafe(board, row, col, n):\n\n \"\"\" veriying the row on left side \"\"\"\n for i in range(col):\n if board[row][i] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row,-1,-1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row, n, 1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n return True", "def is_in_chessboard(row_or_col):\n\n nonzero, = row_or_col.nonzero()\n\n # compute the approximate number of crossed squares\n squares = 0\n for i, j in zip(nonzero, nonzero[1:]):\n if j - i >= min_square_dim:\n squares += 1\n\n return squares >= 8", "def is_posssible_col(self,col,user_value):\n for row in range(9):\n if self.arr[row][col] == user_value:\n logging.debug(f\"is_posssible_col row(): (False) row: {row} col: {col} arr{self.arr[row][col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_col row(): (True) row: {row} col: {col} arr{self.arr[row][col]} != {user_value}\")\n return True", "def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False", "def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols", "def check_if_solvable(self):\n\n self.solvable=True #status of sudoku\n for i in range(0, 9):\n for j in range(0, 9):\n if self.a[i][j]==0:\n continue\n if self.check(i, j)[self.a[i][j]]==0:\n self.solvable=False\n return False", "def is_complete(sudoku_board):\n BoardArray = sudoku_board.CurrentGameBoard\n size = len(BoardArray)\n subsquare = int(math.sqrt(size))\n\n #check each cell on the board for a 0, or if the value of the cell\n #is present elsewhere within the same row, column, or square\n for row in range(size):\n for col in range(size):\n if BoardArray[row][col]==0:\n return False\n for i in range(size):\n if ((BoardArray[row][i] == BoardArray[row][col]) and i != col):\n return False\n if ((BoardArray[i][col] == BoardArray[row][col]) and i != row):\n return False\n #determine which square the cell is in\n SquareRow = row // subsquare\n SquareCol = col // subsquare\n for i in range(subsquare):\n for j 
in range(subsquare):\n if((BoardArray[SquareRow*subsquare+i][SquareCol*subsquare+j]\n == BoardArray[row][col])\n and (SquareRow*subsquare + i != row)\n and (SquareCol*subsquare + j != col)):\n return False\n return True", "def isToeplitz(mat):\n for j in range(row):\n if not checkDiag(mat, 0, j):\n return False\n for i in range(1, col):\n if not checkDiag(mat, i, 0):\n return False\n return True", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def is_valid_move(board, picked_column):\n if picked_column < 0 or picked_column >= len(board[0]):\n return False\n for row in range(len(board)):\n if board[row][picked_column] == 0:\n return True\n return False", "def row1_invariant(self, target_col):\n \n # Returns False if zero tile is NOT in target position (1, target_col).\n if self.get_number(1, target_col) != 0:\n return False\n \n # Returns False if tiles to the right of target_col are NOT positioned correctly.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(1, col) != col + (1 * self.get_width()):\n return False\n\n # Returns False if tiles in rows 2 and below are NOT positioned correctly.\n if 1 < self.get_height():\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True", "def check_col(sudoku):\r\n for col in range(9):\r\n for row in range(8):\r\n test = sudoku[row][col]\r\n for i in range(row+1,9):\r\n if sudoku[i][col] == test:\r\n return True #returns True is there is more than two of the same numbers in a column\r", "def check_constraints(board: Board, row: int, col: int) -> bool:\n if not row_constraint(board=board, row=row, col=col):\n return False\n if not upper_diagonal_constraint(board=board, row=row, col=col):\n return False\n if not lower_diagonal_constraint(board, row, col):\n return False\n return True", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def check_lost (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n if grid[0][0]==grid[0][1] or grid[0][0]==grid[1][0]:\r\n return False \r\n if grid[0][3]==grid[0][2] or grid[0][3]==grid[1][3]:\r\n return False \r\n if grid[3][0]==grid[2][0] or grid[3][0]==grid[3][1]:\r\n return False\r\n if grid[3][3]==grid[2][3] or grid[3][3]==grid[3][2]:\r\n return False \r\n if grid[0][1]==grid[0][2] or grid[0][1]==grid[1][1]:\r\n return False \r\n if grid[0][2]==grid[1][2]:\r\n return False \r\n if grid[1][1]==grid[2][1] or grid[1][1]==grid[1][2] or grid[1][1]==grid[1][0]:\r\n return False\r\n if grid[2][1]==grid[2][0] or grid[2][1]==grid[2][2] or grid[2][1]==grid[3][1]:\r\n return False \r\n if grid[1][0]==grid[2][0]:\r\n return False\r\n if grid[1][2]==grid[1][3] or grid[1][2]==grid[2][2]:\r\n return False\r\n if grid[2][2]==grid[2][3] or grid[2][2]==grid[3][2]:\r\n return False\r\n if grid[3][1]==grid[3][2]:\r\n return False\r\n else:\r\n return True", "def row_constraint(board: Board, row: int, col: int) -> bool:\n for i in range(col):\n if board.is_queen(row=row, col=i):\n return False\n return True", "def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n 
return False\n return True", "def check_bounds(self, row: int, col: int) -> bool:\n return 0 <= row < self.row and 0 <= col < self.col", "def row0_invariant(self, target_col):\r\n \r\n solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[0][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n if self._grid[1][target_col] != solved_grid[1][target_col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right", "def valid_point(self, row, col):\n return self.topdown_view[row][col] == 1.0", "def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )", "def valid_guess(self, row, col):\n # if row nor col is at an edge space, returns False\n if not isinstance(row, int) or not isinstance(col, int):\n return False\n # ensures no corner spaces have been selected\n if row < 1 or row > 8:\n return False\n if col < 1 or col > 8:\n return False\n return True", "def is_in_the_grid(self, row: int, col: int) -> bool:\n return 0 <= row < self.n_row and 0 <= col < self.n_col", "def upper_diagonal_constraint(board: Board, row: int, col: int) -> bool:\n #move to first tile in diagonal\n row -= 1\n col -= 1\n #while still in board, test and move to next position return false if fails\n while row >= 0 and col >= 0:\n if board.is_queen(row=row, col=col):\n return False\n row -= 1\n col -= 1\n return True", "def test_square(self, board, row, col, test):\n if row < 0 or row > 7:\n return False\n if col < 0 or col > 7:\n return False\n \n return test(board[row][col])", "def square_check(self):\n return len(self.matrix) == len(self.matrix[0])", "def test_cell_existence(board: list, i: int, j: int) -> bool:\n return not (i < 0 or i > len(board)-1 or j < 0 or j > len(board)-1)", "def in_col(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x!= row and n == grid[x][col]:\n return True\n return False", "def check_lost(grid):\r\n for i in range(len(grid)):\r\n for j in range(len(grid[i])):\r\n if grid[i][j] == 0:\r\n return False\r\n elif i+1 < len(grid):\r\n if grid[i][j] == grid[i+1][j]:\r\n return False\r\n elif j+1 < len(grid[i]):\r\n if grid[i][j] == grid[i][j+1]:\r\n return False \r\n return True", "def is_upper_triangular(self):\n self.check_square()\n\n for i in range(self.rows):\n for j in range(i):\n if self[i, j] != 0:\n return False\n return True", "def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True", "def check(self):\n for row in 
self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True", "def is_cell_col_valid(board, r, c):\n return all(board[i][c] <= board[r][c] for i in xrange(len(board)))", "def check_sudoku(board):\n # XXX XXX XXX\n # XXX XXX XXX\n # XXX XXX XXX\n\n # XXX XXX XXX\n # XXX XXX XXX\n # XXX XXX XXX\n\n # XXX XXX XXX\n # XXX XXX XXX\n # XXX XXX XXX\n\n # check the rows\n for i, row in enumerate(board):\n valid_row = [False] * (len(row) + 1)\n for j, val in enumerate(row):\n if valid_row[val]:\n return False\n if val != 0:\n valid_row[val] = True\n\n # check the columns\n for i, _ in enumerate(board[0]):\n valid_row = [False] * (len(board) + 1)\n for j, _ in enumerate(board):\n if valid_row[board[j][i]]:\n return False\n if val != 0:\n valid_row[board[j][i]] = True\n\n # check the blocks\n block_row = 0\n block_column= 0\n i = 0\n while i < len(board):\n j = 0\n while j < len(board):\n block_check = [False] * (len(board) + 1)\n while block_row < 3:\n block_column = 0\n while block_column < 3:\n if block_check[board[i+block_row][j+block_column]]:\n return False\n if board[i+block_row][j+block_column]:\n block_check[board[i+block_row][j+block_column]] = True\n block_column += 1\n block_row += 1\n block_row = 0\n j += 3\n i += 3\n\n return True", "def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need to use self.size - 1 because while we're thinking of an 8x8 chess board, the computer is thinking of a 0x7 chess board\n return valid_row and valid_col", "def check_row(sudoku):\r\n for row in range(len(sudoku)):\r\n for col in range(len(sudoku)):\r\n if sudoku[row].count(sudoku[row][col]) != 1:\r\n return True #returns True is there is more than two of the same numbers in a row\r", "def check_matrix(self, matrix):\n for i in range(self.size):\n if (matrix[0][i] + matrix[-1][i] == i % 2 or matrix[0][i] + matrix[-1][i] == (i + 1) % 2) and (\n matrix[i][0] + matrix[i][-1] == i % 2 or matrix[i][0] + matrix[i][-1] == (i + 1) % 2):\n pass\n else:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n for i in range(self.size):\n for j in range(self.size):\n if matrix[i][j] > 1:\n logging.debug(\"Matrix detection failed. 
Matrix passed to function \" + str(matrix))\n return False\n logging.info(\"Matrix detected : \" + str(matrix))\n return True", "def checkEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return True\n return False", "def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)", "def col_win(board, player):\n for row in board.T:\n if check_row(row, player):\n return True\n return False", "def TestColumn(SudokuGrid):\r\n for i in range(9):\r\n for j in range(8):\r\n for k in range(j+1,9):\r\n if SudokuGrid[j][i]==SudokuGrid[k][i]:\r\n return False\r\n return True", "def is_full(self, row, col):\n self._validate_indexes(row, col)\n return self._uf.connected(self._top_idx, row * self._n + col)", "def possible(matrix: List[List[int]], x: int, y: int, n:int) -> bool:\n\n # Check for problem in row\n for i in range(0, 9):\n if matrix[x][i] == n:\n return False\n\n # Check for problem in column\n for j in range(0, 9):\n if matrix[j][y] == n:\n return False\n \n # Initial indexes for inner square\n x0 = (x // 3) * 3\n y0 = (y // 3) * 3\n\n # Check for problem in inner square\n for i in range(0, 3):\n for j in range(0, 3):\n if matrix[x0 + i][y0 + j] == n:\n return False\n \n return True", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def valid_input(self, row, col):\n return ((row, col) not in self.marks and\n row <= WIDTH and row > 0 and\n col in COL_MAP)", "def isComplete(self):\n for n in range(9):\n for m in range(9):\n if self.puzzle[n][m] == 0:\n return False\n return True", "def is_one_sol(self, row=0, col=0, sols=None):\n # For testing reasons, initialize with None\n if sols == None:\n sols = []\n\n # Uses an aliased list to maintain variance of number of solutions \n # found across all recursive calls, and returns when more than 1 is found\n if len(sols) > 1:\n return False\n\n # If end of puzzle is hit, the puzzle is solved, return True\n if row == self.sl-1 and col == self.sl: \n sols.append(True)\n return\n \n # If column is the side length, mvoe indices to next row\n if col == self.sl:\n return self.is_one_sol(row+1, 0, sols)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_one_sol(row, col+1, sols)\n\n # If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n self.is_one_sol(row, col+1, sols) \n self.puzzle[row][col] = 0\n\n if len(sols) > 1:\n return False\n\n # If exhausted all possibilities, return if only one solution found thus far\n return len(sols) == 1", "def _check_occupied(self, col, row):\n if self.board[row - 
1][col - 1] == EMPTY:\n return False\n else:\n return True", "def is_solved(self):\n # Iterate through each square of the puzzle\n for row in range(self.sl):\n for col in range(self.sl):\n val = self.puzzle[row][col]\n\n # If any square value is blank (0), not solved, return False\n if val == 0:\n return False\n\n # Trick to keep DRY code: replace each value temporarily with a\n # 0, and use valid_square method with original value to determine\n # if every square is valid\n self.puzzle[row][col] = 0\n valid = self.valid_square(row, col, val)\n self.puzzle[row][col] = val\n \n # If not a valid value for square, return False\n if not valid:\n return False\n return True", "def solve(grid):\n find = find_empty(grid)\n if not find:\n return True\n\n row, col = find\n for i in range(1, 10):\n if valid(grid, i, (row, col)):\n grid[row][col] = i\n if solve(grid):\n return True\n grid[row][col] = 0\n return False", "def col_win(board):\n\tfor col in range(3):\n\t\tif board[0][col] != EMPTY and board[0][col] == board[1][col] == board[2][col]:\n\t\t\treturn True\n\treturn False", "def check(self) -> bool:\n\n\t\treturn all([all(row) for row in self.board])", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", "def checkWithinBound(rowWithinBound,colWithinBound):\n if(rowWithinBound == 0 and colWithinBound == 0):\n return True\n else:\n return False", "def is_zero_matrix(self):\n M = self.rep\n for i in range(self.rows):\n for j in range(self.cols):\n if M[i, j]:\n return False\n return True", "def check_lost (grid):\r\n adjacent = False\r\n zero_value = False\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] == 0:\r\n zero_value = True\r\n break\r\n for i in range(3):\r\n for j in range(3):\r\n if grid[i][j] == grid[i][j+1]:\r\n adjacent = True\r\n break\r\n if grid[i][j] == grid[i+1][j]:\r\n adjacent = True\r\n break\r\n if not adjacent and not zero_value:\r\n return True\r\n return False", "def valid_square(self, row, col, value):\n # Check that the row and col are valid puzzle indices\n if not ((0 <= row < self.sl) and (0 <= col < self.sl)):\n return False\n\n # Check that the square input is empty\n if self.puzzle[row][col] != 0:\n return False\n \n # Check that the value input is a valid puzzle value\n if not (1 <= value <= self.sl):\n if self.puzzle[row][col] == 0 and value == 0:\n return True\n return False\n \n # Check each row, column and block for same number\n for i in range(self.sl): \n if self.puzzle[row][i] == value: # Check each square in row for same value\n return False\n if self.puzzle[i][col] == value: # Check each square in col for same value\n return False\n \n # Check each square in box for same value, a little more complex index-wise\n r = self.bs*(row//self.bs) + (i//self.bs) \n c = self.bs*(col//self.bs) + (i%self.bs) \n if self.puzzle[r][c] == value:\n return False\n \n 
return True", "def any_possible_moves(grid):\n if get_empty_cells(grid):\n return True\n for row in grid:\n if any(row[i]==row[i+1] for i in range(len(row)-1)):\n return True\n for i,val in enumerate(grid[0]):\n column = get_column(grid, i)\n if any(column[i]==column[i+1] for i in range(len(column)-1)):\n return True\n return False", "def check_if_valid(self, row, col, number):\n # Checks if all numbers in row occurs only once\n for i in range(len(self.grid[row])):\n if self.grid[row][i] == number and col != i:\n return False\n\n # Checks if all numbers in column occurs only once\n for i in range(len(self.grid)):\n if self.grid[i][col] == number and row != i:\n return False\n\n # Defines the 3x3 grid that needs to be checked\n square = [(row // 3) * 3, (col//3) * 3]\n \n # Checks if all numbers in the 3x3 square occurs only once\n for i in range(square[0] , square[0] + 3):\n for j in range(square[1], square[1] + 3):\n if number == self.grid[i][j] and i != row and j != col:\n return False\n return True", "def is_col_legal(self, from_col, from_row) -> bool:\n # If card is not playable - return False\n if from_col is None and from_row == -1:\n return False\n if self.solitaire[from_col, from_row].is_facedown:\n return False\n # If card is the last in a column: return True\n if self.is_leaf_card(from_col, from_row):\n # self.leaf_cards[from_col] = self.solitaire[from_col, from_row]\n return True\n # Loop through the column starting at \"from_row\"\n for i, card in enumerate(self.solitaire[from_col]):\n # 0 = no card or empty space\n if card == 0:\n break\n # Only look at cards starting AFTER\n if i > from_row:\n card_above = self.solitaire[from_col, i-1]\n if not card.can_be_moved_to(card_above):\n return False\n return True", "def new_input_does_not_overlap_original_board(self, col, row):\n return self.puzzle[row][col] == 0", "def sudoku_isready(A):\r\n x = isqrt(len(A))\r\n if x*x == len(A):\r\n return True\r\n return False", "def is_possible_grid(self,row,col,user_value):\n start_row = row - (row % 3)\n start_col = col - (col % 3)\n for x in range(3):\n for y in range(3):\n if self.arr[x+start_row][y+start_col] == user_value:\n logging.debug(f\"is_posssible_grid(): (False) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_grid(): (True) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} != {user_value}\")\n return True", "def isMine(self, row, col):\n return self.board[row, col] == 1", "def row1_invariant(self, target_col):\r\n solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[1][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right", "def check_zero(col):\n return np.sum(col == 0.0)", "def check_col(grid, num, i, j):\n assert i < len(grid), 'Row is out of grid!'\n assert j < len(grid[0]), 'Column is out of grid!' 
\n\n found = False\n for pos, row in enumerate(grid):\n if row[j] == num and pos != i:\n found = True\n return found", "def check_full_board(self): #rows then columns\n for row in self.board:\n for column_of_row in row:\n if column_of_row == ' ':\n return False\n return True", "def valid(game_board, value, row, col):\n if len(value) > 1:\n value = \"X\"\n # Check row of new position\n for i in range(len(game_board[row])):\n if game_board[row][i] == value and i != col:\n return False\n\n # Check column of new position\n for i in range(len(game_board)):\n if game_board[i][col] == value and i != row:\n return False\n\n # Check the 3x3 square area\n start_row = 3 * (row // 3)\n start_col = 3 * (col // 3)\n for i in range(start_row, start_row+3):\n for j in range(start_col, start_col+3):\n if game_board[i][j] == value and i != row and j != col:\n return False\n\n return True", "def solve(board):\n find = find_blank(board)\n \n if not find:\n return True\n #will loop through untill the blanks are filled\n\n\n\n else:\n row, col = find\n\n for i in range(1, 10):\n if valid(board, i, (row, col)):\n board[row][col] = i\n\n if solve(board):\n return True\n board[row][col] = 0\n return False", "def is_valid_board(self):\n total = sum(range(1, self.n+1))\n d = {x : [set([]), set([])] for x in range(1, self.n+1)}\n for row_index in range(self.n):\n for col_index in range(self.n):\n num = self.board[row_index][col_index]\n try:\n if row_index in d[num][0] or col_index in d[num][1]:\n print(\"Invalid solution.\")\n return\n except KeyError:\n print(\"Unsolved solution.\") # d[0]\n return\n\n d[num][0].add(row_index)\n d[num][1].add(col_index)\n print(\"Valid solution!\")", "def check_vertical(cls, board, disc, column, row):\n next_row = row + 1\n for row_idx in range(next_row, next_row + cls.MAX_COUNT):\n try:\n if board[column][row_idx] != disc:\n return False\n except IndexError:\n return False\n return True", "def checkvalid(self,borad,row,col,n):\n # check the above column has 'Q'\n i=0\n while i!=row:\n if borad[i][col]=='Q':\n return False\n i+=1\n # check the left-top 135 and right-top 45\n i,j=row-1,col-1\n while i>=0 and j>=0:\n if borad[i][j]=='Q':\n return False\n i-=1\n j-=1\n \n i,j=row-1,col+1\n while i>=0 and j<n:\n if borad[i][j]=='Q':\n return False\n i-=1\n j+=1\n \n return True", "def is_attacked(x, y, board ):\n # checking at row and column \n row = board[x]\n for val in row:\n if val == 1:\n return True \n \n col = column(board,y)\n for val in col:\n if val == 1:\n return True\n\n # getting the dimensions of board \n n = len(board[0]) \n \n #checking the diagonal values of x,y\n for i in range(n):\n for j in range(n):\n if i+j == x+y or i-j == x-y:\n if board[i][j] == 1:\n return True \n return False", "def isfree(col,queens):\n if col in queens:\n return False\n elif any([ abs(col-col1)==len(queens)-index for index,col1 in enumerate(queens)]):\n #c[r]==c[j]; r-j==c[r]-c[j]; r-j==c[j]-c[r]\n # col is the colomn to check; len(queens) just be the row index of col, dont subtract 1\n return False\n else:\n return True" ]
[ "0.7548881", "0.752049", "0.75068325", "0.74679226", "0.742282", "0.7359223", "0.7341293", "0.73344994", "0.7321091", "0.73030216", "0.7264205", "0.7223349", "0.7205626", "0.7195732", "0.7169341", "0.7164632", "0.71609145", "0.71587044", "0.71222657", "0.7111231", "0.7105816", "0.7091535", "0.7042283", "0.70242876", "0.70187825", "0.70182234", "0.6997958", "0.6971268", "0.6920552", "0.6917151", "0.69058347", "0.6897607", "0.68944055", "0.6893499", "0.6878646", "0.6877337", "0.6872178", "0.6872178", "0.68606704", "0.6829635", "0.68292594", "0.6824819", "0.6822464", "0.68197864", "0.6816405", "0.68082774", "0.678426", "0.6755324", "0.6736844", "0.6718778", "0.6712328", "0.6709724", "0.67094207", "0.6697191", "0.6697116", "0.66914093", "0.66724175", "0.66698956", "0.666733", "0.66660887", "0.6634056", "0.66291195", "0.66251844", "0.6617892", "0.661122", "0.6608989", "0.66079324", "0.6604591", "0.6604591", "0.65974945", "0.6594181", "0.6592502", "0.6588644", "0.65884864", "0.6582442", "0.6576587", "0.65693325", "0.6564483", "0.6550946", "0.6537812", "0.65324026", "0.65300626", "0.65185386", "0.6518098", "0.65144515", "0.6507961", "0.6504347", "0.6504211", "0.64983624", "0.64846265", "0.6480135", "0.6478113", "0.64744663", "0.64710104", "0.6464473", "0.6457956", "0.64524585", "0.6444169", "0.643587", "0.6425859" ]
0.76055276
0
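Many of the negatives retrieved for this record are N-queens safety tests over a one-dimensional board representation (columns[row] holds the column of the queen placed in that row). A condensed sketch of that shared check; the helper name is illustrative:

def queen_safe(columns, row, col):
    # A queen at (row, col) is safe iff no earlier row holds a queen in
    # the same column or on either diagonal.
    return all(columns[r] != col and abs(columns[r] - col) != row - r
               for r in range(row))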
Check whether the puzzle satisfies the row one invariant at the given column (col > 1). Returns a boolean.
def row1_invariant(self, target_col):
    result = True
    if self._grid[1][target_col] != 0:
        result = False
    for row in range(2, self._height):
        for col in range(self._width):
            solved_value = (col + self._width * row)
            if solved_value != self._grid[row][col]:
                result = False
    for row in (0, 1):
        for col in range(target_col+1, self._width):
            solved_value = (col + self._width * row)
            if solved_value != self._grid[row][col]:
                result = False
    return result
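The same PuzzleStub sketched after the previous record's document can exercise this method too; note that squares at or left of target_col in rows 0 and 1 are deliberately left unconstrained. The boards below are illustrative:

PuzzleStub.row1_invariant = row1_invariant  # attach the function above

# Zero tile at (1, 1) on a 2x3 board; only column 2 of rows 0 and 1 is
# checked, and both hold their solved values (2 and 5): invariant holds.
assert PuzzleStub([[4, 1, 2], [3, 0, 5]]).row1_invariant(1)

# An unsolved tile to the right of target_col falsifies the invariant.
assert not PuzzleStub([[4, 1, 5], [3, 0, 2]]).row1_invariant(1)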
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def row1_invariant(self, target_col):\r\n # assert that row 1 is solved\r\n if not self.lower_row_invariant(1, target_col):\r\n return False\r\n # asserts that tile proceeded to (1,j), the grid below (1,j) and to the right is solved\r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(2, self.get_height()):\r\n if not (dummy_i, dummy_j) == self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True", "def row1_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[1][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row1_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 2:\r\n # print 'All conditions are correct!'\r\n return True", "def checkSafe(Board, rows, column):\n for x in range(rows):\n if (Board[x] == column or\n Board[x] + rows - x == column or\n Board[x] + x - rows == column):\n return False\n return True", "def check(chessboard, row, col, n):\n for i in range(col):\n if chessboard[row][i] == 1:\n return False\n\n for j, i in zip(range(row, -1, -1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n \n for j, i in zip(range(row, n, 1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n\n return True", "def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True", "def solve_one(board: Board, col: int) -> bool:\n #Completed board found\n if col >= board.size:\n return True\n for row in range(board.size):\n #check if position is valid\n if check_constraints(board=board, row=row, col=col):\n #update board and continue BFS\n board.mark_tile(row=row, col=col)\n if solve_one(col=col+1, board=board):\n return True\n board.unmark_tile(row=row, col=col)\n #no valid solutions for current board position\n return False", "def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])", "def valid(n, board, row, col):\n for i in range(col):\n if board[row][i] == 1:\n return False\n x = row\n y = col\n while x >= 0 and y >= 0:\n if board[x][y] == 1:\n return False\n x -= 1\n y -= 1\n x = row\n y = col\n while x < n and y >= 0:\n if board[x][y] == 1:\n return False\n x += 1\n y -= 1\n return True", "def row1_invariant(self, target_col):\n \n # Returns False if zero tile is NOT in target position (1, target_col).\n if self.get_number(1, target_col) != 0:\n return False\n \n # Returns False if tiles to the right of target_col are NOT positioned correctly.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(1, col) != col + (1 * self.get_width()):\n return False\n\n # Returns False if tiles in rows 2 and below are NOT positioned correctly.\n if 1 < self.get_height():\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True", "def row0_invariant(self, target_col):\n result = True\n if 
self._grid[0][target_col] != 0:\n result = False\n if self._grid[1][target_col] != (target_col + self._width * 1):\n result = False\n for row in range(2, self._height):\n for col in range(self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n for row in (0, 1):\n for col in range(target_col+1, self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n return result", "def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False", "def row_constraint(board: Board, row: int, col: int) -> bool:\n for i in range(col):\n if board.is_queen(row=row, col=i):\n return False\n return True", "def isSafe(board, row, col, n):\n\n \"\"\" veriying the row on left side \"\"\"\n for i in range(col):\n if board[row][i] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row,-1,-1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row, n, 1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n return True", "def check_col(sudoku):\r\n for col in range(9):\r\n for row in range(8):\r\n test = sudoku[row][col]\r\n for i in range(row+1,9):\r\n if sudoku[i][col] == test:\r\n return True #returns True is there is more than two of the same numbers in a column\r", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def row0_invariant(self, target_col):\r\n # asserts that curr_tile is in target_col\r\n if self.get_number(0, target_col) != 0:\r\n return False\r\n # asserts that tile (0,j) is solved, the grid below (0,j) and to the right is solved \r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(0, self.get_height()):\r\n if dummy_i > 1 or (dummy_i == 0 and dummy_j > target_col) or (dummy_i == 1 and dummy_j >= target_col):\r\n if (dummy_i, dummy_j) != self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True", "def check_row(sudoku):\r\n for row in range(len(sudoku)):\r\n for col in range(len(sudoku)):\r\n if sudoku[row].count(sudoku[row][col]) != 1:\r\n return True #returns True is there is more than two of the same numbers in a row\r", "def is_posssible_col(self,col,user_value):\n for row in range(9):\n if self.arr[row][col] == user_value:\n logging.debug(f\"is_posssible_col row(): (False) row: {row} col: {col} arr{self.arr[row][col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_col row(): (True) row: {row} col: {col} arr{self.arr[row][col]} != {user_value}\")\n return True", "def check_constraints(board: Board, row: int, col: int) -> bool:\n if not row_constraint(board=board, row=row, col=col):\n return False\n if not upper_diagonal_constraint(board=board, row=row, col=col):\n return False\n if not lower_diagonal_constraint(board, row, col):\n return False\n return True", "def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )", "def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False", "def is_solvable(self, row=0, col=0):\n if row == self.sl-1 and col == self.sl: \n return True\n\n # If column is the side length, mvoe indices 
to next row\n if col == self.sl:\n return self.is_solvable(row+1, 0)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_solvable(row, col + 1)\n\n # If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n solved = self.is_solvable(row, col + 1) \n self.puzzle[row][col] = 0\n\n # If value solves puzzle, return solved\n if solved:\n return solved\n\n return False", "def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True", "def in_col(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x!= row and n == grid[x][col]:\n return True\n return False", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols", "def valid_guess(self, row, col):\n # if row nor col is at an edge space, returns False\n if not isinstance(row, int) or not isinstance(col, int):\n return False\n # ensures no corner spaces have been selected\n if row < 1 or row > 8:\n return False\n if col < 1 or col > 8:\n return False\n return True", "def row0_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[0][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n \r\n\r\n for ind in range(len(self._grid[1][target_col:])):\r\n if self.current_position(1, ind+target_col) != (1, ind+target_col):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 3:\r\n # print 'All conditions are cprrect!'\r\n return True", "def row1_invariant(self, target_col):\r\n solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[1][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right", "def is_in_chessboard(row_or_col):\n\n nonzero, = 
row_or_col.nonzero()\n\n # compute the approximate number of crossed squares\n squares = 0\n for i, j in zip(nonzero, nonzero[1:]):\n if j - i >= min_square_dim:\n squares += 1\n\n return squares >= 8", "def valid_point(self, row, col):\n return self.topdown_view[row][col] == 1.0", "def row0_invariant(self, target_col):\n # replace with your code\n if self.get_number(0, target_col) != 0:\n return False\n current = 0\n for row in range(2, self.get_height()):\n if target_col == self.get_width() - 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 4'\n return False\n current += 1\n current = self._grid[1][target_col]\n for grid in self._grid[1][target_col:]:\n if grid != current:\n print 'Error 5'\n return False\n current += 1\n return True", "def col_win(board, player):\n for row in board.T:\n if check_row(row, player):\n return True\n return False", "def row1_invariant(self, target_col):\n # replace with your code\n if self.lower_row_invariant(1, target_col):\n return True\n return False", "def check_if_valid(self, row, col, number):\n # Checks if all numbers in row occurs only once\n for i in range(len(self.grid[row])):\n if self.grid[row][i] == number and col != i:\n return False\n\n # Checks if all numbers in column occurs only once\n for i in range(len(self.grid)):\n if self.grid[i][col] == number and row != i:\n return False\n\n # Defines the 3x3 grid that needs to be checked\n square = [(row // 3) * 3, (col//3) * 3]\n \n # Checks if all numbers in the 3x3 square occurs only once\n for i in range(square[0] , square[0] + 3):\n for j in range(square[1], square[1] + 3):\n if number == self.grid[i][j] and i != row and j != col:\n return False\n return True", "def is_in_the_grid(self, row: int, col: int) -> bool:\n return 0 <= row < self.n_row and 0 <= col < self.n_col", "def TestColumn(SudokuGrid):\r\n for i in range(9):\r\n for j in range(8):\r\n for k in range(j+1,9):\r\n if SudokuGrid[j][i]==SudokuGrid[k][i]:\r\n return False\r\n return True", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", "def is_cell_col_valid(board, r, c):\n return all(board[i][c] <= board[r][c] for i in xrange(len(board)))", "def valid(game_board, value, row, col):\n if len(value) > 1:\n value = \"X\"\n # Check row of new position\n for i in range(len(game_board[row])):\n if game_board[row][i] == value and i != col:\n return False\n\n # Check column of new position\n for i in range(len(game_board)):\n if game_board[i][col] == value and i != row:\n return False\n\n # Check the 3x3 square area\n start_row = 3 * (row // 3)\n start_col = 3 * (col // 3)\n for i in range(start_row, start_row+3):\n for j in range(start_col, start_col+3):\n if game_board[i][j] == value and i != row and j != col:\n return False\n\n 
return True", "def possible(matrix: List[List[int]], x: int, y: int, n:int) -> bool:\n\n # Check for problem in row\n for i in range(0, 9):\n if matrix[x][i] == n:\n return False\n\n # Check for problem in column\n for j in range(0, 9):\n if matrix[j][y] == n:\n return False\n \n # Initial indexes for inner square\n x0 = (x // 3) * 3\n y0 = (y // 3) * 3\n\n # Check for problem in inner square\n for i in range(0, 3):\n for j in range(0, 3):\n if matrix[x0 + i][y0 + j] == n:\n return False\n \n return True", "def test_square(self, board, row, col, test):\n if row < 0 or row > 7:\n return False\n if col < 0 or col > 7:\n return False\n \n return test(board[row][col])", "def check(self) -> bool:\n\n\t\treturn all([all(row) for row in self.board])", "def is_four_in_column(board, row, column):\n sequence = [board[row][column] for j in range(4)]\n column = [board[i][column] for i in range(len(board))]\n if is_subset(sequence, column):\n return True\n else:\n return False", "def is_solved(self, grid: list):\n # Iterates over rows\n for i in range(9):\n\n if 0 in grid[i]: # Looks for 0s\n return False\n for j in range(9):\n if not self.validate_cell(grid, i, j): # validates each cell\n return False\n return True", "def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)", "def check_bounds(self, row: int, col: int) -> bool:\n return 0 <= row < self.row and 0 <= col < self.col", "def in_row(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x != col and n == grid[row][x]:\n return True\n return False", "def isToeplitz(mat):\n for j in range(row):\n if not checkDiag(mat, 0, j):\n return False\n for i in range(1, col):\n if not checkDiag(mat, i, 0):\n return False\n return True", "def check_matrix(self, matrix):\n for i in range(self.size):\n if (matrix[0][i] + matrix[-1][i] == i % 2 or matrix[0][i] + matrix[-1][i] == (i + 1) % 2) and (\n matrix[i][0] + matrix[i][-1] == i % 2 or matrix[i][0] + matrix[i][-1] == (i + 1) % 2):\n pass\n else:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n for i in range(self.size):\n for j in range(self.size):\n if matrix[i][j] > 1:\n logging.debug(\"Matrix detection failed. 
Matrix passed to function \" + str(matrix))\n return False\n logging.info(\"Matrix detected : \" + str(matrix))\n return True", "def multipleQueensAlongRow(board):\n\n (rows, columns) = (len(board), len(board[0]))\n\n for row in range(rows):\n if board[row].count(1) > 1:\n return True\n\n return False", "def multipleQueensAlongColumns(board):\n (rows, columns) = (len(board), len(board[0]))\n\n for column in range(columns):\n count = 0\n\n for row in range(rows):\n if board[row][column] == 1:\n count += 1\n if count > 1:\n return True\n\n return False", "def square_empty(column, row):\n if np.flipud(STATE)[row][column] == '-':\n return True\n else:\n return False", "def isComplete(grid):\n for row in range(0,9):\n for col in range(0,9):\n if grid[row][col]==0:\n return False\n return True", "def is_one_sol(self, row=0, col=0, sols=None):\n # For testing reasons, initialize with None\n if sols == None:\n sols = []\n\n # Uses an aliased list to maintain variance of number of solutions \n # found across all recursive calls, and returns when more than 1 is found\n if len(sols) > 1:\n return False\n\n # If end of puzzle is hit, the puzzle is solved, return True\n if row == self.sl-1 and col == self.sl: \n sols.append(True)\n return\n \n # If column is the side length, mvoe indices to next row\n if col == self.sl:\n return self.is_one_sol(row+1, 0, sols)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_one_sol(row, col+1, sols)\n\n # If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n self.is_one_sol(row, col+1, sols) \n self.puzzle[row][col] = 0\n\n if len(sols) > 1:\n return False\n\n # If exhausted all possibilities, return if only one solution found thus far\n return len(sols) == 1", "def any_possible_moves(grid):\n if get_empty_cells(grid):\n return True\n for row in grid:\n if any(row[i]==row[i+1] for i in range(len(row)-1)):\n return True\n for i,val in enumerate(grid[0]):\n column = get_column(grid, i)\n if any(column[i]==column[i+1] for i in range(len(column)-1)):\n return True\n return False", "def upper_diagonal_constraint(board: Board, row: int, col: int) -> bool:\n #move to first tile in diagonal\n row -= 1\n col -= 1\n #while still in board, test and move to next position return false if fails\n while row >= 0 and col >= 0:\n if board.is_queen(row=row, col=col):\n return False\n row -= 1\n col -= 1\n return True", "def is_complete(sudoku_board):\n BoardArray = sudoku_board.CurrentGameBoard\n size = len(BoardArray)\n subsquare = int(math.sqrt(size))\n\n #check each cell on the board for a 0, or if the value of the cell\n #is present elsewhere within the same row, column, or square\n for row in range(size):\n for col in range(size):\n if BoardArray[row][col]==0:\n return False\n for i in range(size):\n if ((BoardArray[row][i] == BoardArray[row][col]) and i != col):\n return False\n if ((BoardArray[i][col] == BoardArray[row][col]) and i != row):\n return False\n #determine which square the cell is in\n SquareRow = row // subsquare\n SquareCol = col // subsquare\n for i in range(subsquare):\n for j in range(subsquare):\n if((BoardArray[SquareRow*subsquare+i][SquareCol*subsquare+j]\n == BoardArray[row][col])\n and (SquareRow*subsquare + i != row)\n and (SquareCol*subsquare + j != col)):\n return False\n return True", "def square_check(self):\n return 
len(self.matrix) == len(self.matrix[0])", "def TestRow(SudokuGrid):\r\n for i in range(9):\r\n for j in range(8):\r\n for k in range(j+1,9):\r\n if SudokuGrid[i][j]==SudokuGrid[i][k]:\r\n return False\r\n return True", "def is_possible_grid(self,row,col,user_value):\n start_row = row - (row % 3)\n start_col = col - (col % 3)\n for x in range(3):\n for y in range(3):\n if self.arr[x+start_row][y+start_col] == user_value:\n logging.debug(f\"is_posssible_grid(): (False) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_grid(): (True) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} != {user_value}\")\n return True", "def checkvalid(self,borad,row,col,n):\n # check the above column has 'Q'\n i=0\n while i!=row:\n if borad[i][col]=='Q':\n return False\n i+=1\n # check the left-top 135 and right-top 45\n i,j=row-1,col-1\n while i>=0 and j>=0:\n if borad[i][j]=='Q':\n return False\n i-=1\n j-=1\n \n i,j=row-1,col+1\n while i>=0 and j<n:\n if borad[i][j]=='Q':\n return False\n i-=1\n j+=1\n \n return True", "def check_winner(self, row, column, symbol):\r\n self.check_row(row, symbol)\r\n self.check_column(column, symbol)\r\n self.check_diag(row, column, symbol)", "def col_win(board):\n\tfor col in range(3):\n\t\tif board[0][col] != EMPTY and board[0][col] == board[1][col] == board[2][col]:\n\t\t\treturn True\n\treturn False", "def row0_invariant(self, target_col):\n \n # Returns False if zero tile is NOT in target position (0, target_col).\n if self.get_number(0, target_col) != 0:\n return False\n \n # Returns False if tiles to the right of target_col are NOT positioned correctly.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(0, col) != col:\n return False\n \n # Returns False if tiles to the right of target_col in row 1 are NOT positioned correctly.\n for col in range(target_col, self.get_width()):\n if self.get_number(1, col) != col + self.get_width():\n return False\n\n # Returns False if tiles in rows 2 and below are NOT positioned correctly.\n if 1 < self.get_height():\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True", "def test_cell_existence(board: list, i: int, j: int) -> bool:\n return not (i < 0 or i > len(board)-1 or j < 0 or j > len(board)-1)", "def is_four_in_row(board, row, column):\n sequence = [board[row][column] for j in range(4)]\n if is_subset(sequence, board[row]):\n return True\n else:\n return False", "def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and 
board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def check_lost (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n if grid[0][0]==grid[0][1] or grid[0][0]==grid[1][0]:\r\n return False \r\n if grid[0][3]==grid[0][2] or grid[0][3]==grid[1][3]:\r\n return False \r\n if grid[3][0]==grid[2][0] or grid[3][0]==grid[3][1]:\r\n return False\r\n if grid[3][3]==grid[2][3] or grid[3][3]==grid[3][2]:\r\n return False \r\n if grid[0][1]==grid[0][2] or grid[0][1]==grid[1][1]:\r\n return False \r\n if grid[0][2]==grid[1][2]:\r\n return False \r\n if grid[1][1]==grid[2][1] or grid[1][1]==grid[1][2] or grid[1][1]==grid[1][0]:\r\n return False\r\n if grid[2][1]==grid[2][0] or grid[2][1]==grid[2][2] or grid[2][1]==grid[3][1]:\r\n return False \r\n if grid[1][0]==grid[2][0]:\r\n return False\r\n if grid[1][2]==grid[1][3] or grid[1][2]==grid[2][2]:\r\n return False\r\n if grid[2][2]==grid[2][3] or grid[2][2]==grid[3][2]:\r\n return False\r\n if grid[3][1]==grid[3][2]:\r\n return False\r\n else:\r\n return True", "def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n return False\n return True", "def check_row(grid, num, i, j):\n assert i < len(grid), 'Row is out of grid!'\n assert j < len(grid[0]), 'Column is out of grid!'\n\n found = False\n for col in range(len(grid[i])):\n if grid[i][col] == num and col != j:\n found = True\n return found", "def contains_row(matrix, row):\n return (matrix == row).all(axis=1).any()", "def check_grid(grid: List):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return False\n return True", "def is_attacked(x, y, board ):\n # checking at row and column \n row = board[x]\n for val in row:\n if val == 1:\n return True \n \n col = column(board,y)\n for val in col:\n if val == 1:\n return True\n\n # getting the dimensions of board \n n = len(board[0]) \n \n #checking the diagonal values of x,y\n for i in range(n):\n for j in range(n):\n if i+j == x+y or i-j == x-y:\n if board[i][j] == 1:\n return True \n return False", "def valid_square(self, row, col, value):\n # Check that the row and col are valid puzzle indices\n if not ((0 <= row < self.sl) and (0 <= col < self.sl)):\n return False\n\n # Check that the square input is empty\n if self.puzzle[row][col] != 0:\n return False\n \n # Check that the value input is a valid puzzle value\n if not (1 <= value <= self.sl):\n if self.puzzle[row][col] == 0 and value == 0:\n return True\n return False\n \n # Check each row, column and block for same number\n for i in range(self.sl): \n if self.puzzle[row][i] == value: # Check each square in row for same value\n return False\n if self.puzzle[i][col] == value: # Check each square in col for same value\n return False\n \n # Check each square in box for same value, a little more complex index-wise\n r = self.bs*(row//self.bs) + (i//self.bs) \n c = self.bs*(col//self.bs) + (i%self.bs) \n if self.puzzle[r][c] == value:\n return False\n \n return 
True", "def check_if_column_full(self, board, x):\n for y in reversed(range(self.height // 80)):\n if board[x, 0] != 0:\n return True\n elif board[(x, y)] == 0:\n return False\n else:\n y -= y\n continue", "def valid_input(self, row, col):\n return ((row, col) not in self.marks and\n row <= WIDTH and row > 0 and\n col in COL_MAP)", "def isValidNQueensBoard(board):\n\n count = 0\n (rows, columns) = (len(board), len(board[0]))\n\n for row in range(rows):\n for column in range(columns):\n if board[row][column] == 1:\n count += 1\n\n if (count != rows) or (rows != columns):\n return False\n\n return True", "def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need to use self.size - 1 because while we're thinking of an 8x8 chess board, the computer is thinking of a 0x7 chess board\n return valid_row and valid_col", "def solve_util(self, board, col):\n try:\n if col == self.N:\n self.print_sol(board)\n return True\n\n # Trying to place this queen in all rows one by one\n res = False\n for i in range(self.N):\n if self.is_safe(board, i, col):\n board[i][col] = 1\n res = self.solve_util(board, col + 1) or res\n if type(res) == dict:\n return res\n board[i][col] = 0 # Backtracking...\n\n # if queen cannot be placed in any row in this col, then alas\n # we return false..\n return res\n except KeyboardInterrupt:\n print('Keyboard Interrupted!')\n return self.Outputs", "def check_tile_availability(self, row, col):\n return self.board[row][col] == 0", "def any_possible_moves(grid):\n\tif get_empty_cells(grid):\n\t\treturn True\n\tfor row in grid:\n\t\tif any(row[i]==row[i+1] for i in range(len(row)-1)):\n\t\t\treturn True\n\tfor i,val in enumerate(grid[0]):\n\t\tcolumn = get_column(grid, i)\n\t\tif any(column[i]==column[i+1] for i in range(len(column)-1)):\n\t\t\treturn True\n\treturn False", "def valid_column(self, col: int) -> bool:\n\n return self.check_bounds(0, col) and self.grid[0][col] == \" \"", "def isfree(col,queens):\n if col in queens:\n return False\n elif any([ abs(col-col1)==len(queens)-index for index,col1 in enumerate(queens)]):\n #c[r]==c[j]; r-j==c[r]-c[j]; r-j==c[j]-c[r]\n # col is the colomn to check; len(queens) just be the row index of col, dont subtract 1\n return False\n else:\n return True", "def check_column(self, num, num_col):\n col = self.return_col(num_col)\n for board_num in col:\n if num == board_num:\n return True\n return False", "def check_pivot_row(self, row):\r\n all_zeros = True\r\n for i in range(self.SIZE):\r\n if self.matrix[row][i] != 0:\r\n all_zeros = False\r\n break\r\n\r\n if all_zeros:\r\n self.check_solvability(0, self.matrix[row][-1])", "def is_valid_board(self):\n total = sum(range(1, self.n+1))\n d = {x : [set([]), set([])] for x in range(1, self.n+1)}\n for row_index in range(self.n):\n for col_index in range(self.n):\n num = self.board[row_index][col_index]\n try:\n if row_index in d[num][0] or col_index in d[num][1]:\n print(\"Invalid solution.\")\n return\n except KeyError:\n print(\"Unsolved solution.\") # d[0]\n return\n\n d[num][0].add(row_index)\n d[num][1].add(col_index)\n print(\"Valid solution!\")", "def check_col(grid, num, i, j):\n assert i < len(grid), 'Row is out of grid!'\n assert j < len(grid[0]), 'Column is out of grid!' 
\n\n found = False\n for pos, row in enumerate(grid):\n if row[j] == num and pos != i:\n found = True\n return found", "def is_cell_row_valid(board, r, c):\n return all(board[r][i] <= board[r][c] for i in xrange(len(board[r])))", "def check_row(self, num, num_row):\n row = self.return_row(num_row)\n for board_num in row:\n if num == board_num:\n return True\n return False", "def is_valid_move(board, picked_column):\n if picked_column < 0 or picked_column >= len(board[0]):\n return False\n for row in range(len(board)):\n if board[row][picked_column] == 0:\n return True\n return False", "def isMine(self, row, col):\n return self.board[row, col] == 1", "def check_vertical(cls, board, disc, column, row):\n next_row = row + 1\n for row_idx in range(next_row, next_row + cls.MAX_COUNT):\n try:\n if board[column][row_idx] != disc:\n return False\n except IndexError:\n return False\n return True", "def sudoku_isready(A):\r\n x = isqrt(len(A))\r\n if x*x == len(A):\r\n return True\r\n return False", "def row0_invariant(self, target_col):\r\n \r\n solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[0][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n if self._grid[1][target_col] != solved_grid[1][target_col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right", "def check_sudoku(board):\n # XXX XXX XXX\n # XXX XXX XXX\n # XXX XXX XXX\n\n # XXX XXX XXX\n # XXX XXX XXX\n # XXX XXX XXX\n\n # XXX XXX XXX\n # XXX XXX XXX\n # XXX XXX XXX\n\n # check the rows\n for i, row in enumerate(board):\n valid_row = [False] * (len(row) + 1)\n for j, val in enumerate(row):\n if valid_row[val]:\n return False\n if val != 0:\n valid_row[val] = True\n\n # check the columns\n for i, _ in enumerate(board[0]):\n valid_row = [False] * (len(board) + 1)\n for j, _ in enumerate(board):\n if valid_row[board[j][i]]:\n return False\n if val != 0:\n valid_row[board[j][i]] = True\n\n # check the blocks\n block_row = 0\n block_column= 0\n i = 0\n while i < len(board):\n j = 0\n while j < len(board):\n block_check = [False] * (len(board) + 1)\n while block_row < 3:\n block_column = 0\n while block_column < 3:\n if block_check[board[i+block_row][j+block_column]]:\n return False\n if board[i+block_row][j+block_column]:\n block_check[board[i+block_row][j+block_column]] = True\n block_column += 1\n block_row += 1\n block_row = 0\n j += 3\n i += 3\n\n return True", "def correctSudoku(sudoku):\r\n \r\n \"\"\" rows col \"\"\"\r\n for i in range (0, 9):\r\n \trowFalse = set(np.reshape(sudoku[i, :], (9))) != correctSet\r\n \tcolFalse = set(np.reshape(sudoku[:, i], (9))) != correctSet\r\n if rowFalse or colFalse:\r\n return False\r\n \r\n \"\"\" 3x3 \"\"\"\r\n for i in range(0, 3):\r\n for j in range(0, 3):\r\n threeTimesThree = sudoku[i * 3:(i + 1) * 3, j * 3:(j + 1)*3]\r\n if set(np.reshape(threeTimesThree, (9))) != correctSet:\r\n return False\r\n return True" ]
[ "0.7744749", "0.7643967", "0.7540741", "0.7405464", "0.7363063", "0.7331644", "0.73298305", "0.72720087", "0.7253807", "0.71994525", "0.7157826", "0.71566737", "0.71124315", "0.7103953", "0.7086645", "0.70836926", "0.7064152", "0.7060724", "0.7032539", "0.7031373", "0.7020518", "0.70196646", "0.69543797", "0.6903697", "0.68981075", "0.68981075", "0.68949056", "0.68874323", "0.6883886", "0.6859798", "0.683654", "0.68352544", "0.68277216", "0.68231404", "0.68104905", "0.6798057", "0.67913043", "0.67835206", "0.67741007", "0.67703724", "0.6769373", "0.67665374", "0.67625326", "0.6758065", "0.6756662", "0.67553556", "0.6754439", "0.674822", "0.6747716", "0.6722511", "0.67173487", "0.67171884", "0.6708221", "0.670349", "0.67028886", "0.66986275", "0.6689537", "0.6682906", "0.66777724", "0.66773677", "0.66717446", "0.66585535", "0.66575634", "0.665647", "0.66442984", "0.663648", "0.6626179", "0.661065", "0.65918607", "0.65741175", "0.65741175", "0.657372", "0.6572943", "0.65630615", "0.6558324", "0.65575373", "0.655663", "0.65538996", "0.65504634", "0.65363187", "0.6532078", "0.6531551", "0.6531173", "0.6529518", "0.65284544", "0.6519943", "0.65004146", "0.6499719", "0.6492153", "0.64892364", "0.6478167", "0.64754355", "0.6475301", "0.6469903", "0.64650005", "0.64586323", "0.6455337", "0.64425474", "0.6440563", "0.6424056" ]
0.77575016
0
Solve the tile in row zero at the specified column. Updates the puzzle and returns a move string.
def solve_row0_tile(self, target_col):
    # Move the zero tile from (0, target_col) down-left to (1, target_col - 1).
    move_str = 'ld'
    self.update_puzzle(move_str)
    cur_row, cur_col = self.current_position(0, target_col)
    if cur_row == 0 and cur_col == target_col:
        # The 'ld' move alone placed the target tile; nothing more to do.
        return move_str
    else:
        # Reposition the target tile to (1, target_col - 1), then apply the
        # fixed 2x3 solver sequence to finish both tiles of this column pair.
        move_str += self.position_tile(1, target_col-1, cur_row, cur_col)
        move_str += 'urdlurrdluldrruld'
        # The leading 'ld' was already applied above, so replay only the remainder.
        self.update_puzzle(move_str[2:])
        return move_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += 
self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def solve_col0_tile(self, target_row):\n move_str = 'ur'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(target_row, 0)\n if cur_row == target_row and cur_col == 0:\n move_str += 'r' * (self._width - 2)\n else:\n move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)\n move_str += 'ruldrdlurdluurddlur'\n move_str += 'r' * (self._width - 2)\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * (col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), \"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = 
(zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: 
x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 
'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: 
x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, 0), \"tiles to right and below incorrectly ordered\"\n\n # Move zero tile from target position (target_row, 0) to (target_row - 1, 1).\n self.update_puzzle(\"ur\")\n\n move_str = \"\"\n \n # correct_tile's position is determined after moving zero tile \"ur\" \n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(target_row, 0)\n \n # Moves to reposition correct_tile to target position.\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(target_row, 0):\n move_str += str(self.position_tile(target_row - 1, 1, correct_tile))\n move_str += str(\"ruldrdlurdluurddlur\")\n\n # Moves to reposition zero tile to end of column of target_row + 1.\n move_str += str(\"r\" * (self.get_width() - 2)) \n \n self.update_puzzle(move_str)\n\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1) \n \n move_str = \"ur\" + move_str\n return move_str", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - 
current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', 
move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position (0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def 
solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def solve_row0_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row0_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n \n # Move zero tile from target position (0, target_col) to (1, target_col - 1) \n self.update_puzzle(\"ld\")\n \n move_str = \"\"\n\n # correct_tile's position is determined after moving zero tile \"ld\"\n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(0, target_col) \n \n # Moves to reposition correct_tile to target position, and\n # the zero tile to (1, target_col - 1).\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(0, target_col):\n move_str += str(self.position_tile(1, target_col - 1, correct_tile))\n move_str += str(\"urdlurrdluldrruld\")\n\n self.update_puzzle(move_str)\n\n assert self.row1_invariant(target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"ld\" + move_str\n return move_str", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert 
self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_row0_tile(self, target_col):\r\n assert self.row0_invariant(target_col)\r\n move = \"ld\"\r\n self.update_puzzle(move)\r\n \r\n row, col = self.current_position(0, target_col)\r\n if row == 0 and col == target_col:\r\n return move\r\n else:\r\n move_to_target = self.move_to_target(1, target_col - 1, row, col)\r\n # 2x3 puzzle solver\r\n move_to_target += \"urdlurrdluldrruld\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n return move", "def solve_row1_tile(self, target_col):\n cur_row, cur_col = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)\n self.update_puzzle(move_str)\n return move_str", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). \n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). 
\r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + 
path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero column', zero_col, '== Target column', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO is located on the right of\r\n #### the target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look at the puzzle above, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile that is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO is located under the target tile, and both\r\n #### of them are located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO is located on the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located on the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', 
cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invariant is False\"\r\n return whole_move", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n 
target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n move_str += 'rdlu'\n self.update_puzzle('rdlu')\n return move_str", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 
'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solve tile (0,j) in tile',(rows,cols)\r\n # print 'Grid after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrats, puzzle is SOLVED!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we are solving Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Column value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we are solving first two rows'\r\n\r\n #return \"\"\r", "def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" 
* rotation_num) \n return move_str", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def solve(self, board) -> None:\n if board == [[]] or board == []:\n return\n\n r, c = len(board), len(board[0])\n\n from collections import deque\n queue = deque()\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) 
and board[i][j] == 'O':\n queue.append([i, j])\n board[i][j] = 'M'\n\n while queue:\n i, j = queue.popleft()\n for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):\n if 0 <= x <= r - 1 and 0 <= y <= c - 1 and board[x][y] == 'O':\n board[x][y] = 'M'\n queue.append([x, y])\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def human_go(self, board):\r\n coord_pattern = re.compile(\"[0-{}]$\".format(board.shape[1] - 1))\r\n print(\"Enter Column and press enter.\")\r\n input_str = input(\"(from 0-6)\\n\")\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n col = int(input_str)\r\n if board[0][col] != 0:\r\n print(\"That column is already full, please try again\")\r\n return self.human_go(board)\r\n else:\r\n for row in board[::-1]:\r\n if row[col] == 0:\r\n row[col] = -1\r\n return board", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move 
off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 
1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve(self, board: List[List[str]]) -> None:\n if board == [] or board == [[]]: # corner case\n return\n\n r, c = len(board), len(board[0])\n\n def dfs(i, j): # visited i, j neighbors and change o to M\n if i < 0 or i > r - 1 or j < 0 or j > c - 1 or board[i][j] == 'X' or board[i][j] == 'M':\n return\n\n board[i][j] = 'M'\n dfs(i - 1, j)\n dfs(i + 1, j)\n dfs(i, j - 1)\n dfs(i, j + 1)\n\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n dfs(i, j)\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if board is None or len(board) == 0:\n return \n row, col = len(board), len(board[0])\n for i in range(row):\n self.dfs(board, i, 0)\n self.dfs(board, i, col - 1)\n for j in range(col):\n self.dfs(board, 0, j)\n self.dfs(board, row-1, j)\n for i in range(row):\n for j in range(col):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '-':\n board[i][j] = 'O'", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if 
current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"ulld\"\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n m, n = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < m or not 0 <= y < n or board[x][y] != \"O\":\n return\n direction = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n board[x][y] = \"Y\"\n for d in direction:\n dfs(x+d[0], y+d[1])\n\n for i in range(m):\n dfs(i, 0)\n dfs(i, n-1)\n for i in range(1, n-1):\n dfs(0, i)\n dfs(m-1, i)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"Y\":\n board[i][j] = \"O\"", "def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n rows = len(board)\n cols = len(board[0])\n\n def explore(i, j):\n if i < 0 or i >= rows:\n return\n if j < 0 or j >= cols:\n return\n if board[i][j] != \"O\":\n return\n board[i][j] = \"Y\"\n explore(i + 1, j)\n explore(i - 1, j)\n explore(i, j + 1)\n explore(i, j - 1)\n\n for i in [0, rows - 1]:\n for j in range(cols):\n explore(i, j)\n\n for j in [0, cols - 1]:\n for i in range(rows):\n explore(i, j)\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == \"Y\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n self.update_puzzle(move)\r\n return move", "def solve(self, board: List[List[str]]) -> None:\r\n if not board:\r\n return\r\n m, n = len(board), len(board[0])\r\n board_filter = lambda p: 0 <= p[0] < m and 0 <= p[1] < n and board[p[0]][p[1]] == 'O'\r\n queue = list(filter(board_filter, [(x, y) for r in range(max(m, n)) \r\n for x, y in ((r, 0), (r, n-1), (0, r), (m-1, r))]))\r\n while queue:\r\n x, y = queue.pop()\r\n board[x][y] = 'W'\r\n queue.extend(list(filter(board_filter, ((x-1, y), (x+1, y), 
(x, y-1), (x, y+1)))))\r\n for row in board:\r\n for i, c in enumerate(row):\r\n row[i] = 'XO'[c=='W']", "def solve(self, board: List[List[str]]) -> None:", "def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"", "def solve(self, board):\r\n if not board or not board[0]:\r\n return\r\n \r\n self.m = len(board)\r\n self.n = len(board[0])\r\n boarder = []\r\n \r\n # Collecting all the 'O' on the boarder\r\n for i in range(self.m):\r\n if board[i][0] == 'O':\r\n boarder.append([i, 0])\r\n if board[i][self.n-1] == 'O':\r\n boarder.append([i, self.n-1])\r\n for j in range(self.n):\r\n if board[0][j] == 'O':\r\n boarder.append([0, j])\r\n if board[self.m-1][j] == 'O':\r\n boarder.append([self.m-1, j])\r\n \r\n for row, col in boarder:\r\n self.BFS(board, row, col)\r\n \r\n for row in range(self.m):\r\n for col in range(self.n):\r\n if board[row][col] == 'O':\r\n board[row][col] = 'X'\r\n elif board[row][col] == 'E':\r\n board[row][col] = 'O'\r\n print(board)", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0:\n return\n self.h = len(board)\n self.w = len(board[0])\n self.board = board\n for i in range(self.h):\n for j in range(self.w):\n if i == 0 or i == self.h-1 or j == 0 or j == self.w-1:\n #print (i,j)\n self.dfs((i,j))\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"O\":\n self.board[i][j]=\"X\"\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"#\":\n self.board[i][j]=\"O\"", "def solve(self, board: List[List[str]]) -> None:\n def dfs(board, i, j):\n m = len(board)\n n = len(board[0])\n if i < 0 or i >= m or j < 0 or j >= n: return\n\n if board[i][j] != 'O': return\n\n board[i][j] = '#'\n [dfs(board, i+di, j+dj) for di, dj in [(0, 1), (1, 0), (0, -1), (-1, 0)]]\n\n if len(board) == 0: return\n m = len(board)\n n = len(board[0])\n\n for i in range(m):\n dfs(board, i, 0)\n dfs(board, i, n-1)\n\n for j in range(n):\n dfs(board, 0, j)\n dfs(board, m-1, j)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n self.nRow, self.nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < self.nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < self.nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, self.nRow - 1]:\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, self.nCol - 1]:\n for kr in range(self.nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(self.nRow):\n 
for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve(self, board):\n def dfs(board, r, c):\n if r < 0 or c < 0 or r > rows - 1 or c > cols - 1 or board[r][c] == 'X' or board[r][c] == '#':\n return\n board[r][c] = '#'\n dfs(board, r - 1, c)\n dfs(board, r + 1, c)\n dfs(board, r, c - 1)\n dfs(board, r, c + 1)\n\n if len(board) == 0:\n return;\n rows = len(board)\n cols = len(board[0])\n for i in range(rows):\n for j in range(cols):\n if (i == 0 or j == 0 or i == rows - 1 or j == cols - 1) and board[i][j] == 'O':\n dfs(board, i, j)\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return\n\n m = len(board)\n n = len(board[0])\n\n uf = UnionFind(m * n + 1)\n dummy = m * n\n\n # connect 'O' at first and last col with dummy\n for i in range(m):\n if board[i][0] == 'O':\n uf.union(dummy, i * n)\n if board[i][-1] == 'O':\n uf.union(dummy, i * n + n - 1)\n\n # connect 'O' at first and last row with dummy\n for j in range(n):\n if board[0][j] == 'O':\n uf.union(dummy, j)\n if board[-1][j] == 'O':\n uf.union(dummy, n * (m-1) + j)\n\n d = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n\n for i in range(1, m-1):\n for j in range(1, n-1):\n if board[i][j] == 'O':\n for di, dj in d:\n x = i+di\n y = j+dj\n if board[x][y] == 'O':\n uf.union(x*n+y, i*n+j)\n\n # change not connected 'O' with 'X'\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i * n + j):\n board[i][j] = 'X'", "def solve(self, board: 'List[List[str]]') -> 'None':\n if not board:\n return\n m, n = len(board), len(board[0])\n save = [ij for k in range(m + n) for ij in ((0, k), (m - 1, k), (k, 0), (k, n - 1))]\n while save:\n i, j = save.pop()\n if -1 < i < m and -1 < j < n and board[i][j] == 'O':\n board[i][j] = 'S'\n save += (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)\n for row in board:\n for i, c in enumerate(row):\n row[i] = 'XO'[c == 'S']", "def make_move(self, column):\r\n trans_board = numpy.transpose(self.__board[::1]) # transpose the\r\n # board so that columns are now arrays\r\n if 0 not in trans_board[column] or self.get_winner() or column >= \\\r\n self.BOARD_COLUMNS or column < 0:\r\n # column is full, illegal or the game is already finished\r\n return self.ILLEGAL_MOVE # exception?\r\n else:\r\n reversed_col = list(reversed(trans_board[column]))\r\n for hole in reversed_col:\r\n if hole == 0:\r\n row_i = self.BOARD_ROWS - 1 - reversed_col.index(hole)\r\n self.__board[row_i][column] = self.__cur_player\r\n winner = self.get_winner()\r\n if winner: # is not none\r\n return winner\r\n self.__switch_player()", "def move_to_target(self, target_row, target_col, row, col):\r\n move = \"\"\r\n # typical move to move target tile to target pos.\r\n solver_move = \"druld\"\r\n # move up first\r\n move = (target_row - row) * \"u\"\r\n # conditional statements for moving the tile:\r\n # 1. case curr_pos of tile and target_tile are in same col\r\n if (target_col - col) == 0:\r\n move += \"ld\" + ((target_row - row) - 1) * solver_move\r\n else:\r\n # 2. curr_pos of tile is on the left of target pos\r\n if (target_col - col) > 0:\r\n move += (target_col - col) * \"l\"\r\n if row == 0:\r\n move += (abs(target_col - col) - 1) * \"drrul\"\r\n else:\r\n move += (abs(target_col - col) - 1) * \"urrdl\"\r\n # 3. 
curr_pos of tile is on the right of target pos:\r\n elif (target_col - col) < 0:\r\n move += (abs(target_col - col) - 1) * \"r\"\r\n if row == 0:\r\n move += abs(target_col - col) * \"rdllu\"\r\n else:\r\n move += abs(target_col - col) * \"rulld\"\r\n move += (target_row - row) * solver_move\r\n return move", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n nRow, nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, nRow - 1]:\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, nCol - 1]:\n for kr in range(nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(nRow):\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def move(self, row, col, player):\r\n if player == 1:\r\n self.mat[row][col] = 1\r\n else:\r\n self.mat[row][col] = -1\r\n if self.checkrow(player,row) or self.checkcol(player,col):\r\n return player\r\n if row == col or row + col == self.size-1:\r\n if self.checkdiag(player):\r\n return player\r\n return 0", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def solve(self, board: List[List[str]]) -> None:\n def DFS(board, i, j):\n q = []\n q.append([i, j])\n \n while q:\n x, y = q.pop()\n board[x][y] = \"*\"\n neighbors = ((0, 1), (0, -1), (1, 0), (-1, 0))\n for dx, dy in neighbors:\n if 0 <= x + dx <= len(board) - 1 and 0 <= y + dy <= len(board[0]) - 1 and board[x + dx][y + dy] == \"O\":\n q.append([x + dx, y + dy])\n \n \n # first row\n i = 0\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last row\n i = len(board) - 1\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # first column\n j = 0\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last column\n j = len(board[0]) - 1\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n \n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"*\":\n board[i][j] = \"O\"", "def move(self, row: int, 
col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column 
one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[1] == \"1\":\n return 1\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being overwritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Check if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # 
This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve(self, board: 'List[List[str]]') -> 'None':\n\n def dfs(i, j, tmp):\n nonlocal flag\n if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):\n flag = False\n return\n if board[i][j] != 'O' or [i, j] in tmp:\n return\n tmp.append([i, j])\n dfs(i - 1, j, tmp)\n dfs(i + 1, j, tmp)\n dfs(i, j + 1, tmp)\n dfs(i, j - 1, tmp)\n return tmp\n\n change = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 'O' and [i, j] not in change:\n tmp = []\n flag = True\n tmp = dfs(i, j, tmp[:])\n if flag:\n for loc in tmp:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'\n\n for loc in change:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = 1 if player == 1 else -1\n rowsum = sum(self.board[row])\n colsum = sum([self.board[r][col] for r in range(self.n)])\n diagsum1 = sum([self.board[i][i] for i in range(self.n)])\n diagsum2 = sum([self.board[i][-i-1] for i in range(self.n)])\n if player == 1:\n if rowsum == self.n or colsum == self.n or diagsum1 == self.n or diagsum2 == self.n:\n return 1\n else:\n if rowsum == -self.n or colsum == -self.n or diagsum1 == -self.n or diagsum2 == -self.n:\n return 2\n return 0", "def solve(board):\r\n \r\n #An O(2mn) time solution; the first O(mn) traversal is to preform a bfs on all tiles attached to edge 'O' tiles (can't convert to 'X's); the second is to convert all remaining 'O's into 'X's\r\n \r\n def bfs(curr,r,c):\r\n if not curr: return\r\n prev = len(curr)\r\n for n in range(0,prev):\r\n i,j = curr[n][0],curr[n][1]\r\n board[i][j] = 'A'\r\n for x,y in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n x_n = i+x\r\n y_n = j+y\r\n if x_n >= 0 and x_n < r and y_n >= 0 and y_n < c and board[x_n][y_n] == \"O\":\r\n curr += [(x_n,y_n)]\r\n bfs(curr[prev:],r,c)\r\n\r\n \r\n q,r,c = [],len(board),len(board[0])\r\n if not r or q: return\r\n\r\n for i in range(r):\r\n for j in range(c):\r\n if (i==0 or j==0 or i==r-1 or j==c-1) and board[i][j] == \"O\":\r\n q += [(i,j)]\r\n \r\n bfs(q,r,c)\r\n\r\n for i in range(r):\r\n for j in range(c): \r\n if board[i][j] == \"O\": \r\n board[i][j] = \"X\"\r\n elif board[i][j] == \"A\":\r\n board[i][j] = \"O\"\r\n \r\n return", "def solve(board) -> None:\n rows = len(board)\n if rows==0:\n return board\n cols = len(board[0])\n \n def is_border(rc):\n (rr, cc) =rc\n if rr<rows and rr< cols and rr>=0 and cc>=0 and board[rr][cc]=='O' and (rr==0 or rr==rows-1 or cc==0 or cc==cols-1):\n return True\n return False\n \n transf = []\n for r in range(rows):\n for c in range(cols):\n if board[r][c]=='O' and not is_border((r,c)) and not any(map(is_border, [(r-1, c), (r+1, c), (r, c-1), (r, c+1)])):\n transf.append((r,c))\n if transf:\n for r,c in transf:\n board[r][c]='X'\n return board", "def execute(self, row, col, action=None):\n assert action is not None, \"No action selected!\"\n\n if action == 'north':\n if (row-1) < 0 or self.board[row-1, col] == '*':\n return row, col\n elif action == 'south':\n if (row+1) >= self.N or self.board[row+1, col] == '*':\n return row, col\n elif action == 'east':\n if (col+1) >= self.M or self.board[row, col+1] == '*':\n return row, col\n elif action == 'west':\n if (col-1) < 0 or self.board[row, col-1] == '*':\n return row, col\n\n return row + self.step_row[action], col 
+ self.step_col[action]", "def check_move(self, col):\n\n for i in range(len(self.board) - 1, -1, -1):\n if self.board[i][col] == 0:\n return i\n\n return \"Full\"", "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_move(board, player):\n row, col = 0, 0\n return row, col", "def move(self, row, col, player):\n offset = player * 2 - 3 # 1 or -1\n self.row[row] += offset\n self.col[col] += offset\n if row == col:\n self.diag += offset\n if row + col == self.n - 1:\n self.anti_diag += offset\n if self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 2\n if -self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 1\n return 0", "def solve(self, board: List[List[str]]) -> None:\n # New Solution: DFS on boarder (140ms: 89.07%)\n if not board or not board[0]: return\n def dfs(i, j):\n if board[i][j]=='O':\n board[i][j] = '*'\n if i-1>=0:\n dfs(i-1, j)\n if i+1<len(board):\n dfs(i+1, j)\n if j-1>=0:\n dfs(i, j-1)\n if j+1<len(board[0]):\n dfs(i, j+1)\n height, width = len(board), len(board[0])\n for i in range(width):\n if board[0][i]=='O':\n dfs(0, i)\n if board[height-1][i]=='O':\n dfs(height-1, i)\n for i in range(height):\n if board[i][0]=='O':\n dfs(i, 0)\n if board[i][width-1]=='O':\n dfs(i, width-1)\n for i in range(height):\n for j in range(width):\n if board[i][j]=='O':\n board[i][j] = 'X'\n elif board[i][j]=='*':\n board[i][j] = 'O'", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def make_move(board: Connect4Board) -> \"(row, col)\":\r\n\r\n while True:\r\n\r\n try:\r\n\r\n print('\\nPlease Specify your move. Enter the number column of a cell on the board.')\r\n print('-'*85)\r\n \r\n col = Connect4GameUI.move_col(board)\r\n row = Connect4GameUI._get_valid_row(board, col)\r\n print(row,col)\r\n return row, col\r\n\r\n break\r\n\r\n except:\r\n print('\\nInvalid move!!!')\r\n print('Please try it again.')", "def solve(self, board: List[List[str]]) -> None:\n def _dfs(i, j):\n if i < 0 or j < 0 or i >= len(board) or j >= len(board[0]) or board[i][j] in ['X', '#']: return\n board[i][j] = '#'\n _dfs(i-1, j)\n _dfs(i+1, j)\n _dfs(i, j-1)\n _dfs(i, j+1)\n\n if not board or not board[0]: return\n m, n = len(board), len(board[0])\n for i in range(0, m):\n for j in range(0, n):\n is_edge = i == 0 or j == 0 or i == m-1 or j == n-1\n if is_edge and board[i][j] == 'O':\n _dfs(i, j)\n print(board)\n\n for i in range(0, m):\n for j in range(0, n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board) -> None:\n for index in range (1, len(board)-1):\n arr = board[index]\n for ch in range(1, len(arr)-1):\n if arr[ch] is 'O':\n safe = True\n if ch-1 == 0 and arr[ch-1] is 'O':\n safe = False\n if ch +1 == len(arr)-1 and arr[ch+1] is 'O':\n safe = False\n if index -1 == 0 and board[index-1][ch] is 'O':\n safe = False\n if index + 1 == len(board)-1 and board[index + 1][ch] is 'O':\n safe = False\n if safe:\n arr[ch] = 'X'", "def move(self, row: int, col: int, player: int) -> int:\n if player == 1:\n self.newList[row][col] = 1\n self.colSum[col] += 1\n self.rowSum[row] += 1\n if row == col:\n self.diag += 1\n if row + col == (self.n - 1):\n self.revDiag += 1\n if self.rowSum[row] == self.n or self.colSum[col] == self.n or self.diag == self.n or self.revDiag == self.n:\n return 1\n if player == 2:\n self.newList[row][col] = -1\n self.colSum[col] -= 1\n 
self.rowSum[row] -= 1\n if row == col:\n self.diag -= 1\n if row + col == (self.n - 1):\n self.revDiag -= 1\n if self.rowSum[row] == -self.n or self.colSum[col] == -self.n or self.diag == -self.n or self.revDiag == -self.n:\n return 2\n \n return 0", "def move(self, row: int, col: int, player: int) -> int:\n n = self.n\n if player == 1:\n self.rows_1[row] += 1\n self.cols_1[col] += 1\n if player == 2:\n self.rows_2[row] += 1\n self.cols_2[col] += 1\n if row == col:\n self.diag1[row] = player\n if row + col + 1 == n:\n self.diag2[row] = player\n f = 0\n g = 0\n for i in range(n):\n if self.rows_1[row] == n or self.cols_1[col] == n:\n return 1\n if self.rows_2[row] == n or self.cols_2[col] == n:\n return 2 \n if self.diag1[i] != self.diag1[0]:\n f = 1\n if self.diag2[i] != self.diag2[0]:\n g = 1\n if f == 0:\n return self.diag1[0]\n if g == 0:\n return self.diag2[0]\n return 0", "def make_move(self, row, column):\n\t\tif self.board[int(row)][int(column)] == '-':\n\t\t\tself.board[int(row)][int(column)] = self.marker\n\t\telse:\n\t\t\tprint(\"That spot is occupied, you messed up, you lose your turn for doing bad things\")", "def solve(self, board) -> None:\n coords = []\n board_len = len(board)\n row_len = len(board[0]) - 1\n # top\n # coords.append([[0, i] for i, q in enumerate(board[0]) if q == \"O\"])\n # # bottom\n # coords.append(\n # [[board_len, i] for i, q in enumerate(board[board_len]) if q == \"O\"]\n # )\n for i in range(board_len):\n row_coord = [[i,indx] for indx, q in enumerate(board[i]) if q == \"O\"]\n # import pdb; pdb.set_trace()\n for x in row_coord:\n coords.append(x)\n for x in coords:\n if len(x) == 0:\n continue\n if x[0] == 0:\n print(\"top border\")\n elif x[0] == board_len - 1:\n print(\"bottom border\")\n elif x[1] == 0:\n print(\"left border\")\n elif x[1] == row_len:\n prin(\"right border\")", "def solve(self, board: List[List[str]]) -> None:\n visited = [[False for x in range(len(board[0]))] for y in range(len(board))]\n for i in range(len(board)):\n for j in range(len(board[i])):\n if not visited[i][j] and board[i][j] == 'O':\n res = []\n result = self.gatherO(board, i, j, res, visited)\n if not result:\n for coordinate in res:\n board[coordinate[0]][coordinate[1]] = 'X'", "def solve(self, board) -> None:\n x_length = len(board)\n if x_length == 0: \n return\n\n y_length = len(board[0])\n confirmed = set()\n dfs = []\n for i in range(x_length):\n if board[i][0] == 'O':\n board[i][0] = 'temp'\n dfs.append((i, 0))\n if board[i][y_length - 1] == 'O':\n board[i][y_length - 1] = 'temp'\n dfs.append((i, y_length - 1))\n for j in range(y_length):\n if board[0][j] == 'O':\n board[0][j] = 'temp'\n dfs.append((0, j))\n if board[x_length - 1][j] == 'O':\n board[x_length - 1][j] = 'temp'\n dfs.append((x_length - 1, j))\n while dfs:\n i, j = dfs.pop()\n confirmed.add((i, j))\n if i+1 < x_length and board[i+1][j] == 'O':\n board[i+1][j] = 'temp'\n dfs.append((i + 1, j))\n if i > 0 and board[i-1][j] == 'O':\n board[i-1][j] = 'temp'\n dfs.append((i-1, j))\n if j+1 < y_length and board[i][j+1] == 'O':\n board[i][j+1] = 'temp'\n dfs.append((i, j + 1))\n if j > 0 and board[i][j-1] == 'O':\n board[i][j-1] = 'temp'\n dfs.append((i, j-1))\n for i in range(x_length):\n for j in range(y_length):\n if (i, j) in confirmed:\n board[i][j] = 'O'\n else:\n board[i][j] = 'X'\n return", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, 
value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def solveSudoku(self, board):\n\n digits = { str(i) for i in range(1, 10) }\n rows = [ digits.copy() for _ in range(9) ]\n cols = [ digits.copy() for _ in range(9) ]\n boxs = [ [ digits.copy() for _ in range(3) ] for _ in range(3) ]\n unoccupied = set()\n\n def __recursiveSolver():\n if not unoccupied:\n return\n\n choices = digits.copy()\n for row, col in unoccupied:\n possible_moves = rows[row] & cols[col] & boxs[row // 3][col // 3]\n if len(possible_moves) < len(choices):\n action_pos = (row, col)\n choices = possible_moves\n if len(choices) == 1:\n break\n\n for choice in choices:\n (row, col) = action_pos\n\n unoccupied.remove(action_pos)\n board[row][col] = choice\n rows[row].remove(choice)\n cols[col].remove(choice)\n boxs[row // 3][col // 3].remove(choice)\n\n __recursiveSolver()\n if not unoccupied: return\n\n unoccupied.add(action_pos)\n board[row][col] = '.'\n rows[row].add(choice)\n cols[col].add(choice)\n boxs[row // 3][col // 3].add(choice)\n\n for row in range(9):\n for col in range(9):\n ch = board[row][col]\n if ch == '.':\n unoccupied.add((row, col))\n else:\n rows[row].remove(ch)\n cols[col].remove(ch)\n boxs[row // 3][col // 3].remove(ch)\n\n __recursiveSolver()", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return []\n nr = len(board)\n nc = len(board[0])\n\n # begin dfs from boundaries with letter \"O\"\n for r in range(nr):\n for c in range(nc):\n if r == 0 or r == nr-1 or c == 0 or c == nc-1:\n if board[r][c] == \"O\":\n self.dfs(board, r, c)\n\n # change \"O\" to \"X\" and \"#\" to \"O\"\n for r in range(nr):\n for c in range(nc):\n if board[r][c] == \"O\":\n board[r][c] = \"X\"\n elif board[r][c] == \"#\":\n board[r][c] = \"O\"" ]
[ "0.7689822", "0.76361793", "0.7602665", "0.759841", "0.7547982", "0.75370145", "0.75116783", "0.7487752", "0.7478812", "0.74497175", "0.7435299", "0.7407118", "0.7397174", "0.73776317", "0.73732615", "0.7324699", "0.72756827", "0.72559106", "0.7206573", "0.71551996", "0.7140375", "0.71336603", "0.7130972", "0.7064705", "0.7057786", "0.70349383", "0.7009467", "0.70032966", "0.6981267", "0.692232", "0.6843114", "0.68304265", "0.68043596", "0.6793013", "0.6661478", "0.66392213", "0.66384995", "0.6629243", "0.6624516", "0.6562946", "0.6547551", "0.65120554", "0.64690065", "0.6463337", "0.6463337", "0.6463337", "0.6461038", "0.6461038", "0.6442287", "0.6416701", "0.6415396", "0.6406427", "0.6388689", "0.6321458", "0.6317301", "0.62179285", "0.6161388", "0.6149291", "0.61431324", "0.612199", "0.60966474", "0.6071344", "0.6065953", "0.6054412", "0.6033915", "0.6023434", "0.6011581", "0.5990514", "0.59699047", "0.5951187", "0.59380174", "0.5924024", "0.59170175", "0.58692485", "0.58520216", "0.584852", "0.5847602", "0.58435917", "0.58354825", "0.58347327", "0.58335406", "0.5826953", "0.5814613", "0.57892853", "0.5788401", "0.5785238", "0.5765685", "0.57636666", "0.57590806", "0.5756696", "0.57558256", "0.5753852", "0.5751244", "0.5747492", "0.57417387", "0.5741668", "0.57338166", "0.5722073", "0.57192683", "0.5709736" ]
0.745573
9
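
The two bare lines above close a record: the relevance score assigned to the positive document (0.745573) and its rank (9) among the scored candidates, while the preceding list of scores pairs index-by-index with the quoted negative snippets. Below is a minimal sketch of consuming one such record under that alignment; the `record` dict, its key names, and the truncated snippet strings are illustrative stand-ins, not part of this dump:

    # Hedged sketch: rank a record's positive against its scored negatives.
    # All key names and placeholder snippets here are assumptions for illustration.
    record = {
        "positive_score": "0.745573",                    # bare score line above
        "positive_rank": "9",                            # bare rank line above
        "negatives": ["def check_move(self, col): ...",  # truncated stand-ins
                      "def get_move(board, player): ..."],
        "negative_scores": ["0.7689822", "0.76361793"],  # first two scores above
    }

    # Scores and snippets align one-to-one; sort to see the candidate ordering.
    ranked = sorted(
        zip(record["negative_scores"], record["negatives"]),
        key=lambda pair: float(pair[0]),
        reverse=True,
    )
    for score, snippet in ranked:
        print(f"{float(score):.4f}  {snippet[:30]}")
    print(f"positive: {record['positive_score']} (rank {record['positive_rank']})")
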
Solve the tile in row one at the specified column
Updates puzzle and returns a move string
def solve_row1_tile(self, target_col):
    cur_row, cur_col = self.current_position(1, target_col)
    move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)
    self.update_puzzle(move_str)
    return move_str
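
The function above relies on two pieces of machinery that recur throughout the negative snippets below: `current_position` to locate a tile and `update_puzzle` to apply a move string built from "u"/"d"/"l"/"r" steps (its `position_tile` helper, including the `need_ld` flag, is not shown in this record). A minimal runnable sketch of that move-string mechanic follows, assuming 0 marks the blank tile and tiles are numbered row * width + col in their solved positions; the `MiniPuzzle` class and its 2x3 grid are illustrative, not part of the dataset:

    # Minimal sketch; MiniPuzzle and its grid are hypothetical stand-ins.
    class MiniPuzzle:
        def __init__(self, grid):
            self._grid = [list(row) for row in grid]

        def current_position(self, row, col):
            # Locate the tile whose solved spot is (row, col); 0 is the blank.
            target = row * len(self._grid[0]) + col
            for cur_row, line in enumerate(self._grid):
                for cur_col, value in enumerate(line):
                    if value == target:
                        return cur_row, cur_col
            raise ValueError("tile not found")

        def update_puzzle(self, move_string):
            # Apply each move by swapping the blank (0) with an adjacent tile,
            # mirroring the "u"/"d"/"l"/"r" convention in the snippets below.
            deltas = {"u": (-1, 0), "d": (1, 0), "l": (0, -1), "r": (0, 1)}
            row, col = self.current_position(0, 0)
            for move in move_string:
                d_row, d_col = deltas[move]
                new_row, new_col = row + d_row, col + d_col
                self._grid[row][col], self._grid[new_row][new_col] = \
                    self._grid[new_row][new_col], self._grid[row][col]
                row, col = new_row, new_col

    puzzle = MiniPuzzle([[1, 0, 2],
                         [3, 4, 5]])
    puzzle.update_puzzle("l")  # blank steps left, so tile 1 slides right
    print(puzzle._grid)        # [[0, 1, 2], [3, 4, 5]]: this 2x3 grid is solved

Under that convention, the move string returned by solve_row1_tile is simply a replayable log of blank-tile steps, which is why the snippets below can assert row and column invariants immediately before and after applying it.
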
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n 
self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). \n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n 
self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\n move_str = 'ur'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(target_row, 0)\n if cur_row == target_row and cur_col == 0:\n move_str += 'r' * (self._width - 2)\n else:\n move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)\n move_str += 'ruldrdlurdluurddlur'\n move_str += 'r' * (self._width - 2)\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * 
(col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def solve_row0_tile(self, target_col):\n move_str = 'ld'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(0, target_col)\n if cur_row == 0 and cur_col == target_col:\n return move_str\n else:\n move_str += self.position_tile(1, target_col-1, cur_row, cur_col)\n move_str += 'urdlurrdluldrruld'\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n 
cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n 
elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile 
position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), 
\"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in 
the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n move_str += 'rdlu'\n 
self.update_puzzle('rdlu')\n return move_str", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position (0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, 0), \"tiles to right and below incorrectly ordered\"\n\n # Move zero tile from target position (target_row, 0) to (target_row - 1, 1).\n self.update_puzzle(\"ur\")\n\n move_str = \"\"\n \n # correct_tile's position is determined after moving zero tile \"ur\" \n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(target_row, 0)\n \n # Moves to reposition correct_tile to target position.\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(target_row, 0):\n move_str += str(self.position_tile(target_row - 1, 1, correct_tile))\n move_str += str(\"ruldrdlurdluurddlur\")\n\n # Moves to reposition zero tile to end of column of target_row + 1.\n move_str += str(\"r\" * (self.get_width() - 2)) \n \n self.update_puzzle(move_str)\n\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1) \n \n move_str = \"ur\" + move_str\n return move_str", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. 
\r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). \r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row0_tile(self, target_col):\r\n assert self.row0_invariant(target_col)\r\n move = \"ld\"\r\n self.update_puzzle(move)\r\n \r\n row, col = self.current_position(0, target_col)\r\n if row == 0 and col == target_col:\r\n return move\r\n else:\r\n move_to_target = self.move_to_target(1, target_col - 1, row, col)\r\n # 2x3 puzzle solver\r\n move_to_target += \"urdlurrdluldrruld\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n return move", "def solve_row0_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row0_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n \n # Move zero tile from target position (0, target_col) to (1, target_col - 1) \n self.update_puzzle(\"ld\")\n \n move_str = \"\"\n\n # correct_tile's position is determined after moving zero tile \"ld\"\n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(0, target_col) \n \n # Moves to reposition correct_tile to target position, and\n # the zero tile to (1, target_col - 1).\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(0, target_col):\n move_str += str(self.position_tile(1, target_col - 1, correct_tile))\n move_str += str(\"urdlurrdluldrruld\")\n\n self.update_puzzle(move_str)\n\n assert self.row1_invariant(target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"ld\" + move_str\n return move_str", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of 
target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number 
-=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n 
self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" * rotation_num) \n return move_str", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - 
zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO 
is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n 
self.update_puzzle(move)\r\n return move", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n 
self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve(self, board) -> None:\n if board == [[]] or board == []:\n return\n\n r, c = len(board), len(board[0])\n\n from collections import deque\n queue = deque()\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n queue.append([i, j])\n board[i][j] = 'M'\n\n while queue:\n i, j = queue.popleft()\n for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):\n if 0 <= x <= r - 1 and 0 <= y <= c - 1 and board[x][y] == 'O':\n board[x][y] = 'M'\n queue.append([x, y])\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 
'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def human_go(self, board):\r\n coord_pattern = re.compile(\"[0-{}]$\".format(board.shape[1]))\r\n print(\"Enter Column and press enter.\")\r\n input_str = input(\"(from 0-6)\\n\")\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n col = int(input_str)\r\n if board[0][col] != 0:\r\n print(\"That column is already full, please try again\")\r\n self.human_go()\r\n else:\r\n for row in board[::-1]:\r\n if row[col] == 0:\r\n row[col] = -1\r\n return board", "def make_move(self, column):\r\n trans_board = numpy.transpose(self.__board[::1]) # transpose the\r\n # board so that columns are now arrays\r\n if 0 not in trans_board[column] or self.get_winner() or column >= \\\r\n self.BOARD_COLUMNS or column < 0:\r\n # column is full, illegal or the game is already finished\r\n return self.ILLEGAL_MOVE # exception?\r\n else:\r\n reversed_col = list(reversed(trans_board[column]))\r\n for hole in reversed_col:\r\n if hole == 0:\r\n row_i = self.BOARD_ROWS - 1 - reversed_col.index(hole)\r\n self.__board[row_i][column] = self.__cur_player\r\n winner = self.get_winner()\r\n if winner: # is not none\r\n return winner\r\n self.__switch_player()", "def solve(self, board: List[List[str]]) -> None:\n if board == [] or board == [[]]: # corner case\n return\n\n r, c = len(board), len(board[0])\n\n def dfs(i, j): # visited i, j neighbors and change o to M\n if i < 0 or i > r - 1 or j < 0 or j > c - 1 or board[i][j] == 'X' or board[i][j] == 'M':\n return\n\n board[i][j] = 'M'\n dfs(i - 1, j)\n dfs(i + 1, j)\n dfs(i, j - 1)\n dfs(i, j + 1)\n\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n dfs(i, j)\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n rows = len(board)\n cols = len(board[0])\n\n def explore(i, j):\n if i < 0 or i >= rows:\n return\n if j < 0 or j >= cols:\n return\n if board[i][j] != \"O\":\n return\n board[i][j] = \"Y\"\n explore(i + 1, j)\n explore(i - 1, j)\n explore(i, j + 1)\n explore(i, j - 1)\n\n for i in [0, rows - 1]:\n for j in 
range(cols):\n explore(i, j)\n\n for j in [0, cols - 1]:\n for i in range(rows):\n explore(i, j)\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == \"Y\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"", "def solve(self, board: List[List[str]]) -> None:\n if board is None or len(board) == 0:\n return \n row, col = len(board), len(board[0])\n for i in range(row):\n self.dfs(board, i, 0)\n self.dfs(board, i, col - 1)\n for j in range(col):\n self.dfs(board, 0, j)\n self.dfs(board, row-1, j)\n for i in range(row):\n for j in range(col):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '-':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n m, n = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < m or not 0 <= y < n or board[x][y] != \"O\":\n return\n direction = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n board[x][y] = \"Y\"\n for d in direction:\n dfs(x+d[0], y+d[1])\n\n for i in range(m):\n dfs(i, 0)\n dfs(i, n-1)\n for i in range(1, n-1):\n dfs(0, i)\n dfs(m-1, i)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"Y\":\n board[i][j] = \"O\"", "def solve(self, board: List[List[str]]) -> None:", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def solve(self, board: List[List[str]]) -> None:\r\n if not board:\r\n return\r\n m, n = len(board), len(board[0])\r\n board_filter = lambda p: 0 <= p[0] < m and 0 <= p[1] < n and board[p[0]][p[1]] == 'O'\r\n queue = list(filter(board_filter, [(x, y) for r in range(max(m, n)) \r\n for x, y in ((r, 0), (r, n-1), (0, r), (m-1, r))]))\r\n while queue:\r\n x, y = queue.pop()\r\n board[x][y] = 'W'\r\n queue.extend(list(filter(board_filter, ((x-1, y), (x+1, y), (x, y-1), (x, y+1)))))\r\n for row in board:\r\n for i, c in enumerate(row):\r\n row[i] = 'XO'[c=='W']", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += 
\"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"ulld\"\r\n return moves_str", "def move_to_target(self, target_row, target_col, row, col):\r\n move = \"\"\r\n # typical move to move target tile to target pos.\r\n solver_move = \"druld\"\r\n # move up first\r\n move = (target_row - row) * \"u\"\r\n # conditional statements for moving the tile:\r\n # 1. case curr_pos of tile and target_tile are in same col\r\n if (target_col - col) == 0:\r\n move += \"ld\" + ((target_row - row) - 1) * solver_move\r\n else:\r\n # 2. curr_pos of tile is on the left of target pos\r\n if (target_col - col) > 0:\r\n move += (target_col - col) * \"l\"\r\n if row == 0:\r\n move += (abs(target_col - col) - 1) * \"drrul\"\r\n else:\r\n move += (abs(target_col - col) - 1) * \"urrdl\"\r\n # 3. 
curr_pos of tile is on the right of target pos:\r\n elif (target_col - col) < 0:\r\n move += (abs(target_col - col) - 1) * \"r\"\r\n if row == 0:\r\n move += abs(target_col - col) * \"rdllu\"\r\n else:\r\n move += abs(target_col - col) * \"rulld\"\r\n move += (target_row - row) * solver_move\r\n return move", "def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def move(self, row, col, player):\r\n if player == 1:\r\n self.mat[row][col] = 1\r\n else:\r\n self.mat[row][col] = -1\r\n if self.checkrow(player,row) or self.checkcol(player,col):\r\n return player\r\n if row == col or row + col == self.size-1:\r\n if self.checkdiag(player):\r\n return player\r\n return 0", "def execute(self, row, col, action=None):\n assert action is not None, \"No action selected!\"\n\n if action == 'north':\n if (row-1) < 0 or self.board[row-1, col] == '*':\n return row, col\n elif action == 'south':\n if (row+1) >= self.N or self.board[row+1, col] == '*':\n return row, col\n elif action == 'east':\n if (col+1) >= self.M or self.board[row, col+1] == '*':\n return row, col\n elif action == 'west':\n if (col-1) < 0 or self.board[row, col-1] == '*':\n return row, col\n\n return row + self.step_row[action], col + self.step_col[action]", "def solve(self, board):\n def dfs(board, r, c):\n if r < 0 or c < 0 or r > rows - 1 or c > cols - 1 or board[r][c] == 'X' or board[r][c] == '#':\n return\n board[r][c] = '#'\n dfs(board, r - 1, c)\n dfs(board, r + 1, c)\n dfs(board, r, c - 1)\n dfs(board, r, c + 1)\n\n if len(board) == 0:\n return;\n rows = len(board)\n cols = len(board[0])\n for i in range(rows):\n for j in range(cols):\n if (i == 0 or j == 0 or i == rows - 1 or j == cols - 1) and board[i][j] == 'O':\n dfs(board, i, j)\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == '#':\n board[i][j] = 'O'", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == 
new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def move(self, row: int, col: int, player: int) -> int:\n if player == 1:\n self.newList[row][col] = 1\n self.colSum[col] += 1\n self.rowSum[row] += 1\n if row == col:\n self.diag += 1\n if row + col == (self.n - 1):\n self.revDiag += 1\n if self.rowSum[row] == self.n or self.colSum[col] == self.n or self.diag == self.n or self.revDiag == self.n:\n return 1\n if player == 2:\n self.newList[row][col] = -1\n self.colSum[col] -= 1\n self.rowSum[row] -= 1\n if row == col:\n self.diag -= 1\n if row + col == (self.n - 1):\n self.revDiag -= 1\n if self.rowSum[row] == -self.n or self.colSum[col] == -self.n or self.diag == -self.n or self.revDiag == -self.n:\n return 2\n \n return 0", "def solve(self, board):\r\n if not board or not board[0]:\r\n return\r\n \r\n self.m = len(board)\r\n self.n = len(board[0])\r\n boarder = []\r\n \r\n # Collecting all the 'O' on the boarder\r\n for i in range(self.m):\r\n if board[i][0] == 'O':\r\n boarder.append([i, 0])\r\n if board[i][self.n-1] == 'O':\r\n boarder.append([i, self.n-1])\r\n for j in range(self.n):\r\n if board[0][j] == 'O':\r\n boarder.append([0, j])\r\n if board[self.m-1][j] == 'O':\r\n 
boarder.append([self.m-1, j])\r\n \r\n for row, col in boarder:\r\n self.BFS(board, row, col)\r\n \r\n for row in range(self.m):\r\n for col in range(self.n):\r\n if board[row][col] == 'O':\r\n board[row][col] = 'X'\r\n elif board[row][col] == 'E':\r\n board[row][col] = 'O'\r\n print(board)", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = 1 if player == 1 else -1\n rowsum = sum(self.board[row])\n colsum = sum([self.board[r][col] for r in range(self.n)])\n diagsum1 = sum([self.board[i][i] for i in range(self.n)])\n diagsum2 = sum([self.board[i][-i-1] for i in range(self.n)])\n if player == 1:\n if rowsum == self.n or colsum == self.n or diagsum1 == self.n or diagsum2 == self.n:\n return 1\n else:\n if rowsum == -self.n or colsum == -self.n or diagsum1 == -self.n or diagsum2 == -self.n:\n return 2\n return 0", "def make_move(board: Connect4Board) -> \"(row, col)\":\r\n\r\n while True:\r\n\r\n try:\r\n\r\n print('\\nPlease Specify your move. Enter the number column of a cell on the board.')\r\n print('-'*85)\r\n \r\n col = Connect4GameUI.move_col(board)\r\n row = Connect4GameUI._get_valid_row(board, col)\r\n print(row,col)\r\n return row, col\r\n\r\n break\r\n\r\n except:\r\n print('\\nInvalid move!!!')\r\n print('Please try it again.')", "def move(self, row: int, col: int, player: int) -> int:\n n = self.n\n if player == 1:\n self.rows_1[row] += 1\n self.cols_1[col] += 1\n if player == 2:\n self.rows_2[row] += 1\n self.cols_2[col] += 1\n if row == col:\n self.diag1[row] = player\n if row + col + 1 == n:\n self.diag2[row] = player\n f = 0\n g = 0\n for i in range(n):\n if self.rows_1[row] == n or self.cols_1[col] == n:\n return 1\n if self.rows_2[row] == n or self.cols_2[col] == n:\n return 2 \n if self.diag1[i] != self.diag1[0]:\n f = 1\n if self.diag2[i] != self.diag2[0]:\n g = 1\n if f == 0:\n return self.diag1[0]\n if g == 0:\n return self.diag2[0]\n return 0", "def solve(self, board: List[List[str]]) -> None:\n def dfs(board, i, j):\n m = len(board)\n n = len(board[0])\n if i < 0 or i >= m or j < 0 or j >= n: return\n\n if board[i][j] != 'O': return\n\n board[i][j] = '#'\n [dfs(board, i+di, j+dj) for di, dj in [(0, 1), (1, 0), (0, -1), (-1, 0)]]\n\n if len(board) == 0: return\n m = len(board)\n n = len(board[0])\n\n for i in range(m):\n dfs(board, i, 0)\n dfs(board, i, n-1)\n\n for j in range(n):\n dfs(board, 0, j)\n dfs(board, m-1, j)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n self.nRow, self.nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < self.nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < self.nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, self.nRow - 1]:\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, self.nCol - 1]:\n for kr in range(self.nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(self.nRow):\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0:\n return\n self.h = 
len(board)\n self.w = len(board[0])\n self.board = board\n for i in range(self.h):\n for j in range(self.w):\n if i == 0 or i == self.h-1 or j == 0 or j == self.w-1:\n #print (i,j)\n self.dfs((i,j))\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"O\":\n self.board[i][j]=\"X\"\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"#\":\n self.board[i][j]=\"O\"", "def move(self, row, col, player):\n offset = player * 2 - 3 # 1 or -1\n self.row[row] += offset\n self.col[col] += offset\n if row == col:\n self.diag += offset\n if row + col == self.n - 1:\n self.anti_diag += offset\n if self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 2\n if -self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 1\n return 0", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def solve_util(self, board, col):\n try:\n if col == self.N:\n self.print_sol(board)\n return True\n\n # Trying to place this queen in all rows one by one\n res = False\n for i in range(self.N):\n if self.is_safe(board, i, col):\n board[i][col] = 1\n res = self.solve_util(board, col + 1) or res\n if type(res) == dict:\n return res\n board[i][col] = 0 # Backtracking...\n\n # if queen cannot be placed in any row in this col, then alas\n # we return false..\n return res\n except KeyboardInterrupt:\n print('Keyboard Interrupted!')\n return self.Outputs", "def position_tile(self, target_row, target_col, cur_row, cur_col, need_ld=True):\n move_str = ''\n if cur_row == target_row:\n if cur_col < target_col:\n move_str += 'l' * (target_col - cur_col)\n if target_col - cur_col > 1:\n move_str += 'ur'\n move_str += 'druldru' * (target_col - cur_col - 1)\n else:\n move_str += 'ur' if not need_ld else ''\n need_ld = False\n else:\n move_str += 'r' * (cur_col - target_col)\n if cur_col - target_col > 1:\n move_str += 'ul'\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n else:\n need_ld = False\n else:\n move_str += 'u' * (target_row - cur_row)\n if cur_col < target_col:\n move_str += ('l' * (target_col - cur_col) + 'dru')\n move_str += 'druldru' * (target_col - cur_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n elif cur_col > target_col:\n move_str += ('r' * (cur_col - target_col) + 'dlu')\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n else:\n move_str += 'lddru' * (target_row - cur_row - 1)\n if need_ld:\n move_str += 'ld'\n return 
move_str", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def make_move(self, row, column):\n\t\tif self.board[int(row)][int(column)] == '-':\n\t\t\tself.board[int(row)][int(column)] = self.marker\n\t\telse:\n\t\t\tprint(\"That spot is occupied, you messed up, you lose your turn for doing bad things\")", "def solve(self, board: 'List[List[str]]') -> 'None':\n if not board:\n return\n m, n = len(board), len(board[0])\n save = [ij for k in range(m + n) for ij in ((0, k), (m - 1, k), (k, 0), (k, n - 1))]\n while save:\n i, j = save.pop()\n if -1 < i < m and -1 < j < n and board[i][j] == 'O':\n board[i][j] = 'S'\n save += (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)\n for row in board:\n for i, c in enumerate(row):\n row[i] = 'XO'[c == 'S']", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n nRow, nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, nRow - 1]:\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, nCol - 1]:\n for kr in range(nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(nRow):\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return\n\n m = len(board)\n n = len(board[0])\n\n uf = UnionFind(m * n + 1)\n dummy = m * n\n\n # connect 'O' at first and last col with dummy\n for i in range(m):\n if board[i][0] == 'O':\n uf.union(dummy, i * n)\n if board[i][-1] == 'O':\n uf.union(dummy, i * n + n - 1)\n\n # connect 'O' at first and last row with dummy\n for j in range(n):\n if board[0][j] == 'O':\n uf.union(dummy, j)\n if board[-1][j] == 'O':\n uf.union(dummy, n * (m-1) + j)\n\n d = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n\n for i in range(1, m-1):\n for j in range(1, n-1):\n if board[i][j] == 'O':\n for di, dj in d:\n x = i+di\n y = j+dj\n if board[x][y] == 'O':\n uf.union(x*n+y, i*n+j)\n\n # change not connected 'O' with 'X'\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i * n + j):\n board[i][j] = 'X'", "def result(self, row, col, move):\n start = (row, col)\n end = self.updateCell(row, col, move)\n\n return self.change(start, end)", "def move_x(self, row, column):\n\n #returns false if game has already been won\n if self._game_state != \"UNFINISHED\":\n return False\n\n # checks if x tries to move out of bounds\n if row not in range(8) or column not in range(8):\n return False\n\n # returns false/invalid move if x tries to move more than one row at a time or\n # non diagonal\n if (row - self._current_x_row) > 1 or (column - self._current_x_column) > 1 or (\n self._current_x_row - row) > 1 or (self._current_x_column - column) > 1:\n return False\n\n if self._current_x_column == column:\n return False\n\n if self._current_x_row == row:\n return False\n\n if \"o\" in self._board[row][column]:\n 
return False\n\n #places x in the specified row and column if the move is legal\n else:\n self._board[self._current_x_row].remove(\"x\")\n self._board[self._current_x_row].append(\"\")\n self._board[row][column] = \"x\"\n self._current_x_row = row\n self._current_x_column = column\n self._current_row += 1\n self._lower_right = (self._current_x_row + 1, self._current_x_column + 1)\n self._lower_left = (self._current_x_row + 1, self._current_x_column - 1)\n self._upper_right = (self._current_x_row - 1, self._current_x_column + 1)\n self._upper_left = (self._current_x_row - 1, self._current_x_column - 1)\n self._row1 = (\n self._board[0][0],\n self._board[1][0],\n self._board[2][0],\n self._board[3][0],\n self._board[4][0],\n self._board[5][0],\n self._board[6][0],\n self._board[7][0])\n\n self._row7 = (\n self._board[0][7],\n self._board[1][7],\n self._board[2][7],\n self._board[3][7],\n self._board[4][7],\n self._board[5][7],\n self._board[6][7],\n self._board[7][7])\n\n\n # checks if four \"o\" pieces surrounds x, if so, then x has no more moves and o wins\n if \"x\" not in self._board[7]:\n if \"o\" in self._board[self._lower_right[0]][self._lower_right[1]] and \"o\" in \\\n self._board[self._lower_left[0]][\n self._lower_left[1]] and \"o\" in self._board[self._upper_right[0]][\n self._upper_right[1]] and \"o\" in \\\n self._board[self._upper_left[0]][self._upper_left[1]]:\n self._game_state = \"O_WON\"\n\n # checks if x is in the last column and o pieces surrounds x, x loses\n if \"x\" in self._row7 and \"o\" in self._board[self._lower_left[0]][self._lower_left[1]] and \"o\" in \\\n self._board[self._upper_left[0]][self._upper_left[1]]:\n self._game_state = \"O_WON\"\n\n # checks if x is in the first row and o surrounds x, x loses\n if \"x\" in self._board[0] and \"o\" in self._board[self._lower_right[0]][self._lower_right[1]] and \"o\" in \\\n self._board[self._lower_left[0]][self._lower_left[1]]:\n self._game_state = \"O_WON\"\n\n # checks if x is in the first column and o pieces surrounds x, x loses\n if \"x\" in self._row1 and \"o\" in self._board[self._lower_right[0]][self._lower_right[1]] and \"o\" in \\\n self._board[self._upper_right[0]][self._upper_right[1]]:\n self._game_state = \"O_WON\"\n\n # winning condition for \"x\" piece upon reaching last row\n if \"x\" in self._board[7]:\n self._game_state = \"X_WON\"\n\n return True", "def move(self, row: int, col: int, player: int) -> int:\n s = -1 if player == 1 else 1\n\n self.rows[row] += s\n if abs(self.rows[row]) == self.n:\n return player\n\n self.cols[col] += s\n if abs(self.cols[col]) == self.n:\n return player\n\n if row == col:\n self.diagonals[0] += s\n if abs(self.diagonals[0]) == self.n:\n return player\n\n if (row + col) == self.n - 1:\n self.diagonals[1] += s\n if abs(self.diagonals[1]) == self.n:\n return player\n\n return 0", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if 
board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def move(self, row, column, symbol):\n game_state = self.determine_game_state()\n if game_state not in (GameState.GAME_NOT_STARTED, GameState.GAME_IN_PROGRESS):\n return MoveResults.MOVE_INVALID\n\n # check for initial move\n if self.board == BLANK_BOARD and symbol == O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # check for invalid row and column\n if row < 0 or row > 2 or column < 0 or column > 2:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece is valid\n if symbol != X_SYMBOL and symbol != O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece isn't moving out of turn\n x_moves = self.board.count(X_SYMBOL)\n o_moves = self.board.count(O_SYMBOL)\n if symbol == X_SYMBOL and x_moves > o_moves:\n return MoveResults.MOVE_INVALID\n elif symbol == O_SYMBOL and o_moves >= x_moves:\n # note that x always goes first.\n return MoveResults.MOVE_INVALID \n\n # figure out position.\n position = (3 * row) + column\n\n # make sure there's not already a piece there.\n if self.board[position] != EMPTY_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n self.board = self.board[:position] + symbol + self.board[position+1:] \n return MoveResults.MOVE_VALID", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve(self, board: List[List[str]]) -> None:\n def DFS(board, i, j):\n q = []\n q.append([i, j])\n \n while q:\n x, y = q.pop()\n board[x][y] = \"*\"\n neighbors = ((0, 1), (0, -1), (1, 0), (-1, 0))\n for dx, dy in neighbors:\n if 0 <= x + dx <= len(board) - 1 and 0 <= y + dy <= len(board[0]) - 1 and board[x + dx][y + dy] == \"O\":\n q.append([x + 
dx, y + dy])\n \n \n # first row\n i = 0\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last row\n i = len(board) - 1\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # first column\n j = 0\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last column\n j = len(board[0]) - 1\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n \n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"*\":\n board[i][j] = \"O\"", "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def put(self, column):\n column -= 1\n if self.occupied[column] >= 4 or self.won:\n return 'ERROR'\n self.history.append(column + 1)\n row = self.occupied[column]\n # assign 1 to player 1, and -1 to player 2\n if len(self.history) % 2 == 1:\n player = 1\n self.board[3 - row][column] = 1\n else:\n player = -1\n self.board[3 - row][column] = 2\n # add player score to column, row and diagonal scores\n self.columnScore[column] += player\n self.rowScore[row] += player\n self.occupied[column] += 1;\n if column == row:\n self.diagonal += player\n if column + row == 3:\n self.antidiagonal += player\n # check column, row and diagonal scores\n # if absolute value of one of them is 4\n # which means the original value is either 4 or -4\n # and one of the player has occupied all 4 of them\n # which means the player has won in that row/column/diagonal\n # and thus return \"WIN\"\n if (abs(self.rowScore[row]) == 4 or abs(self.columnScore[column]) == 4\n or abs(self.diagonal) == 4 or abs(self.antidiagonal) == 4):\n self.won = True\n return 'WIN'\n # check if there is still non-full columns\n # in other words check if the board is full\n for i in range(0, self.size):\n # if board is not full return \"OK\"\n if self.occupied[i] < 4:\n return 'OK'\n # if the board is full, return \"DRAW\"\n return 'DRAW'", "def move(self, row, col, player):\n if self.winning == True:\n return\n self.matrix[row][col] = player\n n = len(self.matrix)\n indicator = True\n for i in range(n):\n if self.matrix[row][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n indicator = True\n for i in range(n):\n if self.matrix[i][col] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n if row == col:\n indicator = True\n for i in range(n):\n if self.matrix[i][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n if row + col == n - 1:\n indicator = True\n for i in range(n):\n if self.matrix[i][n - 1 - i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n return 0", "def solve(self, board: 'List[List[str]]') -> 'None':\n\n def dfs(i, j, tmp):\n nonlocal flag\n if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):\n flag = False\n return\n if board[i][j] != 'O' or [i, j] in tmp:\n return\n tmp.append([i, j])\n dfs(i - 1, j, tmp)\n dfs(i + 1, j, tmp)\n dfs(i, j + 1, tmp)\n dfs(i, j - 1, tmp)\n return tmp\n\n change = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 'O' and [i, j] not in change:\n tmp = []\n flag = True\n tmp = dfs(i, j, tmp[:])\n if flag:\n for loc in tmp:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'\n\n for loc in change:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'", "def 
solve_(self, x, y, board, path):\n if self.SOLVED:\n return\n if self.is_done(board):\n self.print_path(path)\n self.SOLVED = True\n return\n for new_x, new_y in self.next_click(x, y, board):\n if new_x is None or new_y is None:\n return\n new_board = self.click(new_x, new_y, board)\n self.solve_(\n x=0, y=0,\n board=new_board,\n path=path + [((new_x, new_y), new_board)]\n )", "def move(self, row, col, player):\n toadd = 1 if player == 1 else -1\n \n self.row[row] += toadd\n self.col[col] += toadd\n if row == col: self.diagonal += toadd\n if col == self.n - row -1 : self.antidiag += toadd\n \n if abs(self.row[row]) == self.n or abs(self.col[col]) == self.n or abs(self.diagonal) == self.n or abs(self.antidiag) == self.n:\n return player\n else:\n return 0", "def move(self, row, col, player):", "def get_move(board, player):\n row, col = 0, 0\n return row, col", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def test_perform_move(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertFalse(p.perform_move(\"taco\"))\n self.assertTrue(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,0],[7,8,6]])\n self.assertFalse(p.perform_move('right'))\n p = hw.create_tile_puzzle(2, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('up'))\n self.assertFalse(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,0,4],[5,6,3,7]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertFalse(p.perform_move('down'))\n self.assertFalse(p.perform_move('left'))\n self.assertEqual(p.get_board(), [[0,1,2,3]])" ]
[ "0.7611342", "0.7530595", "0.7489633", "0.74769557", "0.7346029", "0.7312759", "0.72872084", "0.72763294", "0.72613305", "0.72458375", "0.72417307", "0.7236731", "0.72223794", "0.7210395", "0.71957016", "0.71925163", "0.7142041", "0.71206504", "0.70840454", "0.7064091", "0.7038805", "0.7027271", "0.69623935", "0.695526", "0.69471264", "0.694509", "0.69337404", "0.69234896", "0.6916079", "0.6878287", "0.68757343", "0.68463266", "0.6837197", "0.67863864", "0.6742842", "0.6735732", "0.67058396", "0.66578364", "0.66284144", "0.6625063", "0.6580358", "0.65620995", "0.65620995", "0.65620995", "0.65583694", "0.6551443", "0.6551443", "0.645354", "0.64497983", "0.6408909", "0.6379721", "0.6374615", "0.63398385", "0.633639", "0.6325701", "0.62638223", "0.6257799", "0.62237", "0.62125415", "0.61983734", "0.6138262", "0.60704345", "0.6063678", "0.6057803", "0.6055227", "0.6052307", "0.6052291", "0.60510117", "0.60505867", "0.60382706", "0.6028564", "0.60173017", "0.6004885", "0.6001108", "0.59939766", "0.5991939", "0.59812343", "0.5976079", "0.59581846", "0.5937005", "0.5924132", "0.59235555", "0.59208554", "0.591132", "0.59080476", "0.5894667", "0.5890435", "0.5885432", "0.5883782", "0.5882655", "0.5874512", "0.58741593", "0.5873913", "0.5868474", "0.58598614", "0.5855099", "0.584924", "0.58490455", "0.58463395", "0.58241105" ]
0.74047565
4
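The negatives in the record above all share one move primitive: slide the blank (0) tile one step per 'l'/'r'/'u'/'d' character, as in the repeated update_puzzle implementations. A minimal standalone sketch of that primitive, assuming a plain list-of-lists grid with 0 as the blank; the names apply_moves and find_zero are illustrative, not dataset API:

# A minimal sketch of the shared move primitive, assuming a list-of-lists
# grid where 0 marks the blank tile. Illustrative only; not dataset API.

def find_zero(grid):
    # Locate the blank tile.
    for row_idx, row in enumerate(grid):
        for col_idx, val in enumerate(row):
            if val == 0:
                return row_idx, col_idx
    raise ValueError("grid has no blank (0) tile")

def apply_moves(grid, move_string):
    # Apply 'l'/'r'/'u'/'d' moves in place, mirroring update_puzzle:
    # each character swaps the blank with the adjacent tile.
    deltas = {"l": (0, -1), "r": (0, 1), "u": (-1, 0), "d": (1, 0)}
    zero_row, zero_col = find_zero(grid)
    for direction in move_string:
        d_row, d_col = deltas[direction]
        new_row, new_col = zero_row + d_row, zero_col + d_col
        assert 0 <= new_row < len(grid), "move off grid: " + direction
        assert 0 <= new_col < len(grid[0]), "move off grid: " + direction
        grid[zero_row][zero_col] = grid[new_row][new_col]
        grid[new_row][new_col] = 0
        zero_row, zero_col = new_row, new_col
    return grid

# Example: [[4, 1, 2], [3, 0, 5], [6, 7, 8]] with moves "ul"
# becomes [[0, 4, 2], [3, 1, 5], [6, 7, 8]].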
Solve the upper left 2x2 part of the puzzle. Updates the puzzle and returns a move string
def solve_2x2(self):\n    cur_row, cur_col = self.current_position(0, 0)\n    move_str = 'u' * cur_row + 'l' * cur_col\n    self.update_puzzle(move_str)\n    if self.check_2x2_solved():\n        return move_str\n    else:\n        while not self.check_2x2_solved():\n            move_str += 'rdlu'\n            self.update_puzzle('rdlu')\n        return move_str
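Note that the solve_2x2 document above calls a check_2x2_solved helper that is not included in the record. A plausible minimal version, assuming the convention used by the other snippets that tile row * width + col belongs at (row, col), could look like this (hypothetical, inferred; not part of the dataset):

# Hypothetical helper assumed by solve_2x2 above: the upper left 2x2
# block is solved when each of its four positions holds its canonical
# tile value row * width + col (so 0 sits at (0, 0)). Inferred, not
# taken from the dataset.

def check_2x2_solved(self):
    return all(self.get_number(row, col) == row * self.get_width() + col
               for row in (0, 1) for col in (0, 1))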
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). 
\r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = 
self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" * rotation_num) \n return move_str", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n self.update_puzzle(move)\r\n return move", "def 
solve_2x2(self):\n # replace with your code\n string = ''\n num1 = self.get_number(0, 0)\n num2 = self.get_number(0, 1)\n num3 = self.get_number(1, 0)\n max_num = max([num1, num2, num3])\n min_num = min([num1, num2, num3])\n if num1 == min_num and num2 == max_num:\n string += 'ul'\n elif num1 == max_num and num3 == min_num:\n string += 'ul'\n string += 'rdlu' * 2\n elif num2 == min_num and num3 == max_num:\n string += 'ul'\n string += 'rdlu'\n print '2x2 Path', string\n self.update_puzzle(string)\n return string", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, 
len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n 
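An aside on these update_puzzle variants: the four moves invert pairwise, which gives a cheap alternative to the clone-and-probe pattern some of the solvers above use. A small hedged sketch; invert_moves and OPPOSITE are hypothetical names, not part of the original class.

OPPOSITE = {"l": "r", "r": "l", "u": "d", "d": "u"}

def invert_moves(move_string):
    """Return the move string that exactly undoes move_string, assuming
    the l/r/u/d blank-move semantics of update_puzzle."""
    return "".join(OPPOSITE[move] for move in reversed(move_string))

# Undoing a probe: update_puzzle("ur") followed by update_puzzle("dl")
# restores the grid, so an exploratory move need not be made on a clone.
assert invert_moves("ur") == "dl"
assert invert_moves("urrdl") == "rulld"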
self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n 
self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # 
Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve(self, board: List[List[str]]) -> None:\n rows = len(board)\n cols = len(board[0])\n\n def explore(i, j):\n if i < 0 or i >= rows:\n return\n if j < 0 or j >= cols:\n return\n if board[i][j] != \"O\":\n return\n board[i][j] = \"Y\"\n explore(i + 1, j)\n explore(i - 1, j)\n explore(i, j + 1)\n explore(i, j - 1)\n\n for i in [0, rows - 1]:\n for j in range(cols):\n explore(i, j)\n\n for j in [0, cols - 1]:\n for i in range(rows):\n explore(i, j)\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == \"Y\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n m, n = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < m or not 0 <= y < n or board[x][y] != \"O\":\n return\n direction = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n board[x][y] = \"Y\"\n for d in direction:\n dfs(x+d[0], y+d[1])\n\n for i in range(m):\n dfs(i, 0)\n dfs(i, n-1)\n for i in range(1, n-1):\n dfs(0, i)\n dfs(m-1, i)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"Y\":\n board[i][j] = \"O\"", "def solve(self, board) -> None:\n if board == [[]] or board == []:\n return\n\n r, c = len(board), len(board[0])\n\n from collections import deque\n queue = deque()\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n queue.append([i, j])\n board[i][j] = 'M'\n\n while queue:\n i, j = queue.popleft()\n for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):\n if 0 <= x <= r - 1 and 0 <= y <= c - 1 and board[x][y] == 'O':\n board[x][y] = 'M'\n queue.append([x, y])\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" 
* (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve(self, board: List[List[str]]) -> None:", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n 
queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def solve(self, board):\r\n if not board or not board[0]:\r\n return\r\n \r\n self.m = len(board)\r\n self.n = len(board[0])\r\n boarder = []\r\n \r\n # Collecting all the 'O' on the boarder\r\n for i in range(self.m):\r\n if board[i][0] == 'O':\r\n boarder.append([i, 0])\r\n if board[i][self.n-1] == 'O':\r\n boarder.append([i, self.n-1])\r\n for j in range(self.n):\r\n if board[0][j] == 'O':\r\n boarder.append([0, j])\r\n if board[self.m-1][j] == 'O':\r\n boarder.append([self.m-1, j])\r\n \r\n for row, col in boarder:\r\n self.BFS(board, row, col)\r\n \r\n for row in range(self.m):\r\n for col in range(self.n):\r\n if board[row][col] == 'O':\r\n board[row][col] = 'X'\r\n elif board[row][col] == 'E':\r\n board[row][col] = 'O'\r\n print(board)", "def solve(self, board: List[List[str]]) -> None:\n if board == [] or board == [[]]: # corner case\n return\n\n r, c = len(board), len(board[0])\n\n def dfs(i, j): # visited i, j neighbors and change o to M\n if i < 0 or i > r - 1 or j < 0 or j > c - 1 or board[i][j] == 'X' or board[i][j] == 'M':\n return\n\n board[i][j] = 'M'\n dfs(i - 1, j)\n dfs(i + 1, j)\n dfs(i, j - 1)\n dfs(i, j + 1)\n\n for i in range(r):\n for j in range(c):\n if (i == 0 or i == r - 1 or j == 0 or j == c - 1) and board[i][j] == 'O':\n dfs(i, j)\n\n for i in range(r):\n for j in range(c):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == 'M':\n board[i][j] = 'O'", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = 
(zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: 
x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', 
cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve(self, board: List[List[str]]) -> None:\r\n if not board:\r\n return\r\n m, n = len(board), len(board[0])\r\n board_filter = lambda p: 0 <= p[0] < m and 0 <= p[1] < n and board[p[0]][p[1]] == 'O'\r\n queue = list(filter(board_filter, [(x, y) for r in range(max(m, n)) \r\n for x, y in ((r, 0), (r, n-1), (0, r), (m-1, r))]))\r\n while queue:\r\n x, y = queue.pop()\r\n board[x][y] = 'W'\r\n queue.extend(list(filter(board_filter, ((x-1, y), (x+1, y), (x, y-1), (x, y+1)))))\r\n for row in board:\r\n for i, c in enumerate(row):\r\n row[i] = 'XO'[c=='W']", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we 
suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def solve_col0_tile(self, target_row):\r\n # check if curr_pos (i, 0) where i > 1\r\n assert self.lower_row_invariant(target_row, 0)\r\n move = \"ur\"\r\n self.update_puzzle(move)\r\n row, col = self.current_position(target_row, 0)\r\n if row == target_row and col == 0:\r\n move_to_target = (self.get_width() - 2) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n else:\r\n move_to_target = self.move_to_target(target_row - 1, 1, row, col)\r\n # add solver move to str\r\n move_to_target += \"ruldrdlurdluurddlu\" + (self.get_width() - 1) * \"r\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)\r\n return move", "def solve_col0_tile(self, target_row):\n move_str = 'ur'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(target_row, 0)\n if cur_row == target_row and cur_col == 0:\n move_str += 'r' * (self._width - 2)\n else:\n move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)\n move_str += 'ruldrdlurdluurddlur'\n move_str += 'r' * (self._width - 2)\n self.update_puzzle(move_str[2:])\n return move_str", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n self.nRow, self.nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < self.nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < self.nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, self.nRow - 1]:\n for kc in 
range(self.nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, self.nCol - 1]:\n for kr in range(self.nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(self.nRow):\n for kc in range(self.nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve_col0_tile(self, target_row):\r\n # print '----------------------------------'\r\n # print 'SOLVING ZERO COLOUMN'\r\n assert self.lower_row_invariant(target_row,0), \"Invarian is False\"\r\n whole_move = ''\r\n #current_position = self.current_position(target_row, 0)\r\n current_row, current_col = self.current_position(target_row, 0)\r\n zero_row, zero_col = target_row, 0\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, 0)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ur'\r\n \r\n recomended_move = 'ur'\r\n whole_move += recomended_move\r\n zero_col += len(filter(lambda x: x=='r', recomended_move))\r\n zero_row -= len(filter(lambda x: x=='u', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n if self.current_position(target_row, 0) == \\\r\n (target_row, 0):\r\n # print 'TT stays in correct place after recomended move'\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n self.update_puzzle(zero_to_place_move)\r\n whole_move += zero_to_place_move\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move\r\n #move ZERO tile to the right\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n ### reposition TT to (target_row -1, 1)\r\n ### reposition ZERO tile to (target_row-1,0)\r\n \r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (current_col - zero_col) * 'r'\r\n path_for_zero = (zero_row - current_row) * 'u' + (current_col - zero_col) * 'r'\r\n whole_move += path_for_zero\r\n zero_col += len(filter(lambda x: x=='r', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n counter = 0\r\n while self.current_position(target_row, 0) != (target_row -1, 1) or \\\r\n (zero_row,zero_col) != (target_row-1,0):\r\n cyclic_moves = ''\r\n current_position = self.current_position(target_row, 0)\r\n current_row, current_col = current_position\r\n # print 'We are in while loop'\r\n counter += 1\r\n if zero_row < current_row:\r\n # print 'Zero row under current TT '\r\n if self.current_position(target_row, 0) == (target_row -1, 1):\r\n # print 'TT is in the reccomended posiont (target_row -1, 1) \\n and we are movind zero to the left side of TT '\r\n cyclic_moves = 'ld'\r\n whole_move += 
cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n else:\r\n # print 'TT should be one tile down'\r\n cyclic_moves = 'lddru'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n #### Case 1 if ZERO located in the right of\r\n #### target tile (after it)\r\n if zero_col > current_col:\r\n # print ' Look in the up puzzle, zero in the right side'\r\n if current_col != 1:\r\n # print 'not under target place (target_row -1, 1)'\r\n cyclic_moves = 'dllur'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n whole_move += cyclic_moves\r\n # elif current_col == 1 and self._grid[zero_row+1][zero_col] < \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n elif current_col == 1: \r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self._grid[zero_row+1][zero_col] > \\\r\n self._grid[current_position[0]][current_position[1]]:\r\n print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n cyclic_moves = 'ul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n ### Solwing 3x2 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 3x2 puzzle formed recently'\r\n move3x2 = 'ruldrdlurdluurddlur'\r\n whole_move += move3x2\r\n zero_col -= len(filter(lambda x: x=='l', move3x2))\r\n zero_col += len(filter(lambda x: x=='r', move3x2))\r\n zero_row += len(filter(lambda x: x=='d', move3x2))\r\n zero_row -= len(filter(lambda x: x=='u', move3x2))\r\n self.update_puzzle(move3x2)\r\n # print 'Grid afret 3x2 solver move'\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(target_row, 0)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, 0)\r\n #####Moving ZERO to the (target_row - 1, n - 1) position where\r\n ##### 'n' is a grid height.\r\n # print self._width-1 - zero_col\r\n zero_to_place_move = 'r' * (self._width-1 - zero_col)\r\n whole_move += zero_to_place_move\r\n self.update_puzzle(zero_to_place_move)\r\n # print self\r\n assert self.lower_row_invariant(target_row-1,self._width-1), \"Invarian is False\"\r\n return whole_move", "def solve(self, board: 'List[List[str]]') -> 'None':\n if not board:\n return\n m, n = len(board), len(board[0])\n save = [ij for k in range(m + n) for ij in ((0, k), (m - 1, k), (k, 0), (k, n - 1))]\n while save:\n i, j = save.pop()\n if -1 < i < m and -1 < j < n and board[i][j] == 'O':\n board[i][j] = 'S'\n save += (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)\n for row in board:\n for i, c in enumerate(row):\n row[i] = 'XO'[c == 'S']", "def solve_interior_tile(self, target_row, target_col):\n # replace with your 
code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def solve_row1_tile(self, target_col):\n cur_row, cur_col = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)\n self.update_puzzle(move_str)\n return move_str", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - 
len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the 
left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * (col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def solve(self, board: List[List[str]]) -> None:\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n nRow, nCol = len(board), len(board[0])\n\n def helper(kr, kc):\n board[kr][kc] = '.'\n kr > 0 and board[kr - 1][kc] == 'O' and helper(kr - 1, kc)\n kr < nRow - 1 and board[kr + 1][kc] == 'O' and helper(kr + 1, kc)\n kc > 0 and board[kr][kc - 1] == 'O' and helper(kr, kc - 1)\n kc < nCol - 1 and board[kr][kc + 1] == 'O' and helper(kr, kc + 1)\n\n for kr in [0, nRow - 1]:\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n for kc in [0, nCol - 1]:\n for kr in range(nRow):\n if board[kr][kc] == 'O':\n helper(kr, kc)\n\n for kr in range(nRow):\n for kc in range(nCol):\n if board[kr][kc] == 'O':\n board[kr][kc] = 'X'\n elif board[kr][kc] == '.':\n board[kr][kc] = 'O'\n\n return", "def solve(self, board) -> None:\n x_length = len(board)\n if x_length == 0: \n return\n\n y_length = len(board[0])\n confirmed = set()\n dfs = []\n for i in range(x_length):\n if board[i][0] == 'O':\n board[i][0] = 'temp'\n dfs.append((i, 0))\n if board[i][y_length - 1] == 'O':\n board[i][y_length - 1] = 'temp'\n dfs.append((i, y_length - 1))\n for j in range(y_length):\n if board[0][j] == 'O':\n board[0][j] = 'temp'\n dfs.append((0, j))\n if board[x_length - 1][j] == 'O':\n board[x_length - 1][j] = 'temp'\n dfs.append((x_length - 1, j))\n while dfs:\n i, j = dfs.pop()\n confirmed.add((i, j))\n if i+1 < x_length and board[i+1][j] == 'O':\n board[i+1][j] = 'temp'\n dfs.append((i + 1, j))\n if i > 0 and board[i-1][j] == 'O':\n board[i-1][j] = 'temp'\n dfs.append((i-1, j))\n if j+1 < y_length and board[i][j+1] == 'O':\n board[i][j+1] = 'temp'\n dfs.append((i, j + 1))\n if j > 0 and board[i][j-1] == 'O':\n board[i][j-1] = 'temp'\n 
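One of the board entries just below builds the grid graph with a UnionFind class that is never defined anywhere in these snippets. Here is a minimal sketch matching the two calls it makes, uf.union and uf.connected; the internals (path-halving find, union by size) are my assumptions, not recovered from the source.

class UnionFind:
    """Disjoint-set sketch matching the uf.union / uf.connected calls
    in the Union-Find board solution below; internals are assumptions."""

    def __init__(self, size):
        self.parent = list(range(size))
        self.weight = [1] * size

    def find(self, node):
        # Path halving: point every other node on the walk at its grandparent.
        while self.parent[node] != node:
            self.parent[node] = self.parent[self.parent[node]]
            node = self.parent[node]
        return node

    def union(self, left, right):
        root_l, root_r = self.find(left), self.find(right)
        if root_l == root_r:
            return
        if self.weight[root_l] < self.weight[root_r]:
            root_l, root_r = root_r, root_l
        self.parent[root_r] = root_l
        self.weight[root_l] += self.weight[root_r]

    def connected(self, left, right):
        return self.find(left) == self.find(right)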
dfs.append((i, j-1))\n for i in range(x_length):\n for j in range(y_length):\n if (i, j) in confirmed:\n board[i][j] = 'O'\n else:\n board[i][j] = 'X'\n return", "def solve(board):\r\n \r\n #An O(2mn) time solution; the first O(mn) traversal is to preform a bfs on all tiles attached to edge 'O' tiles (can't convert to 'X's); the second is to convert all remaining 'O's into 'X's\r\n \r\n def bfs(curr,r,c):\r\n if not curr: return\r\n prev = len(curr)\r\n for n in range(0,prev):\r\n i,j = curr[n][0],curr[n][1]\r\n board[i][j] = 'A'\r\n for x,y in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n x_n = i+x\r\n y_n = j+y\r\n if x_n >= 0 and x_n < r and y_n >= 0 and y_n < c and board[x_n][y_n] == \"O\":\r\n curr += [(x_n,y_n)]\r\n bfs(curr[prev:],r,c)\r\n\r\n \r\n q,r,c = [],len(board),len(board[0])\r\n if not r or q: return\r\n\r\n for i in range(r):\r\n for j in range(c):\r\n if (i==0 or j==0 or i==r-1 or j==c-1) and board[i][j] == \"O\":\r\n q += [(i,j)]\r\n \r\n bfs(q,r,c)\r\n\r\n for i in range(r):\r\n for j in range(c): \r\n if board[i][j] == \"O\": \r\n board[i][j] = \"X\"\r\n elif board[i][j] == \"A\":\r\n board[i][j] = \"O\"\r\n \r\n return", "def solve(self, board: List[List[str]]) -> None:\n if board is None or len(board) == 0:\n return \n row, col = len(board), len(board[0])\n for i in range(row):\n self.dfs(board, i, 0)\n self.dfs(board, i, col - 1)\n for j in range(col):\n self.dfs(board, 0, j)\n self.dfs(board, row-1, j)\n for i in range(row):\n for j in range(col):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '-':\n board[i][j] = 'O'", "def solve_row0_tile(self, target_col):\n move_str = 'ld'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(0, target_col)\n if cur_row == 0 and cur_col == target_col:\n return move_str\n else:\n move_str += self.position_tile(1, target_col-1, cur_row, cur_col)\n move_str += 'urdlurrdluldrruld'\n self.update_puzzle(move_str[2:])\n return move_str", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return\n\n m = len(board)\n n = len(board[0])\n\n uf = UnionFind(m * n + 1)\n dummy = m * n\n\n # connect 'O' at first and last col with dummy\n for i in range(m):\n if board[i][0] == 'O':\n uf.union(dummy, i * n)\n if board[i][-1] == 'O':\n uf.union(dummy, i * n + n - 1)\n\n # connect 'O' at first and last row with dummy\n for j in range(n):\n if board[0][j] == 'O':\n uf.union(dummy, j)\n if board[-1][j] == 'O':\n uf.union(dummy, n * (m-1) + j)\n\n d = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n\n for i in range(1, m-1):\n for j in range(1, n-1):\n if board[i][j] == 'O':\n for di, dj in d:\n x = i+di\n y = j+dj\n if board[x][y] == 'O':\n uf.union(x*n+y, i*n+j)\n\n # change not connected 'O' with 'X'\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i * n + j):\n board[i][j] = 'X'", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans 
+= str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def half_turn_solve(db, position):\n print('- Table Lookup: ', end='')\n solve_sequence = half_turn_lookup.lookup_position(db, position)\n temp_cube = Cube(position)\n for move in solve_sequence:\n dyn_move(temp_cube, move)\n print(move.name, end=' ')\n print()\n return solve_sequence", "def solve_puzzle(self):\n moves = self.moves\n peg_pos = self.peg_pos\n move_z = self.move_to_height\n \n print('Solving Tower of Hanoi:')\n for i, move in enumerate(moves):\n des_peg = move[0]\n des_peg_pos = peg_pos[des_peg]\n \n #move to peg\n print(' Moving to peg: '+str(des_peg)+' at: '+str(des_peg_pos))\n self.move_to(des_peg_pos[0], des_peg_pos[1], move_z)\n \n #if index is even, pickup disk, else drop disk\n if i % 2 == 0:\n print(' Picking up disk at height: '+str(move[1]))\n self.pick(move[1])\n else:\n print(' Dropping disk')\n self.drop()\n print('Finished solving puzzle')", "def solve(board) -> None:\n rows = len(board)\n if rows==0:\n return board\n cols = len(board[0])\n \n def is_border(rc):\n (rr, cc) =rc\n if rr<rows and rr< cols and rr>=0 and cc>=0 and board[rr][cc]=='O' and (rr==0 or rr==rows-1 or cc==0 or cc==cols-1):\n return True\n return False\n \n transf = []\n for r in range(rows):\n for c in range(cols):\n if board[r][c]=='O' and not is_border((r,c)) and not any(map(is_border, [(r-1, c), (r+1, c), (r, c-1), (r, c+1)])):\n transf.append((r,c))\n if transf:\n for r,c in transf:\n board[r][c]='X'\n return board", "def solve(self, board: List[List[str]]) -> None:\n def dfs(board, i, j):\n m = len(board)\n n = len(board[0])\n if i < 0 or i >= m or j < 0 or j >= n: return\n\n if board[i][j] != 'O': return\n\n board[i][j] = '#'\n [dfs(board, i+di, j+dj) for di, dj in [(0, 1), (1, 0), (0, -1), (-1, 0)]]\n\n if len(board) == 0: return\n m = len(board)\n n = len(board[0])\n\n for i in range(m):\n dfs(board, i, 0)\n dfs(board, i, 
n-1)\n\n for j in range(n):\n dfs(board, 0, j)\n dfs(board, m-1, j)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def solve(self, board) -> None:\n coords = []\n board_len = len(board)\n row_len = len(board[0]) - 1\n # top\n # coords.append([[0, i] for i, q in enumerate(board[0]) if q == \"O\"])\n # # bottom\n # coords.append(\n # [[board_len, i] for i, q in enumerate(board[board_len]) if q == \"O\"]\n # )\n for i in range(board_len):\n row_coord = [[i,indx] for indx, q in enumerate(board[i]) if q == \"O\"]\n # import pdb; pdb.set_trace()\n for x in row_coord:\n coords.append(x)\n for x in coords:\n if len(x) == 0:\n continue\n if x[0] == 0:\n print(\"top border\")\n elif x[0] == board_len - 1:\n print(\"bottom border\")\n elif x[1] == 0:\n print(\"left border\")\n elif x[1] == row_len:\n prin(\"right border\")", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). \n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def solve(self, board) -> None:\n for index in range (1, len(board)-1):\n arr = board[index]\n for ch in range(1, len(arr)-1):\n if arr[ch] is 'O':\n safe = True\n if ch-1 == 0 and arr[ch-1] is 'O':\n safe = False\n if ch +1 == len(arr)-1 and arr[ch+1] is 'O':\n safe = False\n if index -1 == 0 and board[index-1][ch] is 'O':\n safe = False\n if index + 1 == len(board)-1 and board[index + 1][ch] is 'O':\n safe = False\n if safe:\n arr[ch] = 'X'", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve(self):\n # If board is filled, board is trivially solved\n if self.check_full_board():\n return self.done\n\n # Iterate over every square in the board\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n\n # If square is empty, begin plugging in possible values\n if self.check_empty_space(row, col):\n for val in range(1, 10):\n if not self.check_row(val, row) and \\\n not self.check_column(val, col) and \\\n not self.check_box(val, self.what_box(row, col)):\n self.board[row][col] = val\n \n if self.solve():\n return self.done()\n \n # Didn't work; undo assigment\n self.board[row][col] = ' '\n\n # Bad path; backtrack\n return False", "def solve(self, board: 
'List[List[str]]') -> 'None':\n\n def dfs(i, j, tmp):\n nonlocal flag\n if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):\n flag = False\n return\n if board[i][j] != 'O' or [i, j] in tmp:\n return\n tmp.append([i, j])\n dfs(i - 1, j, tmp)\n dfs(i + 1, j, tmp)\n dfs(i, j + 1, tmp)\n dfs(i, j - 1, tmp)\n return tmp\n\n change = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 'O' and [i, j] not in change:\n tmp = []\n flag = True\n tmp = dfs(i, j, tmp[:])\n if flag:\n for loc in tmp:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'\n\n for loc in change:\n i, j = loc[0], loc[1]\n board[i][j] = 'X'", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] 
== \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def solve(self, board):\n def dfs(board, r, c):\n if r < 0 or c < 0 or r > rows - 1 or c > cols - 1 or board[r][c] == 'X' or board[r][c] == '#':\n return\n board[r][c] = '#'\n dfs(board, r - 1, c)\n dfs(board, r + 1, c)\n dfs(board, r, c - 1)\n dfs(board, r, c + 1)\n\n if len(board) == 0:\n return;\n rows = len(board)\n cols = len(board[0])\n for i in range(rows):\n for j in range(cols):\n if (i == 0 or j == 0 or i == rows - 1 or j == cols - 1) and board[i][j] == 'O':\n dfs(board, i, j)\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == '#':\n board[i][j] = 'O'", "def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0:\n return\n self.h = len(board)\n self.w = len(board[0])\n self.board = board\n for i in range(self.h):\n for j in range(self.w):\n if i == 0 or i == self.h-1 or j == 0 or j == self.w-1:\n #print (i,j)\n self.dfs((i,j))\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"O\":\n self.board[i][j]=\"X\"\n for i in range(self.h):\n for j in range(self.w):\n if self.board[i][j]==\"#\":\n self.board[i][j]=\"O\"", "def solve(self, board: List[List[str]]) -> None:\n def _dfs(i, j):\n if i < 0 or j < 0 or i >= len(board) or j >= len(board[0]) or board[i][j] in ['X', '#']: return\n board[i][j] = '#'\n _dfs(i-1, j)\n _dfs(i+1, j)\n _dfs(i, j-1)\n _dfs(i, j+1)\n\n if not board or not board[0]: return\n m, n = len(board), len(board[0])\n for i in range(0, m):\n for j in range(0, n):\n is_edge = i == 0 or j == 0 or i == m-1 or j == n-1\n if is_edge and board[i][j] == 'O':\n _dfs(i, j)\n print(board)\n\n for i in range(0, m):\n for j in range(0, n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n if board[i][j] == '#':\n board[i][j] = 'O'", "def solve(self):\n while self.character.path[-1] != 88:\n n = self.next_move()\n if n is None:\n self.character.path += ['Error: Could not find full path (budget does not suffice or unreachable).']\n break\n self.character.path += [n]\n self.updated_occupied_locations()\n self.currentTurn += 1", "def solve(self, board: List[List[str]]) -> None:\n def DFS(board, i, j):\n q = []\n q.append([i, j])\n \n while q:\n x, y = q.pop()\n board[x][y] = \"*\"\n neighbors = ((0, 1), (0, -1), (1, 0), (-1, 0))\n for dx, dy in neighbors:\n if 0 <= x + dx <= len(board) - 1 and 0 <= y + 
dy <= len(board[0]) - 1 and board[x + dx][y + dy] == \"O\":\n q.append([x + dx, y + dy])\n \n \n # first row\n i = 0\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last row\n i = len(board) - 1\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # first column\n j = 0\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n # last column\n j = len(board[0]) - 1\n for i in range(len(board)):\n if board[i][j] == \"O\":\n DFS(board, i, j)\n \n \n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"*\":\n board[i][j] = \"O\"", "def solve (M, cpos, move): \n if move == 64:\n print (\"\\n\\nmove: \", move)\n print (\"sum: \", sum(M))\n pprint (M)\n #exit()\n for next in get_moves(cpos, M):\n solve(ulist(M, next, move+1), next, move+1)", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def solve(self, board: List[List[str]]) -> None:\n m = len(board)\n if m == 0:\n return\n n = len(board[0])\n uf = UnionFold(m*n+1)\n dummy = m*n\n # 搜索与边界联通的 “O”\n for i in range(0, m):\n for j in range(0, n):\n # the boundary\n if (i==0 or i == m-1 or j == 0 or j == n-1) and board[i][j] == \"O\":\n uf.union(i*n+j, dummy)\n elif board[i][j] == \"O\":\n for l in [[1,0], [0, 1], [-1, 0], [0, -1]]:\n x, y = l[0]+i, l[1]+j\n if board[x][y] == \"O\":\n uf.union(i*n+j, x*n+y)\n # 所有不和 dummy 连通的 O,都要被替换\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i*n+j):\n board[i][j] = \"X\"", "def solve_col0_tile(self, target_row):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, 0), \"tiles to right and below incorrectly ordered\"\n\n # Move zero tile from target position (target_row, 0) to (target_row - 1, 1).\n self.update_puzzle(\"ur\")\n\n move_str = \"\"\n \n # correct_tile's position is determined after moving zero tile \"ur\" \n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(target_row, 0)\n \n # Moves to reposition correct_tile to target position.\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(target_row, 0):\n move_str += str(self.position_tile(target_row - 1, 1, correct_tile))\n move_str += str(\"ruldrdlurdluurddlur\")\n\n # Moves to reposition zero tile to end of column of target_row + 1.\n move_str += str(\"r\" * (self.get_width() - 2)) \n \n self.update_puzzle(move_str)\n\n assert self.lower_row_invariant(target_row - 1, self.get_width() - 1) \n \n move_str = \"ur\" + move_str\n return move_str", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position 
(0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n # New Solution: DFS on boarder (140ms: 89.07%)\n if not board or not board[0]: return\n def dfs(i, j):\n if board[i][j]=='O':\n board[i][j] = '*'\n if i-1>=0:\n dfs(i-1, j)\n if i+1<len(board):\n dfs(i+1, j)\n if j-1>=0:\n dfs(i, j-1)\n if j+1<len(board[0]):\n dfs(i, j+1)\n height, width = len(board), len(board[0])\n for i in range(width):\n if board[0][i]=='O':\n dfs(0, i)\n if board[height-1][i]=='O':\n dfs(height-1, i)\n for i in range(height):\n if board[i][0]=='O':\n dfs(i, 0)\n if board[i][width-1]=='O':\n dfs(i, width-1)\n for i in range(height):\n for j in range(width):\n if board[i][j]=='O':\n board[i][j] = 'X'\n elif board[i][j]=='*':\n board[i][j] = 'O'", "def solve_row1_tile(self, target_col):\n movements = self.solve_interior_tile(1, target_col)\n self.update_puzzle(\"ur\")\n return movements + \"ur\"", "def solve_(self, x, y, board, path):\n if self.SOLVED:\n return\n if self.is_done(board):\n self.print_path(path)\n self.SOLVED = True\n return\n for new_x, new_y in self.next_click(x, y, board):\n if new_x is None or new_y is None:\n return\n new_board = self.click(new_x, new_y, board)\n self.solve_(\n x=0, y=0,\n board=new_board,\n path=path + [((new_x, new_y), new_board)]\n )", "def solve(self, board: List[List[str]]) -> None:\n visited = [[False for x in range(len(board[0]))] for y in range(len(board))]\n for i in range(len(board)):\n for j in range(len(board[i])):\n if not visited[i][j] and board[i][j] == 'O':\n res = []\n result = self.gatherO(board, i, j, res, visited)\n if not result:\n for coordinate in res:\n board[coordinate[0]][coordinate[1]] = 'X'", "def solveSudoku(self, board):\n\n digits = { str(i) for i in range(1, 10) }\n rows = [ digits.copy() for _ in range(9) ]\n cols = [ digits.copy() for _ in range(9) ]\n boxs = [ [ digits.copy() for _ in range(3) ] for _ in range(3) ]\n unoccupied = set()\n\n def __recursiveSolver():\n if not unoccupied:\n return\n\n choices = digits.copy()\n for row, col in unoccupied:\n possible_moves = rows[row] & cols[col] & boxs[row // 3][col // 3]\n if len(possible_moves) < len(choices):\n action_pos = (row, col)\n choices = possible_moves\n if len(choices) == 1:\n break\n\n for choice in choices:\n (row, col) = action_pos\n\n unoccupied.remove(action_pos)\n board[row][col] = choice\n rows[row].remove(choice)\n cols[col].remove(choice)\n boxs[row // 3][col // 3].remove(choice)\n\n __recursiveSolver()\n if not unoccupied: return\n\n unoccupied.add(action_pos)\n board[row][col] = '.'\n rows[row].add(choice)\n cols[col].add(choice)\n boxs[row // 3][col // 3].add(choice)\n\n for row in range(9):\n for col in range(9):\n ch = board[row][col]\n if ch == '.':\n 
unoccupied.add((row, col))\n else:\n rows[row].remove(ch)\n cols[col].remove(ch)\n boxs[row // 3][col // 3].remove(ch)\n\n __recursiveSolver()", "def part1_2(puzzle_input):\n [initial_state_string, configurations] = puzzle_input.split('\\n\\n')\n initial_state = re.sub('initial state: ', '', initial_state_string)\n rules_arr = configurations.split('\\n')\n rules = [re.split(' => ', line) for line in rules_arr]\n rules = {t[0]: t[1] for t in rules}\n current_state = '..........' + initial_state + '...............................................................................................................................................'\n for i in range(100): # After 100th cycle, the only change is that there is a '#' that shifts right\n next_generation_string = \"\"\n for index, pot in enumerate(current_state):\n if index == 0:\n temp_string = '..' + current_state[:3]\n elif index == 1:\n temp_string = '.' + current_state[:4]\n elif index == len(current_state) - 2:\n temp_string = current_state[-4:] + '.'\n elif index == len(current_state) - 1:\n temp_string = current_state[-3:] + '..'\n else:\n temp_string = current_state[index-2:index+3]\n if temp_string in rules:\n next_generation_string += rules[temp_string]\n else:\n next_generation_string += pot\n current_state = next_generation_string\n\n # For part 1\n part1_sum = 0\n if i == 19:\n for index, pot in enumerate(current_state):\n if pot == '#':\n part1_sum += index - 10\n print(part1_sum)\n\n # Part 2\n part2_sum = 0\n for index, pot in enumerate(current_state):\n if pot == '#':\n part2_sum += index - 10 + 50000000000 - 100\n print(part2_sum)", "def solve(self, g: List[List[str]]) -> None:\n n = len(g)\n m = len(g[0])\n for i in range(n):\n for j in range(m):\n if g[i][j] == 'O':\n g[i][j] = ' '\n def dfs(x, y):\n g[x][y]='O'\n for nx, ny in (x+1,y),(x-1,y),(x,y+1),(x,y-1):\n if 0<=nx<n and 0<=ny<m and g[nx][ny]==' ':\n dfs(nx, ny)\n for i in range(n):\n if g[i][0]==' ':\n dfs(i,0)\n if g[i][m-1]==' ':\n dfs(i,m-1)\n for i in range(m):\n if g[0][i]==' ':\n dfs(0,i)\n if g[n-1][i]==' ':\n dfs(n-1,i)\n for i in range(n):\n for j in range(m):\n if g[i][j]==' ':\n g[i][j]='X'\n return g", "def solve_puzzle(self):\n\n # for each word in the words list\n # ...for each row in the game board\n # ......for each column in each row\n for word in self.words:\n for y, row in enumerate(self.board):\n for x, col in enumerate(row):\n \n # for each direction\n # try to find a word in said direction\n for dir in self.directions:\n self.scan_word(word, y, x, dir)", "def move(self, direction):\r\n direc = list(OFFSETS[direction])\r\n line = []\r\n dummy_board = self.board[:]\r\n if direction == 3:\r\n for i in range(self.height):\r\n self.board[i] = merge(self.board[i])\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n elif direction == 4:\r\n for i in range(self.height):\r\n line = self.board[i][::-1]\r\n self.board[i] = merge(line)\r\n self.board[i] = self.board[i][::-1]\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n \r\n elif direction == 1 or 2:\r\n dummy_board = str(self.board[:])\r\n if direction == 1:\r\n tile = [0,0]\r\n elif direction == 2:\r\n tile = [self.height - 1, 0]\r\n for i in range(self.width):\r\n tile2 = tile[:]\r\n while len(line) < self.height:\r\n line.append(self.get_tile(*tile2))\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n line = merge(line)\r\n tile2 = tile[:]\r\n for i in range(self.height):\r\n self.set_tile(*(tile2+[line[0]]))\r\n line.remove(line[0])\r\n tile2 = [x+y for x,y in zip(direc, 
tile2)]\r\n tile = [x+y for x,y in zip(tile, [0,1])]\r\n if dummy_board != self.__str__():\r\n self.new_tile()\r\n return self.board", "def solve(self, board):\n self.memo={}\n def changeVal(board, i, j,memo):\n key =str(i)+','+str(j)\n print(i,j)\n print(memo)\n if key in self.memo.keys():\n return self.memo[key]\n if ((i == 0 or j == 0 or i == len(board) - 1 or j == len(board[0]) - 1) and board[i][j] == 'O'):\n self.memo[key] = False\n return False\n if board[i][j] == 'X':\n self.memo[key] = True\n return True\n if board[i][j] == 'O':\n board[i][j] = 'X'\n ans1 = changeVal(board, i + 1, j,self.memo)\n ans2 = changeVal(board, i - 1, j,self.memo)\n ans3 = changeVal(board, i,j + 1,self.memo)\n ans4 = changeVal(board, i, j - 1,self.memo)\n\n ans = ans1 and ans2 and ans3 and ans4\n self.memo[key] = ans\n if ans == False:\n board[i][j] = 'O'\n return ans\n\n for i in range(0, len(board)):\n for j in range(0, len(board[0])):\n if board[i][j] == 'O':\n changeVal(board, i, j,self.memo)\n\n return board", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def action(self):\r\n\r\n\r\n #have we just started?\r\n if self.player_information[\"us\"][\"nTokens\"] == 0:\r\n move = generate_starting_move(self.player_information[\"us\"][\"player_side\"], self.board_array)\r\n return move\r\n\r\n #otherwise do minimax \r\n \r\n #start off with some shallow depth:\r\n if self.turn_no < 5:\r\n depth = 3\r\n else:\r\n depth = 2\r\n \r\n #set a constraint for search depth\r\n if self.total_tokens_on_board < 6:\r\n depth = 3\r\n elif self.total_tokens_on_board < 10:\r\n depth = 2\r\n else:\r\n depth = 1\r\n \r\n #have a time reference\r\n print(f'nthrows: {self.player_information[\"us\"][\"nThrowsRemaining\"]}')\r\n starting_time = int(round(time.time(), 0))\r\n #salvage result from minimax\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n 
None, None, None, depth, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, True, self.turn_no)\r\n\r\n #clean it up a bit \r\n print(self.board_dict)\r\n #tidy it up\r\n result = result[0]\r\n print(f'pre: {result}')\r\n #in case we get a bad move redo but make it very shallow\r\n if len(result) == 1 or result == (-5, -5):\r\n #force it to return a usable move\r\n counter = 0\r\n while (len(result) == 1) or (result == (-5, -5)):\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, 1, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, False, self.turn_no)\r\n result = result[0]\r\n counter += 1\r\n \r\n #if its taking too long\r\n if counter > 2: \r\n #generate one random possible move to use \r\n allied_tokens = [token for token in self.player_tokens if self.player_tokens[token] == \"us\"]\r\n move_list = generate_moves(self.board_dict, self.player_tokens, self.co_existance_dict, allied_tokens,\r\n self.player_information, self.board_array, True, \"all\")\r\n \r\n \r\n #if there are no moves\r\n if len(move_list) == 0:\r\n if self.player_information['us']['nThrowsRemaining'] > 0:\r\n throws = generate_possible_throws(self.board_dict, self.player_tokens, self.co_existance_dict, self.player_information, \"us\",\r\n self.player_information[\"us\"][\"player_side\"], self.board_array, \"all\" )\r\n result = random.choice(throws)\r\n \r\n else:\r\n result = random.choice(move_list)\r\n print(f'random: {result}')\r\n break\r\n\r\n print(f' inside: {result}')\r\n\r\n print(result)\r\n #otherwise clean it up\r\n if result[0] == 'throw':\r\n final_result = (result[0].upper(), result[1], result[2])\r\n else:\r\n final_result = (result[0].upper(), result[2], result[3])\r\n # return final result \r\n return final_result", "def get_all_possible_moves():\r\n \"\"\"\r\n Creates the labels for the universal chess interface into an array and returns them\r\n \"\"\"\r\n labels_array = []\r\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\r\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\r\n promoted_to = ['q', 'r', 'b', 'n']\r\n\r\n for l1 in range(8):\r\n for n1 in range(8):\r\n destinations = [(t, n1) for t in range(8)] + \\\r\n [(l1, t) for t in range(8)] + \\\r\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\r\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\r\n [(l1 + a, n1 + b) for (a, b) in\r\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]\r\n for (l2, n2) in destinations:\r\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):\r\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\r\n labels_array.append(move)\r\n for l1 in range(8):\r\n l = letters[l1]\r\n for p in promoted_to:\r\n labels_array.append(l + '2' + l + '1' + p)\r\n labels_array.append(l + '7' + l + '8' + p)\r\n if l1 > 0:\r\n l_l = letters[l1 - 1]\r\n labels_array.append(l + '2' + l_l + '1' + p)\r\n labels_array.append(l + '7' + l_l + '8' + p)\r\n if l1 < 7:\r\n l_r = letters[l1 + 1]\r\n labels_array.append(l + '2' + l_r + '1' + p)\r\n labels_array.append(l + '7' + l_r + '8' + p)\r\n return labels_array", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return []\n nr = len(board)\n nc = len(board[0])\n\n # begin dfs from boundaries with letter \"O\"\n for r in range(nr):\n for c in range(nc):\n if r == 0 or r == nr-1 or c == 0 or c == 
nc-1:\n if board[r][c] == \"O\":\n self.dfs(board, r, c)\n\n # change \"O\" to \"X\" and \"#\" to \"O\"\n for r in range(nr):\n for c in range(nc):\n if board[r][c] == \"O\":\n board[r][c] = \"X\"\n elif board[r][c] == \"#\":\n board[r][c] = \"O\"", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def solve(self, board: List[List[str]]) -> None:\n if(len(board)==0) : return \n for i in range(0, len(board[0])): \n if(board[0][i]==\"O\"):\n self.DFS(board, 0, i)\n \n if(board[len(board)-1][i]==\"O\"):\n self.DFS(board,len(board)-1,i)\n \n for i in range(0, len(board)):\n if(board[i][0]==\"O\"):\n self.DFS(board, i, 0)\n \n if(board[i][len(board[0])-1]==\"O\"):\n self.DFS(board,i, len(board[0])-1)\n \n \n for i in range(0,len(board)):\n for j in range(0, len(board[0])):\n if(board[i][j]==\"#\"):\n board[i][j]=\"O\"\n \n else:\n board[i][j]=\"X\"", "def find_best_move(state: GameState) -> None:", "def solve_row0_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row0_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n \n # Move zero tile from target position (0, target_col) to (1, target_col - 1) \n self.update_puzzle(\"ld\")\n \n move_str = \"\"\n\n # correct_tile's position is determined after moving zero tile \"ld\"\n # because its position relative to zero tile may have changed as a result.\n correct_tile = self.current_position(0, target_col) \n \n # Moves to reposition correct_tile to target position, and\n # the zero tile to (1, target_col - 1).\n if self.get_number(correct_tile[0], correct_tile[1]) != self.get_number(0, target_col):\n move_str += str(self.position_tile(1, target_col - 1, correct_tile))\n move_str += str(\"urdlurrdluldrruld\")\n\n self.update_puzzle(move_str)\n\n assert self.row1_invariant(target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"ld\" + move_str\n return move_str" ]
[ "0.86358064", "0.84391785", "0.820359", "0.81158245", "0.8079695", "0.7876614", "0.7855013", "0.77959996", "0.7629816", "0.75706357", "0.7446708", "0.71919894", "0.70876247", "0.6987711", "0.6876523", "0.686447", "0.686447", "0.686447", "0.6857152", "0.6857152", "0.6825898", "0.682045", "0.680644", "0.6798767", "0.67694706", "0.6757338", "0.67503834", "0.6743881", "0.6727839", "0.66785896", "0.66642034", "0.665565", "0.6631162", "0.66305166", "0.6597714", "0.6597469", "0.65839773", "0.6572454", "0.6566848", "0.65513307", "0.6532281", "0.65215236", "0.65060896", "0.64793384", "0.6475916", "0.64727217", "0.64694023", "0.6466018", "0.6461253", "0.6459988", "0.64553595", "0.64140576", "0.63782644", "0.63752735", "0.63594425", "0.6354533", "0.63507825", "0.6346083", "0.6341721", "0.6335916", "0.6334586", "0.6311966", "0.6311564", "0.6310484", "0.6304807", "0.63016754", "0.6295452", "0.62954044", "0.6293516", "0.62665886", "0.6245019", "0.6223726", "0.6215188", "0.6211001", "0.62100565", "0.6206542", "0.61962146", "0.6182959", "0.6178695", "0.6177864", "0.61600417", "0.61391354", "0.61234325", "0.61214095", "0.60961217", "0.6078878", "0.60724854", "0.60497886", "0.6030426", "0.60152173", "0.60047674", "0.6000283", "0.59923935", "0.5980893", "0.59638155", "0.59557575", "0.5953203", "0.5951496", "0.5951158", "0.5948482" ]
0.8153259
3
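For reference, the move strings ('u', 'd', 'l', 'r') that the snippets above and below feed to update_puzzle each slide the blank (0) tile one cell in the named direction. The interpreter below is a hypothetical stand-in for the records' update_puzzle, not code taken from any record, written to the same convention:

def apply_moves(grid, moves):
    # Locate the blank (0) tile.
    row, col = next((r, c) for r, line in enumerate(grid)
                    for c, val in enumerate(line) if val == 0)
    deltas = {'u': (-1, 0), 'd': (1, 0), 'l': (0, -1), 'r': (0, 1)}
    for move in moves:
        drow, dcol = deltas[move]
        # Swap the blank with the neighbouring tile it moves onto.
        grid[row][col], grid[row + drow][col + dcol] = \
            grid[row + drow][col + dcol], grid[row][col]
        row, col = row + drow, col + dcol
    return grid

# Example: a single 'l' move solves this 2x2 board.
print(apply_moves([[1, 0], [2, 3]], 'l'))   # [[0, 1], [2, 3]]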
check if the top left 2x2 puzzle is solved
def check_2x2_solved(self):
    """Return whether the top-left 2x2 block holds tiles 0, 1, width, and width + 1."""
    return self._grid[0][0] == 0 and self._grid[0][1] == 1 \
        and self._grid[1][0] == self._width and self._grid[1][1] == self._width + 1
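A toy driver for the check above (the standalone wrapper and the 3x3 grids are illustrative assumptions; the record's method reads the same fields from self):

def check_2x2_solved(grid, width):
    # Solved top-left 2x2 block: 0 and 1 on row 0, width and width + 1 on row 1.
    return (grid[0][0] == 0 and grid[0][1] == 1
            and grid[1][0] == width and grid[1][1] == width + 1)

print(check_2x2_solved([[0, 1, 5],
                        [3, 4, 2],
                        [6, 7, 8]], width=3))   # True

print(check_2x2_solved([[1, 0, 5],
                        [3, 4, 2],
                        [6, 7, 8]], width=3))   # False (blank not in the corner)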
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkPuzzle(self):\n print('Got to checkPuzzle')", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use 
just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def test_is_solved(self):\n p = hw.TilePuzzle([[1, 2], [3, 0]])\n self.assertTrue(p.is_solved())\n p = hw.TilePuzzle([[0, 1], [3, 2]])\n self.assertFalse(p.is_solved())", "def checkSolution(self):\n movesToEndblock = self.gridSize - self.changeable[0] - 2\n if self.checkMove(0,movesToEndblock) == 0:\n return 0\n return 1", "def _solve_puzzle(self, test_puzzle) -> bool:\n global counter\n row = 0\n col = 0\n for i in range(81):\n # current cell\n row = i // 9\n col = i % 9\n\n # if cell is empty we check to see possible placements\n if test_puzzle[row][col] == 0:\n # trying to place number in current cell\n for n in range(1, 10):\n\n # checking if we can place n in current cell\n if not SudokuGrid.check_valid_placement(n, row, col,\n test_puzzle):\n # placing n in cell\n test_puzzle[row][col] = n\n\n # check if grid is full increment number of solutions\n # and break loop to go to previous recursions to try\n # other combinations\n if SudokuGrid.check_grid(test_puzzle):\n counter += 1\n break\n\n # otherwise recurse to place next cell\n elif self._solve_puzzle(test_puzzle):\n return True\n\n # break loop if no valid placement in cell\n break\n\n # will set current square to 0 and go back to previous recursion\n # to find another valid placement\n test_puzzle[row][col] = 0\n return False", "def isSolvable(self):\n tiles = []\n for i in range(len(self.tiles)):\n for j in range(len(self.tiles)):\n if self.tiles[j][1] * 3 + self.tiles[j][0] + 1 == i + 1:\n tiles.append(j + 1)\n count = 0\n for i in range(len(tiles) - 1):\n for j in range(i + 1, len(tiles)):\n if tiles[i] > tiles[j] and tiles[i] != 9:\n count += 1\n return count % 2 == 0 and count != 0", "def test_is_solved_when_puzzle_is_solved(self):\n self.assertTrue(self.sudoku.is_solved())", "def isComplete(self):\n for n in range(9):\n for m in range(9):\n if self.puzzle[n][m] == 0:\n return False\n return True", "def check_if_solvable(self):\n\n self.solvable=True #status of sudoku\n for i in range(0, 9):\n for j in range(0, 9):\n if self.a[i][j]==0:\n continue\n if self.check(i, j)[self.a[i][j]]==0:\n self.solvable=False\n return False", "def is_solved(self):\n # Iterate through each square of the puzzle\n for row in range(self.sl):\n for col in range(self.sl):\n val = self.puzzle[row][col]\n\n # If any square value is blank (0), not solved, return False\n if val == 0:\n return False\n\n # Trick to keep DRY code: replace each value temporarily with a\n # 0, and use valid_square method with original value to determine\n # if every square is valid\n self.puzzle[row][col] = 0\n valid = self.valid_square(row, col, val)\n self.puzzle[row][col] = val\n \n # If not a valid value for square, return False\n if not valid:\n return False\n return True", "def solve(self):\n if not self.running or self.state == \"stopping\":\n return False\n\n # Find first empty tile\n target = ()\n for i in range(9**2):\n if self.board[i // 9, i % 9] == 0:\n target = (i // 9, i % 9)\n break\n\n # If there are no empty tiles, the puzzle is solved\n if not target:\n return True\n\n # Tests all possible values\n for 
value in range(1, 10):\n if not self.isPossibleAssign(target, value):\n continue\n\n self.update_board(target, value)\n\n if self.solve():\n return True\n\n # In case of failure, reset and return False\n self.update_board(target, 0)\n\n return False", "def is_solvable(board: list) -> bool:\n inv_count = invserion_count(board)\n return inv_count%2 == 0", "def is_valid_board(self):\n total = sum(range(1, self.n+1))\n d = {x : [set([]), set([])] for x in range(1, self.n+1)}\n for row_index in range(self.n):\n for col_index in range(self.n):\n num = self.board[row_index][col_index]\n try:\n if row_index in d[num][0] or col_index in d[num][1]:\n print(\"Invalid solution.\")\n return\n except KeyError:\n print(\"Unsolved solution.\") # d[0]\n return\n\n d[num][0].add(row_index)\n d[num][1].add(col_index)\n print(\"Valid solution!\")", "def solve(grid):\n find = find_empty(grid)\n if not find:\n return True\n\n row, col = find\n for i in range(1, 10):\n if valid(grid, i, (row, col)):\n grid[row][col] = i\n if solve(grid):\n return True\n grid[row][col] = 0\n return False", "def checker(self) -> bool:\n checker = 0\n if len(self._puzzle) == 4:\n for i, num in enumerate(self._puzzle):\n if num == Puzzle.solution[checker]:\n if i == checker:\n checker +=1\n if checker == 4:\n return True\n else:\n return False", "def solve(self):\n\n\t\tempty_spot = self.find_unsettled_spot()\n\t\tif not empty_spot:\n\t\t\treturn True\n\t\telse:\n\t\t\trow, col = empty_spot\n\n\t\t\t# Loop through all the available numbers\n\t\t\tfor number in range(1, 10):\n\t\t\t\t# If the number has no conflicts in its row, column or subgrid\n\t\t\t\tif self.no_conflicts(row, col, number):\n\t\t\t\t\t# Then overwrite the 0 with the new number\n\t\t\t\t\tself.grid[row][col] = number\n\n\t\t\t\t\tif self.solve():\n\t\t\t\t\t\treturn True\n\n\t\t\t\t\t# This is where backtracking happens\n\t\t\t\t\t# Reset the latest position back to 0 and try with new number value\n\t\t\t\t\tself.grid[row][col] = 0\n\n\t\treturn False", "def solve_board(bd):\n if is_solved(bd):\n print_board(bd)\n return\n elif len(next_valid_boards(bd)) == 0:\n return False\n else:\n for board in next_valid_boards(bd):\n solve_board(board)", "def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n return False\n return True", "def solve_step(self,puzzle_grid,x,y):\n self.puzzleGrid = puzzle_grid\n if(self.foundStep == False):\n self.targetCell = self.puzzleGrid.grid[x][y]\n if(self.targetCell.isSolved == False):\n self.calculate_possibilities()\n if len(self.targetCell.possibilities) == 1: #README method 1\n self.targetCell.solve()\n return True\n else:\n return self.check_neighbours() #README method 2", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def solve(board):\n 
find = find_blank(board)\n \n if not find:\n return True\n #will loop through untill the blanks are filled\n\n\n\n else:\n row, col = find\n\n for i in range(1, 10):\n if valid(board, i, (row, col)):\n board[row][col] = i\n\n if solve(board):\n return True\n board[row][col] = 0\n return False", "def has_solution(self) -> bool:\n pass", "def is_solvable(self) -> bool:\r\n inv_count = 0\r\n arr = self.current_state.flatten()\r\n for i in range(0, 9):\r\n for j in range(i + 1, 9):\r\n if arr[j] and arr[i] and arr[i] > arr[j]:\r\n inv_count += 1\r\n return inv_count % 2 == 0", "def sudoku_solver(board):\n row, col= find_empty(board)\n if row == -1 and col == -1:\n return True\n for i in range(1, 10):\n if valid(board, row, col, i):\n board[row][col] = i\n if sudoku_solver(board):\n return True\n board[row][col] = 0\n return False", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def is_solved(self):\n colors = ['green', 'blue', 'red', 'orange', 'white', 'yellow']\n for row in range(3):\n for column in range(3):\n if self.front[row][column] != colors[0]:\n return False\n for row in range(3):\n for column in range(3):\n if self.back[row][column] != colors[1]:\n return False\n for row in range(3):\n for column in range(3):\n if self.right[row][column] != colors[2]:\n return False\n for row in range(3):\n for column in range(3):\n if self.left[row][column] != colors[3]:\n return False\n for row in range(3):\n for column in range(3):\n if self.up[row][column] != colors[4]:\n return False\n for row in range(3):\n for column in range(3):\n if self.down[row][column] != colors[5]:\n return False\n return True", "def is_solved(self, grid: list):\n # Iterates over rows\n for i in range(9):\n\n if 0 in grid[i]: # Looks for 0s\n return False\n for j in range(9):\n if not self.validate_cell(grid, i, j): # validates each cell\n return False\n return True", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def test_puzzle_hard():\n static_arr = [[0, 0, 0, 0, 0, 6, 0, 0, 0],\n [0, 5, 9, 0, 0, 0, 0, 0, 8],\n [2, 0, 0, 0, 0, 8, 0, 0, 0],\n [0, 4, 5, 0, 0, 0, 0, 0, 0],\n [0, 0, 3, 0, 0, 0, 0, 0, 0],\n [0, 0, 6, 0, 0, 3, 0, 5, 4],\n [0, 0, 0, 3, 2, 5, 0, 0, 6],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0]]\n ans = [[4, 3, 8, 7, 9, 6, 2, 1, 5],\n [6, 5, 9, 1, 3, 2, 4, 7, 8],\n [2, 7, 1, 4, 5, 8, 6, 9, 3],\n [8, 4, 5, 2, 1, 9, 3, 6, 7],\n [7, 1, 3, 5, 6, 4, 8, 2, 9],\n [9, 2, 6, 8, 7, 3, 1, 5, 4],\n [1, 9, 4, 3, 2, 5, 7, 8, 6],\n [3, 6, 2, 9, 8, 7, 5, 4, 1],\n [5, 8, 7, 6, 
4, 1, 9, 3, 2]]\n arr = solveit(static_arr)\n assert_array_equal(arr, ans)", "def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False", "def solve(self):\n if self.is_solved():\n return True\n else:\n empty_box_coordinates = self._find_empty()\n row, column = empty_box_coordinates\n for i in range(1, 10):\n if self.is_valid_number(i, empty_box_coordinates):\n self.model[row][column] = i\n\n if self.solve():\n return True\n\n self.model[row][column] = 0\n return False", "def check_lost (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n if grid[0][0]==grid[0][1] or grid[0][0]==grid[1][0]:\r\n return False \r\n if grid[0][3]==grid[0][2] or grid[0][3]==grid[1][3]:\r\n return False \r\n if grid[3][0]==grid[2][0] or grid[3][0]==grid[3][1]:\r\n return False\r\n if grid[3][3]==grid[2][3] or grid[3][3]==grid[3][2]:\r\n return False \r\n if grid[0][1]==grid[0][2] or grid[0][1]==grid[1][1]:\r\n return False \r\n if grid[0][2]==grid[1][2]:\r\n return False \r\n if grid[1][1]==grid[2][1] or grid[1][1]==grid[1][2] or grid[1][1]==grid[1][0]:\r\n return False\r\n if grid[2][1]==grid[2][0] or grid[2][1]==grid[2][2] or grid[2][1]==grid[3][1]:\r\n return False \r\n if grid[1][0]==grid[2][0]:\r\n return False\r\n if grid[1][2]==grid[1][3] or grid[1][2]==grid[2][2]:\r\n return False\r\n if grid[2][2]==grid[2][3] or grid[2][2]==grid[3][2]:\r\n return False\r\n if grid[3][1]==grid[3][2]:\r\n return False\r\n else:\r\n return True", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve(bo):\n find = find_empty(bo)\n\n if find:\n row, col = find\n else:\n 
return True\n\n for i in range(1,10):\n if valid(bo, (row, col), i):\n bo[row][col] = i\n\n if solve(bo):\n return True\n\n bo[row][col] = 0\n\n return False", "def part2():\n grid[(0, 0)] = 1\n coordinates_value = 0\n layer = 1\n x = 0; y = 0\n done = False\n while not done:\n # print(\"Layer: \", layer)\n # go right one step\n layer += 1; x += 1\n grid[(x,y)] = check_neighbours((x,y))\n\n # go up to the boundary of layer\n for y_up in range(y+1, layer):\n coord = (x, y_up)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_up\n\n # go left till the boundary of layer\n for x_left in range(x-1, -layer, -1):\n coord = (x_left, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_left\n\n # go down till the boundary of layer\n for y_down in range(y-1, -layer, -1):\n coord = (x, y_down)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_down\n\n # go right till the boundary of layer\n for x_right in range(x+1, layer):\n coord = (x_right, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_right", "def test_is_solved_when_puzzle_is_not_solved(self):\n sudoku = sudolver.Sudoku()\n self.assertFalse(sudoku.is_solved())", "def is_legal_solution(self, solution):\r\n if self.sorting_order is ScoresSortingOrder.ASCENDING:\r\n return self.fit_score(solution) == 0\r\n else:\r\n return self.fit_score(solution) == sum(x for x in range(1, 12))", "def is_solved(bd):\n \"\"\" CONSTRAINT: Assumes board is valid\"\"\"\n count = 0\n for pos in bd:\n if pos == \" \":\n count += 1\n else:\n continue\n if count > 0:\n return False\n else:\n return True", "def test_goal(puzzle_state):\n \n x = puzzle_state.dimension\n final_state = []\n \n for i in range(x*x):\n final_state += [i]\n \n final_state_tuple = tuple(final_state)\n \n if puzzle_state.config == final_state_tuple:\n return True\n else:\n return False", "def isSolved(board):\n for player in [1, 2]:\n if [player]*3 in chain(\n board, # Rows\n zip(board), # Columns\n [ # Diagonals\n [board[i][i] for i in range(len(board))],\n [board[len(board) - i - 1][i] for i in range(len(board))]\n ]\n ):\n return player\n return -1 if 0 in chain(*board) else 0", "def solve(self):\n # If board is filled, board is trivially solved\n if self.check_full_board():\n return self.done\n\n # Iterate over every square in the board\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n\n # If square is empty, begin plugging in possible values\n if self.check_empty_space(row, col):\n for val in range(1, 10):\n if not self.check_row(val, row) and \\\n not self.check_column(val, col) and \\\n not self.check_box(val, self.what_box(row, col)):\n self.board[row][col] = val\n \n if self.solve():\n return self.done()\n \n # Didn't work; undo assigment\n self.board[row][col] = ' '\n\n # Bad path; backtrack\n return False", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n 
sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", "def solve(self):\n if not self.solvable:\n print('Suduko not Solvable')\n return False\n res=self.back(0, 0)\n # if self.a[0][0]!=0:\n # res=self.back(0, 1)\n # else:\n # for i in range(1, 10):\n # self.a[0][0]=i\n # res=self.back(0, 1)\n # if res:\n # break\n if res:\n self.check_if_solvable()\n print(\"Sudoku Solved!\")\n print(self.a)\n return self.a\n else: print(\"Not Solvable\")\n return False", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True", "def solveQPuzzle(ches):\n sizeOfPuzz = len(ches)\n for row in range(sizeOfPuzz):\n num1 = 0\n num2 = 0\n for column in range(sizeOfPuzz):\n num1 += ches[row][column]\n num2 += ches[column][row]\n if num1 > 1:\n return(False)\n if num2 > 1:\n return(False)\n\n for move in range(sizeOfPuzz):\n firstMove = [0, move]\n secondMove = [move, 0]\n first = chessMoveGrab(ches, firstMove)\n second = chessMoveGrab(ches, secondMove)\n if not first:\n return(False)\n if not second:\n return(False)\n\n for move in range(sizeOfPuzz):\n firstMove = [sizeOfPuzz - 1, move]\n secondMove = [sizeOfPuzz - 1 - move, 0]\n first = chessMoveGrab(ches, firstMove, False)\n second = chessMoveGrab(ches, secondMove, False)\n if not first:\n return(False)\n if not second:\n return(False)\n return True", "def _create_solution(self) -> bool:\n row = 0\n col = 0\n for i in range(81):\n # current cell\n row = i // 9\n col = i % 9\n\n # if cell is empty we try placing number in it\n if self._grid_sol[row][col] == 0:\n shuffle(NUMLIST)\n for n in NUMLIST:\n\n # if n is viable for placement for cell then place it\n if not SudokuGrid.check_valid_placement(n, 
row, col,\n self._grid_sol):\n self._grid_sol[row][col] = n\n\n # check if grid is full and return true\n if SudokuGrid.check_grid(self._grid_sol):\n return True\n\n # otherwise recurse to place next cell\n elif self._create_solution():\n return True\n\n # break loop if no valid placement in cell\n break\n\n # will set current cell to 0 and go back to previous recursion\n # to find another valid cell placement combination\n self._grid_sol[row][col] = 0\n return False", "def solved(self):\r\n return self.puzzle.solved", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def is_solved(self):\n return self.to_grid == self.from_grid", "def solve_util(self, board, col):\n try:\n if col == self.N:\n self.print_sol(board)\n return True\n\n # Trying to place this queen in all rows one by one\n res = False\n for i in range(self.N):\n if self.is_safe(board, i, col):\n board[i][col] = 1\n res = self.solve_util(board, col + 1) or res\n if type(res) == dict:\n return res\n board[i][col] = 0 # Backtracking...\n\n # if queen cannot be placed in any row in this col, then alas\n # we return false..\n return res\n except KeyboardInterrupt:\n print('Keyboard Interrupted!')\n return self.Outputs", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def is_solvable(start, board):\r\n # If needed, a slower yet using less memory function can\r\n # be made with one function that multiplies visited cells by -1\r\n # and at the end corrects the whole list using abs().\r\n \r\n board = board[:] \r\n return is_solvable_new_board(start, board)", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. 
\r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). \r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def is_solved(self):\n return (self.from_grid == self.to_grid)", "def solveSudoku(grid):\n\n #if the board is not empty, then check to see if its solved\n #return True if it is\n if not findEmpty(grid):\n if grid.checkBoard():\n return True\n else:\n return False\n #finds the first empty position\n p = findEmpty(grid)\n #considers 1-9 and then places it into the empty spot\n for i in range(1, 10):\n grid.board[p[0]][p[1]] = i\n #if the input is viable, then it goes solves the new given board until its solved\n if grid.checkInput(p[0], p[1]):\n if solveSudoku(grid):\n return True\n #if there are no viable options for that spot, then it backtracks \n grid.board[p[0]][p[1]] = 0\n return False", "def sudoku(puzzle):\n positions = all_pos(puzzle)\n if solve(puzzle, positions, 0):\n return puzzle\n return None", "def generate_solution(self, grid):\n number_list = [1,2,3,4,5,6,7,8,9]\n for i in range(0,81):\n row=i//9\n col=i%9\n #find next empty cell\n if grid[row][col]==0:\n shuffle(number_list)\n for number in number_list:\n if self.valid_location(grid,row,col,number):\n self.path.append((number,row,col))\n grid[row][col]=number\n if not self.find_empty_square(grid):\n return True\n else:\n if self.generate_solution(grid):\n #if the grid is full\n return True\n break\n grid[row][col]=0\n return False", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def isSolvable(state):\n\n invCount = 0\n size = len(state)\n for i in range(0, size-1):\n for j in range(i+1, size):\n if 
(int(state[j]) and int(state[i]) and state[i] > state[j]):\n invCount += 1\n # return (invCount%2 == 0)\n return 1", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def solve(grid):\n puzzle_dict = grid_values(grid)\n return search(puzzle_dict)", "def possible(matrix: List[List[int]], x: int, y: int, n:int) -> bool:\n\n # Check for problem in row\n for i in range(0, 9):\n if matrix[x][i] == n:\n return False\n\n # Check for problem in column\n for j in range(0, 9):\n if matrix[j][y] == n:\n return False\n \n # Initial indexes for inner square\n x0 = (x // 3) * 3\n y0 = (y // 3) * 3\n\n # Check for problem in inner square\n for i in range(0, 3):\n for j in range(0, 3):\n if matrix[x0 + i][y0 + j] == n:\n return False\n \n return True", "def is_valid(problem, i, j, e):\n row_map = row_maps[i]\n column_map = column_maps[j]\n sector_map = sector_maps[get_sector_number(i, j)]\n not_in_row = row_map[e-1] == 0\n not_in_column = column_map[e-1] == 0\n not_in_sector = sector_map[e-1] == 0\n\n return not_in_row and not_in_column and not_in_sector", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def update_status(self):\n if len(self.invalid) != 0:\n return False\n for row in self.grid:\n for num in row:\n if num == 0:\n return False\n self.solved = True\n print(\"solved\")\n return True", "def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True", "def goal_test(state): \n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] != i*size + j:\n return False \n return True", "def is_solvable(self, row=0, col=0):\n if row == self.sl-1 and col == self.sl: \n return True\n\n # If column is the side length, mvoe indices to next row\n if col == self.sl:\n return self.is_solvable(row+1, 0)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_solvable(row, col + 1)\n\n # 
If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n solved = self.is_solvable(row, col + 1) \n self.puzzle[row][col] = 0\n\n # If value solves puzzle, return solved\n if solved:\n return solved\n\n return False", "def q1(puzzle):\n mysudoku = build_csp(puzzle)\n solution = mysudoku.backtracking_search()\n return solution, mysudoku", "def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0", "def check_correctness(sol_list, board, pents):\n # All tiles used\n if len(sol_list) != len(pents):\n return False\n # Construct board\n sol_board = np.zeros(board.shape)\n seen_pents = [0]*len(pents)\n for pent, coord in sol_list:\n pidx = get_pent_idx(pent)\n if seen_pents[pidx] != 0:\n return False\n else:\n seen_pents[pidx] = 1\n if not add_pentomino(sol_board, pent, coord, True, pents): \n return False\n \n # Check same number of squares occupied\n if np.count_nonzero(board) != np.count_nonzero(sol_board):\n return False\n # Check overlap\n if np.count_nonzero(board) != np.count_nonzero(np.multiply(board, sol_board)):\n return False\n \n return True", "def check_lost (grid):\r\n t=0\r\n for o in range(len(grid)):\r\n for e in range(len(grid[o])):\r\n if grid[o][e]==0:\r\n t+=1\r\n else:\r\n ()\r\n r=0\r\n for o in range(len(grid)):\r\n for e in range(len(grid[o])-1):\r\n if grid[o][e]==grid[o][e+1]:\r\n r+=1\r\n elif grid[o][3]==grid[o][2]:\r\n r+=1 \r\n else:\r\n ()\r\n \r\n v=0\r\n for o in range(len(grid)):\r\n for e in range(len(grid[o])-1):\r\n if grid[e][o]==grid[e+1][o]:\r\n v+=1\r\n elif grid[3][o]==grid[2][o]:\r\n v+=1 \r\n else:\r\n () \r\n \r\n if t==0 and r==0 and v==0:\r\n return True\r\n else:\r\n return False", "def check_win(puzzle: str, solution: str) -> bool:\r\n # Check if every character besides the last is the same\r\n return puzzle[:-1] == solution[:-1]", "def solve_one(board: Board, col: int) -> bool:\n #Completed board found\n if col >= board.size:\n return True\n for row in range(board.size):\n #check if position is valid\n if check_constraints(board=board, row=row, col=col):\n #update board and continue BFS\n board.mark_tile(row=row, col=col)\n if solve_one(col=col+1, board=board):\n return True\n board.unmark_tile(row=row, col=col)\n #no valid solutions for current board position\n return False", "def puzzle_01() -> None:\n\n containers = load_containers()\n print_puzzle_solution(len(tuple(filter(\n lambda combination: sum(combination) == EGGNOG_LITRES,\n [combination\n for i in range(len(containers))\n for combination in combinations(containers, i)]))))", "def solve_soduku(sudoku, screen):\n\n myfont = pygame.font.SysFont('Times New Roman', 30)\n\n # Creates a copy of the sudoku board so that we don't mess up the original board\n solved_board = sudoku.board\n\n # Stores the index of the next number that should be tried (the index will be used with the possible_nums list)\n try_new_nums = [[0] * 9 for y in range(9)]\n\n # Creates a list that will act like a stack for the depth first search (stores tuples (row, col) for each unsolved square)\n nodes = [sudoku.find_next_empty_node((0, 
-1))]\n\n done = False\n\n # Keeps running until the puzzle is either solved or runs out of possible combinations\n while len(nodes) != 0:\n\n time.sleep(.001)\n\n if not done:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n\n pygame.display.update()\n\n # finds all possible numbers that can go into the current unsolved square\n one = set(sudoku.check_vertically(nodes[len(nodes) - 1], solved_board))\n two = set(sudoku.check_horizontally(nodes[len(nodes) - 1], solved_board))\n three = set(sudoku.check_box(nodes[len(nodes) - 1], solved_board))\n possible_nums = list(one.intersection(two).intersection(three))\n\n # Determines if there is a number that can be put into the current unsolved square\n if len(possible_nums) > 0:\n\n # Stores the current number in the current unsolved square\n curr_num = solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]]\n\n # Stores the next number that will be tried in the current unsolved square\n possible_next_num = possible_nums[\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] % len(possible_nums)]\n\n # Makes sure that the code doesn't get stuck trying the same combos\n if try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] == len(possible_nums):\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Makes sure that the code doesn't get stuck on trying the same number\n if possible_next_num == curr_num:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Sets the unsolved square to the next number that is to be tried\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = possible_next_num\n\n # Changes which index will be used to find a different number if the new number does not work\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] += 1\n\n # if there are no possible numbers for the current square, it backtracks to the last number that can change\n else:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Determines if there is still an empty unsolved square left\n if sudoku.has_next_emtpy_node(nodes[len(nodes) - 1]):\n nodes.append(sudoku.find_next_empty_node(nodes[len(nodes) - 1]))\n else:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n done = True", "def win_check(self):\n\t\t# Create a temp var to capture the number of correct matches\n\t\tright = 0\n\t\t# retrieve peg_guess_color_list for current round\n\t\tguess = self.model.guesses[self.model.status]\n\t\t# retreive solution list\n\t\tsolution = self.model.guesses[\"solution\"]\n\t\t# compare values in each index for both lists against eachother\n\t\tfor i in range(len(solution.pegs)):\n\t\t\t# \n\t\t\tif solution.pegs[i].peg_color == guess.pegs[i].peg_color:\n\t\t\t\tright += 1\n\n\t\t\t\tprint(\"Yay, it works!\")\n\n\t\t# If all indexes of the peg_colors in the solution and guess are True:\n\t\tif right == 4:\n\t\t\treturn True\n\n\t\telse:\n\t\t\treturn False", "def did_solve(self):\n return self._solution[\"status\"] == \"optimal\"", "def check_tie(board):\n return 0 not in board[0]", "def is_proper(grid):\n clauses = sudoku_clauses()\n for i in range(1, 10):\n for j in range(1, 10):\n d = grid[i - 1][j - 1]\n # For each digit already known, a 
clause (with one literal).\n # Note:\n # We could also remove all variables for the known cells\n # altogether (which would be more efficient). However, for\n # the sake of simplicity, we decided not to do that.\n if d:\n clauses.append([v(i, j, d)])\n\n def py_itersolve(clauses): # don't use this function!\n while True: # (it is only here to explain things)\n sol = pycosat.solve(clauses)\n if isinstance(sol, list):\n yield sol\n clauses.append([-x for x in sol])\n else: # no more solutions -- stop iteration\n return\n\n # solve the SAT problem\n generator = py_itersolve(clauses)\n t = 0\n for solution in generator:\n t += 1\n if t == 2:\n return False\n return True", "def createsolution(self,rows):\r\n l = [0,0]\r\n if not self.findzero(l):\r\n return True\r\n x = l[0]\r\n y = l[1]\r\n for i in range(1,10):\r\n if self.check(i, x, y):\r\n rows[x][y] = i\r\n if self.createsolution(rows):\r\n return True\r\n rows[x][y] = 0\r\n return False", "def goal_test(self, state):\n self.numbernodes += 1\n\n i = 0\n for box in state.boxes :\n for coord in self.board.positionGoal :\n if coord[0] == box.y and coord[1] == box.x : \n i+=1\n if i == 0 : return False\n i = 0\n return True", "def solve_with_bruteforce(grid):\n\n res = check_sudoku(grid)\n if res is None or res is False:\n return res\n \n for row in range(0, 9):\n for col in range(0, 9):\n if grid[row][col] == 0:\n for n in range(1,10):\n grid[row][col] = n\n solution = solve_with_bruteforce(grid)\n if solution is False:\n grid[row][col] = 0\n else:\n return solution\n return False\n return grid", "def noSol(self):\n noSol = False \n\n cost_min_bilet = 100000\n\n for a in self.info.autobuze:\n if a.price < cost_min_bilet:\n cost_min_bilet = a.price\n\n for o in self.info.oameni:\n if o.money < cost_min_bilet and o.remaining_dest != []: \n noSol = True\n break\n \n set_destinatii = set()\n\n for o in self.info.oameni:\n if o.current_loc in set_destinatii:\n noSol = True\n break\n else:\n set_destinatii.add(o.current_loc)\n\n return noSol", "def is_solved(self):\n i = 0\n for row in self._marker:\n for x in row:\n if x == \"*\":\n i += 1\n if i > 1:\n return False\n return True", "def solve(self):\r\n while not self.done():\r\n self.no_open_cells()\r\n self.all_cells_are_mines()\r\n self.no_mines()\r\n if not self.done():\r\n self.obvious_cells()\r\n if not self.done():\r\n made_progress = self.safe_neighbour_difference()\r\n if made_progress:\r\n continue\r\n if not self.done():\r\n made_progress = self.adjacent_combinations()\r\n if made_progress:\r\n continue\r\n return", "def solve(board):\r\n cell = firstEmptyCell(board)\r\n # Base case for recursion\r\n # no empty cells, also all other cells filled validly, so board solved\r\n if cell is None:\r\n return True\r\n \r\n x, y = cell\r\n for i in range(1,10):\r\n # if i is valid at cell in board, go to next empty cell (recursively)\r\n if isvalid(board, i, cell):\r\n board[x][y] = i\r\n\r\n # try filling next empty cell till successful\r\n if solve(board):\r\n return True\r\n\r\n # i is invalid in cell, try i+1\r\n board[x][y] = 0\r\n \r\n # board can't be solved\r\n return False", "def check(self):\n winner = None\n count = 0\n\n for y in range(self.gridSize):\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for item in self.grid[y]:\n # Check row of the grid\n if item == \"P1\":\n P1 += 1\n elif item == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for x in range(self.gridSize):\n # Check column of the grid\n 
if self.grid[x][y] == \"P1\":\n P1 += 1\n elif self.grid[x][y] == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for y in range(self.gridSize):\n # Check right top to left bottom across the grid\n for x in range(self.gridSize):\n if x == y:\n if self.grid[x][y] == \"P1\":\n P1 += 1\n elif self.grid[x][y] == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for y in range(self.gridSize):\n # Check the left top to the right bottom across the grid\n for x in range(self.gridSize - 1, -1, -1):\n # Check how many filled spaces there are\n if \".\" not in self.grid[y][x]:\n count += 1\n if x + y == self.gridSize - 1:\n if self.grid[y][x] == \"P1\":\n P1 += 1\n elif self.grid[y][x] == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n # Check if there is a winner if so return the winner\n if winner != None:\n return winner\n # Check if the fields that are filled are equal to the possible spaces to be filled in the grid\n if count == self.gridSize**2:\n return \"Tie\"", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def isComplete(grid):\n for row in range(0,9):\n for col in range(0,9):\n if grid[row][col]==0:\n return False\n return True", "def solve_puzzle(self):\n\n # for each word in the words list\n # ...for each row in the game board\n # ......for each column in each row\n for word in self.words:\n for y, row in enumerate(self.board):\n for x, col in enumerate(row):\n \n # for each direction\n # try to find a word in said direction\n for dir in self.directions:\n self.scan_word(word, y, x, dir)", "def is_one_sol(self, row=0, col=0, sols=None):\n # For testing reasons, initialize with None\n if sols == None:\n sols = []\n\n # Uses an aliased list to maintain variance of number of solutions \n # found across all recursive calls, and returns when more than 1 is found\n if len(sols) > 1:\n return False\n\n # If end of puzzle is hit, the puzzle is solved, return True\n if row == self.sl-1 and col == self.sl: \n sols.append(True)\n return\n \n # If column is the side length, mvoe indices to next row\n if col == self.sl:\n return self.is_one_sol(row+1, 0, sols)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_one_sol(row, col+1, sols)\n\n # If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n self.is_one_sol(row, col+1, sols) \n self.puzzle[row][col] = 0\n\n if len(sols) > 1:\n return False\n\n # If exhausted all possibilities, return if only one solution found thus far\n return len(sols) == 1", "def check_game_status2(board):\n board = np.array(board)\n for i in range(7):\n for j in 
range(6):\n if checkWin(board, j, i, 1):\n return 1\n if checkWin(board, j, i, 2):\n return 2\n if isfull(board):\n return 0\n return -1", "def bts_solver(self) -> bool:\n empty_position = self.find_zero()\n if empty_position == \"\":\n self.print_board(self.sudoku_board)\n return True\n\n for value in range(1, 10):\n if self.is_valid(self.sudoku_board, empty_position, value):\n self.sudoku_board[empty_position] = value\n if self.bts_solver():\n return True\n self.sudoku_board[empty_position] = 0\n return False", "def puzzle_02() -> None:\n\n containers = load_containers()\n combinations_lengths = tuple(map(\n lambda combination: len(combination),\n filter(lambda combination: sum(combination) == EGGNOG_LITRES,\n [combination\n for i in range(len(containers))\n for combination in combinations(containers, i)])))\n print_puzzle_solution(combinations_lengths.count(min(combinations_lengths)))", "def valid_attempt(board):\n for i in range(n):\n if [] in board[i]:\n return 0\n return 1", "def checkPossibleMoves():\n for row in range(9):\n for column in range(7):\n if board[row][column] == board[row][column+1]: #A\n a = board[row][column]\n if column != 6: #column +3 would lead to an error\n if a == board[row+1][column+2] or a == board[row][column+3] or a == board[row-1][column+2] or a == board[row-1][column-1] or a == board[row][column-2] or a ==board[row+1][column-1]:\n return False\n else: \n if a == board[row+1][column+2] or a == board[row-1][column+2] or a == board[row-1][column-1] or a == board[row][column-2] or a ==board[row+1][column-1]:\n return False\n if board[row][column] == board[row][column+2]: # B\n if board[row][column] == board[row+1][column+1] or board[row][column] == board[row-1][column+1]:\n return False\n\n if board[row][column] == board[row+1][column]: #C\n a = board[row][column]\n if row != 8: #row +3 would lead to an error\n if a == board[row-1][column+1] or a == board[row-2][column] or a == board[row-1][column-1] or a == board[row+2][column-1] or a == board[row+3][column] or a == board[row+2][column+1]:\n return False\n else:\n if a == board[row-1][column+1] or a == board[row-2][column] or a == board[row-1][column-1] or a == board[row+2][column-1] or a == board[row+2][column+1]:\n return False\n\n if board[row][column] == board[row+2][column]: #D\n if board[row][column] == board[row+1][column-1] or board[row][column] == board[row+1][column+1]:\n return False\n return True" ]
[ "0.7565178", "0.73852265", "0.7355203", "0.73358935", "0.7311697", "0.7294157", "0.7242904", "0.7190867", "0.7131681", "0.71189845", "0.70927525", "0.70451087", "0.70317227", "0.70084786", "0.6971756", "0.6908844", "0.69036686", "0.6901154", "0.68942595", "0.68830353", "0.68520164", "0.6844793", "0.68418574", "0.6818569", "0.678551", "0.6765203", "0.6761923", "0.67421913", "0.6722176", "0.6700753", "0.66988045", "0.6662489", "0.6660329", "0.66589206", "0.664284", "0.66295534", "0.66238743", "0.6622986", "0.6603509", "0.6602277", "0.6600199", "0.6592584", "0.658682", "0.65864986", "0.6543401", "0.65380263", "0.6510344", "0.6492977", "0.64812374", "0.6477437", "0.64702374", "0.6452199", "0.6439196", "0.6433545", "0.642738", "0.64255035", "0.64141804", "0.6409164", "0.63976175", "0.63974285", "0.6393021", "0.6393021", "0.6393021", "0.63882834", "0.63882166", "0.63632023", "0.63623196", "0.6361822", "0.6355549", "0.6355374", "0.63546914", "0.6353401", "0.63477635", "0.6345294", "0.6345209", "0.6334646", "0.63344073", "0.6331926", "0.63190025", "0.6317897", "0.6315225", "0.6310738", "0.63092", "0.6288249", "0.62689376", "0.62619454", "0.6261448", "0.6255833", "0.6252768", "0.62438023", "0.6231413", "0.62237775", "0.62194353", "0.6216181", "0.6209923", "0.6189095", "0.6168428", "0.616017", "0.6159464", "0.61565703" ]
0.6677703
31
Generate a solution string for a puzzle. Updates the puzzle and returns a move string
def solve_puzzle(self):
    # Move the blank (0) tile to the bottom-right corner of the grid.
    cur0_row, cur0_col = self.current_position(0, 0)
    move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)
    self.update_puzzle(move_str)
    # Solve the bottom rows from bottom to top, each row right to left;
    # column 0 needs its own routine.
    for row in range(self._height-1, 1, -1):
        for col in range(self._width-1, -1, -1):
            assert self.lower_row_invariant(row, col)
            if col != 0:
                move_str += self.solve_interior_tile(row, col)
            else:
                move_str += self.solve_col0_tile(row)
    # Solve the rightmost columns of the top two rows, right to left,
    # bottom position before top position in each column.
    for col in range(self._width-1, 1, -1):
        assert self.row1_invariant(col)
        move_str += self.solve_row1_tile(col)
        assert self.row0_invariant(col)
        move_str += self.solve_row0_tile(col)
    # Finish with the upper-left 2x2 block and return the full move string.
    move_str += self.solve_2x2()
    return move_str
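For context, here is a minimal, standalone sketch of the move-string convention this document's solve_puzzle produces: each character 'u', 'd', 'l', 'r' slides the blank (0) tile one cell in that direction, matching the update_puzzle implementations quoted among the negatives below. The apply_moves helper is hypothetical and purely illustrative, not part of any snippet in this record.

# Hypothetical helper; the 'u'/'d'/'l'/'r' behaviour mirrors the
# update_puzzle methods quoted in the negatives.
def apply_moves(grid, moves):
    # Locate the blank (0) tile.
    row, col = next((r, c) for r, line in enumerate(grid)
                    for c, val in enumerate(line) if val == 0)
    deltas = {'u': (-1, 0), 'd': (1, 0), 'l': (0, -1), 'r': (0, 1)}
    for move in moves:
        drow, dcol = deltas[move]
        nrow, ncol = row + drow, col + dcol
        assert 0 <= nrow < len(grid) and 0 <= ncol < len(grid[0]), \
            "move off grid: " + move
        # Slide the blank one cell in the requested direction.
        grid[row][col], grid[nrow][ncol] = grid[nrow][ncol], grid[row][col]
        row, col = nrow, ncol
    return grid

# Replaying a returned move string on the starting grid should leave the
# tiles in row-major order 0, 1, 2, ...; e.g. for a scrambled 2x2 board:
print(apply_moves([[1, 3], [2, 0]], 'ul'))  # -> [[0, 1], [2, 3]]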
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def gen_solve_to_text(self):\n\n count = 0\n self.url = \"scramble: \\n\"\n for move in self.scramble.split():\n self.url += \"{} \".format(move)\n self.url += \"\\n\\nsolve:\\n\"\n\n for move in self.solve_stats:\n if self.comms_unparsed_bool:\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"\\n//{}\\n\".format(piece) + alg\n self.url += self.comms_unparsed[count]\n 
count += 1\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n if \"move\" in move:\n if move[\"move\"] != \"\":\n self.url += \"{} \".format(move[\"move\"])\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"//{}\\n\".format(piece) + alg\n\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n self.url += \"// {} \\n\".format(move[\"comment\"])", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). 
\r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 
1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def __str__(self):\n puzzle_string = '—' * 13 + '\\n'\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n puzzle_string += '│{0: >2}'.format(str(self.position[i][j]))\n if j == self.PUZZLE_NUM_COLUMNS - 1:\n puzzle_string += '│\\n'\n\n puzzle_string += '—' * 13 + '\\n'\n return puzzle_string", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = 
self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 
'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n move_str += 'rdlu'\n self.update_puzzle('rdlu')\n return move_str", "def solve_2x2(self):\n # replace with your code\n string = ''\n num1 = self.get_number(0, 0)\n num2 = self.get_number(0, 1)\n num3 = self.get_number(1, 0)\n max_num = max([num1, num2, num3])\n min_num = min([num1, num2, num3])\n if num1 == min_num and num2 == max_num:\n string += 'ul'\n elif num1 == max_num and num3 == min_num:\n string += 'ul'\n string += 'rdlu' * 2\n elif num2 == min_num and num3 == max_num:\n string += 'ul'\n string += 'rdlu'\n print '2x2 Path', string\n self.update_puzzle(string)\n return string", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n 
# Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def generate_strings(self, new_puzzle):\n return new_puzzle._start", "def shuffle_puzzle(solution: str) -> str:\r\n shuffled_solution = solution[:-1]\r\n\r\n # Do more shuffling for bigger puzzles.\r\n swaps = len(solution) * 2\r\n for _ in range(swaps):\r\n # Pick two indices in the puzzle randomly.\r\n index1, index2 = random.sample(range(len(shuffled_solution)), k=2)\r\n shuffled_solution = swap_position(shuffled_solution, index1, index2)\r\n\r\n return shuffled_solution + EMPTY", "def solution_to_string(self):\n solution_vector_index_format = [index+1 if elem == 1 else -index-1 for index, elem in enumerate(self.solution_vector)]\n return \" \".join(map(str, solution_vector_index_format))", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value", "def solution(self) -> str:\n\n # \"Starting after the cup labeled 1, collect the other cups' labels clockwise into a single string with no\n # extra characters.\"\n\n self.current = 1\n eight_cups = self.pick_up_cups(8) # 9 cups in the circle, so all cups except '1' is 8 cups.\n\n answer = ''\n for cup in eight_cups:\n answer += str(cup)\n return answer", "def __str__(self):\r\n\t\toutStr = \"\"\r\n\t\toutStr += \"Heuristic Level: \" + str(self.heuristic)\r\n\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\t\tfor row in self.board:\r\n\t\t\ttempStr = (\"\\n|\" + \" %2d |\" * self.n)\r\n\t\t\toutStr += tempStr % tuple(row)\r\n\t\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\r\n\t\treturn outStr", "def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * (col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 
'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string", "def part1_2(puzzle_input):\n [initial_state_string, configurations] = puzzle_input.split('\\n\\n')\n initial_state = re.sub('initial state: ', '', initial_state_string)\n rules_arr = configurations.split('\\n')\n rules = [re.split(' => ', line) for line in rules_arr]\n rules = {t[0]: t[1] for t in rules}\n current_state = '..........' + initial_state + '...............................................................................................................................................'\n for i in range(100): # After 100th cycle, the only change is that there is a '#' that shifts right\n next_generation_string = \"\"\n for index, pot in enumerate(current_state):\n if index == 0:\n temp_string = '..' + current_state[:3]\n elif index == 1:\n temp_string = '.' + current_state[:4]\n elif index == len(current_state) - 2:\n temp_string = current_state[-4:] + '.'\n elif index == len(current_state) - 1:\n temp_string = current_state[-3:] + '..'\n else:\n temp_string = current_state[index-2:index+3]\n if temp_string in rules:\n next_generation_string += rules[temp_string]\n else:\n next_generation_string += pot\n current_state = next_generation_string\n\n # For part 1\n part1_sum = 0\n if i == 19:\n for index, pot in enumerate(current_state):\n if pot == '#':\n part1_sum += index - 10\n print(part1_sum)\n\n # Part 2\n part2_sum = 0\n for index, pot in enumerate(current_state):\n if pot == '#':\n part2_sum += index - 10 + 50000000000 - 100\n print(part2_sum)", "def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string", "def __str__(self):\n def align_column(grid):\n board = \"\"\n for i in range(self.n):\n board += str(grid[i]) + \"\\n\"\n return board.strip()\n return (\"===Current Stage===\\n\"\n \"{}\\n\"\n \"====Goal Board=====\\n\"\n \"{}\".format(align_column(self.from_grid),\n align_column(self.to_grid)))", "def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"", "def solve_row1_tile(self, target_col):\n cur_row, cur_col = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False)\n 
self.update_puzzle(move_str)\n return move_str", "def generate_new_puzzle():\n new_puzzle = pb() \n\n # only generate solvable puzzles\n while not new_puzzle.is_solvable():\n new_puzzle = pb()\n\n return new_puzzle", "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n # If the target tile is now at position (i,0)\r\n # you can simply move tile zero to the end of row i−1\r\n current_row, current_col = temp_grid.current_position(target_row, 0)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n if current_row == target_row and current_col == 0:\r\n rights = self._width - 1 - zero_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\" \r\n # However, if the target tile is not positioned at (i,0)\r\n # we suggest the following solution strategy:\r\n else:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ruldrdlurdluurddlu\"\r\n for dummy_r in range(self._width - 1):\r\n moves_str += \"r\"\r\n \r\n print \"solve_col0_tile\"\r\n print moves_str\r\n self.update_puzzle(moves_str)\r\n print self._grid\r\n return moves_str", "def print_solution_position(solution: str, puzzle: str) -> None:\r\n print(\r\n \"Solution:\\n{}\\n\\nCurrent position:\\n{}\\n\".format(\r\n generate_grid(solution), generate_grid(puzzle)\r\n )\r\n )", "def solve_row1_tile(self, target_col):\n # replace with your code\n print target_col\n assert self.row1_invariant(target_col), 'False precondition'\n string = ''\n target = self.current_position(1, target_col)\n row_difference = 1 - target[0]\n col_difference = target_col - target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0:\n string += 'urrdl' * (col_difference - 1)\n string += 'ur'\n elif row_difference == 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n print 'Row 1 Path', string\n self.update_puzzle(string)\n assert self.row0_invariant(target_col), 'False string'\n return string", "def demo():\n\n # Initialize board with all cells having possible values 1..9\n board = board_init()\n\n # Unsolved demo puzzle\n # Hard puzzle by Arto Inkala:\n # http://abcnews.go.com/blogs/headlines/2012/06/can-you-solve-the-hardest-ever-sudoku/\n read_puzzle(board, \"8..........36......7..9.2...5...7.......457.....1...3...1....68..85...1..9....4..\")\n\n # Print unsolved puzzle\n print(\"Initial Sudoku board:\")\n print_board(board)\n\n # Solve the puzzle\n board = solve_puzzle(board)\n\n # Print the solution\n print(\"Solution:\")\n print_board(board)\n\n\n # Write output to file\n write_to_file(board)\n \n return 0", "def PrintSolution(self):\n sol = \"\"\n charMap = {\n Magnets.EMPTY: '.',\n Magnets.PLUS: '+',\n Magnets.MINUS: '-',\n }\n for row in self.Solution():\n for space in row:\n sol = sol + charMap.get(space, '?')\n sol = sol + '\\n'\n return sol", "def solve_interior_tile(self, target_row, target_col):\n # replace with your code\n string = ''\n target = self.current_position(target_row, target_col)\n row_difference = target_row - target[0]\n #print 'Row difference', row_difference\n col_difference = target_col - target[1]\n #print 'Col difference', 
col_difference\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'l' * col_difference\n if row_difference == 0 and col_difference > 1:\n string += 'urrdl' * (col_difference - 1)\n if row_difference == 1:\n string += 'urrdl' * (col_difference - 1)\n string += 'dru'\n if row_difference > 1:\n string += 'drrul' * (col_difference - 1)\n string += 'dru'\n elif col_difference < 0:\n col_difference = abs(col_difference)\n string += 'r' * col_difference\n if row_difference == 1:\n string += 'ulldr' * (col_difference - 1)\n string += 'ullddru'\n if row_difference > 1:\n string += 'dllur' * (col_difference - 1)\n string += 'dlu'\n string += 'lddru' * (row_difference - 1)\n if row_difference > 0:\n string += 'ld'\n print 'Interior Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row, target_col - 1), 'False string'\n return string", "def __str__(self):\n rep = \"\"\n for row in range(self._dim):\n for col in range(self._dim):\n rep += STRMAP[self._board[row][col]]\n if col == self._dim - 1:\n rep += \"\\n\"\n else:\n rep += \" | \"\n if row != self._dim - 1:\n rep += \"-\" * (4 * self._dim - 3)\n rep += \"\\n\"\n return rep", "def solve_col0_tile(self, target_row):\n move_str = 'ur'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(target_row, 0)\n if cur_row == target_row and cur_col == 0:\n move_str += 'r' * (self._width - 2)\n else:\n move_str += self.position_tile(target_row-1, 1, cur_row, cur_col)\n move_str += 'ruldrdlurdluurddlur'\n move_str += 'r' * (self._width - 2)\n self.update_puzzle(move_str[2:])\n return move_str", "def solve_interior_tile(self, target_row, target_col):\r\n assert self._grid[target_row][target_col] == 0\r\n moves_str = \"\"\r\n target_current_row, target_current_col = self.current_position(target_row, target_col)\r\n \r\n moves_str += self.position_tile(target_row, target_col, target_current_row, target_current_col) \r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_interior_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return 
whole_move", "def solve_row0_tile(self, target_col):\n move_str = 'ld'\n self.update_puzzle(move_str)\n cur_row, cur_col = self.current_position(0, target_col)\n if cur_row == 0 and cur_col == target_col:\n return move_str\n else:\n move_str += self.position_tile(1, target_col-1, cur_row, cur_col)\n move_str += 'urdlurrdluldrruld'\n self.update_puzzle(move_str[2:])\n return move_str", "def __str__(self):\n board = \"\"\" 0 1 2 3 4 5\\n\"\"\"\n\n for y in range(Board.board_size):\n board += str(y) + \" \"\n for x in range(Board.board_size):\n piece = self.board[x][y] if self.board[x][y] is not None else \".\"\n\n piece = str(piece).lower() if piece in self.player_1_pieces else str(piece)\n\n board += piece\n board += \" \"\n board += \"\\n\"\n return board", "def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def interactive_strategy(game: Game) -> str:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)", "def solve_puzzle(self):\n moves = self.moves\n peg_pos = self.peg_pos\n move_z = self.move_to_height\n \n print('Solving Tower of Hanoi:')\n for i, move in enumerate(moves):\n des_peg = move[0]\n des_peg_pos = peg_pos[des_peg]\n \n #move to peg\n print(' Moving to peg: '+str(des_peg)+' at: '+str(des_peg_pos))\n self.move_to(des_peg_pos[0], des_peg_pos[1], move_z)\n \n #if index is even, pickup disk, else drop disk\n if i % 2 == 0:\n print(' Picking up disk at height: '+str(move[1]))\n self.pick(move[1])\n else:\n print(' Dropping disk')\n self.drop()\n print('Finished solving puzzle')", "def __str__(self):\n\n strme = []\n move = \"move {} {} {} {}\".format(self.key, len(self.movers),\n self.pfreq, self.rmin)\n strme.append(move)\n\n for mover in self.movers:\n strme.append(self.print_mover(mover))\n\n return \"\\n\".join(strme)", "def main():\n\n game = JanggiGame()\n game.display_board()\n print(game.make_move('a7','a6'))\n print(game.make_move('h1','g3'))\n print(game.make_move('a10','a7'))\n print(game.make_move('b1','d4'))\n print(game.make_move('a7','b7'))\n print(game.make_move('c1','a2'))\n print(game.make_move('b7','b3'))\n print(game.make_move('h3','b3'))\n print(game.make_move('e7','e6'))\n print(game.make_move('i1','h1'))\n print(game.make_move('i7','h7'))\n print(game.make_move('a2','b4'))\n print(game.make_move('b10','d7'))\n print(game.make_move('b4','a6'))\n print(game.make_move('i10','i9'))\n print(game.make_move('a6','b8'))\n 
print(game.make_move('c10','b8'))\n print(game.make_move('b3','b9'))\n print(game.make_move('i9','i6'))\n print(game.make_move('a1','b1'))\n print(game.make_move('b8','c6'))\n print(game.make_move('b1','b8'))\n print(game.make_move('h8','h1'))\n print(game.make_move('g3','h1'))\n print(game.make_move('e6','d6'))\n print(game.make_move('h1','g3'))\n print(game.make_move('d6','d5'))\n print(game.make_move('d4','b1'))\n print(game.make_move('i6','e6'))\n print(game.make_move('i4','i5'))\n print(game.make_move('c6','d4'))\n print(game.make_move('c4','d4'))\n print(game.make_move('d5','d4'))\n print(game.make_move('g4','f4'))\n print(game.make_move('d4','e4'))\n print(game.make_move('f4','e4'))\n print(game.make_move('e6','e4'))\n print(game.make_move('g3','e4'))\n print(game.make_move('h10','i8'))\n print(game.make_move('e4','f6'))\n print(game.make_move('g7','g6'))\n print(game.make_move('b1','d4'))\n print(game.make_move('g6','f6'))\n print(game.make_move('d4','f7'))\n print(game.make_move('d7','f4'))\n print(game.make_move('f7','c9'))\n print(game.make_move('d10','d9'))\n print(game.make_move('a4','a5'))\n print(game.make_move('f6','f5'))\n print(game.make_move('g1','e4'))\n print(game.make_move('c7','c6'))\n print(game.make_move('b8','i8'))\n print(game.make_move('f5','e5'))\n print(game.make_move('e4','g1'))\n print(game.make_move('e5','d5'))\n print(game.make_move('i8','i9'))\n print(game.make_move('f10','f9'))\n print(game.make_move('a5','a6'))\n print(game.make_move('d5','d4'))\n print(game.make_move('a6','a7'))\n print(game.make_move('d4','d3'))\n print(game.make_move('e2','d3'))\n print(game.make_move('e9','e8'))\n print(game.make_move('i9','f9'))\n print(game.make_move('h7','h6'))\n print(game.make_move('a7','b7'))\n print(game.make_move('g10','e7'))\n print(game.make_move('f9','f7'))\n print(game.make_move('d9','d10'))\n print(game.make_move('f7','e7'))\n print(game.make_move('e8','f8'))\n print(game.make_move('b7','c7'))\n print(game.make_move('h6','h5'))\n print(game.make_move('e7','e10'))\n print(game.make_move('h5','h4'))\n print(game.make_move('c7','d7'))\n print(game.make_move('h4','h3'))\n print(game.make_move('d7','e7'))\n print(game.make_move('h3','h2'))\n print(game.make_move('e7','f7'))\n game.display_board()\n print('Red in check: '+str(game.is_in_check('red')))\n print('Blue in check: '+str(game.is_in_check('blue')))\n print(game.get_game_state())", "def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"", "def generate_ai_move(board):\n if terminal(board): # if the game is over, do nothing\n pass\n else: \n move = minimax(board) # use minimax algorithm to generate optimal move\n res = result(board, move[0])\n emit(\"update\", res)", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif 
z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def random_puzzle(N=17):\n values = dict((s, digits) for s in squares)\n for s in shuffled(squares):\n if not assign(values, s, random.choice(values[s])):\n break\n ds = [values[s] for s in squares if len(values[s]) == 1]\n if len(ds) >= N and len(set(ds)) >= 8:\n return ''.join(values[s] if len(values[s]) == 1 else '.' for s in squares)\n return random_puzzle(N) ## Give up and make a new puzzle", "def __str__(self):\r\n out = \"##\"*(self.width+1)+\"\\n\"\r\n for i in range(self.height):\r\n out += \"#\"\r\n for j in range(self.width):\r\n if self.grid[i][j] == 0:\r\n out += \"##\"\r\n else:\r\n if not self.showSolution:\r\n out += \" \"\r\n elif (i,j) in self.solution:\r\n out += \"**\"\r\n else:\r\n out += \" \"\r\n out += \"#\\n\"\r\n return out + \"##\"*(self.width+1)", "def swap_position(puzzle: str, from_index: int, to_index: int) -> str:\r\n puzzle_list = list(puzzle)\r\n\r\n # Used to swap the positons of the chracters in the array\r\n puzzle_list[from_index], puzzle_list[to_index] = (\r\n puzzle_list[to_index],\r\n puzzle_list[from_index],\r\n )\r\n return \"\".join(puzzle_list)", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target 
position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def gen_url(self):\n self.url = \"https://www.cubedb.net/?rank=3&title={}&time={}&scramble=\".format(self.name_of_solve, self.time_solve)\n for move in self.scramble.split():\n if \"\\'\" in move:\n move.replace(\"\\'\", \"-\")\n self.url += \"{}_\".format(move)\n self.url += \"&alg=\"\n count = 0\n for move in self.solve_stats:\n if self.comms_unparsed_bool:\n if self.comms_unparsed_bool:\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}%0A//{}\".format(move[\"comment\"].split(\"mistake\")[0],\n \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"%0A\") != -1:\n alg = self.url[self.url.rfind(\"%0A\") + 3:]\n self.url = self.url[:self.url.rfind(\"%0A\") + 3] + \"%0A//{}%0A\".format(piece) + alg\n else:\n alg = self.url[self.url.rfind(\"=\") + 1:]\n self.url = self.url[:self.url.rfind(\"=\") + 1] + \"%0A//{}%0A\".format(piece) + alg\n self.url += self.comms_unparsed[count]\n count += 1\n self.url += \"// {} %0A\".format(move[\"comment\"])\n\n\n else:\n if \"move\" in move:\n if move[\"move\"] != \"\":\n if \"\\'\" in move[\"move\"]:\n move[\"move\"].replace(\"\\'\", \"-\")\n self.url += \"{}_\".format(move[\"move\"])\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}%0A//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"%0A\") != -1:\n alg = self.url[self.url.rfind(\"%0A\") + 3:]\n self.url = self.url[:self.url.rfind(\"%0A\") + 3] + \"//{}%0A\".format(piece) + alg\n else:\n alg = self.url[self.url.rfind(\"=\") + 1:]\n self.url = self.url[:self.url.rfind(\"=\") + 1] + \"//{}%0A\".format(piece) + alg\n\n self.url += \"// {} %0A\".format(move[\"comment\"])\n else:\n self.url += \"// {} %0A\".format(move[\"comment\"])", "def print_solution():\n pass", "def solve_interior_tile(self, target_row, target_col):\n cur_row, cur_col = self.current_position(target_row, target_col)\n move_str = self.position_tile(target_row, target_col, cur_row, cur_col)\n self.update_puzzle(move_str)\n return move_str", "def solve(self, board: List[List[str]]) -> None:", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile 
position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: 
x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def solve_col0_tile(self, target_row):\n solved_row, solved_col = self.current_position(target_row, 0)\n movements = \"\"\n if solved_row == target_row - 1 and solved_col == 0:\n movements = \"u\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ur\")\n movements = \"ur\" + local_board.move_tile(\n target_row - 1, 1, target_row *\n self.get_width()) + \"ruldrdlurdluurddlu\"\n movements += \"r\" * (self.get_width() - 1)\n self.update_puzzle(movements)\n return movements", "def convertBoard(self):\n \n board = \"\"\n \n for m in self.squares:\n board += str(convertMarker(m)) + \" \"\n \n return board", "def get_all_possible_moves():\r\n \"\"\"\r\n Creates the labels for the universal chess interface into an array and returns them\r\n \"\"\"\r\n labels_array = []\r\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\r\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\r\n promoted_to = ['q', 'r', 'b', 'n']\r\n\r\n for l1 in range(8):\r\n for n1 in range(8):\r\n destinations = [(t, n1) for t in range(8)] + \\\r\n [(l1, t) for t in range(8)] + \\\r\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\r\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\r\n [(l1 + a, n1 + b) for (a, b) in\r\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]\r\n for (l2, n2) in destinations:\r\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):\r\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\r\n labels_array.append(move)\r\n for l1 in range(8):\r\n l = letters[l1]\r\n for p in promoted_to:\r\n labels_array.append(l + '2' + l + '1' + p)\r\n labels_array.append(l + '7' + l + '8' + p)\r\n if l1 > 0:\r\n l_l = letters[l1 - 1]\r\n labels_array.append(l + '2' + l_l + '1' + p)\r\n labels_array.append(l + '7' + l_l + '8' + p)\r\n if l1 < 7:\r\n l_r = letters[l1 + 1]\r\n labels_array.append(l + '2' + l_r + '1' + p)\r\n labels_array.append(l + '7' + l_r + '8' + p)\r\n return labels_array", "def solve_row1_tile(self, target_col):\n assert target_col > 1, \"target_col must be > 1\"\n assert self.row1_invariant(target_col), \"tiles to right and below incorrectly ordered\"\n\n # Moves correct_tile to the target position (1, target_col),\n # and the zero tile above the target position at (0, target_col). 
\n correct_tile = self.current_position(1, target_col)\n move_str = self.position_tile(1, target_col, correct_tile) \n move_str += \"ur\"\n self.update_puzzle(move_str)\n\n assert self.row0_invariant(target_col)\n \n return move_str", "def __str__(self):\n board = ''\n board_2 = ''\n\n for row in self.from_grid:\n for space in row:\n board += ' ' + space\n board += '\\n'\n\n for row in self.to_grid:\n for space in row:\n board_2 += ' ' + space\n board_2 += '\\n'\n\n return 'Current State:\\n' + board + 'Target State:\\n' + board_2", "def solve(self, board: List[List[str]]) -> None:\n rows = len(board)\n cols = len(board[0])\n\n def explore(i, j):\n if i < 0 or i >= rows:\n return\n if j < 0 or j >= cols:\n return\n if board[i][j] != \"O\":\n return\n board[i][j] = \"Y\"\n explore(i + 1, j)\n explore(i - 1, j)\n explore(i, j + 1)\n explore(i, j - 1)\n\n for i in [0, rows - 1]:\n for j in range(cols):\n explore(i, j)\n\n for j in [0, cols - 1]:\n for i in range(rows):\n explore(i, j)\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] == \"Y\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"", "def phase_8(self):\n\n def problem_1():\n test_board_1 = board(5, 5, snake_init_coordinates = [4, 2], fruit_init_coordinates = [0, 2])\n render = Render_engine('terminal', test_board_1)\n\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nafter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nafter move up\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"up\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nafter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n \n def problem_2():\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [3, 2])\n test_board_1.Snake_init_from_lst([[3, 1], [4, 1], [4, 2], [4, 3], [4, 4], [3, 4], [2, 4], [1, 4], [0, 4], [0, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nAfter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n\n def problem_3():\n try:\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [1, 2])\n test_board_1.Snake_init_from_lst([[3,4], [3, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nAfter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n except GameBoardIndexError as error:\n print(\"Snake crash because\", str(error))\n\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n \n def problem_4():\n try:\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [1, 2])\n 
test_board_1.Snake_init_from_lst([[3, 3], [3, 2], [3, 1], [4, 1], [4, 2], [4, 3], [4, 4], [3, 4], [2, 4], [1, 4], [0, 4], [0, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nAfter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n except GameBoardIndexError as error:\n print(\"Snake crash because\", str(error))\n\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n\n problem_1()\n problem_2()\n problem_3()\n problem_4()", "def solve_2x2(self):\n \n assert self.get_number(1,1) == 0, \"zero tile should be at row 1, col 1\"\n assert self.row1_invariant(1), \"tiles to right and below incorrectly ordered\"\n \n # Moves the zero tile to (0,0).\n self.update_puzzle(\"lu\")\n\n # Repositions the upper left 2x2 part up to 3 times, \n # each time checking whether the puzzle is solved.\n rotation_num = 0\n if self.row0_invariant(0) == False:\n for dummy_rotation in range(3):\n while self.row0_invariant(0) == False:\n rotation_num += 1\n self.update_puzzle(\"rdlu\")\n\n assert self.row0_invariant(0), \"tiles to right and below incorrectly ordered\"\n \n move_str = \"lu\" + (\"rdlu\" * rotation_num) \n return move_str", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def sim_move(self, state, move):\n out = ''\n for val in self.moves[move]:\n out += state[val]\n return out", "def solve(self):\r\n values = 'abcdef'\r\n solution = [\"X\"] * NUMBLANKS\r\n self.create_list(solution)\r\n\r\n def extend_solution(position):\r\n for value in values:\r\n solution[position] = value\r\n #self.print_board(solution)\r\n #print(''.join(solution))\r\n if self.check_no_conflicts(solution):\r\n # solution = solution2\r\n if position >= NUMBLANKS - 1 or extend_solution(position + 1):\r\n 
return solution\r\n else:\r\n solution[position] = \"X\"\r\n if value == values[-1]:\r\n solution[position - 1] = \"X\"\r\n if position < NUMBLANKS - 1:\r\n solution[position + 1] = \"X\"\r\n\r\n return None\r\n\r\n return extend_solution(0)", "def solve_row0_tile(self, target_col):\r\n # move the zero tile from position (0,j) to (1,j−1) \r\n # using the move string \"ld\" \r\n moves_str = \"\"\r\n moves_str += \"ld\"\r\n # check whether target tile is at position (0,j).\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str) \r\n current_row, current_col = temp_grid.current_position(0, target_col)\r\n zero_row, zero_col = temp_grid.current_position(0, 0)\r\n \r\n # If target tile is not at position (0,j).\r\n # reposition the target tile to position (1,j−1) \r\n # with tile zero in position (1,j−2).\r\n if current_row != 0 or current_col != target_col:\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"urdlurrdluldrruld\"\r\n \r\n self.update_puzzle(moves_str)\r\n print \"solve_row0_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve_row0_tile(self, target_col):\n solved_row, solved_col = self.current_position(0, target_col)\n movements = \"\"\n if solved_col == target_col - 1 and solved_row == 0:\n movements = \"ld\"\n else:\n local_board = self.clone()\n local_board.update_puzzle(\"ld\")\n movements = \"ld\" + local_board.move_tile(\n 1, target_col - 1, target_col) + \"urdlurrdluldrruld\"\n self.update_puzzle(movements)\n return movements", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n m, n = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < m or not 0 <= y < n or board[x][y] != \"O\":\n return\n direction = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n board[x][y] = \"Y\"\n for d in direction:\n dfs(x+d[0], y+d[1])\n\n for i in range(m):\n dfs(i, 0)\n dfs(i, n-1)\n for i in range(1, n-1):\n dfs(0, i)\n dfs(m-1, i)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"Y\":\n board[i][j] = \"O\"", "def __str__(self):\n\n strme = []\n move = \"move {} {} {}\".format(self.key, len(self.movers), self.pfreq)\n strme.append(move)\n\n for mover in self.movers:\n strme.append(self.print_mover(mover))\n\n return \"\\n\".join(strme)", "def str_with_solution(self, solution):\n bak = self.__data.copy()\n for k, v in solution.items():\n self.__data[k] = v\n res = str(self)\n self.__data = bak\n return res", "def __str__(self):\n result = \"\"\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 5:\n result += \" x\"\n elif self.board[i][j] == 7:\n result += \" о\"\n else:\n result += \" #\"\n result += \"\\n\"\n return result", "def move_simplifier(move_input) -> str:\n short_input = move_input.strip().lower()\n short_input = short_input.replace(\"in rage\", \"\")\n\n for old, new in const.REPLACE.items():\n short_input = short_input.replace(old, new)\n\n # cd works, ewgf doesn't, for some reason\n if short_input[:2].lower() == 'cd' and short_input[:3].lower() != 'cds':\n short_input = short_input.lower().replace('cd', 'fnddf')\n if short_input[:2].lower() == 'wr':\n short_input = short_input.lower().replace('wr', 'fff')\n return short_input", "def print_puzzle(state):\r\n \r\n print('-----')\r\n for i in range(4):\r\n print('|', end=\"\")\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n print(\" |\", end=\"\")\r\n else:\r\n print(\"\", state[i][j], \"|\", 
end=\"\")\r\n if i == 0:\r\n break\r\n print('\\n-------------')", "def __str__(self):\n line = ''\n line += self.board_state.__str__()\n line += self.move.__str__()\n line += '\\n'\n return line", "def send_challenge_solution(self):\n post = DOMAIN + self.maze_path\n solution = \"\".join(s for s in self.solution)\n print(post)\n req = requests.post(post, json={'directions': solution})\n r = req.json()\n print(r)\n try:\n if r['result'] == 'correct':\n self.completion = True\n except KeyError as error:\n print(error)", "def get_solution(self, display=False) -> str:\n\n if display:\n print()\n\n # Start the game\n self._open_zeros(display)\n\n # Find all mines or enter the fail state\n while self._nfound < self._nmines:\n self._mark_spaces(display)\n board_altered = self._open_safe_spaces(display)\n if not board_altered and len(self._unknowns):\n # Create exclusion zones\n self._make_ties()\n\n # self._expand_ties()\n\n # Find safe spaces\n safe_spaces = {pos: zone for unknown in self._unknowns.values() for zone, num_undiscovered in\n unknown.zones.items() for pos in zone if num_undiscovered == 0}\n\n # Open safe spaces\n for pos, zone in safe_spaces.items():\n self._open(*pos)\n if display and safe_spaces:\n print('After \"Logical Analysis - Safe Spaces\":')\n print(repr(self), \"\\n\")\n\n # Find mines\n mines = {pos: zone for unknown in self._unknowns.values() for zone, num_undiscovered in\n unknown.zones.items() for pos in zone if num_undiscovered == len(zone)}\n\n # Mark mines\n for pos, zone in mines.items():\n self._mark(*pos)\n if display and mines:\n print('After \"Logical Analysis - Mines\":')\n print(repr(self), \"\\n\")\n\n \"\"\"remaining_zones = {pos: zone for unknown in self._unknowns.values() for zone, num_undiscovered in \n unknown.zones.items() for pos in zone}\"\"\"\n\n # Enter fail state if no changes were made to the board\n if not safe_spaces and not mines:\n if display:\n print(\"> Unable to solve any further without guessing...\\n\")\n return '?'\n\n # Open remaining '?'s\n for pos in list(self._unknowns):\n self._open(*pos)\n\n if display:\n print(f'Game completed. 
Finished board:\\n{repr(self)}')\n return str(self).strip()", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def solve_row1_tile(self, target_col):\r\n row, col = self.current_position(1, target_col)\r\n move = self.move_to_target(1, target_col, row, col)\r\n # for next move\r\n move += \"ur\"\r\n \r\n self.update_puzzle(move)\r\n return move", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n 
zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def solve(self, board: List[List[str]]) -> None:\r\n if not board:\r\n return\r\n m, n = len(board), len(board[0])\r\n board_filter = lambda p: 0 <= p[0] < m and 0 <= p[1] < n and board[p[0]][p[1]] == 'O'\r\n queue = list(filter(board_filter, [(x, y) for r in range(max(m, n)) \r\n for x, y in ((r, 0), (r, n-1), (0, r), (m-1, r))]))\r\n while queue:\r\n x, y = queue.pop()\r\n board[x][y] = 'W'\r\n 
queue.extend(list(filter(board_filter, ((x-1, y), (x+1, y), (x, y-1), (x, y+1)))))\r\n for row in board:\r\n for i, c in enumerate(row):\r\n row[i] = 'XO'[c=='W']", "def __str__(self):\n return \"{}\\n\\n{}\".format(self.puzzle,\n \"\\n\".join([str(x) for x in self.children]))", "def solve(self, board: 'List[List[str]]') -> 'None':\n if not board:\n return\n m, n = len(board), len(board[0])\n save = [ij for k in range(m + n) for ij in ((0, k), (m - 1, k), (k, 0), (k, n - 1))]\n while save:\n i, j = save.pop()\n if -1 < i < m and -1 < j < n and board[i][j] == 'O':\n board[i][j] = 'S'\n save += (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)\n for row in board:\n for i, c in enumerate(row):\n row[i] = 'XO'[c == 'S']", "def simplify_puzzle(board, done_cells):\n # Initialization\n not_done = True\n # Main loop for propagation\n while not_done:\n old_length = get_length(board)\n for i in range(n):\n for j in range(n):\n # If the value is the only possibility, propagate its effects\n # Append the coordinates to a list to keep track of what has already been done_cells\n if len(board[i][j]) == 1:# and (i,j) not in done_cells:\n done_cells.append((i,j))\n eliminate(board, i,j)\n # If the value is the only possibility within a row/column/square\n # fix that value and propagate its effects\n elif len(board[i][j]) > 1:\n check_single_value(board, done_cells, i, j)\n # Check if nothing changes or if the puzzle is solved\n new_length = get_length(board)\n if new_length == old_length:\n not_done = False\n return board", "def solve(self):\n print(\"Problem %s Answer: %s\" % (self.number, self.solution()))", "def buildpuzzle(self):\r\n self.puzzle = copy.deepcopy(self.rows)\r\n if self.difficulty == 1:\r\n self.removedigits(1)\r\n if self.difficulty == 2:\r\n self.removedigits(2)\r\n if self.difficulty == 3:\r\n self.removedigits(3)", "def print_puzzle(board):\n\n row_size = get_row_size(board)\n output = '\\n'\n\n for idx, val in enumerate(board):\n output += \" {} \".format(val)\n if idx % row_size == row_size - 1:\n output += \"\\n\"\n\n return output", "def write_puzzle(to_file: str, solution: str):\n with open(to_file, \"w\") as file:\n file.write(solution)", "def update_game_state(self):\n # if board is not filled out, returns a valid move message\n for row in self.board:\n if 0 in row:\n return \"Valid input\"\n\n # if board is filled out, verifies if solution is valid and updates game state\n self.game_state = alg.check_solution(self.board)\n return self.game_state", "def __str__(self) -> str:\r\n output: str = \"\"\r\n\r\n for row_i in range(Board._NUM_ROWS):\r\n for col_i in range(Board._NUM_COLS):\r\n pos: Pos2D = Pos2D(col_i, row_i)\r\n output += (\"{} \".format(self.squares[pos].get_representation()))\r\n # Finished row, add new line.\r\n output += \"\\n\"\r\n\r\n return output", "def solve_2x2(self):\r\n assert self.row1_invariant(1)\r\n pos_1_0 = self.get_number(1, 0)\r\n pos_0_0 = self.get_number(0, 0)\r\n pos_0_1 = self.get_number(0, 1)\r\n # create grid and solve individual cases\r\n grid = [pos_1_0, pos_0_0, pos_0_1]\r\n if grid == [self.get_width(), 1, self.get_width() + 1]:\r\n move = \"ul\"\r\n elif grid == [1, self.get_width() + 1, self.get_width()]:\r\n move = \"lurdlu\"\r\n elif grid == [self.get_width() + 1, self.get_width(), 1]:\r\n move = \"lu\"\r\n self.update_puzzle(move)\r\n return move", "def checkio(game_result: List[str]) -> str:\n columns = list(map(''.join, zip(*game_result)))\n diagonals = list(map(''.join, zip(*[(r[i], r[2 - i]) for i, r in enumerate(game_result)])))\n 
lines = game_result + columns + diagonals\n if 'XXX' in lines:\n return 'X'\n if 'OOO' in lines:\n return 'O'\n return 'D'" ]
[ "0.7559481", "0.73133874", "0.69816166", "0.66144603", "0.6516676", "0.63399726", "0.6325148", "0.6325148", "0.6299051", "0.6299051", "0.6299051", "0.6297025", "0.62952787", "0.6252764", "0.6244446", "0.6211515", "0.61357105", "0.6083045", "0.6063561", "0.6049944", "0.602346", "0.60146224", "0.5877995", "0.58751774", "0.58524394", "0.5840316", "0.5820368", "0.5808467", "0.5736328", "0.57171214", "0.57126945", "0.5704984", "0.56979954", "0.56906664", "0.56794447", "0.56731904", "0.56703484", "0.5664075", "0.56469446", "0.56336886", "0.56334734", "0.56332994", "0.56246066", "0.5619862", "0.5576606", "0.5547956", "0.55432945", "0.55378973", "0.5528637", "0.552671", "0.5525852", "0.55210936", "0.5519365", "0.55164677", "0.55090624", "0.5504902", "0.5500162", "0.54908866", "0.5481495", "0.545569", "0.5452343", "0.5445236", "0.54393244", "0.5438978", "0.5431901", "0.5431255", "0.5424614", "0.5420033", "0.54187286", "0.5417277", "0.5416201", "0.54140353", "0.5408603", "0.54044926", "0.5388384", "0.5379698", "0.53762066", "0.5350646", "0.5347149", "0.53461766", "0.5343092", "0.533981", "0.5339515", "0.53319395", "0.53261906", "0.5324382", "0.53183866", "0.5318265", "0.53127867", "0.5310188", "0.5308387", "0.53058314", "0.53057444", "0.5293753", "0.5293425", "0.5293079", "0.5291912", "0.52903426", "0.5289559", "0.5288493" ]
0.7348655
1
Clear properties. Mapping properties show up in CXSMILES and make validation less readable.
def _getProductCXSMILES(product):
    for a in product.GetAtoms():
        for k in a.GetPropsAsDict():
            a.ClearProp(k)
    return Chem.MolToCXSmiles(product)
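A minimal usage sketch, assuming RDKit is installed; the helper is meant to be applied to product mols returned by RunReactants, whose atoms carry mapping properties that would otherwise leak into the CXSMILES (the SMARTS/SMILES inputs and the printed value are illustrative):

from rdkit import Chem
from rdkit.Chem import rdChemReactions

# Stereo-inverting template; matched product atoms pick up properties such
# as mapping numbers, which _getProductCXSMILES clears before serializing.
rxn = rdChemReactions.ReactionFromSmarts('[C@:1]>>[C@@:1]')
prods = rxn.RunReactants([Chem.MolFromSmiles('F[C@H](Cl)Br')])
print(_getProductCXSMILES(prods[0][0]))  # e.g. 'F[C@@H](Cl)Br'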
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_properties(self):\n self.properties.clear()", "def clearProperties(*args):", "def clearProperties(*args):", "def clearProperties(*args):", "def clearProperties(*args):", "def reset(self):\n self.valid_passes = set()\n self.property_set.clear()", "def reset_properties(self):\n self.__elements_count = 0\n self.__elements = {}", "def clearProperty(*args):", "def clearProperty(*args):", "def clearProperty(*args):", "def clearProperty(*args):", "def clear(self) -> None:\n # Delete these so the .by_class/name values are cleared.\n self['classname'] = 'info_null'\n del self['targetname']\n self._keys.clear()\n # Clear $fixup as well.\n self._fixup = None", "def reset(self):\r\n instdict = self.__dict__\r\n classdict = self.__class__.__dict__\r\n # To reset them, we simply remove them from the instance dict. At that\r\n # point, it's as if they had never been computed. On the next access,\r\n # the accessor function from the parent class will be called, simply\r\n # because that's how the python descriptor protocol works.\r\n for mname, mval in classdict.items():\r\n if mname in instdict and isinstance(mval, OneTimeProperty):\r\n delattr(self, mname)", "def clear(self):\n self._map = {}", "def clearMap(self):\n for key in self.componentMap.keys():\n del self.componentMap[key][:]", "def clear_cached_attributes(self):\n setattr(self, '_atoms', None)\n setattr(self, '_bonds', None)\n setattr(self, '_rings', None)\n setattr(self, '_ring_systems', None)", "def clean(self):\n all_props = self.__class__.CONFIG_PROPERTIES\n for prop_name in self._modified:\n attr_config = all_props.get(prop_name)\n if attr_config and attr_config.input_func:\n self._config[prop_name] = attr_config.input_func(self._config[prop_name])\n self._modified.clear()", "def _clean(self):\n map(self.__delitem__, self.keys())\n self._original = []\n self._columns = {}\n self._modified, self._deleted = {}, {}", "def clear(self):\n self.__dict__.clear()", "def clear(self):\n\t\tself.membersWithErrors.clear()", "def _reset_derived_prop_(self):\n self._derived_properties[\"photosamplers\"] = None", "def __reset__(self):\n\n for i in self.__dict__.keys():\n self.__dict__[i] = None", "def clear(self) :\n self.__dict__ = {}", "def clear(self):\n self.raster_path_line.clear()\n self.labels_path.clear()\n self.shapefile_path.clear()\n self.costumelabels.clear()\n self.layer_name.clear()\n self.class_name.clear()\n self.idfield.clear()", "def clear_attrs(self):\n self._attributes.clear()", "def resets_attributes(self):\n \n self.path_dict = None\n self.poss_dict = None\n self.check_dict = None\n self.long_dict = None\n self.rep_counter = 0\n self.cap_counter = 0\n \n self.board = []\n self.coords = []\n self.chess_coords = []\n self.empty = \" \"", "def clear(self):\n for key in self.keys():\n del self[key]", "def clear(self):\r\n self.firstname_value.set('')\r\n self.lastname_value.set('')\r\n self.id_number_value.set('')\r\n self.country_value.set('')", "def remove_property(class_, name):\n mapper = class_.mapper\n table = class_.__table__\n columns = class_.mapper.c\n column = columns[name]\n del columns._data[name]\n del mapper.columns[name]\n columns._all_cols.remove(column)\n mapper._cols_by_table[table].remove(column)\n mapper.class_manager.uninstrument_attribute(name)\n del mapper._props[name]", "def reset ():\n global __variant_explicit_properties\n\n __variant_explicit_properties = {}", "def clear(self):\n self.name = ''\n self.public_email = False\n self.avatar = None\n self.bio = ''\n self.website = ''\n self.twitter = 
''\n self.facebook = ''\n self.mozillians = ''\n self.irc_handle = ''\n self.city = ''\n self.is_fxa_migrated = False\n self.fxa_uid = ''", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def clear(self):\n LongObjectHashMap.self.clear()", "def clear(self):\n LongObjectHashMap.self.clear()", "def remove_all_fields(self):\n self.fields = None", "def reset(self):\n if hasattr(self, \"W\"):\n del self.W\n if hasattr(self, \"T\"):\n del self.T\n if hasattr(self, \"P\"):\n del self.P", "def removePropertyMap(self, propertyName: unicode) -> bool:\n ...", "def clear_address(self): #DONE\n for component_name in self.__keys:\n self.address[component_name] = Component(component_name, '')", "def clear(self):\n super(ReadOnlyDict, self).clear() # pragma: no cover", "def reset(self):\n self._unset_defaults_and_overrides()\n self.clear()", "def clear_all(self):\n self._data = {}\n self.uncache()\n self.dirty = True\n self.shipping_method = None\n self.payment_method = None\n self.customer_comment = \"\"", "def clear(self) -> None:\n self.raw = ''\n self.extent = None # type: ignore[assignment]\n self._lines = []\n self.items = None\n self.seen_headers = {}\n return", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_", "def reset(self):\n self._maps = {}", "def clear(self):\n\n for a in self.formats + self.other_clear:\n setattr(self, a, None)\n self.filename = None\n self.timestamp = None\n self.lastfail = None", "def cleanup(self):\n for key in list(self.__dict__.keys()):\n delattr(self, key)", "def reset_map(self):\n self.x = None\n self.X = None\n self.y = None\n self.Y = None\n self.data = None\n self.sampling = None\n self.size = None", "def clear_attributes(self):\n self.attrs = etad.AttributeContainer()", "def clear_keymap(self):\n self.keymap = {}", "def clear(self):\n # Orphan all objects\n for obj in self.uow:\n state(obj).session = None\n self.uow.clear()\n self.imap.clear()", "def clear_field_values(self):\n\t\tlogging.info(\"Clearing values in the field[] dictionary of the object\")\n\t\tlogging.debug(\"Before = \" + str(self.field))\n\t\tfor key, value in self.fields.items():\n\t\t\tself.field[str(key)] = None\n\t\tlogging.debug(\"After = \" + str(self.field))\n\t\treturn", "def removeAll(self):\n self.pDict.clear()", "def reset_property(self, _, prop):\n dst = prop.get_merged_equivalent().clone()\n create_pseudo_values([dst])\n cmd = commands.ReplaceObject(obj=prop, repl=dst)\n self.execute(cmd)\n\n # Reset the view to make sure the changes are properly displayed.\n self.reset_value_view(None)", "def _reset(self):\r\n self.pop(\"signature\", False)\r\n self.pop(\"signatures\", False)\r\n self.pop(\"signSignature\", False)\r\n self.pop(\"secondSignature\", False)\r\n self.pop(\"id\", False)", "def _reset(self):\n self._set(\"_n_init_features\", None)\n self._set(\"_n_output_features\", None)\n self._set(\"_n_intervals\", None)\n self._set(\"_mapper\", {})\n self._set(\"_cpp_preprocessor\", None)\n self._set(\"_fitted\", False)", "def reset(self):\n self.bbox = None\n self.true = None\n self.meta = None", "def _reset(self):\n [delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]", "def reset(self):\n self.task = None\n self.properties['Task'] = ''\n self.properties['Reward'] = ''\n self.properties['Category'] = ''\n self.properties['Last Edit'] = 
int(self._map.now().strftime(\"%j\"))\n self.properties['Icon'] = ''\n self.properties['Shadow Pokemon'] = ''\n self.properties['Shadow Time'] = ''\n self.properties['Old_Category'] = ''\n self.properties['Old_Icon'] = ''", "def clear(self):\n for key in self.__data.keys():\n del self.__data[key]", "def set_old_props(self):\n self.old_props = {k: v for k, v in self.node.props.items()}", "def _strip_map(mols):\n for m in mols:\n [a.ClearProp('molAtomMapNumber')\n for a in m.GetAtoms() if a.HasProp('molAtomMapNumber')]\n return mols", "def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_", "def full_clear(self):\n self.clear()\n self.class_hooks.clear()", "def remove_property(self, name):\n if (not name in self.properties):\n return\n del self.properties[name]", "def unset_some_properties(device: Device, test_cfg: TestCfg):\n cprint(\"\\nUnset some device owned properties.\", color=\"cyan\", flush=True)\n for key, _ in test_cfg.mock_data.items():\n if key not in [\"datetime_endpoint\", \"booleanarray_endpoint\"]:\n device.unset_property(test_cfg.interface_device_prop, \"/sensor-id/\" + key)\n time.sleep(0.005)\n\n cprint(\"\\nUnset some server owned properties.\", color=\"cyan\", flush=True)\n for key, _ in test_cfg.mock_data.items():\n if key not in [\"binaryblob_endpoint\", \"stringarray_endpoint\"]:\n delete_server_interface(test_cfg, test_cfg.interface_server_prop, \"/sensor-id/\" + key)\n time.sleep(0.005)", "def Clear(self) -> None:", "def clear(self):\n self.globalDefines = {}\n self.axiom = self.setAxiomFromString(\"\")\n self.clearProductions()\n self.niterations = 1\n self.resultPString = None", "def clear_all(self):\n self._set_all(0x00, 0x00, 0x00)", "def clearKeys(self):\n for attr in self._filter():\n pm.cutKey(attr)", "def clearData(self):\r\n self.title.setVal(\"\"),\r\n self.first.setVal(\"\"),\r\n self.middle.setVal(\"\"),\r\n self.last.setVal(\"\"),\r\n self.suffix.setVal(\"\"),\r\n self.phone.setVal(\"\"),\r\n self.ext.setVal(\"\"),\r\n self.email.setVal(\"\"),\r\n self.affiliation.setVal(\"\")\r\n self.fullName.setVal(\"\")", "def clear(self):\n self._post_init()", "def _resetPrefixDict(self):\r\n self._getPrefixDict().clear()", "def reset(self):\n self.fontname = None\n self.size = -1\n self.valign = None\n self.bold = False\n self.italics = False\n self.smallcaps = False", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def clear(self):\n self.knownStrings.clear()", "def clear(self) -> None:", "def _remove_swarm_keys(self):\n for key in SWARM_PROPERTIES:\n self.spec.pop(key, None)", "def clear(self):\n dict.clear(self)\n self._sequence = []", "def clear_fields(self):\n\n if not self.is_root:\n return\n self.field_data.clear()", "def reset(self):\n # must NOT reset color map here, otherwise we loose provided configs by user,\n # which are more important in this case for result images vs whatever the model task specified\n self.class_names = None\n self._map = None", "def reset_attributes(self):\n\n self.ell = None\n self.ell_jacobian = None\n self.ell_hessian = None\n\n self.ell_hyperparam = None\n self.ell_jacobian_hyperparam = None\n 
self.ell_hessian_hyperparam = None\n\n self.Y = None\n self.Cinv = None\n self.C = None\n self.Mz = None\n self.MMz = None\n self.sigma2 = None\n self.sigma02 = None\n self.Kninv = None\n self.KnpKninv = None\n\n self.Y_C_Mz_hyperparam = None\n self.sigma_hyperparam = None\n self.MMz_hyperparam = None\n self.Kninv_KnpKninv_hyperparam = None", "def clear(cls):\r\n cls._goals_by_phase.clear()\r\n cls._phase_by_goal.clear()", "def clear(self):\n self._store = {}", "def clearArrayProperty(self, prop, verbose = 1):\n\n s = self.atoms.shape[0]\n if prop.lower() == \"w_sep_c\":\n self.w_sep_c = np.zeros((s, 1))\n elif prop.lower() == \"w_seps_c\":\n self.w_seps_c = np.zeros((s, 1))\n elif prop.lower() == \"e_int_c\":\n self.e_int_c = np.zeros((s, 1))\n elif prop.lower() == \"w_sep_d\":\n self.w_sep_d = np.zeros((s, 1))\n elif prop.lower() == \"w_seps_d\":\n self.w_seps_d = np.zeros((s, 1))\n elif prop.lower() == \"e_int_d\":\n self.e_int_d = np.zeros((s, 1))\n\n if verbose > 0:\n string = \"Property: %s, reset (set to 0) and reshaped to (%i,1)\" % (prop, s)\n ut.infoPrint(string)", "def clear_map(self):\n self.rooms = []\n\n self.dungeon.clear_dungeon()", "def clear(self):\r\n self._instance = None\r\n self.__resources = {}\r\n self.__m2m = {}\r\n self._suspended_permissions = {}\r\n self._suspended_traversing = {}\r\n self._suspended_minimals = {}\r\n self._empty = {}\r\n self._synchronized_fields = set()\r\n self._synchronized_memberships = False\r\n self._synchronized_permissions = set()\r\n self._suspended_inv_minimals = {}", "def _reset(lp):\n if hasattr(lp, \"solverModel\"):\n delattr(lp, \"solverModel\")\n for v in lp.variables():\n if hasattr(v, \"_xprs\"):\n delattr(v, \"_xprs\")\n for c in lp.constraints.values():\n if hasattr(c, \"_xprs\"):\n delattr(c, \"_xprs\")", "def clear(self):\n pass", "def clear(self):\n pass", "def clear(self):\n pass", "def cleanup(self):\n for attribute in self._all_db_field_names:\n delattr(self, attribute)", "def _clear_cached_properties(self, setting, **kwargs):\n if setting == 'MEDIA_ROOT':\n self.__dict__.pop('base_location', None)\n self.__dict__.pop('location', None)\n elif setting == 'MEDIA_URL':\n self.__dict__.pop('base_url', None)\n elif setting == 'FILE_UPLOAD_PERMISSIONS':\n self.__dict__.pop('file_permissions_mode', None)\n elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS':\n self.__dict__.pop('directory_permissions_mode', None)", "def reset(self):\n self.data = {}\n self.is_bound = False\n self._errors = None", "def clear(self):\n ...", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):" ]
[ "0.8134951", "0.7525838", "0.7525838", "0.7525838", "0.7525838", "0.6747215", "0.6691966", "0.66214865", "0.66214865", "0.66214865", "0.66214865", "0.65654373", "0.6476267", "0.64220273", "0.64089143", "0.6397436", "0.6378762", "0.6357874", "0.62982965", "0.6284592", "0.62551296", "0.6242108", "0.6222831", "0.6207823", "0.61974955", "0.61671", "0.6154139", "0.6144113", "0.61137664", "0.6070633", "0.6068794", "0.6061452", "0.6028115", "0.6028115", "0.60249805", "0.6021637", "0.60155267", "0.60110766", "0.5992698", "0.5976229", "0.59662616", "0.59509397", "0.59435177", "0.5942154", "0.593895", "0.5935717", "0.59181273", "0.588302", "0.58687586", "0.58619446", "0.5861002", "0.5854894", "0.58530676", "0.5843799", "0.58306056", "0.58200854", "0.5805998", "0.58037895", "0.58003485", "0.5800249", "0.5794563", "0.57923", "0.5791024", "0.57777596", "0.5777267", "0.5772149", "0.576034", "0.5758525", "0.57507914", "0.5739744", "0.5739353", "0.5735869", "0.57225364", "0.5719995", "0.5719995", "0.5712934", "0.57112956", "0.5702865", "0.57003295", "0.5687167", "0.5685976", "0.56824136", "0.56747586", "0.56723267", "0.56721765", "0.56643796", "0.5660295", "0.5659632", "0.56499285", "0.56499285", "0.56499285", "0.5642381", "0.5642036", "0.5636163", "0.56354237", "0.56269157", "0.56269157", "0.56269157", "0.56269157", "0.56269157", "0.56269157" ]
0.0
-1
Run a reaction and combine the products into a single string. Makes errors somewhat more readable.
def _reactAndSummarize(rxn_smarts, *smiles):
    rxn = rdChemReactions.ReactionFromSmarts(rxn_smarts)
    mols = [Chem.MolFromSmiles(s) for s in smiles]
    products = []
    for prods in rxn.RunReactants(mols):
        products.append(' + '.join(map(_getProductCXSMILES, prods)))
    products = ' OR '.join(products)
    return products
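A minimal usage sketch, reusing the RDKit imports from the sketch above and mirroring the test_reaction_inverts_stereo case that appears among the negatives below (expected output hedged):

# One reactant, one product set: the summary is a single CXSMILES;
# multiple product sets would be joined with ' OR '.
summary = _reactAndSummarize('[C@:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')
# expected: 'F[C@@H](Cl)Br |o1:1|'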
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reaction_str(self):\n\n def format(number):\n return str(number).rstrip(\".0\") + \" \"\n\n reactant_bits = []\n product_bits = []\n for met in sorted(self._metabolites, key=attrgetter(\"id\")):\n coefficient = self._metabolites[met]\n if coefficient >= 0:\n product_bits.append(format(coefficient) + met.id)\n else:\n reactant_bits.append(format(abs(coefficient)) + met.id)\n\n reaction_string = ' + '.join(reactant_bits)\n if self.gapfill_direction == '=':\n reaction_string += ' <=> '\n elif self.gapfill_direction == '<':\n reaction_string += ' <-- '\n elif self.gapfill_direction == '>':\n reaction_string += ' --> '\n reaction_string += ' + '.join(product_bits)\n return reaction_string", "def rxn(self, string, k = 1, rtype='condensed'):\n reactants, products = string.split('->')\n reactants = reactants.split('+')\n products = products.split('+')\n\n reactants = [self.cplx(x) for x in reactants]\n products = [self.cplx(x) for x in products]\n self.reactions.add(PepperReaction(reactants, products, rtype.strip(), rate=k))", "async def send_react(self, reactions, *args, **kwargs):\n message = await self.send(*args, **kwargs)\n if isinstance(reactions, str): # Handle two-character emojis\n reactions = (reactions,)\n for reaction in reactions:\n await self.add_reaction(message, reaction)\n return message", "def get_reaction(reaction_type):\n\n if reaction_type == \"neg\":\n speechcon = \"<say-as interpret-as='interjection'>\" \\\n + random.choice(NEG_SPEECHCONS) + \"</say-as>\"\n ans = random.choice(NEG_ANS)\n elif reaction_type == \"pos\":\n speechcon = random.choice(POS_SPEECHCONS)\n ans = random.choice(POS_ANS)\n else:\n raise ValueError\n\n return speechcon + ans", "def chain(self):\n commodity = self.commodity\n reactions = set()\n reaction_count = 0\n\n for comm in commodity:\n\n n = len(comm)\n repeated = r2_index(comm)\n inloop_r_count = 0\n\n for x in range(0, n - 1):\n\n if self.recombination == Recomb_1:\n\n i = x + 1\n\n if comm[x] != comm[x + 1]:\n reaction_count = reaction_count + 1\n inloop_r_count = inloop_r_count + 1\n\n if inloop_r_count == 1: # inital reaction\n left1 = [comm[x] for i in range(0, n)]\n right1 = [comm[x + 1] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n # react_str_.append(reaction_n, str(r))\n reactions.add(r)\n continue\n\n else:\n left1 = left2\n right1 = [comm[x + 1] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n reactions.add(r)\n\n\n elif self.recombination == Recomb_2:\n\n reaction_count = reaction_count + 1\n inloop_r_count = inloop_r_count + 1\n\n if inloop_r_count == 1: # inital reaction\n left1 = [repeated[0][0] for i in range(0, n)]\n right1 = [repeated[1][0] for i in range(0, n)]\n i = repeated[1][1]\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n # react_str_.append(reaction_n, str(r))\n reactions.add(r)\n continue\n\n else:\n if right2 == comm:\n break\n else:\n left1 = right2\n right1 = [repeated[inloop_r_count][0] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n i = repeated[inloop_r_count][1]\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n reactions.add(r)\n\n # all same char in comm\n elif comm == n * comm[0]:\n left1 = 
[comm[x] for i in range(0, n)]\n right1 = [comm[x + 1] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n # react_str_.append(reaction_n, str(r))\n reactions.add(r)\n break\n\n # reaction_comm[comm] = reactions\n self.reactions = reactions\n return self.reactions", "def addReaction(\n self, reactants, products, expression, local_params=None, rxn_id=\"\"\n ):\n\n r1 = self.model.createReaction()\n self.check(r1, \"create reaction\")\n if len(rxn_id) == 0:\n rxn_id = \"v\" + str(self.model.getNumReactions())\n self.check(r1.setId(rxn_id), \"set reaction id\")\n self.check(r1.setReversible(False), \"set reaction reversibility flag\")\n self.check(r1.setFast(False), 'set reaction \"fast\" attribute')\n\n for re in reactants:\n if re is not None and \"$\" in re:\n re.translate(None, \"$\")\n re_split = re.split()\n if len(re_split) == 1:\n sto = 1.0\n re_id = re\n elif len(re_split) == 2 and re_split[0].isdigit():\n sto = float(re_split[0])\n re_id = re_split[1]\n else:\n err_msg = (\n \"Error: reactants must be listed in format 'S' or '(float)' S'\"\n )\n raise SystemExit(err_msg)\n s1 = self.model.getSpecies(re_id)\n species_ref1 = r1.createReactant()\n self.check(species_ref1, \"create reactant\")\n self.check(species_ref1.setSpecies(s1.getId()), \"assign reactant species\")\n self.check(\n species_ref1.setStoichiometry(sto), \"assign reactant stoichiometry\"\n )\n if self.document.getLevel() == 3:\n self.check(\n species_ref1.setConstant(True), 'set \"constant\" on species ref 1'\n )\n\n for pro in products:\n if pro is not None and \"$\" in pro:\n pro.translate(None, \"$\")\n pro_split = pro.split()\n if len(pro_split) == 1:\n sto = 1.0\n pro_id = pro\n elif len(pro_split) == 2:\n sto = float(pro_split[0])\n pro_id = pro_split[1]\n else:\n err_msg = \"Error: products must be listed in format 'S' or '(float)' S'\"\n raise SystemExit(err_msg)\n s2 = self.model.getSpecies(pro_id)\n species_ref2 = r1.createProduct()\n self.check(species_ref2, \"create product\")\n self.check(species_ref2.setSpecies(s2.getId()), \"assign product species\")\n self.check(species_ref2.setStoichiometry(sto), \"set product stoichiometry\")\n if self.document.getLevel() == 3:\n self.check(\n species_ref2.setConstant(True), 'set \"constant\" on species ref 2'\n )\n\n math_ast = libsbml.parseL3Formula(expression)\n self.check(math_ast, \"create AST for rate expression\")\n\n kinetic_law = r1.createKineticLaw()\n self.check(kinetic_law, \"create kinetic law\")\n self.check(kinetic_law.setMath(math_ast), \"set math on kinetic law\")\n if local_params is not None:\n for param in local_params.keys():\n val = local_params.get(param)\n if self.document.getLevel() == 3:\n p = kinetic_law.createLocalParameter()\n else:\n p = kinetic_law.createParameter()\n self.check(p, \"create local parameter\")\n self.check(p.setId(param), \"set id of local parameter\")\n self.check(p.setValue(val), \"set value of local parameter\")\n return r1", "def __str__(self):\n reprStr = 'Help Mario build Iron Man suit!'+'\\n' +'To make the ' + self._name + ',you need:'+'\\n'\n for part in self._supplies:\n reprStr = reprStr + str(part.getCount()) + ' ' + part.getData() + '\\n'\n return reprStr", "def _GetReactionSideString(side):\n sdata = []\n for c_w_coeff in side:\n if c_w_coeff.coeff == 1:\n sdata.append(c_w_coeff.GetName())\n else:\n sdata.append('%d %s' % (c_w_coeff.coeff,\n c_w_coeff.GetName()))\n return ' + 
'.join(sdata)", "def __str__(self):\n s = \"\"\n for e in self._sub_effects:\n s += str(e) + \" ^ \"\n return s[0:-3] if len(self._sub_effects) > 0 else \"Void\"", "def test_react(self):\n procnum = 1\n\n spc_a = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spc_a, spc), ['H_Abstraction']) for spc in spcs]\n\n reaction_list = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n self.assertIsNotNone(reaction_list)\n self.assertEqual(len(reaction_list), 3)\n self.assertTrue(all([isinstance(rxn, TemplateReaction) for rxn in reaction_list]))", "def genReactionAntString(self, revTag = \"RevRe__\",\n iRevTag = \"IrRevRe__\"):\n \n lines = self.antString.splitlines()\n lines = [line.split(\"#\")[0] for line in lines]\n rLines = [line.split(\":\") for line in lines if\n len(line.split(\":\"))==2]\n rLines = [[line[0]]+line[1].split(\";\") for line in rLines\n if len(line[1].split(\";\"))>=2]\n rLines = [[part.strip() for part in line] for line in rLines]\n rLines = [line for line in rLines if (\"->\" in line[1]) or\n (\"=>\" in line[1])]\n rLines = [[line[0], \"->\" in line[1], line[2]] for line in rLines]\n rLines = [[revTag+line[0], line[1], line[2]] if line[1] else\n [iRevTag+line[0], line[1], line[2]] for line in rLines]\n rLines = [line[0]+\" := \"+line[2]+\";\" for line in rLines]\n primed = False\n for i, line in zip(range(len(lines)),lines):\n if line.strip().startswith(\"model\"):\n primed = True\n if (line.strip() == \"end\") and primed:\n break\n print(\"line \"+str(i))\n indent = \"\"\n while indent == \"\" and i>0:\n i = i-1\n indent = re.search(r'^\\s*', lines[i]).group()\n rLines = [indent+line for line in rLines]\n self.reactionAntString = \"\\n\".join(lines[:i+1]+rLines+lines[i+1:])", "def get_reaction_label(rmg_reaction):\n reactants = rmg_reaction.reactants\n products = rmg_reaction.products\n if len(reactants) > 1:\n reactants_string = '+'.join([reactant.molecule[0].toSMILES() for reactant in reactants])\n else:\n reactants_string = reactants[0].molecule[0].toSMILES()\n if len(products) > 1:\n products_string = '+'.join([product.molecule[0].toSMILES() for product in products])\n else:\n products_string = products[0].molecule[0].toSMILES()\n reaction_label = '_'.join([reactants_string, products_string])\n return reaction_label", "def tex_reaction_scheme(self):\n \n if self.reaction_matrix is None or self.input_params is None:\n return 'undefined'\n \n species = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n eqn = []\n \n reactants, products = self.reaction_matrix.nonzero()\n for r,p,k in zip(reactants, products,self.input_params.keys()):\n eqn.append( species[r] + r'\\xrightarrow{{' + k + '}}' + species[p])\n \n latex_eqn = r'$' + ','.join(eqn) + r'$'\n return latex_eqn", "def idea(self,irc,msg,args):\n company = self.pick(self.vPrefix) + \\\n self.pick(self.vMidfix) + \\\n self.pick(self.vPostfix)\n product = self.pick(self.vBased) + \" \" + \\\n self.pick(self.vProd) + \" \" + \\\n self.pick(self.vVia) \n irc.reply(\"%s - %s\" % (company,product))", "def RecipeToText(recipe):\n\n\tout = []\n\tworld = None\n\tfor (annotation, next_world) in recipe[1:]:\n\t\tcommand = annotation[0]\n\t\targuments = annotation[1]\n\n\t\trecipe_text = ''\n\t\tif command == 'create_ing':\n\t\t\t# TODO: When computing BLEU score, we may wish to ignore create_ing\n\t\t\t# commands since they are trivially translated\n\t\t\trecipe_text += '%s.' 
% arguments[1]\n\n\t\telif command == 'create_tool':\n\t\t\t# TODO: This is a horrible hack but we need some way to make sure that the\n\t\t\t# length of the outputted string is equal to that of the list of original\n\t\t\t# texts.\n\t\t\trecipe_text = '<create_tool>'\n\n\t\telif command == 'combine':\n\t\t\trecipe_text += 'Combine '\n\n\t\t\trecipe_text += ', '.join([world.I_d[ing] for ing in arguments[0]])\n\n\t\t\tif not IsNull(arguments[3]):\n\t\t\t\trecipe_text += ', %s' % arguments[3]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'separate':\n\t\t\trecipe_text += 'Separate '\n\t\t\trecipe_text += '%s and %s' % (world.I_d[arguments[0]], next_world.I_d[arguments[1]])\n\n\t\t\tif not IsNull(arguments[5]):\n\t\t\t\trecipe_text += ', %s' % arguments[5]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'put':\n\t\t\trecipe_text += 'Put %s in %s. ' % (world.I_d[arguments[0]], world.T_d[arguments[1]])\n\n\t\telif command == 'remove':\n\t\t\trecipe_text += 'Remove %s from %s. ' % (world.I_d[arguments[0]], world.T_d[arguments[1]])\n\n\t\telif command == 'cut':\n\t\t\trecipe_text += 'Chop %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'mix':\n\t\t\trecipe_text += 'Mix %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'cook':\n\t\t\trecipe_text += 'Cook %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'do':\n\t\t\trecipe_text += 'Taking %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'serve':\n\t\t\trecipe_text += 'Serve %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ', %s' % arguments[1]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'set':\n\t\t\trecipe_text += 'Set %s on %s. ' % (world.T_d[arguments[0]], arguments[1])\n\n\t\telif command == 'leave':\n\t\t\trecipe_text += 'Leave %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ', %s' % arguments[1]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'chefcheck':\n\t\t\trecipe_text += 'Check %s for %s. 
' % (world.I_d[arguments[0]], arguments[1])\n\n\t\tworld = next_world\n\t\tout.append(recipe_text)\n\n\treturn out", "def test_reaction_inverts_stereo(self):\n reaction = '[C@:1]>>[C@@:1]'\n\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br |o1:1|')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@H](Cl)Br |&1:1|')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')", "async def on_reaction_add(reaction, user):\n #Before doing anything\n #Check to see if the reaction was a karma emoji\n if reaction.emoji == initKarma.goodKarma:\n consoleMessage = 'Writing to karmaData file :: Increasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '+1')\n if reaction.emoji == initKarma.badKarma:\n consoleMessage = 'Writing to karmaData file :: Decreasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '-1')", "def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def generate_msg(props, alert=False, user_pref=None, past=False):\n\t\tmessage = emojize(\":rocket:\", use_aliases=True)\n\t\tif past:\n\t\t\tmessage += ' Launch was held on: ' + props['when'].format('YYYY-MM-DD HH:mm:ss ZZ') + '.\\n'\n\t\t\tif props['holdreason']:\n\t\t\t\tmessage += 'The launch has been *held*. Reason: ' + props['holdreason'] + '\\n'\n\t\t\tif props['failreason']:\n\t\t\t\tmessage += 'Unfortunately, the launch *failed*. Reason: ' + props['failreason'] + '\\n'\n\t\telse:\n\t\t\tif alert:\n\t\t\t\tmessage += ' *Launch is going to happen in some minutes!* '\n\t\tmessage += ' *' + props['name'] + '*' + '\\n'\n\n\t\tif not alert and not past:\n\t\t\tmessage += 'A launch will happen _' + props['when'].humanize() + '_! 
\\n'\n\t\t\tmessage += 'I mean ' + props['when'].format('YYYY-MM-DD HH:mm:ss ZZ') + '\\n'\n\n\t\tif past:\n\t\t\tmessage += 'Taken from *'\n\t\telse:\n\t\t\tmessage += 'Taking from *'\n\n\t\tmessage += props['location'] + '*.\\n'\n\t\tdescr = Interface.generate_description(props['missions'])\n\t\tmessage += '*Mission description*\\n' + descr + '\\n' if descr else ''\n\t\tmessage += '\\n'\n\n\t\tif props['urls']:\n\t\t\tmessage += 'Watch it here: \\n' if not past else 'You could have watched it here: \\n'\n\t\t\tfor url in props['urls']:\n\t\t\t\tmessage += ' • [' + url + '](' + url +')\\n'\n\t\telse:\n\t\t\tmessage += 'Unfortunately there '\n\t\t\tmessage += 'are' if not past else 'were'\n\t\t\tmessage += ' no reported webcasts ' \\\n\t\t\t\t\t + emojize(':disappointed_relieved:', use_aliases=True)\n\n\t\treturn message", "def clue(self):\n if self.item == \"receipt\":\n print(\"The receipt reads that Jay bought 'diltiazem' medication 4 days ago.\")\n print(\"Diltiazem: medication for high blood pressure, when \"\n \"consumed by an individual in large quantities without high blood\"\n \"pressure, can cause heart failure.\")\n else:\n print(\"That is the wrong item!\")", "def supercombiner(bot, ev):\n # ported from jenni\n s = 'u'\n for i in iter(range(1, 3000)):\n if unicodedata.category(chr(i)) == \"Mn\":\n s += chr(i)\n if len(s) > 100:\n break\n bot.say(s)", "def concatenate(strings: List[str]) -> str:\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"4\")\n # END OF SOLUTION", "def addProduct(self, *args):\n return _libsbml.Reaction_addProduct(self, *args)", "def run(self):\n logging.debug('Displaying Info: ' + self.recipe.name)\n\n msg = PREFIX[1:] + PREFIX.join(self.recipe.info().split('\\n'))\n print(msg)\n return msg", "async def react_with_text(\n pre_command, message: Message, is_private: bool, guild_id: int, author_id: int\n):\n if (\n (is_private or is_whitelisted(\"s_to_ringel_s\", guild_id))\n and author_id == constants.POLYid\n and \"s\" in message.content\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"s_to_ringel_s\",\n delete_message=False,\n )\n ):\n return {TEXT: \"*\" + message.content.replace(\"s\", \"ß\")}\n\n if (\n (\n is_private\n or guild_id not in constants.ayy_lmao_blacklist\n or author_id == constants.NYAid\n )\n and (message.content.lower() == \"ayy\")\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"ayy\",\n delete_message=False,\n )\n ):\n return {TEXT: \"Lmao\"}\n\n if (\n author_id in [constants.NYAid, constants.TRISTANid]\n and message.content.lower() == \"qyy\"\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"qyy\",\n delete_message=False,\n )\n ):\n return {TEXT: \"Kmao\"}\n\n if (\n message.content.lower() == \"lmao\"\n and author_id == constants.NYAid\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"lmao\",\n delete_message=False,\n )\n ):\n return {TEXT: \"Ayy\"}\n\n if (\n (is_private or guild_id not in constants.lenny_blacklist)\n and \"lenny\" in message.content.split(\" \")\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"response_lenny\",\n delete_message=False,\n )\n ):\n return {TEXT: \"( ͡° ͜ʖ ͡°)\"}\n\n if (\n is_private or guild_id not in constants.ded_blacklist\n ) and \"ded\" == message.content:\n ten_mins_ago = datetime.utcnow() - timedelta(minutes=10)\n try:\n history = 
message.channel.history(limit=2, after=ten_mins_ago)\n await history.next()\n await history.next()\n except NoMoreItems:\n if await pre_command(\n message=message,\n channel=message.channel,\n command=\"response_ded\",\n delete_message=False,\n ):\n return {TEXT: random.choice(command_text.ded)}\n\n if (\n (is_private or guild_id not in constants.table_unflip_blacklist)\n and message.content == \"(╯°□°)╯︵ ┻━┻\"\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"tableflip\",\n delete_message=False,\n )\n ):\n return {TEXT: \"┬─┬ ノ( ゜-゜ノ)\"}\n return {}", "def test_react_parallel(self):\n import rmgpy.rmg.main\n rmgpy.rmg.main.maxproc = 2\n procnum = 2\n\n spc_a = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spc_a, spc), ['H_Abstraction']) for spc in spcs]\n\n reaction_list = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n self.assertIsNotNone(reaction_list)\n self.assertEqual(len(reaction_list), 3)\n self.assertTrue(all([isinstance(rxn, TemplateReaction) for rxn in reaction_list]))\n\n # Reset module level maxproc back to default\n rmgpy.rmg.main.maxproc = 1", "def bot_failed_comprehension(error_message=None):\n result = \"\"\n\n if error_message:\n result += error_message + \"\\n\"\n\n result += \"Please see [here]\"\n result += \"(https://www.reddit.com/r/NHL_Stats/comments/74skjv/bot_details/do0tjzz/) \"\n result += \"for tips on proper usage.\\n\\n\"\n return result", "def nuclear_reaction_energy(*args, **kwargs) -> u.J: # noqa: C901, PLR0915\n\n # TODO: Allow for neutrinos, under the assumption that they have no mass.\n\n # TODO: Add check for lepton number conservation; however, we might wish\n # to have violation of lepton number issuing a warning since these are\n # often omitted from nuclear reactions when calculating the energy since\n # the mass is tiny.\n\n errmsg = \"Invalid nuclear reaction.\"\n\n def process_particles_list(\n unformatted_particles_list: list[Union[str, Particle]]\n ) -> list[Particle]:\n \"\"\"\n Take an unformatted list of particles and puts each\n particle into standard form, while allowing an integer and\n asterisk immediately preceding a particle to act as a\n multiplier. 
A string argument will be treated as a list\n containing that string as its sole item.\n \"\"\"\n\n if isinstance(unformatted_particles_list, str):\n unformatted_particles_list = [unformatted_particles_list]\n\n if not isinstance(unformatted_particles_list, (list, tuple)):\n raise TypeError(\n \"The input to process_particles_list should be a \"\n \"string, list, or tuple.\"\n )\n\n particles = []\n\n for original_item in unformatted_particles_list:\n try:\n item = original_item.strip()\n\n if item.count(\"*\") == 1 and item[0].isdigit():\n multiplier_str, item = item.split(\"*\")\n multiplier = int(multiplier_str)\n else:\n multiplier = 1\n\n try:\n particle = Particle(item)\n except InvalidParticleError as exc:\n raise ParticleError(errmsg) from exc\n\n if particle.element and not particle.isotope:\n raise ParticleError(errmsg)\n\n particles += [particle] * multiplier\n\n except ParticleError:\n raise ParticleError(\n f\"{original_item} is not a valid reactant or \"\n \"product in a nuclear reaction.\"\n ) from None\n\n return particles\n\n def total_baryon_number(particles: list[Particle]) -> int:\n \"\"\"\n Find the total number of baryons minus the number of\n antibaryons in a list of particles.\n \"\"\"\n return sum(particle.baryon_number for particle in particles)\n\n def total_charge(particles: list[Particle]) -> int:\n \"\"\"\n Find the total charge number in a list of nuclides\n (excluding bound electrons) and other particles.\n \"\"\"\n total_charge = 0\n for particle in particles:\n if particle.isotope:\n total_charge += particle.atomic_number\n elif not particle.element:\n total_charge += particle.charge_number\n return total_charge\n\n def add_mass_energy(particles: list[Particle]) -> u.Quantity:\n \"\"\"\n Find the total mass energy from a list of particles, while\n taking the masses of the fully ionized isotopes.\n \"\"\"\n total_mass_energy = 0.0 * u.J\n for particle in particles:\n total_mass_energy += particle.mass_energy\n return total_mass_energy.to(u.J)\n\n input_err_msg = (\n \"The inputs to nuclear_reaction_energy should be either \"\n \"a string representing a nuclear reaction (e.g., \"\n \"'D + T -> He-4 + n') or the keywords 'reactants' and \"\n \"'products' as lists with the nucleons or particles \"\n \"involved in the reaction (e.g., reactants=['D', 'T'] \"\n \"and products=['He-4', 'n'].\"\n )\n\n reaction_string_is_input = args and not kwargs and len(args) == 1\n\n reactants_products_are_inputs = kwargs and not args and len(kwargs) == 2\n\n if reaction_string_is_input == reactants_products_are_inputs:\n raise ParticleError(input_err_msg)\n\n if reaction_string_is_input:\n reaction = args[0]\n\n if not isinstance(reaction, str):\n raise TypeError(input_err_msg)\n elif \"->\" not in reaction:\n raise ParticleError(\n f\"The reaction '{reaction}' is missing a '->'\"\n \" or '-->' between the reactants and products.\"\n )\n\n try:\n LHS_string, RHS_string = re.split(\"-+>\", reaction)\n LHS_list = re.split(r\" \\+ \", LHS_string)\n RHS_list = re.split(r\" \\+ \", RHS_string)\n reactants = process_particles_list(LHS_list)\n products = process_particles_list(RHS_list)\n except ParticleError as ex:\n raise ParticleError(f\"{reaction} is not a valid nuclear reaction.\") from ex\n\n elif reactants_products_are_inputs:\n try:\n reactants = process_particles_list(kwargs[\"reactants\"])\n products = process_particles_list(kwargs[\"products\"])\n except TypeError as t:\n raise TypeError(input_err_msg) from t\n except ParticleError as e:\n raise ParticleError(errmsg) from 
e\n\n if total_baryon_number(reactants) != total_baryon_number(products):\n raise ParticleError(\n f\"The baryon number is not conserved for {reactants = } and {products = }.\"\n )\n\n if total_charge(reactants) != total_charge(products):\n raise ParticleError(\n f\"Total charge is not conserved for {reactants = } and {products = }.\"\n )\n\n return add_mass_energy(reactants) - add_mass_energy(products)", "def madlibs(a, b, c, d='hyena', e='butt heads'):\n str1 = f'{a} went out to find {b}. It was {c}.'\n str1 += f' A {d} was around, trying to {e}.'\n return str1", "def test_make_new_reaction(self):\n\n procnum = 2\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n\n cerm = CoreEdgeReactionModel()\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n \"\"\"\n 3 expected H-abstraction reactions:\n OH + CC = H2O + C[CH2]\n OH + [CH3] = H2O + [CH2]\n OH + [CH3] = [O] + C\n \"\"\"\n\n # count no. of entries in reactionDict:\n counter = 0\n for fam, v1 in cerm.reaction_dict.items():\n for key2, v2 in v1.items():\n for key3, rxnList in v2.items():\n counter += len(rxnList)\n\n self.assertEquals(counter, 3)", "def CesarChiff(message_a_chiff:str, decal:int)->str:\r\n mot_crypte = \"\"\r\n for lettre in message_a_chiff:\r\n mot_crypte += cryptage(decal, lettre)\r\n\r\n return mot_crypte", "def test_reaction_preserves_stereo(self):\n reaction = '[C@:1]>>[C@:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def to_string(self):\r\n production_dict = self.get_production_dict()\r\n\r\n string_prods = ['S -> ' + ' | '.join([''.join(symbols) for symbols in production_dict.pop('S')])]\r\n for non_terminal, symbols_list in production_dict.items():\r\n string_prods.append(non_terminal + ' -> ' + ' | '.join([''.join(symbols) for symbols in symbols_list]))\r\n\r\n # concateate em\r\n return '\\n'.join(string_prods)", "def prod(r):\n if isinstance(r, str):\n return r, []\n if isinstance(r, RuleSet):\n idx = np.random.choice(r.N)\n rule = r.rulelist[idx]\n words = rule.split()\n words = [link(word) for word in words]\n rl, ll = zip(*[prod(word) for word in words])\n l2 = []\n for e in ll:\n l2 += e\n return \" \".join(rl), [idx + r.rulesetidx] + l2", "def test_react_all(self):\n procnum = 1\n\n spcs = [\n Species().from_smiles('C=C'),\n Species().from_smiles('[CH3]'),\n Species().from_smiles('[OH]'),\n Species().from_smiles('CCCCCCCCCCC')\n ]\n\n n = len(spcs)\n reaction_list, spc_tuples = react_all(spcs, n, np.ones(n), np.ones([n, n]), np.ones([n, n, n]), procnum)\n self.assertIsNotNone(reaction_list)\n self.assertEqual(len(reaction_list), 34)\n self.assertEqual(len(spc_tuples), 34)\n\n flat_rxn_list = list(itertools.chain.from_iterable(reaction_list))\n self.assertEqual(len(flat_rxn_list), 44)\n self.assertTrue(all([isinstance(rxn, TemplateReaction) for rxn in flat_rxn_list]))", "def extras_msg(extras):\r\n\r\n if len(extras) == 1:\r\n verb = \"was\"\r\n else:\r\n verb = \"were\"\r\n return \", \".join(repr(extra) for extra in extras), verb", "def _str_eval_ob(eval, act, ctxt, *obs) :\n if len(obs) == 1 :\n topic = obs[0]\n text = obs[0]\n else :\n topic = obs[0]\n text = obs[1]\n return [make_action_link(text[0], \"ask Irving Q. 
Tep about \"+topic[0])]", "async def credits(self, ctx):\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='\\n:ok_hand: :laughing:\\n :telephone_receiver::shirt::call_me:\\n :jeans: :fire:',\n colour=0xf20006)\n last_message = await self.bot.say(embed=embed)\n await self.bot.add_reaction(last_message, self.emojiUnicode['succes'])", "def rsset2str(self, elements):\n if len(elements) == 0:\n return \"0\"\n s = \"{\"\n for c in elements:\n s += \" \" + self._reaction_system.get_entity_name(c)\n s += \" }\"\n return s", "def r_4(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSiMe1 = Compound(\"Mg\")\r\n iSaAc = Compound(\"Cu(HSO4)2\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe1 = comps[0]\r\n me = list(iSiMe1.formula.consist.keys())[0].name\r\n me_oxs = get_me_oxs(me)\r\n\r\n if me_oxs == 0:\r\n return \"\"\r\n if is_me_activer(\"Cu\", 2, me, me_oxs):\r\n return \"\"\r\n else:\r\n iSaAc = comps[0]\r\n ((me, me_oxs), _) = iSa_oxs(iSaAc.formula)\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe1 = comps[0]\r\n iSaAc = comps[1]\r\n else:\r\n iSiMe1 = comps[1]\r\n iSaAc = comps[0]\r\n\r\n me1 = list(iSiMe1.formula.consist.keys())[0].name\r\n me1_oxs = get_me_oxs(me1)\r\n if me1_oxs == 0:\r\n return \"\"\r\n ((me2, me2_oxs), (an, an_oxs)) = iSa_oxs(iSaAc.formula)\r\n if not is_me_activer(me1, me1_oxs, me2, me2_oxs):\r\n return \"\"\r\n if not is_me_activer(\"Na\", 1, me1, me1_oxs):\r\n return \"\"\r\n\r\n iSiMe2 = Compound(simple(me2))\r\n iSaNo = Compound(iSaNo_create(me1, me1_oxs, an, an_oxs))\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n\r\n react = f\"{iSiMe1} + {iSaAc} -> {iSiMe2} + {iSaNo} + {iAc}\"\r\n else:\r\n iSiMe2 = Compound(\"Cu\")\r\n iSaNo = Compound(\"MgSO4\")\r\n iAc = Compound(\"H2SO4\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe2 = comps[0]\r\n me = list(iSiMe2.formula.consist.keys())[0].name\r\n me_oxs = get_me_oxs(me)\r\n if me_oxs == 0:\r\n return \"\"\r\n elif \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n if is_me_activer(\"Cu\", 2, me, me_oxs):\r\n return \"\"\r\n if me == \"Cu\":\r\n iSiMe = Compound(\"Ag\")\r\n else:\r\n iAc = comps[0]\r\n (an, an_oxs) = iAc_oxs(iAc.formula)\r\n iSaNo = Compound(iSaNo_create(\"Zn\", 2, an, an_oxs))\r\n elif len(comps) == 2:\r\n for i in range(0, 2):\r\n for j in range(0, 2):\r\n if \"iSi\" in comps[i].comp_type and \"iSa\" in comps[j].comp_type:\r\n iSiMe = comps[i]\r\n iSaNo = comps[j]\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n break\r\n if \"iSi\" in comps[i].comp_type and \"iAc\" in comps[j].comp_type:\r\n iSiMe = comps[i]\r\n iAc = comps[j]\r\n (an, an_oxs) = iAc_oxs(iAc.formula)\r\n iSaNo = Compound(iSaNo_create(\"Mg\", 2, an, an_oxs))\r\n break\r\n if \"iSa\" in comps[i].comp_type and \"iAc\" in comps[j].comp_type:\r\n iSaNo = comps[i]\r\n iAc = comps[j]\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if me == \"Cu\":\r\n iSiMe = Compound(\"Ag\")\r\n break\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe2 = comps[0]\r\n if \"iSa\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n iAc = comps[2]\r\n else:\r\n iSaNo = comps[2]\r\n iAc = comps[1]\r\n elif \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n if \"iSi\" in comps[1].comp_type:\r\n iSiMe2 = comps[1]\r\n iAc = comps[2]\r\n else:\r\n iSiMe2 = 
comps[2]\r\n iAc = comps[1]\r\n elif \"iAc\" in comps[0].comp_type:\r\n iAc = comps[0]\r\n if \"iSi\" in comps[1].comp_type:\r\n iSiMe2 = comps[1]\r\n iSaNo = comps[2]\r\n else:\r\n iSiMe2 = comps[2]\r\n iSaNo = comps[1]\r\n\r\n me2 = list(iSiMe2.formula.consist.keys())[0].name\r\n me2_oxs = get_me_oxs(me2)\r\n if me2_oxs == 0:\r\n return \"\"\r\n ((me1, me1_oxs), (an1, an1_oxs)) = iSa_oxs(iSaNo.formula)\r\n (an2, an2_oxs) = iAc_oxs(iAc.formula)\r\n if an1 != an2:\r\n return \"\"\r\n if an1_oxs == 1:\r\n return \"\"\r\n if not is_me_activer(me1, me1_oxs, me2, me2_oxs):\r\n return \"\"\r\n if not is_me_activer(\"Na\", 1, me1, me1_oxs):\r\n return \"\"\r\n\r\n iSiMe1 = Compound(simple(me1))\r\n iSaAc = Compound(iSaAc_create(me2, me2_oxs, an1, an1_oxs))\r\n\r\n react = f\"{iSiMe1} + {iSaAc} -> {iSiMe2} + {iSaNo} + {iAc}\"\r\n\r\n return Reaction(react)", "def execute(self, performer, target=None):\n pre_results = self.act.execute(performer, target)\n format_dict = {'performer': performer.name,\n 'attr': self.act.attr_str,\n 'value': abs(self.act.value) / 2}\n \n if target:\n format_dict['target'] = target.name\n \n try:\n pre_results[1] += ' However, this backfires and negatively affects {performer}.'\n pre_results[2] += ' {performer}\\'s {attr} also reduced by {value} points!'\n \n pre_results[1] = pre_results[1].format(**format_dict)\n pre_results[2] = pre_results[2].format(**format_dict)\n performer.decr_attr(self.act.attr_str, abs(self.act.value) / 2)\n\n return pre_results\n\n except IndexError:\n print 'Expected at least two result strings from Action'\n print self.act.name", "def armbuy(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC+\"12C\"+self.ESC+\"1;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"37m The\"+self.ESC+\"CSaga\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"CRed\"+self.ESC+\"CDragon\"+self.ESC+\"C-\"+self.ESC+\"C\"+self.ESC+\"33mArmour\"+self.ESC+\"CList \"+self.ESC+\"2C\"+self.A220+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"47m\"+self.A178+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"47m\"+self.A178+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += 
self.ESC+\"12C\"+self.ESC+\"43m\"+self.A219+self.ESC+\"4C\"+self.ESC+\"40mArmour\"+self.ESC+\"25CPrice\"+self.ESC+\"8C\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;33m1.\"+self.ESC+\"CCoat\"+self.ESC+\"1;30m...................................\"+self.ESC+\"33m200\"+self.ESC+\"C\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;33m2.\"+self.ESC+\"CHeavy\"+self.ESC+\"CCoat\"+self.ESC+\"1;30m...........................\"+self.ESC+\"33m1,000\"+self.ESC+\"C\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;33m3.\"+self.ESC+\"CLeather\"+self.ESC+\"CVest\"+self.ESC+\"1;30m.........................\"+self.ESC+\"33m3,000\"+self.ESC+\"C\"+self.ESC+\"43m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A178+self.ESC+\"2C\"+self.ESC+\"0;33m4.\"+self.ESC+\"CBronze\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m.......................\"+self.ESC+\"33m10,000\"+self.ESC+\"C\"+self.ESC+\"43m\"+self.A177+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A177+self.ESC+\"2C\"+self.ESC+\"0;33m5.\"+self.ESC+\"CIron\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m.........................\"+self.ESC+\"33m30,000\"+self.ESC+\"C\"+self.ESC+\"43m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"0;33m\"+self.A223+self.ESC+\"2C6.\"+self.ESC+\"CGraphite\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m....................\"+self.ESC+\"33m100,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A176+self.ESC+\"2C7.\"+self.ESC+\"CErdrick's\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m...................\"+self.ESC+\"33m150,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A176+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A177+self.ESC+\"2C8.\"+self.ESC+\"CArmour\"+self.ESC+\"Cof\"+self.ESC+\"CDeath\"+self.ESC+\"1;30m....................\"+self.ESC+\"33m200,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A177+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A178+self.ESC+\"2C9.\"+self.ESC+\"CAble's\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m......................\"+self.ESC+\"33m400,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A178+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A219+self.ESC+\"C10.\"+self.ESC+\"CFull\"+self.ESC+\"CBody\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m.................\"+self.ESC+\"33m1,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A223+self.ESC+\"C11.\"+self.ESC+\"CBlood\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m.....................\"+self.ESC+\"33m4,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A219+self.ESC+\"C12.\"+self.ESC+\"CMagic\"+self.ESC+\"CProtection\"+self.ESC+\"1;30m................\"+self.ESC+\"33m10,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A219+self.ESC+\"C13.\"+self.ESC+\"CBelar's\"+self.ESC+\"CMail\"+self.ESC+\"1;30m....................\"+self.ESC+\"33m40,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A219+self.ESC+\"C14.\"+self.ESC+\"CGolden\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m..................\"+self.ESC+\"33m100,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"12C\"+self.A219+self.ESC+\"C15.\"+self.ESC+\"CArmour\"+self.ESC+\"COf\"+self.ESC+\"CLore\"+self.ESC+\"1;30m.................\"+self.ESC+\"33m400,000,000\"+self.ESC+\"C\"+self.ESC+\"43m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A220+self.A176+self.A220+self.A220+self.A178+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A221+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A176+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A220+self.A220+self.A219+self.ESC+\"1;43m\"+self.A176+self.ESC+\"40m\"+self.ESC+\"0m\\r\\n\"\n\t\treturn thismsg", "def produce_message_for_sending() -> str:\n return f\"You can donate your money here:\\n`{card_donations}`\"", "def concatenate_items(items, conjunction='and'):\n text = ''\n if not items:\n text = ''\n elif len(items) == 1:\n text = items[0]\n elif len(items) == 2:\n text = '{} {} {}'.format(items[0], conjunction, items[1])\n else:\n text = ', '.join(items[:-1])\n text += ', {} {}'.format(conjunction, items[-1])\n return text", "def __str__(self):\n return '{0}'.format(self.effect_name)", "def use(self):\n return_string = ''\n item = input(f\"What do you want to use?\\n>\")\n if item in self.backpack:\n if self.backpack[item].type is \"Food\":\n if (self.health + self.backpack[item].heal_amount) > standard_health:\n self.health = standard_health\n else:\n self.health += self.backpack[item].heal_amount\n self.backpack[item].charges -= 1\n return_string = f\"You ate {self.backpack[item].name}. 
{self.backpack[item].heal_amount} health restored\"\n if self.backpack[item].charges == 0:\n del self.backpack[item]\n return return_string\n else:\n return \"You cant eat this\"\n else:\n return \"You dont have this\"", "def __str__(self):\n # define the prefix\n prefix = \"py_SEX2GOL: \"\n\n # compose the feedback\n big_str = \"{0:s} Setup:\\n\".format(prefix)\n big_str += \"{0:s} Input g/prism image: {0:s} \\n\".format(prefix, self.grisim)\n big_str += \"{0:s} Configuration file name: {0:s} \\n\".format(prefix, self.config)\n big_str += \"{0:s} Direct image: {0:s} \\n\".format(prefix, self.dirname)\n big_str += \"{0:s} G/Prism extension: {0:s} \\n\".format(prefix, self.grism_extinfo['axe_ext'])\n big_str += \"{0:s} Direct image extension: {0:s} \\n\".format(prefix, self.dirname_extinfo['axe_ext'])\n big_str += \"{0:s} Input catalog name: {0:s} \\n\".format(prefix, self.in_sex)\n big_str += \"{0:s} Output catalog name: {0:s} \".format(prefix, self.out_sex)\n\n # return the string\n return big_str", "def energy_line(experiment, procedure, h = '', n = '', c = '', s = ''):\n # It is assumed that the procedure has been validated\n # Try to get the list of energy terms to use from the experiment\n try:\n terms = experiment[\"CHARMM Energy Terms\"]\n except (KeyError, TypeError, AttributeError, IPRO_Error):\n terms = defaultCHARMMEnergies\n # Generate the line\n text = \"skip all excl\"\n for term in terms:\n text += \" \" + term\n # Include additional terms, as indicated by the provided strings\n if h != '' and \"harm\" not in terms:\n text += \" harm\"\n if n != '' and \"noe\" not in terms:\n text += \" noe\"\n if (procedure == \"perturbation\" or c != '') and \"cdih\" not in terms:\n text += \" cdih\"\n if s != '' and \"gbener\" not in terms:\n text += \" gbener\"\n text += \"\\n\"\n return text", "def make_error_string(controller, errors):\n return \"\\n\".join([controller.get_string(error) for error in errors])", "def combine_text(evt):\n global output\n output = output + evt.result.text\n print(evt.result.text)", "async def cast(self, ctx:commands.Context, bait_type:str):\r\n\r\n if not await self.IsSpecialized(ctx.guild, ctx.channel.id, POOL_CHANNEL):\r\n return\r\n profile = self.config.member(ctx.message.author)\r\n\r\n await profile.currently_fishing.set(True)\r\n modified_fish_weights = await self.startfishing(ctx, profile, bait_type)\r\n\r\n embed = Embed(title=f'{ctx.message.author.display_name} cast their rod into the shimmering waves at {ctx.channel}', color=0x7300ff)\r\n embed.set_footer(text='Not even a nibble yet...')\r\n msg = await ctx.send(embed=embed)\r\n start_adding_reactions(msg, ['🎣'])\r\n\r\n pred = ReactionPredicate.with_emojis(['🎣'], msg, ctx.author)\r\n time_left = await self.GetSetting(ctx.guild, 'max_fishing_length')\r\n min_pause = await self.GetSetting(ctx.guild, 'min_fishing_wait')\r\n max_pause = await self.GetSetting(ctx.guild, 'max_fishing_wait')\r\n curr_fish = None\r\n rarity = None\r\n while time_left >= 0:\r\n try:\r\n timer = time_left if time_left < max_pause else randint(min_pause, max_pause)\r\n time_left -= timer\r\n await ctx.bot.wait_for('reaction_add', check=pred, timeout=timer)\r\n except asyncio.TimeoutError:\r\n if curr_fish is None:\r\n rarity = choices(FISH_RARITIES, modified_fish_weights)[0]\r\n rarity_list = self.fishing_rarities.get(rarity)\r\n curr_fish = rarity_list[randint(0, len(rarity_list) - 1)] if not await profile.bryan_mode() else self.SEA_BASS\r\n embed.set_footer(text=RARITY_DESCRIPTIONS[rarity])\r\n else:\r\n curr_fish = 
None\r\n embed.set_footer(text='The rod drifts in the water')\r\n await msg.edit(embed=embed)\r\n\r\n if pred.result == 0:\r\n break\r\n\r\n if curr_fish is None or time_left <= 0:\r\n embed.set_footer(text='You feel a twist as the line snaps :(')\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n else:\r\n new_fish = curr_fish.ToFishCatch(RARITY_VALUES[rarity])\r\n embed.set_footer(text=f'You pulled a {new_fish[\"name\"]} ({new_fish[\"size\"]} inches) out of the water!\\nDo you want to keep or release?')\r\n embed.set_thumbnail(url=curr_fish.image)\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n\r\n start_adding_reactions(msg, ['🥤', '🐟'])\r\n\r\n pred = ReactionPredicate.with_emojis(['🥤', '🐟'], msg, ctx.author)\r\n try:\r\n await ctx.bot.wait_for(\"reaction_add\", check=pred, timeout=15)\r\n except asyncio.TimeoutError:\r\n if await self.AddFish(ctx.message.author, new_fish):\r\n embed.set_footer(text=f'Timed out, {new_fish[\"name\"]} was added to your bucket')\r\n else:\r\n embed.set_footer(text=f'Timed out and your bucket was full, so {new_fish[\"name\"]} was released :(')\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n else:\r\n if pred.result == 0:\r\n if await self.AddFish(ctx.message.author, new_fish):\r\n embed.set_footer(text=f'{new_fish[\"name\"]} was added to your bucket!')\r\n else:\r\n embed.set_footer(text=f'Your bucket was full, so you had to release {new_fish[\"name\"]} :(')\r\n else:\r\n embed.set_footer(text=f'You let {new_fish[\"name\"]} swim away...')\r\n await msg.edit(embed=embed)\r\n await msg.clear_reactions()\r\n\r\n if randint(0, 100) < 100 * await self.GetSetting(ctx.guild, 'bait_recovery_chance'):\r\n await ctx.send(f'Your {bait_type} is still on the end of the rod! 
(+1 {bait_type})')\r\n else:\r\n user_bait = await profile.bait()\r\n user_bait[bait_type] -= 1\r\n await profile.bait.set(user_bait)\r\n\r\n await profile.currently_fishing.set(False)\r\n #if not await profile.mawiam_mode():\r\n #await profile.nextcast.set(time() + await self.GetSetting(ctx.guild, 'fishing_delay'))\r\n\r\n await self.CheckSchools(ctx)", "def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')", "def r_1(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSaAc = Compound(\"NaHCO3\")\r\n iBa = Compound(\"NaOH\")\r\n if len(comps) == 1:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaAc = comps[0]\r\n ((me, me_oxs), _) = iSa_oxs(iSaAc.formula)\r\n iBa = Compound(iBa_create(me, me_oxs))\r\n else:\r\n iBa = comps[0]\r\n (me, me_oxs) = iBa_oxs(iBa.formula)\r\n iSaAc = Compound(iSaAc_create(me, me_oxs, \"CO3\", 2))\r\n else:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaAc = comps[0]\r\n iBa = comps[1]\r\n else:\r\n iSaAc = comps[1]\r\n iBa = comps[0]\r\n\r\n ((me1, me1_oxs), (an, an_oxs)) = iSa_oxs(iSaAc.formula)\r\n (me2, me2_oxs) = iBa_oxs(iBa.formula)\r\n if (me1, me1_oxs) != (me2, me2_oxs):\r\n return \"\"\r\n\n iSaNo = Compound(iSaNo_create(me1, me1_oxs, an, an_oxs))\r\n\r\n react = f\"{iSaAc} + {iBa} -> {iSaNo} + H2O\"\r\n else:\r\n iSaNo = Compound(\"Na2CO3\")\r\n if len(comps) == 1:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n else:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n else:\r\n iSaNo = comps[1]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if an_oxs == 1:\r\n return \"\"\r\n\r\n iSaAc = Compound(iSaAc_create(me, me_oxs, an, an_oxs))\r\n iBa = Compound(iBa_create(me, me_oxs))\r\n\r\n react = f\"{iSaAc} + {iBa} -> {iSaNo} + H2O\"\r\n\n return Reaction(react)", "async def on_reaction_remove(reaction, user):\n #Before doing anything\n #Check to see if the reaction was a karma emoji\n if reaction.emoji == initKarma.goodKarma:\n consoleMessage = 'Writing to karmaData file :: Decreasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '-1')\n if reaction.emoji == initKarma.badKarma:\n consoleMessage = 'Writing to karmaData file :: Increasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '+1')", "def explode(self):\n return \"...it's a glove.\"", "def conjure(self, irc, msg, args, channel):\n if (self._size(channel) < self.registryValue('size', channel)):\n history = self.registryValue('history', channel)\n item = random.choice(history)\n methods = (\n \"spins up the LHC to create %s\",\n \"pulls %s out of your ear\",\n \"wills %s 
into being\",\n \"conjures up %s with the +3 staff of conjuring\",\n \"calls forth %s from the void\",\n \"invents %s\",\n \"ponders %s into existance\",\n \"considers the set of all real numbers and reduces it to %s\",\n \"casts a void*, resulting in %s\",\n \"drags %s out from behind a curtain\",\n \"orders an airstrike consisting of two nukes and %s\",\n \"orders a 6\\\" BLT with a side of %s\",\n \"pulls %s out of his bag of holding\",\n \"ヽ( ゚ヮ゚)ノ.・゚*。・+☆ %s\",\n \"(╯‵Д′)╯彡 %s\",\n )\n method = random.choice(methods)\n irc.reply(method%(item), action=True)\n self._addItem(channel, item)\n else:\n methods = (\n \"thinks real hard to no avail\",\n \"calls out into the void with no result\",\n \"can't conjure anything when the bag is full\",\n \"needs more pylons\"\n )\n irc.reply(random.choice(methods), action = True)", "def command_line_string(res_list, pdb_filename, output_file):\n\n\n temp_str = \"\"\n temp_str += \"mutmodel \"\n\n for x in res_list:\n temp_str += \"-m \" + str(x) + \" ARG \"\n temp_str += str(pdb_filename) + \" \"\n temp_str += str(output_file) + \" \"\n\n if not res_list:\n temp_str =\"cp \" + str(pdb_filename) + \" \" + str(output_file)\n print(\"no prolines replaced\")\n else:\n print(\"Prolines found :\")\n print(res_list)\n\n print(temp_str)\n\n return(temp_str)", "def __str__(self):\n # Power/toughness, seen only if it's a creature\n pt = \"\"\n if \"power\" in self:\n pt = \"{0}/{1}\".format(self.power,\n self.toughness).replace(\"*\", \"\\*\")\n # Append loyalty to the end of oracle text if the creature is a\n # planeswalker\n if \"loyalty\" in self:\n self.oracle_text = \"{0}\\nStarting Loyalty: {1}\".format(\n self.oracle_text, self.loyalty)\n\n flavor = \"*{0}*\".format(\n self.flavor_text) if \"flavor_text\" in self else \"\"\n\n return \"**{0}** {1}\\n{2} {3}\\n{4}\\n{5}\\n\\n\".format(self.name,\n self.mana_cost,\n self.type_line,\n pt,\n self.oracle_text,\n flavor)", "def createProduct(self):\n return _libsbml.Reaction_createProduct(self)", "def real_process(raw):\n\n prod = product.TextProduct(raw)\n pil = prod.afos[:3]\n wfo = prod.source[1:]\n # sigh, can't use originating center for the route\n if (pil == \"OEP\"):\n wfo = prod.afos[3:]\n\n #raw = raw.replace(\"'\", \"\\\\'\")\n sqlraw = raw.replace(\"\\015\\015\\012\", \"\\n\").replace(\"\\000\", \"\").strip()\n\n # FTM sometimes have 'garbage' characters included, get em out\n #if (pil == \"FTM\"):\n # sqlraw = re.sub(\"[^\\n\\ra-zA-Z0-9:\\.,\\s\\$\\*]\", \"\", sqlraw)\n\n # Always insert the product into the text archive database\n product_id = prod.get_product_id()\n sql = \"\"\"INSERT into text_products(product, product_id) values (%s,%s)\"\"\"\n myargs = (sqlraw, product_id)\n if (len(prod.segments) > 0 and prod.segments[0].sbw):\n giswkt = 'SRID=4326;%s' % (MultiPolygon([prod.segments[0].sbw]).wkt,)\n sql = \"\"\"INSERT into text_products(product, product_id, geom) values (%s,%s,%s)\"\"\" \n myargs = (sqlraw, product_id, giswkt)\n deffer = POSTGIS.runOperation(sql, myargs)\n deffer.addErrback( common.email_error, sqlraw)\n myurl = \"%s?pid=%s\" % (config.get('urls', 'product'), product_id)\n\n xtra = {\n \"product_id\": product_id,\n }\n\n # Just send with optional headline to rooms...\n if SIMPLE_PRODUCTS.__contains__(pil):\n xtra['channels'] = wfo\n if pil in NEW_ROUTING:\n xtra['channels'] = prod.afos\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n\n mess = \"%s: %s issues %s %s\" % (wfo, wfo, prodtxt, myurl)\n 
htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> \" % (centertext.get(wfo,wfo), myurl, prodtxt)\n if (not [\"HWO\",\"NOW\",\"ZFP\"].__contains__(pil) and \n len(prod.segments) > 0 and \n len(prod.segments[0].headlines) > 0 and \n len(prod.segments[0].headlines[0]) < 200 ):\n htmlmess += \"... %s ...\" % (prod.segments[0].headlines[0],)\n\n jabber.sendMessage(mess, htmlmess, xtra)\n\n channels = [wfo,]\n if pil in NEW_ROUTING:\n channels = [prod.afos,]\n # TODO: remove manual hack\n if prod.afos == 'RFDBIS':\n channels = ['BIS',]\n # Also send message to any 'subscribing WFO chatrooms'\n for key in routes.keys():\n if (re.match(key, prod.afos)):\n for wfo2 in routes[key]:\n mess = \"%s: %s issues %s %s\" % \\\n (wfo2, wfo, prodtxt, myurl)\n jabber.sendMessage(mess, htmlmess, xtra)\n channels.append( wfo2 )\n\n twt = prodtxt\n url = myurl\n common.tweet(channels, twt, url)\n if prod.afos == \"PNSARX\":\n snowfall_pns(prod)\n # We are done for this product\n return\n\n\n # Now, lets look at segments ?\n if (pil == \"RVF\"):\n for seg in prod.segments:\n tokens = re.findall(\"\\.E ([A-Z0-9]{5}) \", seg.raw)\n if (len(tokens) == 0):\n print 'Whoa, did not find NWSLI?', seg\n return\n hsas = re.findall(\"HSA:([A-Z]{3}) \", seg.raw)\n prodtxt = reference.prodDefinitions[pil]\n mess = \"%s: %s issues %s\" % \\\n (wfo, wfo, prodtxt)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> for \" \\\n % (wfo, myurl, prodtxt)\n usednwsli = {}\n hsa_cnt = -1\n rivers = {}\n for nwsli in tokens:\n if usednwsli.has_key(nwsli):\n continue\n usednwsli[nwsli] = 1\n hsa_cnt += 1\n if (nwsli_dict.has_key(nwsli)):\n rname = nwsli_dict[nwsli]['rname']\n r = nwsli_dict[nwsli]['river']\n else:\n rname = \"((%s))\" % (nwsli,)\n r = \"Unknown River\"\n if not rivers.has_key(r):\n rivers[r] = \"<br/>%s \" % (r,)\n if len(hsas) > hsa_cnt and \\\n reference.wfo_dict.has_key( hsas[hsa_cnt] ):\n uri = AHPS_TEMPLATE[ reference.wfo_dict[hsas[hsa_cnt]]['region'] ] %\\\n (hsas[hsa_cnt].lower(), nwsli.lower() ) \n rivers[r] += \"<a href=\\\"%s\\\">%s</a> (%s), \" % (uri, rname, nwsli)\n else:\n rivers[r] += \"%s (%s), \" % (rname, nwsli)\n for r in rivers.keys():\n htmlmess += \" %s\" % (rivers[r][:-2],)\n jabber.sendMessage(mess[:-1] +\" \"+ myurl, htmlmess[:-1], xtra)\n continue\n\n# PUBLIC ADVISORY NUMBER 10 FOR REMNANTS OF BARRY\n# TROPICAL DEPRESSION BARRY ADVISORY NUMBER 5\n# TROPICAL STORM BARRY INTERMEDIATE ADVISORY NUMBER 2A\n\n if (pil == \"TCM\" or pil == \"TCP\" or pil == \"TCD\"):\n mess = \"%s: %s issues %s %s\" % (wfo, wfo, pil, myurl)\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> \" % (wfo, myurl, prodtxt)\n jabber.sendMessage(mess, htmlmess, xtra)\n \n common.tweet([wfo], prodtxt, myurl)\n\n\n for key in routes.keys():\n if (re.match(key, prod.afos)):\n channels = []\n for wfo2 in routes[key]:\n mess = \"%s: %s %s\" % \\\n (wfo2, prod.afos, myurl)\n htmlmess = \"<a href=\\\"%s\\\">%s</a>\" % (myurl, prodtxt)\n tokens = re.findall(\"(.*) (DISCUSSION|INTERMEDIATE ADVISORY|FORECAST/ADVISORY|ADVISORY|MEMEME) NUMBER\\s+([0-9]+)\", raw.replace(\"PUBLIC ADVISORY\", \"ZZZ MEMEME\") )\n if (len(tokens) > 0):\n tt = tokens[0][0]\n what = tokens[0][1]\n tnum = tokens[0][2]\n if (tokens[0][1] == \"MEMEME\"):\n tokens2 = re.findall(\"(PUBLIC ADVISORY) NUMBER\\s+([0-9]+) FOR (.*)\", raw)\n what = tokens2[0][0]\n tt = tokens2[0][2]\n mess = \"%s: %s issues %s #%s for %s %s\" % (wfo2, centertext.get(wfo, 
wfo), what, tnum, tt, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s #%s</a> for %s\" % ( centertext.get(wfo, wfo), myurl, what, tnum, tt)\n #print htmlmess, mess\n jabber.sendMessage(mess, htmlmess, xtra)\n channels.append( wfo2 )\n twt = \"%s issues %s %s for %s\" % (centertext.get(wfo, wfo), what, tnum, tt)\n common.tweet(channels, twt, myurl)\n\n\n for seg in prod.segments:\n # The segment needs to have ugc codes\n if (len(seg.ugcs) == 0):\n continue\n # If the product has VTEC, it is handled by the vtec ingestor\n if (len(seg.vtec) > 0 and ['MWS','HLS'].__contains__(pil)):\n log.msg(\"VTEC FOUND!, skipping\")\n continue\n\n # If the product has HVTEC, it is handled by other ingestor too\n if (len(seg.hvtec) > 0 and ['FLW','FFA','FLS'].__contains__(pil)):\n log.msg(\"HVTEC FOUND!, skipping\")\n continue\n\n counties = countyText(seg.ugcs)\n if (counties.strip() == \"\"):\n counties = \"entire area\"\n expire = \"\"\n if seg.ugcexpire is not None:\n if prod.z:\n expire = \"till \"+ (seg.ugcexpire - datetime.timedelta(hours= reference.offsets[prod.z] )).strftime(\"%-I:%M %p \")+ prod.z\n\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n mess = \"%s: %s issues %s for %s %s %s\" % \\\n (wfo, wfo, prodtxt, counties, expire, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> for %s %s\" % (wfo, myurl, prodtxt, counties, expire)\n jabber.sendMessage(mess, htmlmess, xtra)\n twt = \"%s for %s %s\" % (prodtxt, counties, expire)\n common.tweet([wfo,], twt, myurl)\n\n# PUBLIC ADVISORY NUMBER 10 FOR REMNANTS OF BARRY\n# TROPICAL DEPRESSION BARRY ADVISORY NUMBER 5\n# TROPICAL STORM BARRY INTERMEDIATE ADVISORY NUMBER 2A\n\n if (pil == \"TCM\" or pil == \"TCP\" or pil == \"TCD\"):\n mess = \"%s: %s issues %s %s\" % (wfo, wfo, pil, myurl)\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> \" % (wfo, myurl, prodtxt)\n jabber.sendMessage(mess, htmlmess, xtra)\n common.tweet([wfo,], prodtxt, myurl)\n\n\n\n for key in routes.keys():\n if (re.match(key, prod.afos)):\n channels = []\n for wfo2 in routes[key]:\n mess = \"%s: %s %s\" % \\\n (wfo2, prod.afos, myurl)\n htmlmess = \"<a href=\\\"%s\\\">%s</a>\" % (myurl, prodtxt)\n tokens = re.findall(\"(.*) (DISCUSSION|INTERMEDIATE ADVISORY|FORECAST/ADVISORY|ADVISORY|MEMEME) NUMBER\\s+([0-9]+)\", raw.replace(\"PUBLIC ADVISORY\", \"ZZZ MEMEME\") )\n if (len(tokens) > 0):\n tt = tokens[0][0]\n what = tokens[0][1]\n tnum = tokens[0][2]\n if (tokens[0][1] == \"MEMEME\"):\n tokens2 = re.findall(\"(PUBLIC ADVISORY) NUMBER\\s+([0-9]+) FOR (.*)\", raw)\n what = tokens2[0][0]\n tt = tokens2[0][2]\n mess = \"%s: %s issues %s #%s for %s %s\" % (wfo2, centertext.get(wfo, wfo), what, tnum, tt, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s #%s</a> for %s\" % ( centertext.get(wfo, wfo), myurl, what, tnum, tt)\n #print htmlmess, mess\n jabber.sendMessage(mess, htmlmess, xtra)\n channels.append( wfo2 )\n twt = \"%s issues %s %s for %s\" % (centertext.get(wfo, wfo), what, tnum, tt)\n common.tweet(channels, twt, myurl)", "def generate_enzyme_module_reaction_name(self, update_enzyme=False):\n name = \"\"\n items = defaultdict(list)\n for met in self.metabolites:\n key = \"Enz\" if isinstance(met, EnzymeModuleForm) else \"Lig\"\n key += \" React\" if met in self.reactants else \" Prod\"\n items[key].append(met)\n\n for enz_r, enz_p in zip(items[\"Enz React\"], items[\"Enz 
Prod\"]):\n r_dict, p_dict = (\n getattr(enz_r, \"bound_metabolites\"),\n getattr(enz_p, \"bound_metabolites\"),\n )\n diff = {}\n for key in list(set(p_dict).union(set(r_dict))):\n if key in p_dict and key in r_dict:\n coeff = abs(p_dict[key] - r_dict[key])\n elif key in p_dict or key in r_dict:\n coeff = [d[key] for d in [r_dict, p_dict] if key in d].pop()\n if coeff != 0:\n diff[key] = coeff\n\n if diff:\n if list(diff) != list(items[\"Lig React\"]) and list(diff) != list(\n items[\"Lig Prod\"]\n ):\n name_str = enz_r._remove_compartment_from_id_str()\n name_str += \" catalyzation\"\n else:\n name_str = \"-\".join(\n [\n m._remove_compartment_from_id_str()\n for m in [enz_r] + list(diff)\n ]\n )\n name_str += str(\n \" binding\"\n if list(diff) == list(items[\"Lig React\"])\n else \" release\"\n )\n name = name_str\n\n if not name:\n name = \"-\".join(\n [\n enz_form._remove_compartment_from_id_str()\n for enz_form in [enz_r, enz_p]\n ]\n )\n name += \" transition\"\n\n # Assign the new name to the name attribute\n if update_enzyme:\n self.name = name\n\n return name", "def gen_reactions(self, model, options):\n Avogadro = model.parameters.get_one(id='Avogadro')\n c = model.compartments.get_one(id='c')\n\n # basic metabolic reactions\n for basic_reaction in options['basic']:\n\n # reaction\n reaction = model.reactions.get_or_create(submodel=model.submodels.get_one(id=basic_reaction['submodel']),\n id=basic_reaction['id'])\n reaction.name = basic_reaction['name']\n reaction.participants = []\n for participant in basic_reaction['participants']:\n reaction.participants.add(model.species_types.get_one(id=participant['id']).species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=participant['coefficient']))\n\n # rate law\n model.parameters.create(id='kcat_'+basic_reaction['id'],\n value=basic_reaction['rate_law']['k_cat']['value'],\n type=wc_ontology['WC:k_cat'],\n units=unit_registry.parse_units(basic_reaction['rate_law']['k_cat']['units']))\n for km in basic_reaction['rate_law']['k_m']:\n model.parameters.create(id='km_{}_{}'.format(basic_reaction['id'], km['id']),\n value=km['value'],\n type=wc_ontology['WC:K_m'],\n units=unit_registry.parse_units('M'))\n reactants = [participant['id'] for participant in basic_reaction['participants'] if participant['coefficient']<0]\n if 'h' in reactants:\n reactants.remove('h')\n if 'h2o' in reactants:\n reactants.remove('h2o')\n rate_law_exp, errors = wc_lang.RateLawExpression.deserialize(\n '{}{}'.format('kcat_'+basic_reaction['id'], ' '.join(['* ({}[c] / (km_{}_{} * Avogadro * volume_c + {}[c]))'.format(reactant, basic_reaction['id'], reactant, reactant) for reactant in reactants])),\n self.get_rate_law_context(model))\n\n rate_law = model.rate_laws.create(direction=wc_lang.RateLawDirection.forward,\n type=None,\n expression=rate_law_exp,\n reaction=reaction,\n )\n rate_law.id = rate_law.gen_id()\n\n # rna\n rna_species_types = [species_types for species_types in model.species_types if species_types.type == wc_ontology['WC:RNA']]\n\n # rna transcription\n for km in options['rna']['transcription']['k_m']:\n model.parameters.create(id='km_{}_trans'.format(km['id']), value=km['value'], type=wc_ontology['WC:K_m'], units=unit_registry.parse_units('M'))\n\n for i, rna_species_type in enumerate(rna_species_types):\n reaction = model.reactions.get_or_create(submodel=model.submodels.get_one(id=options['rna']['submodel']), id='transcription_{}'.format(rna_species_type.id))\n reaction.name = 'transcription 
{}'.format(rna_species_type.name)\n reaction.participants = []\n # participants\n rna_str = rna_species_type.structure.value\n # lhs\n reaction.participants.add(model.species_types.get_one(id='atp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-rna_str.count('A')))\n reaction.participants.add(model.species_types.get_one(id='gtp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-rna_str.count('G')))\n reaction.participants.add(model.species_types.get_one(id='ctp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-rna_str.count('C')))\n reaction.participants.add(model.species_types.get_one(id='utp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-rna_str.count('U')))\n reaction.participants.add(model.species_types.get_one(id='h2o').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-1))\n # rhs\n reaction.participants.add(rna_species_type.species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=1))\n reaction.participants.add(model.species_types.get_one(id='ppi').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=len(rna_str)))\n reaction.participants.add(model.species_types.get_one(id='h').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=1))\n # rate law\n model.parameters.create(\n id='k_trans_{}'.format(rna_species_type.id),\n value=math.log(2)/model.parameters.get_one(id='half_life_{}'.format(rna_species_type.id)).value * 8,\n type=wc_ontology['WC:k_cat'],\n units=unit_registry.parse_units('s^-1 / M'))\n rate_law_str = 'k_trans_{}'.format(rna_species_type.id)\n if 'A' in rna_str:\n rate_law_str += ' * (atp[c] / (km_atp_trans * Avogadro * volume_c + atp[c]))'\n if 'G' in rna_str:\n rate_law_str += ' * (gtp[c] / (km_gtp_trans * Avogadro * volume_c + gtp[c]))'\n if 'C' in rna_str:\n rate_law_str += ' * (ctp[c] / (km_ctp_trans * Avogadro * volume_c + ctp[c]))'\n if 'U' in rna_str:\n rate_law_str += ' * (utp[c] / (km_utp_trans * Avogadro * volume_c + utp[c]))'\n rate_law_str += ' * rna_pol[c] / (Avogadro * volume_c)'\n\n reaction_rate_law_exp, errors = wc_lang.RateLawExpression.deserialize(\n rate_law_str,\n self.get_rate_law_context(model))\n reaction_rate_law = model.rate_laws.create(direction=wc_lang.RateLawDirection.forward,\n type=None,\n expression=reaction_rate_law_exp,\n reaction=reaction,\n )\n reaction_rate_law.id = reaction_rate_law.gen_id()\n\n # rna degradation\n for i, rna_species_type in enumerate(rna_species_types):\n reaction = model.reactions.get_or_create(submodel=model.submodels.get_one(id=options['rna']['submodel']), id='degradation_{}'.format(rna_species_type.id))\n reaction.name = 'transcription {}'.format(rna_species_type.name)\n reaction.participants = []\n # participants\n rna_str = rna_species_type.structure.value\n # lhs\n reaction.participants.add(rna_species_type.species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-1))\n reaction.participants.add(model.species_types.get_one(id='h2o').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-(len(rna_str)-1)))\n # rhs\n reaction.participants.add(model.species_types.get_one(id='amp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=rna_str.count('A')))\n reaction.participants.add(model.species_types.get_one(id='gmp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=rna_str.count('G')))\n 
reaction.participants.add(model.species_types.get_one(id='cmp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=rna_str.count('C')))\n reaction.participants.add(model.species_types.get_one(id='ump').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=rna_str.count('U')))\n reaction.participants.add(model.species_types.get_one(id='h').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=len(rna_str)-1))\n # rate law\n model.parameters.create(\n id='k_deg_{}'.format(rna_species_type.id),\n value=math.log(2)/model.parameters.get_one(id='half_life_{}'.format(rna_species_type.id)).value,\n type=wc_ontology['WC:k_cat'],\n units=unit_registry.parse_units('s^-1 / M'))\n model.parameters.create(\n id='km_deg_{}'.format(rna_species_type.id),\n value=1 / Avogadro.value / c.init_volume.mean,\n type=wc_ontology['WC:K_m'],\n units=unit_registry.parse_units('M'))\n reaction_rate_law_exp, errors = wc_lang.RateLawExpression.deserialize(\n 'k_deg_{}'\n ' * {}[c] / (km_deg_{} * Avogadro * volume_c + {}[c])'\n ' * rna_se[c] / (Avogadro * volume_c)'.format(rna_species_type.id, rna_species_type.id, rna_species_type.id, rna_species_type.id),\n self.get_rate_law_context(model))\n reaction_rate_law = model.rate_laws.create(direction=wc_lang.RateLawDirection.forward,\n type=None,\n expression=reaction_rate_law_exp,\n reaction=reaction,\n )\n reaction_rate_law.id = reaction_rate_law.gen_id()", "async def fruitmachine(self, ctx: commands.Context):\n slots = [get(ctx.guild.emojis, name='roobs'),\n get(ctx.guild.emojis, name='wess'),\n get(ctx.guild.emojis, name='yeng'),\n get(ctx.guild.emojis, name='blek'),\n get(ctx.guild.emojis, name='pyrr'),\n get(ctx.guild.emojis, name='noodle'),\n get(ctx.guild.emojis, name='nora'),\n get(ctx.guild.emojis, name='renbo'),\n get(ctx.guild.emojis, name='hapbun'),\n get(ctx.guild.emojis, name='hapshork'),\n get(ctx.guild.emojis, name='skatergorl'),\n get(ctx.guild.emojis, name='rainbowgorl')]\n\n content = discord.Embed()\n\n def gen_post(player, first, second, third, under_text=None, finish=False):\n content.description = \"**Welcome to Yutu's Casino {}!**\\n\\n\".format(ctx.author)\n content.description += \"**[ {} {} {} ]**\\n\\n\".format(first, second, third)\n if under_text is not None:\n content.description += \"{}\\n\".format(under_text)\n if player.coins == 0:\n content.description += \"You are out of coins.\\n\\n\"\n else:\n content.description += \"You currently have **{}** coins.\\n\\n\".format(player.coins)\n if finish:\n content.description += \"Thank you for playing!\"\n else:\n content.description += \"Add a 🔁 react to spin the slots. 
Add ❌ to stop.\"\n return content\n\n with orm.db_session:\n await ctx.message.delete()\n\n try:\n player = self.Player[ctx.author.id]\n except orm.ObjectNotFound:\n player = self.Player(id=ctx.author.id, coins=10)\n gen_post(player, '❓', '❓', '❓')\n post = await ctx.send(embed=content)\n await post.add_reaction('🔁')\n await post.add_reaction('❌')\n\n def chk(reaction, user):\n return (str(reaction.emoji) in ['❌', '🔁'] and\n user == ctx.author and\n reaction.message.id == post.id)\n\n while True:\n if player.coins == 0:\n break\n try:\n react, _ = await ctx.bot.wait_for(\"reaction_add\", check=chk, timeout=300)\n except asyncio.TimeoutError:\n break\n if str(react.emoji) == '❌':\n break\n player.coins -= 1\n first, second, third = random.choice(slots), random.choice(slots), random.choice(slots)\n\n if first == second == third:\n player.coins += 20\n tag = \":trophy: Jackpot! :trophy:\\nYou win 20 coins!\"\n elif first == second or first == third or second == third:\n player.coins += 5\n tag = \"You win 5 coins!\"\n else:\n tag = \"Better luck next time.\"\n gen_post(player, first, second, third, under_text=tag)\n await post.edit(embed=content)\n gen_post(player, '❓', '❓', '❓', finish=True)\n await post.edit(embed=content, delete_after=30)", "async def on_reaction_add(reaction, user):\n if reaction.message.content.startswith('http'):\n curator = re.sub(r'\\d|\\W|(TravelFeed)','',str(user),re.IGNORECASE|re.DOTALL)\n if not user.id in discordcuratorlist and not user.id == botid:\n \"\"\"Checks if user who added reaction is a curator\"\"\"\n await loop.create_task(send_discord(\"Curator unauthorised: \"+curator, logchannel))\n return\n else:\n author, permlink = resolve_authorperm(reaction.message.content)\n post = Comment(construct_authorperm(author, permlink))\n if reaction.emoji == '🌍':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"tf100\", curator, reaction.message))\n elif reaction.emoji == '🌐': \n await bot.add_reaction(reaction.message, \"⏳\") \n actionqueue.put(Post_Action(post, \"tf50\", curator, reaction.message))\n elif reaction.emoji == '👥':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"coop100\", None, reaction.message))\n elif reaction.emoji == '👋':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"ad10\", curator, reaction.message))\n elif reaction.emoji == '📏':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"short0\", None, reaction.message))\n elif reaction.emoji == '🇬🇧':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"lang0\", None, reaction.message))\n elif reaction.emoji == '📝':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"copyright0\", None, reaction.message))", "def __str__(self):\n left = ''\n right = ''\n for i in range(len(self.ant)):\n left += Prop.__str__(self.ant[i]) + \", \"\n \n for i in range(len(self.con)):\n right += Prop.__str__(self.con[i]) + \", \"\n return left[:-2] + '|-- ' + right[:-2]", "def explode(self):\n\n return \"...it's a glove.\"", "def as_action_str(string: str) -> str:", "def react_all(core_spc_list, num_old_core_species, unimolecular_react, bimolecular_react, trimolecular_react=None, procnum=1):\n # Select reactive species that can undergo unimolecular reactions:\n spc_tuples = [(core_spc_list[i],)\n for i in range(num_old_core_species) if (unimolecular_react[i] and core_spc_list[i].reactive)]\n\n for i 
in range(num_old_core_species):\n for j in range(i, num_old_core_species):\n # Find reactions involving the species that are bimolecular.\n # This includes a species reacting with itself (if its own concentration is high enough).\n if bimolecular_react[i, j]:\n if core_spc_list[i].reactive and core_spc_list[j].reactive:\n spc_tuples.append((core_spc_list[i], core_spc_list[j]))\n\n if trimolecular_react is not None:\n for i in range(num_old_core_species):\n for j in range(i, num_old_core_species):\n for k in range(j, num_old_core_species):\n # Find reactions involving the species that are trimolecular.\n if trimolecular_react[i, j, k]:\n if core_spc_list[i].reactive and core_spc_list[j].reactive and core_spc_list[k].reactive:\n spc_tuples.append((core_spc_list[i], core_spc_list[j], core_spc_list[k]))\n\n if procnum == 1:\n # React all families like normal (provide empty argument for only_families)\n spc_fam_tuples = list(zip(spc_tuples))\n else:\n # Identify and split families that are prone to generate many reactions into sublists.\n family_list = list(get_db('kinetics').families.keys())\n major_families = [\n 'H_Abstraction', 'R_Recombination', 'Intra_Disproportionation', 'Intra_RH_Add_Endocyclic',\n 'Singlet_Carbene_Intra_Disproportionation', 'Intra_ene_reaction', 'Disproportionation',\n '1,4_Linear_birad_scission', 'R_Addition_MultipleBond', '2+2_cycloaddition_Cd', 'Diels_alder_addition',\n 'Intra_RH_Add_Exocyclic', 'Intra_Retro_Diels_alder_bicyclic', 'Intra_2+2_cycloaddition_Cd',\n 'Birad_recombination', 'Intra_Diels_alder_monocyclic', '1,4_Cyclic_birad_scission', '1,2_Insertion_carbene',\n ]\n\n split_list = []\n leftovers = []\n for fam in family_list:\n if fam in major_families:\n split_list.append([fam])\n else:\n leftovers.append(fam)\n split_list.append(leftovers)\n\n # Only employ family splitting for reactants that have a larger number than min_atoms\n min_atoms = 10\n spc_fam_tuples = []\n for i, spc_tuple in enumerate(spc_tuples):\n if any([len(spc.molecule[0].atoms) > min_atoms for spc in spc_tuple]):\n for item in split_list:\n spc_fam_tuples.append((spc_tuple, item))\n else:\n spc_fam_tuples.append((spc_tuple,))\n\n return react(spc_fam_tuples, procnum), [fam_tuple[0] for fam_tuple in spc_fam_tuples]", "def W(self, multiplier=1):\n multiplier = str(multiplier);\n weapon_dice_count = self.Attribute_Power(\"weapon-num-dice\");\n weapon_dice = self.Attribute_Power(\"weapon-dice\");\n return \"\".join((\"(\", multiplier, \"*\", weapon_dice_count, \")d\", weapon_dice));", "def errors_icons(self):\n msg_errors_lifes = ''\n for i in range(0,5):\n if self.letters_wrong <= i:\n msg_errors_lifes += ' ♥ '\n else:\n msg_errors_lifes += ' ☠ ' \n return msg_errors_lifes", "def gen_reaction(tabs):\n global pbeam\n pbeam = TLorentzVector(0, 0, Ebeam, Ebeam)\n global ptarg\n ptarg = TLorentzVector(0, 0, 0, m_proton)\n pinitial = pbeam + ptarg\n global s\n s = pinitial.Mag2()\n q_in = (s - m_proton**2) / (2 * math.sqrt(s))\n q_cm = math.sqrt((s - m_proton**2 + m_omega**2)**2 / (4 * s) - m_omega**2)\n EomegaCM = math.sqrt(m_omega**2 + q_cm**2)\n EprotonCM = math.sqrt(m_proton**2 + q_cm**2)\n costhetaCM = (2 * q_in * EomegaCM - m_omega**2 - tabs) / (2 * q_in * q_cm)\n if abs(costhetaCM) > 1:\n print \"tabs =\", tabs, \"is out of range, please try another value\"\n return 0\n costheta0 = random.Uniform(-1, 1)\n phi0 = random.Uniform(-math.pi, math.pi)\n costheta1 = random.Uniform(-1, 1)\n phi1 = random.Uniform(-math.pi, math.pi)\n pomega = gen_omega(costheta0, phi0, costheta1, phi1)\n 
sinthetaCM = math.sqrt(1 - costhetaCM**2)\n beta = TVector3(q_cm * sinthetaCM, 0, q_cm * costhetaCM) * (1 / EomegaCM)\n pomega.Boost(beta)\n pgamma[0].Boost(beta)\n pgamma[1].Boost(beta)\n pgamma[2].Boost(beta)\n global precoil\n precoil = TLorentzVector(-q_cm * sinthetaCM, 0, -q_cm * costhetaCM, EprotonCM)\n betaCM = pinitial.Vect() * (1 / pinitial[3])\n pgamma[0].Boost(betaCM)\n pgamma[1].Boost(betaCM)\n pgamma[2].Boost(betaCM)\n pomega.Boost(betaCM)\n precoil.Boost(betaCM)\n return pomega", "def _repr_(self):\n if self.parent()._chart.manifold().options.textbook_output:\n return str(ExpressionNice(self._express))\n else:\n return str(self._express)", "def __str__(self):\n ingredient_names = [str(ingredient) for ingredient in self.ingredients]\n return ', '.join(ingredient_names)", "def wepbuy(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC+\"14C\"+self.ESC+\"1;34m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0;34m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0;34m\"+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;34m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;34m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"46m\"+self.A178+self.ESC+\"40m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"37m The\"+self.ESC+\"CSaga\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"CRed\"+self.ESC+\"CDragon\"+self.ESC+\"C-\"+self.ESC+\"C\"+self.ESC+\"34mWeapons\"+self.ESC+\"CList \"+self.ESC+\"C\"+self.A220+self.ESC+\"46m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"46m\"+self.A178+self.ESC+\"44m\"+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;34m\"+self.A223+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"46m\"+self.A178+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"46m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;36mWeapons\"+self.ESC+\"27CPrice\"+self.ESC+\"7C\"+self.ESC+\"1;34;44m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"40m1.\"+self.ESC+\"CStick\"+self.ESC+\"0;34m..................................\"+self.ESC+\"36m200\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"40m2.\"+self.ESC+\"CDagger\"+self.ESC+\"0;34m...............................\"+self.ESC+\"36m1,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += 
self.ESC+\"14C\"+self.ESC+\"44m\"+self.A178+self.ESC+\"2C\"+self.ESC+\"40m3.\"+self.ESC+\"CShort\"+self.ESC+\"CSword\"+self.ESC+\"0;34m..........................\"+self.ESC+\"36m3,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A177+self.ESC+\"2C\"+self.ESC+\"40m4.\"+self.ESC+\"CLong\"+self.ESC+\"CSword\"+self.ESC+\"0;34m..........................\"+self.ESC+\"36m10,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A177+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A176+self.ESC+\"2C\"+self.ESC+\"40m5.\"+self.ESC+\"CHuge\"+self.ESC+\"CAxe\"+self.ESC+\"0;34m............................\"+self.ESC+\"36m30,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"0;34m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"1m6.\"+self.ESC+\"CBone\"+self.ESC+\"CCruncher\"+self.ESC+\"0;34m......................\"+self.ESC+\"36m100,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A176+self.ESC+\"2C\"+self.ESC+\"1m7.\"+self.ESC+\"CTwin\"+self.ESC+\"CSwords\"+self.ESC+\"0;34m........................\"+self.ESC+\"36m150,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A176+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A177+self.ESC+\"2C\"+self.ESC+\"1m8.\"+self.ESC+\"CPower\"+self.ESC+\"CAxe\"+self.ESC+\"0;34m..........................\"+self.ESC+\"36m200,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A177+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A178+self.ESC+\"2C\"+self.ESC+\"1m9.\"+self.ESC+\"CAble's\"+self.ESC+\"CSword\"+self.ESC+\"0;34m.......................\"+self.ESC+\"36m400,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A178+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m10.\"+self.ESC+\"CWan's\"+self.ESC+\"CWeapon\"+self.ESC+\"0;34m.....................\"+self.ESC+\"36m1,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A223+self.ESC+\"C\"+self.ESC+\"1m11.\"+self.ESC+\"CSpear\"+self.ESC+\"COf\"+self.ESC+\"CGold\"+self.ESC+\"0;34m....................\"+self.ESC+\"36m4,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m12.\"+self.ESC+\"CCrystal\"+self.ESC+\"CShard\"+self.ESC+\"0;34m...................\"+self.ESC+\"36m10,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m13.\"+self.ESC+\"CNiras's\"+self.ESC+\"CTeeth\"+self.ESC+\"0;34m...................\"+self.ESC+\"36m40,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m14.\"+self.ESC+\"CBlood\"+self.ESC+\"CSword\"+self.ESC+\"0;34m....................\"+self.ESC+\"36m100,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m15.\"+self.ESC+\"CDeath\"+self.ESC+\"CSword\"+self.ESC+\"0;34m....................\"+self.ESC+\"36m400,000,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += 
self.ESC+\"14C\"+self.ESC+\"44m\"+self.A176+self.ESC+\"0;34m\"+self.A219+self.A220+self.A220+self.A220+self.A220+self.A220+self.A178+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A178+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A176+self.ESC+\"C\"+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A220+self.A220+self.A176+self.A220+self.A220+self.A219+self.ESC+\"1;44m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"0m\\r\\n\";\n\t\treturn thismsg", "def make_third_recommendation() -> str:\r\n requirement_mappings = {\r\n \"Organic Red Helles\": 0,\r\n \"Organic Pilsner\": 0,\r\n \"Organic Dunkel\": 0}\r\n helles_info = bottles_required(\"Organic Red Helles\")\r\n pilsner_info = bottles_required(\"Organic Pilsner\")\r\n dunkel_info = bottles_required(\"Organic Dunkel\")\r\n\r\n requirement_mappings[\"Organic Red Helles\"] = helles_info[2]\r\n requirement_mappings[\"Organic Pilsner\"] = pilsner_info[2]\r\n requirement_mappings[\"Organic Dunkel\"] = dunkel_info[2]\r\n most_needed_recipe = max(requirement_mappings, key=requirement_mappings.get)\r\n\r\n return render_template(\"make_third_recommendation.html\",\r\n helles_display=helles_info,\r\n pilsner_display=pilsner_info,\r\n dunkel_display=dunkel_info,\r\n most_needed_display=most_needed_recipe)", "def add_product( product_id, reaction, model, compartment = \"default\", arguments = DEFAULT_ARGUMENTS):\n if product_id is None and len( reaction.getListOfProducts()) > 0:\n product_ref = reaction.getListOfProducts()[0]\n product_species = product_ref.getSpecies()\n return product_species\n else:\n if product_id is None:\n reaction_id = reaction.getId();\n product_id = reaction_id + \"_product_nr\" + str(len( reaction.getListOfProducts()));\n \n product_prefix = PRODUCT_PREFIX.get( reaction.getName().lower()) \n if product_prefix is None:\n product_prefix = \"\"\n\n reactant = None\n if len( reaction.getListOfReactants()) > 0:\n reactant = model.getSpecies( reaction.getListOfReactants()[0].getSpecies())\n if reactant != None:\n product_name = product_prefix + reactant.getName();\n else:\n product_name = product_prefix + \"Product\";\n add_species( None, model, id = product_id, name = product_name, compartment = compartment, arguments = arguments);\n \n product_ref = reaction.createProduct()\n check( product_ref, 'create product reference', arguments = arguments);\n check( product_ref.setSpecies( product_id), 'assign product species', arguments = arguments);\n check( product_ref.setMetaId( \"metaid_0000\" + product_id), 'set meta ID', arguments = arguments);\n check( product_ref.addCVTerm( add_cvterm( GENERIC_REACTION_SBO_MAPPING[\"product\"])), 'set controlled vocab SBO term for product', arguments = arguments);\n # check( product_ref.addCVTerm(add_cvterm(STANDOFF_ENTITY_TO_SBO_MAPPING[product.type])), 'set controlled vocab SBO term 2 for product')\n return product_id", "def prod(): \n query = \"SELECT * FROM ProducedMsg;\"\n tablestr = dbwrapper._query_pretty(query)\n result = string.replace(str(tablestr),'\\n','<br>')\n return result", "async def eval_error(self, ctx, error):\n if isinstance(error, commands.CheckFailure):\n embed = discord.Embed(title=\"You do not have permission to use eval\", colour=RED)\n else:\n cmd = ctx.message.content.split(\" \", maxsplit=1)[1].strip(\"` \")\n trace = 
format_traceback(traceback.format_exception(type(error), error, error.__traceback__))\n embed = discord.Embed(title=\"Evaluation\", description=f\"**Error**\\n```python\\n{trace}```\", colour=RED)\n embed.add_field(name=\"Input\", value=f\"```python\\n{cmd}\\n```\", inline=False)\n embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)\n await ctx.message.delete()\n await ctx.send(embed=embed)", "async def augment(self, ctx, *, augment: str):\n try:\n augment = self.get_entry('Augment', augment.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n type = augment['Type']\n price = augment['Sell Price']\n miranium = augment.get('Required Miranium')\n mat_1 = augment.get('Material 1')\n mat_2 = augment.get('Material 2')\n mat_3 = augment.get('Material 3')\n drop = augment.get('Drop')\n resource = augment.get('Precious Resource')\n\n total_tickets = 0\n\n embed = discord.Embed(title=augment['Name'], color=self.colors[augment[\"Rarity\"]])\n embed.add_field(name='Effect', value=augment['Effect'], inline=False)\n\n if type != 'Augment': # Remove when augment json fully updated\n embed.add_field(name='Type', value=type)\n\n if price != 0: # Remove when augment json fully updated\n embed.add_field(name='Sell Price', value=price)\n\n if miranium:\n embed.add_field(name='Required Miranium', value=miranium)\n\n if mat_1:\n name = mat_1[\"Name\"]\n amount = mat_1[\"Amount\"]\n\n tickets = self.materials[name.lower()]['price'] * amount\n total_tickets += tickets\n\n embed.add_field(name='Material 1', value=f'{amount} {name}\\n({tickets} Tickets)')\n\n if mat_2:\n name = mat_2[\"Name\"]\n amount = mat_2[\"Amount\"]\n\n tickets = self.materials[name.lower()]['price'] * amount\n total_tickets += tickets\n\n embed.add_field(name='Material 2', value=f'{amount} {name}\\n({tickets} Tickets)')\n\n if mat_3:\n name = mat_3[\"Name\"]\n amount = mat_3[\"Amount\"]\n\n tickets = self.materials[name.lower()]['price'] * amount\n total_tickets += tickets\n\n embed.add_field(name='Material 3', value=f'{amount} {name}\\n({tickets} Tickets)')\n\n if drop:\n embed.add_field(name='Drop', value=drop)\n if resource:\n embed.add_field(name='Precious Resource', value=f'{resource[\"Amount\"]} {resource[\"Name\"]}', inline=False)\n\n if total_tickets != 0:\n embed.add_field(name='Total Tickets', value=total_tickets)\n\n await ctx.send(embed=embed)", "def r_1(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSiMe = Compound(\"Cu\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n iSiMe = comps[1]\r\n\r\n me = list(iSiMe.formula.consist.keys())[0].name\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n me_oxs = get_me_oxs(me)\r\n if me_oxs == 0:\r\n return \"\"\r\n\r\n iSaNo = Compound(iSaNo_create(me, me_oxs, \"NO3\", 1))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + NO2 + H2O\"\r\n else:\r\n iSaNo = Compound(\"Cu(NO3)2\")\r\n if len(comps) == 1:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif len(comps) == 2:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n iSaNo = comps[2]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if an != \"NO3\":\r\n return \"\"\r\n if me in [\"Au\", \"Pt\"]:\r\n 
return \"\"\r\n\r\n iSiMe = Compound(simple(me))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + NO2 + H2O\"\r\n\r\n return Reaction(react)", "def whisper(*text: str, sep=' '):\n return f'<amazon:effect name=”whispered”>{sep.join(text)}</amazon:effect>'", "def explode(self):\n fire_potential = self.flannability * self.weight\n if fire_potential < 10:\n return '...fizzle'\n elif fire_potential < 50:\n return '...boom!'\n else:\n return '...BABOOM!!'\n\n # part 3 sublass", "def testMakeNewReaction(self):\n\n spcA = Species().fromSMILES('[OH]')\n spcs = [Species().fromSMILES('CC'), Species().fromSMILES('[CH3]')]\n spcTuples = [(spcA, spc) for spc in spcs]\n\n rxns = list(react(*spcTuples))\n\n cerm = CoreEdgeReactionModel()\n\n for rxn in rxns:\n cerm.makeNewReaction(rxn)\n\n \"\"\"\n 3 expected H-abstraction reactions:\n OH + CC = H2O + C[CH2]\n OH + [CH3] = H2O + [CH2]\n OH + [CH3] = [O] + C\n \"\"\"\n\n # count no. of entries in reactionDict:\n counter = 0\n for fam, v1 in cerm.reactionDict.iteritems():\n for key2, v2 in v1.iteritems():\n for key3, rxnList in v2.iteritems():\n counter += len(rxnList)\n\n self.assertEquals(counter, 3)", "def error_str(messages: Iterable[Error]) -> str:\n return '\\n\\n'.join(str(m) for m in messages if is_error(m.code))", "def combine(self, irc, msg, args, channel, number):\n contents = self.registryValue('contents', channel)\n number = int(number)\n if (len(contents) < number):\n irc.error(\"I only have %s things in my bag.\"%(len(contents)))\n return\n if (number < 2):\n irc.error(\"I can only combine at least two items, no fewer.\")\n return\n items = []\n for i in range(0, number):\n item = random.choice(contents)\n contents.remove(item)\n items.append(item)\n if (len(self.registryValue('history', channel)) == 0):\n result = \"something almost quite but not entirely unlike tea\"\n else:\n result = random.choice(self.registryValue('history', channel))\n contents.append(result)\n self.setRegistryValue('contents', contents, channel)\n if (len(items) == 2):\n irc.reply(\"combined %s and %s to create %s!\"%(items[0], items[1], result), action = True)\n else:\n irc.reply(\"combined %s, and %s to create %s!\"%(', '.join(items[0:-1]), items[-1], result), action = True)", "def __str__(self):\n all_cheeses = []\n for height in range(self.get_number_of_cheeses()):\n for stool in range(self.get_number_of_stools()):\n if self._cheese_at(stool, height) is not None:\n all_cheeses.append(self._cheese_at(stool, height))\n max_cheese_size = max([c.size for c in all_cheeses]) \\\n if len(all_cheeses) > 0 else 0\n stool_str = \"=\" * (2 * max_cheese_size + 1)\n stool_spacing = \" \"\n stools_str = (stool_str + stool_spacing) * self.get_number_of_stools()\n\n def _cheese_str(size):\n # helper for string representation of cheese\n if size == 0:\n return \" \" * len(stool_str)\n cheese_part = \"-\" + \"--\" * (size - 1)\n space_filler = \" \" * int((len(stool_str) - len(cheese_part)) / 2)\n return space_filler + cheese_part + space_filler\n\n lines = \"\"\n for height in range(self.get_number_of_cheeses() - 1, -1, -1):\n line = \"\"\n for stool in range(self.get_number_of_stools()):\n c = self._cheese_at(stool, height)\n if isinstance(c, Cheese):\n s = _cheese_str(int(c.size))\n else:\n s = _cheese_str(0)\n line += s + stool_spacing\n lines += line + \"\\n\"\n lines += stools_str\n\n return lines", "def _concatenate_instance(\n self,\n emotion: str,\n target_utterance: str,\n evidence_utterance: str,\n conversation_history: str,\n ) -> str:\n concatenated_text = (\n \" \"\n + 
emotion\n + \" <SEP> \"\n + target_utterance\n + \" <SEP> \"\n + evidence_utterance\n + \" <SEP> \"\n + conversation_history\n )\n\n return concatenated_text", "def getStringForAndDifferential(self, a, b, c):\n command = \"(({0} & {2}) | ({1} & {2}) | (~{2}))\".format(a,b,c)\n return command", "def __str__(self):\n if self.commands:\n if hpccm.config.g_ctype == container_type.DOCKER:\n # Format:\n # RUN cmd1 && \\\n # cmd2 && \\\n # cmd3\n s = ['RUN {}'.format(self.commands[0])]\n s.extend([' {}'.format(x) for x in self.commands[1:]])\n return ' && \\\\\\n'.join(s)\n elif hpccm.config.g_ctype == container_type.SINGULARITY:\n # Format:\n # %post\n # cmd1\n # cmd2\n # cmd3\n s = ['%post']\n s.extend([' {}'.format(x) for x in self.commands])\n return '\\n'.join(s)\n else:\n raise RuntimeError('Unknown container type')\n else:\n return ''", "def check_equipped(self):\n equipped_str = \"\"\n for body_part, item in self.equipped.items():\n if item is None:\n equipped_str += f\"On {body_part} slot you have equipped nothing\"\n else:\n equipped_str += f\"On {body_part} slot you have equipped {item}\"\n return equipped_str", "def info_reactions_simple_biochemical_get():\n reactions = _reaction_by_group(661) # 661 == Simple Biochemical Reactions\n return reactions, 200", "def work(self):\n return \"{0} {1}\".format(super().work()[:-1], \"and start programming.\")", "def multiply_string(message, n):\r\n return message*n", "async def listreact(self, ctx):\n emojis = await self.conf.guild(ctx.guild).reactions()\n msg = f\"Smart Reactions for {ctx.guild.name}:\\n\"\n for emoji in emojis:\n for command in emojis[emoji]:\n msg += f\"{emoji}: {command}\\n\"\n for page in pagify(msg, delims=[\"\\n\"]):\n await ctx.send(page)", "def explode(self):\n\n comb = self.flammability * self.weight\n if comb < 10:\n return \"...fizzle.\"\n elif comb >= 10 and comb < 50:\n return \"...boom!\"\n return \"...BABOOM!!\"", "def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def construct_result_message(iden: int, payload: str) -> str:\n iden_str = str(iden)\n return f'{{\"id\":{iden_str},\"type\":\"result\",\"success\":true,\"result\":{payload}}}'", "def __str__(self):\n termStrings = []\n for term in self.LHS:\n coefficient = term[0]\n unknownSet = term[1]\n\n termString = str(coefficient) + ' * '\n unknownStrings = []\n for unknown in unknownSet:\n unknownString = unknown[0].__class__.__name__ + '@' + str(id(unknown[0]))[-4:] + '.' + unknown[1] # last 4 digits of variable ID . 
attribute name\n unknownStrings.append(unknownString)\n termString += str.join(' * ', unknownStrings)\n termStrings.append(termString)\n\n termStrings = str.join(' + ', termStrings)\n return termStrings + ' = ' + str(self.RHS)", "def to_crasher(self, error_message: Optional[str] = None) -> Text:\n lines = []\n if error_message:\n lines.append('// Exception:')\n lines.extend(('// ' + l).strip() for l in error_message.splitlines())\n lines.append('//')\n lines.append('// options: ' + self.options.to_json())\n if self.args_batch is not None:\n for args in self.args_batch: # pylint: disable=not-an-iterable\n lines.append('// args: ' + '; '.join(str(a) for a in args))\n return '\\n'.join(lines) + '\\n' + self.input_text + '\\n'", "def __str__(self):\n\t\ttxt = \"____RECIPE____\\n\\nname: {}\\ncooking_lvl: {}\\ncooking time: {}\\nIngredients: {}\\nRecipe type: {}\\n\\\nDescription:{}\\n______________\\n\".format(self.name, self.cooking_lvl, self.cooking_time, \\\n\t\t\tself.ingredients, self.recipe_type, self.description)\n\t\treturn txt" ]
[ "0.6317691", "0.59467554", "0.57581663", "0.5728929", "0.5590028", "0.5556531", "0.5528526", "0.55182624", "0.5469046", "0.54264945", "0.5413973", "0.5307061", "0.5297744", "0.5281674", "0.5246843", "0.52160555", "0.51447445", "0.51412046", "0.5139743", "0.511896", "0.50641066", "0.5057844", "0.50475806", "0.5043912", "0.5030848", "0.5024914", "0.502204", "0.50214237", "0.5020959", "0.5020217", "0.5016599", "0.5016278", "0.5015962", "0.5009501", "0.50043726", "0.5004039", "0.49936083", "0.49915576", "0.49710888", "0.49644408", "0.49361375", "0.4924474", "0.4918384", "0.4912146", "0.48832947", "0.48738328", "0.48675016", "0.48611942", "0.48466578", "0.48354033", "0.48341945", "0.48219076", "0.4818807", "0.4811965", "0.48076427", "0.48015657", "0.480075", "0.47903323", "0.47882774", "0.47853354", "0.47771823", "0.47719625", "0.4770633", "0.47704354", "0.4764789", "0.47628", "0.47491258", "0.47482595", "0.47453532", "0.47438362", "0.47411707", "0.47386113", "0.47361007", "0.47348896", "0.47346935", "0.47332522", "0.4733032", "0.47326413", "0.47256172", "0.4725599", "0.47231206", "0.4720458", "0.4716213", "0.47156075", "0.47115675", "0.47051322", "0.4704815", "0.47043037", "0.47022864", "0.47009206", "0.4692931", "0.46909735", "0.46887624", "0.46867755", "0.46718988", "0.46669996", "0.4660345", "0.46602672", "0.4651033", "0.46420622" ]
0.5951013
1
StereoGroup atoms are in the reaction, but the reaction doesn't affect the chirality at the stereo centers > preserve stereo group
def test_reaction_preserves_stereo(self):
    reaction = '[C@:1]>>[C@:1]'
    reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']
    for reactant in reactants:
        products = _reactAndSummarize(reaction, reactant)
        self.assertEqual(products, reactant)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')", "def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. 
Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )", "def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)", "def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def test_parameterize_mol_missing_stereo_rdkit(self, force_field):\n toolkit_registry = ToolkitRegistry(\n toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n )\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def GetStereoisomerCount(m, options=...): # -> Any:\n ...", "def test_reaction_inverts_stereo(self):\n reaction = '[C@:1]>>[C@@:1]'\n\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br |o1:1|')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@H](Cl)Br |&1:1|')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')", "def EnumerateStereoisomers(m, options=..., verbose=...): # -> Generator[Unknown, None, None]:\n ...", "def reflect_local_stereo(gra):\n atm_par_dct = atom_stereo_parities(gra)\n atm_par_dct = dict_.transform_values(\n atm_par_dct, lambda x: x if x is None else not x)\n 
gra = set_atom_stereo_parities(gra, atm_par_dct)\n return gra", "def atom_stereo_keys(sgr):\n atm_ste_keys = dict_.keys_by_value(atom_stereo_parities(sgr),\n lambda x: x in [True, False])\n return atm_ste_keys", "def testMoreStereo(self):\r\n smi_and_cansmi = [\r\n ('Cl[C@](C)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](C)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](C)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Cl)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(I)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](C)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](C)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](C)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](C)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Cl)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Br)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Br)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Br)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Br)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](C)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](I)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](I)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](I)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Cl)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Cl)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](C)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](C)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Br)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Br)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Br)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Br)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](I)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](I)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](I)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](I)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](I)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](I)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](I)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(I)C', 'C[C@@](Cl)(Br)I')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def test_parameterize_mol_missing_stereo_openeye(self, force_field):\n toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam): \n h, w, u = curImageInfo.originalImageResized.shape # shape after resize\n K = 1\n N = h * w * K\n gr = np.mean(curImageInfo.originalImageResized, 2) # not 3 as it is zero based :3\n grs = cv2.GaussianBlur(gr, (5, 5), 1)\n \n # One heuristic for converting depth to disparity\n disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;\n \n if conversionParam.spatialSmoothnessSwitch == True:\n # Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients\n # Vertical and Horizontal Edges\n dx = cv2.filter2D(grs, -1, 
np.transpose(np.array([[-1, 1, 0]])))\n dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))\n \n W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2 \n \n A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, \"csc\") \\\n + (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \\\n + (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))\n \n b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()\n \n [x, flag] = cg(A, b, np.transpose(disparity0).flatten(), 5e-1, 50)\n \n disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 1, K)\n else:\n disparity = disparity0\n \n curImageInfo.leftImage = curImageInfo.originalImage\n \n # The -ve sign to convert the white to black and black to white \n warpright = -disparity\n \n # only the warping interp2 is done on the original size image with no resizing to have good estimation\n warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]), \n interpolation=cv2.INTER_LINEAR)\n \n curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright), \n conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))\n \n return disparity", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def stereo_score(alignment):\n #dictionary with properties for each residue\n dic_prop = {'I': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'L': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'V': [1, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'C': [1, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n 'A': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'G': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'M': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'F': [1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'W': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'H': [1, 1, 0, 0, 0, 0, 1, 1, 0, 1],\n 'K': [1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'R': [0, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'E': [0, 1, 0, 0, 0, 0, 0, 0, 1, 1],\n 'Q': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'D': [0, 1, 1, 0, 0, 0, 0, 0, 1, 1],\n 'N': [0, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'S': [0, 1, 1, 0, 1, 0, 0, 0, 0, 0],\n 'T': [1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'P': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],\n 'B': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Z': [0, 1, 0, 0, 
0, 0, 0, 0, 0, 0],\n 'X': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n '-': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n score_list = []\n for i in range(0, alignment.get_alignment_length()):\n #extract the unique residues in the alignment\n column = ''.join(set(alignment[:, i]))\n stereo_list = []\n #loop through each residue\n for res in range(0, len(column)):\n #replace the residue with list of properties\n residue = column[res]\n #append the properties list to a\n stereo_prop = dic_prop.get(residue)\n stereo_list.append(stereo_prop)\n #number of common properties\n count_stereo = sum(len(set(i)) == 1 for i in zip(*stereo_list))\n #add the number of properties to a list\n score_list.append(count_stereo)\n score_list_final = [float(i*0.1) for i in score_list]\n return score_list_final", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def has_stereo(gra):\n return bool(atom_stereo_keys(gra) or bond_stereo_keys(gra))", "def proc_sw_only_morphs(forward_pairs, morphs, backward_pairs):\n sandwich_pairs = []\n if not backward_pairs:\n forward_pairs[-1].morphs.extend(morphs)\n elif len(morphs) == 1:\n morph = morphs[0]\n morph_str = str(morph)\n if morph_str in ['이/VCP', '하/VX'] and backward_pairs[0].morphs[0].tag.startswith('E'):\n # '이' 긍정지정사나 '하' 보조용언 뒤에 어미가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == '에/JKB' and backward_pairs[0].morphs[0].tag == 'JX':\n # '에' 부사격조사 뒤에 보조사가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == 'ᆯ/ETM' and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 'ㄹ' 관형형전성어미 앞에 용언이 나올 경우\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag in ['EC', 'EF'] and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 연결어미나 종결어미 앞에 용언이 나올 경우\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag.startswith('XS'):\n # append suffixes to the end of forward pair list\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n else:\n raise AlignError()\n else:\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n if morphs_str == '(/SS + 대북/NNG + (/SS + 대북/NNG + )/SS + )/SS' and forward_pairs[-1].word_str == u'대북':\n del morphs[:]\n elif morphs_str == '(/SS + 동경/NNP + )/SS' and forward_pairs[-1].word_str == u'도쿄':\n del 
morphs[:]\n else:\n raise AlignError()\n return sandwich_pairs", "def bond_stereo_keys(sgr):\n bnd_ste_keys = dict_.keys_by_value(bond_stereo_parities(sgr),\n lambda x: x in [True, False])\n return bnd_ste_keys", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def test_enumerating_stereobonds(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"ClC=CCl\", allow_undefined_stereo=True, toolkit_registry=toolkit\n )\n\n # use the default options\n isomers = mol.enumerate_stereoisomers()\n assert len(isomers) == 2\n\n assert mol not in isomers\n # make sure the input molecule is only different by bond stereo\n for ismol in isomers:\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereobonds(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"ClC=CCl\", allow_undefined_stereo=True, toolkit_registry=toolkit\n )\n\n # use the default options\n isomers = mol.enumerate_stereoisomers()\n assert len(isomers) == 2\n\n assert mol not in isomers\n # make sure the input molecule is only different by bond stereo\n for ismol in isomers:\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def process_stereo(self, image1, image2, disp1, disp2):\n return _elas.Elas_process_stereo(self, image1, image2, disp1, disp2)", "def mutate_residue(pose, mutant_position, mutant_aa,\n pack_radius = 0.0, pack_scorefxn = '' ):\n #### a MutateResidue Mover exists similar to this 
except it does not pack\n #### the area around the mutant residue (no pack_radius feature)\n #mutator = MutateResidue(mutant_position, mutant_aa)\n #mutator.apply(test_pose)\n\n if pose.is_fullatom() == False:\n IOError( 'mutate_residue only works with fullatom poses' )\n\n\n # create a standard scorefxn by default\n if not pack_scorefxn:\n pack_scorefxn = rosetta.core.scoring.get_score_function()\n\n task = pyrosetta.standard_packer_task(pose)\n\n # the Vector1 of booleans (a specific object) is needed for specifying the\n # mutation, this demonstrates another more direct method of setting\n # PackerTask options for design\n aa_bool = rosetta.utility.vector1_bool()\n # PyRosetta uses several ways of tracking amino acids (ResidueTypes)\n # the numbers 1-20 correspond individually to the 20 proteogenic amino acids\n # aa_from_oneletter returns the integer representation of an amino acid\n # from its one letter code\n # convert mutant_aa to its integer representation\n mutant_aa = rosetta.core.chemical.aa_from_oneletter_code(mutant_aa)\n\n # mutation is performed by using a PackerTask with only the mutant\n # amino acid available during design\n # to do this, construct a Vector1 of booleans indicating which amino acid\n # (by its numerical designation, see above) to allow\n for i in range(1, 21):\n # in Python, logical expression are evaluated with priority, thus the\n # line below appends to aa_bool the truth (True or False) of the\n # statement i == mutant_aa\n aa_bool.append( i == int(mutant_aa) )\n\n # modify the mutating residue's assignment in the PackerTask using the\n # Vector1 of booleans across the proteogenic amino acids\n task.nonconst_residue_task(mutant_position\n ).restrict_absent_canonical_aas(aa_bool)\n\n # prevent residues from packing by setting the per-residue \"options\" of\n # the PackerTask\n restrict_non_nbrs_from_repacking(pose, mutant_position, task, pack_radius)\n\n # apply the mutation and pack nearby residues\n #print task\n packer = rosetta.protocols.simple_moves.PackRotamersMover(pack_scorefxn, task)\n packer.apply(pose)", "def atom_parity_evaluator_to_local_stereo_(gra):\n return atom_parity_evaluator_from_local_stereo_(gra)", "def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. 
Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def setupForRigPose(self):\n\n # unlock joint movers\n cmds.select(\"JointMover\", hi=True)\n jmNodes = cmds.ls(sl=True)\n for node in jmNodes:\n cmds.lockNode(node, lock=False)\n\n # find the mover shapes and set their visibility\n movers = self.returnJointMovers\n globalMovers = movers[0]\n shapes = []\n\n for each in movers:\n for mover in each:\n child = cmds.listRelatives(mover, children=True, shapes=True)\n if len(child) > 0:\n shapes.append(mover + \"|\" + 
child[0])\n\n for shape in shapes:\n cmds.setAttr(shape + \".v\", lock=False)\n cmds.setAttr(shape + \".v\", 0, lock=True)\n\n # show global movers\n shapes = []\n for mover in globalMovers:\n child = cmds.listRelatives(mover, children=True, shapes=True)\n if len(child) > 0:\n shapes.append(mover + \"|\" + child[0])\n\n for shape in shapes:\n cmds.setAttr(shape + \".v\", lock=False)\n cmds.setAttr(shape + \".v\", 1, lock=True)\n\n # unlock mover group for this module and make visible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", 1)\n\n # hide the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 0)\n cmds.lockNode(parent, lock=True)\n\n # get the joints created by this module\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.parentConstraint(joint + \"_mover_offset\", joint)\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.parentConstraint(self.name + \"_\" + jointBaseName + \"_mover_offset\", joint)\n\n # lock joint movers\n cmds.select(\"JointMover\", hi=True)\n jmNodes = cmds.ls(sl=True)\n for node in jmNodes:\n cmds.lockNode(node, lock=True)", "def test_enumerating_stereo_partially_defined(\n self, toolkit_class, smiles, undefined_only, expected\n ):\n\n if not toolkit_class.is_available():\n pytest.skip(\"Required toolkit is unavailable\")\n\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n smiles, toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n stereoisomers = mol.enumerate_stereoisomers(\n undefined_only=undefined_only, rationalise=False\n )\n\n # Ensure that the results of the enumeration are what the test expects.\n # This roundtrips the expected output from SMILES --> OFFMol --> SMILES,\n # since the SMILES for stereoisomers generated in this test may change depending\n # on which cheminformatics toolkit is used.\n expected = {\n Molecule.from_smiles(stereoisomer, allow_undefined_stereo=True).to_smiles(\n explicit_hydrogens=True, isomeric=True, mapped=False\n )\n for stereoisomer in expected\n }\n actual = {\n stereoisomer.to_smiles(explicit_hydrogens=True, isomeric=True, mapped=False)\n for stereoisomer in stereoisomers\n }\n\n assert expected == actual", "def test_enumerating_stereocenters(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"NC(Cl)(F)O\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n\n isomers = mol.enumerate_stereoisomers(toolkit_registry=toolkit)\n\n assert len(isomers) == 2\n # make sure the mol is not in the isomers and that they only differ by stereo chem\n assert mol not in isomers\n for ismol in isomers:\n assert ismol.n_conformers != 
0\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n atom_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the two isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereocenters(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"NC(Cl)(F)O\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n\n isomers = mol.enumerate_stereoisomers(toolkit_registry=toolkit)\n\n assert len(isomers) == 2\n # make sure the mol is not in the isomers and that they only differ by stereo chem\n assert mol not in isomers\n for ismol in isomers:\n assert ismol.n_conformers != 0\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n atom_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the two isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def bond_parity_evaluator_to_local_stereo_(gra):\n return bond_parity_evaluator_from_local_stereo_(gra)", "def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))", "def DeMorgan_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, A), disj, (neg, B)), (neg, (A, conj, B)) ],\n\t\t\t[ (neg, (A, conj, B)), ((neg, A), disj, (neg, B)) ],\n\t\t\t[ ((neg, A), conj, (neg, B)), (neg, (A, disj, B)) ],\n\t\t\t[ (neg, (A, disj, B)), ((neg, A), conj, (neg, B)) ]\n\t\t]))", "def _add_pairblend(self, dgmod, rigid, transform):\n\n assert isinstance(dgmod, cmdx.DGModifier)\n\n pair_blend = dgmod.create_node(\"pairBlend\")\n dgmod.set_attr(pair_blend[\"isHistoricallyInteresting\"], False)\n\n # Establish initial values, 
before keyframes\n # tm = cmdx.Tm(self._cache[(transform, \"matrix\")])\n\n # Read from matrix, as opposed to the rotate/translate channels\n # to account for jointOrient, pivots and all manner of things\n # translate = tm.translation()\n # rotate = tm.rotation()\n\n translate = self._cache[(transform, \"translate\")]\n rotate = self._cache[(transform, \"rotate\")]\n\n dgmod.set_attr(pair_blend[\"inTranslate1\"], translate)\n dgmod.set_attr(pair_blend[\"inRotate1\"], rotate)\n\n dgmod.connect(rigid[\"outputTranslateX\"], pair_blend[\"inTranslateX2\"])\n dgmod.connect(rigid[\"outputTranslateY\"], pair_blend[\"inTranslateY2\"])\n dgmod.connect(rigid[\"outputTranslateZ\"], pair_blend[\"inTranslateZ2\"])\n dgmod.connect(rigid[\"outputRotateX\"], pair_blend[\"inRotateX2\"])\n dgmod.connect(rigid[\"outputRotateY\"], pair_blend[\"inRotateY2\"])\n dgmod.connect(rigid[\"outputRotateZ\"], pair_blend[\"inRotateZ2\"])\n\n # Let the animator see the raw animation values, no physics\n dgmod.connect(self._tree_root[0][\"simulated\"], pair_blend[\"weight\"])\n\n if self._opts[\"autoKey\"]:\n # Generate default animation curves, it's expected since you can no\n # longer see whether channels are keyed or not, now being green.\n time = cmdx.currentTime()\n mapping = (\n (\"animCurveTL\", translate.x, \"inTranslateX1\"),\n (\"animCurveTL\", translate.y, \"inTranslateY1\"),\n (\"animCurveTL\", translate.z, \"inTranslateZ1\"),\n (\"animCurveTA\", rotate.x, \"inRotateX1\"),\n (\"animCurveTA\", rotate.y, \"inRotateY1\"),\n (\"animCurveTA\", rotate.z, \"inRotateZ1\")\n )\n\n for curve, value, dst in mapping:\n curve = dgmod.create_node(curve)\n curve.key(time, value)\n dgmod.connect(curve[\"output\"], pair_blend[dst])\n\n # Transfer existing animation/connections\n for src, dst in transform.data.get(\"priorConnections\", {}).items():\n dst = pair_blend[dst]\n dgmod.connect(src, dst)\n\n commands._connect_transform(dgmod, pair_blend, transform)\n\n return pair_blend", "def atomisticSphere (flag, filin, filout, max_distance = 15, analysis = 1, atom_central = \"mean_point\", debug = 1):\n \n list_atom_pocket = parsePDB.loadCoordSectionPDB(filin)\n dico_stock_count = tool.generateStructCompositionAtomistic (max_distance, 3)\n \n if atom_central == \"mean_point\" : \n central_point = generateMeansPointPocket (list_atom_pocket)\n # else append barycenter pocket calculated by RADI\n \n for atom in list_atom_pocket : \n distance = parsePDB.distanceTwoatoms(central_point, atom)\n # print distance\n element = atom[\"element\"]\n name_atom = atom[\"name\"]\n residue = tool.transformAA(atom[\"resName\"])\n \n for distance_key in dico_stock_count.keys() : \n if distance <= distance_key or distance > max_distance : \n dico_stock_count [distance_key] [\"atom\"] = dico_stock_count [distance_key] [\"atom\"] + 1\n if element == \"C\" : \n dico_stock_count [distance_key] [\"carbon\"] = dico_stock_count [distance_key] [\"carbon\"] + 1\n elif element == \"N\" : \n dico_stock_count [distance_key] [\"nitrogen\"] = dico_stock_count [distance_key] [\"nitrogen\"] + 1\n elif element == \"S\" : \n dico_stock_count [distance_key] [\"sulfur\"] = dico_stock_count [distance_key] [\"sulfur\"] + 1\n elif element == \"O\" : \n dico_stock_count [distance_key] [\"oxygen\"] = dico_stock_count [distance_key] [\"oxygen\"] + 1\n elif element == \"H\" : \n dico_stock_count [distance_key] [\"hydrogen\"] = dico_stock_count [distance_key] [\"hydrogen\"] + 1\n \n if residue in dico_Hacceptor.keys () : \n if name_atom in dico_Hacceptor[residue] : \n 
dico_stock_count [distance_key] [\"hbond_acceptor\"] = dico_stock_count [distance_key] [\"hbond_acceptor\"] + 1\n \n if residue in dico_atom_Car : \n if name_atom in dico_atom_Car[residue] : \n dico_stock_count [distance_key] [\"aromatic\"] = dico_stock_count [distance_key] [\"aromatic\"] + 1\n \n if residue in dico_atom_hydrophobic : \n if name_atom in dico_atom_hydrophobic[residue] : \n dico_stock_count [distance_key] [\"hydrophobic\"] = dico_stock_count [distance_key] [\"hydrophobic\"] + 1\n \n if residue in dico_atom_Carg : \n if name_atom in dico_atom_Carg[residue] : \n dico_stock_count [distance_key] [\"alcool\"] = dico_stock_count [distance_key] [\"alcool\"] + 1\n \n \n if residue in dico_Hdonor.keys () : \n if name_atom in dico_Hdonor[residue] : \n dico_stock_count [distance_key] [\"hbond_donor\"] = dico_stock_count [distance_key] [\"hbond_donor\"] + 1\n \n if name_atom == \"CA\" or name_atom == \"O\" or name_atom == \"C\" or name_atom == \"N\" or name_atom == \"H\" or name_atom == \"HA\" :\n dico_stock_count [distance_key] [\"main_chain\"] = dico_stock_count [distance_key] [\"main_chain\"] + 1\n else : \n dico_stock_count [distance_key] [\"side_chain\"] = dico_stock_count [distance_key] [\"side_chain\"] + 1\n \n for distance_key in dico_stock_count.keys () : \n nb_atom = float(dico_stock_count [distance_key] [\"atom\"])\n if nb_atom == 0 : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n \n else : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + str(nb_atom) + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + str (dico_stock_count [distance_key] [\"side_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"main_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"sulfur\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"carbon\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"nitrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"oxygen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] 
[\"hydrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_acceptor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_donor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"alcool\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrophobic\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"aromatic\"] / nb_atom) + \"\\n\")", "def addMods(file, spec, data_card, channel_bins, systs):\n for im, modifier in enumerate(systs):\n if \"normsys\" in modifier[1]: ##normsys\n # write normsys as 'shape?' so that Combine doesn't try to combine normsys and histosys mods of the same name\n data_card.systs.append((modifier[0], False, \"shape?\", [], {}))\n for channel in spec[\"channels\"]:\n data_card.systs[im][4].update({channel[\"name\"]: {}})\n for sample in channel[\"samples\"]:\n for i in data_card.systs:\n i[4][channel[\"name\"]].update({sample[\"name\"]: 0.0})\n\n if \"lumi\" in modifier[1]: ##lumi\n # Write lumi as lnN since they act the same way on the model\n data_card.systs.append((modifier[0], False, \"lnN\", [], {}))\n for channel in spec[\"channels\"]:\n data_card.systs[im][4].update({channel[\"name\"]: {}})\n for sample in channel[\"samples\"]:\n for i in data_card.systs:\n i[4][channel[\"name\"]].update({sample[\"name\"]: 0.0})\n\n if \"histosys\" in modifier[1]: ##histosys\n data_card.systs.append((modifier[0], False, \"shape\", [], {}))\n for channel in spec[\"channels\"]:\n data_card.systs[im][4].update({channel[\"name\"]: {}})\n for sample in channel[\"samples\"]:\n for i in data_card.systs:\n i[4][channel[\"name\"]].update({sample[\"name\"]: 0.0})\n\n for idxc, channel in enumerate(spec[\"channels\"]):\n for idxs, sample in enumerate(channel[\"samples\"]):\n mods = sample[\"modifiers\"]\n names = [mod[\"name\"] for mod in mods]\n for syst in data_card.systs:\n name = syst[0]\n if name in names:\n syst_type = syst[2]\n # if systematic name is a modifier for this sample\n if \"shape?\" in syst_type: ##normsys\n for mod in mods:\n if mod[\"type\"] == \"normsys\" and mod[\"name\"] == name:\n if mod[\"data\"][\"lo\"] == 0:\n # asymmetric lnN\n syst[4][channel[\"name\"]].update(\n {\n sample[\"name\"]: str(\n mod[\"data\"][\"lo\"] + 1e-9\n )\n + \"/\"\n + str(mod[\"data\"][\"hi\"])\n }\n )\n elif mod[\"data\"][\"hi\"] == 0:\n # asymmetric lnN\n syst[4][channel[\"name\"]].update(\n {\n sample[\"name\"]: str(mod[\"data\"][\"lo\"])\n + \"/\"\n + str(mod[\"data\"][\"hi\"] + 1e-9)\n }\n )\n else:\n # asymmetric lnN\n syst[4][channel[\"name\"]].update(\n {\n sample[\"name\"]: str(mod[\"data\"][\"lo\"])\n + \"/\"\n + str(mod[\"data\"][\"hi\"])\n }\n )\n if \"lnN\" in syst_type: ##lumi only\n for mod in mods:\n if mod[\"type\"] == \"lumi\" and mod[\"name\"] == name:\n for measurement in spec[\"measurements\"]:\n for param in measurement[\"config\"][\"parameters\"]:\n if mod[\"name\"] == param[\"name\"]:\n # asymmetric lnN\n syst[4][channel[\"name\"]].update(\n {\n sample[\"name\"]: str(\n param[\"auxdata\"][0]\n - param[\"sigmas\"][0]\n )\n + \"/\"\n + str(\n param[\"auxdata\"][0]\n + param[\"sigmas\"][0]\n )\n }\n )\n\n if \"shape\" in syst_type: ##histosys\n for 
mod in mods:\n if mod[\"type\"] == \"histosys\" and mod[\"name\"] == name:\n syst[4][channel[\"name\"]].update({sample[\"name\"]: 1.0})\n hi_data = hist.Hist.new.Regular(\n channel_bins[channel[\"name\"]],\n 0,\n channel_bins[channel[\"name\"]],\n ).Weight()\n hi_data[...] = np.stack(\n [\n mod[\"data\"][\"hi_data\"],\n [\n 0\n for _ in range(\n channel_bins[channel[\"name\"]]\n )\n ],\n ],\n axis=-1,\n )\n lo_data = hist.Hist.new.Regular(\n channel_bins[channel[\"name\"]],\n 0,\n channel_bins[channel[\"name\"]],\n ).Weight()\n lo_data[...] = np.stack(\n [\n mod[\"data\"][\"lo_data\"],\n [\n 0\n for _ in range(\n channel_bins[channel[\"name\"]]\n )\n ],\n ],\n axis=-1,\n )\n file[\n channel[\"name\"]\n + \"/\"\n + spec[\"channels\"][idxc][\"samples\"][idxs][\"name\"]\n + \"_\"\n + name\n + \"Up\"\n ] = hi_data\n file[\n channel[\"name\"]\n + \"/\"\n + spec[\"channels\"][idxc][\"samples\"][idxs][\"name\"]\n + \"_\"\n + name\n + \"Down\"\n ] = lo_data", "def build_reactive_complex(self, settings_manager: SettingsManager):\n import scine_database as db\n import scine_utilities as utils\n\n start_structure_ids = self._calculation.get_structures()\n start_structures = [db.Structure(sid, self._structures) for sid in start_structure_ids]\n self.save_initial_graphs_and_charges(settings_manager, start_structures)\n if len(start_structures) == 1:\n # For an intramolecular structure it is sufficient to provide one\n # structure that is both, start structure and reactive complex\n structure = start_structures[0]\n atoms = structure.get_atoms()\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n if len(start_structures) == 2:\n # Intermolecular reactions reactions require in situ generation of the reactive complex\n s0 = start_structures[0]\n s1 = start_structures[1]\n\n # Get coordinates\n atoms1 = s0.get_atoms()\n atoms2 = s1.get_atoms()\n elements1 = atoms1.elements\n elements2 = atoms2.elements\n coordinates1 = atoms1.positions\n coordinates2 = atoms2.positions\n # Calculate reactive center mean position\n if self.exploration_key + \"_lhs_list\" in self.settings[self.exploration_key]:\n sites1 = self.settings[self.exploration_key][self.exploration_key + \"_lhs_list\"]\n sites2 = self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"]\n self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"] = list(\n idx + len(elements1) for idx in sites2\n )\n elif \"nt_associations\" in self.settings[self.exploration_key]:\n sites1 = []\n sites2 = []\n nAtoms1 = len(atoms1.elements)\n for i in range(0, len(self.settings[self.exploration_key][\"nt_associations\"]), 2):\n at1 = self.settings[self.exploration_key][\"nt_associations\"][i]\n at2 = self.settings[self.exploration_key][\"nt_associations\"][i + 1]\n if at1 >= nAtoms1 > at2:\n sites1.append(at2)\n sites2.append(at1 - nAtoms1)\n if at2 >= nAtoms1 > at1:\n sites1.append(at1)\n sites2.append(at2 - nAtoms1)\n else:\n self.raise_named_exception(\n \"Reactive complex can not be build: missing reactive atoms list(s).\"\n )\n reactive_center1 = np.mean(coordinates1[sites1], axis=0)\n reactive_center2 = np.mean(coordinates2[sites2], axis=0)\n # Place reactive center mean position into origin\n coord1 = coordinates1 - reactive_center1\n coord2 = coordinates2 - reactive_center2\n positions = self._orient_coordinates(coord1, coord2)\n atoms = utils.AtomCollection(elements1 + elements2, positions)\n self.random_displace_atoms(atoms, 
self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n # should not be reachable\n self.raise_named_exception(\n \"Reactive complexes built from more than 2 structures are not supported.\"\n )", "def cleaveSurfAtom(entry,max_bonds=1,supercell=2,group_structs=True):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # If the crystal is 3D\n if results[0]=='conventional':\n struct = copy.deepcopy(entry[0])\n og_binary_matrix = getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(og_binary_matrix)))\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n bonds = []\n \n # Get list of bonded atoms\n for i in range(len(og_binary_matrix)):\n for pair in [(i,j) for j in range(i+1,len(og_binary_matrix)) \n if og_binary_matrix[i][j]==1]:\n bonds.append(pair)\n allCombos = []\n combNum = 0\n \n # Get list of all combinations of bonds\n for i in range(max_bonds+1):\n for com in list(itertools.combinations(bonds,i)):\n allCombos.append(com)\n combNum+=1\n\n combos = allCombos\n jjj=0\n all_structs = []\n \n # For each bond combination\n for combo in combos:\n broken=0\n jjj+=1\n modified_matrix = np.array(binary_matrix)\n for pair in combo:\n i,j = pair\n i=i*supercell**3\n j=j*supercell**3\n # Break bonds in the loop\n for shift in range(supercell**3):\n for shift2 in range(supercell**3):\n modified_matrix[i+shift][j+shift2]=0\n modified_matrix[j+shift][i+shift2]=0\n\n new_num_bonds=sum(sum(modified_matrix))\n broken=int(og_num_bonds-new_num_bonds) \n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n # If the new set of atoms is not empty\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. 
the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n # If formula of new network matches the original cell\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n else:\n print('Material is does not have a 3D motiif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def add_synth_group(self, name=\"\"):\n return None", "def stereo_callback(self, stereo_msg):\r\n start = time.time()\r\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\r\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\r\n\r\n # Build the image pyramids once since they're used at multiple places.\r\n self.create_image_pyramids()\r\n\r\n # Detect features in the first frame.\r\n if self.is_first_img:\r\n if not self.config.load_features_flag:\r\n self.initialize_first_frame()\r\n self.is_first_img = False\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n else:\r\n if not self.config.load_features_flag:\r\n # Track the feature in the previous image.\r\n t = time.time()\r\n self.track_features()\r\n print('___track_features:', time.time() - t)\r\n t = time.time()\r\n\r\n # Add new features into the current image.\r\n self.add_new_features()\r\n print('___add_new_features:', time.time() - t)\r\n t = time.time()\r\n self.prune_features()\r\n print('___prune_features:', time.time() - t)\r\n t = time.time()\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n print('___draw_features_stereo:', time.time() - t)\r\n t = time.time()\r\n\r\n print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')\r\n\r\n if not self.config.load_features_flag:\r\n try:\r\n self.save_features() \r\n return self.publish()\r\n finally:\r\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\r\n self.prev_features = self.curr_features\r\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\r\n\r\n # Initialize the current features to empty vectors.\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n else:\r\n self.load_features()\r\n return self.publish()", "def makeResidueAtomSets(residue, aromaticsEquivalent=True):\n \n getResidueMapping(residue)\n \n equivalent = {}\n elementSymbolDict = {}\n nonequivalent = {}\n multiSet = {}\n chemAtomSetDict = {}\n inMultiSet = {}\n molType = residue.molResidue.molType\n \n for atom in residue.atoms: \n chemAtom = atom.chemAtom\n chemAtomSetDict[atom] = chemAtom\n elementSymbol = chemAtom.elementSymbol\n chemAtomSet = chemAtom.chemAtomSet\n\n if chemAtomSet is None:\n name = chemAtom.name\n makeAtomSet(name,(atom,),None,'simple')\n \n else:\n name = chemAtomSet.name\n elementSymbolDict[name] = elementSymbol\n chemAtomSetDict[name] = chemAtomSet\n if chemAtomSet.isEquivalent:\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and atom.atomSet and (len(atom.atomSet.atoms) > 1):\n # aromatic rotation prev set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and (not atom.atomSet) and aromaticsEquivalent:\n # aromatic rotation to be set\n if 
equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n else:\n if nonequivalent.get(name) is None:\n nonequivalent[name] = []\n nonequivalent[name].append(atom)\n \n if chemAtomSet.chemAtomSet is not None:\n multiName = chemAtomSet.chemAtomSet.name\n chemAtomSetDict[multiName] = chemAtomSet.chemAtomSet\n elementSymbolDict[multiName] = elementSymbol\n if multiSet.get(multiName) is None:\n multiSet[multiName] = {}\n multiSet[multiName][name] = 1\n inMultiSet[name] = multiName\n\n for groupName in equivalent.keys():\n atoms = equivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if len(atoms)==2:\n # not enough atoms for multi sets!\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n else:\n if inMultiSet.get(groupName):\n # e.g. for Val Hg1*\n makeAtomSet(groupName,atoms,chemAtomSet,'stereo')\n \n else:\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n\n for groupName in nonequivalent.keys():\n atoms = nonequivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n atomSetNames = []\n \n if len(atoms) == 1:\n atom = atoms[0]\n # not enough atoms for prochiral. Corrupt ChemComp\n makeAtomSet(atom.name, atoms, None, 'simple')\n continue\n \n for atom in atoms:\n name = chemAtomSetDict[atom].name\n makeAtomSet(name,(atom,),chemAtomSet,'stereo')\n atomSetNames.append(name)\n\n for n, atom in enumerate(atoms):\n \n #name = chemAtomSetDict[atom].name\n #name2 = makeNonStereoName(molType, name, n)\n # Shouldn't have to do this if non-equiv groups have paired names\n \n name2 = makeNonStereoName(molType, '%s%d' % (chemAtomSet.name[:-1], n), n)\n \n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n\n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)\n\n for groupName in multiSet.keys():\n atomSetNames = multiSet[groupName].keys()\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if \"|\" in groupName:\n # we don't do these pseudoatoms in Analysis\n continue\n\n # e.g. for Val Hga*\n for n, atomSetName in enumerate(atomSetNames):\n name2 = makeNonStereoName(molType, atomSetName, n)\n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n \n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)", "def restricted_automorphism_group(self):\n if '_restricted_automorphism_group' in self.__dict__:\n return self._restricted_automorphism_group\n\n from sage.groups.perm_gps.permgroup import PermutationGroup\n\n if self.field() is QQ:\n def rational_approximation(c):\n return c\n\n else: # self.field() is RDF\n c_list = []\n def rational_approximation(c):\n # Implementation detail: Return unique integer if two\n # c-values are the same up to machine precision. But\n # you can think of it as a uniquely-chosen rational\n # approximation.\n for i,x in enumerate(c_list):\n if self._is_zero(x-c):\n return i\n c_list.append(c)\n return len(c_list)-1\n \n # The algorithm identifies the restricted automorphism group\n # with the automorphism group of a edge-colored graph. The\n # nodes of the graph are the V-representation objects. If all\n # V-representation objects are vertices, the edges are\n # labelled by numbers (to be computed below). 
Roughly\n # speaking, the edge label is the inner product of the\n # coordinate vectors with some orthogonalization thrown in\n # [BSS].\n def edge_label_compact(i,j,c_ij):\n return c_ij\n\n # In the non-compact case we also label the edges by the type\n # of the V-representation object. This ensures that vertices,\n # rays, and lines are only permuted amongst themselves.\n def edge_label_noncompact(i,j,c_ij):\n return (self.Vrepresentation(i).type(), c_ij, self.Vrepresentation(j).type())\n\n if self.is_compact():\n edge_label = edge_label_compact\n else:\n edge_label = edge_label_noncompact\n\n # good coordinates for the V-representation objects\n v_list = []\n for v in self.Vrepresentation():\n v_coords = list(self._affine_coordinates(v))\n if v.is_vertex():\n v_coords = [1]+v_coords\n else:\n v_coords = [0]+v_coords\n v_list.append(vector(v_coords))\n\n # Finally, construct the graph\n Qinv = sum( v.column() * v.row() for v in v_list ).inverse()\n\n # Was set to sparse = False, but there is a problem with Graph\n # backends. It should probably be set back to sparse = False as soon as\n # the backends are fixed.\n G = Graph(sparse=True)\n for i in range(0,len(v_list)):\n for j in range(i+1,len(v_list)):\n v_i = v_list[i]\n v_j = v_list[j]\n c_ij = rational_approximation( v_i * Qinv * v_j )\n G.add_edge(i,j, edge_label(i,j,c_ij))\n\n group, node_dict = G.automorphism_group(edge_labels=True, translation=True)\n\n # Relabel the permutation group\n perm_to_vertex = dict( (i,v+1) for v,i in node_dict.items() )\n group = PermutationGroup([ [ tuple([ perm_to_vertex[i] for i in cycle ])\n for cycle in generator.cycle_tuples() ]\n for generator in group.gens() ])\n\n self._restricted_automorphism_group = group\n return group", "def sequence_tunable(\n mol,\n OP_REMOVE_ISOTOPE=True, OP_NEUTRALISE_CHARGE=True,\n OP_REMOVE_STEREO=False, OP_COMMUTE_INCHI=False,\n OP_KEEP_BIGGEST=True, OP_ADD_HYDROGEN=True,\n OP_KEKULIZE=True, OP_NEUTRALISE_CHARGE_LATE=True\n ):\n F = Filters()\n # Always perform the basics..\n Cleanup(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n AssignStereochemistry(mol, cleanIt=True, force=True, flagPossibleStereoCenters=True) # Fix bug TD201904.01\n # \n if OP_REMOVE_ISOTOPE:\n mol = F.remove_isotope(mol)\n if OP_NEUTRALISE_CHARGE:\n mol = F.neutralise_charge(mol)\n if any([OP_REMOVE_ISOTOPE, OP_REMOVE_ISOTOPE]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n # \n if OP_REMOVE_STEREO:\n mol = F.remove_stereo(mol)\n OP_COMMUTE_INCHI = True\n if OP_COMMUTE_INCHI:\n mol = F.commute_inchi(mol)\n if OP_KEEP_BIGGEST:\n mol = F.keep_biggest(mol)\n if any([OP_REMOVE_STEREO, OP_COMMUTE_INCHI, OP_KEEP_BIGGEST]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_NEUTRALISE_CHARGE_LATE:\n mol = F.neutralise_charge(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_ADD_HYDROGEN:\n mol = F.add_hydrogen(mol, addCoords=True)\n if OP_KEKULIZE:\n mol = F.kekulize(mol)\n #\n return mol", "def test_modeller_mutations():\n mol_id = 'Abl'\n abl_path = examples_paths()['abl']\n with mmtools.utils.temporary_directory() as tmp_dir:\n # Safety check: protein must have WT residue: THR at residue 85 in chain A\n has_wt_residue = False\n with open(abl_path, 'r') as f:\n for line in f:\n if (line[:6] == 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='THR'):\n has_wt_residue = True\n break\n assert has_wt_residue\n\n 
yaml_content = get_template_script(tmp_dir)\n exp_builder = ExperimentBuilder(yaml_content)\n output_dir = exp_builder._db.get_molecule_dir(mol_id)\n output_path = os.path.join(output_dir, 'Abl.pdb')\n\n # We haven't set the strip_protons options, so this shouldn't do anything\n exp_builder._db._setup_molecules(mol_id)\n assert not os.path.exists(output_path)\n\n # Calling modeller with WT creates a file (although the protein is not mutated).\n exp_builder._db.molecules[mol_id]['modeller'] = {\n 'apply_mutations': {\n 'chain_id': 'A',\n 'mutations': 'WT',\n }\n }\n setup_molecule_output_check(exp_builder._db, mol_id, output_path)\n os.remove(output_path) # Remove file for next check.\n\n\n # Reinitialize exp_builder\n exp_builder = ExperimentBuilder(yaml_content)\n\n # Now we set the strip_protons options and repeat for the mutant case\n exp_builder._db.molecules[mol_id]['modeller'] = {\n 'apply_mutations': {\n 'chain_id': 'A',\n 'mutations': 'T85I',\n }\n }\n setup_molecule_output_check(exp_builder._db, mol_id, output_path)\n\n # Safety check: protein must have mutated residue: ILE at residue 85 in chain A\n has_mut_residue = False\n with open(output_path, 'r') as f:\n for line in f:\n if (line[:6] == 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='ILE'):\n has_mut_residue = True\n break\n assert has_mut_residue", "def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()", "def composite_scene(orig_scene, mask_seam, match_scene, dialation_mask, orig_scene1, method=\"paste\", repeat=1):\n avg_pixel = np.mean(orig_scene1[orig_scene1 != 0])\n \n output = np.zeros(orig_scene.shape)\n if method==\"seamlessclone\":\n width, height, _ = match_scene.shape\n center = (height/2, width/2)\n \n # create plain white mask\n mask = np.zeros(match_scene.shape, match_scene.dtype) + 255\n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = avg_pixel\n \n \n \n #image_to_compare\n output_blend = cv2.seamlessClone(match_scene.astype(np.uint8), \n orig_scene_impute.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n #implot(output_blend)\n # now reapply the mask with alpha blending to fix it up again.\n \n \"\"\"\n TO DO CHANGE IT FROM THE DILATION + MASK SEAM, NEED TO FIND THE INTERSECTION OF THESE TWO TO BE THE \n REAL MASK TO BLUR\n \"\"\"\n dilation_mask = mask_seam.copy()\n \n dilation_mask = cv2.GaussianBlur(dilation_mask, (101,101), 0) # blur mask and do a alpha blend... 
between the \n #implot(dilation_mask, 'gray')\n \n dilation_mask = dilation_mask/255.0\n \n \n \n # 0 is black, 1 is white\n #output = cv2.addWeighted(output_blend, dialation_mask, orig_scene, 1-dialation_mask)\n #print dialation_mask\n #print dialation_mask.shape\n #print output_blend.shape\n #a = cv2.multiply(output_blend.astype(np.float), dialation_mask)\n \n for _ in range(10):\n # some kind of layered alpha blend by the dilation mask values...\n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output = cv2.seamlessClone(match_scene.astype(np.uint8), \n output_blend.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n # complete blend with seamlessclone...\n \n \n # output = np.maximum(output_blend, orig_scene_impute)\n # or just darken...\n \n \n #if repeat == 1:\n # return output_blend\n #output = composite_scene(orig_scene_impute, mask_seam, output_blend, dialation_mask, method=\"paste\")\n \n\n\n elif method==\"paste\":\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n \n elif method==\"alphablend\":\n output_blend = output.copy()\n output_blend[mask_seam == 0] = orig_scene[mask_seam == 0]\n output_blend[mask_seam != 0] = match_scene[mask_seam != 0]\n \n \n \n \n else:\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n return output", "def stereo_symbol(self):\n\n return np.array([bond.stereo_symbol for bond in self])", "def test_isomorphic_striped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )\n\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert not Molecule.are_isomorphic(\n mol1,\n mol2,\n strip_pyrimidal_n_atom_stereo=False,\n atom_stereochemistry_matching=True,\n bond_stereochemistry_matching=True,\n )[0]", "def semcor2R(args):\r\n input_files = list_files(*args.input_files)\r\n output_file = Path(args.output_file)\r\n senses = args.sense\r\n multiword = senses or args.multiword\r\n if senses and output_file == output_default / 'semcor2r.csv':\r\n output_file = output_default / 'semcor2r_semtagged.csv'\r\n with output_file.open('w') as file:\r\n file.write(\"\\t\".join([\"concordance\", \"file\", \"token_id\", \"wordform\", \"PoS\", \"lemma\"]))\r\n if senses:\r\n file.write('\\twnsn\\tsense_key')\r\n file.write('\\n')\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n for word in corpus_file.text.find_all(['wf', 'punc']):\r\n index = 0\r\n if 
word.name == 'punc':\r\n index += 1\r\n continue\r\n if not multiword:\r\n for token in Token.from_tag(word).get_components():\r\n token_id = '/'.join([corpus_file.shortname, token.wordform, str(index)])\r\n if args.verbose and type(token.status)==tuple:\r\n report_token_status(token, token_id)\r\n file.write('\\t'.join([corpus_file.concordance, corpus_file.shortname, token_id, token.wordform, token.pos, token.lemma]) + '\\n')\r\n index += 1\r\n else:\r\n token = Token.from_tag(word)\r\n if senses and not token.has_senses:\r\n continue\r\n token_id = '/'.join([corpus_file.shortname, token.wordform, str(index)])\r\n if args.verbose and type(token.status)==tuple:\r\n report_token_status(token, token_id)\r\n file.write('\\t'.join([corpus_file.concordance, corpus_file.shortname, token_id, token.wordform, token.pos, token.lemma]))\r\n index += 1\r\n if senses:\r\n file.write('\\t{}\\t{}'.format(token.wnsn, token.sense_key))\r\n file.write('\\n')\r\n print('File \"{}\" processed.'.format(input_file.stem))", "def chordmode():\n for token in consume():\n if source.inSelection and isinstance(token, tokenizer.Pitch):\n transpose(token, 0)", "def stereo(func):\n @wraps(func)\n def stereoFunc(*args, **kwargs):\n # trying to find a way to have a method\n # calling another method not do the stereo repeat\n # so if calling an internal func from a stereo func,\n # add the stereo kw arg to call\n # it will be removed before calling underlying func\n\n if 'stereo' in kwargs.keys():\n _stereo = kwargs['stereo']\n del(kwargs['stereo'])\n else:\n _stereo = 1\n res = func(*args, **kwargs)\n if args[0].stereo and _stereo: # self.stereo\n _LOGGER.debug(\"Stereo Command {}:{}\".format(func.__name__, args))\n largs = list(args)\n if type(largs[1]) == str:\n largs[1] = chr(ord(largs[1])+1)\n else:\n largs[1] = largs[1] + 1\n if func.__name__[3:9] == \"Matrix\": # do stereo on input and output\n _LOGGER.debug(\"Matrix Stereo Command {}\".format(func.__name__))\n if type(largs[2]) == str:\n largs[2] = chr(ord(largs[2])+1)\n else:\n largs[2] = largs[2] + 1\n res2 = func(*largs, **kwargs)\n if res != res2:\n _LOGGER.debug(\"Stereo out of sync {} : {}\".format(res, res2))\n warnings.warn(\"Stereo out of sync\", RuntimeWarning)\n if res is not None:\n return res\n return stereoFunc", "def add_synth_group(self, name=\"\"):\n pass", "def build(self, X, Y, w=None):\n super(MorseSmaleComplex, self).build(X, Y, w)\n\n if self.debug:\n sys.stdout.write(\"Decomposition: \")\n start = time.perf_counter()\n\n stableManifolds = MorseComplex(debug=self.debug)\n unstableManifolds = MorseComplex(debug=self.debug)\n\n stableManifolds._build_for_morse_smale_complex(self, False)\n unstableManifolds._build_for_morse_smale_complex(self, True)\n\n self.min_indices = unstableManifolds.max_indices\n self.max_indices = stableManifolds.max_indices\n\n # If a degenerate point is both a minimum and a maximum, it\n # could potentially appear twice, but would be masked by the\n # minimum key which would wipe the maximum merge\n self.merge_sequence = stableManifolds.merge_sequence.copy()\n self.merge_sequence.update(unstableManifolds.merge_sequence)\n self.persistences = sorted(\n stableManifolds.persistences + unstableManifolds.persistences\n )\n\n self.base_partitions = {}\n base = np.array([[None, None]] * len(Y))\n for key, items in unstableManifolds.base_partitions.items():\n base[np.array(items), 0] = key\n for key, items in stableManifolds.base_partitions.items():\n base[np.array(items), 1] = key\n\n keys = set(map(tuple, base))\n for key in 
keys:\n self.base_partitions[key] = np.where(\n np.logical_and(base[:, 0] == key[0], base[:, 1] == key[1])\n )[0]\n\n if self.debug:\n end = time.perf_counter()\n sys.stdout.write(\"%f s\\n\" % (end - start))", "def xyz2facestereo(x,y,z):\n ax = np.abs(x)\n ay = np.abs(y)\n az = np.abs(z)\n mskx = (y != x) & (z != x)\n mskyz = z != y\n msk0 = ( x >= ay) & ( x >= az) & mskx\n msk3 = (-x >= ay) & (-x >= az) & mskx\n msk1 = ( y >= az) & mskyz\n msk4 = (-y >= az) & mskyz\n msk2 = z > 0\n f = (1-msk0)*(msk3*3 + (1-msk3)*(msk1 + (1-msk1)*(msk4*4 + (1-msk4)*(msk2*2 + (1-msk2)*5))))\n xnew = np.choose(f, ( y, -x, -x, -z, -z, y))\n ynew = np.choose(f, ( z, z, -y, -y, x, x))\n znew = np.choose(f, ( x, y, z, -x, -y, -z))\n X,Y = xyz2stereo(xnew, ynew, znew)\n\n return f,X,Y", "def braid_group_action(self):\n G = []\n for c in self:\n c = c.relabel()\n if any(c in g for g in G):\n continue\n G.append(c.braid_group_orbit())\n return G", "def test_isomorphic_stripped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )", "def get_torsion_contrib(self, groupBy='m'):\n e4 = 0.0\n es4 = []\n iass4 = []\n\n types4_z = []\n types4_m = []\n\n zs8 = [8, 16, 34] # zs of group 8 elements\n set_hybs = set(['SP2','SP3'])\n\n for ib in range(self.nb):\n j, k = self.ias2[ib]\n if self.zs[j] > self.zs[k]:\n tv = j; k = j; j = tv\n neibs1 = self.m.GetAtomWithIdx(j).GetNeighbors(); n1 = len(neibs1);\n neibs2 = self.m.GetAtomWithIdx(k).GetNeighbors(); n2 = len(neibs2);\n for i0 in range(n1):\n for l0 in range(n2):\n i = neibs1[i0].GetIdx(); l = neibs2[l0].GetIdx()\n if len(set([i,j,k,l])) == 4:\n eijkl = 0.0\n ias = [ i,j,k,l ]; iass4.append(ias)\n zsi = [ self.zs[ia] for ia in ias ]\n types4_z.append( '-'.join([ '%d'%zi for zi in zsi ]) )\n types4_m.append( '-'.join([ self.atypes[ia] for ia in ias ]) )\n V = rcr.GetUFFTorsionParams(self.m, i, j, k, l)\n tor = rdMolTransforms.GetDihedralRad(self.m.GetConformer(), i,j,k,l)\n hyb2 = self.hybs[j]\n hyb3 = self.hybs[k]\n if (hyb2 == 'SP3') and (hyb3 == 'SP3'):\n order = 3; cosNPhi0 = -1 # Phi0 = 60 degree\n if self.bos[ib] == 1 and set([self.zs[j],self.zs[k]]) <= set(zs8):\n orde = 2; cosNPhi0 = -1\n eijkl = 0.5*V*( 1.0 - cosNPhi0*np.cos(tor*order) )\n elif (hyb2 == 'SP2') and (hyb3 == 'SP2'):\n order = 2; cosNPhi0 = 1.0 # phi0 = 180\n eijkl = 0.5*V*( 1.0 - cosNPhi0*np.cos(tor*order) )\n elif set([hyb2,hyb3]) == set_hybs:\n # SP2 - SP3, this is, by default, independent of atom type in UFF\n order = 6; cosNPhi0 = 1.0 # phi0 = 0\n if self.bos[ib] == 1.0:\n # special case between group 6 sp3 and non-group 6 sp2:\n #if hyb2 == 'SP3' and hyb3 == 'SP3' and set([zs[j],zs[k]]) <= zs8:\n # order = 2; cosNPhi0 = -1 # phi0 = 90\n if ((self.zs[j] in zs8) and (self.hybs[k] == 'SP2')) or \\\n ((self.zs[k] in zs8) and (self.hybs[j] == 'SP2')):\n order = 2; cosNPhi0 = -1 # phi0 = 90\n eijkl = 0.5*V*( 1.0 - cosNPhi0*np.cos(tor*order) )\n #else:\n # raise '#ERROR: unknown senario?'\n #print '[i,j,k,l] = [%d,%d,%d,%d], eijkl = %.4f'%(i,j,k,l, eijkl )\n #print V, order, cosNPhi0, tor, eijkl\n es4.append(eijkl)\n e4 += eijkl\n self.e4 = e4\n self.es4 = es4\n self.n4 = 
len(es4)\n self.types4 = {'m': types4_m, 'n': types4_z}[groupBy]\n #return e4, n4, types4, es4", "def is_surjective(self):\n # Testing equality of free modules over PIDs is unreliable\n # see Trac #11579 for explanation and status\n # We test if image equals codomain with two inclusions\n # reverse inclusion of below is trivially true\n return self.codomain().is_submodule(self.image())", "def space_group(self) -> PermutationGroup:\n return self._full_translation_group @ self.point_group", "def test_add_lone_pairs_by_atom_valance(self):\n adj1 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 S u0 p2 c0 {1,S} {3,S}\n3 H u0 p0 c0 {2,S}\"\"\"\n mol1 = Molecule().from_adjacency_list(adjlist=adj1)\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), '[N]S')\n mol1.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), 'N#S')\n\n adj2 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 N u0 p1 c0 {1,S} {3,S} {4,S}\n3 H u0 p0 c0 {2,S}\n4 H u0 p0 c0 {2,S}\"\"\"\n mol2 = Molecule().from_adjacency_list(adjlist=adj2)\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N]N')\n mol2.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N-]=[NH2+]')\n\n adj3 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S}\n2 C u0 p0 c0 {1,S} {3,S} {8,S} {9,S}\n3 C u2 p0 c0 {2,S} {4,S}\n4 H u0 p0 c0 {3,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {1,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {2,S}\"\"\"\n mol3 = Molecule().from_adjacency_list(adjlist=adj3)\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_smiles(), '[CH]CC')\n mol3.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_adjacency_list(), \"\"\"1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}\n3 C u0 p1 c0 {1,S} {9,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {3,S}\n\"\"\")\n\n adj4 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {4,S} {5,S} {6,S}\n2 C u0 p0 c0 {1,S} {3,S} {7,S} {8,S}\n3 N u2 p1 c0 {2,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\"\"\"\n mol4 = Molecule().from_adjacency_list(adjlist=adj4)\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_smiles(), 'CC[N]')\n mol4.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_adjacency_list(), \"\"\"1 N u0 p2 c0 {3,S}\n2 C u0 p0 c0 {3,S} {4,S} {5,S} {6,S}\n3 C u0 p0 c0 {1,S} {2,S} {7,S} {8,S}\n4 H u0 p0 c0 {2,S}\n5 H u0 p0 c0 {2,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {3,S}\n8 H u0 p0 c0 {3,S}\n\"\"\")", "async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))", "def correctKITTILabelForStereo(label):\n # TODO: check extensively\n base = 15.0\n scale = 1.07\n new_label = copy.deepcopy(label)\n if new_label['box3D']['location']['z'] > base:\n new_label['box3D']['location']['z'] = base + (new_label['box3D']['location']['z']-base)*scale\n return new_label", "def evolve(self):\n # Start with first player\n self.phase.set(1)\n\n #self.first_player\n \n # 
Autopass turn if no cards left for player\n \n \n pass", "def stereo_match(self, cam0_points):\r\n cam0_points = np.array(cam0_points)\r\n if len(cam0_points) == 0:\r\n return []\r\n\r\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)\r\n cam1_points = self.distort_points(\r\n cam0_points_undistorted, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n cam1_points_copy = cam1_points.copy()\r\n\r\n # Track features using LK optical flow method.\r\n cam0_points = cam0_points.astype(np.float32)\r\n cam1_points = cam1_points.astype(np.float32)\r\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam0_pyramid, self.curr_cam1_pyramid,\r\n cam0_points, cam1_points, **self.config.lk_params)\r\n\r\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam1_pyramid, self.curr_cam0_pyramid, \r\n cam1_points, cam0_points.copy(), **self.config.lk_params)\r\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\r\n\r\n # cam1_points_undistorted = self.undistort_points(\r\n # cam1_points, self.cam1_intrinsics,\r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, R_cam0_cam1)\r\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\r\n \r\n\r\n \r\n inlier_markers = np.logical_and.reduce(\r\n [inlier_markers.reshape(-1), err < 3, disparity < 20])\r\n\r\n # Mark those tracked points out of the image region as untracked.\r\n img = self.cam1_curr_img_msg.image\r\n for i, point in enumerate(cam1_points):\r\n if not inlier_markers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n inlier_markers[i] = 0\r\n\r\n # Compute the relative rotation between the cam0 frame and cam1 frame.\r\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\r\n # Compute the essential matrix.\r\n E = skew(t_cam0_cam1) @ R_cam0_cam1\r\n\r\n # Further remove outliers based on the known essential matrix.\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs)\r\n cam1_points_undistorted = self.undistort_points(\r\n cam1_points, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n\r\n norm_pixel_unit = 4.0 / (\r\n self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +\r\n self.cam1_intrinsics[0] + self.cam1_intrinsics[1])\r\n\r\n for i in range(len(cam0_points_undistorted)):\r\n if not inlier_markers[i]:\r\n continue\r\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\r\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\r\n epipolar_line = E @ pt0\r\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\r\n epipolar_line[:2])\r\n\r\n if error > self.config.stereo_threshold * norm_pixel_unit:\r\n inlier_markers[i] = 0\r\n\r\n return cam1_points, inlier_markers", "def substereomers(gra):\n _assigned = functools.partial(\n dict_.filter_by_value, func=lambda x: x is not None)\n\n known_atm_ste_par_dct = _assigned(atom_stereo_parities(gra))\n known_bnd_ste_par_dct = _assigned(bond_stereo_parities(gra))\n\n def _is_compatible(sgr):\n atm_ste_par_dct = _assigned(atom_stereo_parities(sgr))\n bnd_ste_par_dct = _assigned(bond_stereo_parities(sgr))\n _compat_atm_assgns = (set(known_atm_ste_par_dct.items()) <=\n set(atm_ste_par_dct.items()))\n _compat_bnd_assgns = 
(set(known_bnd_ste_par_dct.items()) <=\n set(bnd_ste_par_dct.items()))\n return _compat_atm_assgns and _compat_bnd_assgns\n\n sgrs = tuple(filter(_is_compatible, stereomers(gra)))\n return sgrs", "def semigroup_generators(self):\n return self.ambient().semigroup_generators().map(self.retract)", "def get_atom_contrib(self, groupBy='m'):\n self.es1 = -0.5 * np.array(self.zs)**2.4 * h2kc\n self.types1 = {'m': self.atypes, 'n': self.zs}[groupBy]", "def blendShapeEnvelopeOff():\n obj = cmds.ls(selection = True)\n history = cmds.listHistory(obj)\n bsHistory = cmds.ls(history, type = 'blendShape')\n for bs in bsHistory:\n cmds.setAttr(bs+'.'+'envelope',0.0) #note not changing blend target weights", "def get_disparity(self, imgL, imgR):\n # SGBM Parameters -----------------\n window_size = 1 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n param = {'minDisparity': 0, 'numDisparities': 32, 'blockSize': 5, 'P1': 10, 'P2': 20, 'disp12MaxDiff': 1,\n 'preFilterCap': 65, 'uniquenessRatio': 10, 'speckleWindowSize': 150, 'speckleRange': 2, 'mode': 2}\n left_matcher = cv2.StereoSGBM_create(**param)\n # left_matcher = cv2.StereoSGBM_create(\n # minDisparity=-1,\n # numDisparities=5*16, # max_disp has to be dividable by 16 f. E. HH 192, 256\n # blockSize=window_size,\n # P1=8 * 3 * window_size,\n # # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n # P2=32 * 3 * window_size,\n # disp12MaxDiff=12,\n # uniquenessRatio=10,\n # speckleWindowSize=50,\n # speckleRange=32,\n # preFilterCap=63,\n # mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY\n # )\n right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)\n # FILTER Parameters\n lmbda = 8000\n sigma = 1.3\n wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)\n wls_filter.setLambda(lmbda)\n\n wls_filter.setSigmaColor(sigma)\n displ = left_matcher.compute(imgL, imgR) # .astype(np.float32)/16\n dispr = right_matcher.compute(imgR, imgL) # .astype(np.float32)/16\n displ = np.int16(displ)\n dispr = np.int16(dispr)\n filteredImg = wls_filter.filter(displ, imgL, None, dispr) # important to put \"imgL\" here!!!\n filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)\n filteredImg = np.uint8(filteredImg)\n # 除以16得到真实视差(因为SGBM算法得到的视差是×16的)\n displ[displ < 0] = 0\n # disparity.astype(np.float32) / 16.\n displ = np.divide(displ.astype(np.float32), 16.)\n return filteredImg, displ", "def test_clashing_atoms():\n benzene_path = examples_paths()['benzene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir, keep_openeye=True)\n system_id = 'explicit-system'\n system_description = yaml_content['systems'][system_id]\n system_description['pack'] = True\n system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])\n\n # Sanity check: at the beginning molecules clash\n toluene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(toluene_path, molecule_idx=0))\n benzene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(benzene_path, molecule_idx=0))\n assert pipeline.compute_min_dist(toluene_pos, benzene_pos) < pipeline.SetupDatabase.CLASH_THRESHOLD\n\n exp_builder = ExperimentBuilder(yaml_content)\n\n for sys_id in [system_id + '_vacuum', system_id + '_PME']:\n system_dir = os.path.dirname(\n exp_builder._db.get_system(sys_id)[0].position_path)\n\n # Get 
positions of molecules in the final system\n prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))\n inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))\n positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)\n topography = Topography(prmtop.topology, ligand_atoms='resname TOL')\n benzene_pos2 = positions.take(topography.receptor_atoms, axis=0)\n toluene_pos2 = positions.take(topography.ligand_atoms, axis=0)\n # atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')\n # benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)\n # toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)\n\n # Test that clashes are resolved in the system\n min_dist, max_dist = pipeline.compute_min_max_dist(toluene_pos2, benzene_pos2)\n assert min_dist >= pipeline.SetupDatabase.CLASH_THRESHOLD\n\n # For solvent we check that molecule is within the box\n if sys_id == system_id + '_PME':\n assert max_dist <= exp_builder._db.solvents['PME']['clearance'].value_in_unit(unit.angstrom)", "def resetCoronalSegment(self):\r\n #research\r\n profprint()\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(0)\r\n sGreen.SetOrientationToCoronal()\r\n #sw = slicer.app.layoutManager().sliceWidget(\"Green\")\r\n #sw.fitSliceToBackground()\r\n sGreen.Modified()", "def _ignore_collision(self):\n # The legacy version only ignores collision of child links of active joints.\n for link in self.cabinet.get_links():\n for s in link.get_collision_shapes():\n g0, g1, g2, g3 = s.get_collision_groups()\n s.set_collision_groups(g0, g1, g2 | 1 << 31, g3)", "def _auto_influence(self, mod, rigid, pair_blend):\n\n constraint = rigid.sibling(type=\"rdConstraint\")\n\n # This is fine (but what does it mean? :O )\n if not constraint:\n return\n\n def bake_joint_orient(mat, orient):\n \"\"\"Bake jointOrient values\n\n Such that keyframes can be made without\n taking those into account. E.g. a joint with 0 rotate\n but 45 degrees of jointOrient should only require a key\n with 0 degrees.\n\n \"\"\"\n\n assert isinstance(mat, cmdx.om.MMatrix)\n assert isinstance(orient, cmdx.om.MQuaternion)\n\n mat_tm = cmdx.om.MTransformationMatrix(mat)\n new_quat = mat_tm.rotation(asQuaternion=True) * orient\n mat_tm.setRotation(new_quat)\n\n return mat_tm.asMatrix()\n\n transform = rigid.parent()\n\n joint_orient = self._cache[(transform, \"jointOrient\")]\n\n # pairBlend directly feeds into the drive matrix\n compose = mod.create_node(\"composeMatrix\", name=\"composePairBlend\")\n mod.connect(pair_blend[\"inTranslate1\"], compose[\"inputTranslate\"])\n mod.connect(pair_blend[\"inRotate1\"], compose[\"inputRotate\"])\n\n # A drive is relative the parent frame, but the pairblend is relative\n # the parent Maya transform. 
In case these are not the same, we'll\n # map the pairblend into the space of the parent frame.\n parent_rigid = constraint[\"parentRigid\"].connection()\n\n # Could be connected to a scene too\n if parent_rigid.type() != \"rdRigid\":\n return\n\n relative = mod.create_node(\"multMatrix\", name=\"makeRelative\")\n\n # From this space..\n parent_transform_matrix = rigid[\"inputParentInverseMatrix\"].asMatrix()\n parent_transform_matrix = parent_transform_matrix.inverse()\n\n # To this space..\n parent_rigid_matrix = parent_rigid[\"cachedRestMatrix\"].asMatrix()\n parent_rigid_matrix = parent_rigid_matrix.inverse()\n\n total_matrix = parent_transform_matrix * parent_rigid_matrix\n total_matrix = bake_joint_orient(total_matrix, joint_orient)\n\n mod.connect(compose[\"outputMatrix\"], relative[\"matrixIn\"][0])\n mod.set_attr(relative[\"matrixIn\"][1], total_matrix)\n\n mod.connect(relative[\"matrixSum\"], constraint[\"driveMatrix\"])\n\n # Keep channel box clean\n mod.set_attr(compose[\"isHistoricallyInteresting\"], False)\n mod.set_attr(relative[\"isHistoricallyInteresting\"], False)", "def test_group(self):\n # leave out particle 0\n group = hoomd.group.tags(1,2)\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, ( 0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (0,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))", "def gen_michaelis_menten_like_rate_law(model, reaction, modifiers=None, modifier_reactants=None, exclude_substrates=None):\n modifier_species = []\n all_species = {}\n all_volumes = {}\n all_observables = {}\n all_parameters = {}\n if modifiers:\n for modifier in modifiers:\n if type(modifier) == wc_lang.Observable:\n all_observables[modifier.id] = modifier\n for species in modifier.expression.species:\n modifier_species.append(species) \n elif type(modifier) == wc_lang.Species:\n modifier_species.append(modifier)\n all_species[modifier.gen_id()] = modifier\n else:\n raise TypeError('The modifiers contain element(s) that is not an observable or a species') \n\n if modifier_reactants is None:\n additional_reactants = []\n else:\n additional_reactants = modifier_reactants\n\n if exclude_substrates:\n excluded_reactants = exclude_substrates\n else:\n excluded_reactants = [] \n\n avogadro = model.parameters.get_or_create(\n id='Avogadro',\n type=None,\n value=scipy.constants.Avogadro,\n units=unit_registry.parse_units('molecule mol^-1'))\n all_parameters[avogadro.id] = avogadro\n\n model_k_cat = model.parameters.get_or_create(id='k_cat_{}'.format(reaction.id),\n type=wc_ontology['WC:k_cat'],\n units=unit_registry.parse_units('s^-1{}'.format(\n (' * molecule^{{-{}}}'.format(len(modifiers))) if modifiers else '')))\n all_parameters[model_k_cat.id] = model_k_cat\n\n expression_terms = [] \n for species in reaction.get_reactants():\n\n if (species not in modifier_species or species in additional_reactants) and species not in excluded_reactants:\n\n all_species[species.gen_id()] = species\n\n model_k_m = 
model.parameters.get_or_create(id='K_m_{}_{}'.format(reaction.id, species.species_type.id),\n type=wc_ontology['WC:K_m'],\n units=unit_registry.parse_units('M'))\n all_parameters[model_k_m.id] = model_k_m\n\n volume = species.compartment.init_density.function_expressions[0].function\n all_volumes[volume.id] = volume\n\n expression_terms.append('({} / ({} + {} * {} * {}))'.format(species.gen_id(),\n species.gen_id(),\n model_k_m.id, avogadro.id,\n volume.id))\n\n expression = '{}{}{}'.format(\n model_k_cat.id,\n (' * {}'.format(' * '.join([i.id if type(i)==wc_lang.Observable else i.gen_id() \\\n for i in modifiers]))) if modifiers else '',\n (' * {}'.format(' * '.join(expression_terms))) if expression_terms else '')\n \n rate_law_expression, error = wc_lang.RateLawExpression.deserialize(expression, {\n wc_lang.Parameter: all_parameters,\n wc_lang.Species: all_species,\n wc_lang.Observable: all_observables,\n wc_lang.Function: all_volumes,\n })\n assert error is None, str(error)\n\n return rate_law_expression, list(all_parameters.values())", "def sew_dart(self, degree, dart1, dart2, merge_attribute = True):\r\n if self.is_free(degree, dart1) and self.is_free(degree, dart2):\r\n if degree==2:\r\n\t\t\t# involution sur le degre:\r\n self.link_darts(2,dart1,dart2)\r\n\t\t\t\r\n\t\t\t# involution sur (a0 a2)\r\n a0_b1 = self.alphas[0][dart1]\r\n a0_b2 = self.alphas[0][dart2]\r\n\t\t\t\r\n self.link_darts(2,a0_b1, a0_b2)\r\n \r\n\r\n self.link_darts(degree,dart1, dart2)\r\n\t #if merge_attribute:\r\n\t\t# pass#self.set_position(dart, np.mean([get_position(dart1), get_position(dart2)]))\r", "def known_organisms():\n return [\"rat\"]", "def _get_reaction_path(self):\n ## check if the atoms are on the same side of the unit cell\n cell = self.atomsIS.get_cell() # same cell used in IS and FS hopefully\n # get the vector respresenting the difference of the two \n vector_all = self.atomsIS.get_positions() - self.atomsFS.get_positions()\n vectors = vector_all[self.indices]\n min_vec = []\n for v in vectors:\n vmin, vlen = geometry.find_mic(v, cell, pbc=True)\n min_vec.append(vmin)\n ravel_vec = np.ravel(min_vec)\n self.modes.append( ravel_vec / np.linalg.norm(ravel_vec) )", "def set_original_planes(self, display_opt):\n\n # get 4-chamber view\n four_ch_view_plane_normal = self.find_4ch_view(display_opt)\n\n # set rodriguez rotation around midline (apex to C)\n axis_of_rot = np.array(self.epi_apex_node - self.C)\n self.axis_of_rot_normalized = axis_of_rot/np.linalg.norm(axis_of_rot)\n\n # get 2-chamber view (90-counterclock rotation from 4ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized,\n math.radians(self.orig_view_angles[1])) # rodriguez rotation around midline\n two_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n # get 3-chamber view (additional 30-60 counterclock rotation from 3ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized, math.radians(self.orig_view_angles[2]))\n three_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n if display_opt:\n _ = self.mesh_slicer(four_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(two_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(three_ch_view_plane_normal, 'mesh')\n\n self.original_planes = np.vstack((four_ch_view_plane_normal,\n two_ch_view_plane_normal,\n three_ch_view_plane_normal))", "def n_stereo_centers(mol: Mol) -> int:\n n = 0\n try:\n rdmolops.FindPotentialStereo(mol, cleanIt=False)\n n = 
rdMolDescriptors.CalcNumAtomStereoCenters(mol)\n except Exception:\n pass\n return n", "def convertStereo(u, v, disparity, info):\n stereoModel = image_geometry.StereoCameraModel()\n stereoModel.fromCameraInfo(info['l'], info['r'])\n (x,y,z) = stereoModel.projectPixelTo3d((u,v), disparity)\n\n cameraPoint = PointStamped()\n cameraPoint.header.frame_id = info['l'].header.frame_id\n cameraPoint.header.stamp = rospy.Time.now()\n cameraPoint.point = Point(x,y,z)\n return cameraPoint", "def mask(\n self, enc: SplitEncoding, random: bool = False, detach: bool = False\n ) -> tuple[SplitEncoding, SplitEncoding]:\n zs = enc.zs\n zy = enc.zy\n if detach:\n zs = zs.detach()\n zy = zy.detach()\n if random:\n zs_m = SplitEncoding(zs=torch.randn_like(zs), zy=zy)\n zy_m = SplitEncoding(zs=zs, zy=torch.randn_like(zy))\n else:\n zs_m = SplitEncoding(zs=torch.zeros_like(zs), zy=zy)\n zy_m = SplitEncoding(zs=zs, zy=torch.zeros_like(zy))\n return zs_m, zy_m", "def test_distributeReactor(self):\n original_reactor = self.action.r\n self.action._distributeReactor(self.cs)\n if context.MPI_RANK == 0:\n self.assertEqual(original_reactor, self.action.r)\n else:\n self.assertNotEqual(original_reactor, self.action.r)\n self.assertIsNone(self.action.r.core.lib)", "def rmsd_cluster(input, ref, output, clusters):\n ifs = oemolistream()\n if not ifs.open(input):\n OEThrow.Fatal(\"Unable to open %s for reading\" % input)\n poses = list()\n mol = OEMol()\n while OEReadMolecule(ifs, mol):\n mol_copy = OEMol(mol)\n #print(dir(mol_copy))\n #print(mol_copy.NumConfs())\n for conf in mol_copy.GetConfs():\n poses.append(conf)\n ifs.close()\n print(\"%d poses read\" % len(poses))\n\n # Create a list of centroids, starting with first molecule.\n centroids = list()\n\n # Make first pose our first centroid.\n centroids.append(poses.pop(0))\n if int(clusters) < len(poses):\n print(\"Will return %s poses...\" % clusters)\n else:\n print(\"Will return %s poses...\" % (len(poses)+1))\n while len(centroids) < int(clusters) and len(poses)>0:\n print(len(centroids))\n # Compute distance from all poses to closest centroid.\n min_rmsd = numpy.zeros([len(poses)])\n for (pose_index, pose) in enumerate(poses):\n centroids_rmsds = [OERMSD(pose, centroid) for centroid in centroids]\n min_rmsd[pose_index] = min(centroids_rmsds)\n # Find pose that is farthest away from all current centroids.\n farthest_pose_index = min_rmsd.argmax()\n print(\"Farthest pose is %d at %f A away from centroids\" % (farthest_pose_index, min_rmsd[farthest_pose_index]))\n # Move farthest pose to centroids.\n centroids.append(poses.pop(farthest_pose_index))\n # Write out all centroids.\n ofs=oemolostream()\n if not ofs.open(output):\n OEThrow.Fatal(\"Unable to open %s for writing\" % itf.GetString(\"-o\"))\n for mol in centroids:\n #OEWritePDBFile(ofs, mol)\n OEWriteMolecule(ofs, mol)\n\n print(\"Done!\")\n\n return 0", "def test_molecules_from_xyz(self):\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz6['dict'])\n\n # check that the atom order is the same\n self.assertTrue(s_mol.atoms[0].is_sulfur())\n self.assertTrue(b_mol.atoms[0].is_sulfur())\n self.assertTrue(s_mol.atoms[1].is_oxygen())\n self.assertTrue(b_mol.atoms[1].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_oxygen())\n self.assertTrue(b_mol.atoms[2].is_oxygen())\n self.assertTrue(s_mol.atoms[3].is_nitrogen())\n self.assertTrue(b_mol.atoms[3].is_nitrogen())\n self.assertTrue(s_mol.atoms[4].is_carbon())\n self.assertTrue(b_mol.atoms[4].is_carbon())\n self.assertTrue(s_mol.atoms[5].is_hydrogen())\n 
self.assertTrue(b_mol.atoms[5].is_hydrogen())\n self.assertTrue(s_mol.atoms[6].is_hydrogen())\n self.assertTrue(b_mol.atoms[6].is_hydrogen())\n self.assertTrue(s_mol.atoms[7].is_hydrogen())\n self.assertTrue(b_mol.atoms[7].is_hydrogen())\n self.assertTrue(s_mol.atoms[8].is_hydrogen())\n self.assertTrue(b_mol.atoms[8].is_hydrogen())\n self.assertTrue(s_mol.atoms[9].is_hydrogen())\n self.assertTrue(b_mol.atoms[9].is_hydrogen())\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz7['dict'])\n self.assertTrue(s_mol.atoms[0].is_oxygen())\n self.assertTrue(b_mol.atoms[0].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_carbon())\n self.assertTrue(b_mol.atoms[2].is_carbon())\n\n expected_bonded_adjlist = \"\"\"multiplicity 2\n1 O u0 p2 c0 {6,S} {10,S}\n2 O u0 p2 c0 {3,S} {28,S}\n3 C u0 p0 c0 {2,S} {8,S} {14,S} {15,S}\n4 C u0 p0 c0 {7,S} {16,S} {17,S} {18,S}\n5 C u0 p0 c0 {7,S} {19,S} {20,S} {21,S}\n6 C u0 p0 c0 {1,S} {22,S} {23,S} {24,S}\n7 C u1 p0 c0 {4,S} {5,S} {9,S}\n8 C u0 p0 c0 {3,S} {10,D} {11,S}\n9 C u0 p0 c0 {7,S} {11,D} {12,S}\n10 C u0 p0 c0 {1,S} {8,D} {13,S}\n11 C u0 p0 c0 {8,S} {9,D} {25,S}\n12 C u0 p0 c0 {9,S} {13,D} {26,S}\n13 C u0 p0 c0 {10,S} {12,D} {27,S}\n14 H u0 p0 c0 {3,S}\n15 H u0 p0 c0 {3,S}\n16 H u0 p0 c0 {4,S}\n17 H u0 p0 c0 {4,S}\n18 H u0 p0 c0 {4,S}\n19 H u0 p0 c0 {5,S}\n20 H u0 p0 c0 {5,S}\n21 H u0 p0 c0 {5,S}\n22 H u0 p0 c0 {6,S}\n23 H u0 p0 c0 {6,S}\n24 H u0 p0 c0 {6,S}\n25 H u0 p0 c0 {11,S}\n26 H u0 p0 c0 {12,S}\n27 H u0 p0 c0 {13,S}\n28 H u0 p0 c0 {2,S}\n\"\"\"\n expected_mol = Molecule().from_adjacency_list(expected_bonded_adjlist)\n self.assertEqual(b_mol.to_adjacency_list(), expected_bonded_adjlist)\n # the is_isomorphic test must come after the adjlist test since it changes the atom order\n self.assertTrue(b_mol.is_isomorphic(expected_mol))\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz10['dict'], multiplicity=1, charge=0)\n self.assertIsNotNone(s_mol)\n self.assertIsNotNone(b_mol)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz10['dict']['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz_dict_13, multiplicity=1, charge=0)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz_dict_13['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n self.assertEqual(s_mol.multiplicity, 1)\n self.assertEqual(b_mol.multiplicity, 1)\n self.assertFalse(any(atom.radical_electrons for atom in b_mol.atoms))", "def test_process_stereo_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/stereo.wav'\n self.default_kwargs['input_file'] = test_path\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()", "def sidechain(self):\n\n return self.atoms - self.backbone()", "def setAtomType4Gromacs(self):\n atNames = [at.atomTypeName for at in self.atomTypes]\n #print atNames\n delAtomTypes = []\n modAtomTypes = []\n atomTypesGromacs = []\n dictAtomTypes = {}\n for at in self.atomTypes:\n atName = at.atomTypeName\n dictAtomTypes[atName] = at\n if atName.islower() and atName.upper() in atNames:\n #print atName, atName.upper()\n atUpper = self.atomTypes[atNames.index(atName.upper())]\n #print at.atomTypeName,at.mass, at.ACOEF, at.BCOEF\n #print atUpper.atomTypeName, atUpper.mass, atUpper.ACOEF, atUpper.BCOEF\n if at.ACOEF is atUpper.ACOEF and at.BCOEF is at.BCOEF:\n 
delAtomTypes.append(atName)\n else:\n newAtName = atName+'_'\n modAtomTypes.append(atName)\n atomType = AtomType(newAtName, at.mass, at.ACOEF, at.BCOEF)\n atomTypesGromacs.append(atomType)\n dictAtomTypes[newAtName] = atomType\n else:\n atomTypesGromacs.append(at)\n\n atomsGromacs = []\n for a in self.atoms:\n atName = a.atomType.atomTypeName\n if atName in delAtomTypes:\n atom = Atom(a.atomName, dictAtomTypes[atName.upper()], a.id, \\\n a.resid, a.mass, a.charge, a.coords)\n atomsGromacs.append(atom)\n elif atName in modAtomTypes:\n atom = Atom(a.atomName, dictAtomTypes[atName + '_'], a.id, \\\n a.resid, a.mass, a.charge, a.coords)\n atomsGromacs.append(atom)\n else:\n atomsGromacs.append(a)\n\n self.atomTypesGromacs = atomTypesGromacs\n self.atomsGromacs = atomsGromacs\n #print [i.atomTypeName for i in atomTypesGromacs]\n #print modAtomTypes\n #print delAtomTypes", "def react(molecules):\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n return random.choice(alphabet)", "def test_random_permute_inverse_changes_group(self):\n # reproducible arbitrariness\n np.random.seed(232)\n\n nchan = 3\n nsteps = 20\n rho = 1.0/4\n target = np.random.randn(nchan, nsteps)\n\n controller = LinearController(self.G, target, tau=None)\n\n controller.set_random_permute_inverse(rho)\n self.assertIsNotNone(controller.permute_inverse)\n\n n_per_group = self.N/nchan\n groups0 = np.arange(self.N)/n_per_group\n groups1 = controller.permute_inverse/n_per_group\n\n # check that the right fraction of assignments are kept intact\n self.assertEqual(np.sum(groups0 != groups1), rho*self.N)", "def receptor_activity_brute_force(self, ret_correlations=False):\n S_ni = self.sens_mat\n Z = 0\n r_n = np.zeros(self.Nr)\n if ret_correlations:\n r_nm = np.zeros((self.Nr, self.Nr))\n \n # iterate over all mixtures\n for c, prob_c in self._iterate_mixtures():\n # get the activity vector associated with m\n a_n = (np.dot(S_ni, c) >= 1)\n Z += prob_c\n\n r_n[a_n] += prob_c\n if ret_correlations:\n r_nm[np.outer(a_n, a_n)] += prob_c\n \n # return the normalized output\n r_n /= Z\n if ret_correlations:\n r_nm /= Z\n return r_n, r_nm\n else:\n return r_n", "def test_process_stereo(self):\n self.encoder = StreamEncoder(**self.default_kwargs)\n test_samples = np.random.rand(DEFAULT_BLOCKSIZE, 2).astype('int16')\n self.encoder.process(test_samples)\n self.encoder.finish()\n self.assertTrue(self.write_callback_called)", "def decompose_level_lex(self, ranks):\n for index in self.box_space.points:\n index_1d = self.box_space.index_to_1d(index)\n self.rank_of_box[index] = (ranks * index_1d) / self.box_space.volume", "def neutralise(self):\n smi = self.smiles\n\n patts = [\n # Imidazoles\n ('[n+;H]','n'),\n # Amines\n ('[N+;!H0]','N'),\n # Carboxylic acids and alcohols\n ('[$([O-]);!$([O-][#7])]','O'),\n # Thiols\n ('[S-;X1]','S'),\n # Sulfonamides\n ('[$([N-;X2]S(=O)=O)]','N'),\n # Enamines\n ('[$([N-;X2][C,N]=C)]','N'),\n # Tetrazoles\n ('[n-]','[nH]'),\n # Sulfoxides\n ('[$([S-]=O)]','S'),\n # Amides\n ('[$([N-]C=O)]','N') ]\n\n reactions = [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]\n\n m = Chem.MolFromSmiles(smi)\n for i,(reactant, product) in enumerate(reactions):\n while m.HasSubstructMatch(reactant):\n rms = AllChem.ReplaceSubstructs(m, reactant, product)\n m = rms[0]\n\n # it doesn't matter is u choose to output a canonical smiles as the\n # sequence of atoms is changed calling `AllChem.ReplaceSubstructs\n self.smiles = Chem.MolToSmiles(m, isomericSmiles=False) #, canonical=False)", "def molecule(self):\n return 
self._molecule", "def update(self):\n #self.model.states[Polymerase].molecules\n\n DNA_obj = self.model.states[DNA].get_molecules(\"DNA\")[0]\n\n for i in range(1): #500\n DNA_obj.bind_polymerase()\n \n for i in range(50): #50\n DNA_obj.move_polymerase()\n #print(DNA_obj.poly_transcript)\n \n\n\n\n #print(self.test.poly_status)\n #print(DNA_obj.poly_pos)" ]
[ "0.75458586", "0.7168434", "0.7091707", "0.6843433", "0.6454386", "0.60892975", "0.5650636", "0.55933136", "0.55537635", "0.5514306", "0.54825073", "0.5466172", "0.54264325", "0.5393186", "0.5328103", "0.5303823", "0.5297103", "0.52903897", "0.5283487", "0.5206587", "0.5044619", "0.5036317", "0.50087094", "0.4991789", "0.4991789", "0.49752337", "0.4939817", "0.4934892", "0.49026865", "0.49001265", "0.4890496", "0.48490685", "0.48152995", "0.48152995", "0.47983536", "0.47927165", "0.4787903", "0.47868764", "0.4776874", "0.47351912", "0.47307733", "0.47199816", "0.47087768", "0.46925113", "0.4690585", "0.46891207", "0.46829128", "0.4668097", "0.466239", "0.46393454", "0.46187806", "0.46170828", "0.46054557", "0.4603458", "0.45698017", "0.45673123", "0.45613527", "0.45587453", "0.45472977", "0.45351893", "0.45286945", "0.4519835", "0.45103234", "0.4505077", "0.4501114", "0.44854462", "0.44853416", "0.447494", "0.44731227", "0.4469117", "0.44659972", "0.44627002", "0.44600978", "0.4455483", "0.44528103", "0.44478276", "0.44476795", "0.4440844", "0.4435319", "0.44309828", "0.4423531", "0.4419775", "0.44164145", "0.4406364", "0.44027588", "0.43986303", "0.4395876", "0.43925598", "0.43864202", "0.43754005", "0.43721136", "0.43689993", "0.43678844", "0.43668684", "0.43590438", "0.4353747", "0.4352342", "0.43518728", "0.43470097", "0.4345909" ]
0.5968604
6
StereoGroup atoms are in the reaction, but the reaction doesn't specify the chirality at the stereo centers -> preserve stereo group
def test_reaction_ignores_stereo(self):\n    reaction = '[C:1]>>[C:1]'\n    reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n    for reactant in reactants:\n        products = _reactAndSummarize(reaction, reactant)\n        self.assertEqual(products, reactant)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')", "def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. 
Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )", "def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)", "def test_reaction_preserves_stereo(self):\n reaction = '[C@:1]>>[C@:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def test_parameterize_mol_missing_stereo_rdkit(self, force_field):\n toolkit_registry = ToolkitRegistry(\n toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n )\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def GetStereoisomerCount(m, options=...): # -> Any:\n ...", "def EnumerateStereoisomers(m, options=..., verbose=...): # -> Generator[Unknown, None, None]:\n ...", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def atom_stereo_keys(sgr):\n atm_ste_keys = dict_.keys_by_value(atom_stereo_parities(sgr),\n lambda x: x in [True, False])\n return atm_ste_keys", "def reflect_local_stereo(gra):\n atm_par_dct = atom_stereo_parities(gra)\n atm_par_dct = dict_.transform_values(\n atm_par_dct, lambda x: x if x is None else not x)\n gra = set_atom_stereo_parities(gra, atm_par_dct)\n return gra", "def test_parameterize_mol_missing_stereo_openeye(self, force_field):\n toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])\n\n molecule = 
Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def has_stereo(gra):\n return bool(atom_stereo_keys(gra) or bond_stereo_keys(gra))", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_reaction_inverts_stereo(self):\n reaction = '[C@:1]>>[C@@:1]'\n\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br |o1:1|')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@H](Cl)Br |&1:1|')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')", "def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam): \n h, w, u = curImageInfo.originalImageResized.shape # shape after resize\n K = 1\n N = h * w * K\n gr = np.mean(curImageInfo.originalImageResized, 2) # 
not 3 as it is zero based :3\n grs = cv2.GaussianBlur(gr, (5, 5), 1)\n \n # One heuristic for converting depth to disparity\n disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;\n \n if conversionParam.spatialSmoothnessSwitch == True:\n # Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients\n # Vertical and Horizontal Edges\n dx = cv2.filter2D(grs, -1, np.transpose(np.array([[-1, 1, 0]])))\n dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))\n \n W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2 \n \n A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, \"csc\") \\\n + (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \\\n + (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))\n \n b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()\n \n [x, flag] = cg(A, b, np.transpose(disparity0).flatten(), 5e-1, 50)\n \n disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 1, K)\n else:\n disparity = disparity0\n \n curImageInfo.leftImage = curImageInfo.originalImage\n \n # The -ve sign to convert the white to black and black to white \n warpright = -disparity\n \n # only the warping interp2 is done on the original size image with no resizing to have good estimation\n warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]), \n interpolation=cv2.INTER_LINEAR)\n \n curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright), \n conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))\n \n return disparity", "def testMoreStereo(self):\r\n smi_and_cansmi = [\r\n ('Cl[C@](C)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](C)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](C)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Cl)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(I)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](C)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](C)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](C)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](C)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Cl)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Br)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Br)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Br)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Br)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](C)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](I)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](I)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](I)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Cl)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Cl)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](C)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](C)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Br)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Br)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Br)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Br)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](I)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](I)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](I)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](I)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](I)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n 
('Cl[C@](Br)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](I)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](I)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(I)C', 'C[C@@](Cl)(Br)I')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def stereo_score(alignment):\n #dictionary with properties for each residue\n dic_prop = {'I': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'L': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'V': [1, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'C': [1, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n 'A': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'G': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'M': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'F': [1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'W': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'H': [1, 1, 0, 0, 0, 0, 1, 1, 0, 1],\n 'K': [1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'R': [0, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'E': [0, 1, 0, 0, 0, 0, 0, 0, 1, 1],\n 'Q': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'D': [0, 1, 1, 0, 0, 0, 0, 0, 1, 1],\n 'N': [0, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'S': [0, 1, 1, 0, 1, 0, 0, 0, 0, 0],\n 'T': [1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'P': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],\n 'B': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Z': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n '-': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n score_list = []\n for i in range(0, alignment.get_alignment_length()):\n #extract the unique residues in the alignment\n column = ''.join(set(alignment[:, i]))\n stereo_list = []\n #loop through each residue\n for res in range(0, len(column)):\n #replace the residue with list of properties\n residue = column[res]\n #append the properties list to a\n stereo_prop = dic_prop.get(residue)\n stereo_list.append(stereo_prop)\n #number of common properties\n count_stereo = sum(len(set(i)) == 1 for i in zip(*stereo_list))\n #add the number of properties to a list\n score_list.append(count_stereo)\n score_list_final = [float(i*0.1) for i in score_list]\n return score_list_final", "def bond_stereo_keys(sgr):\n bnd_ste_keys = dict_.keys_by_value(bond_stereo_parities(sgr),\n lambda x: x in [True, False])\n return bnd_ste_keys", "def test_enumerating_stereobonds(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"ClC=CCl\", allow_undefined_stereo=True, toolkit_registry=toolkit\n )\n\n # use the default options\n isomers = mol.enumerate_stereoisomers()\n assert len(isomers) == 2\n\n assert mol not in isomers\n # make sure the input molecule is only different by bond stereo\n for ismol in isomers:\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereobonds(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"ClC=CCl\", allow_undefined_stereo=True, toolkit_registry=toolkit\n )\n\n # use the default options\n isomers = mol.enumerate_stereoisomers()\n assert len(isomers) == 2\n\n assert mol not in isomers\n # make sure the input molecule is only different by bond 
stereo\n for ismol in isomers:\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def process_stereo(self, image1, image2, disp1, disp2):\n return _elas.Elas_process_stereo(self, image1, image2, disp1, disp2)", "def atom_parity_evaluator_to_local_stereo_(gra):\n return atom_parity_evaluator_from_local_stereo_(gra)", "def proc_sw_only_morphs(forward_pairs, morphs, backward_pairs):\n sandwich_pairs = []\n if not backward_pairs:\n forward_pairs[-1].morphs.extend(morphs)\n elif len(morphs) == 1:\n morph = morphs[0]\n morph_str = str(morph)\n if morph_str in ['이/VCP', '하/VX'] and backward_pairs[0].morphs[0].tag.startswith('E'):\n # when an ending follows the positive copula '이' or the auxiliary verb '하'\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == '에/JKB' and backward_pairs[0].morphs[0].tag == 'JX':\n # when an auxiliary particle follows the adverbial case particle '에'\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == 'ᆯ/ETM' and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # when a predicate comes before the adnominal ending 'ㄹ'\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag in ['EC', 'EF'] and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # when a predicate comes before a connective or final ending\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag.startswith('XS'):\n # append suffixes to the end of forward pair list\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n else:\n raise AlignError()\n else:\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n if morphs_str == '(/SS + 대북/NNG + (/SS + 대북/NNG + )/SS + )/SS' and forward_pairs[-1].word_str == u'대북':\n del morphs[:]\n elif morphs_str == '(/SS + 동경/NNP + )/SS' and forward_pairs[-1].word_str == u'도쿄':\n del morphs[:]\n else:\n raise AlignError()\n return sandwich_pairs", "def test_enumerating_stereo_partially_defined(\n self, toolkit_class, smiles, undefined_only, expected\n ):\n\n if not toolkit_class.is_available():\n pytest.skip(\"Required toolkit is unavailable\")\n\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n smiles, toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n stereoisomers = mol.enumerate_stereoisomers(\n undefined_only=undefined_only, rationalise=False\n )\n\n # Ensure that the results of the enumeration are what the test expects.\n # This roundtrips the expected output from SMILES --> OFFMol --> SMILES,\n # since the SMILES for stereoisomers generated in this test may change depending\n # on which cheminformatics toolkit is used.\n expected = {\n Molecule.from_smiles(stereoisomer, allow_undefined_stereo=True).to_smiles(\n explicit_hydrogens=True, isomeric=True, mapped=False\n )\n for stereoisomer in expected\n }\n actual = {\n stereoisomer.to_smiles(explicit_hydrogens=True, isomeric=True, mapped=False)\n for stereoisomer in stereoisomers\n }\n\n assert expected == actual", "def test_enumerating_stereocenters(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"NC(Cl)(F)O\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n\n isomers = mol.enumerate_stereoisomers(toolkit_registry=toolkit)\n\n assert len(isomers) == 2\n # make sure the mol is not in the isomers and that they only differ 
by stereo chem\n assert mol not in isomers\n for ismol in isomers:\n assert ismol.n_conformers != 0\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n atom_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the two isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereocenters(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"NC(Cl)(F)O\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n\n isomers = mol.enumerate_stereoisomers(toolkit_registry=toolkit)\n\n assert len(isomers) == 2\n # make sure the mol is not in the isomers and that they only differ by stereo chem\n assert mol not in isomers\n for ismol in isomers:\n assert ismol.n_conformers != 0\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n atom_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the two isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def bond_parity_evaluator_to_local_stereo_(gra):\n return bond_parity_evaluator_from_local_stereo_(gra)", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def mutate_residue(pose, mutant_position, mutant_aa,\n pack_radius = 0.0, pack_scorefxn = '' ):\n #### a MutateResidue Mover exists similar to this except it does not pack\n #### the area around the mutant residue (no pack_radius feature)\n #mutator = MutateResidue(mutant_position, mutant_aa)\n #mutator.apply(test_pose)\n\n if pose.is_fullatom() == False:\n raise IOError( 'mutate_residue only works with fullatom poses' )\n\n\n # create a standard scorefxn by default\n if not pack_scorefxn:\n pack_scorefxn = rosetta.core.scoring.get_score_function()\n\n task = pyrosetta.standard_packer_task(pose)\n\n # the Vector1 of booleans (a specific object) is needed for 
specifying the\n # mutation, this demonstrates another more direct method of setting\n # PackerTask options for design\n aa_bool = rosetta.utility.vector1_bool()\n # PyRosetta uses several ways of tracking amino acids (ResidueTypes)\n # the numbers 1-20 correspond individually to the 20 proteogenic amino acids\n # aa_from_oneletter returns the integer representation of an amino acid\n # from its one letter code\n # convert mutant_aa to its integer representation\n mutant_aa = rosetta.core.chemical.aa_from_oneletter_code(mutant_aa)\n\n # mutation is performed by using a PackerTask with only the mutant\n # amino acid available during design\n # to do this, construct a Vector1 of booleans indicating which amino acid\n # (by its numerical designation, see above) to allow\n for i in range(1, 21):\n # in Python, logical expression are evaluated with priority, thus the\n # line below appends to aa_bool the truth (True or False) of the\n # statement i == mutant_aa\n aa_bool.append( i == int(mutant_aa) )\n\n # modify the mutating residue's assignment in the PackerTask using the\n # Vector1 of booleans across the proteogenic amino acids\n task.nonconst_residue_task(mutant_position\n ).restrict_absent_canonical_aas(aa_bool)\n\n # prevent residues from packing by setting the per-residue \"options\" of\n # the PackerTask\n restrict_non_nbrs_from_repacking(pose, mutant_position, task, pack_radius)\n\n # apply the mutation and pack nearby residues\n #print task\n packer = rosetta.protocols.simple_moves.PackRotamersMover(pack_scorefxn, task)\n packer.apply(pose)", "def DeMorgan_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, A), disj, (neg, B)), (neg, (A, conj, B)) ],\n\t\t\t[ (neg, (A, conj, B)), ((neg, A), disj, (neg, B)) ],\n\t\t\t[ ((neg, A), conj, (neg, B)), (neg, (A, disj, B)) ],\n\t\t\t[ (neg, (A, disj, B)), ((neg, A), conj, (neg, B)) ]\n\t\t]))", "def makeResidueAtomSets(residue, aromaticsEquivalent=True):\n \n getResidueMapping(residue)\n \n equivalent = {}\n elementSymbolDict = {}\n nonequivalent = {}\n multiSet = {}\n chemAtomSetDict = {}\n inMultiSet = {}\n molType = residue.molResidue.molType\n \n for atom in residue.atoms: \n chemAtom = atom.chemAtom\n chemAtomSetDict[atom] = chemAtom\n elementSymbol = chemAtom.elementSymbol\n chemAtomSet = chemAtom.chemAtomSet\n\n if chemAtomSet is None:\n name = chemAtom.name\n makeAtomSet(name,(atom,),None,'simple')\n \n else:\n name = chemAtomSet.name\n elementSymbolDict[name] = elementSymbol\n chemAtomSetDict[name] = chemAtomSet\n if chemAtomSet.isEquivalent:\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and atom.atomSet and (len(atom.atomSet.atoms) > 1):\n # aromatic rotation prev set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and (not atom.atomSet) and aromaticsEquivalent:\n # aromatic rotation to be set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n else:\n if nonequivalent.get(name) is None:\n nonequivalent[name] = []\n nonequivalent[name].append(atom)\n \n if chemAtomSet.chemAtomSet is not None:\n multiName = chemAtomSet.chemAtomSet.name\n chemAtomSetDict[multiName] = chemAtomSet.chemAtomSet\n elementSymbolDict[multiName] = elementSymbol\n if multiSet.get(multiName) is None:\n multiSet[multiName] = {}\n multiSet[multiName][name] = 1\n 
inMultiSet[name] = multiName\n\n for groupName in equivalent.keys():\n atoms = equivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if len(atoms)==2:\n # not enough atoms for multi sets!\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n else:\n if inMultiSet.get(groupName):\n # e.g. for Val Hg1*\n makeAtomSet(groupName,atoms,chemAtomSet,'stereo')\n \n else:\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n\n for groupName in nonequivalent.keys():\n atoms = nonequivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n atomSetNames = []\n \n if len(atoms) == 1:\n atom = atoms[0]\n # not enough atoms for prochiral. Corrupt ChemComp\n makeAtomSet(atom.name, atoms, None, 'simple')\n continue\n \n for atom in atoms:\n name = chemAtomSetDict[atom].name\n makeAtomSet(name,(atom,),chemAtomSet,'stereo')\n atomSetNames.append(name)\n\n for n, atom in enumerate(atoms):\n \n #name = chemAtomSetDict[atom].name\n #name2 = makeNonStereoName(molType, name, n)\n # Shouldn't have to do this if non-equiv groups have paired names\n \n name2 = makeNonStereoName(molType, '%s%d' % (chemAtomSet.name[:-1], n), n)\n \n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n\n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)\n\n for groupName in multiSet.keys():\n atomSetNames = multiSet[groupName].keys()\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if \"|\" in groupName:\n # we don't do these pseudoatoms in Analysis\n continue\n\n # e.g. for Val Hga*\n for n, atomSetName in enumerate(atomSetNames):\n name2 = makeNonStereoName(molType, atomSetName, n)\n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n \n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)", "def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. 
Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp", "def setupForRigPose(self):\n\n # unlock joint movers\n cmds.select(\"JointMover\", hi=True)\n jmNodes = cmds.ls(sl=True)\n for node in jmNodes:\n cmds.lockNode(node, lock=False)\n\n # find the mover shapes and set their visibility\n movers = self.returnJointMovers\n globalMovers = movers[0]\n shapes = []\n\n for each in movers:\n for mover in each:\n child = cmds.listRelatives(mover, children=True, shapes=True)\n if len(child) > 0:\n shapes.append(mover + \"|\" + child[0])\n\n for shape in shapes:\n cmds.setAttr(shape + \".v\", lock=False)\n cmds.setAttr(shape + \".v\", 0, lock=True)\n\n # show global movers\n shapes = []\n for mover in globalMovers:\n child = cmds.listRelatives(mover, children=True, shapes=True)\n if len(child) > 0:\n shapes.append(mover + \"|\" + child[0])\n\n for shape in shapes:\n cmds.setAttr(shape + \".v\", lock=False)\n cmds.setAttr(shape + \".v\", 1, lock=True)\n\n # unlock mover group for this module and make visible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", 1)\n\n # hide the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 0)\n cmds.lockNode(parent, lock=True)\n\n # get the joints created by this module\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.parentConstraint(joint + \"_mover_offset\", joint)\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n 
jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.parentConstraint(self.name + \"_\" + jointBaseName + \"_mover_offset\", joint)\n\n # lock joint movers\n cmds.select(\"JointMover\", hi=True)\n jmNodes = cmds.ls(sl=True)\n for node in jmNodes:\n cmds.lockNode(node, lock=True)", "def test_isomorphic_striped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )\n\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert not Molecule.are_isomorphic(\n mol1,\n mol2,\n strip_pyrimidal_n_atom_stereo=False,\n atom_stereochemistry_matching=True,\n bond_stereochemistry_matching=True,\n )[0]", "def add_synth_group(self, name=\"\"):\n return None", "def stereo_symbol(self):\n\n return np.array([bond.stereo_symbol for bond in self])", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def xyz2facestereo(x,y,z):\n ax = np.abs(x)\n ay = np.abs(y)\n az = np.abs(z)\n mskx = (y != x) & (z != x)\n mskyz = z != y\n msk0 = ( x >= ay) & ( x >= az) & mskx\n msk3 = (-x >= ay) & (-x >= az) & mskx\n msk1 = ( y >= az) & mskyz\n msk4 = (-y >= az) & mskyz\n msk2 = z > 0\n f = (1-msk0)*(msk3*3 + (1-msk3)*(msk1 + (1-msk1)*(msk4*4 + (1-msk4)*(msk2*2 + (1-msk2)*5))))\n xnew = np.choose(f, ( y, -x, -x, -z, -z, y))\n ynew = np.choose(f, ( z, z, -y, -y, x, x))\n znew = np.choose(f, ( x, y, z, -x, -y, -z))\n X,Y = xyz2stereo(xnew, ynew, znew)\n\n return f,X,Y", "def semcor2R(args):\r\n input_files = list_files(*args.input_files)\r\n 
output_file = Path(args.output_file)\r\n senses = args.sense\r\n multiword = senses or args.multiword\r\n if senses and output_file == output_default / 'semcor2r.csv':\r\n output_file = output_default / 'semcor2r_semtagged.csv'\r\n with output_file.open('w') as file:\r\n file.write(\"\\t\".join([\"concordance\", \"file\", \"token_id\", \"wordform\", \"PoS\", \"lemma\"]))\r\n if senses:\r\n file.write('\\twnsn\\tsense_key')\r\n file.write('\\n')\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n for word in corpus_file.text.find_all(['wf', 'punc']):\r\n index = 0\r\n if word.name == 'punc':\r\n index += 1\r\n continue\r\n if not multiword:\r\n for token in Token.from_tag(word).get_components():\r\n token_id = '/'.join([corpus_file.shortname, token.wordform, str(index)])\r\n if args.verbose and type(token.status)==tuple:\r\n report_token_status(token, token_id)\r\n file.write('\\t'.join([corpus_file.concordance, corpus_file.shortname, token_id, token.wordform, token.pos, token.lemma]) + '\\n')\r\n index += 1\r\n else:\r\n token = Token.from_tag(word)\r\n if senses and not token.has_senses:\r\n continue\r\n token_id = '/'.join([corpus_file.shortname, token.wordform, str(index)])\r\n if args.verbose and type(token.status)==tuple:\r\n report_token_status(token, token_id)\r\n file.write('\\t'.join([corpus_file.concordance, corpus_file.shortname, token_id, token.wordform, token.pos, token.lemma]))\r\n index += 1\r\n if senses:\r\n file.write('\\t{}\\t{}'.format(token.wnsn, token.sense_key))\r\n file.write('\\n')\r\n print('File \"{}\" processed.'.format(input_file.stem))", "def atomisticSphere (flag, filin, filout, max_distance = 15, analysis = 1, atom_central = \"mean_point\", debug = 1):\n \n list_atom_pocket = parsePDB.loadCoordSectionPDB(filin)\n dico_stock_count = tool.generateStructCompositionAtomistic (max_distance, 3)\n \n if atom_central == \"mean_point\" : \n central_point = generateMeansPointPocket (list_atom_pocket)\n # else append barycenter pocket calculated by RADI\n \n for atom in list_atom_pocket : \n distance = parsePDB.distanceTwoatoms(central_point, atom)\n # print distance\n element = atom[\"element\"]\n name_atom = atom[\"name\"]\n residue = tool.transformAA(atom[\"resName\"])\n \n for distance_key in dico_stock_count.keys() : \n if distance <= distance_key or distance > max_distance : \n dico_stock_count [distance_key] [\"atom\"] = dico_stock_count [distance_key] [\"atom\"] + 1\n if element == \"C\" : \n dico_stock_count [distance_key] [\"carbon\"] = dico_stock_count [distance_key] [\"carbon\"] + 1\n elif element == \"N\" : \n dico_stock_count [distance_key] [\"nitrogen\"] = dico_stock_count [distance_key] [\"nitrogen\"] + 1\n elif element == \"S\" : \n dico_stock_count [distance_key] [\"sulfur\"] = dico_stock_count [distance_key] [\"sulfur\"] + 1\n elif element == \"O\" : \n dico_stock_count [distance_key] [\"oxygen\"] = dico_stock_count [distance_key] [\"oxygen\"] + 1\n elif element == \"H\" : \n dico_stock_count [distance_key] [\"hydrogen\"] = dico_stock_count [distance_key] [\"hydrogen\"] + 1\n \n if residue in dico_Hacceptor.keys () : \n if name_atom in dico_Hacceptor[residue] : \n dico_stock_count [distance_key] [\"hbond_acceptor\"] = dico_stock_count [distance_key] [\"hbond_acceptor\"] + 1\n \n if residue in dico_atom_Car : \n if name_atom in dico_atom_Car[residue] : \n dico_stock_count [distance_key] [\"aromatic\"] = dico_stock_count [distance_key] [\"aromatic\"] + 1\n \n if residue in dico_atom_hydrophobic : \n if name_atom in 
dico_atom_hydrophobic[residue] : \n dico_stock_count [distance_key] [\"hydrophobic\"] = dico_stock_count [distance_key] [\"hydrophobic\"] + 1\n \n if residue in dico_atom_Carg : \n if name_atom in dico_atom_Carg[residue] : \n dico_stock_count [distance_key] [\"alcool\"] = dico_stock_count [distance_key] [\"alcool\"] + 1\n \n \n if residue in dico_Hdonor.keys () : \n if name_atom in dico_Hdonor[residue] : \n dico_stock_count [distance_key] [\"hbond_donor\"] = dico_stock_count [distance_key] [\"hbond_donor\"] + 1\n \n if name_atom == \"CA\" or name_atom == \"O\" or name_atom == \"C\" or name_atom == \"N\" or name_atom == \"H\" or name_atom == \"HA\" :\n dico_stock_count [distance_key] [\"main_chain\"] = dico_stock_count [distance_key] [\"main_chain\"] + 1\n else : \n dico_stock_count [distance_key] [\"side_chain\"] = dico_stock_count [distance_key] [\"side_chain\"] + 1\n \n for distance_key in dico_stock_count.keys () : \n nb_atom = float(dico_stock_count [distance_key] [\"atom\"])\n if nb_atom == 0 : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n \n else : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + str(nb_atom) + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + str (dico_stock_count [distance_key] [\"side_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"main_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"sulfur\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"carbon\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"nitrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"oxygen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_acceptor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_donor\"] / nb_atom) + \"\\n\")\n filout.write (flag + 
\"_alcool_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"alcool\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrophobic\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"aromatic\"] / nb_atom) + \"\\n\")", "def addMods(file, spec, data_card, channel_bins, systs):\n for im, modifier in enumerate(systs):\n if \"normsys\" in modifier[1]: ##normsys\n # write normsys as 'shape?' so that Combine doesn't try to combine normsys and histosys mods of the same name\n data_card.systs.append((modifier[0], False, \"shape?\", [], {}))\n for channel in spec[\"channels\"]:\n data_card.systs[im][4].update({channel[\"name\"]: {}})\n for sample in channel[\"samples\"]:\n for i in data_card.systs:\n i[4][channel[\"name\"]].update({sample[\"name\"]: 0.0})\n\n if \"lumi\" in modifier[1]: ##lumi\n # Write lumi as lnN since they act the same way on the model\n data_card.systs.append((modifier[0], False, \"lnN\", [], {}))\n for channel in spec[\"channels\"]:\n data_card.systs[im][4].update({channel[\"name\"]: {}})\n for sample in channel[\"samples\"]:\n for i in data_card.systs:\n i[4][channel[\"name\"]].update({sample[\"name\"]: 0.0})\n\n if \"histosys\" in modifier[1]: ##histosys\n data_card.systs.append((modifier[0], False, \"shape\", [], {}))\n for channel in spec[\"channels\"]:\n data_card.systs[im][4].update({channel[\"name\"]: {}})\n for sample in channel[\"samples\"]:\n for i in data_card.systs:\n i[4][channel[\"name\"]].update({sample[\"name\"]: 0.0})\n\n for idxc, channel in enumerate(spec[\"channels\"]):\n for idxs, sample in enumerate(channel[\"samples\"]):\n mods = sample[\"modifiers\"]\n names = [mod[\"name\"] for mod in mods]\n for syst in data_card.systs:\n name = syst[0]\n if name in names:\n syst_type = syst[2]\n # if systematic name is a modifier for this sample\n if \"shape?\" in syst_type: ##normsys\n for mod in mods:\n if mod[\"type\"] == \"normsys\" and mod[\"name\"] == name:\n if mod[\"data\"][\"lo\"] == 0:\n # asymmetric lnN\n syst[4][channel[\"name\"]].update(\n {\n sample[\"name\"]: str(\n mod[\"data\"][\"lo\"] + 1e-9\n )\n + \"/\"\n + str(mod[\"data\"][\"hi\"])\n }\n )\n elif mod[\"data\"][\"hi\"] == 0:\n # asymmetric lnN\n syst[4][channel[\"name\"]].update(\n {\n sample[\"name\"]: str(mod[\"data\"][\"lo\"])\n + \"/\"\n + str(mod[\"data\"][\"hi\"] + 1e-9)\n }\n )\n else:\n # asymmetric lnN\n syst[4][channel[\"name\"]].update(\n {\n sample[\"name\"]: str(mod[\"data\"][\"lo\"])\n + \"/\"\n + str(mod[\"data\"][\"hi\"])\n }\n )\n if \"lnN\" in syst_type: ##lumi only\n for mod in mods:\n if mod[\"type\"] == \"lumi\" and mod[\"name\"] == name:\n for measurement in spec[\"measurements\"]:\n for param in measurement[\"config\"][\"parameters\"]:\n if mod[\"name\"] == param[\"name\"]:\n # asymmetric lnN\n syst[4][channel[\"name\"]].update(\n {\n sample[\"name\"]: str(\n param[\"auxdata\"][0]\n - param[\"sigmas\"][0]\n )\n + \"/\"\n + str(\n param[\"auxdata\"][0]\n + param[\"sigmas\"][0]\n )\n }\n )\n\n if \"shape\" in syst_type: ##histosys\n for mod in mods:\n if mod[\"type\"] == \"histosys\" and mod[\"name\"] == name:\n syst[4][channel[\"name\"]].update({sample[\"name\"]: 1.0})\n hi_data = hist.Hist.new.Regular(\n channel_bins[channel[\"name\"]],\n 0,\n channel_bins[channel[\"name\"]],\n ).Weight()\n hi_data[...] 
= np.stack(\n [\n mod[\"data\"][\"hi_data\"],\n [\n 0\n for _ in range(\n channel_bins[channel[\"name\"]]\n )\n ],\n ],\n axis=-1,\n )\n lo_data = hist.Hist.new.Regular(\n channel_bins[channel[\"name\"]],\n 0,\n channel_bins[channel[\"name\"]],\n ).Weight()\n lo_data[...] = np.stack(\n [\n mod[\"data\"][\"lo_data\"],\n [\n 0\n for _ in range(\n channel_bins[channel[\"name\"]]\n )\n ],\n ],\n axis=-1,\n )\n file[\n channel[\"name\"]\n + \"/\"\n + spec[\"channels\"][idxc][\"samples\"][idxs][\"name\"]\n + \"_\"\n + name\n + \"Up\"\n ] = hi_data\n file[\n channel[\"name\"]\n + \"/\"\n + spec[\"channels\"][idxc][\"samples\"][idxs][\"name\"]\n + \"_\"\n + name\n + \"Down\"\n ] = lo_data", "def is_surjective(self):\n # Testing equality of free modules over PIDs is unreliable\n # see Trac #11579 for explanation and status\n # We test if image equals codomain with two inclusions\n # reverse inclusion of below is trivially true\n return self.codomain().is_submodule(self.image())", "def restricted_automorphism_group(self):\n if '_restricted_automorphism_group' in self.__dict__:\n return self._restricted_automorphism_group\n\n from sage.groups.perm_gps.permgroup import PermutationGroup\n\n if self.field() is QQ:\n def rational_approximation(c):\n return c\n\n else: # self.field() is RDF\n c_list = []\n def rational_approximation(c):\n # Implementation detail: Return unique integer if two\n # c-values are the same up to machine precision. But\n # you can think of it as a uniquely-chosen rational\n # approximation.\n for i,x in enumerate(c_list):\n if self._is_zero(x-c):\n return i\n c_list.append(c)\n return len(c_list)-1\n \n # The algorithm identifies the restricted automorphism group\n # with the automorphism group of a edge-colored graph. The\n # nodes of the graph are the V-representation objects. If all\n # V-representation objects are vertices, the edges are\n # labelled by numbers (to be computed below). Roughly\n # speaking, the edge label is the inner product of the\n # coordinate vectors with some orthogonalization thrown in\n # [BSS].\n def edge_label_compact(i,j,c_ij):\n return c_ij\n\n # In the non-compact case we also label the edges by the type\n # of the V-representation object. This ensures that vertices,\n # rays, and lines are only permuted amongst themselves.\n def edge_label_noncompact(i,j,c_ij):\n return (self.Vrepresentation(i).type(), c_ij, self.Vrepresentation(j).type())\n\n if self.is_compact():\n edge_label = edge_label_compact\n else:\n edge_label = edge_label_noncompact\n\n # good coordinates for the V-representation objects\n v_list = []\n for v in self.Vrepresentation():\n v_coords = list(self._affine_coordinates(v))\n if v.is_vertex():\n v_coords = [1]+v_coords\n else:\n v_coords = [0]+v_coords\n v_list.append(vector(v_coords))\n\n # Finally, construct the graph\n Qinv = sum( v.column() * v.row() for v in v_list ).inverse()\n\n # Was set to sparse = False, but there is a problem with Graph\n # backends. 
It should probably be set back to sparse = False as soon as\n # the backends are fixed.\n G = Graph(sparse=True)\n for i in range(0,len(v_list)):\n for j in range(i+1,len(v_list)):\n v_i = v_list[i]\n v_j = v_list[j]\n c_ij = rational_approximation( v_i * Qinv * v_j )\n G.add_edge(i,j, edge_label(i,j,c_ij))\n\n group, node_dict = G.automorphism_group(edge_labels=True, translation=True)\n\n # Relabel the permutation group\n perm_to_vertex = dict( (i,v+1) for v,i in node_dict.items() )\n group = PermutationGroup([ [ tuple([ perm_to_vertex[i] for i in cycle ])\n for cycle in generator.cycle_tuples() ]\n for generator in group.gens() ])\n\n self._restricted_automorphism_group = group\n return group", "def get_disparity(self, imgL, imgR):\n # SGBM Parameters -----------------\n window_size = 1 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n param = {'minDisparity': 0, 'numDisparities': 32, 'blockSize': 5, 'P1': 10, 'P2': 20, 'disp12MaxDiff': 1,\n 'preFilterCap': 65, 'uniquenessRatio': 10, 'speckleWindowSize': 150, 'speckleRange': 2, 'mode': 2}\n left_matcher = cv2.StereoSGBM_create(**param)\n # left_matcher = cv2.StereoSGBM_create(\n # minDisparity=-1,\n # numDisparities=5*16, # max_disp has to be divisible by 16 f. E. HH 192, 256\n # blockSize=window_size,\n # P1=8 * 3 * window_size,\n # # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n # P2=32 * 3 * window_size,\n # disp12MaxDiff=12,\n # uniquenessRatio=10,\n # speckleWindowSize=50,\n # speckleRange=32,\n # preFilterCap=63,\n # mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY\n # )\n right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)\n # FILTER Parameters\n lmbda = 8000\n sigma = 1.3\n wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)\n wls_filter.setLambda(lmbda)\n\n wls_filter.setSigmaColor(sigma)\n displ = left_matcher.compute(imgL, imgR) # .astype(np.float32)/16\n dispr = right_matcher.compute(imgR, imgL) # .astype(np.float32)/16\n displ = np.int16(displ)\n dispr = np.int16(dispr)\n filteredImg = wls_filter.filter(displ, imgL, None, dispr) # important to put \"imgL\" here!!!\n filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)\n filteredImg = np.uint8(filteredImg)\n # Divide by 16 to get the true disparity (the SGBM algorithm returns disparity scaled by 16)\n displ[displ < 0] = 0\n # disparity.astype(np.float32) / 16.\n displ = np.divide(displ.astype(np.float32), 16.)\n return filteredImg, displ", "def _add_pairblend(self, dgmod, rigid, transform):\n\n assert isinstance(dgmod, cmdx.DGModifier)\n\n pair_blend = dgmod.create_node(\"pairBlend\")\n dgmod.set_attr(pair_blend[\"isHistoricallyInteresting\"], False)\n\n # Establish initial values, before keyframes\n # tm = cmdx.Tm(self._cache[(transform, \"matrix\")])\n\n # Read from matrix, as opposed to the rotate/translate channels\n # to account for jointOrient, pivots and all manner of things\n # translate = tm.translation()\n # rotate = tm.rotation()\n\n translate = self._cache[(transform, \"translate\")]\n rotate = self._cache[(transform, \"rotate\")]\n\n dgmod.set_attr(pair_blend[\"inTranslate1\"], translate)\n dgmod.set_attr(pair_blend[\"inRotate1\"], rotate)\n\n dgmod.connect(rigid[\"outputTranslateX\"], pair_blend[\"inTranslateX2\"])\n dgmod.connect(rigid[\"outputTranslateY\"], pair_blend[\"inTranslateY2\"])\n dgmod.connect(rigid[\"outputTranslateZ\"], pair_blend[\"inTranslateZ2\"])\n dgmod.connect(rigid[\"outputRotateX\"], 
pair_blend[\"inRotateX2\"])\n dgmod.connect(rigid[\"outputRotateY\"], pair_blend[\"inRotateY2\"])\n dgmod.connect(rigid[\"outputRotateZ\"], pair_blend[\"inRotateZ2\"])\n\n # Let the animator see the raw animation values, no physics\n dgmod.connect(self._tree_root[0][\"simulated\"], pair_blend[\"weight\"])\n\n if self._opts[\"autoKey\"]:\n # Generate default animation curves, it's expected since you can no\n # longer see whether channels are keyed or not, now being green.\n time = cmdx.currentTime()\n mapping = (\n (\"animCurveTL\", translate.x, \"inTranslateX1\"),\n (\"animCurveTL\", translate.y, \"inTranslateY1\"),\n (\"animCurveTL\", translate.z, \"inTranslateZ1\"),\n (\"animCurveTA\", rotate.x, \"inRotateX1\"),\n (\"animCurveTA\", rotate.y, \"inRotateY1\"),\n (\"animCurveTA\", rotate.z, \"inRotateZ1\")\n )\n\n for curve, value, dst in mapping:\n curve = dgmod.create_node(curve)\n curve.key(time, value)\n dgmod.connect(curve[\"output\"], pair_blend[dst])\n\n # Transfer existing animation/connections\n for src, dst in transform.data.get(\"priorConnections\", {}).items():\n dst = pair_blend[dst]\n dgmod.connect(src, dst)\n\n commands._connect_transform(dgmod, pair_blend, transform)\n\n return pair_blend", "def cleaveSurfAtom(entry,max_bonds=1,supercell=2,group_structs=True):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # If the crystal is 3D\n if results[0]=='conventional':\n struct = copy.deepcopy(entry[0])\n og_binary_matrix = getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(og_binary_matrix)))\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n bonds = []\n \n # Get list of bonded atoms\n for i in range(len(og_binary_matrix)):\n for pair in [(i,j) for j in range(i+1,len(og_binary_matrix)) \n if og_binary_matrix[i][j]==1]:\n bonds.append(pair)\n allCombos = []\n combNum = 0\n \n # Get list of all combinations of bonds\n for i in range(max_bonds+1):\n for com in list(itertools.combinations(bonds,i)):\n allCombos.append(com)\n combNum+=1\n\n combos = allCombos\n jjj=0\n all_structs = []\n \n # For each bond combination\n for combo in combos:\n broken=0\n jjj+=1\n modified_matrix = np.array(binary_matrix)\n for pair in combo:\n i,j = pair\n i=i*supercell**3\n j=j*supercell**3\n # Break bonds in the loop\n for shift in range(supercell**3):\n for shift2 in range(supercell**3):\n modified_matrix[i+shift][j+shift2]=0\n modified_matrix[j+shift][i+shift2]=0\n\n new_num_bonds=sum(sum(modified_matrix))\n broken=int(og_num_bonds-new_num_bonds) \n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n # If the new set of atoms is not empty\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. 
the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n # If formula of new network matches the original cell\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n else:\n print('Material does not have a 3D motif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def stereo(func):\n @wraps(func)\n def stereoFunc(*args, **kwargs):\n # trying to find a way to have a method\n # calling another method not do the stereo repeat\n # so if calling an internal func from a stereo func,\n # add the stereo kw arg to call\n # it will be removed before calling underlying func\n\n if 'stereo' in kwargs.keys():\n _stereo = kwargs['stereo']\n del(kwargs['stereo'])\n else:\n _stereo = 1\n res = func(*args, **kwargs)\n if args[0].stereo and _stereo: # self.stereo\n _LOGGER.debug(\"Stereo Command {}:{}\".format(func.__name__, args))\n largs = list(args)\n if type(largs[1]) == str:\n largs[1] = chr(ord(largs[1])+1)\n else:\n largs[1] = largs[1] + 1\n if func.__name__[3:9] == \"Matrix\": # do stereo on input and output\n _LOGGER.debug(\"Matrix Stereo Command {}\".format(func.__name__))\n if type(largs[2]) == str:\n largs[2] = chr(ord(largs[2])+1)\n else:\n largs[2] = largs[2] + 1\n res2 = func(*largs, **kwargs)\n if res != res2:\n _LOGGER.debug(\"Stereo out of sync {} : {}\".format(res, res2))\n warnings.warn(\"Stereo out of sync\", RuntimeWarning)\n if res is not None:\n return res\n return stereoFunc", "def stereo_callback(self, stereo_msg):\r\n start = time.time()\r\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\r\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\r\n\r\n # Build the image pyramids once since they're used at multiple places.\r\n self.create_image_pyramids()\r\n\r\n # Detect features in the first frame.\r\n if self.is_first_img:\r\n if not self.config.load_features_flag:\r\n self.initialize_first_frame()\r\n self.is_first_img = False\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n else:\r\n if not self.config.load_features_flag:\r\n # Track the feature in the previous image.\r\n t = time.time()\r\n self.track_features()\r\n print('___track_features:', time.time() - t)\r\n t = time.time()\r\n\r\n # Add new features into the current image.\r\n self.add_new_features()\r\n print('___add_new_features:', time.time() - t)\r\n t = time.time()\r\n self.prune_features()\r\n print('___prune_features:', time.time() - t)\r\n t = time.time()\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n print('___draw_features_stereo:', time.time() - t)\r\n t = time.time()\r\n\r\n print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')\r\n\r\n if not self.config.load_features_flag:\r\n try:\r\n self.save_features() \r\n return self.publish()\r\n finally:\r\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\r\n self.prev_features = self.curr_features\r\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\r\n\r\n # Initialize the current features to empty vectors.\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n else:\r\n self.load_features()\r\n return self.publish()", "def substereomers(gra):\n _assigned = 
functools.partial(\n dict_.filter_by_value, func=lambda x: x is not None)\n\n known_atm_ste_par_dct = _assigned(atom_stereo_parities(gra))\n known_bnd_ste_par_dct = _assigned(bond_stereo_parities(gra))\n\n def _is_compatible(sgr):\n atm_ste_par_dct = _assigned(atom_stereo_parities(sgr))\n bnd_ste_par_dct = _assigned(bond_stereo_parities(sgr))\n _compat_atm_assgns = (set(known_atm_ste_par_dct.items()) <=\n set(atm_ste_par_dct.items()))\n _compat_bnd_assgns = (set(known_bnd_ste_par_dct.items()) <=\n set(bnd_ste_par_dct.items()))\n return _compat_atm_assgns and _compat_bnd_assgns\n\n sgrs = tuple(filter(_is_compatible, stereomers(gra)))\n return sgrs", "def known_organisms():\n return [\"rat\"]", "def test_isomorphic_stripped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )", "def build(self, X, Y, w=None):\n super(MorseSmaleComplex, self).build(X, Y, w)\n\n if self.debug:\n sys.stdout.write(\"Decomposition: \")\n start = time.perf_counter()\n\n stableManifolds = MorseComplex(debug=self.debug)\n unstableManifolds = MorseComplex(debug=self.debug)\n\n stableManifolds._build_for_morse_smale_complex(self, False)\n unstableManifolds._build_for_morse_smale_complex(self, True)\n\n self.min_indices = unstableManifolds.max_indices\n self.max_indices = stableManifolds.max_indices\n\n # If a degenerate point is both a minimum and a maximum, it\n # could potentially appear twice, but would be masked by the\n # minimum key which would wipe the maximum merge\n self.merge_sequence = stableManifolds.merge_sequence.copy()\n self.merge_sequence.update(unstableManifolds.merge_sequence)\n self.persistences = sorted(\n stableManifolds.persistences + unstableManifolds.persistences\n )\n\n self.base_partitions = {}\n base = np.array([[None, None]] * len(Y))\n for key, items in unstableManifolds.base_partitions.items():\n base[np.array(items), 0] = key\n for key, items in stableManifolds.base_partitions.items():\n base[np.array(items), 1] = key\n\n keys = set(map(tuple, base))\n for key in keys:\n self.base_partitions[key] = np.where(\n np.logical_and(base[:, 0] == key[0], base[:, 1] == key[1])\n )[0]\n\n if self.debug:\n end = time.perf_counter()\n sys.stdout.write(\"%f s\\n\" % (end - start))", "def sequence_tunable(\n mol,\n OP_REMOVE_ISOTOPE=True, OP_NEUTRALISE_CHARGE=True,\n OP_REMOVE_STEREO=False, OP_COMMUTE_INCHI=False,\n OP_KEEP_BIGGEST=True, OP_ADD_HYDROGEN=True,\n OP_KEKULIZE=True, OP_NEUTRALISE_CHARGE_LATE=True\n ):\n F = Filters()\n # Always perform the basics..\n Cleanup(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n AssignStereochemistry(mol, cleanIt=True, force=True, flagPossibleStereoCenters=True) # Fix bug TD201904.01\n # \n if OP_REMOVE_ISOTOPE:\n mol = F.remove_isotope(mol)\n if OP_NEUTRALISE_CHARGE:\n mol = F.neutralise_charge(mol)\n if any([OP_REMOVE_ISOTOPE, OP_NEUTRALISE_CHARGE]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n # \n if OP_REMOVE_STEREO:\n mol = F.remove_stereo(mol)\n OP_COMMUTE_INCHI = True\n if OP_COMMUTE_INCHI:\n mol = F.commute_inchi(mol)\n if OP_KEEP_BIGGEST:\n mol = 
F.keep_biggest(mol)\n if any([OP_REMOVE_STEREO, OP_COMMUTE_INCHI, OP_KEEP_BIGGEST]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_NEUTRALISE_CHARGE_LATE:\n mol = F.neutralise_charge(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_ADD_HYDROGEN:\n mol = F.add_hydrogen(mol, addCoords=True)\n if OP_KEKULIZE:\n mol = F.kekulize(mol)\n #\n return mol", "def add_synth_group(self, name=\"\"):\n pass", "def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))", "def chordmode():\n for token in consume():\n if source.inSelection and isinstance(token, tokenizer.Pitch):\n transpose(token, 0)", "def convertStereo(u, v, disparity, info):\n stereoModel = image_geometry.StereoCameraModel()\n stereoModel.fromCameraInfo(info['l'], info['r'])\n (x,y,z) = stereoModel.projectPixelTo3d((u,v), disparity)\n\n cameraPoint = PointStamped()\n cameraPoint.header.frame_id = info['l'].header.frame_id\n cameraPoint.header.stamp = rospy.Time.now()\n cameraPoint.point = Point(x,y,z)\n return cameraPoint", "def test_process_stereo_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/stereo.wav'\n self.default_kwargs['input_file'] = test_path\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()", "def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()", "def n_stereo_centers(mol: Mol) -> int:\n n = 0\n try:\n rdmolops.FindPotentialStereo(mol, cleanIt=False)\n n = rdMolDescriptors.CalcNumAtomStereoCenters(mol)\n except Exception:\n pass\n return n", "def build_reactive_complex(self, settings_manager: SettingsManager):\n import scine_database as db\n import scine_utilities as utils\n\n start_structure_ids = self._calculation.get_structures()\n start_structures = [db.Structure(sid, self._structures) for sid in start_structure_ids]\n 
self.save_initial_graphs_and_charges(settings_manager, start_structures)\n if len(start_structures) == 1:\n # For an intramolecular structure it is sufficient to provide one\n # structure that is both start structure and reactive complex\n structure = start_structures[0]\n atoms = structure.get_atoms()\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n if len(start_structures) == 2:\n # Intermolecular reactions require in situ generation of the reactive complex\n s0 = start_structures[0]\n s1 = start_structures[1]\n\n # Get coordinates\n atoms1 = s0.get_atoms()\n atoms2 = s1.get_atoms()\n elements1 = atoms1.elements\n elements2 = atoms2.elements\n coordinates1 = atoms1.positions\n coordinates2 = atoms2.positions\n # Calculate reactive center mean position\n if self.exploration_key + \"_lhs_list\" in self.settings[self.exploration_key]:\n sites1 = self.settings[self.exploration_key][self.exploration_key + \"_lhs_list\"]\n sites2 = self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"]\n self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"] = list(\n idx + len(elements1) for idx in sites2\n )\n elif \"nt_associations\" in self.settings[self.exploration_key]:\n sites1 = []\n sites2 = []\n nAtoms1 = len(atoms1.elements)\n for i in range(0, len(self.settings[self.exploration_key][\"nt_associations\"]), 2):\n at1 = self.settings[self.exploration_key][\"nt_associations\"][i]\n at2 = self.settings[self.exploration_key][\"nt_associations\"][i + 1]\n if at1 >= nAtoms1 > at2:\n sites1.append(at2)\n sites2.append(at1 - nAtoms1)\n if at2 >= nAtoms1 > at1:\n sites1.append(at1)\n sites2.append(at2 - nAtoms1)\n else:\n self.raise_named_exception(\n \"Reactive complex cannot be built: missing reactive atoms list(s).\"\n )\n reactive_center1 = np.mean(coordinates1[sites1], axis=0)\n reactive_center2 = np.mean(coordinates2[sites2], axis=0)\n # Place reactive center mean position into origin\n coord1 = coordinates1 - reactive_center1\n coord2 = coordinates2 - reactive_center2\n positions = self._orient_coordinates(coord1, coord2)\n atoms = utils.AtomCollection(elements1 + elements2, positions)\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n # should not be reachable\n self.raise_named_exception(\n \"Reactive complexes built from more than 2 structures are not supported.\"\n )", "def test_modeller_mutations():\n mol_id = 'Abl'\n abl_path = examples_paths()['abl']\n with mmtools.utils.temporary_directory() as tmp_dir:\n # Safety check: protein must have WT residue: THR at residue 85 in chain A\n has_wt_residue = False\n with open(abl_path, 'r') as f:\n for line in f:\n if (line[:6] == 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='THR'):\n has_wt_residue = True\n break\n assert has_wt_residue\n\n yaml_content = get_template_script(tmp_dir)\n exp_builder = ExperimentBuilder(yaml_content)\n output_dir = exp_builder._db.get_molecule_dir(mol_id)\n output_path = os.path.join(output_dir, 'Abl.pdb')\n\n # We haven't set the strip_protons options, so this shouldn't do anything\n exp_builder._db._setup_molecules(mol_id)\n assert not os.path.exists(output_path)\n\n # Calling modeller with WT creates a file (although the protein is not mutated).\n exp_builder._db.molecules[mol_id]['modeller'] = {\n 'apply_mutations': {\n 'chain_id': 'A',\n 'mutations': 'WT',\n }\n }\n 
setup_molecule_output_check(exp_builder._db, mol_id, output_path)\n os.remove(output_path) # Remove file for next check.\n\n\n # Reinitialize exp_builder\n exp_builder = ExperimentBuilder(yaml_content)\n\n # Now we set the strip_protons options and repeat for the mutant case\n exp_builder._db.molecules[mol_id]['modeller'] = {\n 'apply_mutations': {\n 'chain_id': 'A',\n 'mutations': 'T85I',\n }\n }\n setup_molecule_output_check(exp_builder._db, mol_id, output_path)\n\n # Safety check: protein must have mutated residue: ILE at residue 85 in chain A\n has_mut_residue = False\n with open(output_path, 'r') as f:\n for line in f:\n if (line[:6] == 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='ILE'):\n has_mut_residue = True\n break\n assert has_mut_residue", "def space_group(self) -> PermutationGroup:\n return self._full_translation_group @ self.point_group", "def stereo_match(self, cam0_points):\r\n cam0_points = np.array(cam0_points)\r\n if len(cam0_points) == 0:\r\n return []\r\n\r\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)\r\n cam1_points = self.distort_points(\r\n cam0_points_undistorted, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n cam1_points_copy = cam1_points.copy()\r\n\r\n # Track features using LK optical flow method.\r\n cam0_points = cam0_points.astype(np.float32)\r\n cam1_points = cam1_points.astype(np.float32)\r\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam0_pyramid, self.curr_cam1_pyramid,\r\n cam0_points, cam1_points, **self.config.lk_params)\r\n\r\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam1_pyramid, self.curr_cam0_pyramid, \r\n cam1_points, cam0_points.copy(), **self.config.lk_params)\r\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\r\n\r\n # cam1_points_undistorted = self.undistort_points(\r\n # cam1_points, self.cam1_intrinsics,\r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, R_cam0_cam1)\r\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\r\n \r\n\r\n \r\n inlier_markers = np.logical_and.reduce(\r\n [inlier_markers.reshape(-1), err < 3, disparity < 20])\r\n\r\n # Mark those tracked points out of the image region as untracked.\r\n img = self.cam1_curr_img_msg.image\r\n for i, point in enumerate(cam1_points):\r\n if not inlier_markers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n inlier_markers[i] = 0\r\n\r\n # Compute the relative rotation between the cam0 frame and cam1 frame.\r\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\r\n # Compute the essential matrix.\r\n E = skew(t_cam0_cam1) @ R_cam0_cam1\r\n\r\n # Further remove outliers based on the known essential matrix.\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs)\r\n cam1_points_undistorted = self.undistort_points(\r\n cam1_points, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n\r\n norm_pixel_unit = 4.0 / (\r\n self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +\r\n self.cam1_intrinsics[0] + self.cam1_intrinsics[1])\r\n\r\n for i in range(len(cam0_points_undistorted)):\r\n if not inlier_markers[i]:\r\n continue\r\n pt0 = 
np.array([*cam0_points_undistorted[i], 1.0])\r\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\r\n epipolar_line = E @ pt0\r\n error = np.abs(pt1 @ epipolar_line) / np.linalg.norm(\r\n epipolar_line[:2])\r\n\r\n if error > self.config.stereo_threshold * norm_pixel_unit:\r\n inlier_markers[i] = 0\r\n\r\n return cam1_points, inlier_markers", "async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))", "def get_torsion_contrib(self, groupBy='m'):\n e4 = 0.0\n es4 = []\n iass4 = []\n\n types4_z = []\n types4_m = []\n\n zs8 = [8, 16, 34] # zs of group 8 elements\n set_hybs = set(['SP2','SP3'])\n\n for ib in range(self.nb):\n j, k = self.ias2[ib]\n if self.zs[j] > self.zs[k]:\n tv = j; j = k; k = tv\n neibs1 = self.m.GetAtomWithIdx(j).GetNeighbors(); n1 = len(neibs1);\n neibs2 = self.m.GetAtomWithIdx(k).GetNeighbors(); n2 = len(neibs2);\n for i0 in range(n1):\n for l0 in range(n2):\n i = neibs1[i0].GetIdx(); l = neibs2[l0].GetIdx()\n if len(set([i,j,k,l])) == 4:\n eijkl = 0.0\n ias = [ i,j,k,l ]; iass4.append(ias)\n zsi = [ self.zs[ia] for ia in ias ]\n types4_z.append( '-'.join([ '%d'%zi for zi in zsi ]) )\n types4_m.append( '-'.join([ self.atypes[ia] for ia in ias ]) )\n V = rcr.GetUFFTorsionParams(self.m, i, j, k, l)\n tor = rdMolTransforms.GetDihedralRad(self.m.GetConformer(), i,j,k,l)\n hyb2 = self.hybs[j]\n hyb3 = self.hybs[k]\n if (hyb2 == 'SP3') and (hyb3 == 'SP3'):\n order = 3; cosNPhi0 = -1 # Phi0 = 60 degree\n if self.bos[ib] == 1 and set([self.zs[j],self.zs[k]]) <= set(zs8):\n order = 2; cosNPhi0 = -1\n eijkl = 0.5*V*( 1.0 - cosNPhi0*np.cos(tor*order) )\n elif (hyb2 == 'SP2') and (hyb3 == 'SP2'):\n order = 2; cosNPhi0 = 1.0 # phi0 = 180\n eijkl = 0.5*V*( 1.0 - cosNPhi0*np.cos(tor*order) )\n elif set([hyb2,hyb3]) == set_hybs:\n # SP2 - SP3, this is, by default, independent of atom type in UFF\n order = 6; cosNPhi0 = 1.0 # phi0 = 0\n if self.bos[ib] == 1.0:\n # special case between group 6 sp3 and non-group 6 sp2:\n #if hyb2 == 'SP3' and hyb3 == 'SP3' and set([zs[j],zs[k]]) <= zs8:\n # order = 2; cosNPhi0 = -1 # phi0 = 90\n if ((self.zs[j] in zs8) and (self.hybs[k] == 'SP2')) or \\\n ((self.zs[k] in zs8) and (self.hybs[j] == 'SP2')):\n order = 2; cosNPhi0 = -1 # phi0 = 90\n eijkl = 0.5*V*( 1.0 - cosNPhi0*np.cos(tor*order) )\n #else:\n # raise '#ERROR: unknown scenario?'\n #print '[i,j,k,l] = [%d,%d,%d,%d], eijkl = %.4f'%(i,j,k,l, eijkl )\n #print V, order, cosNPhi0, tor, eijkl\n es4.append(eijkl)\n e4 += eijkl\n self.e4 = e4\n self.es4 = es4\n self.n4 = len(es4)\n self.types4 = {'m': types4_m, 'n': types4_z}[groupBy]\n #return e4, n4, types4, es4", "def composite_scene(orig_scene, mask_seam, match_scene, dialation_mask, orig_scene1, method=\"paste\", repeat=1):\n avg_pixel = np.mean(orig_scene1[orig_scene1 != 0])\n \n output = np.zeros(orig_scene.shape)\n if method==\"seamlessclone\":\n width, height, _ = match_scene.shape\n center = (height/2, width/2)\n \n # create plain white mask\n mask = np.zeros(match_scene.shape, match_scene.dtype) + 255\n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = avg_pixel\n \n \n \n #image_to_compare\n output_blend = cv2.seamlessClone(match_scene.astype(np.uint8), \n orig_scene_impute.astype(np.uint8), \n mask, 
center,cv2.NORMAL_CLONE)\n \n #implot(output_blend)\n # now reapply the mask with alpha blending to fix it up again.\n \n \"\"\"\n TO DO CHANGE IT FROM THE DILATION + MASK SEAM, NEED TO FIND THE INTERSECTION OF THESE TWO TO BE THE \n REAL MASK TO BLUR\n \"\"\"\n dilation_mask = mask_seam.copy()\n \n dilation_mask = cv2.GaussianBlur(dilation_mask, (101,101), 0) # blur mask and do a alpha blend... between the \n #implot(dilation_mask, 'gray')\n \n dilation_mask = dilation_mask/255.0\n \n \n \n # 0 is black, 1 is white\n #output = cv2.addWeighted(output_blend, dialation_mask, orig_scene, 1-dialation_mask)\n #print dialation_mask\n #print dialation_mask.shape\n #print output_blend.shape\n #a = cv2.multiply(output_blend.astype(np.float), dialation_mask)\n \n for _ in range(10):\n # some kind of layered alpha blend by the dilation mask values...\n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output = cv2.seamlessClone(match_scene.astype(np.uint8), \n output_blend.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n # complete blend with seamlessclone...\n \n \n # output = np.maximum(output_blend, orig_scene_impute)\n # or just darken...\n \n \n #if repeat == 1:\n # return output_blend\n #output = composite_scene(orig_scene_impute, mask_seam, output_blend, dialation_mask, method=\"paste\")\n \n\n\n elif method==\"paste\":\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n \n elif method==\"alphablend\":\n output_blend = output.copy()\n output_blend[mask_seam == 0] = orig_scene[mask_seam == 0]\n output_blend[mask_seam != 0] = match_scene[mask_seam != 0]\n \n \n \n \n else:\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n return output", "def is_standard_residue(self):\n return True", "def is_standard_residue(self):\n return True", "def wls_filter(matcher: cv2.StereoSGBM, dis_l: np.ndarray, dis_r: np.ndarray, img_l: np.ndarray) -> np.ndarray:\n lmbda, sigma = 8000, 0.8 # Configure filter parameters\n wls = cv2.ximgproc.createDisparityWLSFilter(matcher_left=matcher) # create disparity WLS filter\n wls.setLambda(lmbda) # set lambda to configured value\n wls.setSigmaColor(sigma) # set sigma to configured value\n return wls.filter(dis_l, img_l, None, dis_r) # return the filtered disparity map", "def correctKITTILabelForStereo(label):\n # TODO: check extensively\n base = 15.0\n scale = 1.07\n new_label = copy.deepcopy(label)\n if new_label['box3D']['location']['z'] > base:\n new_label['box3D']['location']['z'] = base + (new_label['box3D']['location']['z']-base)*scale\n return new_label", "def semigroup_generators(self):\n return self.ambient().semigroup_generators().map(self.retract)", "def test_process_stereo(self):\n self.encoder = StreamEncoder(**self.default_kwargs)\n test_samples = np.random.rand(DEFAULT_BLOCKSIZE, 2).astype('int16')\n 
self.encoder.process(test_samples)\n self.encoder.finish()\n self.assertTrue(self.write_callback_called)", "def resetCoronalSegment(self):\r\n #research\r\n profprint()\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(0)\r\n sGreen.SetOrientationToCoronal()\r\n #sw = slicer.app.layoutManager().sliceWidget(\"Green\")\r\n #sw.fitSliceToBackground()\r\n sGreen.Modified()", "def is_standard_residue(self):\n return False", "def braid_group_action(self):\n G = []\n for c in self:\n c = c.relabel()\n if any(c in g for g in G):\n continue\n G.append(c.braid_group_orbit())\n return G", "def test_add_lone_pairs_by_atom_valance(self):\n adj1 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 S u0 p2 c0 {1,S} {3,S}\n3 H u0 p0 c0 {2,S}\"\"\"\n mol1 = Molecule().from_adjacency_list(adjlist=adj1)\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), '[N]S')\n mol1.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), 'N#S')\n\n adj2 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 N u0 p1 c0 {1,S} {3,S} {4,S}\n3 H u0 p0 c0 {2,S}\n4 H u0 p0 c0 {2,S}\"\"\"\n mol2 = Molecule().from_adjacency_list(adjlist=adj2)\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N]N')\n mol2.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N-]=[NH2+]')\n\n adj3 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S}\n2 C u0 p0 c0 {1,S} {3,S} {8,S} {9,S}\n3 C u2 p0 c0 {2,S} {4,S}\n4 H u0 p0 c0 {3,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {1,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {2,S}\"\"\"\n mol3 = Molecule().from_adjacency_list(adjlist=adj3)\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_smiles(), '[CH]CC')\n mol3.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_adjacency_list(), \"\"\"1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}\n3 C u0 p1 c0 {1,S} {9,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {3,S}\n\"\"\")\n\n adj4 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {4,S} {5,S} {6,S}\n2 C u0 p0 c0 {1,S} {3,S} {7,S} {8,S}\n3 N u2 p1 c0 {2,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\"\"\"\n mol4 = Molecule().from_adjacency_list(adjlist=adj4)\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_smiles(), 'CC[N]')\n mol4.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_adjacency_list(), \"\"\"1 N u0 p2 c0 {3,S}\n2 C u0 p0 c0 {3,S} {4,S} {5,S} {6,S}\n3 C u0 p0 c0 {1,S} {2,S} {7,S} {8,S}\n4 H u0 p0 c0 {2,S}\n5 H u0 p0 c0 {2,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {3,S}\n8 H u0 p0 c0 {3,S}\n\"\"\")", "def mask(\n self, enc: SplitEncoding, random: bool = False, detach: bool = False\n ) -> tuple[SplitEncoding, SplitEncoding]:\n zs = enc.zs\n zy = enc.zy\n if detach:\n zs = zs.detach()\n zy = zy.detach()\n if random:\n zs_m = SplitEncoding(zs=torch.randn_like(zs), zy=zy)\n zy_m = SplitEncoding(zs=zs, zy=torch.randn_like(zy))\n else:\n zs_m = SplitEncoding(zs=torch.zeros_like(zs), zy=zy)\n zy_m = SplitEncoding(zs=zs, zy=torch.zeros_like(zy))\n return zs_m, zy_m", "def 
rmsd_cluster(input, ref, output, clusters):\n ifs = oemolistream()\n if not ifs.open(input):\n OEThrow.Fatal(\"Unable to open %s for reading\" % input)\n poses = list()\n mol = OEMol()\n while OEReadMolecule(ifs, mol):\n mol_copy = OEMol(mol)\n #print(dir(mol_copy))\n #print(mol_copy.NumConfs())\n for conf in mol_copy.GetConfs():\n poses.append(conf)\n ifs.close()\n print(\"%d poses read\" % len(poses))\n\n # Create a list of centroids, starting with first molecule.\n centroids = list()\n\n # Make first pose our first centroid.\n centroids.append(poses.pop(0))\n if int(clusters) < len(poses):\n print(\"Will return %s poses...\" % clusters)\n else:\n print(\"Will return %s poses...\" % (len(poses)+1))\n while len(centroids) < int(clusters) and len(poses)>0:\n print(len(centroids))\n # Compute distance from all poses to closest centroid.\n min_rmsd = numpy.zeros([len(poses)])\n for (pose_index, pose) in enumerate(poses):\n centroids_rmsds = [OERMSD(pose, centroid) for centroid in centroids]\n min_rmsd[pose_index] = min(centroids_rmsds)\n # Find pose that is farthest away from all current centroids.\n farthest_pose_index = min_rmsd.argmax()\n print(\"Farthest pose is %d at %f A away from centroids\" % (farthest_pose_index, min_rmsd[farthest_pose_index]))\n # Move farthest pose to centroids.\n centroids.append(poses.pop(farthest_pose_index))\n # Write out all centroids.\n ofs=oemolostream()\n if not ofs.open(output):\n OEThrow.Fatal(\"Unable to open %s for writing\" % itf.GetString(\"-o\"))\n for mol in centroids:\n #OEWritePDBFile(ofs, mol)\n OEWriteMolecule(ofs, mol)\n\n print(\"Done!\")\n\n return 0", "def set_original_planes(self, display_opt):\n\n # get 4-chamber view\n four_ch_view_plane_normal = self.find_4ch_view(display_opt)\n\n # set rodriguez rotation around midline (apex to C)\n axis_of_rot = np.array(self.epi_apex_node - self.C)\n self.axis_of_rot_normalized = axis_of_rot/np.linalg.norm(axis_of_rot)\n\n # get 2-chamber view (90-counterclock rotation from 4ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized,\n math.radians(self.orig_view_angles[1])) # rodriguez rotation around midline\n two_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n # get 3-chamber view (additional 30-60 counterclock rotation from 3ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized, math.radians(self.orig_view_angles[2]))\n three_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n if display_opt:\n _ = self.mesh_slicer(four_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(two_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(three_ch_view_plane_normal, 'mesh')\n\n self.original_planes = np.vstack((four_ch_view_plane_normal,\n two_ch_view_plane_normal,\n three_ch_view_plane_normal))", "def _ignore_collision(self):\n # The legacy version only ignores collision of child links of active joints.\n for link in self.cabinet.get_links():\n for s in link.get_collision_shapes():\n g0, g1, g2, g3 = s.get_collision_groups()\n s.set_collision_groups(g0, g1, g2 | 1 << 31, g3)", "def blendShapeEnvelopeOff():\n obj = cmds.ls(selection = True)\n history = cmds.listHistory(obj)\n bsHistory = cmds.ls(history, type = 'blendShape')\n for bs in bsHistory:\n cmds.setAttr(bs+'.'+'envelope',0.0) #note not changing blend target weights", "def gen_michaelis_menten_like_rate_law(model, reaction, modifiers=None, modifier_reactants=None, exclude_substrates=None):\n modifier_species = []\n all_species = {}\n 
all_volumes = {}\n all_observables = {}\n all_parameters = {}\n if modifiers:\n for modifier in modifiers:\n if type(modifier) == wc_lang.Observable:\n all_observables[modifier.id] = modifier\n for species in modifier.expression.species:\n modifier_species.append(species) \n elif type(modifier) == wc_lang.Species:\n modifier_species.append(modifier)\n all_species[modifier.gen_id()] = modifier\n else:\n raise TypeError('The modifiers contain element(s) that is not an observable or a species') \n\n if modifier_reactants is None:\n additional_reactants = []\n else:\n additional_reactants = modifier_reactants\n\n if exclude_substrates:\n excluded_reactants = exclude_substrates\n else:\n excluded_reactants = [] \n\n avogadro = model.parameters.get_or_create(\n id='Avogadro',\n type=None,\n value=scipy.constants.Avogadro,\n units=unit_registry.parse_units('molecule mol^-1'))\n all_parameters[avogadro.id] = avogadro\n\n model_k_cat = model.parameters.get_or_create(id='k_cat_{}'.format(reaction.id),\n type=wc_ontology['WC:k_cat'],\n units=unit_registry.parse_units('s^-1{}'.format(\n (' * molecule^{{-{}}}'.format(len(modifiers))) if modifiers else '')))\n all_parameters[model_k_cat.id] = model_k_cat\n\n expression_terms = [] \n for species in reaction.get_reactants():\n\n if (species not in modifier_species or species in additional_reactants) and species not in excluded_reactants:\n\n all_species[species.gen_id()] = species\n\n model_k_m = model.parameters.get_or_create(id='K_m_{}_{}'.format(reaction.id, species.species_type.id),\n type=wc_ontology['WC:K_m'],\n units=unit_registry.parse_units('M'))\n all_parameters[model_k_m.id] = model_k_m\n\n volume = species.compartment.init_density.function_expressions[0].function\n all_volumes[volume.id] = volume\n\n expression_terms.append('({} / ({} + {} * {} * {}))'.format(species.gen_id(),\n species.gen_id(),\n model_k_m.id, avogadro.id,\n volume.id))\n\n expression = '{}{}{}'.format(\n model_k_cat.id,\n (' * {}'.format(' * '.join([i.id if type(i)==wc_lang.Observable else i.gen_id() \\\n for i in modifiers]))) if modifiers else '',\n (' * {}'.format(' * '.join(expression_terms))) if expression_terms else '')\n \n rate_law_expression, error = wc_lang.RateLawExpression.deserialize(expression, {\n wc_lang.Parameter: all_parameters,\n wc_lang.Species: all_species,\n wc_lang.Observable: all_observables,\n wc_lang.Function: all_volumes,\n })\n assert error is None, str(error)\n\n return rate_law_expression, list(all_parameters.values())", "def sample_community(self) -> bool:\n community_prediction = self.get_community_prediction()\n return flip(community_prediction)", "def test_molecules_from_xyz(self):\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz6['dict'])\n\n # check that the atom order is the same\n self.assertTrue(s_mol.atoms[0].is_sulfur())\n self.assertTrue(b_mol.atoms[0].is_sulfur())\n self.assertTrue(s_mol.atoms[1].is_oxygen())\n self.assertTrue(b_mol.atoms[1].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_oxygen())\n self.assertTrue(b_mol.atoms[2].is_oxygen())\n self.assertTrue(s_mol.atoms[3].is_nitrogen())\n self.assertTrue(b_mol.atoms[3].is_nitrogen())\n self.assertTrue(s_mol.atoms[4].is_carbon())\n self.assertTrue(b_mol.atoms[4].is_carbon())\n self.assertTrue(s_mol.atoms[5].is_hydrogen())\n self.assertTrue(b_mol.atoms[5].is_hydrogen())\n self.assertTrue(s_mol.atoms[6].is_hydrogen())\n self.assertTrue(b_mol.atoms[6].is_hydrogen())\n self.assertTrue(s_mol.atoms[7].is_hydrogen())\n self.assertTrue(b_mol.atoms[7].is_hydrogen())\n 
self.assertTrue(s_mol.atoms[8].is_hydrogen())\n self.assertTrue(b_mol.atoms[8].is_hydrogen())\n self.assertTrue(s_mol.atoms[9].is_hydrogen())\n self.assertTrue(b_mol.atoms[9].is_hydrogen())\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz7['dict'])\n self.assertTrue(s_mol.atoms[0].is_oxygen())\n self.assertTrue(b_mol.atoms[0].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_carbon())\n self.assertTrue(b_mol.atoms[2].is_carbon())\n\n expected_bonded_adjlist = \"\"\"multiplicity 2\n1 O u0 p2 c0 {6,S} {10,S}\n2 O u0 p2 c0 {3,S} {28,S}\n3 C u0 p0 c0 {2,S} {8,S} {14,S} {15,S}\n4 C u0 p0 c0 {7,S} {16,S} {17,S} {18,S}\n5 C u0 p0 c0 {7,S} {19,S} {20,S} {21,S}\n6 C u0 p0 c0 {1,S} {22,S} {23,S} {24,S}\n7 C u1 p0 c0 {4,S} {5,S} {9,S}\n8 C u0 p0 c0 {3,S} {10,D} {11,S}\n9 C u0 p0 c0 {7,S} {11,D} {12,S}\n10 C u0 p0 c0 {1,S} {8,D} {13,S}\n11 C u0 p0 c0 {8,S} {9,D} {25,S}\n12 C u0 p0 c0 {9,S} {13,D} {26,S}\n13 C u0 p0 c0 {10,S} {12,D} {27,S}\n14 H u0 p0 c0 {3,S}\n15 H u0 p0 c0 {3,S}\n16 H u0 p0 c0 {4,S}\n17 H u0 p0 c0 {4,S}\n18 H u0 p0 c0 {4,S}\n19 H u0 p0 c0 {5,S}\n20 H u0 p0 c0 {5,S}\n21 H u0 p0 c0 {5,S}\n22 H u0 p0 c0 {6,S}\n23 H u0 p0 c0 {6,S}\n24 H u0 p0 c0 {6,S}\n25 H u0 p0 c0 {11,S}\n26 H u0 p0 c0 {12,S}\n27 H u0 p0 c0 {13,S}\n28 H u0 p0 c0 {2,S}\n\"\"\"\n expected_mol = Molecule().from_adjacency_list(expected_bonded_adjlist)\n self.assertEqual(b_mol.to_adjacency_list(), expected_bonded_adjlist)\n # the is_isomorphic test must come after the adjlist test since it changes the atom order\n self.assertTrue(b_mol.is_isomorphic(expected_mol))\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz10['dict'], multiplicity=1, charge=0)\n self.assertIsNotNone(s_mol)\n self.assertIsNotNone(b_mol)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz10['dict']['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz_dict_13, multiplicity=1, charge=0)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz_dict_13['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n self.assertEqual(s_mol.multiplicity, 1)\n self.assertEqual(b_mol.multiplicity, 1)\n self.assertFalse(any(atom.radical_electrons for atom in b_mol.atoms))", "def stereomers(gra):\n bool_vals = (False, True)\n\n def _expand_atom_stereo(sgr):\n atm_ste_keys = stereogenic_atom_keys(sgr)\n nste_atms = len(atm_ste_keys)\n sgrs = [set_atom_stereo_parities(sgr, dict(zip(atm_ste_keys,\n atm_ste_par_vals)))\n for atm_ste_par_vals\n in itertools.product(bool_vals, repeat=nste_atms)]\n return sgrs\n\n def _expand_bond_stereo(sgr):\n bnd_ste_keys = stereogenic_bond_keys(sgr)\n nste_bnds = len(bnd_ste_keys)\n sgrs = [set_bond_stereo_parities(sgr, dict(zip(bnd_ste_keys,\n bnd_ste_par_vals)))\n for bnd_ste_par_vals\n in itertools.product(bool_vals, repeat=nste_bnds)]\n return sgrs\n\n last_sgrs = []\n sgrs = [without_stereo_parities(gra)]\n\n while sgrs != last_sgrs:\n last_sgrs = sgrs\n sgrs = list(itertools.chain(*map(_expand_atom_stereo, sgrs)))\n sgrs = list(itertools.chain(*map(_expand_bond_stereo, sgrs)))\n\n return tuple(sorted(sgrs, key=frozen))", "def test_group(self):\n # leave out particle 0\n group = hoomd.group.tags(1,2)\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, ( 
0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (0,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))", "def test_to_multiframe_xyz_rdkit(self):\n from openff.toolkit.utils import RDKitToolkitWrapper\n\n tkw = RDKitToolkitWrapper()\n # load in an SDF of butane with multiple conformers in it\n molecules = Molecule.from_file(\n get_data_file_path(\"molecules/butane_multi.sdf\"),\n \"sdf\",\n toolkit_registry=tkw,\n )\n # now we want to combine the conformers to one molecule\n butane = molecules[0]\n for mol in molecules[1:]:\n butane.add_conformer(mol._conformers[0])\n\n # make sure we have the 7 conformers\n assert butane.n_conformers == 7\n with NamedTemporaryFile(suffix=\".xyz\") as iofile:\n # try and write out the xyz file\n butane.to_file(iofile.name, \"xyz\", toolkit_registry=tkw)\n\n # now lets check whats in the file\n with open(iofile.name) as xyz_data:\n data = xyz_data.readlines()\n # make sure we have the correct amount of lines writen\n assert len(data) == 112\n # make sure all headers and frame data was writen\n assert data.count(\"14\\n\") == 7\n for i in range(1, 8):\n assert f\"C4H10 Frame {i}\\n\" in data\n\n # now make sure the first line of the coordinates are correct in every frame\n coords = [\n \"C 1.8902000000 0.0426000000 0.2431000000\\n\",\n \"C 1.8976000000 -0.0233000000 0.2846000000\\n\",\n \"C -1.8794000000 -0.1793000000 -0.2565000000\\n\",\n \"C -1.5206000000 -0.0165000000 0.2787000000\\n\",\n \"C -1.4890000000 -0.2619000000 0.4871000000\\n\",\n \"C -1.4941000000 -0.2249000000 -0.0958000000\\n\",\n \"C -1.8827000000 -0.0372000000 0.1937000000\\n\",\n ]\n for coord in coords:\n assert coord in data", "def setSpherePickable(self, obj, dictName):\n obj.sim.reparentTo(self.selectable)\n obj.sim.find('**/pSphere1').node().setIntoCollideMask(BitMask32.bit(1))\n obj.sim.find('**/pSphere1').node().setTag(dictName, obj.id)", "def remove_alt_confs (hierarchy, always_keep_one_conformer=False) :\n for model in hierarchy.models() :\n for chain in model.chains() :\n for residue_group in chain.residue_groups() :\n atom_groups = residue_group.atom_groups()\n assert (len(atom_groups) > 0)\n if always_keep_one_conformer :\n if (len(atom_groups) == 1) and (atom_groups[0].altloc == '') :\n continue\n atom_groups_and_occupancies = []\n for atom_group in atom_groups :\n if (atom_group.altloc == '') :\n continue\n mean_occ = flex.mean(atom_group.atoms().extract_occ())\n atom_groups_and_occupancies.append((atom_group, mean_occ))\n atom_groups_and_occupancies.sort(lambda a,b: cmp(b[1], a[1]))\n for atom_group, occ in atom_groups_and_occupancies[1:] :\n residue_group.remove_atom_group(atom_group=atom_group)\n single_conf, occ = atom_groups_and_occupancies[0]\n single_conf.altloc = ''\n else :\n for atom_group in atom_groups :\n if (not atom_group.altloc in [\"\", \"A\"]) :\n residue_group.remove_atom_group(atom_group=atom_group)\n else :\n atom_group.altloc = \"\"\n if (len(residue_group.atom_groups()) == 0) :\n chain.remove_residue_group(residue_group=residue_group)\n if (len(chain.residue_groups()) == 0) :\n model.remove_chain(chain=chain)\n atoms = hierarchy.atoms()\n 
new_occ = flex.double(atoms.size(), 1.0)\n atoms.set_occ(new_occ)", "def test_clashing_atoms():\n benzene_path = examples_paths()['benzene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir, keep_openeye=True)\n system_id = 'explicit-system'\n system_description = yaml_content['systems'][system_id]\n system_description['pack'] = True\n system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])\n\n # Sanity check: at the beginning molecules clash\n toluene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(toluene_path, molecule_idx=0))\n benzene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(benzene_path, molecule_idx=0))\n assert pipeline.compute_min_dist(toluene_pos, benzene_pos) < pipeline.SetupDatabase.CLASH_THRESHOLD\n\n exp_builder = ExperimentBuilder(yaml_content)\n\n for sys_id in [system_id + '_vacuum', system_id + '_PME']:\n system_dir = os.path.dirname(\n exp_builder._db.get_system(sys_id)[0].position_path)\n\n # Get positions of molecules in the final system\n prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))\n inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))\n positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)\n topography = Topography(prmtop.topology, ligand_atoms='resname TOL')\n benzene_pos2 = positions.take(topography.receptor_atoms, axis=0)\n toluene_pos2 = positions.take(topography.ligand_atoms, axis=0)\n # atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')\n # benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)\n # toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)\n\n # Test that clashes are resolved in the system\n min_dist, max_dist = pipeline.compute_min_max_dist(toluene_pos2, benzene_pos2)\n assert min_dist >= pipeline.SetupDatabase.CLASH_THRESHOLD\n\n # For solvent we check that molecule is within the box\n if sys_id == system_id + '_PME':\n assert max_dist <= exp_builder._db.solvents['PME']['clearance'].value_in_unit(unit.angstrom)", "def get_atom_contrib(self, groupBy='m'):\n self.es1 = -0.5 * np.array(self.zs)**2.4 * h2kc\n self.types1 = {'m': self.atypes, 'n': self.zs}[groupBy]", "def format_molecule_for_orca(self):\n options = collections.defaultdict(lambda: collections.defaultdict(dict))\n self.update_geometry()\n factor = 1.0 if self.PYunits == 'Angstrom' else psi_bohr2angstroms\n\n text = \"\"\n text += '* xyz {} {}\\n'.format(self.molecular_charge(), self.multiplicity())\n\n n_frags = self.nfragments()\n for fr in range(n_frags):\n if self.fragment_types[fr] == 'Absent':\n pass\n else:\n for at in range(self.fragments[fr][0], self.fragments[fr][1] + 1):\n if self.fragment_types[fr] == 'Ghost':\n # TODO: add support for ghost atoms\n # atom += ':'\n continue\n x, y, z = self.atoms[at].compute()\n atom = self.symbol(at)\n if n_frags > 1:\n text += ' {:2s}({:d}) {:> 17.12f} {:> 17.12f} {:> 17.12f}\\n'.format(\\\n atom, fr + 1, x * factor, y * factor, z * factor)\n else:\n text += ' {:2s} {:> 17.12f} {:> 17.12f} {:> 17.12f}\\n'.format(\\\n atom, x * factor, y * factor, z * factor)\n text += '*'\n\n return text, options", "def set_channel_group(self, channel_group):\n super().set_channel_group(channel_group)\n self.skip_flags = self.flagspace.all_flags() # everything but 0", "def test_strip_atom_stereochemistry(self):\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n nitrogen_idx = 
[\n atom.molecule_atom_index for atom in mol.atoms if atom.symbol == \"N\"\n ][0]\n\n # TODO: This fails with RDKitToolkitWrapper because it perceives\n # the stereochemistry of this nitrogen as None\n assert mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None\n\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None", "def showSimpleCylinders(self): \n #from morphforge.morphology.util import TriMeshBuilderVerySimple\n import sys\n sys.path.append('/usr/share/pyshared/')\n \n #import morphforge\n from morphforge.morphology.mesh import MeshBuilderRings\n MonkeyPatchMayaVi()\n #import enthought.mayavi.mlab as mlab\n from mayavi import mlab\n \n assert len(self.morphs)==1\n mesh = MeshBuilderRings().build(self.morphs[0])\n \n \n @mlab.show\n def _showSimpleCylinders():\n\t \n #c = TriMeshBuilderVerySimple(self.morphs[0])\n #mlab.triangular_mesh(c.x, c.y, c.z, c.triangles, colormap=self.colormap)\n mlab.triangular_mesh(mesh.vertices[:,0], mesh.vertices[:,1], mesh.vertices[:,2], mesh.triangles, colormap=self.colormap)\n \n _showSimpleCylinders()", "def sew_dart(self, degree, dart1, dart2, merge_attribute = True):\r\n if self.is_free(degree, dart1) and self.is_free(degree, dart2):\r\n if degree==2:\r\n\t\t\t# involution sur le degre:\r\n self.link_darts(2,dart1,dart2)\r\n\t\t\t\r\n\t\t\t# involution sur (a0 a2)\r\n a0_b1 = self.alphas[0][dart1]\r\n a0_b2 = self.alphas[0][dart2]\r\n\t\t\t\r\n self.link_darts(2,a0_b1, a0_b2)\r\n \r\n\r\n self.link_darts(degree,dart1, dart2)\r\n\t #if merge_attribute:\r\n\t\t# pass#self.set_position(dart, np.mean([get_position(dart1), get_position(dart2)]))\r" ]
[ "0.7502093", "0.7083151", "0.696663", "0.6748644", "0.6419238", "0.5837475", "0.576132", "0.5678274", "0.5618676", "0.55914426", "0.5525545", "0.5486366", "0.54649514", "0.54351145", "0.54303616", "0.5415109", "0.5333785", "0.5333203", "0.5273259", "0.5203159", "0.51855856", "0.51046175", "0.51046175", "0.5024102", "0.5020128", "0.49730116", "0.49083507", "0.4899518", "0.4899518", "0.4895541", "0.48298353", "0.47531375", "0.47494993", "0.47401398", "0.47276866", "0.47245154", "0.4723265", "0.47042802", "0.4702397", "0.46981806", "0.4684555", "0.4658972", "0.46510276", "0.46425232", "0.46396774", "0.46346247", "0.46270737", "0.46154788", "0.45924473", "0.4589961", "0.45859405", "0.4584875", "0.45822716", "0.45785066", "0.4576877", "0.45697027", "0.45531353", "0.45503622", "0.45440066", "0.45357174", "0.45242375", "0.4520924", "0.4494208", "0.44898114", "0.4488593", "0.44805512", "0.44778287", "0.4470318", "0.4456328", "0.4448935", "0.44169223", "0.44169223", "0.44131818", "0.4399018", "0.4388066", "0.4384824", "0.43808892", "0.43784052", "0.43737292", "0.43695077", "0.43636173", "0.43405223", "0.43402606", "0.43370697", "0.43346432", "0.43326417", "0.43291086", "0.43169907", "0.43149117", "0.43141985", "0.43121105", "0.43078792", "0.43062404", "0.43056595", "0.43040133", "0.42989373", "0.42979616", "0.4294214", "0.42923275", "0.4289705" ]
0.6020158
5
StereoGroup atoms are in the reaction, and the reaction inverts the specified chirality at the stereo centers. > preserve stereo group
def test_reaction_inverts_stereo(self): reaction = '[C@:1]>>[C@@:1]' products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|') self.assertEqual(products, 'F[C@@H](Cl)Br |o1:1|') products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|') self.assertEqual(products, 'F[C@H](Cl)Br |&1:1|') products = _reactAndSummarize(reaction, 'FC(Cl)Br') self.assertEqual(products, 'FC(Cl)Br')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')", "def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. 
Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )", "def reflect_local_stereo(gra):\n atm_par_dct = atom_stereo_parities(gra)\n atm_par_dct = dict_.transform_values(\n atm_par_dct, lambda x: x if x is None else not x)\n gra = set_atom_stereo_parities(gra, atm_par_dct)\n return gra", "def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam): \n h, w, u = curImageInfo.originalImageResized.shape # shape after resize\n K = 1\n N = h * w * K\n gr = np.mean(curImageInfo.originalImageResized, 2) # not 3 as it is zero based :3\n grs = cv2.GaussianBlur(gr, (5, 5), 1)\n \n # One heuristic for converting depth to disparity\n disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;\n \n if conversionParam.spatialSmoothnessSwitch == True:\n # Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients\n # Vertical and Horizontal Edges\n dx = cv2.filter2D(grs, -1, np.transpose(np.array([[-1, 1, 0]])))\n dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))\n \n W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2 \n \n A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, \"csc\") \\\n + (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \\\n + (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))\n \n b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()\n \n [x, flag] = cg(A, b, np.transpose(disparity0).flatten(), 5e-1, 50)\n \n disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 1, K)\n else:\n disparity = disparity0\n \n curImageInfo.leftImage = curImageInfo.originalImage\n \n # The -ve sign to convert the white to black and black to white \n warpright = -disparity\n \n # only the warping interp2 is done on the original size image with no resizing to have good estimation\n warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]), \n interpolation=cv2.INTER_LINEAR)\n \n curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright), \n conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))\n \n return disparity", "def 
test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)", "def set_original_planes(self, display_opt):\n\n # get 4-chamber view\n four_ch_view_plane_normal = self.find_4ch_view(display_opt)\n\n # set rodriguez rotation around midline (apex to C)\n axis_of_rot = np.array(self.epi_apex_node - self.C)\n self.axis_of_rot_normalized = axis_of_rot/np.linalg.norm(axis_of_rot)\n\n # get 2-chamber view (90-counterclock rotation from 4ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized,\n math.radians(self.orig_view_angles[1])) # rodriguez rotation around midline\n two_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n # get 3-chamber view (additional 30-60 counterclock rotation from 3ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized, math.radians(self.orig_view_angles[2]))\n three_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n if display_opt:\n _ = self.mesh_slicer(four_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(two_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(three_ch_view_plane_normal, 'mesh')\n\n self.original_planes = np.vstack((four_ch_view_plane_normal,\n two_ch_view_plane_normal,\n three_ch_view_plane_normal))", "def process_stereo(self, image1, image2, disp1, disp2):\n return _elas.Elas_process_stereo(self, image1, image2, disp1, disp2)", "def test_reaction_preserves_stereo(self):\n reaction = '[C@:1]>>[C@:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def EnumerateStereoisomers(m, options=..., verbose=...): # -> Generator[Unknown, None, None]:\n ...", "def xyz2facestereo(x,y,z):\n ax = np.abs(x)\n ay = np.abs(y)\n az = np.abs(z)\n mskx = (y != x) & (z != x)\n mskyz = z != y\n msk0 = ( x >= ay) & ( x >= az) & mskx\n msk3 = (-x >= ay) & (-x >= az) & mskx\n msk1 = ( y >= az) & mskyz\n msk4 = (-y >= az) & mskyz\n msk2 = z > 0\n f = (1-msk0)*(msk3*3 + (1-msk3)*(msk1 + (1-msk1)*(msk4*4 + (1-msk4)*(msk2*2 + (1-msk2)*5))))\n xnew = np.choose(f, ( y, -x, -x, -z, -z, y))\n ynew = np.choose(f, ( z, z, -y, -y, x, x))\n znew = np.choose(f, ( x, y, z, -x, -y, -z))\n X,Y = xyz2stereo(xnew, ynew, znew)\n\n return f,X,Y", "def inverse(self, point):\n raise NotImplementedError('The Lie group inverse is not implemented.')", "def atom_parity_evaluator_to_local_stereo_(gra):\n return atom_parity_evaluator_from_local_stereo_(gra)", "def bond_parity_evaluator_to_local_stereo_(gra):\n return bond_parity_evaluator_from_local_stereo_(gra)", "def atom_stereo_keys(sgr):\n atm_ste_keys = dict_.keys_by_value(atom_stereo_parities(sgr),\n lambda x: x in [True, False])\n return atm_ste_keys", "def DeMorgan_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, A), disj, (neg, B)), (neg, (A, conj, B)) ],\n\t\t\t[ (neg, (A, conj, B)), ((neg, A), disj, (neg, B)) ],\n\t\t\t[ ((neg, A), conj, (neg, B)), (neg, (A, disj, B)) ],\n\t\t\t[ (neg, (A, disj, B)), ((neg, A), conj, (neg, B)) ]\n\t\t]))", "def __invert__(self):\n result = self.clone()\n if result.rank == 0:\n result.share ^= -1\n return result", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 
'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def invert(base = None, corrective = None, name = None):\n cmds.loadPlugin('cvshapeinverter_plugin.py', qt = True)\n if not base or not corrective:\n sel = cmds.ls(sl = True)\n if not sel or len(sel) != 2:\n cmds.undoInfo(closeChunk = True)\n raise RuntimeError, 'Select base then corrective'\n base, corrective = sel\n\n # Get points on base mesh\n base_points = get_points(base)\n point_count = base_points.length()\n\n # Get points on corrective mesh\n corrective_points = get_points(corrective)\n\n # Get the intermediate mesh\n orig_mesh = get_shape(base, intermediate = True)\n\n # Get the component offset axes\n orig_points = get_points(orig_mesh)\n x_points = OpenMaya.MPointArray(orig_points)\n y_points = OpenMaya.MPointArray(orig_points)\n z_points = OpenMaya.MPointArray(orig_points)\n\n cmds.undoInfo(openChunk = True)\n for i in range(point_count):\n x_points[i].x += 1.0\n y_points[i].y += 1.0\n z_points[i].z += 1.0\n\n set_points(orig_mesh, x_points)\n x_points = get_points(base)\n set_points(orig_mesh, y_points)\n y_points = get_points(base)\n set_points(orig_mesh, z_points)\n z_points = get_points(base)\n set_points(orig_mesh, orig_points)\n\n # Create the mesh to get the inversion deformer\n if not name:\n name = '%s_inverted' % corrective\n\n inverted_shapes = cmds.duplicate(base, name = name)[0]\n # Delete the unnessary shapes\n shapes = cmds.listRelatives(inverted_shapes, children = True, shapes = True, path = True)\n for s in shapes:\n if cmds.getAttr('%s.intermediateObject' % s):\n cmds.delete(s)\n set_points(inverted_shapes, orig_points)\n # Unlock the transformation attrs\n for attr in 'trs':\n for x in 'xyz':\n cmds.setAttr('%s.%s%s' % (inverted_shapes, attr, x), lock = False)\n cmds.setAttr('%s.visibility' % inverted_shapes, 1)\n deformer = cmds.deformer(inverted_shapes, type = 'cvShapeInverter')[0]\n\n # Figure out which rig facial controls are involved in the deformation of this base mesh.\n rigCharacterGroup = SERigObjectTypeHelper.findRelatedRigCharacterGroup(base)\n modifiedFaceControls = SERigHumanFacialComponent.getTransModifiedFaceControls(rigCharacterGroup)\n modifiedFaceProxyControls = SERigHumanFacialComponent.getTransModifiedFaceProxyControls(rigCharacterGroup)\n\n # Cache all control transformation changes involved in the deformation. 
\n controlTransTable = {}\n for control in modifiedFaceControls:\n trans = SERigObjectTypeHelper.getRigCtrlTransByCtrlName(control)\n controlTransTable[control] = trans\n\n for control in modifiedFaceProxyControls:\n trans = SERigObjectTypeHelper.getRigCtrlTransByCtrlName(control)\n controlTransTable[control] = trans\n\n encodedControlTransTable = cPickle.dumps(controlTransTable)\n\n # Cache control trans table to the deformer node.\n at = 'controlTransTable'\n cmds.addAttr(deformer, ln = at, dataType = 'string')\n cmds.setAttr(deformer + '.' + at, encodedControlTransTable, type = 'string', l = 1)\n\n # Debug.\n #res = str(cmds.getAttr(deformer + '.' + at))\n #table = cPickle.loads(res)\n #print(table)\n\n # Calculate the inversion matrices\n deformer_mobj = get_mobject(deformer)\n fn_deformer = OpenMaya.MFnDependencyNode(deformer_mobj)\n plug_matrix = fn_deformer.findPlug('inversionMatrix', False)\n fn_matrix_data = OpenMaya.MFnMatrixData()\n for i in range(point_count):\n matrix = OpenMaya.MMatrix()\n set_matrix_row(matrix, x_points[i] - base_points[i], 0)\n set_matrix_row(matrix, y_points[i] - base_points[i], 1)\n set_matrix_row(matrix, z_points[i] - base_points[i], 2)\n set_matrix_row(matrix, corrective_points[i], 3)\n matrix = matrix.inverse()\n matrix_mobj = fn_matrix_data.create(matrix)\n\n plug_matrixElement = plug_matrix.elementByLogicalIndex(i)\n plug_matrixElement.setMObject(matrix_mobj)\n\n # Store the base points.\n fn_point_data = OpenMaya.MFnPointArrayData()\n point_data_mobj = fn_point_data.create(base_points)\n plug_deformed_points = fn_deformer.findPlug('deformedPoints', False)\n plug_deformed_points.setMObject(point_data_mobj)\n\n cmds.connectAttr('%s.outMesh' % get_shape(corrective), '%s.correctiveMesh' % deformer)\n\n cmds.undoInfo(closeChunk = True)\n return inverted_shapes", "def mask(\n self, enc: SplitEncoding, random: bool = False, detach: bool = False\n ) -> tuple[SplitEncoding, SplitEncoding]:\n zs = enc.zs\n zy = enc.zy\n if detach:\n zs = zs.detach()\n zy = zy.detach()\n if random:\n zs_m = SplitEncoding(zs=torch.randn_like(zs), zy=zy)\n zy_m = SplitEncoding(zs=zs, zy=torch.randn_like(zy))\n else:\n zs_m = SplitEncoding(zs=torch.zeros_like(zs), zy=zy)\n zy_m = SplitEncoding(zs=zs, zy=torch.zeros_like(zy))\n return zs_m, zy_m", "def evert(self):\n for e in self.edges:\n self.invert()\n for f in self.faces:\n f.invert()", "def convertStereo(u, v, disparity, info):\n stereoModel = image_geometry.StereoCameraModel()\n stereoModel.fromCameraInfo(info['l'], info['r'])\n (x,y,z) = stereoModel.projectPixelTo3d((u,v), disparity)\n\n cameraPoint = PointStamped()\n cameraPoint.header.frame_id = info['l'].header.frame_id\n cameraPoint.header.stamp = rospy.Time.now()\n cameraPoint.point = Point(x,y,z)\n return cameraPoint", "def GetStereoisomerCount(m, options=...): # -> Any:\n ...", "def has_stereo(gra):\n return bool(atom_stereo_keys(gra) or bond_stereo_keys(gra))", "def bond_stereo_keys(sgr):\n bnd_ste_keys = dict_.keys_by_value(bond_stereo_parities(sgr),\n lambda x: x in [True, False])\n return bnd_ste_keys", "def __invert__(self):\r\n if self.field.characteristic == 2:\r\n return runtime.invert(self)\r\n\r\n return super().__invert__()", "def SwapSides(self):\n for c in self.reactants:\n c.coeff = -c.coeff", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = 
cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def flipNormals(self):\n self.flip = not self.flip", "def test_parameterize_mol_missing_stereo_rdkit(self, force_field):\n toolkit_registry = ToolkitRegistry(\n toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n )\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, 
allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def stereo(func):\n @wraps(func)\n def stereoFunc(*args, **kwargs):\n # trying to find a way to have a method\n # calling another method not do the stereo repeat\n # so if calling an internal func from a stereo func,\n # add the stereo kw arg to call\n # it will be removed before calling underlying func\n\n if 'stereo' in kwargs.keys():\n _stereo = kwargs['stereo']\n del(kwargs['stereo'])\n else:\n _stereo = 1\n res = func(*args, **kwargs)\n if args[0].stereo and _stereo: # self.stereo\n _LOGGER.debug(\"Stereo Command {}:{}\".format(func.__name__, args))\n largs = list(args)\n if type(largs[1]) == str:\n largs[1] = chr(ord(largs[1])+1)\n else:\n largs[1] = largs[1] + 1\n if func.__name__[3:9] == \"Matrix\": # do stereo on input and output\n _LOGGER.debug(\"Matrix Stereo Command {}\".format(func.__name__))\n if type(largs[2]) == str:\n largs[2] = chr(ord(largs[2])+1)\n else:\n largs[2] = largs[2] + 1\n res2 = func(*largs, **kwargs)\n if res != res2:\n _LOGGER.debug(\"Stereo out of sync {} : {}\".format(res, res2))\n warnings.warn(\"Stereo out of sync\", RuntimeWarning)\n if res is not None:\n return res\n return stereoFunc", "def setupForRigPose(self):\n\n # unlock joint movers\n cmds.select(\"JointMover\", hi=True)\n jmNodes = cmds.ls(sl=True)\n for node in jmNodes:\n cmds.lockNode(node, lock=False)\n\n # find the mover shapes and set their visibility\n movers = self.returnJointMovers\n globalMovers = movers[0]\n shapes = []\n\n for each in movers:\n for mover in each:\n child = cmds.listRelatives(mover, children=True, shapes=True)\n if len(child) > 0:\n shapes.append(mover + \"|\" + child[0])\n\n for shape in shapes:\n cmds.setAttr(shape + \".v\", lock=False)\n cmds.setAttr(shape + \".v\", 0, lock=True)\n\n # show global movers\n shapes = []\n for mover in globalMovers:\n child = cmds.listRelatives(mover, children=True, shapes=True)\n if len(child) > 0:\n shapes.append(mover + \"|\" + child[0])\n\n for shape in shapes:\n cmds.setAttr(shape + \".v\", lock=False)\n cmds.setAttr(shape + \".v\", 1, lock=True)\n\n # unlock mover group for this module and make visible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", 1)\n\n # hide the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 0)\n cmds.lockNode(parent, lock=True)\n\n # get the joints created by this module\n joints = 
self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.parentConstraint(joint + \"_mover_offset\", joint)\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.parentConstraint(self.name + \"_\" + jointBaseName + \"_mover_offset\", joint)\n\n # lock joint movers\n cmds.select(\"JointMover\", hi=True)\n jmNodes = cmds.ls(sl=True)\n for node in jmNodes:\n cmds.lockNode(node, lock=True)", "def decompose_level_lex(self, ranks):\n for index in self.box_space.points:\n index_1d = self.box_space.index_to_1d(index)\n self.rank_of_box[index] = (ranks * index_1d) / self.box_space.volume", "def test_random_permute_inverse_changes_group(self):\n # reproducible arbitrariness\n np.random.seed(232)\n\n nchan = 3\n nsteps = 20\n rho = 1.0/4\n target = np.random.randn(nchan, nsteps)\n\n controller = LinearController(self.G, target, tau=None)\n\n controller.set_random_permute_inverse(rho)\n self.assertIsNotNone(controller.permute_inverse)\n\n n_per_group = self.N/nchan\n groups0 = np.arange(self.N)/n_per_group\n groups1 = controller.permute_inverse/n_per_group\n\n # check that the right fraction of assignments are kept intact\n self.assertEqual(np.sum(groups0 != groups1), rho*self.N)", "def proc_sw_only_morphs(forward_pairs, morphs, backward_pairs):\n sandwich_pairs = []\n if not backward_pairs:\n forward_pairs[-1].morphs.extend(morphs)\n elif len(morphs) == 1:\n morph = morphs[0]\n morph_str = str(morph)\n if morph_str in ['이/VCP', '하/VX'] and backward_pairs[0].morphs[0].tag.startswith('E'):\n # '이' 긍정지정사나 '하' 보조용언 뒤에 어미가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == '에/JKB' and backward_pairs[0].morphs[0].tag == 'JX':\n # '에' 부사격조사 뒤에 보조사가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == 'ᆯ/ETM' and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 'ㄹ' 관형형전성어미 앞에 용언이 나올 경우\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag in ['EC', 'EF'] and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 연결어미나 종결어미 앞에 용언이 나올 경우\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag.startswith('XS'):\n # append suffixes to the end of forward pair list\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n else:\n raise AlignError()\n else:\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n if morphs_str == '(/SS + 대북/NNG + (/SS + 대북/NNG + )/SS + )/SS' and forward_pairs[-1].word_str == u'대북':\n del morphs[:]\n elif morphs_str == '(/SS + 동경/NNP + )/SS' and forward_pairs[-1].word_str == u'도쿄':\n del morphs[:]\n else:\n raise AlignError()\n return sandwich_pairs", "def blendShapeEnvelopeOff():\n obj = cmds.ls(selection = True)\n history = cmds.listHistory(obj)\n bsHistory = cmds.ls(history, type = 'blendShape')\n for bs in bsHistory:\n cmds.setAttr(bs+'.'+'envelope',0.0) #note not changing blend target weights", "def test_parameterize_mol_missing_stereo_openeye(self, force_field):\n toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])\n\n molecule = 
Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def setReversible(self, *args):\n return _libsbml.Reaction_setReversible(self, *args)", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def Inverted(self):\n return self._CreateTransformed(self._filtered_symbols,\n filtered_symbols=self._symbols,\n section_name=SECTION_MULTIPLE)", "def resetCoronalSegment(self):\r\n #research\r\n profprint()\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(0)\r\n sGreen.SetOrientationToCoronal()\r\n #sw = slicer.app.layoutManager().sliceWidget(\"Green\")\r\n #sw.fitSliceToBackground()\r\n sGreen.Modified()", "def _add_pairblend(self, dgmod, rigid, transform):\n\n assert isinstance(dgmod, cmdx.DGModifier)\n\n pair_blend = dgmod.create_node(\"pairBlend\")\n dgmod.set_attr(pair_blend[\"isHistoricallyInteresting\"], False)\n\n # Establish initial values, before keyframes\n # tm = cmdx.Tm(self._cache[(transform, \"matrix\")])\n\n # Read from matrix, as opposed to the rotate/translate channels\n # to account for jointOrient, pivots and all manner of things\n # translate = tm.translation()\n # rotate = tm.rotation()\n\n translate = self._cache[(transform, \"translate\")]\n rotate = self._cache[(transform, \"rotate\")]\n\n dgmod.set_attr(pair_blend[\"inTranslate1\"], translate)\n dgmod.set_attr(pair_blend[\"inRotate1\"], rotate)\n\n dgmod.connect(rigid[\"outputTranslateX\"], pair_blend[\"inTranslateX2\"])\n dgmod.connect(rigid[\"outputTranslateY\"], pair_blend[\"inTranslateY2\"])\n dgmod.connect(rigid[\"outputTranslateZ\"], pair_blend[\"inTranslateZ2\"])\n dgmod.connect(rigid[\"outputRotateX\"], pair_blend[\"inRotateX2\"])\n dgmod.connect(rigid[\"outputRotateY\"], pair_blend[\"inRotateY2\"])\n dgmod.connect(rigid[\"outputRotateZ\"], pair_blend[\"inRotateZ2\"])\n\n # Let the animator see the raw animation values, no physics\n dgmod.connect(self._tree_root[0][\"simulated\"], pair_blend[\"weight\"])\n\n if self._opts[\"autoKey\"]:\n # Generate default animation curves, it's expected since you can no\n # longer see whether channels are keyed or not, now being green.\n time = cmdx.currentTime()\n mapping = (\n (\"animCurveTL\", translate.x, \"inTranslateX1\"),\n (\"animCurveTL\", translate.y, \"inTranslateY1\"),\n (\"animCurveTL\", translate.z, \"inTranslateZ1\"),\n (\"animCurveTA\", rotate.x, \"inRotateX1\"),\n (\"animCurveTA\", rotate.y, \"inRotateY1\"),\n (\"animCurveTA\", rotate.z, \"inRotateZ1\")\n )\n\n for curve, value, dst in mapping:\n curve = dgmod.create_node(curve)\n curve.key(time, value)\n dgmod.connect(curve[\"output\"], pair_blend[dst])\n\n # Transfer existing animation/connections\n for src, dst in transform.data.get(\"priorConnections\", {}).items():\n dst = pair_blend[dst]\n dgmod.connect(src, dst)\n\n commands._connect_transform(dgmod, pair_blend, transform)\n\n return pair_blend", "def flip(self):", "def unsetReversible(self):\n return _libsbml.Reaction_unsetReversible(self)", "def testMoreStereo(self):\r\n smi_and_cansmi = [\r\n 
('Cl[C@](C)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](C)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](C)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Cl)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(I)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](C)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](C)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](C)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](C)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Cl)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Br)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Br)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Br)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Br)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](C)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](I)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](I)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](I)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Cl)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Cl)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](C)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](C)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Br)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Br)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Br)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Br)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](I)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](I)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](I)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](I)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](I)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](I)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](I)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(I)C', 'C[C@@](Cl)(Br)I')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def revolver(self):\r\n\t\tself.__revuelto=True", "def build(self, X, Y, w=None):\n super(MorseSmaleComplex, self).build(X, Y, w)\n\n if self.debug:\n sys.stdout.write(\"Decomposition: \")\n start = time.perf_counter()\n\n stableManifolds = MorseComplex(debug=self.debug)\n unstableManifolds = MorseComplex(debug=self.debug)\n\n stableManifolds._build_for_morse_smale_complex(self, False)\n unstableManifolds._build_for_morse_smale_complex(self, True)\n\n self.min_indices = unstableManifolds.max_indices\n self.max_indices = stableManifolds.max_indices\n\n # If a degenerate point is both a minimum and a maximum, it\n # could potentially appear twice, but would be masked by the\n # minimum key which would wipe the maximum merge\n self.merge_sequence = stableManifolds.merge_sequence.copy()\n self.merge_sequence.update(unstableManifolds.merge_sequence)\n self.persistences = sorted(\n stableManifolds.persistences + unstableManifolds.persistences\n )\n\n self.base_partitions = {}\n base = np.array([[None, None]] * len(Y))\n for key, items in unstableManifolds.base_partitions.items():\n base[np.array(items), 0] = key\n for key, items in stableManifolds.base_partitions.items():\n base[np.array(items), 1] = key\n\n keys = set(map(tuple, base))\n for key in keys:\n self.base_partitions[key] = np.where(\n np.logical_and(base[:, 0] == 
key[0], base[:, 1] == key[1])\n )[0]\n\n if self.debug:\n end = time.perf_counter()\n sys.stdout.write(\"%f s\\n\" % (end - start))", "def composite_scene(orig_scene, mask_seam, match_scene, dialation_mask, orig_scene1, method=\"paste\", repeat=1):\n avg_pixel = np.mean(orig_scene1[orig_scene1 != 0])\n \n output = np.zeros(orig_scene.shape)\n if method==\"seamlessclone\":\n width, height, _ = match_scene.shape\n center = (height/2, width/2)\n \n # create plain white mask\n mask = np.zeros(match_scene.shape, match_scene.dtype) + 255\n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = avg_pixel\n \n \n \n #image_to_compare\n output_blend = cv2.seamlessClone(match_scene.astype(np.uint8), \n orig_scene_impute.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n #implot(output_blend)\n # now reapply the mask with alpha blending to fix it up again.\n \n \"\"\"\n TO DO CHANGE IT FROM THE DILATION + MASK SEAM, NEED TO FIND THE INTERSECTION OF THESE TWO TO BE THE \n REAL MASK TO BLUR\n \"\"\"\n dilation_mask = mask_seam.copy()\n \n dilation_mask = cv2.GaussianBlur(dilation_mask, (101,101), 0) # blur mask and do a alpha blend... between the \n #implot(dilation_mask, 'gray')\n \n dilation_mask = dilation_mask/255.0\n \n \n \n # 0 is black, 1 is white\n #output = cv2.addWeighted(output_blend, dialation_mask, orig_scene, 1-dialation_mask)\n #print dialation_mask\n #print dialation_mask.shape\n #print output_blend.shape\n #a = cv2.multiply(output_blend.astype(np.float), dialation_mask)\n \n for _ in range(10):\n # some kind of layered alpha blend by the dilation mask values...\n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output = cv2.seamlessClone(match_scene.astype(np.uint8), \n output_blend.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n # complete blend with seamlessclone...\n \n \n # output = np.maximum(output_blend, orig_scene_impute)\n # or just darken...\n \n \n #if repeat == 1:\n # return output_blend\n #output = composite_scene(orig_scene_impute, mask_seam, output_blend, dialation_mask, method=\"paste\")\n \n\n\n elif method==\"paste\":\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n \n elif method==\"alphablend\":\n output_blend = output.copy()\n output_blend[mask_seam == 0] = orig_scene[mask_seam == 0]\n output_blend[mask_seam != 0] = match_scene[mask_seam != 0]\n \n \n \n \n else:\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n return output", "def revert_coordinate_space(channels, R0, T0):\n n, d = channels.shape\n\n channels_rec = copy.copy(channels)\n R_prev = R0\n T_prev = T0\n rootRotInd = np.arange(3, 6)\n\n # Loop through the passed posses\n for ii in range(n):\n\n R_diff = expmap2rotmat(channels[ii, rootRotInd])\n R = R_diff.dot(R_prev)\n\n channels_rec[ii, rootRotInd] = rotmat2expmap(R)\n T = T_prev + (\n 
(R_prev.T).dot(np.reshape(channels[ii, :3], [3, 1]))\n ).reshape(-1)\n channels_rec[ii, :3] = T\n\n T_prev = T\n R_prev = R\n\n return channels_rec", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def orient(self):\n if self.oriented:\n return\n self.substrates = self.substrates.simplify(SHP_EPSILON)\n self.substrates = shapely.ops.orient(self.substrates)\n self.oriented = True", "def stereo_score(alignment):\n #dictionary with properties for each residue\n dic_prop = {'I': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'L': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'V': [1, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'C': [1, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n 'A': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'G': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'M': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'F': [1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'W': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'H': [1, 1, 0, 0, 0, 0, 1, 1, 0, 1],\n 'K': [1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'R': [0, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'E': [0, 1, 0, 0, 0, 0, 0, 0, 1, 1],\n 'Q': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'D': [0, 1, 1, 0, 0, 0, 0, 0, 1, 1],\n 'N': [0, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'S': [0, 1, 1, 0, 1, 0, 0, 0, 0, 0],\n 'T': [1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'P': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],\n 'B': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Z': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n '-': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n score_list = []\n for i in range(0, alignment.get_alignment_length()):\n #extract the unique residues in the alignment\n column = ''.join(set(alignment[:, i]))\n stereo_list = []\n #loop through each residue\n for res in range(0, len(column)):\n #replace the residue with list of properties\n residue = column[res]\n #append the properties list to a\n stereo_prop = dic_prop.get(residue)\n stereo_list.append(stereo_prop)\n #number of common properties\n count_stereo = sum(len(set(i)) == 1 for i in zip(*stereo_list))\n #add the number of properties to a list\n score_list.append(count_stereo)\n score_list_final = [float(i*0.1) for i in score_list]\n return score_list_final", "def find_4ch_view(self, display_opt):\n 
# step 1: check if self.C exists\n if self.C is None:\n print('Center of inner rim poly is not set! exiting..')\n sys.exit()\n\n # step 2: find y direction\n pt_rv_dir = 50.0*self.rv_dir\n\n # set plane_pts :\n self.plane_pts = np.vstack((self.C, pt_rv_dir, self.epi_apex_node))\n\n # construct plane using p1, p2 and the apex node\n four_ch_view_plane_normal = find_plane_eq(self.C, pt_rv_dir, self.epi_apex_node)\n\n if display_opt:\n # display x-y-z actor\n axes = get_axes_actor([50,50,50], [0,0,0])\n\n c_irp_act = include_points(self.C, 1, 10, (1,0,1))\n pt_rv_dir_act = include_points(pt_rv_dir, 1, 10, (1,1,0))\n epi_apex_act = include_points(list(self.epi_apex_node), 1, 10, (1,0,1))\n\n ren = vtk.vtkRenderer()\n ren.AddActor(self.meshActor)\n ren.AddActor(c_irp_act)\n ren.AddActor(epi_apex_act)\n ren.AddActor(pt_rv_dir_act)\n ren.AddActor(axes)\n vtk_show(ren)\n # # step 1: find center of endo_poly\n # endo_numpy = numpy_support.vtk_to_numpy(self.endo_poly.GetPoints().GetData())\n # com = np.mean(endo_numpy, 0)\n #\n # # step 2: construct line rv_dir that is translated at position com\n # pSource = com - 100*self.rv_dir\n # pTarget = com + 100*self.rv_dir\n\n # # step 3: find intersection of line with endo_poly\n # bspTree = vtk.vtkModifiedBSPTree()\n # bspTree.SetDataSet(self.endo_poly) # cut through endo polydata (not mesh)\n # bspTree.BuildLocator()\n #\n # # set these as plane_pts\n # p1 = pSource\n # p2 = pTarget\n #\n # four_ch_valve_pts = [p1, p2]\n # self.plane_pts = np.vstack((p1, p2, self.epi_apex_node))\n #\n # # construct plane using p1, p2 and the apex node\n # four_ch_view_plane_normal = find_plane_eq(p1, p2, self.epi_apex_node)\n\n # if display_opt:\n # # display x-y-z actor\n # axes = get_axes_actor([80,80,80], [0,0,0])\n #\n # p1_act = include_points(p1, 1, 10, (1,0,1))\n # p2_act = include_points(p2, 1, 10, (1,1,0))\n # epi_apex_act = include_points(list(self.epi_apex_node), 1, 10, (1,0,1))\n # endo_apex_act = include_points(list(self.endo_apex_node), 1, 10, (1,0,1))\n #\n # ren = vtk.vtkRenderer()\n # ren.AddActor(self.meshActor)\n # ren.AddActor(p1_act)\n # ren.AddActor(p2_act)\n # ren.AddActor(axes)\n # ren.AddActor(epi_apex_act)\n # ren.AddActor(endo_apex_act)\n #\n # vtk_show(ren)\n\n # display the 4-ch view\n _ = self.mesh_slicer(four_ch_view_plane_normal, 'mesh')\n\n return four_ch_view_plane_normal", "def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()", "def set_channel_group(self, channel_group):\n super().set_channel_group(channel_group)\n self.skip_flags = self.flagspace.all_flags() # everything but 0", "def inv(self):\n return self.conjugate()", "def __invert__(self):\n return self.fam.c_unop('invert', self)", "def voxelize4(self, materials):\n\t\tlayers = list()\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\n\t\tvolumeGeneral = list()\n\t\tm = 0\n\t\tfor i in self.slicePoints:\n\t\t\t#print self.boolResult[m].shape\n\t\t\ttupleResultR = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in 
range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(self.boolLayers[m].shape, dtype=float))\n\t\t\t\n\t\t\tj = numpy.nditer(self.boolLayers[m], flags=['multi_index'], op_flags=['readwrite'])\n\t\t\twhile not j.finished:\n\t\t\t\tif j[0] == True:\n\t\t\t\t#tupleResult[j.multi_index] = round((i[direction] - minValue) * ratio)\n\t\t\t\t#tupleResult[j.multi_index] = 78\n\t\t\t\t\tprint type(j.multi_index)\n\t\t\t\t\tprint j.multi_index\n\t\t\t\t\t#tupleResult[j.multi_index] = planeWeight * math.fabs((j.multi_index[1] - planeOrigin[0]) * planeNormal[0] + (j.multi_index[0] - planeOrigin[1]) * planeNormal[1] + (i[2] - planeOrigin[2]) * planeNormal[2]) + pointWeight * math.sqrt(math.pow((j.multi_index[1]- pointValue[0]),2) + math.pow((j.multi_index[0] - pointValue[1]), 2)+math.pow((i[2] - pointValue[2]),2))\n\t\t\t\t\t\n\t\t\t\t\tdistanceList = []\n\t\t\t\t\ttotalDistance = 0.0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Plane\":\n\t\t\t\t\t\t\tGplane = math.fabs((j.multi_index[1] - materials[k].origin[0]) * materials[k].normal[0] + (j.multi_index[0] - materials[k].origin[1]) * materials[k].normal[1] + (i[2] - materials[k].origin[2]) * materials[k].normal[2])\n\t\t\t\t\t\t\tdistanceList.append(Gplane)\n\t\t\t\t\t\t\ttotalDistance += Gplane\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Point\":\n\t\t\t\t\t\t\tGpoint = (math.sqrt(math.pow((j.multi_index[1]- materials[k].point[0]),2) + math.pow((j.multi_index[0] - materials[k].point[1]), 2)+math.pow((i[2] - materials[k].point[2]),2)))\n\t\t\t\t\t\t\tdistanceList.append(Gpoint)\n\t\t\t\t\t\t\ttotalDistance += Gpoint\n\t\t\t\t\tfor k in range(len(distanceList)):\n\t\t\t\t\t\tdistanceList[k] = distanceList[k] / totalDistance\n\t\t\t\t\t\tdistanceList[k] = 1.0 - distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleResultR[j.multi_index] += materials[k].materialColor[0] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultG[j.multi_index] += materials[k].materialColor[1] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultB[j.multi_index] += materials[k].materialColor[2] * distanceList[k] * materials[k].weight\n\t\t\t\t\t#if(tupleResult[j.multi_index] > 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(tupleResult[j.multi_index]) \n\t\t\t\t\t#if(tupleResult[j.multi_index] == 0):\n\t\t\t\t\t#\t\ttupleResult[j.multi_index] = 1\n\t\t\t\t\t#if(tupleResult[j.multi_index] < 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(0 - tupleResult[j.multi_index]) \n\t\t\t\telse:\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = 0.0\n\t\t\t\tj.iternext()\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor k in range(len(materials)):\n\t\t\t\tlayerMaterial[k].append(tupleMaterial[k])\n\t\t\t\t\n\t\t\tm = m + 1\n\t\tprint \"i got here\"\n\t\tvolumeR=numpy.array(layersR) # create the 3d volume\n\t\tvolumeG=numpy.array(layersG) \n\t\tvolumeB=numpy.array(layersB)\n\t\tfor k in range(len(materials)):\n\t\t\tself.volumeComposition[k] = numpy.array(layerMaterial[k])\n\t\t\n\t\tvolumeGeneral.append(volumeR)\n\t\tvolumeGeneral.append(volumeG)\n\t\tvolumeGeneral.append(volumeB)\n\t\treturn volumeGeneral", "def compute_photometric_stereo_impl(lights, images):\n height, width, channel 
= images[0].shape\n albedo = np.zeros((height, width, channel))\n normals = np.zeros((height, width, 3))\n L = lights.T\n left = np.linalg.inv(L.T.dot(L))\n for i in range(height):\n for j in range(width):\n for c in range(channel):\n I = [img[i,j,c] for img in images]\n G = left.dot(L.T.dot(I))\n k = np.linalg.norm(G)\n if k < 1e-7: k = 0\n else: normals[i][j] += G/k\n albedo[i][j][c] = k\n normals /= channel\n return albedo, normals", "def normalise(self):\n for at in self.atoms:\n if at.x < 0. :\n at.x = self.coordx + at.x\n if at.y < 0. :\n at.y = self.coordy + at.y\n if at.z < 0. :\n at.z = self.coordz + at.z", "def mirrorTransformations(self):\n\n currentSelection = cmds.ls(sl=True)\n\n # get the mirror module\n networkNode = self.returnNetworkNode\n mirrorModule = cmds.getAttr(networkNode + \".mirrorModule\")\n moduleName = cmds.getAttr(networkNode + \".moduleName\")\n parent = cmds.getAttr(networkNode + \".parentModuleBone\")\n\n # get mirror module instance and information\n mirrorInst = self.returnMirrorModuleInst\n\n # turn off aim mode\n mirrorInst.aimMode_Setup(False)\n\n # turn off coplanar mode IF it exists on the module\n try:\n state = mirrorInst.coplanarBtn.isChecked()\n if state:\n mirrorInst.coplanarBtn.setChecked(False)\n mirrorInst.coplanarMode()\n except:\n pass\n\n moverTypes = self.returnJointMovers\n for moverType in moverTypes:\n for jointMover in moverType:\n attrs = cmds.listAttr(jointMover, keyable=True)\n\n for attr in attrs:\n value = cmds.getAttr(jointMover + \".\" + attr)\n\n mirrorMover = jointMover.partition(moduleName)[2]\n mirrorMover = mirrorModule + mirrorMover\n mirrorAttrs = [\"translateX\", \"translateY\", \"translateZ\"]\n\n if attr in mirrorAttrs:\n cmds.setAttr(mirrorMover + \".\" + attr, value * -1)\n else:\n cmds.setAttr(mirrorMover + \".\" + attr, value)\n\n cmds.select(clear=True)\n if len(currentSelection) > 0:\n cmds.select(currentSelection)\n\n # turn aim mode on\n mirrorInst.aimMode_Setup(True)\n\n # extend functionality\n self.mirrorTransformations_Custom()", "def concerted_unimolecular_elimination(rct_zmas, prd_zmas):\n\n # Initialize info for the returns\n ret = None, None, None, None, None\n finish_build = True\n\n # Attempt to build appropriate z-matrix\n prd_zmas, prd_gras = shifted_standard_zmas_graphs(\n prd_zmas, remove_stereo=True)\n if len(rct_zmas) == 1:\n count = 1\n while True:\n rct_zmas, rct_gras = shifted_standard_zmas_graphs(\n rct_zmas, remove_stereo=True)\n init_zma, = rct_zmas\n\n tras, _, _ = automol.graph.reac.elimination(rct_gras, prd_gras)\n if tras is not None:\n if len(tras[0]) == 1:\n tras = [tras]\n min_dist = 100.\n frm_bnd_key = None\n for tra_i in tras:\n # Get the bond formation and breaking keys\n bnd_key, = automol.graph.trans.formed_bond_keys(tra_i)\n geo = automol.zmatrix.geometry(rct_zmas[0])\n dist = automol.geom.distance(geo, *list(bnd_key))\n if dist < min_dist:\n min_dist = dist\n frm_bnd_key = bnd_key\n tra = tra_i\n brk_keys = automol.graph.trans.broken_bond_keys(tra)\n brk_bnd_key1, brk_bnd_key2 = brk_keys\n init_zma, = rct_zmas\n\n\n # Get index for migrating atom (or bond-form atom in group)\n for bnd_key in (brk_bnd_key1, brk_bnd_key2):\n if bnd_key & frm_bnd_key:\n mig_key = next(iter(bnd_key & frm_bnd_key))\n for key in frm_bnd_key:\n if key != mig_key:\n a1_idx = key\n\n # Get chain for redefining the rc1_atm1_key z-matrix entries\n _, gras = shifted_standard_zmas_graphs(\n [init_zma], remove_stereo=True)\n gra = functools.reduce(automol.graph.union, gras)\n xgr1, = 
automol.graph.connected_components(gra)\n atm1_neighbors = _atom_neighbor_keys(xgr1)[a1_idx]\n for idx in atm1_neighbors:\n num_keys = len(_atom_neighbor_keys(xgr1)[idx])\n if idx != mig_key and num_keys > 1:\n a2_idx = idx\n atm2_neighbors = _atom_neighbor_keys(xgr1)[a2_idx]\n for idx in atm2_neighbors:\n if idx not in (mig_key, a1_idx):\n a3_idx = idx\n\n mig_redef_keys = (a1_idx, a2_idx, a3_idx)\n\n # determine if the zmatrix needs to be rebuilt by x2z\n # determines if the hydrogen atom is used to define other atoms\n rebuild = False\n if any(idx > mig_key for idx in mig_redef_keys):\n rebuild = True\n\n # rebuild zmat and go through while loop again if needed\n # shift order of cartesian coords & rerun x2z to get a new zmat\n # else go to next stage\n if rebuild:\n reord_zma = reorder_zmatrix_for_migration(\n init_zma, a1_idx, mig_key)\n rct_zmas = [reord_zma]\n count += 1\n if count == 3:\n finish_build = False\n break\n else:\n rct_zma = init_zma\n finish_build = True\n break\n else:\n finish_build = False\n\n # If z-mat with good order found, finish building it\n if finish_build:\n\n # determine the new coordinates\n rct_geo = automol.zmatrix.geometry(rct_zma)\n distance = automol.geom.distance(\n rct_geo, mig_key, a1_idx)\n angle = automol.geom.central_angle(\n rct_geo, mig_key, a1_idx, a2_idx)\n dihedral = automol.geom.dihedral_angle(\n rct_geo, mig_key, a1_idx, a2_idx, a3_idx)\n # Reset the keys for the migrating H atom\n new_idxs = (a1_idx, a2_idx, a3_idx)\n key_dct = {mig_key: new_idxs}\n ts_zma = automol.zmatrix.set_keys(rct_zma, key_dct)\n\n # Reset the values in the value dict\n mig_names = automol.zmatrix.name_matrix(ts_zma)[mig_key]\n ts_zma = automol.zmatrix.set_values(\n ts_zma, {mig_names[0]: distance,\n mig_names[1]: angle,\n mig_names[2]: dihedral}\n )\n\n # standardize the ts zmat and get tors and dist coords\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n dist_coo_key = tuple(reversed(sorted(frm_bnd_key)))\n dist_name = next(coo_name for coo_name, coo_keys in coo_dct.items()\n if dist_coo_key in coo_keys)\n ts_name_dct = automol.zmatrix.standard_names(ts_zma)\n dist_name = ts_name_dct[dist_name]\n ts_zma = automol.zmatrix.standard_form(ts_zma)\n\n # Get the name of the coordinate of the other bond that is breaking\n brk_dist_name = None\n for brk_key in (brk_bnd_key1, brk_bnd_key2):\n if not brk_key.intersection(frm_bnd_key):\n brk_dist_name = automol.zmatrix.bond_key_from_idxs(\n ts_zma, brk_key)\n\n # Add second attempt to get brk_dist_name\n if brk_dist_name is None:\n brk_dist_names = [\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key1),\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key2)\n ]\n # Grab the name that is not None\n for name in brk_dist_names:\n if name is not None:\n brk_dist_name = name\n\n # get full set of potential torsional coordinates\n pot_tors_names = automol.zmatrix.torsion_coordinate_names(rct_zma)\n\n # remove the torsional coordinates that would break reaction coordinate\n gra = automol.zmatrix.graph(ts_zma, remove_stereo=True)\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n tors_names = []\n for tors_name in pot_tors_names:\n axis = coo_dct[tors_name][0][1:3]\n grp1 = [axis[1]] + (\n list(automol.graph.branch_atom_keys(gra, axis[0], axis) -\n set(axis)))\n grp2 = [axis[0]] + (\n list(automol.graph.branch_atom_keys(gra, axis[1], axis) -\n set(axis)))\n if not ((mig_key in grp1 and a1_idx in grp2) or\n (mig_key in grp2 and a1_idx in grp1)):\n tors_names.append(tors_name)\n\n # Get reactants graph\n _, rct_gras = 
shifted_standard_zmas_graphs(\n [rct_zma], remove_stereo=True)\n rcts_gra = automol.graph.union_from_sequence(rct_gras)\n\n brk_bnd_key1 = shift_vals_from_dummy(brk_bnd_key1, ts_zma)\n brk_bnd_key2 = shift_vals_from_dummy(brk_bnd_key2, ts_zma)\n brk_bnd_keys = frozenset({brk_bnd_key1, brk_bnd_key2})\n frm_bnd_key = shift_vals_from_dummy(frm_bnd_key, ts_zma)\n\n ret = ts_zma, dist_name, brk_dist_name, brk_bnd_keys, frm_bnd_key, tors_names, rcts_gra\n\n return ret", "def invert(self):\n self._c = ~self._c", "def intransitive(self):\r\n\r\n self.transitive = False\r\n return self", "def drawIsoSurfaces0( self ):\n #research\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n v= vtk.vtkAppendPolyData()\n \n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"nth\")!=None and modelNode.GetDisplayVisibility()==1 :\n v.AddInput(modelNode.GetPolyData())\n \n modeller = vtk.vtkImplicitModeller()\n modeller.SetInput(v.GetOutput())\n modeller.SetSampleDimensions(self.dim.value,self.dim.value,self.dim.value)\n modeller.SetCapping(0)\n modeller.SetAdjustBounds(self.abonds.value)\n modeller.SetProcessModeToPerVoxel() \n modeller.SetAdjustDistance(self.adist.value/100)\n modeller.SetMaximumDistance(self.maxdist.value/100) \n \n contourFilter = vtk.vtkContourFilter()\n contourFilter.SetNumberOfContours(self.nb.value)\n contourFilter.SetInputConnection(modeller.GetOutputPort()) \n contourFilter.ComputeNormalsOn()\n contourFilter.ComputeScalarsOn()\n contourFilter.UseScalarTreeOn()\n contourFilter.SetValue(self.contour.value,self.contourValue.value)\n contourFilter.SetValue(self.contour2.value,self.contourValue2.value)\n contourFilter.SetValue(self.contour3.value,self.contourValue3.value)\n contourFilter.SetValue(self.contour4.value,self.contourValue4.value)\n contourFilter.SetValue(self.contour5.value,self.contourValue5.value)\n\n isoSurface = contourFilter.GetOutput()\n self.AddContour(isoSurface)", "def set_und_surface(self):\n if (self.dimension == '3D'):\n self.part_RVE.Set(\n cells=self.part_RVE.cells.getSequenceFromMask(mask=('[#1 ]', ), ),\n name='Set_RVE')\n elif (self.dimension == '2D'):\n self.part_RVE.Set(\n faces=self.part_RVE.faces.getSequenceFromMask(mask=('[#1 ]', ), ),\n name='Set_RVE')\n else:\n print('dimension Error!')", "def normalize_and_stitch(self):\n\n # Normalize any remaining orders.\n index = self.current_order_index \n for i in range(len(self.parent.session.input_spectra)):\n self.update_order_index(i) # TODO: This is clumsy.\n self.fit_continuum(clobber=False)\n\n # Go back to original order.\n self.update_order_index(index)\n\n # Stitch and stack all orders.\n self.parent.session.stitch_and_stack()\n\n # Enable the menu-bar and the next three tabs.\n self.parent._menu_export_normalized_spectrum.setEnabled(True)\n self.parent.tabs.setTabEnabled(self.parent.tabs.indexOf(self) + 1, True)\n self.parent.tabs.setTabEnabled(self.parent.tabs.indexOf(self) + 2, True)\n self.parent.tabs.setTabEnabled(self.parent.tabs.indexOf(self) + 3, True)\n\n self.parent.stellar_parameters_tab.populate_widgets()\n self.parent.chemical_abundances_tab.new_session_loaded()\n\n return None", "def set_flipped(self, x, y):\n self.pieces[x + (y * self.width)].set_flipped()", "def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False", "def 
test_enumerating_stereobonds(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"ClC=CCl\", allow_undefined_stereo=True, toolkit_registry=toolkit\n )\n\n # use the default options\n isomers = mol.enumerate_stereoisomers()\n assert len(isomers) == 2\n\n assert mol not in isomers\n # make sure the input molecule is only different by bond stereo\n for ismol in isomers:\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereobonds(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"ClC=CCl\", allow_undefined_stereo=True, toolkit_registry=toolkit\n )\n\n # use the default options\n isomers = mol.enumerate_stereoisomers()\n assert len(isomers) == 2\n\n assert mol not in isomers\n # make sure the input molecule is only different by bond stereo\n for ismol in isomers:\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def ToggleAllGizmoLocalMode( self ):\n\n value = self.gizmoMgr.GetGizmoLocal( 'pos' )\n self.gizmoMgr.SetGizmoLocal( 'pos', not value )\n self.gizmoMgr.SetGizmoLocal( 'rot', not value )\n self.gizmoMgr.SetGizmoLocal( 'scl', not value )", "def orbit(self, representation='trivial'):\n\n def sign(permutation, representation):\n if representation == 'trivial':\n return 1\n if representation == 'sign':\n return permutation.sign\n\n answer = self.zero()\n for k, v in self.items():\n seen = []\n for i in k:\n if i not in seen:\n seen.append(i)\n permutation = SymmetricGroupElement(seen).inverse()\n new_v = sign(permutation, representation) * v\n answer += permutation * self.create({k: new_v})\n\n return answer", "def __invert(self, args):", "def stereo_callback(self, stereo_msg):\r\n start = time.time()\r\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\r\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\r\n\r\n # Build the image pyramids once since they're used at multiple places.\r\n self.create_image_pyramids()\r\n\r\n # Detect features in the first frame.\r\n if self.is_first_img:\r\n if not self.config.load_features_flag:\r\n self.initialize_first_frame()\r\n self.is_first_img = False\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n else:\r\n if not self.config.load_features_flag:\r\n # Track the feature in the previous image.\r\n t = time.time()\r\n self.track_features()\r\n print('___track_features:', time.time() - t)\r\n t = time.time()\r\n\r\n # Add new features into the current image.\r\n self.add_new_features()\r\n print('___add_new_features:', time.time() - t)\r\n t = time.time()\r\n self.prune_features()\r\n print('___prune_features:', time.time() - t)\r\n t = time.time()\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n print('___draw_features_stereo:', time.time() - t)\r\n t = time.time()\r\n\r\n print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')\r\n\r\n if not 
self.config.load_features_flag:\r\n try:\r\n self.save_features() \r\n return self.publish()\r\n finally:\r\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\r\n self.prev_features = self.curr_features\r\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\r\n\r\n # Initialize the current features to empty vectors.\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n else:\r\n self.load_features()\r\n return self.publish()", "def _standardize_pose(self):\n self.mesh_.center_vertices_bb()\n vertex_array_cent = np.array(self.mesh_.vertices())\n\n # find principal axes\n pca = sklearn.decomposition.PCA(n_components = 3)\n pca.fit(vertex_array_cent)\n\n # count num vertices on side of origin wrt principal axes\n comp_array = pca.components_\n norm_proj = vertex_array_cent.dot(comp_array.T)\n opposite_aligned = np.sum(norm_proj < 0, axis = 0)\n same_aligned = np.sum(norm_proj >= 0, axis = 0)\n pos_oriented = 1 * (same_aligned > opposite_aligned) # trick to turn logical to int\n neg_oriented = 1 - pos_oriented\n\n # create rotation from principal axes to standard basis\n target_array = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]]) # Z+, Y+, X+\n target_array = target_array * pos_oriented + -1 * target_array * neg_oriented\n R = np.linalg.solve(comp_array, target_array)\n R = R.T\n\n # rotate vertices, normals and reassign to the mesh\n vertex_array_rot = R.dot(vertex_array_cent.T)\n vertex_array_rot = vertex_array_rot.T\n self.mesh_.set_vertices(vertex_array_rot.tolist())\n self.mesh_.center_vertices_bb()\n\n if self.mesh_.normals() is not None:\n normals_array = np.array(self.mesh_.normals_)\n normals_array_rot = R.dot(normals_array.T)\n self.mesh_.set_normals(normals_array_rot.tolist())", "def test_flip_loop2():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), (3.8,2), (5,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (5,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)", "def symmetricModelling(*args, about: Union[AnyStr, bool]=\"\", allowPartial: bool=True, axis:\n Union[AnyStr, bool]=\"\", preserveSeam: Union[int, bool]=0, reset:\n bool=True, seamFalloffCurve: Union[AnyStr, bool]=\"\", seamTolerance:\n Union[float, bool]=0.0, symmetry: Union[int, bool]=0, tolerance:\n Union[float, bool]=0.0, topoSymmetry: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def automerge_clusters(self):\n all_clusters = self.get_clusters().copy()\n\n if not self._single: # if not in single mode mode\n # initialize the variable to check if some change has happened \n changed = False\n for cl_1 in all_clusters: # cycle over clusters\n c_c1 = all_clusters[cl_1]\n for cl_2 in all_clusters: # inner cycle over clusters\n c_c2 = all_clusters[cl_2]\n # if two clusters have the same speaker and have different \n # cluster identifiers\n if cl_1 != cl_2 and c_c1.get_speaker() != 'unknown' and c_c1.get_speaker() == c_c2.get_speaker() and self._clusters.has_key(cl_1) and self._clusters.has_key(cl_2):\n changed = True\n # merge the clusters an record that something changed\n self._merge_clusters(cl_1, cl_2)\n if changed: # if something has changed\n # rename all the clusters starting from S0\n self._rename_clusters()\n # remove also the old waves and seg files of the old clusters\n shutil.rmtree(self.get_file_basename())\n # rebuild all seg files\n self.generate_seg_file(set_speakers=False)\n # resplit the original wave file according to the new clusters\n self._to_trim()", "def 
cleaveSurfAtom(entry,max_bonds=1,supercell=2,group_structs=True):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # If the crystal is 3D\n if results[0]=='conventional':\n struct = copy.deepcopy(entry[0])\n og_binary_matrix = getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(og_binary_matrix)))\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n bonds = []\n \n # Get list of bonded atoms\n for i in range(len(og_binary_matrix)):\n for pair in [(i,j) for j in range(i+1,len(og_binary_matrix)) \n if og_binary_matrix[i][j]==1]:\n bonds.append(pair)\n allCombos = []\n combNum = 0\n \n # Get list of all combinations of bonds\n for i in range(max_bonds+1):\n for com in list(itertools.combinations(bonds,i)):\n allCombos.append(com)\n combNum+=1\n\n combos = allCombos\n jjj=0\n all_structs = []\n \n # For each bond combination\n for combo in combos:\n broken=0\n jjj+=1\n modified_matrix = np.array(binary_matrix)\n for pair in combo:\n i,j = pair\n i=i*supercell**3\n j=j*supercell**3\n # Break bonds in the loop\n for shift in range(supercell**3):\n for shift2 in range(supercell**3):\n modified_matrix[i+shift][j+shift2]=0\n modified_matrix[j+shift][i+shift2]=0\n\n new_num_bonds=sum(sum(modified_matrix))\n broken=int(og_num_bonds-new_num_bonds) \n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n # If the new set of atoms is not empty\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. 
the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n # If formula of new network matches the original cell\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n else:\n print('Material is does not have a 3D motiif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def toggle_conms(self):\n name = 'conm2'\n if name in self.gui.geometry_actors:\n geometry_properties_change = {name : self.gui.geometry_properties[name]}\n visibility_prev = geometry_properties_change[name].is_visible\n geometry_properties_change[name].is_visible = not visibility_prev\n\n self.gui.on_update_geometry_properties_override_dialog(geometry_properties_change)", "def test_flip_loop():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), (3.8,2), (4,0), (6,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (6,5), (4,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)", "def invert_selection(self):\n pass", "def flipViewer():\n allV = nuke.allNodes('Viewer')\n pV = allV[0]\n List = nuke.selectedNodes()\n nuke.selectAll()\n nuke.invertSelection()\n try:\n n = nuke.toNode('VIEWER_INPUT')\n if n.Class() == 'Mirror':\n n['Horizontal'].setValue(not n['Horizontal'].value())\n for i in allV:\n i['input_process'].setValue(not n['Vertical'].value() + n['Horizontal'].value() == 0)\n if n['Vertical'].value() + n['Horizontal'].value() == 0:\n nuke.delete(n)\n nuke.selectAll()\n nuke.invertSelection()\n else:\n nuke.message(\"Another Viewer Input already exists.\\nAborting to avoid conflict\")\n for i in List:\n i['selected'].setValue(True)\n \n except:\n n = nuke.Node('Mirror', inpanel=False)\n n['xpos'].setValue(pV.xpos()+150)\n n['ypos'].setValue(pV.ypos())\n n['name'].setValue('VIEWER_INPUT')\n n['hide_input'].setValue(1)\n n['Horizontal'].setValue(not n['Horizontal'].value())\n nuke.selectAll()\n nuke.invertSelection()\n for i in List:\n i['selected'].setValue(True)", "def invert(self):\n tmp = self.pvt\n self.pvt = self.nvt\n self.nvt = tmp\n tmp = self.pFace\n self.pFace = self.nFace\n self.nFace = tmp", "def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True):\n if not self.cancmap:\n return False\n event.accept()\n if event.state == 'down':\n self.restore_contrast(viewer, msg=msg)", "def change_glitch(sv):\r\n for nod in sv.Object.values():\r\n for i, (c,v) in enumerate(nod.clauses):\r\n if c and c[1] and is_glitch(c[1][0]): # only for conditions \r\n c=(Change, c[1], c[2])\r\n nod.clauses[i]=(c,v)", "def chordmode():\n for token in consume():\n if source.inSelection and isinstance(token, tokenizer.Pitch):\n transpose(token, 0)", "def update_normal(self):\n options = self.get_direction_options()\n if self.is_at_intersection() or self.last_position == (self.rect.centerx, self.rect.centery):\n self.direction = self.get_chase_direction(options)\n if self.direction == 'u' and 'u' in options:\n self.rect.centery -= self.speed\n elif self.direction == 'l' and 'l' in options:\n self.rect.centerx -= self.speed\n elif self.direction == 'd' and 'd' in options:\n self.rect.centery += 
self.speed\n elif self.direction == 'r' and 'r' in options:\n self.rect.centerx += self.speed\n self.change_eyes(self.direction or 'r') # default look direction to right\n self.image = self.norm_images.next_image()", "def flip(self):\n self.align = self._left if self.align == self._right else self._right\n self.group.layout_all()", "def __init__(self, channels):\n super(PositionalEncodingPermute2D, self).__init__()\n self.penc = PositionalEncoding2D(channels)", "def invert(self):\n self.vertices.reverse()", "async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))", "def test_inverse_bijective(Group: Type[jaxlie.MatrixLieGroup]):\n transform = sample_transform(Group)\n assert_transforms_close(transform, transform.inverse().inverse())", "def test_process_stereo_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/stereo.wav'\n self.default_kwargs['input_file'] = test_path\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()", "def stereo_symbol(self):\n\n return np.array([bond.stereo_symbol for bond in self])" ]
[ "0.65880185", "0.6446109", "0.593325", "0.5791387", "0.5475662", "0.53331393", "0.52869034", "0.52321607", "0.51858443", "0.5148949", "0.5144954", "0.5143506", "0.51018935", "0.5073538", "0.5006511", "0.49255782", "0.4913669", "0.4903975", "0.48686603", "0.47758043", "0.47285745", "0.47272015", "0.472204", "0.4691385", "0.46745914", "0.46667522", "0.46620986", "0.465791", "0.46475267", "0.4589371", "0.45750064", "0.4569052", "0.45636305", "0.45547447", "0.45461968", "0.45342287", "0.4529784", "0.45252067", "0.45193994", "0.44995642", "0.44967833", "0.44885436", "0.44646838", "0.44638085", "0.4459395", "0.44476536", "0.4446348", "0.44406375", "0.44319773", "0.44306484", "0.44304138", "0.44213843", "0.4419363", "0.4419169", "0.44158065", "0.44077045", "0.43903527", "0.43858832", "0.43801177", "0.4378717", "0.43744934", "0.43709585", "0.43689758", "0.43666273", "0.43657997", "0.43653074", "0.43605447", "0.4355299", "0.43501645", "0.43442008", "0.4342607", "0.43418208", "0.4340308", "0.43390116", "0.43390116", "0.43377146", "0.43338743", "0.43301296", "0.43296537", "0.43250194", "0.43242732", "0.43242386", "0.43184012", "0.43172655", "0.43124148", "0.4312097", "0.43096802", "0.43073612", "0.4306013", "0.4298273", "0.42761704", "0.42734462", "0.42502534", "0.42491993", "0.4242448", "0.42399263", "0.42395487", "0.42363894", "0.4229811", "0.42277896" ]
0.5284103
7
StereoGroup atoms are in the reaction, and the reaction destroys the specified chirality at the stereo centers -> invalidate stereo center, preserve the rest of the stereo group.
def test_reaction_destroys_stereo(self):
    reaction = '[C@:1]>>[C:1]'
    products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')
    self.assertEqual(products, 'FC(Cl)Br')
    products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')
    self.assertEqual(products, 'FC(Cl)Br')
    products = _reactAndSummarize(reaction, 'FC(Cl)Br')
    self.assertEqual(products, 'FC(Cl)Br')

    reaction = '[C@:1]F>>[C:1]F'
    # Reaction destroys stereo (but preserves unaffected group)
    products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')
    self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')
    # Reaction destroys stereo (but preserves the rest of the group)
    products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')
    self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )", "def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, 
s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def test_reaction_preserves_stereo(self):\n reaction = '[C@:1]>>[C@:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False", "def group_group_collide(sprite_group, o_sprite_group):\n sprites = set(sprite_group)\n for sprite in sprites:\n if group_collide(o_sprite_group, sprite):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False", "def _kill_group(self, x, y):\n if self[x, y] not in self.TURNS:\n raise BoardError('Can only kill black or white group')\n\n group = self.get_group(x, y)\n score = len(group)\n\n for x1, y1 in group:\n self[x1, y1] = self.EMPTY\n\n return score", "def group_collide(sprite_group, other_object):\n sprites = set(sprite_group)\n for sprite in sprites:\n if sprite.collide(other_object):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False", "def DeMorgan_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, A), disj, (neg, B)), (neg, (A, conj, B)) ],\n\t\t\t[ (neg, (A, conj, B)), ((neg, A), disj, (neg, B)) ],\n\t\t\t[ ((neg, A), conj, (neg, B)), (neg, (A, disj, B)) ],\n\t\t\t[ (neg, (A, disj, B)), ((neg, A), conj, (neg, B)) ]\n\t\t]))", "async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))", "def replace(self):\n if self.removed:\n self.coordinates = [[(self.player * 15 - 15), 0], [(self.player * 15 - 15), 1],\n [(self.player * 15 - 15), 2], [(self.player * 15 - 15), 3]]\n for i in self.coordinates:\n self.collision_boxes.append(rect.Rect(i[0] * 64, i[1] * 64, 64, 64))\n self.removed=False", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) 
== \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def remove_as_subgroup(self, other_groups):\r\n symbols_to_exclude = reduce(lambda alphabet, cell: alphabet.union(cell.get_possible_symbols()),\r\n self.cells, set())\r\n my_cells = set(self.cells)\r\n\r\n for group in other_groups:\r\n if my_cells.issubset(group.cells) and self is not group:\r\n # Remove my cells from the other group\r\n for cell in self.cells:\r\n cell.remove_group(group)\r\n group.cells.remove(cell)\r\n\r\n # Update the alphabets in the other group\r\n for cell in group.cells:\r\n cell.remove_possible_symbols(symbols_to_exclude)", "def concerted_unimolecular_elimination(rct_zmas, prd_zmas):\n\n # Initialize info for the returns\n ret = None, None, None, None, None\n finish_build = True\n\n # Attempt to build appropriate z-matrix\n prd_zmas, prd_gras = shifted_standard_zmas_graphs(\n prd_zmas, remove_stereo=True)\n if len(rct_zmas) == 1:\n count = 1\n while True:\n rct_zmas, rct_gras = shifted_standard_zmas_graphs(\n rct_zmas, remove_stereo=True)\n init_zma, = rct_zmas\n\n tras, _, _ = automol.graph.reac.elimination(rct_gras, prd_gras)\n if tras is not None:\n if len(tras[0]) == 1:\n tras = [tras]\n min_dist = 100.\n frm_bnd_key = None\n for tra_i in tras:\n # Get the bond formation and breaking keys\n bnd_key, = automol.graph.trans.formed_bond_keys(tra_i)\n geo = automol.zmatrix.geometry(rct_zmas[0])\n dist = automol.geom.distance(geo, *list(bnd_key))\n if dist < min_dist:\n min_dist = dist\n frm_bnd_key = bnd_key\n tra = tra_i\n brk_keys = automol.graph.trans.broken_bond_keys(tra)\n brk_bnd_key1, brk_bnd_key2 = brk_keys\n init_zma, = rct_zmas\n\n\n # Get index for migrating atom (or bond-form atom in group)\n for bnd_key in (brk_bnd_key1, brk_bnd_key2):\n if bnd_key & frm_bnd_key:\n mig_key = next(iter(bnd_key & frm_bnd_key))\n for key in frm_bnd_key:\n if key != mig_key:\n a1_idx = key\n\n # Get chain for redefining the rc1_atm1_key z-matrix entries\n _, gras = shifted_standard_zmas_graphs(\n [init_zma], remove_stereo=True)\n gra = functools.reduce(automol.graph.union, gras)\n xgr1, = automol.graph.connected_components(gra)\n atm1_neighbors = _atom_neighbor_keys(xgr1)[a1_idx]\n for idx in atm1_neighbors:\n num_keys = len(_atom_neighbor_keys(xgr1)[idx])\n if idx != mig_key and num_keys > 
1:\n a2_idx = idx\n atm2_neighbors = _atom_neighbor_keys(xgr1)[a2_idx]\n for idx in atm2_neighbors:\n if idx not in (mig_key, a1_idx):\n a3_idx = idx\n\n mig_redef_keys = (a1_idx, a2_idx, a3_idx)\n\n # determine if the zmatrix needs to be rebuilt by x2z\n # determines if the hydrogen atom is used to define other atoms\n rebuild = False\n if any(idx > mig_key for idx in mig_redef_keys):\n rebuild = True\n\n # rebuild zmat and go through while loop again if needed\n # shift order of cartesian coords & rerun x2z to get a new zmat\n # else go to next stage\n if rebuild:\n reord_zma = reorder_zmatrix_for_migration(\n init_zma, a1_idx, mig_key)\n rct_zmas = [reord_zma]\n count += 1\n if count == 3:\n finish_build = False\n break\n else:\n rct_zma = init_zma\n finish_build = True\n break\n else:\n finish_build = False\n\n # If z-mat with good order found, finish building it\n if finish_build:\n\n # determine the new coordinates\n rct_geo = automol.zmatrix.geometry(rct_zma)\n distance = automol.geom.distance(\n rct_geo, mig_key, a1_idx)\n angle = automol.geom.central_angle(\n rct_geo, mig_key, a1_idx, a2_idx)\n dihedral = automol.geom.dihedral_angle(\n rct_geo, mig_key, a1_idx, a2_idx, a3_idx)\n # Reset the keys for the migrating H atom\n new_idxs = (a1_idx, a2_idx, a3_idx)\n key_dct = {mig_key: new_idxs}\n ts_zma = automol.zmatrix.set_keys(rct_zma, key_dct)\n\n # Reset the values in the value dict\n mig_names = automol.zmatrix.name_matrix(ts_zma)[mig_key]\n ts_zma = automol.zmatrix.set_values(\n ts_zma, {mig_names[0]: distance,\n mig_names[1]: angle,\n mig_names[2]: dihedral}\n )\n\n # standardize the ts zmat and get tors and dist coords\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n dist_coo_key = tuple(reversed(sorted(frm_bnd_key)))\n dist_name = next(coo_name for coo_name, coo_keys in coo_dct.items()\n if dist_coo_key in coo_keys)\n ts_name_dct = automol.zmatrix.standard_names(ts_zma)\n dist_name = ts_name_dct[dist_name]\n ts_zma = automol.zmatrix.standard_form(ts_zma)\n\n # Get the name of the coordinate of the other bond that is breaking\n brk_dist_name = None\n for brk_key in (brk_bnd_key1, brk_bnd_key2):\n if not brk_key.intersection(frm_bnd_key):\n brk_dist_name = automol.zmatrix.bond_key_from_idxs(\n ts_zma, brk_key)\n\n # Add second attempt to get brk_dist_name\n if brk_dist_name is None:\n brk_dist_names = [\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key1),\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key2)\n ]\n # Grab the name that is not None\n for name in brk_dist_names:\n if name is not None:\n brk_dist_name = name\n\n # get full set of potential torsional coordinates\n pot_tors_names = automol.zmatrix.torsion_coordinate_names(rct_zma)\n\n # remove the torsional coordinates that would break reaction coordinate\n gra = automol.zmatrix.graph(ts_zma, remove_stereo=True)\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n tors_names = []\n for tors_name in pot_tors_names:\n axis = coo_dct[tors_name][0][1:3]\n grp1 = [axis[1]] + (\n list(automol.graph.branch_atom_keys(gra, axis[0], axis) -\n set(axis)))\n grp2 = [axis[0]] + (\n list(automol.graph.branch_atom_keys(gra, axis[1], axis) -\n set(axis)))\n if not ((mig_key in grp1 and a1_idx in grp2) or\n (mig_key in grp2 and a1_idx in grp1)):\n tors_names.append(tors_name)\n\n # Get reactants graph\n _, rct_gras = shifted_standard_zmas_graphs(\n [rct_zma], remove_stereo=True)\n rcts_gra = automol.graph.union_from_sequence(rct_gras)\n\n brk_bnd_key1 = shift_vals_from_dummy(brk_bnd_key1, ts_zma)\n brk_bnd_key2 = 
shift_vals_from_dummy(brk_bnd_key2, ts_zma)\n brk_bnd_keys = frozenset({brk_bnd_key1, brk_bnd_key2})\n frm_bnd_key = shift_vals_from_dummy(frm_bnd_key, ts_zma)\n\n ret = ts_zma, dist_name, brk_dist_name, brk_bnd_keys, frm_bnd_key, tors_names, rcts_gra\n\n return ret", "def handle_collisions():\n for sprite in sprite_group:\n for other in pygame.sprite.spritecollide(sprite, sprite_group, False):\n if sprite is not other and DO_KILL:\n sprite.kill()\n other.kill()", "def remove_clashes(self):\n dihe_parameters = self.myGlycosylator.builder.Parameters.parameters['DIHEDRALS']\n vwd_parameters = self.myGlycosylator.builder.Parameters.parameters['NONBONDED']\n \n static_glycans = None\n for k in self.original_glycanMolecules:\n if k not in self.linked_glycanMolecules:\n if static_glycans is not None:\n static_glycans += self.original_glycanMolecules[k].atom_group\n else:\n static_glycans = self.original_glycanMolecules[k].atom_group.copy()\n \n environment = self.myGlycosylator.protein.copy() \n environment += static_glycans\n \n #Build topology\n self.myGlycosylator.build_glycan_topology(glycanMolecules = self.linked_glycanMolecules, build_all = False)\n sampler = glc.Sampler(self.linked_glycanMolecules.values(), environment, dihe_parameters, vwd_parameters)\n sampler.remove_clashes_GA()", "def unsetReversible(self):\n return _libsbml.Reaction_unsetReversible(self)", "def remove_from_group(self, org, contact, group):\n pass", "def unsetReaction(self):\n return _libsbml.GeneAssociation_unsetReaction(self)", "def test_check_for_existing_reaction_eliminates_identical_reactions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction')", "def pseudopotentialise_molecule(self, sysargs=None, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_potentialise = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_potentialise = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising carbon atoms %s ...' 
% [atom['#'] for atom in atoms_to_potentialise])\n\n        potential_coords_list = []\n\n        for atom in atoms_to_potentialise:\n            distanced_atom_list = self.order_atoms_by_distance_from(atom['#'])\n            distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n            if len(distanced_carbon_list) == 1:\n                primary_vector = None\n                for non_c_atom in distanced_atom_list[1:4]:\n                    if non_c_atom['el'] != 'h':\n                        primary_vector = self.vectorise_atom(non_c_atom['#']) - self.vectorise_atom(atom['#'])\n                if primary_vector is None:\n                    primary_vector = self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#'])\n            else:\n                primary_vector = self.vectorise_atom(distanced_carbon_list[1]['#']) - self.vectorise_atom(atom['#'])\n\n            normal_vector = numpy.cross(\n                self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#']),\n                self.vectorise_atom(distanced_atom_list[2]['#']) - self.vectorise_atom(atom['#'])\n            )\n\n            primary_potential_vector = self.lengtherise_vector(primary_vector, self.atom_potential_set_distance)\n            potential_set_split_vector = self.lengtherise_vector(normal_vector, self.potential_set_split_distance)\n\n            relative_potential_vectors = [\n                primary_potential_vector + potential_set_split_vector,\n                primary_potential_vector - potential_set_split_vector\n            ]\n\n            for potential_set in range(self.no_potential_sets_per_atom-1):\n\n                pps_positive = numpy.dot(self.construct_euler_rodriguez_matrix(\n                    normal_vector,\n                    2*numpy.pi/self.no_potential_sets_per_atom),\n                    relative_potential_vectors[-2],\n                )\n                pps_negative = numpy.dot(self.construct_euler_rodriguez_matrix(\n                    normal_vector,\n                    2*numpy.pi/self.no_potential_sets_per_atom),\n                    relative_potential_vectors[-1]\n                )\n\n                relative_potential_vectors.append(pps_positive)\n                relative_potential_vectors.append(pps_negative)\n\n            if self.add_primary_vector_potentials_as_coords is False:\n                del relative_potential_vectors[0]\n                del relative_potential_vectors[0]\n\n            # potential coords are still relative to their atom, now make them real.\n            for vector in relative_potential_vectors:\n                potential_coords_list.append(\n                    {'#': 0, 'el': self.sp2_pseudo_element, 'x': vector[0]+atom['x'], 'y': vector[1]+atom['y'], 'z': vector[2]+atom['z']},\n                )\n\n        # Now add potentials to coord list, after removing the 'real' hydrogen atoms.\n        if execute_deletion is True:\n            self.delete_specified_atoms(deletion_list)\n        for potential_coord in potential_coords_list:\n            self.write_coord(potential_coord, overwrite=False)", "def cull(self) -> None:\n        for player in self.players:\n            to_remove = [creature for creature in player.battle_line if creature.damage_taken >= creature.power()]\n            for creature in to_remove:\n                player.battle_line.remove(creature)\n            to_remove.destroyed(self, creature)", "def release_atoms(self):\r\n\t\thole_size = self.box_size/2\r\n\t\thole_left = self.box_size/2 - hole_size/2\r\n\t\thole_right = self.box_size/2 + hole_size/2\r\n\r\n\t\tx_vals = (self.pos.x > hole_left) & (self.pos.x < hole_right)\r\n\t\ty_vals = (self.pos.y > hole_left) & (self.pos.y < hole_right)\r\n\t\tindices = (self.pos.z < 0) & x_vals & y_vals\r\n\r\n\t\tescaped_count = np.sum(indices)\r\n\t\tlost_momentum = self.atom_mass*np.sum(self.vel.z)\r\n\r\n\t\t# this would look better as self.vel.values[:, indices] = ... 
, but that is actually noticeably slower\r\n\t\tself.pos.x[indices], self.pos.y[indices], self.pos.z[indices] = *generator.uniform(hole_left, hole_right, size=(2, escaped_count)), np.full(escaped_count, self.box_size)\r\n\t\tif self.change_velocities:\r\n\t\t\t# changing the velocity makes the temperature decrease over time\r\n\t\t\tself.vel.x[indices], self.vel.y[indices], self.vel.z[indices] = generator.uniform(0, self.box_size, size=(3, escaped_count))\r\n\r\n\t\treturn escaped_count, lost_momentum", "def _ignore_collision(self):\n        # The legacy version only ignores collision of child links of active joints.\n        for link in self.cabinet.get_links():\n            for s in link.get_collision_shapes():\n                g0, g1, g2, g3 = s.get_collision_groups()\n                s.set_collision_groups(g0, g1, g2 | 1 << 31, g3)", "def _alienCollide(self):\n        for b in self._bolts:\n            if self._ship != None and self._ship.collides(b):\n                self._ship = None\n                self._bolts = []\n                self._key = False\n                self._lives -= 1", "def test_check_for_existing_reaction_eliminates_identical_reactions_without_duplicate_flag(self):\n        cerm = CoreEdgeReactionModel()\n\n        # make species' objects\n        spcA = Species().from_smiles('[H]')\n        spcB = Species().from_smiles('C=C[CH2]C')\n        spcC = Species().from_smiles('C=C=CC')\n        spcD = Species().from_smiles('[H][H]')\n        spcA.label = '[H]'\n        spcB.label = 'C=C[CH2]C'\n        spcC.label = 'C=C=CC'\n        spcD.label = '[H][H]'\n        spcB.generate_resonance_structures()\n\n        cerm.add_species_to_core(spcA)\n        cerm.add_species_to_core(spcB)\n        cerm.add_species_to_core(spcC)\n        cerm.add_species_to_core(spcD)\n\n        reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n                                             products=[spcC, spcD],\n                                             family='H_Abstraction',\n                                             template=['Csd', 'H'],\n                                             duplicate=False)\n        reaction_in_model.reactants.sort()\n        reaction_in_model.products.sort()\n\n        reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n                                           products=[spcC, spcD],\n                                           family='H_Abstraction',\n                                           template=['Cs12345', 'H'],\n                                           duplicate=False)\n        cerm.add_reaction_to_core(reaction_in_model)\n        cerm.register_reaction(reaction_in_model)\n\n        found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n        self.assertTrue(found, 'check_for_existing_reaction failed to eliminate reactions without duplicate tag')", "def remove_mass(self, *focal_elements):\n        for focal in focal_elements:\n            if focal[0] in self.focals:\n                self.focals[focal[0]] -= focal[1]\n            else:\n                self.focals[focal[0]] = -focal[1]", "def cleanup(self):\n        for residue in self.debumper.biomolecule.residues:\n            if not isinstance(residue, aa.Amino):\n                continue\n            if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n                if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n                    residue.remove_atom(\"HE1\")\n            elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n                if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n                    residue.remove_atom(\"HD1\")", "def pick(self, inv, pl, group):\r\n        if self.rect.colliderect(pl):\r\n            group.remove(self)\r\n            if inv.count('key') == 0:\r\n                inv += ['key']\r\n                music_acceptor.activatedPortalSound()", "def pseudopotentialise_ethane_like_molecule(self, sysargs, execute_deletion=True):\n\n        # Find atoms to replace\n        deletion_list = []\n        potential_coords_list = []\n        if len(sysargs) > 2:\n            if 'del' in sysargs:\n                deletion_list = self.parse_coord_list(sysargs[4])\n            replacement_list = self.parse_coord_list(sysargs[2])\n            atoms_to_replace = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n        else:\n            atoms_to_replace = (item for item in self.coord_list if item[\"el\"] == 'c')\n            deletion_list = 
(item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising atoms %s ...' % [atom['#'] for atom in atoms_to_replace])\n\n # Option to place a potential on the *opposite* side of the carbon as well.\n dipolar_potentials = False\n if 'dipole' in sysargs:\n print('Dipolar potentialisation activated...')\n dipolar_potentials = True\n\n for atom in atoms_to_replace:\n # Find vector from nearest carbon.\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n vector_from_nearest_carbon = self.vectorise_atom(atom['#']) \\\n - self.vectorise_atom(distanced_carbon_list[0]['#'])\n vector_to_nearest_carbon = self.vectorise_atom(distanced_carbon_list[0]['#']) \\\n - self.vectorise_atom(atom['#'])\n\n # Lengtherise vector from carbon to give relative pp coordinates.\n vector_c_to_new_pp = self.lengtherise_vector(vector_from_nearest_carbon, self.atom_potential_set_distance)\n vector_c_to_new_dipole_pp = self.lengtherise_vector(vector_to_nearest_carbon, self.atom_potential_set_distance)\n\n # Add to carbon coords to get new pp coords.\n potential_coords_list.append(\n {'#': 0, 'el': self.sp3_pseudo_element,\n 'x': vector_c_to_new_pp[0] + distanced_carbon_list[0]['x'],\n 'y': vector_c_to_new_pp[1] + distanced_carbon_list[0]['y'],\n 'z': vector_c_to_new_pp[2] + distanced_carbon_list[0]['z']},\n )\n if dipolar_potentials is True:\n # Add to carbon coords to get new pp coords.\n potential_coords_list.append(\n {'#': 0, 'el': self.sp3_pseudo_element,\n 'x': vector_c_to_new_dipole_pp[0] + distanced_carbon_list[0]['x'],\n 'y': vector_c_to_new_dipole_pp[1] + distanced_carbon_list[0]['y'],\n 'z': vector_c_to_new_dipole_pp[2] + distanced_carbon_list[0]['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)", "def is_surjective(self):\n # Testing equality of free modules over PIDs is unreliable\n # see Trac #11579 for explanation and status\n # We test if image equals codomain with two inclusions\n # reverse inclusion of below is trivially true\n return self.codomain().is_submodule(self.image())", "def terminate(self, atoms):\n\n c = list(atoms.keys())[0] # name of carbon atom being terminated\n c_ndx = list(atoms.values())[0] # serial index of carbon begin terminated\n\n chain = self.determine_chains(c)[0] # which chain carbon atom is on\n c_name = self.monomer.chains[chain][c]\n\n # to get indexing right\n c_ndx -= self.monomer.indices[chain][c_name]\n\n # types after reaction. 
Keeping this dictionary format so it integrates easily with xlinking algorithm\n types = {'chain': {self.monomer.chains[chain][c]: 'c3', self.monomer.dummy_connectivity[chain][c]: 'hc'}}\n\n for i in self.monomer.hydrogen_connectivity[c]: # turn already attached carbon(s) to c3\n types['chain'][i] = 'hc'\n\n # update types\n reacted_types = {'chain': {c_ndx + self.monomer.indices[chain][a]: types['chain'][a]\n for a in types['chain'].keys()}}\n\n # add dummy atom bond\n bonds = [[c_ndx + self.monomer.indices[chain]['C2'], c_ndx + self.monomer.indices[chain]['D2'], 'dummy']]\n\n radicals = []\n\n rm_improper = [[c_ndx + self.monomer.indices[chain][x] for x in self.monomer.impropers[chain][c_name]]]\n\n # define terminated atoms\n terminated = [c_ndx + self.monomer.indices[chain][c_name]]\n\n return reacted_types, bonds, radicals, rm_improper, terminated", "def remove_alt_confs (hierarchy, always_keep_one_conformer=False) :\n for model in hierarchy.models() :\n for chain in model.chains() :\n for residue_group in chain.residue_groups() :\n atom_groups = residue_group.atom_groups()\n assert (len(atom_groups) > 0)\n if always_keep_one_conformer :\n if (len(atom_groups) == 1) and (atom_groups[0].altloc == '') :\n continue\n atom_groups_and_occupancies = []\n for atom_group in atom_groups :\n if (atom_group.altloc == '') :\n continue\n mean_occ = flex.mean(atom_group.atoms().extract_occ())\n atom_groups_and_occupancies.append((atom_group, mean_occ))\n atom_groups_and_occupancies.sort(lambda a,b: cmp(b[1], a[1]))\n for atom_group, occ in atom_groups_and_occupancies[1:] :\n residue_group.remove_atom_group(atom_group=atom_group)\n single_conf, occ = atom_groups_and_occupancies[0]\n single_conf.altloc = ''\n else :\n for atom_group in atom_groups :\n if (not atom_group.altloc in [\"\", \"A\"]) :\n residue_group.remove_atom_group(atom_group=atom_group)\n else :\n atom_group.altloc = \"\"\n if (len(residue_group.atom_groups()) == 0) :\n chain.remove_residue_group(residue_group=residue_group)\n if (len(chain.residue_groups()) == 0) :\n model.remove_chain(chain=chain)\n atoms = hierarchy.atoms()\n new_occ = flex.double(atoms.size(), 1.0)\n atoms.set_occ(new_occ)", "def stop_group(self, ch_group, args):\n for sound in ch_group.sounds: \n self._stop_sound(sound, args) \n # and apply recursively to subgroups\n for group in ch_group.sub_group_channels:\n self.stop_group(group, args)", "def _collide(self):\n\n collisions = self._get_collisions()\n for collision in collisions:\n self._update_excitation(collision)\n atom1 = self.atoms[collision[0]]\n atom2 = self.atoms[collision[1]]\n\n r = atom1.pos-atom2.pos\n r_mag = np.linalg.norm(r)\n r_hat = r/r_mag\n\n v_1_r = np.dot(atom1.vel, r_hat)\n v_2_r = np.dot(atom2.vel, r_hat)\n\n v_1_r_f = (atom1.mass-atom2.mass)*v_1_r/(atom1.mass + atom2.mass)\\\n + 2*atom2.mass*v_2_r/(atom1.mass + atom2.mass)\n v_2_r_f = (atom2.mass-atom1.mass)*v_2_r/(atom1.mass + atom2.mass)\\\n + 2*atom1.mass*v_1_r/(atom1.mass + atom2.mass)\n\n delta_v_1 = (v_1_r_f - v_1_r)*r_hat\n delta_v_2 = (v_2_r_f - v_2_r)*r_hat\n\n self.atoms[collision[0]].vel += delta_v_1\n self.atoms[collision[1]].vel += delta_v_2", "def intercept_e(self):\n for asteroid in range(len(self.asteroid_id_e) - 1, -1, -1):\n if self.distance(self.Main_Ship, self.asteroid_id_e[asteroid]) < (self.spaceship_radius + self.asteroid_r_e[asteroid]):\n self.del_asteroid_e(asteroid)\n self.lives -= 1", "def test_revoke_group_permissions(self):\n group0 = self.test_save('TestGroup0', user0)\n group1 = self.test_save('TestGroup1', 
user1)\n \n # revoke perm when user has no perms\n revoke(group0, 'Perm1', object0)\n \n for perm in perms:\n group0.grant(perm, object0)\n group0.grant(perm, object1)\n group1.grant(perm, object0)\n group1.grant(perm, object1)\n \n # revoke single perm\n group0.revoke('Perm1', object0)\n self.assertEqual(set(['Perm2', 'Perm3', 'Perm4']), set(group0.get_perms(object0)))\n self.assertEqual(perms, set(group0.get_perms(object1)))\n self.assertEqual(perms, set(group1.get_perms(object0)))\n self.assertEqual(perms, set(group1.get_perms(object1)))\n \n # revoke a second perm\n group0.revoke('Perm3', object0)\n self.assertEqual(set(['Perm2', 'Perm4']), set(group0.get_perms(object0)))\n self.assertEqual(perms, set(group0.get_perms(object1)))\n self.assertEqual(perms, set(group1.get_perms(object0)))\n self.assertEqual(perms, set(group1.get_perms(object1)))\n \n # revoke from another object\n group0.revoke('Perm3', object1)\n self.assertEqual(set(['Perm2', 'Perm4']), set(group0.get_perms(object0)))\n self.assertEqual(set(['Perm1', 'Perm2', 'Perm4']), set(group0.get_perms(object1)))\n self.assertEqual(perms, set(group1.get_perms(object0)))\n self.assertEqual(perms, set(group1.get_perms(object1)))\n \n # revoke from another user\n group1.revoke('Perm4', object0)\n self.assertEqual(set(['Perm2', 'Perm4']), set(group0.get_perms(object0)))\n self.assertEqual(set(['Perm1', 'Perm2', 'Perm4']), set(group0.get_perms(object1)))\n self.assertEqual(set(['Perm1', 'Perm2', 'Perm3']), set(group1.get_perms(object0)))\n self.assertEqual(perms, set(group1.get_perms(object1)))\n \n # revoke perm user does not have\n group0.revoke('Perm1', object0)\n self.assertEqual(set(['Perm2', 'Perm4']), set(group0.get_perms(object0)))\n self.assertEqual(set(['Perm1', 'Perm2', 'Perm4']), set(group0.get_perms(object1)))\n self.assertEqual(set(['Perm1', 'Perm2', 'Perm3']), set(group1.get_perms(object0)))\n self.assertEqual(perms, set(group1.get_perms(object1)))\n \n # revoke perm that does not exist\n group0.revoke('DoesNotExist', object0)\n self.assertEqual(set(['Perm2', 'Perm4']), set(group0.get_perms(object0)))\n self.assertEqual(set(['Perm1', 'Perm2', 'Perm4']), set(group0.get_perms(object1)))\n self.assertEqual(set(['Perm1', 'Perm2', 'Perm3']), set(group1.get_perms(object0)))\n self.assertEqual(perms, set(group1.get_perms(object1)))", "def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))", "def remove(self, sgid, other, proto_spec, direction=\"in\"):\n # returns (removed_sgr_info, ...)\n # RevokeSecurityGroupIngress, RevokeSecurityGroupEgress\n if direction == \"in\":\n api = \"RevokeSecurityGroupIngress\"\n elif direction == \"out\":\n api = \"RevokeSecurityGroupEgress\"\n else:\n raise ValueError(\"direction must be one of ('in', 'out')\")\n return self.modify(api, sgid, other, proto_spec)", "def test_reaction_inverts_stereo(self):\n reaction = '[C@:1]>>[C@@:1]'\n\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 
'F[C@@H](Cl)Br |o1:1|')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@H](Cl)Br |&1:1|')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')", "def cleaveSurfAtom(entry,max_bonds=1,supercell=2,group_structs=True):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # If the crystal is 3D\n if results[0]=='conventional':\n struct = copy.deepcopy(entry[0])\n og_binary_matrix = getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(og_binary_matrix)))\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n bonds = []\n \n # Get list of bonded atoms\n for i in range(len(og_binary_matrix)):\n for pair in [(i,j) for j in range(i+1,len(og_binary_matrix)) \n if og_binary_matrix[i][j]==1]:\n bonds.append(pair)\n allCombos = []\n combNum = 0\n \n # Get list of all combinations of bonds\n for i in range(max_bonds+1):\n for com in list(itertools.combinations(bonds,i)):\n allCombos.append(com)\n combNum+=1\n\n combos = allCombos\n jjj=0\n all_structs = []\n \n # For each bond combination\n for combo in combos:\n broken=0\n jjj+=1\n modified_matrix = np.array(binary_matrix)\n for pair in combo:\n i,j = pair\n i=i*supercell**3\n j=j*supercell**3\n # Break bonds in the loop\n for shift in range(supercell**3):\n for shift2 in range(supercell**3):\n modified_matrix[i+shift][j+shift2]=0\n modified_matrix[j+shift][i+shift2]=0\n\n new_num_bonds=sum(sum(modified_matrix))\n broken=int(og_num_bonds-new_num_bonds) \n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n # If the new set of atoms is not empty\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. 
the cluster does not have the same composition\n                    # as the overall crystal; therefore there are other\n                    # clusters of varying composition.\n                    hetero = True\n                motiif = getDim(scale,supercell)\n\n                # If formula of new network matches the original cell\n                if not hetero:\n                    if motiif=='layered':\n                        cluster_sites = [struct.sites[n] for n in cluster]\n                        all_structs.append(struct.from_sites(cluster_sites))\n\n        if group_structs:\n            matched = [x[0] for x in \n                       StructureMatcher(stol=1E-6,primitive_cell=False,\n                       scale=False).group_structures(all_structs)]\n        else:\n            matched=all_structs\n        return(matched) \n\n    else:\n        print('Material does not have a 3D motif')\n        print('Try increasing radii tolerance if appropriate')\n        return([])", "def process(self, car):\n        super(LeftIntersectionMessage, self).process(car)\n        car.delete_car_at_intersection(self)", "def reaction_remotion(a2_data, retained_reactions):\n    for i in a2_data['I'].keys():\n        for r in a2_data['I'][i]['R'].keys():\n            if r not in retained_reactions:\n                a2_data['I'][i]['R'].pop(r)\n    return a2_data", "def enr_destroy(dims, excitations, *, dtype=None):\n    from .states import enr_state_dictionaries\n    dtype = dtype or settings.core[\"default_dtype\"] or _data.CSR\n\n    nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)\n\n    a_ops = [scipy.sparse.lil_matrix((nstates, nstates), dtype=np.complex128)\n             for _ in dims]\n\n    for n1, state1 in enumerate(idx2state):\n        for idx, s in enumerate(state1):\n            # if s > 0, the annihilation operator of mode idx has a non-zero\n            # entry with one less excitation in mode idx in the final state\n            if s > 0:\n                state2 = state1[:idx] + (s-1,) + state1[idx+1:]\n                n2 = state2idx[state2]\n                a_ops[idx][n2, n1] = np.sqrt(s)\n\n    return [Qobj(a, dims=[dims, dims]).to(dtype) for a in a_ops]", "def removeSeparatrix(self):\n        if self._separatrixOverlayHandle is not None:\n            self._separatrixOverlayHandle.remove()\n            self._separatrixOverlayHandle = None\n\n        self.overlaySeparatrix = False", "def unsetCompartment(self):\n        return _libsbml.Reaction_unsetCompartment(self)", "def test_parameterize_mol_missing_stereo_rdkit(self, force_field):\n        toolkit_registry = ToolkitRegistry(\n            toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n        )\n\n        molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n        topology = Topology.from_molecules([molecule])\n\n        force_field.create_openmm_system(\n            topology,\n            toolkit_registry=toolkit_registry,\n        )", "def remove_mass_unsafe(self, *focal_elements):\n        for focal in focal_elements:\n            if focal[0] in self.focals:\n                self.focals[focal[0]] -= focal[1]\n            else:\n                self.focals[focal[0]] = -focal[1]", "def cull(self):", "def _remove_receptors_helper(self, argin):\n        receptor_to_vcc = dict([*map(int, pair.split(\":\"))] for pair in\n                               self._proxy_cbf_controller.receptorToVcc)\n        for receptorID in argin:\n            if receptorID in self._receptors:\n                vccID = receptor_to_vcc[receptorID]\n                vccProxy = self._proxies_vcc[vccID - 1]\n\n                # unsubscribe from events\n                vccProxy.unsubscribe_event(self._events_state_change_vcc[vccID][0]) # state\n                vccProxy.unsubscribe_event(self._events_state_change_vcc[vccID][1]) # healthState\n                del self._events_state_change_vcc[vccID]\n                del self._vcc_state[self._fqdn_vcc[vccID - 1]]\n                del self._vcc_health_state[self._fqdn_vcc[vccID - 1]]\n\n                # reset receptorID and subarrayMembership Vcc attribute:\n                vccProxy.receptorID = 0\n                vccProxy.subarrayMembership = 0\n\n                self._receptors.remove(receptorID)\n                self._proxies_assigned_vcc.remove(vccProxy)\n                
self._group_vcc.remove(self._fqdn_vcc[vccID - 1])\n else:\n log_msg = \"Receptor {} not assigned to subarray. Skipping.\".format(str(receptorID))\n self.logger.warn(log_msg)\n\n # transitions to EMPTY if not assigned any receptors\n if not self._receptors:\n self._update_obs_state(ObsState.EMPTY)", "def destroy(self, cause:str, *, warp_core_breach:bool=False, self_destruct:bool=False):\n gd = self.game_data\n #gd.grid[self.sector_coords.y][self.sector_coords.x].removeShipFromSec(self)\n is_controllable = self.is_controllable\n #wc_value = self.sys_warp_core.get_effective_value\n\n if self.is_controllable:\n self.game_data.cause_of_damage = cause\n try:\n self.life_support.able_crew = 0\n self.life_support.injured_crew = 0\n except AttributeError:\n pass\n try:\n for k in self.torpedo_launcher.torps.keys():\n self.torpedo_launcher.torps[k] = 0\n self.torpedo_launcher.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.shield_generator.shields = 0\n self.shield_generator.shields_up = False\n self.shield_generator.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.polarized_hull.polarization_amount = 0\n self.polarized_hull.is_polarized = False\n self.polarized_hull.integrety = 0.0\n except AttributeError:\n pass\n self.power_generator.energy = 0\n self.power_generator.integrety = 0\n try:\n self.warp_drive.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.beam_array.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.cannons.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.impulse_engine.integrety = 0.0\n except AttributeError:\n pass\n self.sensors.integrety = 0.0\n try:\n self.cloak.cloak_status = CloakStatus.INACTIVE\n self.cloak.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.transporter.integrety = 0.0\n except AttributeError:\n pass\n\n if is_controllable:\n gd.engine.message_log.print_messages = False\n\n if warp_core_breach or self_destruct:\n \n self.warp_core_breach(self_destruct)\n self.hull = -self.ship_class.max_hull\n \n if self is self.game_data.selected_ship_planet_or_star:\n self.game_data.selected_ship_planet_or_star = None\n \n self.get_sub_sector.destroy_ship(self)", "def invalidate_masks(cls, input_masks: List[Union[None, PropagationMask]]) -> None:\n for mask in input_masks:\n if mask:\n mask.invalidate_groups()", "def unsetKineticLaw(self):\n return _libsbml.Reaction_unsetKineticLaw(self)", "def cure(self, s):\n if self.disease_status == 1:\n s.number_of_symptomatic -= 1\n elif self.disease_status == 2:\n s.number_of_asymptomatic -= 1\n elif self.disease_status == 3:\n s.number_of_res_symp -= 1\n elif self.disease_status == 4:\n s.number_of_res_asymp -= 1\n if self.disease_status > 0:\n s.infected.remove(self.identifier)\n if self.disease_status > 2:\n s.resistant.remove(self.identifier)\n self.disease_status = 0\n self.time_since_infection = -1", "def delete_comp_outside_cluster(cls_dic,compos):\n #so that merge will not contain cluster\n del_comp = []\n for idx, comp in enumerate(compos):\n box1 = [comp.bbox.row_min, comp.bbox.row_max, comp.bbox.col_min, comp.bbox.col_max]\n for idx2, comp2 in enumerate(compos):\n if idx == idx2:\n continue\n if comp.cls not in cls_dic.keys() or comp2.cls in cls_dic.keys():\n continue\n box2 = [comp2.bbox.row_min, comp2.bbox.row_max, comp2.bbox.col_min, comp2.bbox.col_max]\n #\n if cal_iou(box1, box2)[0] >=0.5 and cal_iou(box1, box2)[0] > cal_iou(box1, box2)[1]:\n del_comp.append(idx2)\n # print('del',box2)\n new_comp = []\n for idx, comp in enumerate(compos):\n if 
idx not in del_comp:\n new_comp.append(comp)\n return new_comp", "def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam): \n h, w, u = curImageInfo.originalImageResized.shape # shape after resize\n K = 1\n N = h * w * K\n gr = np.mean(curImageInfo.originalImageResized, 2) # not 3 as it is zero based :3\n grs = cv2.GaussianBlur(gr, (5, 5), 1)\n \n # One heuristic for converting depth to disparity\n disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;\n \n if conversionParam.spatialSmoothnessSwitch == True:\n # Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients\n # Vertical and Horizontal Edges\n dx = cv2.filter2D(grs, -1, np.transpose(np.array([[-1, 1, 0]])))\n dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))\n \n W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2 \n \n A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, \"csc\") \\\n + (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \\\n + (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))\n \n b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()\n \n [x, flag] = cg(A, b, np.transpose(disparity0).flatten(), 5e-1, 50)\n \n disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 1, K)\n else:\n disparity = disparity0\n \n curImageInfo.leftImage = curImageInfo.originalImage\n \n # The -ve sign to convert the white to black and black to white \n warpright = -disparity\n \n # only the warping interp2 is done on the original size image with no resizing to have good estimation\n warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]), \n interpolation=cv2.INTER_LINEAR)\n \n curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright), \n conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))\n \n return disparity", "async def on_raw_reaction_remove(self, payload):\n\n\t\tguild = self.bot.get_guild(payload.guild_id)\n\t\tif guild is not None:\n\t\t\t# Update reaction leaderboards\n\t\t\treactionLeaderboard = self.leaderboards[str(payload.guild_id)][\"reactionLeaderboard\"]\n\n\t\t\tif payload.emoji.id is not None:\n\t\t\t\tfor guildEmoji in guild.emojis:\n\t\t\t\t\tif payload.emoji.id == guildEmoji.id:\n\t\t\t\t\t\treactionLeaderboard[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] -= 1\n\t\t\t\t\t\tbreak\n\n\t\t\telse:\n\t\t\t\treactionLeaderboard[str(payload.emoji.name)] -= 1\n\n\t\t\tif str(payload.emoji.id) in self.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"]:\n\t\t\t\tself.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"][str(payload.emoji.id)] -= 1", "def remove_to_destroy(total_buffer,to_destroy):\n totbuf=np.copy(total_buffer)\n for val,begInd,endInd in to_destroy:\n for j in range(endInd-begInd):\n index_beg = begInd+j\n totbuf[ total_buffer[:,:,index_beg]==val,index_beg]=0\n return totbuf", "def test_revoke_all_group(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n for perm in perms:\n grant_group(group0, perm, object0)\n grant_group(group0, perm, object1)\n grant_group(group1, perm, object0)\n grant_group(group1, perm, object1)\n \n revoke_all_group(group0, object0)\n self.assertEqual([], get_group_perms(group0, object0))\n self.assertEqual(perms, 
set(get_group_perms(group0, object1)))\n self.assertEqual(perms, set(get_group_perms(group1, object0)))\n self.assertEqual(perms, set(get_group_perms(group1, object1)))\n \n revoke_all_group(group0, object1)\n self.assertEqual([], get_group_perms(group0, object0))\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual(perms, set(get_group_perms(group1, object0)))\n self.assertEqual(perms, set(get_group_perms(group1, object1)))\n \n revoke_all_group(group1, object0)\n self.assertEqual([], get_group_perms(group0, object0))\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n self.assertEqual(perms, set(get_group_perms(group1, object1)))\n \n revoke_all_group(group1, object1)\n self.assertEqual([], get_group_perms(group0, object0))\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n self.assertEqual([], get_group_perms(group1, object1))", "def _update_excitation(self, collision_indices):\n\n atom1 = self.atoms[collision_indices[0]]\n atom2 = self.atoms[collision_indices[1]]\n\n m_tot = atom1.mass+atom2.mass\n mu = atom1.mass*atom2.mass/(m_tot)\n v_cm = (atom1.mass*atom1.vel+atom2.mass*atom2.vel)/m_tot\n\n v_1_cm = atom1.vel-v_cm\n v_2_cm = atom2.vel-v_cm\n\n v_1_cm_hat = v_1_cm/np.linalg.norm(v_1_cm)\n v_2_cm_hat = v_2_cm/np.linalg.norm(v_2_cm)\n\n E_cm = 0.5*atom1.mass*np.linalg.norm(v_1_cm)**2\\\n + 0.5*atom2.mass*np.linalg.norm(v_2_cm)**2\n\n if atom1.exc == 0 and atom2.exc == 0:\n if E_cm > (atom1.chi + atom2.chi):\n if np.random.rand() < atom1.f_ex:\n E_cm -= atom1.chi\n atom1.exc = 1\n\n if np.random.rand() < atom1.f_ex:\n E_cm -= atom2.chi\n atom2.exc = 1\n\n elif max(atom1.chi, atom2.chi) < E_cm < (atom1.chi + atom2.chi):\n if np.random.rand() < 0.5:\n if np.random.rand() < atom1.f_ex:\n E_cm -= atom1.chi\n atom1.exc = 1\n\n else:\n if np.random.rand() < atom1.f_ex:\n E_cm -= atom2.chi\n atom2.exc = 1\n\n elif min(atom1.chi, atom2.chi) < E_cm < max(atom1.chi, atom2.chi):\n if atom1.chi < atom2.chi:\n if np.random.rand() < atom1.f_ex:\n E_cm -= atom1.chi\n atom1.exc = 1\n\n else:\n if np.random.rand() < atom2.f_ex:\n E_cm -= atom2.chi\n atom2.exc = 1\n\n elif atom1.exc == 0 and atom2.exc == 1:\n if np.random.rand() < atom2.f_de(E_cm, mu):\n E_cm += atom2.chi\n atom2.exc = 0\n if E_cm > atom1.chi and np.random.rand() < atom1.f_ex:\n E_cm -= atom1.chi\n atom1.exc = 1\n\n elif atom1.exc == 1 and atom2.exc == 0:\n if np.random.rand() < atom1.f_de(E_cm, mu):\n E_cm += atom1.chi\n atom1.exc = 0\n if E_cm > atom2.chi and np.random.rand() < atom2.f_ex:\n E_cm -= atom2.chi\n atom2.exc = 1\n\n elif atom1.exc == 1 and atom2.exc == 1:\n if np.random.rand() < atom1.f_de(E_cm, mu):\n E_cm += atom1.chi\n atom1.exc = 0\n\n if np.random.rand() < atom2.f_de(E_cm, mu):\n E_cm += atom2.chi\n atom2.exc = 0\n\n p_cm = np.sqrt(2*(E_cm)*mu)\n v_2_cm = p_cm*v_2_cm_hat/atom2.mass\n v_1_cm = p_cm*v_1_cm_hat/atom1.mass\n\n self.atoms[collision_indices[0]].vel = v_cm+v_1_cm\n self.atoms[collision_indices[1]].vel = v_cm+v_2_cm", "def do_erase(self, line):\n if self.bootstrap() != 0:\n return self.return_code(1, True)\n\n # Warning\n print('')\n print(self.t.underline_red('! WARNING !'))\n print('This is a destructive operation, all shares will be unrecoverably deleted from the card')\n if not self.ask_proceed('Do you really want to remove all key shares? 
(y/n): ', support_non_interactive=True):\n return self.return_code(0)\n\n # Erase\n resp, sw = self.card.send_erase_shares()\n if sw != 0x9000:\n logger.error('Could not erase all shares, code: %04X' % sw)\n return self.return_code(1)\n\n print('All shares erased successfully')\n return self.return_code(0)", "def group_group_collide(missiles, group):\n remove1 = set()\n remove2 = set()\n collisions = 0\n for missile in missiles:\n for obj in group:\n if missile.collide(obj):\n collisions += 1\n missile.dead = True\n obj.dead = True\n obj.after_death()\n remove1.add(missile)\n remove2.add(obj)\n missiles.difference_update(remove1)\n group.difference_update(remove2)\n return (missiles, group, collisions)", "def removeAllCorrelations(self, removeReImCorrel = True):\n\t\tdim = len(self.coma)/2\n#\t#\tCMwrite(\"removeAllCorrelations\")\n\t\tfor i in range(dim):\n\t\t\tfor j in range(dim):\n\t\t\t\tif not i == j:\n\t\t\t\t\tself.coma[2*i ,2*j ] = 0.\t\t\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\t\t\t\tself.coma[2*i+1,2*j+1] = 0.\n\t\t\t\telif removeReImCorrel:\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\tself.makeComaInv()\n\t\tself.specialCOMAs = {}", "def test_remove_learner_group_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_learner_group', self.learner_groups[1]))", "async def on_raw_reaction_remove(self, payload):\n\n # exclude all reactions which are not the original message\n if str(payload.message_id) != self.message_id:\n return\n\n # exclude the bot\n if payload.user_id == self.bot.user.id:\n return\n\n else:\n # get the model data for the role assigner object\n data = await self.get_objects(\n model=RoleAssigner, filter={\"bot__name\": str(self.bot_name)}\n )\n\n # role assigner object\n data = data[0]\n\n guild = self.get_guild(guild_id=payload.guild_id)\n\n user = self.get_user(guild=guild, user_id=payload.user_id)\n\n for db_role in data.roles.all():\n\n if db_role.emoji.startswith(\":\") and db_role.emoji.endswith(\":\"):\n\n ce = db_role.emoji[1:-1]\n\n else:\n ce = db_role.emoji\n\n if str(payload.emoji.name) == str(ce):\n\n role = self.get_role(guild, int(db_role.uid))\n\n if user in role.members:\n\n await user.remove_roles(role)\n\n print(\"Removed \" + str(user) + \" from role: \" + str(role) + \"!\")\n\n else:\n print(\"User \" + str(user) + \" not in role: \" + str(role) + \"!\")\n\n pass", "def unsetReplacedBy(self):\n return _libsbml.CompSBasePlugin_unsetReplacedBy(self)", "def neutralise_raw(self):\n # kekulization has to be done, otherwise u will encounter\n # issues when assigning bond types later\n Chem.Kekulize(self.m)\n\n # get pairs of charged atoms\n self.get_charged_pairs()\n\n # eliminate the charges by rebuilding the molecule\n m = Chem.Mol()\n mc = Chem.EditableMol(m)\n for i, az in enumerate(self.zs):\n ai = Chem.Atom( az )\n ci = self.charges[i]\n if ci != 0:\n if ci == 1:\n filt = (self.cpairs[:,0] == i)\n if np.any(filt):\n ai.SetFormalCharge( 1 )\n elif ci == -1:\n filt = (self.cpairs[:,1] == i)\n if np.any(filt): ai.SetFormalCharge( -1 )\n else:\n print((' -- i, charges[i] = ', i, self.charges[i]))\n print(' #ERROR: abs(charge) > 1??')\n raise\n mc.AddAtom( ai )\n\n ijs = np.array( np.where( np.triu(self.bom) > 0 ) ).astype(np.int)\n nb = ijs.shape[1]\n for i in range(nb):\n i, j = ijs[:,i]\n mc.AddBond( i, j, bo2bt[ '%.1f'%self.bom[i,j] ] )\n\n m = mc.GetMol()\n m2 = assign_coords(m, self.coords)\n self.m = m2", "def end_pairing(self, 
error_dialog):\n self.speak_dialog(error_dialog)\n self.bus.emit(Message(\"mycroft.mic.unmute\", None))\n\n self.data = None\n self.count = -1", "def unsetReaction(self):\n return _libsbml.FluxObjective_unsetReaction(self)", "def prepareToRemove( self ):\n self.emitRemoved()\n return True", "def remove(self):\n\n self.last_move = \"\"\n self.collision_boxes = []\n self.removed=True", "def conclude_hand(self):\n for position in self.positions.keys():\n if position not in self.cards:\n self.cards[position] = (Card(), Card())", "def mutate_residue(pose, mutant_position, mutant_aa,\n pack_radius = 0.0, pack_scorefxn = '' ):\n #### a MutateResidue Mover exists similar to this except it does not pack\n #### the area around the mutant residue (no pack_radius feature)\n #mutator = MutateResidue(mutant_position, mutant_aa)\n #mutator.apply(test_pose)\n\n if pose.is_fullatom() == False:\n IOError( 'mutate_residue only works with fullatom poses' )\n\n\n # create a standard scorefxn by default\n if not pack_scorefxn:\n pack_scorefxn = rosetta.core.scoring.get_score_function()\n\n task = pyrosetta.standard_packer_task(pose)\n\n # the Vector1 of booleans (a specific object) is needed for specifying the\n # mutation, this demonstrates another more direct method of setting\n # PackerTask options for design\n aa_bool = rosetta.utility.vector1_bool()\n # PyRosetta uses several ways of tracking amino acids (ResidueTypes)\n # the numbers 1-20 correspond individually to the 20 proteogenic amino acids\n # aa_from_oneletter returns the integer representation of an amino acid\n # from its one letter code\n # convert mutant_aa to its integer representation\n mutant_aa = rosetta.core.chemical.aa_from_oneletter_code(mutant_aa)\n\n # mutation is performed by using a PackerTask with only the mutant\n # amino acid available during design\n # to do this, construct a Vector1 of booleans indicating which amino acid\n # (by its numerical designation, see above) to allow\n for i in range(1, 21):\n # in Python, logical expression are evaluated with priority, thus the\n # line below appends to aa_bool the truth (True or False) of the\n # statement i == mutant_aa\n aa_bool.append( i == int(mutant_aa) )\n\n # modify the mutating residue's assignment in the PackerTask using the\n # Vector1 of booleans across the proteogenic amino acids\n task.nonconst_residue_task(mutant_position\n ).restrict_absent_canonical_aas(aa_bool)\n\n # prevent residues from packing by setting the per-residue \"options\" of\n # the PackerTask\n restrict_non_nbrs_from_repacking(pose, mutant_position, task, pack_radius)\n\n # apply the mutation and pack nearby residues\n #print task\n packer = rosetta.protocols.simple_moves.PackRotamersMover(pack_scorefxn, task)\n packer.apply(pose)", "async def async_remove_from_group(self, device):\n if device.entity_id in self._multiroom_group:\n self._multiroom_group.remove(device.entity_id)\n# await self.async_schedule_update_ha_state(True)\n\n if len(self._multiroom_group) <= 1:\n self._multiroom_group = []\n self._is_master = False\n self._slave_list = None\n\n for member in self._multiroom_group:\n for player in self.hass.data[DOMAIN].entities:\n if player.entity_id == member and player.entity_id != self.entity_id:\n await player.async_set_multiroom_group(self._multiroom_group)", "def do(self):\n\n self.logger.debug(\"Entering RemoveAllReceptors()\")\n\n device=self.target\n\n # For LMC0.6.0: use a helper instead of a command so that it doesn't care about the obsState\n 
device._remove_receptors_helper(device._receptors[:])\n\n message = \"CBFSubarray RemoveAllReceptors command completed OK\"\n self.logger.info(message)\n return (ResultCode.OK, message)", "def checkSwapsAndClean( self, # For comparison the NRG tags and defaults on March 2nd, 2011 are presented.\n energy_abs_criterium = 0.1, # _Stereo_assign_list.Crit_abs_e_diff 0.100\n energy_rel_criterium = 0.0, # _Stereo_assign_list.Crit_rel_e_diff 0.000\n swapPercentage = 75.0, # _Stereo_assign_list.Crit_mdls_favor_pct 75.0\n singleModelCutoff = 1.0, # _Stereo_assign_list.Crit_sing_mdl_viol 1.000 (inclusive)\n multiModelCutoff = 0.5, # _Stereo_assign_list.Crit_multi_mdl_viol 0.500 (inclusive)\n multiModelPercentageCutoff = 50.0, # _Stereo_assign_list.Crit_multi_mdl_pct 50.0 (inclusive)\n method = 'SUM_AVERAGING', # TODO: code others.\n outputFileName = 'stereo_assign.str', # will be written to current directory if not an absolute path. Ignored if output type is custom\n debug = False, # Print debug info?\n useLowestAromaticViolation = False, # Check for lowest violation for single HD1/2 HE1/2 distance constraint items\n outputType = 'NMRSTAR' # Will write out NMR-STAR file. Can also be 'custom', will then only print info\n ):\n if not self.distanceConstraintLists or not self.structureEnsemble or not self.structureEnsemble.models:\n print \"Error: no constraint lists or no structures available! Aborting...\"\n return True\n\n #\n # Initialize... see parameters above for swapPercentage\n #\n # Set a dictionary with violationCodes (what is a large violation?)\n #\n # smallFloat = 0.000000000001 # same for cutoff distance and fraction\n\n negativeFraction = -999.9 # fraction set to always happen as it's under cut off.\n\n self.violationCodes = {}\n self.violationCodes['xl'] = {'violation': singleModelCutoff, 'fraction': negativeFraction}\n self.violationCodes['l'] = {'violation': multiModelCutoff, 'fraction': multiModelPercentageCutoff/100.}\n self.violationCodes[self.VIOLATION_CODE_REPORTINGX_STR] = {'violation': singleModelCutoff, 'fraction': negativeFraction}\n self.violationCodes[self.VIOLATION_CODE_REPORTINGL_STR] = {'violation': multiModelCutoff, 'fraction': negativeFraction}\n self.violationCodes[self.VIOLATION_CODE_REPORTINGS_STR] = {'violation': 0.0, 'fraction': negativeFraction}\n\n\n # JFD changed indentation here so that below statement is always executed.\n # Order in which they are checked, if found will abort so xl violation is prioritized\n self.violationCodeList = ['xl','l',\n self.VIOLATION_CODE_REPORTINGS_STR,\n self.VIOLATION_CODE_REPORTINGL_STR,\n self.VIOLATION_CODE_REPORTINGX_STR ]\n for violationCode in self.violationCodeList:\n if not self.violationCodes.has_key(violationCode):\n print 'ERROR: expected violationCode [%s] in StereoAssignmentCleanup.violationCodes ' % violationCode\n return True\n# print 'DEBUG: self.violationCode[%s] : %s' % ( violationCode, str(self.violationCodes[violationCode]))\n\n #\n # Initialise some variables\n #\n\n self.useLowestAromaticViolation = useLowestAromaticViolation\n\n #\n # Set the factor for calculating violations\n #\n\n self.method = method\n if self.method == 'SUM_AVERAGING':\n self.factor = 1.0/6.0\n\n #\n # Initialise resonance and 'triplet' information\n #\n\n print\n print \"Checking swap status and cleaning prochiral groups in constraint lists...\"\n print\n\n (self.resAtomDict,self.resAtomSetDict) = createResonanceAtomAndAtomSetDict(self.distanceConstraintLists[0].parent.fixedResonances)\n if self.verbose:\n print \"Made resAtomDict, 
resAtomSetDict\"\n\n # resAtomSwapDict is list of atoms associated with a resonance, prochiralResonancesDict links to (chainCode,seqId,prochiralChemAtomSet) tuple\n (self.resAtomSwapDict,self.prochiralResonancesDict) = createResAtomSwapDict(self.resAtomSetDict,compareWithWattos=self.compareWithWattos)\n if self.verbose:\n print \"Made resAtomSwapDict,prochiralResonancesDict\"\n\n self.triplets = {}\n\n # Generate a list of triplets, only for ones that have resonances - rest is dealt with later on.\n resList = self.prochiralResonancesDict.keys()\n resList.sort()\n\n for res in resList:\n atomTuple = self.resAtomDict[res]\n prochiralKey = self.prochiralResonancesDict[res]\n\n if not self.triplets.has_key(prochiralKey):\n self.triplets[prochiralKey] = {}\n\n if not self.triplets[prochiralKey].has_key(atomTuple):\n self.triplets[prochiralKey][atomTuple] = []\n\n self.triplets[prochiralKey][atomTuple].append(res)\n\n #\n # Now prioritise the triplets...\n #\n\n prochiralPriority = {}\n self.prochiralConstraints = {}\n\n prochiralKeys = self.triplets.keys()\n prochiralKeys.sort()\n Triplet_count = len(prochiralKeys)\n if Triplet_count < 1:\n print \"WARNING: expected at least one triplet. Are there SSA distance restraints available?\"\n return\n invalidTripletCount = 0 # Like 1a24 1 185 LEU CD* that is invalid and can easily be recognized because it gets no involved restraints.\n for prochiralKey in prochiralKeys:\n #print prochiralKey\n atomTuples = self.triplets[prochiralKey].keys()\n atomTuples.sort()\n connectedConstraints = []\n unambiguousStereoConstraints = [] # These are constraints where there is no additional stereo ambiguity in the constraint items involving the prochiral\n allResonancesSet = set()\n\n otherItems = {}\n\n for atomTuple in atomTuples:\n #print \"\",atomTuple,triplets[prochiralKey][atomTuple]\n for resonance in self.triplets[prochiralKey][atomTuple]:\n allResonancesSet.add(resonance) # Note will not add the same item twice, so this is fine!\n for constraintItem in resonance.pairwiseConstraintItems:\n constraint = constraintItem.constraint\n if not otherItems.has_key(constraint):\n otherItems[constraint] = {}\n\n # Track other resonance in the item for filtering out fully ambiguous restraints\n orderedResonances = list(constraintItem.orderedResonances)\n otherResonance = orderedResonances[not orderedResonances.index(resonance)]\n if otherResonance not in otherItems[constraint]: # Use this now for future Python3 compatibility\n otherItems[constraint][otherResonance] = set()\n otherItems[constraint][otherResonance].add(resonance)\n\n if constraint.className in ('DistanceConstraint','HBondConstraint'):\n if constraint not in connectedConstraints:\n connectedConstraints.append(constraint)\n # So only 'unambiguous' if the 'other' resonance in the item has a resonance assignment, is assigned to one atomSet, and is prochiral (so could be deassigned)\n if otherResonance.resonanceSet and len(otherResonance.resonanceSet.atomSets) == 1 and otherResonance in self.prochiralResonancesDict:\n #if self.resAtomDict[resonance][0].residue.seqId == 48:\n # print self.resAtomDict[resonance], self.resAtomDict[otherResonance], otherResonance.resonanceSet.atomSets\n unambiguousStereoConstraints.append(constraint)\n else:\n pass\n# print 'DEBUG: ambi in %s:\\n %s' % (prochiralKey, ccpnDistanceRestraintToString(constraint)) # JFD doesn't know how to easily show atoms here.\n\n #\n # Clean up restraints so that constraints that are already fully ambiguous for the prochiral resonances (and they 
point to exactly the same resonances) are not included in the list to check..\n #\n\n if len(allResonancesSet) > 1:\n for constraint in otherItems:\n allMatch = True\n for otherResonance in otherItems[constraint]:\n if allResonancesSet != otherItems[constraint][otherResonance]:\n allMatch = False\n break\n\n if allMatch:\n if constraint in connectedConstraints:\n connectedConstraints.pop(connectedConstraints.index(constraint))\n if constraint in unambiguousStereoConstraints:\n unambiguousStereoConstraints.pop(unambiguousStereoConstraints.index(constraint))\n\n #\n # Set their priority\n #\n\n chainIdCcpn = prochiralKey[0]\n resIdCcpn = prochiralKey[1]\n chemAtomSetName = prochiralKey[2].name\n priorityKey = (len(connectedConstraints),len(unambiguousStereoConstraints),chainIdCcpn,resIdCcpn,chemAtomSetName)\n# print \"DEBUG: priorityKey:\", priorityKey\n if not prochiralPriority.has_key(priorityKey):\n prochiralPriority[priorityKey] = []\n\n prochiralPriority[priorityKey].append(prochiralKey)\n\n connectedConstraints.sort()\n self.prochiralConstraints[prochiralKey] = connectedConstraints\n\n \n #\n # Sort by priority and reorganise...\n #\n \n priorityKeys = prochiralPriority.keys()\n\n ## custom sort needs to return an int.\n def tripletComparator(x, y):\n if x[0] != y[0]:\n return x[0] - y[0] # ascending connectedConstraints\n# if not self.compareWithWattos:\n if x[1] != y[1]:\n return y[1] - x[1] # ascending unambiguousStereoConstraints\n if x[2] != y[2]:\n if x[2] < y[2]: # descending chainIdCcpn character\n return 1\n else:\n return -1\n resIdX = int(x[3])\n resIdY = int(y[3])\n if resIdX != resIdY:\n return resIdY - resIdX # descending resIdCcpn\n if x[4] != y[4]:\n if x[4] < y[4]: # descending chemAtomSetName\n return 1\n else:\n return -1\n return 0\n # end def\n\n priorityKeys.sort(cmp=tripletComparator)\n priorityKeys.reverse()\n\n if debug:\n for pk in priorityKeys:\n for pck in prochiralPriority[pk]:\n print \"pck: \", pck\n for at in self.triplets[pck].keys():\n print \" at, self.triplets[pck][at]: \",at, self.triplets[pck][at]\n print\n\n #\n # Now calculate the total 'energy' for each constraint, and track whether there are any serious violations\n #\n # The 'energy' is the sum of the squared violations (over all models and restraints).\n #\n\n self.createAtomCoordDict() # This is static, fine to keep like this!\n\n # Corresponds to the indexes of avgLocalSums\n\n self.swapTypes = [self.SWAP_TYPE_ORG,'swapped']\n self.constraintItemsReset = []\n\n #\n # First only do swapping...\n #\n\n swapInfo = {}\n orgMaxViolation = {}\n orgViolationSingleModelCriteriumCount = {}\n orgViolationMultiModelCriteriumCount = {}\n\n Swap_count = 0 # Using captial to distinguish from original FC and use exact same as Wattos.\n Deassign_count = 0\n Total_e_low_states = 0.0\n Total_e_high_states = 0.0\n tripletIdx = 0\n for priorityKey in priorityKeys:\n for prochiralKey in prochiralPriority[priorityKey]:\n tripletIdx += 1\n if debug:\n print prochiralKey\n\n (prochiralViolationInfo,allConstraintItems) = self.checkProchiralKeyConstraints(prochiralKey,debug)\n\n # Find max violation of original assignment\n orgMaxViolation[ prochiralKey] = 0.0\n orgViolationSingleModelCriteriumCount[ prochiralKey] = 0\n orgViolationMultiModelCriteriumCount[ prochiralKey] = 0\n violResultTupleList = prochiralViolationInfo[self.SWAP_TYPE_ORG][self.REQUIRES_DEASSIGNMENT_STR]\n for violationCode, violationList in violResultTupleList:\n if violationCode == self.VIOLATION_CODE_REPORTINGS_STR: # Includes any possible 
violation.\n orgMaxViolation[prochiralKey] = max( orgMaxViolation[prochiralKey], max(violationList)) # a list of violations\n elif violationCode == self.VIOLATION_CODE_REPORTINGX_STR:\n orgViolationSingleModelCriteriumCount[prochiralKey] += self.numModels - violationList.count(0.0)\n elif violationCode == self.VIOLATION_CODE_REPORTINGL_STR:\n orgViolationMultiModelCriteriumCount[prochiralKey] += self.numModels - violationList.count(0.0)\n # end for violation results\n\n #\n # Now check whether needs to be swapped\n #\n\n doSwapCount = 0.0\n totalEnergyHighState = 0.0 # actual high state will be determined after next loop. For now assume state 0 (unswapped)\n totalEnergyLowState = 0.0\n for modelIndex in range(self.numModels):\n energyHighState = prochiralViolationInfo[self.swapTypes[0]]['energy'][modelIndex]\n energyLowState = prochiralViolationInfo[self.swapTypes[1]]['energy'][modelIndex]\n\n# totalEnergyDiff = prochiralViolationInfo[self.swapTypes[0]]['energy'][modelIndex] - prochiralViolationInfo[self.swapTypes[1]]['energy'][modelIndex] # this is a bug? Needs to be cumulative over models.\n totalEnergyHighState += energyHighState\n totalEnergyLowState += energyLowState\n if energyHighState > energyLowState: # swapping needed because for this model the assumption on the unswapped being the highest energy state was correct\n doSwapCount += 1.0\n# print \"DEBUG: tripletIdx,modelIndex,energyHighState,energyLowState: %s\" % str((tripletIdx,modelIndex,energyHighState,energyLowState))\n # end for model loop\n swappedFavouredFraction = doSwapCount / self.numModels\n\n # Adapted from Wattos\n totalEnergyHighState /= self.numModels # For criteria it's important to use one that can be compared over entries. Ensemble size should not influence result.\n totalEnergyLowState /= self.numModels\n if totalEnergyHighState < totalEnergyLowState: # Get this right before deciding on swapping.\n tmpEnergy = totalEnergyHighState\n totalEnergyHighState = totalEnergyLowState\n totalEnergyLowState = tmpEnergy\n # end if\n energyDifference = totalEnergyHighState - totalEnergyLowState # guaranteed positive or zero\n totalEnergyDiff = energyDifference # FC name\n percentageModelFavoured = 100.0 * swappedFavouredFraction\n if totalEnergyHighState > 0.0: # Strange in Wattos code there's no safety on totalEnergyHighState being zero. 
Added here.\n energyDifferencePercentage = 100.0 * energyDifference / totalEnergyHighState\n else:\n energyDifferencePercentage = 0.0\n if energyDifference > 0.0:\n energyDifferencePercentage = 100.0\n # end if/else\n\n # If any criteria is not met then the assignment will be maintained.\n swapAssignment = False\n if totalEnergyHighState <= totalEnergyLowState:\n msg = \"criterium not met: totalEnergyHighState > totalEnergyLowState: %.3f and %.3f\" % ( totalEnergyHighState, totalEnergyLowState )\n elif percentageModelFavoured < swapPercentage:\n msg = \"criterium not met: percentageModelFavoured >= swapPercentage: %.1f %.1f\" % ( percentageModelFavoured, swapPercentage)\n elif energyDifference < energy_abs_criterium: # If diff is close to zero do nothing.\n msg = \"criterium not met: energyDifference >= energy_abs_criterium: %.3f and %.3f\" % ( energyDifference, energy_abs_criterium )\n elif energyDifferencePercentage < energy_rel_criterium:\n msg = \"criterium not met: energyDifferencePercentage >= energy_rel_criterium: %.1f %.1f\" % ( energyDifferencePercentage, energy_rel_criterium)\n else:\n swapAssignment = True\n # end if/else\n if not swapAssignment:\n print \"DEBUG maintaining tripletIdx %s because %s\" % ( tripletIdx, msg)\n else:\n print \"DEBUG swapping tripletIdx %s\" % tripletIdx\n # end if\n finalSwapType = self.swapTypes[0]\n favouredPercent = (1 - swappedFavouredFraction) * 100.0\n if swapAssignment:\n finalSwapType = self.swapTypes[1]\n favouredPercent = 100.0 - favouredPercent\n Swap_count += 1\n\n Total_e_low_states += totalEnergyLowState\n Total_e_high_states += totalEnergyHighState\n swapInfo[prochiralKey] = (swapAssignment,finalSwapType,energyDifferencePercentage,totalEnergyDiff, totalEnergyHighState, totalEnergyLowState,\n favouredPercent,swappedFavouredFraction,tripletIdx)\n\n\n #\n # Now make changes in CCPN... 
deassignment gets priority over swapping.\n #\n\n if swapAssignment:\n\n prochiralResonances = []\n for resList in self.triplets[prochiralKey].values():\n for resonance in resList:\n if not resonance in prochiralResonances:\n prochiralResonances.append(resonance)\n\n #\n # Switch the assignments...\n #\n \n if debug:\n print\n print \"SWAPPING\", prochiralResonances\n print\n\n if len(prochiralResonances) == 2:\n\n resSet1 = prochiralResonances[0].resonanceSet\n atomSet1 = resSet1.sortedAtomSets()[0]\n resSet2 = prochiralResonances[1].resonanceSet\n atomSet2 = resSet2.sortedAtomSets()[0]\n\n resSet1.addAtomSet(atomSet2)\n resSet1.removeAtomSet(atomSet1)\n resSet2.addAtomSet(atomSet1)\n resSet2.removeAtomSet(atomSet2)\n\n # Reset some dictionaries as well - note that resAtomSwapDict gives atoms of the *other* prochiral, so below is correct!\n atomTuple1 = tuple(atomSet1.sortedAtoms())\n atomTuple2 = tuple(atomSet2.sortedAtoms())\n\n self.resAtomSwapDict[prochiralResonances[0]] = atomTuple2\n self.resAtomSwapDict[prochiralResonances[1]] = atomTuple1\n\n # Reset triplets info\n self.triplets[prochiralKey] = {}\n self.triplets[prochiralKey][atomTuple1] = [prochiralResonances[1]]\n self.triplets[prochiralKey][atomTuple2] = [prochiralResonances[0]]\n\n elif len(prochiralResonances) == 1:\n resSet = prochiralResonances[0].resonanceSet\n atomSet1 = resSet.sortedAtomSets()[0]\n\n otherAtoms = self.resAtomSwapDict[prochiralResonances[0]]\n\n otherAtomSet = otherAtoms[0].findFirstFixedAtomSet(nmrConstraintStore=self.nmrConstraintStore)\n if not otherAtomSet:\n otherAtomSet = self.nmrConstraintStore.newFixedAtomSet(atoms = otherAtoms)\n \n if otherAtomSet != atomSet1:\n resSet.addAtomSet(otherAtomSet)\n atomSet1.removeResonanceSet(resSet)\n\n # Reset some dictionaries as well - note that resAtomSwapDict gives atoms of the *other* prochiral, so below is correct!\n atomTuple1 = tuple(atomSet1.sortedAtoms())\n \n else:\n # Same atomSet, possible for HD1/2 HE1/2 aromatics\n atomTuple1 = otherAtoms\n\n self.resAtomSwapDict[prochiralResonances[0]] = atomTuple1\n\n # Reset triplets info\n self.triplets[prochiralKey] = {}\n self.triplets[prochiralKey][atomTuple1] = []\n self.triplets[prochiralKey][otherAtomSet] = [prochiralResonances[0]]\n\n #\n # Then do deassigning. 
and track info for final printout...\n #\n\n finalList = {}\n\n self.swapTypes = [self.SWAP_TYPE_ORG] # Swapped not necessary any more\n priorityCount = 0\n\n for priorityKey in priorityKeys:\n priorityCount += 1\n for prochiralKey in prochiralPriority[priorityKey]:\n\n if debug:\n print prochiralKey\n\n (prochiralViolationInfo,allConstraintItems) = self.checkProchiralKeyConstraints(prochiralKey,debug=debug)\n\n #\n # Now check whether needs to be deassigned\n #\n\n finalSwapType = self.SWAP_TYPE_ORG\n\n numViol = {}\n deassign = False\n\n violResultTupleList = prochiralViolationInfo[finalSwapType][self.REQUIRES_DEASSIGNMENT_STR]\n for violationCodeToTest in self.violationCodeList:\n if violationCodeToTest in self.VIOLATION_CODE_REPORTING_LIST:\n continue\n if deassign:\n continue\n fractionByViolationCode = self.violationCodes[violationCodeToTest]['fraction']\n# numViol[violationCodeToTest] = 0\n for violationCode, violationList in violResultTupleList:\n if violationCodeToTest != violationCode:\n continue\n # Look for every violationCodeToTest (a large single model cutoff and a smaller multi model cutoff) if fraction is met.\n numViol = self.numModels - violationList.count(0.0)\n fractionFound = ( 1.0 * numViol ) / self.numModels\n if fractionFound >= fractionByViolationCode: # inclusive\n if debug:\n print \"DEBUG: DEASSIGNING BASED ON %s %s\" % (violationCode, str(prochiralViolationInfo[finalSwapType][self.REQUIRES_DEASSIGNMENT_STR]))\n deassign = True\n Deassign_count += 1\n break # no need to look at other potentially qualifying restraints\n # end for\n # end for violationCodeToTest\n\n # Retrieve the swap info...\n (swapAssignment,finalSwapType,energyDifferencePercentage,totalEnergyDiff, totalEnergyHighState, totalEnergyLowState,\n favouredPercent,swappedFavouredFraction,tripletIdx) = swapInfo[prochiralKey]\n\n chainCode = prochiralKey[0]\n seqId = prochiralKey[1]\n chemAtomSetName = prochiralKey[2].name\n ccpCode = prochiralKey[2].chemComp.ccpCode\n totalConstraints = priorityKey[0]\n ambiguousConstraints = priorityKey[1]\n\n maximum_violation = orgMaxViolation[ prochiralKey]\n violation_single_model_criterium_count = orgViolationSingleModelCriteriumCount[prochiralKey]\n violation_multi_model_criterium_count = orgViolationMultiModelCriteriumCount[ prochiralKey]\n\n # chainCode, seqId, ccpCode, chemAtomSetName, swapAssignment, favouredPercent, totalEnergyDiff, totalConstraints, unambiguousStereoConstraints, deassign, numVeryLargeViol, numLargeViol\n# dummyIdxForComparisonWithWattos = '1' # TODO: reset to sensible output. 
chainCode\n# mapChainId2Idx = { 'A': '1', 'B': '2', 'C': '3' }\n# if mapChainId2Idx.has_key(chainCode):\n# dummyIdxForComparisonWithWattos = mapChainId2Idx[chainCode]\n pseudoNameKey = '%s,%s' % (ccpCode.upper(), chemAtomSetName)\n iupacPseudo = chemAtomSetName\n if self.mapCcpn2IupacPseudo.has_key(pseudoNameKey):\n iupacPseudo = self.mapCcpn2IupacPseudo[ pseudoNameKey ]\n lineItem = \"%1s %4d %5s %-10s\" % ( chainCode, seqId, ccpCode.upper(), iupacPseudo )\n lineItem += \" %3d %-3s %7.1f %7.1f %6.1f\" % ( tripletIdx, booleanPythonToJavaStr(swapAssignment), favouredPercent, energyDifferencePercentage, totalEnergyDiff )\n lineItem += \" %6.1f %6.1f %3d\" % ( totalEnergyHighState, totalEnergyLowState, totalConstraints )\n lineItem += \" %3d\" % ( ambiguousConstraints )\n lineItem += \" %-5s %7.3f\" % ( booleanPythonToJavaStr(deassign), maximum_violation )\n lineItem += \" %3d %3d\" % ( violation_single_model_criterium_count, violation_multi_model_criterium_count)\n \n if totalConstraints:\n finalList[(chainCode,seqId,chemAtomSetName)] = lineItem\n else:\n print \"warning skipping triplet without restraints: \" + lineItem\n invalidTripletCount += 1\n # end if\n \n #\n # Now make changes in CCPN... deassignment gets priority over swapping.\n #\n\n\n if deassign:\n\n violationCode = 'xxx'\n fractionViolated = 0.00\n\n prochiralResonances = []\n for resList in self.triplets[prochiralKey].values():\n for resonance in resList:\n if not resonance in prochiralResonances:\n prochiralResonances.append(resonance)\n\n self.resetConstraintItems(allConstraintItems,prochiralResonances, prochiralKey,violationCode,fractionViolated,verbose=False)\n\n #\n # Print out for checking\n #\n \n if outputType == 'custom':\n \n print \"\"\"# Columns below (* means new):\n# 1 chainCode\n# 2 seqId\n# 3 ccpCode\n# 4 chemAtomSetName\n# 5 priority (1 was handled first)\n# 6 swapAssignment\n# 7 favouredPercent (so for the swapped state if swapped!)\n# 8 energyDifferencePercentage (*)\n# 9 totalEnergyDiff ensemble averaged\n# 10 totalEnergyHighState ensemble averaged (*)\n# 11 totalEnergyLowState ensemble averaged (*)\n# 12 totalConstraints\n# 13 ambiguousConstraints (optional)\n# 14 deassign\n# 15 maximumViolation (pre processing)\n# 16 numVeryLargeViol (post processing TODO: check)\n# 17 numLargeViol (post processing TODO: check)\n\"\"\"\n\n finalIds = finalList.keys()\n finalIds.sort()\n\n meat = ''\n\n for finalId in finalIds:\n if outputType == 'custom':\n print finalList[finalId]\n else:\n meat += str( finalList[finalId] ) + '\\n'\n\n \n #\n # NMR-STAR Wattos type output\n #\n # meat = \"\"\"\n # A 4 Met HB* 82 False 100.0 0.000 2 0 False 0.000 0 0\n # A 5 Arg HD* 81 False 100.0 0.000 4 2 False 0.000 0 0\n # A 6 Leu HB* 23 False 90.0 14.328 26 7 True 1.812 11 0\n #\n # 1 6 LEU QB 22 no 90.0 78.6 8.803 11.204 2.402 26 10 yes 2.200 11 11\n # 1 6 LEU QD 8 no 5.0 0.0 0.000 1.649 1.649 34 14 yes 1.651 19 22\n # 1 9 GLU QG 96 no 100.0 0.0 0.000 0.000 0.000 10 0 no 0.000 0 0\n #\"\"\"\n\n if outputType == 'NMRSTAR':\n\n # Let's do the same with a STAR table.\n if invalidTripletCount:\n print \"Warning: found triplets without restraints.\"\n validTripletCount = Triplet_count - invalidTripletCount\n if validTripletCount < 1:\n print \"Error: found no triplets with restraints.\"\n return True\n validTripletCount2 = len(finalIds) # double check.\n if validTripletCount != validTripletCount2:\n print \"Error: found number of triplets with restraints %d but number of report list %d\" % ( validTripletCount, validTripletCount2)\n# 
return True\n \n Swap_percentage = ( 100.0 * Swap_count ) / validTripletCount\n Deassign_percentage = ( 100.0 * Deassign_count ) / validTripletCount\n Model_count = self.numModels\n Crit_abs_e_diff = energy_abs_criterium\n Crit_rel_e_diff = energy_rel_criterium\n Crit_mdls_favor_pct = swapPercentage\n Crit_sing_mdl_viol = self.violationCodes['xl']['violation']\n Crit_multi_mdl_viol = self.violationCodes['l']['violation']\n Crit_multi_mdl_pct = self.violationCodes['l']['fraction'] * 100.0\n\n header = \"\"\"data_entry\n\n\n save_assign_stereo\n _Stereo_assign_list.Sf_category stereo_assignments\n _Stereo_assign_list.Triplet_count %s\n _Stereo_assign_list.Swap_count %s\n _Stereo_assign_list.Swap_percentage %.1f\n _Stereo_assign_list.Deassign_count %s\n _Stereo_assign_list.Deassign_percentage %.1f\n _Stereo_assign_list.Model_count %s\n _Stereo_assign_list.Total_e_low_states %.1f\n _Stereo_assign_list.Total_e_high_states %.1f\n _Stereo_assign_list.Crit_abs_e_diff %.1f\n _Stereo_assign_list.Crit_rel_e_diff %.1f\n _Stereo_assign_list.Crit_mdls_favor_pct %.1f\n _Stereo_assign_list.Crit_sing_mdl_viol %.3f\n _Stereo_assign_list.Crit_multi_mdl_viol %.3f\n _Stereo_assign_list.Crit_multi_mdl_pct %.1f\"\"\" % (\n validTripletCount,\n Swap_count,\n Swap_percentage,\n Deassign_count,\n Deassign_percentage,\n Model_count,\n Total_e_low_states,\n Total_e_high_states,\n Crit_abs_e_diff,\n Crit_rel_e_diff,\n Crit_mdls_favor_pct,\n Crit_sing_mdl_viol,\n Crit_multi_mdl_viol,\n Crit_multi_mdl_pct\n )\n\n\n explanations = \"\"\"\n _Stereo_assign_list.Details\n;\n\nDescription of the tags in this list:\n* 1 * NMR-STAR 3 administrative tag\n* 2 * NMR-STAR 3 administrative tag\n* 3 * NMR-STAR 3 administrative tag\n* 4 * Number of triplets (atom-group pair and pseudo)\n* 5 * Number of triplets that were swapped\n* 6 * Percentage of triplets that were swapped\n* 7 * Number of deassigned triplets\n* 8 * Percentage of deassigned triplets\n* 9 * Number of models in ensemble\n* 10 * Energy of the states with the lower energies summed for all triplets (Ang.**2) ensemble averaged\n* 11 * Energy of the states with the higher energies summed for all triplets (Ang.**2) ensemble averaged\n* 12 * Item 9-8\n* 13 * Criterium for swapping assignment on the absolute energy difference (Ang.**2)\n* 14 * Criterium for swapping assignment on the relative energy difference (Ang.**2)\n* 15 * Criterium for swapping assignment on the percentage of models favoring a swap\n* 16 * Criterium for deassignment on a single model violation (Ang.)\n* 17 * Criterium for deassignment on a multiple model violation (Ang.)\n* 18 * Criterium for deassignment on a percentage of models\n* 19 * this tag\n\nDescription of the tags in the table below:\n* 1 * Chain identifier (can be absent if none defined)\n* 2 * Residue number\n* 3 * Residue name\n* 4 * Name of pseudoatom representing the triplet\n* 5 * Ordinal number of assignment (1 is assigned first)\n* 6 * 'yes' if assignment state is swapped with respect to restraint file\n* 7 * Percentage of models in which the assignment with the lowest\n overall energy is favored\n* 8 * Percentage of difference between lowest and highest overall energy\n with respect to the highest overall energy\n* 9 * Difference between lowest and highest overall energy ensemble averaged\n* 10 * Energy of the highest overall energy state (Ang.**2) ensemble averaged\n* 11 * Energy of the lowest overall energy state (Ang.**2) ensemble averaged\n* 12 * Number of restraints involved with the triplet. 
The highest ranking\n triplet on this number, is assigned first (optional)\n* 13 * Number of restraints involved with the triplet that are ambiguous\n besides the ambiguity from this triplet\n* 14 * 'yes' if restraints included in this triplet are deassigned\n* 15 * Maximum unaveraged violation before deassignment (Ang.)\n* 16 * Number of violated restraints above threshold for a single model\n before deassignment (given by Single_mdl_crit_count)\n* 17 * Number of violated restraints above threshold for a multiple models\n before deassignment (given by Multi_mdl_crit_count)\n;\n\n\n loop_\n _Stereo_assign.Chain_ID\n _Stereo_assign.Comp_index_ID\n _Stereo_assign.Comp_ID\n _Stereo_assign.Pseudo_Atom_ID\n _Stereo_assign.Num\n _Stereo_assign.Swapped\n _Stereo_assign.Models_favoring_pct\n _Stereo_assign.Energy_difference_pct\n _Stereo_assign.Energy_difference\n _Stereo_assign.Energy_high_state\n _Stereo_assign.Energy_low_state\n _Stereo_assign.Constraint_count\n \"\"\"\n # if not self.compareWithWattos:\n explanations += \" _Stereo_assign.Constraint_ambi_count\\n\"\n # end if\n explanations += \"\"\" _Stereo_assign.Deassigned\n _Stereo_assign.Violation_max\n _Stereo_assign.Single_mdl_crit_count\n _Stereo_assign.Multi_mdl_crit_count\n\n\"\"\"\n\n footer = \"\"\" stop_\n\n save_\n\n \"\"\"\n\n\n star_text = header + explanations + meat + footer\n\n starFile = File()\n if starFile.read(text=star_text):\n print \"Error: reading STAR text by STAR api.\"\n return True\n if starFile.check_integrity():\n print \"Error: STAR text failed integrity check.\"\n return True\n starFile.filename = outputFileName\n if starFile.write():\n print \"Error: writing file %\" % outputFileName\n return True\n if not os.path.exists(outputFileName):\n print \"Error: failed to find STAR file %s\" % outputFileName\n return True\n# print \"Written meta data to STAR file: %s\" % outputFileName # already printed by write()\n \n \n self.storeToAppData( star_text )\n # end def", "def __circle_collision(self, circle):\n raise Exception('--- This methods have not been implemented yet! 
Use circle_collider instead ---')", "def MR_rate_clean(self, mech_rates):\n for rate_tuple in self['MR_rate']:\n \n if rate_tuple not in mech_rates.keys():\n self['MR_rate'].remove(rate_tuple)\n print (\"Removed \" + str(rate_tuple) + \" from MR_rate\")\n \n #check for rate to change in MR params\n for _rtc in self['rate_to_change']:\n rtc_tuple = r_tuple_from_r_name(mech_rates, _rtc)\n \n if rtc_tuple not in self['MR_avoid'] and not self['MR_avoid_preserve']:\n #this blanket hack will remove any special info in MR_avoid\n #flag can be used to make MR_avoid invulnerable\n \n self['MR_avoid'].append(rtc_tuple)\n print (\"Adding \"+str(rtc_tuple)+\" to MR_avoid (now: \"+ str(self['MR_avoid'])+\" )\\n\")\n \n #take the rate to change out of MR use \n if rtc_tuple in self['MR_rate']:\n self['MR_rate'].remove(rtc_tuple)", "def destroy(self):\r\n if self._group:\r\n if self._machine:\r\n self._group.unregisterContainer(self)\r\n self._machine.unregisterContainer(self)\r\n self._machine = None\r\n\r\n self._group = None\r\n\r\n super(Container, self).destroy()\r\n else:\r\n print('container.Container destroy() called multiple times...')", "def kill(self):\n for piece in self.board.pieces:\n piece.destroyed = True", "def collide(self, xvel, yvel,\n space_mask_right, space_mask_left, space_mask_up, space_mask_bottom,\n reverse_x=False):\n for game_object in pygame.sprite.spritecollide(self, game_objects, False,\n collided=pygame.sprite.collide_mask):\n if isinstance(self, Player): # Если экземпляр класса основной игрок\n if game_object.collision and game_object.collision_do_kill:\n if not self.damage_mode and not self.death_mode:\n self.lives -= 1\n if self.lives > 0:\n self.damage()\n else:\n self.death()\n self.visible_hearts()\n if isinstance(game_object, Heart): # Если игрок столкнулся с бонусной жизнью\n self.lives += 1\n game_object.kill()\n self.visible_hearts()\n if isinstance(game_object, CheckPoint): # Если игрок столкнулся с чек-поинтом\n camera.set_memory(0, 0)\n width, height = game_object.rect.size\n image = game_object.image\n for y in range(width):\n for x in range(height):\n pixel = image.get_at((x, y))\n if pixel.a != 0:\n image.set_at((x, y), (100, 100, 100, 200))\n game_object.mask = pygame.mask.Mask((width, height), fill=0)\n game_object.collision = False\n if isinstance(game_object, Key): # Если игрок столкнулся с ключом от двери\n game_object.kill()\n self.key = True\n self.visible_key()\n if isinstance(game_object, Door): # Если игрок столкнулся с дверью\n if self.key:\n self.finish = True\n if isinstance(game_object, Coin): # Если игрок столкнулся с монетой\n self.coins += 1\n game_object.kill()\n if isinstance(game_object, Crystal): # Если игрок столкнулся с кристаллом\n self.crystals += 1\n game_object.kill()\n if isinstance(game_object, ButtonJump): # Если игрок столкнулся с батутом\n if yvel:\n self.yvel = -JUMP_POWER * JUMP_BOOST\n return\n if isinstance(game_object, Stairs): # Если игрок столкнулся с лестницей\n keys_status = pygame.key.get_pressed()\n if keys_status[pygame.K_w]:\n self.yvel = -MOVE_SPEED\n elif keys_status[pygame.K_s]:\n self.yvel = MOVE_SPEED\n else:\n self.yvel = 0\n if pygame.key.get_mods() & pygame.KMOD_LSHIFT:\n self.yvel *= SPEED_UP_BOOST\n self.on_stairs = True\n if not game_object.collision_player: # Если у объекта отключена коллизия с игроком\n continue\n else: # Если экземпляр класса основной противник\n if not game_object.collision_enemy: # Если у объекта отключена коллизия с врагом\n continue\n\n if reverse_x and xvel != 0:\n self.xvel = 
-self.xvel\n if xvel > 0:\n self.rect.right = game_object.rect.left + space_mask_right - 1\n if xvel < 0:\n self.rect.left = game_object.rect.right - space_mask_left + 1\n if yvel > 0:\n self.rect.bottom = game_object.rect.top + space_mask_bottom - 1\n self.on_ground = True\n self.yvel = 0\n if yvel < 0:\n self.rect.top = game_object.rect.bottom - space_mask_up + 1\n self.yvel = 0", "def applyMorphologicalCleaning(self, image):", "def _delete_rights(self):\n for right in self.rights:\n right.delete()", "def _shipCollision(self):\r\n for bolt in self._bolts:\r\n if self._ship.collides(bolt):\r\n self._ship = None\r\n if not self._shipexplode is None:\r\n self._shipexplode.play()\r\n self._bolts.remove(bolt)\r\n self._lives -= 1\r\n return", "def test_group(self):\n # leave out particle 0\n group = hoomd.group.tags(1,2)\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, ( 0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (0,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))", "def unmask(self, near_x, near_y, radius=BRIGHTEN_RECT_TEMP, is_lighter=False):\n #self._actual_unmask(self.last_brighten, self.masked_image, radius=radius)\n \n brighten_rect = pygame.draw.circle(self.image, (255, 255,0), (near_x, near_y), radius/2)\n \n if is_lighter:\n brighten_rect = pygame.draw.circle(self.image, (50, 0, 0), (near_x, near_y), 40)\n brighten_rect = pygame.draw.circle(self.image, (255, 255,0), (near_x, near_y), 38)\n \n self.game.dirty_rects.append(brighten_rect)\n \n self.last_brighten = (near_x, near_y)", "def cb_receive_node_destroy(cls, session, node_id):\n # Remove item from collection of properties\n wm = bpy.context.window_manager\n index = 0\n for item in wm.verse_avatars:\n if item.node_id == node_id:\n wm.verse_avatars.remove(index)\n if wm.cur_verse_avatar_index >= index:\n wm.cur_verse_avatar_index -= 1\n break\n index += 1\n cls.__other_views.pop(node_id)\n # Force redraw of 3D view\n ui.update_all_views(('VIEW_3D',))\n return super(AvatarView, cls).cb_receive_node_destroy(session, node_id)", "def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add 
metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)", "def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))", "def resetCoronalSegment(self):\r\n #research\r\n profprint()\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(0)\r\n sGreen.SetOrientationToCoronal()\r\n #sw = slicer.app.layoutManager().sliceWidget(\"Green\")\r\n #sw.fitSliceToBackground()\r\n sGreen.Modified()", "def test_clashing_atoms():\n benzene_path = examples_paths()['benzene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir, keep_openeye=True)\n system_id = 'explicit-system'\n system_description = yaml_content['systems'][system_id]\n system_description['pack'] = True\n system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])\n\n # Sanity check: at the beginning molecules clash\n toluene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(toluene_path, molecule_idx=0))\n benzene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(benzene_path, molecule_idx=0))\n assert pipeline.compute_min_dist(toluene_pos, benzene_pos) < pipeline.SetupDatabase.CLASH_THRESHOLD\n\n exp_builder = 
ExperimentBuilder(yaml_content)\n\n for sys_id in [system_id + '_vacuum', system_id + '_PME']:\n system_dir = os.path.dirname(\n exp_builder._db.get_system(sys_id)[0].position_path)\n\n # Get positions of molecules in the final system\n prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))\n inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))\n positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)\n topography = Topography(prmtop.topology, ligand_atoms='resname TOL')\n benzene_pos2 = positions.take(topography.receptor_atoms, axis=0)\n toluene_pos2 = positions.take(topography.ligand_atoms, axis=0)\n # atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')\n # benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)\n # toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)\n\n # Test that clashes are resolved in the system\n min_dist, max_dist = pipeline.compute_min_max_dist(toluene_pos2, benzene_pos2)\n assert min_dist >= pipeline.SetupDatabase.CLASH_THRESHOLD\n\n # For solvent we check that molecule is within the box\n if sys_id == system_id + '_PME':\n assert max_dist <= exp_builder._db.solvents['PME']['clearance'].value_in_unit(unit.angstrom)", "def die(self):\n self.pjs.bombermen.remove(self)\n for block in self.physics.blocks[self.stype]:\n if block == self.rects[0]:\n self.physics.blocks[self.stype].remove(block)", "def free_curvature(self) -> None:\n self.n1.free = True\n self.n2.free = True", "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def collision(self):\n raise NotImplementedError", "def RemoveInitiators(self, initiatorList):\n # Append the IQNs to the existing list\n full_iqn_list = self.initiators\n for iqn in initiatorList:\n if iqn.lower() in full_iqn_list:\n full_iqn_list.remove(iqn)\n else:\n mylog.debug(iqn + \" is already not in group \" + self.name)\n\n # Modify the VAG on the cluster\n params = {}\n params[\"volumeAccessGroupID\"] = self.ID\n params[\"initiators\"] = full_iqn_list\n libsf.CallApiMethod(self.mvip, self.username, self.password, \"ModifyVolumeAccessGroup\", params, ApiVersion=5.0)", "def reset_independent_structured_mask(self):\n assert self._scheduler.enable_structured_masking\n self._structured_mask_handler.update_independent_structured_mask()" ]
[ "0.6586952", "0.58681905", "0.5560465", "0.5431528", "0.5087718", "0.49389872", "0.49223432", "0.4907192", "0.48812112", "0.4859121", "0.48425356", "0.48171473", "0.48001352", "0.47328418", "0.46946502", "0.46811792", "0.46665424", "0.46157676", "0.4614911", "0.4604623", "0.45941934", "0.45920017", "0.45833287", "0.45755368", "0.4573333", "0.456913", "0.45406634", "0.45349336", "0.45329696", "0.45296168", "0.45226562", "0.4495121", "0.44930303", "0.44864413", "0.4479615", "0.4478997", "0.44666323", "0.44635606", "0.44634673", "0.44600865", "0.4456746", "0.44522804", "0.44460464", "0.4439358", "0.44309115", "0.44207045", "0.44188923", "0.44162977", "0.4399674", "0.4399244", "0.43964556", "0.43895632", "0.4378639", "0.43734765", "0.43705055", "0.43622583", "0.43488672", "0.43397182", "0.43354225", "0.43327802", "0.4326977", "0.4318558", "0.4312412", "0.43098536", "0.43055475", "0.43001786", "0.4299793", "0.4298964", "0.42981273", "0.42928502", "0.42885503", "0.42882985", "0.4283684", "0.42819902", "0.42796868", "0.42789078", "0.42688915", "0.42653537", "0.42612427", "0.42607468", "0.42603013", "0.4259189", "0.4256731", "0.42517054", "0.42511025", "0.42403892", "0.42370915", "0.4233817", "0.42218432", "0.4221701", "0.42092556", "0.4205076", "0.4204747", "0.42038238", "0.4199344", "0.4198795", "0.4198099", "0.41972733", "0.4196969", "0.4196882" ]
0.6826048
0
StereoGroup atoms are in the reaction, and the reaction creates the specified chirality at the stereo centers > remove the stereo center from > invalidate stereo group
def test_reaction_defines_stereo(self):
    products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')
    self.assertEqual(products, 'F[C@@H](Cl)Br')
    products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')
    self.assertEqual(products, 'F[C@@H](Cl)Br')
    products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')
    self.assertEqual(products, 'F[C@@H](Cl)Br')
    # Remove group with defined stereo
    products = _reactAndSummarize('[C:1]F>>[C@@:1]F',
                                  'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')
    self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')
    # Remove atoms with defined stereo from group
    products = _reactAndSummarize('[C:1]F>>[C@@:1]F',
                                  'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')
    self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')", "def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )", "def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)", "def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + 
\".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def test_reaction_preserves_stereo(self):\n reaction = '[C@:1]>>[C@:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def replace(self):\n if self.removed:\n self.coordinates = [[(self.player * 15 - 15), 0], [(self.player * 15 - 15), 1],\n [(self.player * 15 - 15), 2], [(self.player * 15 - 15), 3]]\n for i in self.coordinates:\n self.collision_boxes.append(rect.Rect(i[0] * 64, i[1] * 64, 64, 64))\n self.removed=False", "def atomisticSphere (flag, filin, filout, max_distance = 15, analysis = 1, atom_central = \"mean_point\", debug = 1):\n \n list_atom_pocket = parsePDB.loadCoordSectionPDB(filin)\n dico_stock_count = tool.generateStructCompositionAtomistic (max_distance, 3)\n \n if atom_central == \"mean_point\" : \n central_point = generateMeansPointPocket (list_atom_pocket)\n # else append barycenter pocket calculated by RADI\n \n for atom in list_atom_pocket : \n distance = parsePDB.distanceTwoatoms(central_point, atom)\n # print distance\n element = atom[\"element\"]\n name_atom = atom[\"name\"]\n residue = tool.transformAA(atom[\"resName\"])\n \n for distance_key in dico_stock_count.keys() : \n if distance <= distance_key or distance > max_distance : \n dico_stock_count [distance_key] [\"atom\"] = dico_stock_count [distance_key] [\"atom\"] + 1\n if element == \"C\" : \n dico_stock_count [distance_key] [\"carbon\"] = dico_stock_count [distance_key] [\"carbon\"] + 1\n elif element == \"N\" : \n dico_stock_count [distance_key] [\"nitrogen\"] = dico_stock_count [distance_key] [\"nitrogen\"] + 1\n elif element == \"S\" : \n dico_stock_count [distance_key] [\"sulfur\"] = dico_stock_count [distance_key] [\"sulfur\"] + 1\n elif element == \"O\" : \n dico_stock_count [distance_key] [\"oxygen\"] = dico_stock_count [distance_key] [\"oxygen\"] + 1\n elif element == \"H\" : \n dico_stock_count [distance_key] [\"hydrogen\"] = dico_stock_count [distance_key] [\"hydrogen\"] + 1\n \n if residue in dico_Hacceptor.keys () : \n if name_atom in dico_Hacceptor[residue] : \n dico_stock_count [distance_key] [\"hbond_acceptor\"] = dico_stock_count [distance_key] [\"hbond_acceptor\"] + 1\n \n if residue in dico_atom_Car : \n if name_atom in dico_atom_Car[residue] : \n dico_stock_count [distance_key] [\"aromatic\"] = dico_stock_count [distance_key] 
[\"aromatic\"] + 1\n \n if residue in dico_atom_hydrophobic : \n if name_atom in dico_atom_hydrophobic[residue] : \n dico_stock_count [distance_key] [\"hydrophobic\"] = dico_stock_count [distance_key] [\"hydrophobic\"] + 1\n \n if residue in dico_atom_Carg : \n if name_atom in dico_atom_Carg[residue] : \n dico_stock_count [distance_key] [\"alcool\"] = dico_stock_count [distance_key] [\"alcool\"] + 1\n \n \n if residue in dico_Hdonor.keys () : \n if name_atom in dico_Hdonor[residue] : \n dico_stock_count [distance_key] [\"hbond_donor\"] = dico_stock_count [distance_key] [\"hbond_donor\"] + 1\n \n if name_atom == \"CA\" or name_atom == \"O\" or name_atom == \"C\" or name_atom == \"N\" or name_atom == \"H\" or name_atom == \"HA\" :\n dico_stock_count [distance_key] [\"main_chain\"] = dico_stock_count [distance_key] [\"main_chain\"] + 1\n else : \n dico_stock_count [distance_key] [\"side_chain\"] = dico_stock_count [distance_key] [\"side_chain\"] + 1\n \n for distance_key in dico_stock_count.keys () : \n nb_atom = float(dico_stock_count [distance_key] [\"atom\"])\n if nb_atom == 0 : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n \n else : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + str(nb_atom) + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + str (dico_stock_count [distance_key] [\"side_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"main_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"sulfur\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"carbon\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"nitrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"oxygen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_acceptor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count 
[distance_key] [\"hbond_donor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"alcool\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrophobic\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"aromatic\"] / nb_atom) + \"\\n\")", "def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False", "def cleaveSurfAtom(entry,max_bonds=1,supercell=2,group_structs=True):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # If the crystal is 3D\n if results[0]=='conventional':\n struct = copy.deepcopy(entry[0])\n og_binary_matrix = getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(og_binary_matrix)))\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n bonds = []\n \n # Get list of bonded atoms\n for i in range(len(og_binary_matrix)):\n for pair in [(i,j) for j in range(i+1,len(og_binary_matrix)) \n if og_binary_matrix[i][j]==1]:\n bonds.append(pair)\n allCombos = []\n combNum = 0\n \n # Get list of all combinations of bonds\n for i in range(max_bonds+1):\n for com in list(itertools.combinations(bonds,i)):\n allCombos.append(com)\n combNum+=1\n\n combos = allCombos\n jjj=0\n all_structs = []\n \n # For each bond combination\n for combo in combos:\n broken=0\n jjj+=1\n modified_matrix = np.array(binary_matrix)\n for pair in combo:\n i,j = pair\n i=i*supercell**3\n j=j*supercell**3\n # Break bonds in the loop\n 
for shift in range(supercell**3):\n for shift2 in range(supercell**3):\n modified_matrix[i+shift][j+shift2]=0\n modified_matrix[j+shift][i+shift2]=0\n\n new_num_bonds=sum(sum(modified_matrix))\n broken=int(og_num_bonds-new_num_bonds) \n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n # If the new set of atoms is not empty\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n # If formula of new network matches the original cell\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n else:\n print('Material is does not have a 3D motiif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def pseudopotentialise_molecule(self, sysargs=None, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_potentialise = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_potentialise = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising carbon atoms %s ...' 
% [atom['#'] for atom in atoms_to_potentialise])\n\n potential_coords_list = []\n\n for atom in atoms_to_potentialise:\n distanced_atom_list = self.order_atoms_by_distance_from(atom['#'])\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n if len(distanced_carbon_list) == 1:\n primary_vector = None\n for non_c_atom in distanced_atom_list[1:4]:\n if non_c_atom['el'] != 'h':\n primary_vector = self.vectorise_atom(non_c_atom['#']) - self.vectorise_atom(atom['#'])\n if primary_vector is None:\n primary_vector = self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#'])\n else:\n primary_vector = self.vectorise_atom(distanced_carbon_list[1]['#']) - self.vectorise_atom(atom['#'])\n\n normal_vector = numpy.cross(\n self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#']),\n self.vectorise_atom(distanced_atom_list[2]['#']) - self.vectorise_atom(atom['#'])\n )\n\n primary_potential_vector = self.lengtherise_vector(primary_vector, self.atom_potential_set_distance)\n potential_set_split_vector = self.lengtherise_vector(normal_vector, self.potential_set_split_distance)\n\n relative_potential_vectors = [\n primary_potential_vector + potential_set_split_vector,\n primary_potential_vector - potential_set_split_vector\n ]\n\n for potential_set in range(self.no_potential_sets_per_atom-1):\n\n pps_positive = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-2],\n )\n pps_negative = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-1]\n )\n\n relative_potential_vectors.append(pps_positive)\n relative_potential_vectors.append(pps_negative)\n\n if self.add_primary_vector_potentials_as_coords is False:\n del relative_potential_vectors[0]\n del relative_potential_vectors[0]\n\n # potential coords are still relative to their atom, now make them real.\n for vector in relative_potential_vectors:\n potential_coords_list.append(\n {'#': 0, 'el': self.sp2_pseudo_element, 'x': vector[0]+atom['x'], 'y': vector[1]+atom['y'], 'z': vector[2]+atom['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' hydrogen atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)", "def test_parameterize_mol_missing_stereo_rdkit(self, force_field):\n toolkit_registry = ToolkitRegistry(\n toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n )\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def update(self):\n #self.model.states[Polymerase].molecules\n\n DNA_obj = self.model.states[DNA].get_molecules(\"DNA\")[0]\n\n for i in range(1): #500\n DNA_obj.bind_polymerase()\n \n for i in range(50): #50\n DNA_obj.move_polymerase()\n #print(DNA_obj.poly_transcript)\n \n\n\n\n #print(self.test.poly_status)\n #print(DNA_obj.poly_pos)", "def mutate_residue(pose, mutant_position, mutant_aa,\n pack_radius = 0.0, pack_scorefxn = '' ):\n #### a MutateResidue Mover exists similar to this except it does not pack\n #### the area around the mutant residue (no pack_radius feature)\n #mutator = MutateResidue(mutant_position, mutant_aa)\n 
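# --- Editorial aside: a minimal sketch of the allow_undefined_stereo pattern the
# --- parameterization test above exercises. The openff-toolkit import path is an
# --- assumption implied by the test's use of Molecule and Topology.
from openff.toolkit.topology import Molecule, Topology

# Without the flag, parsing this lactone raises UndefinedStereochemistryError,
# because the ring carbon is an unassigned stereocenter.
molecule = Molecule.from_smiles("CC1CCC(=O)O1", allow_undefined_stereo=True)
topology = Topology.from_molecules([molecule])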
#mutator.apply(test_pose)\n\n if pose.is_fullatom() == False:\n raise IOError( 'mutate_residue only works with fullatom poses' )\n\n\n # create a standard scorefxn by default\n if not pack_scorefxn:\n pack_scorefxn = rosetta.core.scoring.get_score_function()\n\n task = pyrosetta.standard_packer_task(pose)\n\n # the Vector1 of booleans (a specific object) is needed for specifying the\n # mutation, this demonstrates another more direct method of setting\n # PackerTask options for design\n aa_bool = rosetta.utility.vector1_bool()\n # PyRosetta uses several ways of tracking amino acids (ResidueTypes)\n # the numbers 1-20 correspond individually to the 20 proteogenic amino acids\n # aa_from_oneletter returns the integer representation of an amino acid\n # from its one letter code\n # convert mutant_aa to its integer representation\n mutant_aa = rosetta.core.chemical.aa_from_oneletter_code(mutant_aa)\n\n # mutation is performed by using a PackerTask with only the mutant\n # amino acid available during design\n # to do this, construct a Vector1 of booleans indicating which amino acid\n # (by its numerical designation, see above) to allow\n for i in range(1, 21):\n # in Python, logical expressions are evaluated with priority, thus the\n # line below appends to aa_bool the truth (True or False) of the\n # statement i == mutant_aa\n aa_bool.append( i == int(mutant_aa) )\n\n # modify the mutating residue's assignment in the PackerTask using the\n # Vector1 of booleans across the proteogenic amino acids\n task.nonconst_residue_task(mutant_position\n ).restrict_absent_canonical_aas(aa_bool)\n\n # prevent residues from packing by setting the per-residue \"options\" of\n # the PackerTask\n restrict_non_nbrs_from_repacking(pose, mutant_position, task, pack_radius)\n\n # apply the mutation and pack nearby residues\n #print task\n packer = rosetta.protocols.simple_moves.PackRotamersMover(pack_scorefxn, task)\n packer.apply(pose)", "def concerted_unimolecular_elimination(rct_zmas, prd_zmas):\n\n # Initialize info for the returns\n ret = None, None, None, None, None\n finish_build = True\n\n # Attempt to build appropriate z-matrix\n prd_zmas, prd_gras = shifted_standard_zmas_graphs(\n prd_zmas, remove_stereo=True)\n if len(rct_zmas) == 1:\n count = 1\n while True:\n rct_zmas, rct_gras = shifted_standard_zmas_graphs(\n rct_zmas, remove_stereo=True)\n init_zma, = rct_zmas\n\n tras, _, _ = automol.graph.reac.elimination(rct_gras, prd_gras)\n if tras is not None:\n if len(tras[0]) == 1:\n tras = [tras]\n min_dist = 100.\n frm_bnd_key = None\n for tra_i in tras:\n # Get the bond formation and breaking keys\n bnd_key, = automol.graph.trans.formed_bond_keys(tra_i)\n geo = automol.zmatrix.geometry(rct_zmas[0])\n dist = automol.geom.distance(geo, *list(bnd_key))\n if dist < min_dist:\n min_dist = dist\n frm_bnd_key = bnd_key\n tra = tra_i\n brk_keys = automol.graph.trans.broken_bond_keys(tra)\n brk_bnd_key1, brk_bnd_key2 = brk_keys\n init_zma, = rct_zmas\n\n\n # Get index for migrating atom (or bond-form atom in group)\n for bnd_key in (brk_bnd_key1, brk_bnd_key2):\n if bnd_key & frm_bnd_key:\n mig_key = next(iter(bnd_key & frm_bnd_key))\n for key in frm_bnd_key:\n if key != mig_key:\n a1_idx = key\n\n # Get chain for redefining the rc1_atm1_key z-matrix entries\n _, gras = shifted_standard_zmas_graphs(\n [init_zma], remove_stereo=True)\n gra = functools.reduce(automol.graph.union, gras)\n xgr1, = automol.graph.connected_components(gra)\n atm1_neighbors = _atom_neighbor_keys(xgr1)[a1_idx]\n for idx in atm1_neighbors:\n 
num_keys = len(_atom_neighbor_keys(xgr1)[idx])\n if idx != mig_key and num_keys > 1:\n a2_idx = idx\n atm2_neighbors = _atom_neighbor_keys(xgr1)[a2_idx]\n for idx in atm2_neighbors:\n if idx not in (mig_key, a1_idx):\n a3_idx = idx\n\n mig_redef_keys = (a1_idx, a2_idx, a3_idx)\n\n # determine if the zmatrix needs to be rebuilt by x2z\n # determines if the hydrogen atom is used to define other atoms\n rebuild = False\n if any(idx > mig_key for idx in mig_redef_keys):\n rebuild = True\n\n # rebuild zmat and go through while loop again if needed\n # shift order of cartesian coords & rerun x2z to get a new zmat\n # else go to next stage\n if rebuild:\n reord_zma = reorder_zmatrix_for_migration(\n init_zma, a1_idx, mig_key)\n rct_zmas = [reord_zma]\n count += 1\n if count == 3:\n finish_build = False\n break\n else:\n rct_zma = init_zma\n finish_build = True\n break\n else:\n finish_build = False\n\n # If z-mat with good order found, finish building it\n if finish_build:\n\n # determine the new coordinates\n rct_geo = automol.zmatrix.geometry(rct_zma)\n distance = automol.geom.distance(\n rct_geo, mig_key, a1_idx)\n angle = automol.geom.central_angle(\n rct_geo, mig_key, a1_idx, a2_idx)\n dihedral = automol.geom.dihedral_angle(\n rct_geo, mig_key, a1_idx, a2_idx, a3_idx)\n # Reset the keys for the migrating H atom\n new_idxs = (a1_idx, a2_idx, a3_idx)\n key_dct = {mig_key: new_idxs}\n ts_zma = automol.zmatrix.set_keys(rct_zma, key_dct)\n\n # Reset the values in the value dict\n mig_names = automol.zmatrix.name_matrix(ts_zma)[mig_key]\n ts_zma = automol.zmatrix.set_values(\n ts_zma, {mig_names[0]: distance,\n mig_names[1]: angle,\n mig_names[2]: dihedral}\n )\n\n # standardize the ts zmat and get tors and dist coords\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n dist_coo_key = tuple(reversed(sorted(frm_bnd_key)))\n dist_name = next(coo_name for coo_name, coo_keys in coo_dct.items()\n if dist_coo_key in coo_keys)\n ts_name_dct = automol.zmatrix.standard_names(ts_zma)\n dist_name = ts_name_dct[dist_name]\n ts_zma = automol.zmatrix.standard_form(ts_zma)\n\n # Get the name of the coordinate of the other bond that is breaking\n brk_dist_name = None\n for brk_key in (brk_bnd_key1, brk_bnd_key2):\n if not brk_key.intersection(frm_bnd_key):\n brk_dist_name = automol.zmatrix.bond_key_from_idxs(\n ts_zma, brk_key)\n\n # Add second attempt to get brk_dist_name\n if brk_dist_name is None:\n brk_dist_names = [\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key1),\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key2)\n ]\n # Grab the name that is not None\n for name in brk_dist_names:\n if name is not None:\n brk_dist_name = name\n\n # get full set of potential torsional coordinates\n pot_tors_names = automol.zmatrix.torsion_coordinate_names(rct_zma)\n\n # remove the torsional coordinates that would break reaction coordinate\n gra = automol.zmatrix.graph(ts_zma, remove_stereo=True)\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n tors_names = []\n for tors_name in pot_tors_names:\n axis = coo_dct[tors_name][0][1:3]\n grp1 = [axis[1]] + (\n list(automol.graph.branch_atom_keys(gra, axis[0], axis) -\n set(axis)))\n grp2 = [axis[0]] + (\n list(automol.graph.branch_atom_keys(gra, axis[1], axis) -\n set(axis)))\n if not ((mig_key in grp1 and a1_idx in grp2) or\n (mig_key in grp2 and a1_idx in grp1)):\n tors_names.append(tors_name)\n\n # Get reactants graph\n _, rct_gras = shifted_standard_zmas_graphs(\n [rct_zma], remove_stereo=True)\n rcts_gra = automol.graph.union_from_sequence(rct_gras)\n\n 
brk_bnd_key1 = shift_vals_from_dummy(brk_bnd_key1, ts_zma)\n brk_bnd_key2 = shift_vals_from_dummy(brk_bnd_key2, ts_zma)\n brk_bnd_keys = frozenset({brk_bnd_key1, brk_bnd_key2})\n frm_bnd_key = shift_vals_from_dummy(frm_bnd_key, ts_zma)\n\n ret = ts_zma, dist_name, brk_dist_name, brk_bnd_keys, frm_bnd_key, tors_names, rcts_gra\n\n return ret", "def release_atoms(self):\r\n\t\thole_size = self.box_size/2\r\n\t\thole_left = self.box_size/2 - hole_size/2\r\n\t\thole_right = self.box_size/2 + hole_size/2\r\n\r\n\t\tx_vals = (self.pos.x > hole_left) & (self.pos.x < hole_right)\r\n\t\ty_vals = (self.pos.y > hole_left) & (self.pos.y < hole_right)\r\n\t\tindices = (self.pos.z < 0) & x_vals & y_vals\r\n\r\n\t\tescaped_count = np.sum(indices)\r\n\t\tlost_momentum = self.atom_mass*np.sum(self.vel.z)\r\n\r\n\t\t# this would look bettes as self.vel.values[:, indices] = ... , but that is actualy noticeably slower\r\n\t\tself.pos.x[indices], self.pos.y[indices], self.pos.z[indices] = *generator.uniform(hole_left, hole_right, size=(2, escaped_count)), np.full(escaped_count, self.box_size)\r\n\t\tif self.change_velocities:\r\n\t\t\t# changing the velocity makes the temperature decrease over time\r\n\t\t\tself.vel.x[indices], self.vel.y[indices], self.vel.z[indices] = generator.uniform(0, self.box_size, size=(3, escaped_count))\r\n\r\n\t\treturn escaped_count, lost_momentum", "def build_reactive_complex(self, settings_manager: SettingsManager):\n import scine_database as db\n import scine_utilities as utils\n\n start_structure_ids = self._calculation.get_structures()\n start_structures = [db.Structure(sid, self._structures) for sid in start_structure_ids]\n self.save_initial_graphs_and_charges(settings_manager, start_structures)\n if len(start_structures) == 1:\n # For an intramolecular structure it is sufficient to provide one\n # structure that is both, start structure and reactive complex\n structure = start_structures[0]\n atoms = structure.get_atoms()\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n if len(start_structures) == 2:\n # Intermolecular reactions reactions require in situ generation of the reactive complex\n s0 = start_structures[0]\n s1 = start_structures[1]\n\n # Get coordinates\n atoms1 = s0.get_atoms()\n atoms2 = s1.get_atoms()\n elements1 = atoms1.elements\n elements2 = atoms2.elements\n coordinates1 = atoms1.positions\n coordinates2 = atoms2.positions\n # Calculate reactive center mean position\n if self.exploration_key + \"_lhs_list\" in self.settings[self.exploration_key]:\n sites1 = self.settings[self.exploration_key][self.exploration_key + \"_lhs_list\"]\n sites2 = self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"]\n self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"] = list(\n idx + len(elements1) for idx in sites2\n )\n elif \"nt_associations\" in self.settings[self.exploration_key]:\n sites1 = []\n sites2 = []\n nAtoms1 = len(atoms1.elements)\n for i in range(0, len(self.settings[self.exploration_key][\"nt_associations\"]), 2):\n at1 = self.settings[self.exploration_key][\"nt_associations\"][i]\n at2 = self.settings[self.exploration_key][\"nt_associations\"][i + 1]\n if at1 >= nAtoms1 > at2:\n sites1.append(at2)\n sites2.append(at1 - nAtoms1)\n if at2 >= nAtoms1 > at1:\n sites1.append(at1)\n sites2.append(at2 - nAtoms1)\n else:\n self.raise_named_exception(\n \"Reactive complex can not be build: missing reactive atoms list(s).\"\n )\n reactive_center1 = 
np.mean(coordinates1[sites1], axis=0)\n reactive_center2 = np.mean(coordinates2[sites2], axis=0)\n # Place reactive center mean position into origin\n coord1 = coordinates1 - reactive_center1\n coord2 = coordinates2 - reactive_center2\n positions = self._orient_coordinates(coord1, coord2)\n atoms = utils.AtomCollection(elements1 + elements2, positions)\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n # should not be reachable\n self.raise_named_exception(\n \"Reactive complexes built from more than 2 structures are not supported.\"\n )", "def remove_mass(self, *focal_elements):\n for focal in focal_elements:\n if focal[0] in self.focals:\n self.focals[focal[0]] -= focal[1]\n else:\n self.focals[focal[0]] = -focal[1]", "def remove_clashes(self):\n dihe_parameters = self.myGlycosylator.builder.Parameters.parameters['DIHEDRALS']\n vwd_parameters = self.myGlycosylator.builder.Parameters.parameters['NONBONDED']\n \n static_glycans = None\n for k in self.original_glycanMolecules:\n if k not in self.linked_glycanMolecules:\n if static_glycans is not None:\n static_glycans += self.original_glycanMolecules[k].atom_group\n else:\n static_glycans = self.original_glycanMolecules[k].atom_group.copy()\n \n environment = self.myGlycosylator.protein.copy() \n environment += static_glycans\n \n #Build topology\n self.myGlycosylator.build_glycan_topology(glycanMolecules = self.linked_glycanMolecules, build_all = False)\n sampler = glc.Sampler(self.linked_glycanMolecules.values(), environment, dihe_parameters, vwd_parameters)\n sampler.remove_clashes_GA()", "def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam): \n h, w, u = curImageInfo.originalImageResized.shape # shape after resize\n K = 1\n N = h * w * K\n gr = np.mean(curImageInfo.originalImageResized, 2) # not 3 as it is zero based :3\n grs = cv2.GaussianBlur(gr, (5, 5), 1)\n \n # One heuristic for converting depth to disparity\n disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;\n \n if conversionParam.spatialSmoothnessSwitch == True:\n # Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients\n # Vertical and Horizontal Edges\n dx = cv2.filter2D(grs, -1, np.transpose(np.array([[-1, 1, 0]])))\n dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))\n \n W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2 \n \n A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, \"csc\") \\\n + (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \\\n + (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))\n \n b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()\n \n [x, flag] = cg(A, b, np.transpose(disparity0).flatten(), 5e-1, 50)\n \n disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 1, K)\n else:\n disparity = disparity0\n \n curImageInfo.leftImage = curImageInfo.originalImage\n \n # The -ve sign to convert the white to black and black to white \n warpright = -disparity\n \n # only the warping interp2 is done on the original size image with no resizing to have good estimation\n warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]), \n interpolation=cv2.INTER_LINEAR)\n \n curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright), \n 
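# --- Editorial aside: a minimal sketch of the sparse conjugate-gradient solve
# --- that stereoWarpK_noMotion_singleSided (above) uses to smooth disparity.
# --- The identity system below is a placeholder, not the real smoothness matrix.
import numpy as np
from scipy.sparse import identity
from scipy.sparse.linalg import cg

A = 2.0 * identity(100, format='csc')  # stand-in for W plus the Gx/Gy terms
b = np.ones(100)
x, info = cg(A, b, maxiter=50)
assert info == 0 and np.allclose(x, 0.5)  # converged to A^{-1} b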
conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))\n \n return disparity", "def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))", "async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))", "def remove_from_group(self, org, contact, group):\n pass", "def test_clashing_atoms():\n benzene_path = examples_paths()['benzene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir, keep_openeye=True)\n system_id = 'explicit-system'\n system_description = yaml_content['systems'][system_id]\n system_description['pack'] = True\n system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])\n\n # Sanity check: at the beginning molecules clash\n toluene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(toluene_path, molecule_idx=0))\n benzene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(benzene_path, molecule_idx=0))\n assert pipeline.compute_min_dist(toluene_pos, benzene_pos) < pipeline.SetupDatabase.CLASH_THRESHOLD\n\n exp_builder = ExperimentBuilder(yaml_content)\n\n for sys_id in [system_id + '_vacuum', system_id + '_PME']:\n system_dir = os.path.dirname(\n exp_builder._db.get_system(sys_id)[0].position_path)\n\n # Get positions of molecules in the final system\n prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))\n inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))\n positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)\n topography = Topography(prmtop.topology, ligand_atoms='resname TOL')\n benzene_pos2 = 
positions.take(topography.receptor_atoms, axis=0)\n toluene_pos2 = positions.take(topography.ligand_atoms, axis=0)\n # atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')\n # benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)\n # toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)\n\n # Test that clashes are resolved in the system\n min_dist, max_dist = pipeline.compute_min_max_dist(toluene_pos2, benzene_pos2)\n assert min_dist >= pipeline.SetupDatabase.CLASH_THRESHOLD\n\n # For solvent we check that molecule is within the box\n if sys_id == system_id + '_PME':\n assert max_dist <= exp_builder._db.solvents['PME']['clearance'].value_in_unit(unit.angstrom)", "def terminate(self, atoms):\n\n c = list(atoms.keys())[0] # name of carbon atom being terminated\n c_ndx = list(atoms.values())[0] # serial index of carbon begin terminated\n\n chain = self.determine_chains(c)[0] # which chain carbon atom is on\n c_name = self.monomer.chains[chain][c]\n\n # to get indexing right\n c_ndx -= self.monomer.indices[chain][c_name]\n\n # types after reaction. Keeping this dictionary format so it integrates easily with xlinking algorithm\n types = {'chain': {self.monomer.chains[chain][c]: 'c3', self.monomer.dummy_connectivity[chain][c]: 'hc'}}\n\n for i in self.monomer.hydrogen_connectivity[c]: # turn already attached carbon(s) to c3\n types['chain'][i] = 'hc'\n\n # update types\n reacted_types = {'chain': {c_ndx + self.monomer.indices[chain][a]: types['chain'][a]\n for a in types['chain'].keys()}}\n\n # add dummy atom bond\n bonds = [[c_ndx + self.monomer.indices[chain]['C2'], c_ndx + self.monomer.indices[chain]['D2'], 'dummy']]\n\n radicals = []\n\n rm_improper = [[c_ndx + self.monomer.indices[chain][x] for x in self.monomer.impropers[chain][c_name]]]\n\n # define terminated atoms\n terminated = [c_ndx + self.monomer.indices[chain][c_name]]\n\n return reacted_types, bonds, radicals, rm_improper, terminated", "def pseudopotentialise_ethane_like_molecule(self, sysargs, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n potential_coords_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_replace = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_replace = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising atoms %s ...' 
% [atom['#'] for atom in atoms_to_replace])\n\n # Option to place a potential on the *opposite* side of the carbon as well.\n dipolar_potentials = False\n if 'dipole' in sysargs:\n print('Dipolar potentialisation activated...')\n dipolar_potentials = True\n\n for atom in atoms_to_replace:\n # Find vector from nearest carbon.\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n vector_from_nearest_carbon = self.vectorise_atom(atom['#']) \\\n - self.vectorise_atom(distanced_carbon_list[0]['#'])\n vector_to_nearest_carbon = self.vectorise_atom(distanced_carbon_list[0]['#']) \\\n - self.vectorise_atom(atom['#'])\n\n # Lengtherise vector from carbon to give relative pp coordinates.\n vector_c_to_new_pp = self.lengtherise_vector(vector_from_nearest_carbon, self.atom_potential_set_distance)\n vector_c_to_new_dipole_pp = self.lengtherise_vector(vector_to_nearest_carbon, self.atom_potential_set_distance)\n\n # Add to carbon coords to get new pp coords.\n potential_coords_list.append(\n {'#': 0, 'el': self.sp3_pseudo_element,\n 'x': vector_c_to_new_pp[0] + distanced_carbon_list[0]['x'],\n 'y': vector_c_to_new_pp[1] + distanced_carbon_list[0]['y'],\n 'z': vector_c_to_new_pp[2] + distanced_carbon_list[0]['z']},\n )\n if dipolar_potentials is True:\n # Add to carbon coords to get new pp coords.\n potential_coords_list.append(\n {'#': 0, 'el': self.sp3_pseudo_element,\n 'x': vector_c_to_new_dipole_pp[0] + distanced_carbon_list[0]['x'],\n 'y': vector_c_to_new_dipole_pp[1] + distanced_carbon_list[0]['y'],\n 'z': vector_c_to_new_dipole_pp[2] + distanced_carbon_list[0]['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)", "def destroy(self, cause:str, *, warp_core_breach:bool=False, self_destruct:bool=False):\n gd = self.game_data\n #gd.grid[self.sector_coords.y][self.sector_coords.x].removeShipFromSec(self)\n is_controllable = self.is_controllable\n #wc_value = self.sys_warp_core.get_effective_value\n\n if self.is_controllable:\n self.game_data.cause_of_damage = cause\n try:\n self.life_support.able_crew = 0\n self.life_support.injured_crew = 0\n except AttributeError:\n pass\n try:\n for k in self.torpedo_launcher.torps.keys():\n self.torpedo_launcher.torps[k] = 0\n self.torpedo_launcher.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.shield_generator.shields = 0\n self.shield_generator.shields_up = False\n self.shield_generator.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.polarized_hull.polarization_amount = 0\n self.polarized_hull.is_polarized = False\n self.polarized_hull.integrety = 0.0\n except AttributeError:\n pass\n self.power_generator.energy = 0\n self.power_generator.integrety = 0\n try:\n self.warp_drive.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.beam_array.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.cannons.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.impulse_engine.integrety = 0.0\n except AttributeError:\n pass\n self.sensors.integrety = 0.0\n try:\n self.cloak.cloak_status = CloakStatus.INACTIVE\n self.cloak.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.transporter.integrety = 0.0\n except AttributeError:\n pass\n\n if is_controllable:\n gd.engine.message_log.print_messages = False\n\n if warp_core_breach or self_destruct:\n \n 
self.warp_core_breach(self_destruct)\n self.hull = -self.ship_class.max_hull\n \n if self is self.game_data.selected_ship_planet_or_star:\n self.game_data.selected_ship_planet_or_star = None\n \n self.get_sub_sector.destroy_ship(self)", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def remove_alt_confs (hierarchy, always_keep_one_conformer=False) :\n for model in hierarchy.models() :\n for chain in model.chains() :\n for residue_group in chain.residue_groups() :\n atom_groups = residue_group.atom_groups()\n assert (len(atom_groups) > 0)\n if always_keep_one_conformer :\n if (len(atom_groups) == 1) and (atom_groups[0].altloc == '') :\n continue\n atom_groups_and_occupancies = []\n for atom_group in atom_groups :\n if (atom_group.altloc == '') :\n continue\n mean_occ = flex.mean(atom_group.atoms().extract_occ())\n atom_groups_and_occupancies.append((atom_group, mean_occ))\n atom_groups_and_occupancies.sort(lambda a,b: cmp(b[1], a[1]))\n for atom_group, occ in 
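# --- Editorial aside: a minimal sketch of the stereoisomer-enumeration API that
# --- the two test_enumerating_stereo_options documents above exercise, using
# --- the same openff-toolkit Molecule class they rely on.
from openff.toolkit.topology import Molecule

mol = Molecule.from_smiles("ClC=CCl", allow_undefined_stereo=True)
# Enumerate only the currently undefined double-bond stereochemistry.
isomers = mol.enumerate_stereoisomers(undefined_only=True, rationalise=False)
assert len(isomers) == 2  # cis- and trans-1,2-dichloroethene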
atom_groups_and_occupancies[1:] :\n residue_group.remove_atom_group(atom_group=atom_group)\n single_conf, occ = atom_groups_and_occupancies[0]\n single_conf.altloc = ''\n else :\n for atom_group in atom_groups :\n if (not atom_group.altloc in [\"\", \"A\"]) :\n residue_group.remove_atom_group(atom_group=atom_group)\n else :\n atom_group.altloc = \"\"\n if (len(residue_group.atom_groups()) == 0) :\n chain.remove_residue_group(residue_group=residue_group)\n if (len(chain.residue_groups()) == 0) :\n model.remove_chain(chain=chain)\n atoms = hierarchy.atoms()\n new_occ = flex.double(atoms.size(), 1.0)\n atoms.set_occ(new_occ)", "def _kill_group(self, x, y):\n if self[x, y] not in self.TURNS:\n raise BoardError('Can only kill black or white group')\n\n group = self.get_group(x, y)\n score = len(group)\n\n for x1, y1 in group:\n self[x1, y1] = self.EMPTY\n\n return score", "def group_group_collide(sprite_group, o_sprite_group):\n sprites = set(sprite_group)\n for sprite in sprites:\n if group_collide(o_sprite_group, sprite):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False", "def resetCoronalSegment(self):\r\n #research\r\n profprint()\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(0)\r\n sGreen.SetOrientationToCoronal()\r\n #sw = slicer.app.layoutManager().sliceWidget(\"Green\")\r\n #sw.fitSliceToBackground()\r\n sGreen.Modified()", "def DeMorgan_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, A), disj, (neg, B)), (neg, (A, conj, B)) ],\n\t\t\t[ (neg, (A, conj, B)), ((neg, A), disj, (neg, B)) ],\n\t\t\t[ ((neg, A), conj, (neg, B)), (neg, (A, disj, B)) ],\n\t\t\t[ (neg, (A, disj, B)), ((neg, A), conj, (neg, B)) ]\n\t\t]))", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")", "def cycleManipulatorSpace():\n validateSelect()\n current_context = pm.currentCtx()\n context_title = pm.contextInfo(current_context, t=True)\n\n if 'Move' in context_title:\n context_mode = pm.manipMoveContext('Move', q=True, mode=True)\n if context_mode == 0:\n pm.manipMoveContext('Move', edit=True, mode=context_mode + 1)\n pm.displayInfo('In Parent space.')\n elif context_mode == 1:\n pm.manipMoveContext('Move', edit=True, mode=context_mode + 1)\n pm.displayInfo('In World space.')\n else:\n pm.manipMoveContext('Move', edit=True, mode=0)\n pm.displayInfo('In Object space.')\n\n elif 'Rotate' in context_title:\n context_mode = pm.manipRotateContext('Rotate', q=True, mode=True)\n if context_mode == 0:\n pm.manipRotateContext('Rotate', edit=True, mode=context_mode + 1)\n pm.displayInfo('In World space.')\n elif context_mode == 1:\n pm.manipRotateContext('Rotate', edit=True, mode=context_mode + 1)\n pm.displayInfo('In Gimbal space.')\n else:\n pm.manipRotateContext('Rotate', edit=True, mode=0)\n pm.displayInfo('In Object space.')\n\n elif 'Scale' in context_title:\n context_mode = pm.manipScaleContext('Scale', q=True, mode=True)\n if context_mode == 0:\n 
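# --- Editorial aside: a minimal sketch of the flood fill that a get_group()
# --- helper like the one _kill_group (above) calls would perform. The nested
# --- list board representation is an assumption.
def get_group(board, x, y):
    # Collect all orthogonally connected coordinates sharing (x, y)'s colour.
    colour = board[y][x]
    group, stack = set(), [(x, y)]
    while stack:
        cx, cy = stack.pop()
        if (cx, cy) in group:
            continue
        group.add((cx, cy))
        for nx, ny in ((cx + 1, cy), (cx - 1, cy), (cx, cy + 1), (cx, cy - 1)):
            if 0 <= ny < len(board) and 0 <= nx < len(board[ny]) and board[ny][nx] == colour:
                stack.append((nx, ny))
    return group

board = [[1, 1, 0], [0, 1, 0], [0, 0, 2]]
assert get_group(board, 0, 0) == {(0, 0), (1, 0), (1, 1)}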
pm.manipScaleContext('Scale', edit=True, mode=context_mode + 1)\n pm.displayInfo('In Parent space.')\n elif context_mode == 1:\n pm.manipScaleContext('Scale', edit=True, mode=context_mode + 1)\n pm.displayInfo('In World space.')\n else:\n pm.manipScaleContext('Scale', edit=True, mode=0)\n pm.displayInfo('In Object space.')", "def test_reaction_inverts_stereo(self):\n reaction = '[C@:1]>>[C@@:1]'\n\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br |o1:1|')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@H](Cl)Br |&1:1|')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')", "def test_parameterize_mol_missing_stereo_openeye(self, force_field):\n toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def sequence_tunable(\n mol,\n OP_REMOVE_ISOTOPE=True, OP_NEUTRALISE_CHARGE=True,\n OP_REMOVE_STEREO=False, OP_COMMUTE_INCHI=False,\n OP_KEEP_BIGGEST=True, OP_ADD_HYDROGEN=True,\n OP_KEKULIZE=True, OP_NEUTRALISE_CHARGE_LATE=True\n ):\n F = Filters()\n # Always perform the basics..\n Cleanup(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n AssignStereochemistry(mol, cleanIt=True, force=True, flagPossibleStereoCenters=True) # Fix bug TD201904.01\n # \n if OP_REMOVE_ISOTOPE:\n mol = F.remove_isotope(mol)\n if OP_NEUTRALISE_CHARGE:\n mol = F.neutralise_charge(mol)\n if any([OP_REMOVE_ISOTOPE, OP_REMOVE_ISOTOPE]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n # \n if OP_REMOVE_STEREO:\n mol = F.remove_stereo(mol)\n OP_COMMUTE_INCHI = True\n if OP_COMMUTE_INCHI:\n mol = F.commute_inchi(mol)\n if OP_KEEP_BIGGEST:\n mol = F.keep_biggest(mol)\n if any([OP_REMOVE_STEREO, OP_COMMUTE_INCHI, OP_KEEP_BIGGEST]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_NEUTRALISE_CHARGE_LATE:\n mol = F.neutralise_charge(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_ADD_HYDROGEN:\n mol = F.add_hydrogen(mol, addCoords=True)\n if OP_KEKULIZE:\n mol = F.kekulize(mol)\n #\n return mol", "async def async_remove_from_group(self, device):\n if device.entity_id in self._multiroom_group:\n self._multiroom_group.remove(device.entity_id)\n# await self.async_schedule_update_ha_state(True)\n\n if len(self._multiroom_group) <= 1:\n self._multiroom_group = []\n self._is_master = False\n self._slave_list = None\n\n for member in self._multiroom_group:\n for player in self.hass.data[DOMAIN].entities:\n if player.entity_id == member and player.entity_id != self.entity_id:\n await player.async_set_multiroom_group(self._multiroom_group)", "def group_collide(sprite_group, other_object):\n sprites = set(sprite_group)\n for sprite in sprites:\n if sprite.collide(other_object):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False", "def remove(self):\n\n self.last_move = \"\"\n self.collision_boxes = []\n self.removed=True", "def deleteModule(self):\n\n # delete the joint mover\n movers = self.returnJointMovers\n\n for moverGrp in movers:\n for mover in moverGrp:\n cmds.lockNode(mover, lock=False)\n\n cmds.delete(self.name + \"_mover_grp\")\n\n # remove the entry from 
the outliner\n index = self.rigUiInst.treeWidget.indexOfTopLevelItem(self.outlinerWidgets[self.name + \"_treeModule\"])\n self.rigUiInst.treeWidget.takeTopLevelItem(index)\n\n # remove the groupbox\n self.groupBox.setParent(None)\n\n # deal with mirror module\n networkNode = self.returnNetworkNode\n mirrorModule = cmds.getAttr(networkNode + \".mirrorModule\")\n if mirrorModule != None:\n if mirrorModule != \"None\":\n modules = utils.returnRigModules()\n for mod in modules:\n modName = cmds.getAttr(mod + \".moduleName\")\n if modName == mirrorModule:\n\n # set the mirrored version\n cmds.setAttr(mod + \".mirrorModule\", lock=False)\n cmds.setAttr(mod + \".mirrorModule\", \"None\", type=\"string\", lock=True)\n\n # get instance of mirror module's class\n modType = cmds.getAttr(mod + \".moduleType\")\n modName = cmds.getAttr(mod + \".moduleName\")\n module = __import__(\"RigModules.\" + modType, {}, {}, [modType])\n\n # get the class name from that module file (returns Modules.ART_Root.ART_Root for example)\n moduleClass = getattr(module, module.className)\n\n # find the instance of that module and call on the skeletonSettings_UI function\n moduleInst = moduleClass(self.rigUiInst, modName)\n\n # find the current groupBox for this module\n for i in range(self.rigUiInst.moduleSettingsLayout.count()):\n if type(self.rigUiInst.moduleSettingsLayout.itemAt(i).widget()) == QtWidgets.QGroupBox:\n if self.rigUiInst.moduleSettingsLayout.itemAt(i).widget().title() == modName:\n self.rigUiInst.moduleSettingsLayout.itemAt(i).widget().setParent(None)\n\n # relaunch the skeleton settings UI with new info\n moduleInst.skeletonSettings_UI(modName)\n\n # check for any attached modules\n attachedModules = self.checkForDependencies()\n elementList = []\n if len(attachedModules) > 0:\n\n for each in attachedModules:\n elementList.append([each[2], \" -> parent changed from: \", each[1], \" to: \", \"root\\n\"])\n cmds.parent(each[2] + \"_mover_grp\", \"root_mover\")\n cmds.setAttr(each[0] + \".parentModuleBone\", lock=False)\n cmds.setAttr(each[0] + \".parentModuleBone\", \"root\", type=\"string\", lock=True)\n each[3].currentParent.setText(\"root\")\n mover = \"root_mover\"\n\n # create the connection geo between the two\n childMover = utils.findOffsetMoverFromName(each[2])\n riggingUtils.createBoneConnection(mover, childMover, each[2])\n each[3].applyModuleChanges(each[3])\n cmds.select(clear=True)\n\n # remove the network node\n cmds.delete(networkNode)\n\n # delete scriptJob\n cmds.scriptJob(kill=self.scriptJob, force=True)\n self.updateBoneCount()\n self.rigUiInst.moduleInstances.remove(self)\n\n # warn user about changes\n if len(attachedModules) > 0:\n winParent = interfaceUtils.getMainWindow()\n win = interfaceUtils.DialogMessage(\"Attention!\",\n \"The following modules have had their parent changed\\\n due to the change in this module's structure:\",\n elementList, 5, winParent)\n win.show()", "def process(self, car):\n super(LeftIntersectionMessage, self).process(car)\n car.delete_car_at_intersection(self)", "def RemoveInitiators(self, initiatorList):\n # Append the IQNs to the existing list\n full_iqn_list = self.initiators\n for iqn in initiatorList:\n if iqn.lower() in full_iqn_list:\n full_iqn_list.remove(iqn)\n else:\n mylog.debug(iqn + \" is already not in group \" + self.name)\n\n # Modify the VAG on the cluster\n params = {}\n params[\"volumeAccessGroupID\"] = self.ID\n params[\"initiators\"] = full_iqn_list\n libsf.CallApiMethod(self.mvip, self.username, self.password, 
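# --- Editorial aside: a minimal sketch of the canonicalisation round-trip that
# --- the testStereo document below checks, rewritten with RDKit rather than
# --- pybel; the toolkit swap is deliberate and an assumption.
from rdkit import Chem

smi = 'OC(=O)[C@@H](CCC(N)=O)N'
canonical = Chem.MolToSmiles(Chem.MolFromSmiles(smi))
# Re-canonicalising a canonical SMILES must be a fixed point.
assert canonical == Chem.MolToSmiles(Chem.MolFromSmiles(canonical))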
\"ModifyVolumeAccessGroup\", params, ApiVersion=5.0)", "def algo_mover(self):\n self.tick_count = 0 ## reset to start clock on delayed state change\n\n ### Move 1 ring ###\n de, aux, vers = self.instructions.pop(0)\n\n de = self.rods[de]\n aux = self.rods[aux]\n vers = self.rods[vers]\n\n ring = de.pop_ring()\n vers.add_ring(ring)\n\n ### Update the states after -- moved this to main update\n #self.state_updater()", "def update(self):\n super().update()\n bulletKeys=list(Bullet.registeredBullets.keys())\n for currKey in bulletKeys:\n currBullet=Bullet.registeredBullets[currKey]\n if (self.hasCollidedWith(currBullet)):\n currBullet.delete()\n currBullet.entityThatCreatedMe.score+=self.pointValue\n self.shatter()\n break", "def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def remove(self, sgid, other, proto_spec, direction=\"in\"):\n # returns (removed_sgr_info, ...)\n # RevokeSecurityGroupIngress, RevokeSecurityGroupEgress\n if direction == \"in\":\n api = \"RevokeSecurityGroupIngress\"\n elif direction == \"out\":\n api = \"RevokeSecurityGroupEgress\"\n 
else:\n raise ValueError(\"direction must be one of ('in', 'out')\")\n return self.modify(api, sgid, other, proto_spec)", "def cull(self) -> None:\n for player in self.players:\n to_remove = [creature for creature in player.battle_line if creature.damage_taken >= creature.power()]\n for creature in to_remove:\n player.battle_line.remove(creature)\n to_remove.destroyed(self, creature)", "def intercept_e(self):\n for asteroid in range(len(self.asteroid_id_e) - 1, -1, -1):\n if self.distance(self.Main_Ship, self.asteroid_id_e[asteroid]) < (self.spaceship_radius + self.asteroid_r_e[asteroid]):\n self.del_asteroid_e(asteroid)\n self.lives -= 1", "def cleaveSurfBond(entry,max_bonds=1,supercell=2,group_structs=True,prec=1E-4):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # Proceed only if the structure is classified as periodic\n # in all directions\n if results[0]=='conventional':\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(binary_matrix)))/2\n \n # Get dictionary of directional bonds in the system, \n # and the associated atomic species\n bond_dir = getBondVectors(struct,entry[1]-1,prec)\n\n \n # Create the list of bonds to be broken\n all_structs=[]\n combos=[]\n for s1 in bond_dir:\n for s2 in bond_dir[s1]:\n for cleave in bond_dir[s1][s2]: \n combos.append(cleave[1])\n \n # Create pairings of bonds to be broken, up to \n # max_bonds number of bonds\n \n final_combos=[]\n for i in range(1,max_bonds+1):\n for mix in list(itertools.combinations(combos,max_bonds)):\n final_combos.append(mix)\n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n for combo in final_combos:\n modified_matrix = np.array(binary_matrix)\n for sett in combo:\n for pair in sett:\n i,j = pair\n modified_matrix[i][j]=0\n modified_matrix[j][i]=0\n new_num_bonds=sum(sum(modified_matrix))/2\n \n # Number of bonds broken in the search. Not necessarily\n # the number of bonds broken to cleave the surface\n \n broken=int(og_num_bonds-new_num_bonds)\n \n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. 
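# --- Editorial aside: a minimal sketch of the radius-sum overlap test that
# --- intercept_e (above) applies between the ship and each asteroid; the
# --- names here are assumptions.
import math

def circles_collide(x1, y1, r1, x2, y2, r2):
    # Two circles overlap when centre distance is below the sum of radii.
    return math.hypot(x2 - x1, y2 - y1) < (r1 + r2)

assert circles_collide(0.0, 0.0, 1.0, 1.5, 0.0, 1.0)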
the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n\n else:\n print('Material does not have a 3D motif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def rmsd_cluster(input, ref, output, clusters):\n ifs = oemolistream()\n if not ifs.open(input):\n OEThrow.Fatal(\"Unable to open %s for reading\" % input)\n poses = list()\n mol = OEMol()\n while OEReadMolecule(ifs, mol):\n mol_copy = OEMol(mol)\n #print(dir(mol_copy))\n #print(mol_copy.NumConfs())\n for conf in mol_copy.GetConfs():\n poses.append(conf)\n ifs.close()\n print(\"%d poses read\" % len(poses))\n\n # Create a list of centroids, starting with first molecule.\n centroids = list()\n\n # Make first pose our first centroid.\n centroids.append(poses.pop(0))\n if int(clusters) < len(poses):\n print(\"Will return %s poses...\" % clusters)\n else:\n print(\"Will return %s poses...\" % (len(poses)+1))\n while len(centroids) < int(clusters) and len(poses)>0:\n print(len(centroids))\n # Compute distance from all poses to closest centroid.\n min_rmsd = numpy.zeros([len(poses)])\n for (pose_index, pose) in enumerate(poses):\n centroids_rmsds = [OERMSD(pose, centroid) for centroid in centroids]\n min_rmsd[pose_index] = min(centroids_rmsds)\n # Find pose that is farthest away from all current centroids.\n farthest_pose_index = min_rmsd.argmax()\n print(\"Farthest pose is %d at %f A away from centroids\" % (farthest_pose_index, min_rmsd[farthest_pose_index]))\n # Move farthest pose to centroids.\n centroids.append(poses.pop(farthest_pose_index))\n # Write out all centroids.\n ofs=oemolostream()\n if not ofs.open(output):\n OEThrow.Fatal(\"Unable to open %s for writing\" % itf.GetString(\"-o\"))\n for mol in centroids:\n #OEWritePDBFile(ofs, mol)\n OEWriteMolecule(ofs, mol)\n\n print(\"Done!\")\n\n return 0", "def test_enumerating_stereobonds(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"ClC=CCl\", allow_undefined_stereo=True, toolkit_registry=toolkit\n )\n\n # use the default options\n isomers = mol.enumerate_stereoisomers()\n assert len(isomers) == 2\n\n assert mol not in isomers\n # make sure the input molecule is only different by bond stereo\n for ismol in isomers:\n assert (\n 
Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def stop_group(self, ch_group, args):\n for sound in ch_group.sounds: \n self._stop_sound(sound, args) \n # and apply recursively to subgroups\n for group in ch_group.sub_group_channels:\n self.stop_group(group, args)", "def enr_destroy(dims, excitations, *, dtype=None):\n from .states import enr_state_dictionaries\n dtype = dtype or settings.core[\"default_dtype\"] or _data.CSR\n\n nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)\n\n a_ops = [scipy.sparse.lil_matrix((nstates, nstates), dtype=np.complex128)\n for _ in dims]\n\n for n1, state1 in enumerate(idx2state):\n for idx, s in enumerate(state1):\n # if s > 0, the annihilation operator of mode idx has a non-zero\n # entry with one less excitation in mode idx in the final state\n if s > 0:\n state2 = state1[:idx] + (s-1,) + state1[idx+1:]\n n2 = state2idx[state2]\n a_ops[idx][n2, n1] = np.sqrt(s)\n\n return [Qobj(a, dims=[dims, dims]).to(dtype) for a in a_ops]", "def test_check_for_existing_reaction_eliminates_identical_reactions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction')", "def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. 
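# --- Editorial aside: a minimal sketch using excitation-number-restricted
# --- operators like those enr_destroy (above) constructs; assumes the public
# --- QuTiP function of the same name.
from qutip import enr_destroy

# Two 4-level modes truncated to at most 2 total excitations: 6 states, not 16.
a1, a2 = enr_destroy([4, 4], excitations=2)
n_total = a1.dag() * a1 + a2.dag() * a2
assert n_total.shape[0] == 6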
Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp", "def _remove_receptors_helper(self, argin):\n receptor_to_vcc = dict([*map(int, pair.split(\":\"))] for pair in\n self._proxy_cbf_controller.receptorToVcc)\n for receptorID in argin:\n if receptorID in self._receptors:\n vccID = receptor_to_vcc[receptorID]\n vccProxy = self._proxies_vcc[vccID - 1]\n\n # unsubscribe from events\n vccProxy.unsubscribe_event(self._events_state_change_vcc[vccID][0]) # state\n vccProxy.unsubscribe_event(self._events_state_change_vcc[vccID][1]) # healthState\n del self._events_state_change_vcc[vccID]\n del self._vcc_state[self._fqdn_vcc[vccID - 1]]\n del self._vcc_health_state[self._fqdn_vcc[vccID - 1]]\n\n # reset receptorID and subarrayMembership Vcc attribute:\n vccProxy.receptorID = 0\n vccProxy.subarrayMembership = 0\n\n self._receptors.remove(receptorID)\n self._proxies_assigned_vcc.remove(vccProxy)\n self._group_vcc.remove(self._fqdn_vcc[vccID - 1])\n else:\n log_msg = \"Receptor {} not assigned to subarray. 
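# --- Editorial aside: a minimal sketch of the shortest-path query that
# --- _propose_atoms_in_order (above) uses to collect candidate torsions.
import networkx as nx

g = nx.path_graph(6)  # a six-atom chain: 0-1-2-3-4-5
paths = nx.single_source_shortest_path(g, 0, cutoff=4)
# A node path of length 4 (three bonds) is a torsion candidate.
torsions = [p for p in paths.values() if len(p) == 4]
assert torsions == [[0, 1, 2, 3]]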
Skipping.\".format(str(receptorID))\n self.logger.warn(log_msg)\n\n # transitions to EMPTY if not assigned any receptors\n if not self._receptors:\n self._update_obs_state(ObsState.EMPTY)", "def reapply(self, circ):\n self._modifiers(circ.sdg(self.qargs[0]))", "def remove():", "def stereo_callback(self, stereo_msg):\r\n start = time.time()\r\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\r\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\r\n\r\n # Build the image pyramids once since they're used at multiple places.\r\n self.create_image_pyramids()\r\n\r\n # Detect features in the first frame.\r\n if self.is_first_img:\r\n if not self.config.load_features_flag:\r\n self.initialize_first_frame()\r\n self.is_first_img = False\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n else:\r\n if not self.config.load_features_flag:\r\n # Track the feature in the previous image.\r\n t = time.time()\r\n self.track_features()\r\n print('___track_features:', time.time() - t)\r\n t = time.time()\r\n\r\n # Add new features into the current image.\r\n self.add_new_features()\r\n print('___add_new_features:', time.time() - t)\r\n t = time.time()\r\n self.prune_features()\r\n print('___prune_features:', time.time() - t)\r\n t = time.time()\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n print('___draw_features_stereo:', time.time() - t)\r\n t = time.time()\r\n\r\n print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')\r\n\r\n if not self.config.load_features_flag:\r\n try:\r\n self.save_features() \r\n return self.publish()\r\n finally:\r\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\r\n self.prev_features = self.curr_features\r\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\r\n\r\n # Initialize the current features to empty vectors.\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n else:\r\n self.load_features()\r\n return self.publish()", "def remove_as_subgroup(self, other_groups):\r\n symbols_to_exclude = reduce(lambda alphabet, cell: alphabet.union(cell.get_possible_symbols()),\r\n self.cells, set())\r\n my_cells = set(self.cells)\r\n\r\n for group in other_groups:\r\n if my_cells.issubset(group.cells) and self is not group:\r\n # Remove my cells from the other group\r\n for cell in self.cells:\r\n cell.remove_group(group)\r\n group.cells.remove(cell)\r\n\r\n # Update the alphabets in the other group\r\n for cell in group.cells:\r\n cell.remove_possible_symbols(symbols_to_exclude)", "def radical_c2(self, atoms):\n\n c1, c2 = atoms.keys()\n c1_ndx, c2_ndx = atoms.values()\n\n chain1, chain2 = self.determine_chains([c1, c2])\n\n # to get indexing right\n c1_ndx -= self.monomer.indices[chain1]['C1']\n c2_ndx -= self.monomer.indices[chain2]['C2']\n\n # types after reaction\n types = {'chain1': {'C1': 'c3', 'C2': 'c2', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'ha',\n 'H4': 'ha', 'H5': 'ha'}, # chain1 contains c1\n 'chain2': {'C1': 'c3', 'C2': 'c3', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'hc',\n 'H4': 'ha', 'H5': 'ha'}} # chain2 contains c2 radical\n\n # update types\n reacted_types = {'chain1': {c1_ndx + self.monomer.indices[chain1][a]: types['chain1'][a]\n for a in types['chain1'].keys()},\n 'chain2': {c2_ndx + self.monomer.indices[chain2][a]: types['chain2'][a]\n for a in types['chain2'].keys()}}\n\n # new bonds\n bonds = [[c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C2'], 'carbon']]\n\n # no dummy bonds to add\n\n # define indices of left-over radicals\n radicals = 
[c1_ndx + self.monomer.indices[chain1]['C2']]\n\n chain1_impropers = ['C1'] # [1]\n chain2_impropers = ['C2'] # [2]\n rm_improper = []\n for c in chain1_impropers:\n rm_improper.append([c1_ndx + self.monomer.indices[chain1][x] for x in self.monomer.impropers[chain1][c]])\n for c in chain2_impropers:\n rm_improper.append([c2_ndx + self.monomer.indices[chain2][x] for x in self.monomer.impropers[chain2][c]])\n\n # define terminated atoms\n terminated = [c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C2']]\n\n return reacted_types, bonds, radicals, rm_improper, terminated", "def test_group(self):\n # leave out particle 0\n group = hoomd.group.tags(1,2)\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, ( 0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (0,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))", "def removePick(self):\n self.pnt = None\n vtkRenWin.delMarker(self.renWin)", "def conclude_hand(self):\n for position in self.positions.keys():\n if position not in self.cards:\n self.cards[position] = (Card(), Card())", "def test_strip_atom_stereochemistry(self):\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n nitrogen_idx = [\n atom.molecule_atom_index for atom in mol.atoms if atom.symbol == \"N\"\n ][0]\n\n # TODO: This fails with RDKitToolkitWrapper because it perceives\n # the stereochemistry of this nitrogen as None\n assert mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None\n\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None", "def set_original_planes(self, display_opt):\n\n # get 4-chamber view\n four_ch_view_plane_normal = self.find_4ch_view(display_opt)\n\n # set rodriguez rotation around midline (apex to C)\n axis_of_rot = np.array(self.epi_apex_node - self.C)\n self.axis_of_rot_normalized = axis_of_rot/np.linalg.norm(axis_of_rot)\n\n # get 2-chamber view (90-counterclock rotation from 4ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized,\n math.radians(self.orig_view_angles[1])) # rodriguez rotation around midline\n two_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n # get 3-chamber view (additional 30-60 counterclock rotation from 3ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized, math.radians(self.orig_view_angles[2]))\n three_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n if display_opt:\n _ = self.mesh_slicer(four_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(two_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(three_ch_view_plane_normal, 'mesh')\n\n self.original_planes = np.vstack((four_ch_view_plane_normal,\n two_ch_view_plane_normal,\n 
three_ch_view_plane_normal))", "def automerge_clusters(self):\n all_clusters = self.get_clusters().copy()\n\n if not self._single: # if not in single mode mode\n # initialize the variable to check if some change has happened \n changed = False\n for cl_1 in all_clusters: # cycle over clusters\n c_c1 = all_clusters[cl_1]\n for cl_2 in all_clusters: # inner cycle over clusters\n c_c2 = all_clusters[cl_2]\n # if two clusters have the same speaker and have different \n # cluster identifiers\n if cl_1 != cl_2 and c_c1.get_speaker() != 'unknown' and c_c1.get_speaker() == c_c2.get_speaker() and self._clusters.has_key(cl_1) and self._clusters.has_key(cl_2):\n changed = True\n # merge the clusters an record that something changed\n self._merge_clusters(cl_1, cl_2)\n if changed: # if something has changed\n # rename all the clusters starting from S0\n self._rename_clusters()\n # remove also the old waves and seg files of the old clusters\n shutil.rmtree(self.get_file_basename())\n # rebuild all seg files\n self.generate_seg_file(set_speakers=False)\n # resplit the original wave file according to the new clusters\n self._to_trim()", "def EnumerateStereoisomers(m, options=..., verbose=...): # -> Generator[Unknown, None, None]:\n ...", "def end_pairing(self, error_dialog):\n self.speak_dialog(error_dialog)\n self.bus.emit(Message(\"mycroft.mic.unmute\", None))\n\n self.data = None\n self.count = -1", "def set_caero_control_surface_grid(self, name: str, cs_box_ids: List[int],\n box_id_to_caero_element_map: Dict[int, Any],\n caero_points: np.ndarray,\n note: Optional[str]=None,\n zfighting_offset: float=0.001,\n store_msg: bool=False) -> str:\n gui = self.gui\n log = self.gui.log\n boxes_to_show, stored_msg = check_for_missing_control_surface_boxes(\n name, cs_box_ids, box_id_to_caero_element_map, log,\n store_msg=store_msg)\n #if not boxes_to_show:\n #print('*%s' % name)\n #print('*%s' % boxes_to_show)\n #return\n\n #if name not in gui.alt_grids:\n #print('**%s' % name)\n #return\n\n grid = gui.alt_grids[name]\n grid.Reset()\n\n all_points, elements, centroids, areas = get_caero_control_surface_grid(\n grid,\n box_id_to_caero_element_map,\n caero_points, boxes_to_show, log)\n\n if len(all_points) == 0:\n log.error('deleting %r' % name)\n\n # name = spline_1000_boxes\n sname = name.split('_')\n sname[-1] = 'structure_points'\n\n # points_name = spline_1000_structure_points\n points_name = '_'.join(sname)\n log.error('deleting %r' % points_name)\n\n gui.remove_alt_grid(name, remove_geometry_property=True)\n gui.remove_alt_grid(points_name, remove_geometry_property=True)\n return stored_msg\n\n # combine all the points\n all_points_array = np.vstack(all_points)\n\n #vtk_etype = 9 # vtkQuad\n #create_vtk_cells_of_constant_element_type(grid, elements, vtk_etype)\n\n # shift z to remove z-fighting with caero in surface representation\n all_points_array[:, [1, 2]] += zfighting_offset\n\n # get the vtk object\n vtk_points = numpy_to_vtk_points(all_points_array, deep=0)\n grid.SetPoints(vtk_points)\n\n #if missing_boxes:\n #msg = 'Missing CAERO AELIST boxes: ' + str(missing_boxes)\n #gui.log_error(msg)\n if note:\n # points_list (15, 4, 3) = (elements, nodes, 3)\n x, y, z = np.average(centroids, weights=areas, axis=0)\n text = str(note)\n #slot = gui.label_actors[-1]\n\n slot = gui.reset_label_actors(name)\n annotation = gui.create_annotation(text, x, y, z)\n slot.append(annotation)\n\n return stored_msg", "def applyMorphologicalCleaning(self, image):", "def draw_molecule(molecule, center=(0, 0, 0), 
show_bonds=True, join=True,\n name='molecule'):\n collection = bpy.data.collections.new(name)\n bpy.context.scene.collection.children.link(collection)\n shapes = []\n\n # If using space-filling model, scale up atom size and remove bonds\n\n # Add atom primitive\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.mesh.primitive_uv_sphere_add()\n sphere = bpy.context.object\n\n # Initialize bond material if it's going to be used.\n if show_bonds:\n bpy.data.materials.new(name='bond')\n bpy.data.materials['bond'].diffuse_color = atom_data['bond']['color'] + [1]\n bpy.data.materials['bond'].specular_intensity = 0.2\n bpy.ops.mesh.primitive_cylinder_add()\n cylinder = bpy.context.object\n cylinder.active_material = bpy.data.materials['bond']\n\n for atom in molecule['atoms']:\n if atom['element'] not in atom_data:\n atom['element'] = 'undefined'\n\n if atom['element'] not in bpy.data.materials:\n key = atom['element']\n bpy.data.materials.new(name=key)\n bpy.data.materials[key].diffuse_color = atom_data[key]['color'] + [1]\n bpy.data.materials[key].specular_intensity = 0.2\n\n atom_sphere = sphere.copy()\n atom_sphere.data = sphere.data.copy()\n atom_sphere.location = [l + c for l, c in\n zip(atom['location'], center)]\n scale = 1 if show_bonds else 2.5\n atom_sphere.dimensions = [atom_data[atom['element']]['radius'] *\n scale * 2] * 3\n atom_sphere.active_material = bpy.data.materials[atom['element']]\n collection.objects.link(atom_sphere)\n shapes.append(atom_sphere)\n\n for bond in (molecule['bonds'] if show_bonds else []):\n start = molecule['atoms'][bond['atoms'][0]]['location']\n end = molecule['atoms'][bond['atoms'][1]]['location']\n diff = [c2 - c1 for c2, c1 in zip(start, end)]\n cent = [(c2 + c1) / 2 for c2, c1 in zip(start, end)]\n mag = sum([(c2 - c1) ** 2 for c1, c2 in zip(start, end)]) ** 0.5\n\n v_axis = Vector(diff).normalized()\n v_obj = Vector((0, 0, 1))\n v_rot = v_obj.cross(v_axis)\n\n # This check prevents gimbal lock (ie. weird behavior when v_axis is\n # close to (0, 0, 1))\n if v_rot.length > 0.01:\n v_rot = v_rot.normalized()\n axis_angle = [acos(v_obj.dot(v_axis))] + list(v_rot)\n else:\n v_rot = Vector((1, 0, 0))\n axis_angle = [0] * 4\n\n if bond['order'] not in range(1, 4):\n sys.stderr.write(\"Improper number of bonds! 
Defaulting to 1.\\n\")\n bond['order'] = 1\n\n if bond['order'] == 1:\n trans = [[0] * 3]\n elif bond['order'] == 2:\n trans = [[1.4 * atom_data['bond']['radius'] * x for x in v_rot],\n [-1.4 * atom_data['bond']['radius'] * x for x in v_rot]]\n elif bond['order'] == 3:\n trans = [[0] * 3,\n [2.2 * atom_data['bond']['radius'] * x for x in v_rot],\n [-2.2 * atom_data['bond']['radius'] * x for x in v_rot]]\n\n for i in range(bond['order']):\n bond_cylinder = cylinder.copy()\n bond_cylinder.data = cylinder.data.copy()\n bond_cylinder.dimensions = [atom_data['bond']['radius'] * scale *\n 2] * 2 + [mag]\n bond_cylinder.location = [c + scale * v for c,\n v in zip(cent, trans[i])]\n bond_cylinder.rotation_mode = 'AXIS_ANGLE'\n bond_cylinder.rotation_axis_angle = axis_angle\n collection.objects.link(bond_cylinder)\n shapes.append(bond_cylinder)\n\n # Remove primitive meshes\n bpy.ops.object.select_all(action='DESELECT')\n sphere.select_set(True)\n if show_bonds:\n cylinder.select_set(True)\n # If the starting cube is there, remove it\n if 'Cube' in bpy.data.objects.keys():\n bpy.data.objects.get('Cube').select_set(True)\n bpy.ops.object.delete()\n\n for shape in shapes:\n shape.select_set(True)\n bpy.context.view_layer.objects.active = shapes[0]\n bpy.ops.object.shade_smooth()\n if join:\n bpy.ops.object.join()\n for obj in bpy.context.selected_objects:\n obj.name = name\n\n bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')", "def removeSeparatrix(self):\n if self._separatrixOverlayHandle is not None:\n self._separatrixOverlayHandle.remove()\n self._separatrixOverlayHandle = None\n\n self.overlaySeparatrix = False", "async def on_reaction_remove(reaction, user):\n #Before doing anything\n #Check to see if the reaction was a karma emoji\n if reaction.emoji == initKarma.goodKarma:\n consoleMessage = 'Writing to karmaData file :: Decreasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '-1')\n if reaction.emoji == initKarma.badKarma:\n consoleMessage = 'Writing to karmaData file :: Increasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '+1')", "async def on_raw_reaction_remove(self, payload):\n\n\t\tguild = self.bot.get_guild(payload.guild_id)\n\t\tif guild is not None:\n\t\t\t# Update reaction leaderboards\n\t\t\treactionLeaderboard = self.leaderboards[str(payload.guild_id)][\"reactionLeaderboard\"]\n\n\t\t\tif payload.emoji.id is not None:\n\t\t\t\tfor guildEmoji in guild.emojis:\n\t\t\t\t\tif payload.emoji.id == guildEmoji.id:\n\t\t\t\t\t\treactionLeaderboard[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] -= 1\n\t\t\t\t\t\tbreak\n\n\t\t\telse:\n\t\t\t\treactionLeaderboard[str(payload.emoji.name)] -= 1\n\n\t\t\tif str(payload.emoji.id) in self.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"]:\n\t\t\t\tself.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"][str(payload.emoji.id)] -= 1", "def _collide(self):\n\n collisions = self._get_collisions()\n for collision in collisions:\n self._update_excitation(collision)\n atom1 = self.atoms[collision[0]]\n atom2 = self.atoms[collision[1]]\n\n r = atom1.pos-atom2.pos\n r_mag = np.linalg.norm(r)\n r_hat = r/r_mag\n\n v_1_r = np.dot(atom1.vel, r_hat)\n v_2_r = np.dot(atom2.vel, r_hat)\n\n v_1_r_f = (atom1.mass-atom2.mass)*v_1_r/(atom1.mass + atom2.mass)\\\n + 
2*atom2.mass*v_2_r/(atom1.mass + atom2.mass)\n v_2_r_f = (atom2.mass-atom1.mass)*v_2_r/(atom1.mass + atom2.mass)\\\n + 2*atom1.mass*v_1_r/(atom1.mass + atom2.mass)\n\n delta_v_1 = (v_1_r_f - v_1_r)*r_hat\n delta_v_2 = (v_2_r_f - v_2_r)*r_hat\n\n self.atoms[collision[0]].vel += delta_v_1\n self.atoms[collision[1]].vel += delta_v_2", "def test_enumerating_stereocenters(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"NC(Cl)(F)O\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n\n isomers = mol.enumerate_stereoisomers(toolkit_registry=toolkit)\n\n assert len(isomers) == 2\n # make sure the mol is not in the isomers and that they only differ by stereo chem\n assert mol not in isomers\n for ismol in isomers:\n assert ismol.n_conformers != 0\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n atom_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the two isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereocenters(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"NC(Cl)(F)O\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n\n isomers = mol.enumerate_stereoisomers(toolkit_registry=toolkit)\n\n assert len(isomers) == 2\n # make sure the mol is not in the isomers and that they only differ by stereo chem\n assert mol not in isomers\n for ismol in isomers:\n assert ismol.n_conformers != 0\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n atom_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the two isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def removeGlobalPhaseFromComa(self):\n\t\tprint \"Phase removed <---!\"\n\t\tif self.globalPhaseRemoved:\n\t\t\tprint \"DO NOT REMOVE THE GLOBAL PHASE A SECOND TIME\"\n\t\t\treturn \n\t\tself.globalPhaseRemoved = True\n\t\tphaseDirection = self.getGlobalPhaseDirection()\n\t\tdim = 2*self.totalBins\n\t\ttransformationMatrix = np.identity(dim)\n#\t#\tCMwrite(\"removeGlobalPhaseFromComa\")\n\t\tfor i in range(dim):\n\t\t\tfor j in range(dim):\n\t\t\t\ttransformationMatrix[i,j] -= phaseDirection[i] * phaseDirection[j]\n\t\tself.coma = np.dot(transformationMatrix, np.dot(self.coma, transformationMatrix)) # no transpose needed, since transformationMatrix is symmetric\n\t\tself.makeComaInv()\n\t\tself.specialCOMAs = {}", "def _handle_pairing_complete(self, _):\n self.bus.emit(Message(\"mycroft.mic.unmute\"))", "def cure(self, s):\n if self.disease_status == 1:\n s.number_of_symptomatic -= 1\n elif self.disease_status == 2:\n s.number_of_asymptomatic -= 1\n elif self.disease_status == 3:\n s.number_of_res_symp -= 1\n elif self.disease_status == 4:\n s.number_of_res_asymp -= 1\n if self.disease_status > 0:\n s.infected.remove(self.identifier)\n if self.disease_status > 2:\n s.resistant.remove(self.identifier)\n self.disease_status = 0\n self.time_since_infection = -1", "def fix_sphere_m (center_x, center_y, center_z, radius, centers, radii, len_points):\n \n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = 
[points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n\n for i in range(0, len(points)):\n check = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n if (check == 0):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n\n return g_x, g_y, g_z", "def process_stereo(self, image1, image2, disp1, disp2):\n return _elas.Elas_process_stereo(self, image1, image2, disp1, disp2)", "def evolve(self):\n # Start with first player\n self.phase.set(1)\n\n #self.first_player\n \n # Autopass turn if no cards left for player\n \n \n pass", "def atom_stereo_keys(sgr):\n atm_ste_keys = dict_.keys_by_value(atom_stereo_parities(sgr),\n lambda x: x in [True, False])\n return atm_ste_keys", "def object_selection(self, ch1_bright_mag, ch2_bright_mag, selection_band_faint_mag, selection_band='I2_MAG_APER4'):\n\n clusters_to_remove = []\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Read in the catalog\n se_catalog = Table.read(cluster_info['se_cat_path'], format='ascii')\n\n # Add the mask name to the catalog. Extracting only the system agnostic portion of the path\n se_catalog['MASK_NAME'] = re.search(r'Data_Repository/.*?\\Z', cluster_info['cov_mask_path']).group(0)\n\n # Preform SExtractor Flag cut. A value of under 4 should indicate the object was extracted well.\n se_catalog = se_catalog[se_catalog['FLAGS'] < 4]\n\n # Preform a faint-end magnitude cut in selection band.\n se_catalog = se_catalog[se_catalog[selection_band] <= selection_band_faint_mag]\n\n # Preform bright-end cuts\n # Limits from Eisenhardt+04 for ch1 = 10.0 and ch2 = 9.8\n se_catalog = se_catalog[se_catalog['I1_MAG_APER4'] > ch1_bright_mag] # [3.6] saturation limit\n se_catalog = se_catalog[se_catalog['I2_MAG_APER4'] > ch2_bright_mag] # [4.5] saturation limit\n\n # For the mask cut we need to check the pixel value for each object's centroid.\n # Read in the mask file\n mask, header = fits.getdata(cluster_info['cov_mask_path'], header=True)\n\n # Recast the mask image as a boolean array so we can use it as a check on the catalog entries\n mask = mask.astype(bool)\n\n # Read in the WCS from the mask\n w = WCS(header)\n\n # Get the objects pixel coordinates\n xy_data = np.array(w.wcs_world2pix(se_catalog['ALPHA_J2000'], se_catalog['DELTA_J2000'], 0))\n\n # Floor the values and cast as integers so we have the pixel indices into the mask\n xy_pix_idxs = np.floor(xy_data).astype(int)\n\n # Filter the catalog according to the boolean value in the mask at the objects' locations.\n se_catalog = se_catalog[mask[xy_pix_idxs[1], xy_pix_idxs[0]]]\n\n # If we have completely exhausted the cluster of any object, we should mark it for removal otherwise add it\n # to the data structure\n if se_catalog:\n cluster_info['catalog'] = se_catalog\n else:\n clusters_to_remove.append(cluster_id)\n\n # Remove any cluster that has no objects surviving our selection cuts\n for cluster_id in clusters_to_remove:\n self._catalog_dictionary.pop(cluster_id, None)", "def _alienCollide(self):\n for b in self._bolts:\n if self._ship != None and self._ship.collides(b):\n self._ship = None\n self._bolts = []\n self._key = False\n self._lives -= 1", "def delete_leader(self):", "def disconnect_poselocs():\r\n #TODO: Eventually create a non-linear way to re-add the nodes for a pose if you want to edit it 
later.\r\n if DRYRUN:\r\n print('poselocs deleted - DRY RUN ONLY')\r\n return False\r\n\r\n for oPos in pm.ls('*_POSE', type='transform'):\r\n pm.delete(oPos.getChildren(ad=True, type='transform'))", "def delete_ss(self, sub):\n # Suppression of one random simple surface (satisfying both criteria)\n random.seed(42)\n surface = random.randint(0, len(self.surfaces)-1)\n print(self.surfaces[surface]['label'])\n\n bck_map = self.surfaces[surface]['aims_ss']\n for voxel in bck_map[0].keys():\n self.skel.setValue(0, voxel[0], voxel[1], voxel[2])\n\n bck_map_bottom = self.surfaces[surface]['aims_bottom']\n for voxel in bck_map_bottom[0].keys():\n self.skel.setValue(0, voxel[0], voxel[1], voxel[2])\n\n save_subject = sub\n return save_subject", "def remove(self):\n self.ren.RemoveActor(self.actor)\n \n self.visible = 0", "def remove_mass_unsafe(self, *focal_elements):\n for focal in focal_elements:\n if focal[0] in self.focals:\n self.focals[focal[0]] -= focal[1]\n else:\n self.focals[focal[0]] = -focal[1]" ]
[ "0.6699158", "0.599879", "0.5703354", "0.54769295", "0.5319069", "0.51858544", "0.49389228", "0.49315634", "0.4910987", "0.48884475", "0.4821768", "0.4819886", "0.4803131", "0.48022223", "0.48019797", "0.47844344", "0.47762632", "0.47756767", "0.47696617", "0.47598013", "0.46851963", "0.4684362", "0.4670574", "0.4658945", "0.4649536", "0.462998", "0.4601323", "0.45977697", "0.45911625", "0.4579816", "0.4569355", "0.45592692", "0.45521846", "0.45329767", "0.45255348", "0.45248407", "0.45191896", "0.451536", "0.45152497", "0.45101416", "0.45080385", "0.44985074", "0.44860962", "0.44820133", "0.44774017", "0.4476765", "0.44764897", "0.44703212", "0.4467004", "0.4466825", "0.44636342", "0.44632763", "0.4462159", "0.4459068", "0.4456366", "0.44429523", "0.4440511", "0.44379073", "0.44379073", "0.4433757", "0.44335198", "0.44315365", "0.4411447", "0.4410548", "0.44064167", "0.44027078", "0.43986684", "0.43970844", "0.43926385", "0.4391324", "0.43893293", "0.43869787", "0.43798903", "0.43760103", "0.43747285", "0.43737543", "0.4372958", "0.4367509", "0.43649915", "0.436296", "0.43627217", "0.43617803", "0.43604755", "0.43578753", "0.43576157", "0.43576157", "0.4354321", "0.43526855", "0.43505082", "0.4346283", "0.43458784", "0.434531", "0.4343365", "0.4343245", "0.4342612", "0.43408927", "0.433959", "0.43356177", "0.4333945", "0.43329638" ]
0.6745221
0
StereoGroup atoms are not in the reaction -> stereo group is unaffected
def test_stereogroup_is_spectator_to_reaction(self):
    # 5a. Reaction preserves unrelated stereo
    products = _reactAndSummarize('[C@:1]F>>[C@:1]F',
                                  'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')
    self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')
    # 5b. Reaction ignores unrelated stereo
    products = _reactAndSummarize('[C:1]F>>[C:1]F',
                                  'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')
    self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')
    # 5c. Reaction inverts unrelated stereo
    products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',
                                  'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')
    self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')
    # 5d. Reaction destroys unrelated stereo
    products = _reactAndSummarize('[C@:1]F>>[C:1]F',
                                  'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')
    self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')
    # 5e. Reaction assigns unrelated stereo
    products = _reactAndSummarize('[C:1]F>>[C@@:1]F',
                                  'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')
    self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')", "def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )", "def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)", "def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def test_parameterize_mol_missing_stereo_rdkit(self, force_field):\n toolkit_registry = ToolkitRegistry(\n toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n )\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def test_reaction_preserves_stereo(self):\n reaction = '[C@:1]>>[C@:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, 
reactant)\n self.assertEqual(products, reactant)", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def test_parameterize_mol_missing_stereo_openeye(self, force_field):\n toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def has_stereo(gra):\n return bool(atom_stereo_keys(gra) or bond_stereo_keys(gra))", "def testMoreStereo(self):\r\n smi_and_cansmi = [\r\n ('Cl[C@](C)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](C)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](C)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Cl)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(I)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](C)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](C)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](C)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](C)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Cl)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Br)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Br)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Br)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Br)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](C)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](I)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](I)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](I)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Cl)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Cl)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](C)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](C)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Br)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Br)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Br)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Br)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](I)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](I)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](I)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](I)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](I)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](I)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](I)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(I)C', 'C[C@@](Cl)(Br)I')]\r\n for smi, cansmi in 
smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def EnumerateStereoisomers(m, options=..., verbose=...): # -> Generator[Unknown, None, None]:\n ...", "def _ignore_collision(self):\n # The legacy version only ignores collision of child links of active joints.\n for link in self.cabinet.get_links():\n for s in link.get_collision_shapes():\n g0, g1, g2, g3 = s.get_collision_groups()\n s.set_collision_groups(g0, g1, g2 | 1 << 31, g3)", "def GetStereoisomerCount(m, options=...): # -> Any:\n ...", "def reflect_local_stereo(gra):\n atm_par_dct = atom_stereo_parities(gra)\n atm_par_dct = dict_.transform_values(\n atm_par_dct, lambda x: x if x is None else not x)\n gra = set_atom_stereo_parities(gra, atm_par_dct)\n return gra", "def atom_stereo_keys(sgr):\n atm_ste_keys = dict_.keys_by_value(atom_stereo_parities(sgr),\n lambda x: x in [True, False])\n return atm_ste_keys", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n r\"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereo_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n \"ClC=CCl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert len(isomers) == 2\n for isomer in isomers:\n assert isomer.n_conformers == 0\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=True, rationalise=False\n )\n\n assert isomers == []\n\n mol = Molecule.from_smiles(\n \"Cl/C=C\\Cl\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n isomers = mol.enumerate_stereoisomers(\n undefined_only=False, rationalise=False\n )\n\n assert len(isomers) == 1\n\n # test max isomers\n mol = Molecule.from_smiles(\n \"BrC=C[C@H]1OC(C2)(F)C2(Cl)C1\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n isomers = mol.enumerate_stereoisomers(\n max_isomers=5,\n undefined_only=True,\n toolkit_registry=toolkit,\n rationalise=True,\n )\n\n assert len(isomers) <= 5\n for isomer in isomers:\n assert isomer.n_conformers == 1\n\n else:\n pytest.skip(\"Required toolkit is 
unavailable\")", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def is_surjective(self):\n # Testing equality of free modules over PIDs is unreliable\n # see Trac #11579 for explanation and status\n # We test if image equals codomain with two inclusions\n # reverse inclusion of below is trivially true\n return self.codomain().is_submodule(self.image())", "async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))", "def test_enumerating_stereobonds(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"ClC=CCl\", allow_undefined_stereo=True, toolkit_registry=toolkit\n )\n\n # use the default options\n isomers = mol.enumerate_stereoisomers()\n assert len(isomers) == 2\n\n assert mol not in isomers\n # make sure the input molecule is only different by bond stereo\n for ismol in isomers:\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereobonds(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"ClC=CCl\", allow_undefined_stereo=True, toolkit_registry=toolkit\n )\n\n # use the default options\n isomers = mol.enumerate_stereoisomers()\n assert len(isomers) == 2\n\n assert mol not in isomers\n # make sure the input molecule is only different by bond stereo\n for ismol in isomers:\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert 
mol.is_isomorphic_with(ismol) is False\n\n # make sure the isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereo_partially_defined(\n self, toolkit_class, smiles, undefined_only, expected\n ):\n\n if not toolkit_class.is_available():\n pytest.skip(\"Required toolkit is unavailable\")\n\n toolkit = toolkit_class()\n\n # test undefined only\n mol = Molecule.from_smiles(\n smiles, toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n stereoisomers = mol.enumerate_stereoisomers(\n undefined_only=undefined_only, rationalise=False\n )\n\n # Ensure that the results of the enumeration are what the test expects.\n # This roundtrips the expected output from SMILES --> OFFMol --> SMILES,\n # since the SMILES for stereoisomers generated in this test may change depending\n # on which cheminformatics toolkit is used.\n expected = {\n Molecule.from_smiles(stereoisomer, allow_undefined_stereo=True).to_smiles(\n explicit_hydrogens=True, isomeric=True, mapped=False\n )\n for stereoisomer in expected\n }\n actual = {\n stereoisomer.to_smiles(explicit_hydrogens=True, isomeric=True, mapped=False)\n for stereoisomer in stereoisomers\n }\n\n assert expected == actual", "def test_isomorphic_striped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )\n\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert not Molecule.are_isomorphic(\n mol1,\n mol2,\n strip_pyrimidal_n_atom_stereo=False,\n atom_stereochemistry_matching=True,\n bond_stereochemistry_matching=True,\n )[0]", "def _FilterProtonsAndElectrons(self):\n self.reactants = filter(lambda c: c.compound.kegg_id not in \n ['C00080', 'C05359'], self.reactants)", "def test_reaction_inverts_stereo(self):\n reaction = '[C@:1]>>[C@@:1]'\n\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br |o1:1|')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@H](Cl)Br |&1:1|')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')", "def test_isomorphic_stripped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )", "def filter_invalid_combos(m: 'Mods') -> 'Mods':\n if m & (Mods.DOUBLETIME | Mods.NIGHTCORE) and m & Mods.HALFTIME:\n m &= ~Mods.HALFTIME\n if m & Mods.EASY and m & Mods.HARDROCK:\n m &= ~Mods.HARDROCK\n if m & Mods.RELAX and m & Mods.AUTOPILOT:\n m &= ~Mods.AUTOPILOT\n if m & Mods.PERFECT and m & Mods.SUDDENDEATH:\n m &= ~Mods.SUDDENDEATH\n\n return m", "def test_enumerating_no_protomers(self):\n\n mol = 
Molecule.from_smiles(\"CC\")\n\n assert mol.enumerate_protomers() == []", "def test_enumerating_no_protomers(self):\n\n mol = Molecule.from_smiles(\"CC\")\n\n assert mol.enumerate_protomers() == []", "def substereomers(gra):\n _assigned = functools.partial(\n dict_.filter_by_value, func=lambda x: x is not None)\n\n known_atm_ste_par_dct = _assigned(atom_stereo_parities(gra))\n known_bnd_ste_par_dct = _assigned(bond_stereo_parities(gra))\n\n def _is_compatible(sgr):\n atm_ste_par_dct = _assigned(atom_stereo_parities(sgr))\n bnd_ste_par_dct = _assigned(bond_stereo_parities(sgr))\n _compat_atm_assgns = (set(known_atm_ste_par_dct.items()) <=\n set(atm_ste_par_dct.items()))\n _compat_bnd_assgns = (set(known_bnd_ste_par_dct.items()) <=\n set(bnd_ste_par_dct.items()))\n return _compat_atm_assgns and _compat_bnd_assgns\n\n sgrs = tuple(filter(_is_compatible, stereomers(gra)))\n return sgrs", "def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()", "def is_standard_residue(self):\n return False", "def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False", "def test_check_for_existing_reaction_eliminates_identical_reactions_without_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=False)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=False)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to eliminate reactions without duplicate tag')", "def proc_sw_only_morphs(forward_pairs, morphs, backward_pairs):\n sandwich_pairs = []\n if not backward_pairs:\n forward_pairs[-1].morphs.extend(morphs)\n elif len(morphs) == 1:\n morph = morphs[0]\n morph_str = str(morph)\n if morph_str in ['이/VCP', '하/VX'] and backward_pairs[0].morphs[0].tag.startswith('E'):\n # '이' 긍정지정사나 '하' 보조용언 뒤에 어미가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == '에/JKB' and backward_pairs[0].morphs[0].tag == 'JX':\n # '에' 부사격조사 뒤에 보조사가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == 'ᆯ/ETM' and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 'ㄹ' 관형형전성어미 앞에 용언이 나올 경우\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag in ['EC', 'EF'] and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 연결어미나 종결어미 앞에 용언이 나올 경우\n 
forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag.startswith('XS'):\n # append suffixes to the end of forward pair list\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n else:\n raise AlignError()\n else:\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n if morphs_str == '(/SS + 대북/NNG + (/SS + 대북/NNG + )/SS + )/SS' and forward_pairs[-1].word_str == u'대북':\n del morphs[:]\n elif morphs_str == '(/SS + 동경/NNP + )/SS' and forward_pairs[-1].word_str == u'도쿄':\n del morphs[:]\n else:\n raise AlignError()\n return sandwich_pairs", "def bond_stereo_keys(sgr):\n bnd_ste_keys = dict_.keys_by_value(bond_stereo_parities(sgr),\n lambda x: x in [True, False])\n return bnd_ste_keys", "def set_channel_group(self, channel_group):\n super().set_channel_group(channel_group)\n self.skip_flags = self.flagspace.all_flags() # everything but 0", "def _molge(x, y):\n if x is None or y is None:\n return False\n if hasattr(x, '_substructfp'):\n if not hasattr(y, '_substructfp'):\n y._substructfp = _fingerprinter(y, True)\n if not DataStructs.AllProbeBitsMatch(y._substructfp, x._substructfp):\n return False\n match = x.GetSubstructMatch(y)\n x.__sssAtoms = []\n if match:\n if highlightSubstructures:\n x.__sssAtoms = list(match)\n return True\n else:\n return False", "def DeMorgan_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, A), disj, (neg, B)), (neg, (A, conj, B)) ],\n\t\t\t[ (neg, (A, conj, B)), ((neg, A), disj, (neg, B)) ],\n\t\t\t[ ((neg, A), conj, (neg, B)), (neg, (A, disj, B)) ],\n\t\t\t[ (neg, (A, disj, B)), ((neg, A), conj, (neg, B)) ]\n\t\t]))", "def braid_group_action(self):\n G = []\n for c in self:\n c = c.relabel()\n if any(c in g for g in G):\n continue\n G.append(c.braid_group_orbit())\n return G", "def test_molecules_from_xyz(self):\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz6['dict'])\n\n # check that the atom order is the same\n self.assertTrue(s_mol.atoms[0].is_sulfur())\n self.assertTrue(b_mol.atoms[0].is_sulfur())\n self.assertTrue(s_mol.atoms[1].is_oxygen())\n self.assertTrue(b_mol.atoms[1].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_oxygen())\n self.assertTrue(b_mol.atoms[2].is_oxygen())\n self.assertTrue(s_mol.atoms[3].is_nitrogen())\n self.assertTrue(b_mol.atoms[3].is_nitrogen())\n self.assertTrue(s_mol.atoms[4].is_carbon())\n self.assertTrue(b_mol.atoms[4].is_carbon())\n self.assertTrue(s_mol.atoms[5].is_hydrogen())\n self.assertTrue(b_mol.atoms[5].is_hydrogen())\n self.assertTrue(s_mol.atoms[6].is_hydrogen())\n self.assertTrue(b_mol.atoms[6].is_hydrogen())\n self.assertTrue(s_mol.atoms[7].is_hydrogen())\n self.assertTrue(b_mol.atoms[7].is_hydrogen())\n self.assertTrue(s_mol.atoms[8].is_hydrogen())\n self.assertTrue(b_mol.atoms[8].is_hydrogen())\n self.assertTrue(s_mol.atoms[9].is_hydrogen())\n self.assertTrue(b_mol.atoms[9].is_hydrogen())\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz7['dict'])\n self.assertTrue(s_mol.atoms[0].is_oxygen())\n self.assertTrue(b_mol.atoms[0].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_carbon())\n self.assertTrue(b_mol.atoms[2].is_carbon())\n\n expected_bonded_adjlist = \"\"\"multiplicity 2\n1 O u0 p2 c0 {6,S} {10,S}\n2 O u0 p2 c0 {3,S} {28,S}\n3 C u0 p0 c0 {2,S} {8,S} {14,S} {15,S}\n4 C u0 p0 c0 {7,S} {16,S} {17,S} {18,S}\n5 C u0 p0 c0 {7,S} {19,S} {20,S} {21,S}\n6 C u0 p0 c0 {1,S} {22,S} {23,S} {24,S}\n7 C u1 p0 c0 {4,S} {5,S} {9,S}\n8 C u0 p0 c0 {3,S} {10,D} {11,S}\n9 C u0 p0 c0 {7,S} {11,D} {12,S}\n10 
C u0 p0 c0 {1,S} {8,D} {13,S}\n11 C u0 p0 c0 {8,S} {9,D} {25,S}\n12 C u0 p0 c0 {9,S} {13,D} {26,S}\n13 C u0 p0 c0 {10,S} {12,D} {27,S}\n14 H u0 p0 c0 {3,S}\n15 H u0 p0 c0 {3,S}\n16 H u0 p0 c0 {4,S}\n17 H u0 p0 c0 {4,S}\n18 H u0 p0 c0 {4,S}\n19 H u0 p0 c0 {5,S}\n20 H u0 p0 c0 {5,S}\n21 H u0 p0 c0 {5,S}\n22 H u0 p0 c0 {6,S}\n23 H u0 p0 c0 {6,S}\n24 H u0 p0 c0 {6,S}\n25 H u0 p0 c0 {11,S}\n26 H u0 p0 c0 {12,S}\n27 H u0 p0 c0 {13,S}\n28 H u0 p0 c0 {2,S}\n\"\"\"\n expected_mol = Molecule().from_adjacency_list(expected_bonded_adjlist)\n self.assertEqual(b_mol.to_adjacency_list(), expected_bonded_adjlist)\n # the is_isomorphic test must come after the adjlist test since it changes the atom order\n self.assertTrue(b_mol.is_isomorphic(expected_mol))\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz10['dict'], multiplicity=1, charge=0)\n self.assertIsNotNone(s_mol)\n self.assertIsNotNone(b_mol)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz10['dict']['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz_dict_13, multiplicity=1, charge=0)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz_dict_13['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n self.assertEqual(s_mol.multiplicity, 1)\n self.assertEqual(b_mol.multiplicity, 1)\n self.assertFalse(any(atom.radical_electrons for atom in b_mol.atoms))", "def _check_calib_groups(self):\n is_science = self.find_frames('science')\n for i in range(len(self)):\n if not is_science[i]:\n continue\n if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1:\n msgs.error('Science frames can only be assigned to a single calibration group.')", "def testInvisibleNegative(self):\n slhafile=\"./testFiles/slha/higgsinoStop.slha\"\n model = Model(BSMList,SMList)\n model.updateParticles(slhafile)\n topos = decomposer.decompose(model, .1*fb, False, True, 5.*GeV)\n tested = False\n for topo in topos:\n if str(topo)!=\"[1,1][1,1]\":\n continue\n for element in topo.elementList:\n if str(element)!=\"[[[t+],[t-]],[[q],[W+]]]\":\n continue\n tested = True\n trueMothers = [mother for mother in element.motherElements if not mother is element]\n self.assertEqual(len(trueMothers),0)\n self.assertTrue(tested)", "def test_enumerating_stereocenters(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"NC(Cl)(F)O\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n\n isomers = mol.enumerate_stereoisomers(toolkit_registry=toolkit)\n\n assert len(isomers) == 2\n # make sure the mol is not in the isomers and that they only differ by stereo chem\n assert mol not in isomers\n for ismol in isomers:\n assert ismol.n_conformers != 0\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n atom_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the two isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def test_enumerating_stereocenters(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = Molecule.from_smiles(\n \"NC(Cl)(F)O\", toolkit_registry=toolkit, allow_undefined_stereo=True\n )\n\n isomers = 
mol.enumerate_stereoisomers(toolkit_registry=toolkit)\n\n assert len(isomers) == 2\n # make sure the mol is not in the isomers and that they only differ by stereo chem\n assert mol not in isomers\n for ismol in isomers:\n assert ismol.n_conformers != 0\n assert (\n Molecule.are_isomorphic(\n mol,\n ismol,\n return_atom_map=False,\n atom_stereochemistry_matching=False,\n )[0]\n is True\n )\n assert mol.is_isomorphic_with(ismol) is False\n\n # make sure the two isomers are different\n assert isomers[0].is_isomorphic_with(isomers[1]) is False\n\n else:\n pytest.skip(\"Required toolkit is unavailable\")", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n sign = 1\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency * sign <= 0:\n return True\n else:\n if line.amount_residual * sign <= 0:\n return True\n return False", "def test_run_molecule_not_selected(self, single_mol_system):\n sequence = \"ABCDE\"\n processor = dssp.AnnotateResidues(\n \"test\", sequence, molecule_selector=lambda mol: False\n )\n processor.run_molecule(single_mol_system.molecules[0])\n found = self.sequence_from_system(single_mol_system, \"test\")\n assert vermouth.utils.are_all_equal(found)\n assert found[0] is None", "def testInvisiblePositive(self):\n slhafile=\"./testFiles/slha/higgsinoStop.slha\"\n model = Model(BSMList,SMList)\n model.updateParticles(slhafile)\n topos = decomposer.decompose ( model, .1*fb, False, True, 5.*GeV )\n tested = False\n for topo in topos:\n if str(topo)!=\"[][]\":\n continue\n for element in topo.elementList:\n if str(element)!=\"[[],[]]\":\n continue\n tested = True\n trueMothers = [mother for mother in element.motherElements if not mother is element]\n if not trueMothers: continue\n self.assertEqual(str(trueMothers[0]),\"[[],[[nu,nu]]]\")\n self.assertEqual(len(trueMothers), 1)\n self.assertTrue(tested)", "def on_collision(self):", "def volume_mask(self):\n return np.vectorize(lambda name: name not in self.nvertices.keys())(self.name)", "def mod_mask(self):\n # Check the *_masq values\n self.__log.debug(\"Checking the *_masq arrays\")\n # Retrieve the kid boxes\n masq_names = np.unique([\"{}_masq\".format(item[1]) for item in self.list_detector])\n self.__check_attributes(masq_names, read_missing=False)\n # Check that they are all the same\n warnings.warn(\"Temporary fix to int8\")\n masqs = [getattr(self, masq).astype(np.int8) for masq in masq_names]\n\n if np.any(np.std(masqs, axis=0) != 0):\n self.__log.error(\"*_masq is varying -- Please check : {}\".format(pprint_list(masq_names, \"_masq\")))\n\n # AB private comm) main_flag should be the bitwise_or of all boxes\n # Well not exactly....\n # cast into 8 bit, is more than enough, only 3 bits used anyway...\n masq = np.bitwise_or.reduce(masqs, axis=0).astype(np.int8)\n\n # AB (#CONCERTO_DAQ January 11 13:02)\n # _flag_balayage_en_cours & _flag_blanking_synthe\n # Ainsi on aura la modulation en bit0 et 1 et le flag blanking en bit\n # AB (#CONCERTO_DAQ February 11 11:07)\n # bit 1 & 2 code the modulation as a signed integer -1 0 1 : 11 00 01 ie 3 0 1\n # bit 3 is a blanking bit, which does not exist for KISS, but should not be taken into account for CONCERTO\n\n # Thus as a temporary fix, let's clear the 3rd bit, actually a bad idea...\n # self.__log.warning(\"Temporary fix : clearing the 3rd bit of masq\")\n # masq = masq & ~(1 << 2)\n\n return masq", "def core_mask(self):\n mask = []\n for atom in self.atoms:\n if \"shell\" not in atom.atom_type.label:\n mask.append(True)\n else:\n 
mask.append(False)\n return mask", "def is_standard_residue(self):\n return True", "def is_standard_residue(self):\n return True", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False", "def test_no_group_filter(self):\n self.sync.sync_groups()\n self.assertEqual(self.ldapobj.methods_called(), [])", "def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp", "def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in 
spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))", "def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)", "def test_zero_modes(self):\n # make random numbers reproducible\n torch.manual_seed(0)\n \n n = 5\n # draw random numbers for positive definite, symmetric n x n matrix of width parameters\n Gi = 5.0 * 2.0*(torch.rand(n,n) - 0.5)\n # symmetrize\n Gi = 0.5*(Gi + Gi.T)\n # random numbers for position and momentum\n qi,pi = torch.rand(n,1), torch.rand(n,1)\n qj,pj = qi,pi #torch.rand(n,1), torch.rand(n,1)\n # check <qi,pi,Gi|qi,pi,Gi> = 1 \n cso = CoherentStatesOverlap(Gi,Gi)\n olap = cso(qi,pi, qj,pj)\n\n # turn Gi into a singular matrix by embedding it into a larger space\n Gi_ = torch.zeros((n+1,n+1))\n Gi_[:n,:n] = Gi\n qi_, pi_, qj_, pj_ = (torch.cat((x, torch.zeros(1,1)), 0) for x in (qi,pi,qj,pj))\n # The zero dimension should have no 
effect on the overlaps\n cso_ = CoherentStatesOverlap(Gi_,Gi_)\n olap_ = cso_(qi_,pi_, qj_,pj_)\n\n self.assertEqual(olap.squeeze().item(), olap_.squeeze().item())", "def neutralise(self):\n smi = self.smiles\n\n patts = [\n # Imidazoles\n ('[n+;H]','n'),\n # Amines\n ('[N+;!H0]','N'),\n # Carboxylic acids and alcohols\n ('[$([O-]);!$([O-][#7])]','O'),\n # Thiols\n ('[S-;X1]','S'),\n # Sulfonamides\n ('[$([N-;X2]S(=O)=O)]','N'),\n # Enamines\n ('[$([N-;X2][C,N]=C)]','N'),\n # Tetrazoles\n ('[n-]','[nH]'),\n # Sulfoxides\n ('[$([S-]=O)]','S'),\n # Amides\n ('[$([N-]C=O)]','N') ]\n\n reactions = [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]\n\n m = Chem.MolFromSmiles(smi)\n for i,(reactant, product) in enumerate(reactions):\n while m.HasSubstructMatch(reactant):\n rms = AllChem.ReplaceSubstructs(m, reactant, product)\n m = rms[0]\n\n # it doesn't matter is u choose to output a canonical smiles as the\n # sequence of atoms is changed calling `AllChem.ReplaceSubstructs\n self.smiles = Chem.MolToSmiles(m, isomericSmiles=False) #, canonical=False)", "def test_check_for_existing_reaction_eliminates_identical_reactions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction')", "def is_potential_group(self, player: int, row: int, col: int, row_diff: int, col_diff: int):\n opponent = 1 - player\n for _ in range(4):\n square = Square(row, col)\n if not self.is_valid(square):\n return False\n if self.state[opponent][row][col]:\n # If there is a token that belongs to the opponent in this group,\n # then this group is not a potential group that belongs to the given player.\n return False\n row, col = row + row_diff, col + col_diff\n return True", "def __bool__(self):\n return len(self.atoms) >= 1", "def connect_objects(self, context):\n # Poke hole in ring\n m = self.outside.modifiers.new(name=\"boolean_inner\", type=\"BOOLEAN\")\n m.operation = \"DIFFERENCE\"\n m.object = self.inside", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n 
cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def stereo_score(alignment):\n #dictionary with properties for each residue\n dic_prop = {'I': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'L': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'V': [1, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'C': [1, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n 'A': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'G': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'M': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'F': [1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'W': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'H': [1, 1, 0, 0, 0, 0, 1, 1, 0, 1],\n 'K': [1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'R': [0, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'E': [0, 1, 0, 0, 0, 0, 0, 0, 1, 1],\n 'Q': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'D': [0, 1, 1, 0, 0, 0, 0, 0, 1, 1],\n 'N': [0, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'S': [0, 1, 1, 0, 1, 0, 0, 0, 0, 0],\n 'T': [1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'P': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],\n 'B': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Z': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n '-': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n score_list = []\n for i in range(0, alignment.get_alignment_length()):\n #extract the unique residues in the alignment\n column = ''.join(set(alignment[:, i]))\n stereo_list = []\n #loop through each residue\n for res in range(0, len(column)):\n #replace the residue with list of properties\n residue = column[res]\n #append the properties list to a\n stereo_prop = dic_prop.get(residue)\n stereo_list.append(stereo_prop)\n #number of common properties\n count_stereo = sum(len(set(i)) == 1 for i in zip(*stereo_list))\n #add the number of properties to a list\n score_list.append(count_stereo)\n score_list_final = [float(i*0.1) for i in score_list]\n return score_list_final", "def test_strip_atom_stereochemistry(self):\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n nitrogen_idx = [\n atom.molecule_atom_index for atom in mol.atoms if atom.symbol == \"N\"\n ][0]\n\n # TODO: This fails with RDKitToolkitWrapper because it perceives\n # the stereochemistry of this nitrogen as None\n assert mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None\n\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None", "def test_enlarge_1_add_nonreactive_species(self):\n m0 = Molecule(smiles='[He]')\n spc0 = 
self.rmg.reaction_model.make_new_species(m0, label='He', reactive=False)[0]\n self.rmg.reaction_model.enlarge(spc0)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 1)\n self.assertFalse(self.rmg.reaction_model.core.species[0].reactive)", "def test_check_isomorphism(self):\n mol1 = Molecule(smiles='[O-][N+]#N')\n mol2 = Molecule(smiles='[N-]=[N+]=O')\n self.assertTrue(converter.check_isomorphism(mol1, mol2))", "def __ne__(self, other):\r\n if isinstance(other, vec4):\r\n return self.x!=other.x or self.y!=other.y or self.z!=other.z\r\n else:\r\n return 1", "def badMuons(self, allmuons, allvertices):\n\n muons = list(m for m in allmuons) # make it a python list\n goodMuon = []\n\n if len(allvertices) < 1: raise RuntimeError\n PV = allvertices[0].position()\n \n out = [] \n for mu in muons:\n if (not(mu.isPFMuon()) or mu.innerTrack().isNull()):\n goodMuon.append(-1); # bad but we don't care\n continue;\n if (self.preselection(mu)):\n dxypv = abs(mu.innerTrack().dxy(PV));\n dzpv = abs(mu.innerTrack().dz(PV));\n if (self.tighterId(mu)):\n ipLoose = ((dxypv < 0.5 and dzpv < 2.0) or mu.innerTrack().hitPattern().pixelLayersWithMeasurement() >= 2);\n goodMuon.append(ipLoose or (not(self.selectClones_) and self.tightGlobal(mu)));\n elif (self.safeId(mu)):\n ipTight = (dxypv < 0.2 and dzpv < 0.5);\n goodMuon.append(ipTight);\n else:\n goodMuon.append(0);\n else:\n goodMuon.append(3); # maybe good, maybe bad, but we don't care\n\n n = len(muons)\n for i in range(n):\n if (muons[i].pt() < self.ptCut_ or goodMuon[i] != 0): continue;\n bad = True;\n if (self.selectClones_):\n bad = False; # unless proven otherwise\n n1 = muons[i].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n for j in range(n):\n if (j == i or goodMuon[j] <= 0 or not(self.partnerId(muons[j]))): continue\n n2 = muons[j].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n if (deltaR(muons[i],muons[j]) < 0.4 or (n1 > 0 and n2 > 0 and ROOT.muon.sharedSegments(muons[i],muons[j]) >= 0.5*min(n1,n2))):\n bad = True;\n break;\n if (bad):\n out.append(muons[i]);\n return out", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def group_is_surrounded(group, board):\n if group_adjacents(group, board, filter_by=\"None\"):\n return False\n else:\n return True", "def exitPreMolecule(self):\n if self.molecules == 0 and max(self.departed.values()) > 0:\n print(\"these atoms exited before first molecule created: \", end=\"\")\n print([atom.name for atom,count in self.departed.items() if count>0])\n return False\n return True", "def mask(self):", "def get_inv_overlap_mask(self):\n self.inv_overlap_mask = self.structure_mask != self.overlap_mask", "def test_ungrouping(self):\n n = self.create(NodeItem, UML.Node)\n c = self.create(ComponentItem, UML.Component)\n\n self.group(n, c)\n self.ungroup(n, c)\n\n assert 0 == len(n.subject.ownedAttribute)\n assert 0 == len(c.subject.ownedAttribute)\n assert 0 == len(self.kindof(UML.Property))\n assert 0 == len(self.kindof(UML.Connector))\n assert 0 == len(self.kindof(UML.ConnectorEnd))", "def test_append_unreactive_structure(self):\n\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('CCO'), # a control species\n Species().from_smiles('[N]=O'),\n Species().from_adjacency_list(\"\"\"1 O u1 p2 c0 {2,S}\n 2 N u0 p2 c0 {1,S}\"\"\"), # a non-representative structure of '[N]=O'\n ]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), 2)\n 
self.assertEquals(len(cerm.index_species_dict), 2)\n self.assertEquals(len(cerm.index_species_dict[1].molecule), 1)\n self.assertTrue(cerm.index_species_dict[1].molecule[0].reactive)\n self.assertEquals(len(cerm.index_species_dict[2].molecule), 1)\n self.assertTrue(cerm.index_species_dict[2].molecule[0].reactive)", "def group_group_collide(sprite_group, o_sprite_group):\n sprites = set(sprite_group)\n for sprite in sprites:\n if group_collide(o_sprite_group, sprite):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False", "def is_atom_convex(self):\n return False", "def is_atom_convex(self):\n return False", "def setupCollisions(self) :", "def cleaveSurfAtom(entry,max_bonds=1,supercell=2,group_structs=True):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # If the crystal is 3D\n if results[0]=='conventional':\n struct = copy.deepcopy(entry[0])\n og_binary_matrix = getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(og_binary_matrix)))\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n bonds = []\n \n # Get list of bonded atoms\n for i in range(len(og_binary_matrix)):\n for pair in [(i,j) for j in range(i+1,len(og_binary_matrix)) \n if og_binary_matrix[i][j]==1]:\n bonds.append(pair)\n allCombos = []\n combNum = 0\n \n # Get list of all combinations of bonds\n for i in range(max_bonds+1):\n for com in list(itertools.combinations(bonds,i)):\n allCombos.append(com)\n combNum+=1\n\n combos = allCombos\n jjj=0\n all_structs = []\n \n # For each bond combination\n for combo in combos:\n broken=0\n jjj+=1\n modified_matrix = np.array(binary_matrix)\n for pair in combo:\n i,j = pair\n i=i*supercell**3\n j=j*supercell**3\n # Break bonds in the loop\n for shift in range(supercell**3):\n for shift2 in range(supercell**3):\n modified_matrix[i+shift][j+shift2]=0\n modified_matrix[j+shift][i+shift2]=0\n\n new_num_bonds=sum(sum(modified_matrix))\n broken=int(og_num_bonds-new_num_bonds) \n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n # If the new set of atoms is not empty\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. 
the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n # If formula of new network matches the original cell\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n else:\n print('Material is does not have a 3D motiif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def blendShapeEnvelopeOff():\n obj = cmds.ls(selection = True)\n history = cmds.listHistory(obj)\n bsHistory = cmds.ls(history, type = 'blendShape')\n for bs in bsHistory:\n cmds.setAttr(bs+'.'+'envelope',0.0) #note not changing blend target weights", "def atomisticSphere (flag, filin, filout, max_distance = 15, analysis = 1, atom_central = \"mean_point\", debug = 1):\n \n list_atom_pocket = parsePDB.loadCoordSectionPDB(filin)\n dico_stock_count = tool.generateStructCompositionAtomistic (max_distance, 3)\n \n if atom_central == \"mean_point\" : \n central_point = generateMeansPointPocket (list_atom_pocket)\n # else append barycenter pocket calculated by RADI\n \n for atom in list_atom_pocket : \n distance = parsePDB.distanceTwoatoms(central_point, atom)\n # print distance\n element = atom[\"element\"]\n name_atom = atom[\"name\"]\n residue = tool.transformAA(atom[\"resName\"])\n \n for distance_key in dico_stock_count.keys() : \n if distance <= distance_key or distance > max_distance : \n dico_stock_count [distance_key] [\"atom\"] = dico_stock_count [distance_key] [\"atom\"] + 1\n if element == \"C\" : \n dico_stock_count [distance_key] [\"carbon\"] = dico_stock_count [distance_key] [\"carbon\"] + 1\n elif element == \"N\" : \n dico_stock_count [distance_key] [\"nitrogen\"] = dico_stock_count [distance_key] [\"nitrogen\"] + 1\n elif element == \"S\" : \n dico_stock_count [distance_key] [\"sulfur\"] = dico_stock_count [distance_key] [\"sulfur\"] + 1\n elif element == \"O\" : \n dico_stock_count [distance_key] [\"oxygen\"] = dico_stock_count [distance_key] [\"oxygen\"] + 1\n elif element == \"H\" : \n dico_stock_count [distance_key] [\"hydrogen\"] = dico_stock_count [distance_key] [\"hydrogen\"] + 1\n \n if residue in dico_Hacceptor.keys () : \n if name_atom in dico_Hacceptor[residue] : \n dico_stock_count [distance_key] [\"hbond_acceptor\"] = dico_stock_count [distance_key] [\"hbond_acceptor\"] + 1\n \n if residue in dico_atom_Car : \n if name_atom in dico_atom_Car[residue] : \n dico_stock_count [distance_key] [\"aromatic\"] = dico_stock_count [distance_key] [\"aromatic\"] + 1\n \n if residue in dico_atom_hydrophobic : \n if name_atom in dico_atom_hydrophobic[residue] : \n dico_stock_count [distance_key] [\"hydrophobic\"] = dico_stock_count [distance_key] [\"hydrophobic\"] + 1\n \n if residue in dico_atom_Carg : \n if name_atom in dico_atom_Carg[residue] : \n dico_stock_count [distance_key] [\"alcool\"] = dico_stock_count [distance_key] [\"alcool\"] + 1\n \n \n if residue in dico_Hdonor.keys () : \n if name_atom in dico_Hdonor[residue] : \n dico_stock_count [distance_key] [\"hbond_donor\"] = dico_stock_count [distance_key] [\"hbond_donor\"] + 1\n \n if name_atom == \"CA\" or name_atom == \"O\" or name_atom == \"C\" or name_atom == \"N\" or name_atom == \"H\" or name_atom == \"HA\" 
:\n dico_stock_count [distance_key] [\"main_chain\"] = dico_stock_count [distance_key] [\"main_chain\"] + 1\n else : \n dico_stock_count [distance_key] [\"side_chain\"] = dico_stock_count [distance_key] [\"side_chain\"] + 1\n \n for distance_key in dico_stock_count.keys () : \n nb_atom = float(dico_stock_count [distance_key] [\"atom\"])\n if nb_atom == 0 : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n \n else : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + str(nb_atom) + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + str (dico_stock_count [distance_key] [\"side_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"main_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"sulfur\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"carbon\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"nitrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"oxygen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_acceptor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_donor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"alcool\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrophobic\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"aromatic\"] / nb_atom) + \"\\n\")", "def remove_clashes(self):\n dihe_parameters = self.myGlycosylator.builder.Parameters.parameters['DIHEDRALS']\n vwd_parameters = self.myGlycosylator.builder.Parameters.parameters['NONBONDED']\n \n static_glycans = None\n for k in 
self.original_glycanMolecules:\n if k not in self.linked_glycanMolecules:\n if static_glycans is not None:\n static_glycans += self.original_glycanMolecules[k].atom_group\n else:\n static_glycans = self.original_glycanMolecules[k].atom_group.copy()\n \n environment = self.myGlycosylator.protein.copy() \n environment += static_glycans\n \n #Build topology\n self.myGlycosylator.build_glycan_topology(glycanMolecules = self.linked_glycanMolecules, build_all = False)\n sampler = glc.Sampler(self.linked_glycanMolecules.values(), environment, dihe_parameters, vwd_parameters)\n sampler.remove_clashes_GA()", "def is_clayey(self):\n group_index = self._data[SoilProperty.GI]\n return group_index[0] not in ['S','G']", "def test_null_conv_mol(self):\n num_feat = 4\n null_mol = ConvMol.get_null_mol(num_feat)\n\n deg_adj_lists = null_mol.get_deg_adjacency_lists()\n\n # Check that atoms are only connected to themselves.\n assert np.array_equal(deg_adj_lists[10],\n [[10, 10, 10, 10, 10, 10, 10, 10, 10, 10]])\n assert np.array_equal(deg_adj_lists[1], [[1]])\n # Check that there's one atom of each degree.\n assert np.array_equal(null_mol.get_deg_slice(),\n [[0, 1], [1, 1], [2, 1], [3, 1], [4, 1], [5, 1],\n [6, 1], [7, 1], [8, 1], [9, 1], [10, 1]])", "def neutralise_raw(self):\n # kekulization has to be done, otherwise u will encounter\n # issues when assigning bond types later\n Chem.Kekulize(self.m)\n\n # get pairs of charged atoms\n self.get_charged_pairs()\n\n # eliminate the charges by rebuilding the molecule\n m = Chem.Mol()\n mc = Chem.EditableMol(m)\n for i, az in enumerate(self.zs):\n ai = Chem.Atom( az )\n ci = self.charges[i]\n if ci != 0:\n if ci == 1:\n filt = (self.cpairs[:,0] == i)\n if np.any(filt):\n ai.SetFormalCharge( 1 )\n elif ci == -1:\n filt = (self.cpairs[:,1] == i)\n if np.any(filt): ai.SetFormalCharge( -1 )\n else:\n print((' -- i, charges[i] = ', i, self.charges[i]))\n print(' #ERROR: abs(charge) > 1??')\n raise\n mc.AddAtom( ai )\n\n ijs = np.array( np.where( np.triu(self.bom) > 0 ) ).astype(np.int)\n nb = ijs.shape[1]\n for i in range(nb):\n i, j = ijs[:,i]\n mc.AddBond( i, j, bo2bt[ '%.1f'%self.bom[i,j] ] )\n\n m = mc.GetMol()\n m2 = assign_coords(m, self.coords)\n self.m = m2", "def test_ungrouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n self.group(s, uc2)\n\n self.ungroup(s, uc1)\n assert 0 == len(uc1.subject.subject)\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(1, len(s.subject.useCase))\n\n self.ungroup(s, uc2)\n assert 0 == len(uc2.subject.subject)\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(0, len(s.subject.useCase))", "def isRenderable(data):\n return data.find(\"<molecule\") != -1 and data.find(\"<atom\") != -1", "def collision(self):\n raise NotImplementedError", "def braid_group_orbits(self):\n return [g.vertices() for g in self.braid_group_action()]", "def group_collide(sprite_group, other_object):\n sprites = set(sprite_group)\n for sprite in sprites:\n if sprite.collide(other_object):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False", "def starts_with_unity(self):\n raise NotImplementedError", "def build_reactive_complex(self, settings_manager: SettingsManager):\n import scine_database as db\n import scine_utilities as utils\n\n start_structure_ids = self._calculation.get_structures()\n start_structures = [db.Structure(sid, self._structures) for sid in 
start_structure_ids]\n self.save_initial_graphs_and_charges(settings_manager, start_structures)\n if len(start_structures) == 1:\n # For an intramolecular structure it is sufficient to provide one\n # structure that is both, start structure and reactive complex\n structure = start_structures[0]\n atoms = structure.get_atoms()\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n if len(start_structures) == 2:\n # Intermolecular reactions reactions require in situ generation of the reactive complex\n s0 = start_structures[0]\n s1 = start_structures[1]\n\n # Get coordinates\n atoms1 = s0.get_atoms()\n atoms2 = s1.get_atoms()\n elements1 = atoms1.elements\n elements2 = atoms2.elements\n coordinates1 = atoms1.positions\n coordinates2 = atoms2.positions\n # Calculate reactive center mean position\n if self.exploration_key + \"_lhs_list\" in self.settings[self.exploration_key]:\n sites1 = self.settings[self.exploration_key][self.exploration_key + \"_lhs_list\"]\n sites2 = self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"]\n self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"] = list(\n idx + len(elements1) for idx in sites2\n )\n elif \"nt_associations\" in self.settings[self.exploration_key]:\n sites1 = []\n sites2 = []\n nAtoms1 = len(atoms1.elements)\n for i in range(0, len(self.settings[self.exploration_key][\"nt_associations\"]), 2):\n at1 = self.settings[self.exploration_key][\"nt_associations\"][i]\n at2 = self.settings[self.exploration_key][\"nt_associations\"][i + 1]\n if at1 >= nAtoms1 > at2:\n sites1.append(at2)\n sites2.append(at1 - nAtoms1)\n if at2 >= nAtoms1 > at1:\n sites1.append(at1)\n sites2.append(at2 - nAtoms1)\n else:\n self.raise_named_exception(\n \"Reactive complex can not be build: missing reactive atoms list(s).\"\n )\n reactive_center1 = np.mean(coordinates1[sites1], axis=0)\n reactive_center2 = np.mean(coordinates2[sites2], axis=0)\n # Place reactive center mean position into origin\n coord1 = coordinates1 - reactive_center1\n coord2 = coordinates2 - reactive_center2\n positions = self._orient_coordinates(coord1, coord2)\n atoms = utils.AtomCollection(elements1 + elements2, positions)\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n # should not be reachable\n self.raise_named_exception(\n \"Reactive complexes built from more than 2 structures are not supported.\"\n )" ]
[ "0.72588325", "0.678643", "0.66508996", "0.6132741", "0.60709405", "0.5719462", "0.5535332", "0.552644", "0.5486801", "0.54643136", "0.53574395", "0.53247046", "0.52777433", "0.52353007", "0.5227962", "0.52224547", "0.5211532", "0.5198085", "0.51577014", "0.51439506", "0.50774413", "0.50373614", "0.50373614", "0.5024517", "0.5022664", "0.5020434", "0.5003295", "0.4993154", "0.4947047", "0.49167743", "0.49167743", "0.48739442", "0.48738703", "0.4863051", "0.4855876", "0.48545438", "0.483368", "0.48289746", "0.4827546", "0.48178196", "0.48140198", "0.48096064", "0.48095152", "0.48040548", "0.47965556", "0.47964185", "0.47964185", "0.4791835", "0.47761932", "0.47616786", "0.47498173", "0.47304368", "0.4725699", "0.4722607", "0.47188336", "0.47188336", "0.47141513", "0.47141513", "0.46978334", "0.46972498", "0.4696359", "0.4692995", "0.468086", "0.4678388", "0.46780002", "0.46757224", "0.4666896", "0.46656442", "0.46618578", "0.46522194", "0.46472955", "0.46421182", "0.46391347", "0.4622307", "0.46146637", "0.46058083", "0.45999828", "0.45981944", "0.45862487", "0.45833024", "0.4582048", "0.4581757", "0.4579302", "0.45702967", "0.45702967", "0.4569143", "0.45691094", "0.45644632", "0.45436266", "0.4543244", "0.45323026", "0.45269942", "0.45227686", "0.45194235", "0.4517495", "0.45128575", "0.45094612", "0.4507281", "0.4502749", "0.45018783" ]
0.6698314
2
StereoGroup atoms are split into two products by the reaction > Should the group be invalidated or trimmed?
def test_reaction_splits_stereogroup(self):
    products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]', 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')
    # Two product sets, each with two mols:
    self.assertEqual(products.count('|o1:1|'), 4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')", "def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )", "def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. 
Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def testMoreStereo(self):\r\n smi_and_cansmi = [\r\n ('Cl[C@](C)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](C)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](C)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Cl)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(I)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](C)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](C)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](C)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](C)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Cl)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Br)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Br)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Br)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Br)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](C)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](I)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](I)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](I)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Cl)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Cl)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](C)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](C)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Br)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Br)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Br)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Br)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](I)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](I)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](I)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](I)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](I)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](I)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](I)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(I)C', 'C[C@@](Cl)(Br)I')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n 
self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def test_reaction_preserves_stereo(self):\n reaction = '[C@:1]>>[C@:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)", "def space_group(self) -> PermutationGroup:\n return self._full_translation_group @ self.point_group", "def Group(self) -> _n_5_t_0:", "def Group(self) -> _n_5_t_0:", "def cleaveSurfAtom(entry,max_bonds=1,supercell=2,group_structs=True):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # If the crystal is 3D\n if results[0]=='conventional':\n struct = copy.deepcopy(entry[0])\n og_binary_matrix = getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(og_binary_matrix)))\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n bonds = []\n \n # Get list of bonded atoms\n for i in range(len(og_binary_matrix)):\n for pair in [(i,j) for j in range(i+1,len(og_binary_matrix)) \n if og_binary_matrix[i][j]==1]:\n bonds.append(pair)\n allCombos = []\n combNum = 0\n \n # Get list of all combinations of bonds\n for i in range(max_bonds+1):\n for com in list(itertools.combinations(bonds,i)):\n allCombos.append(com)\n combNum+=1\n\n combos = allCombos\n jjj=0\n all_structs = []\n \n # For each bond combination\n for combo in combos:\n broken=0\n jjj+=1\n modified_matrix = np.array(binary_matrix)\n for pair in combo:\n i,j = pair\n i=i*supercell**3\n j=j*supercell**3\n # Break bonds in the loop\n for shift in range(supercell**3):\n for shift2 in range(supercell**3):\n modified_matrix[i+shift][j+shift2]=0\n modified_matrix[j+shift][i+shift2]=0\n\n new_num_bonds=sum(sum(modified_matrix))\n broken=int(og_num_bonds-new_num_bonds) \n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n # If the new set of atoms is not empty\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. 
the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n # If formula of new network matches the original cell\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n else:\n print('Material is does not have a 3D motiif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)", "def proc_sw_only_morphs(forward_pairs, morphs, backward_pairs):\n sandwich_pairs = []\n if not backward_pairs:\n forward_pairs[-1].morphs.extend(morphs)\n elif len(morphs) == 1:\n morph = morphs[0]\n morph_str = str(morph)\n if morph_str in ['이/VCP', '하/VX'] and backward_pairs[0].morphs[0].tag.startswith('E'):\n # '이' 긍정지정사나 '하' 보조용언 뒤에 어미가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == '에/JKB' and backward_pairs[0].morphs[0].tag == 'JX':\n # '에' 부사격조사 뒤에 보조사가 나올 경우\n backward_pairs[0].morphs.insert(0, morphs[0])\n del morphs[:]\n elif morph_str == 'ᆯ/ETM' and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 'ㄹ' 관형형전성어미 앞에 용언이 나올 경우\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag in ['EC', 'EF'] and forward_pairs[-1].morphs[-1].tag.startswith('V'):\n # 연결어미나 종결어미 앞에 용언이 나올 경우\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n elif morph.tag.startswith('XS'):\n # append suffixes to the end of forward pair list\n forward_pairs[-1].morphs.append(morphs[0])\n del morphs[:]\n else:\n raise AlignError()\n else:\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n if morphs_str == '(/SS 
+ 대북/NNG + (/SS + 대북/NNG + )/SS + )/SS' and forward_pairs[-1].word_str == u'대북':\n del morphs[:]\n elif morphs_str == '(/SS + 동경/NNP + )/SS' and forward_pairs[-1].word_str == u'도쿄':\n del morphs[:]\n else:\n raise AlignError()\n return sandwich_pairs", "def test_reaction_inverts_stereo(self):\n reaction = '[C@:1]>>[C@@:1]'\n\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br |o1:1|')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@H](Cl)Br |&1:1|')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')", "def test_ungrouping(self):\n n = self.create(NodeItem, UML.Node)\n c = self.create(ComponentItem, UML.Component)\n\n self.group(n, c)\n self.ungroup(n, c)\n\n assert 0 == len(n.subject.ownedAttribute)\n assert 0 == len(c.subject.ownedAttribute)\n assert 0 == len(self.kindof(UML.Property))\n assert 0 == len(self.kindof(UML.Connector))\n assert 0 == len(self.kindof(UML.ConnectorEnd))", "def test_parameterize_mol_missing_stereo_rdkit(self, force_field):\n toolkit_registry = ToolkitRegistry(\n toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n )\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def restricted_automorphism_group(self):\n if '_restricted_automorphism_group' in self.__dict__:\n return self._restricted_automorphism_group\n\n from sage.groups.perm_gps.permgroup import PermutationGroup\n\n if self.field() is QQ:\n def rational_approximation(c):\n return c\n\n else: # self.field() is RDF\n c_list = []\n def rational_approximation(c):\n # Implementation detail: Return unique integer if two\n # c-values are the same up to machine precision. But\n # you can think of it as a uniquely-chosen rational\n # approximation.\n for i,x in enumerate(c_list):\n if self._is_zero(x-c):\n return i\n c_list.append(c)\n return len(c_list)-1\n \n # The algorithm identifies the restricted automorphism group\n # with the automorphism group of a edge-colored graph. The\n # nodes of the graph are the V-representation objects. If all\n # V-representation objects are vertices, the edges are\n # labelled by numbers (to be computed below). Roughly\n # speaking, the edge label is the inner product of the\n # coordinate vectors with some orthogonalization thrown in\n # [BSS].\n def edge_label_compact(i,j,c_ij):\n return c_ij\n\n # In the non-compact case we also label the edges by the type\n # of the V-representation object. This ensures that vertices,\n # rays, and lines are only permuted amongst themselves.\n def edge_label_noncompact(i,j,c_ij):\n return (self.Vrepresentation(i).type(), c_ij, self.Vrepresentation(j).type())\n\n if self.is_compact():\n edge_label = edge_label_compact\n else:\n edge_label = edge_label_noncompact\n\n # good coordinates for the V-representation objects\n v_list = []\n for v in self.Vrepresentation():\n v_coords = list(self._affine_coordinates(v))\n if v.is_vertex():\n v_coords = [1]+v_coords\n else:\n v_coords = [0]+v_coords\n v_list.append(vector(v_coords))\n\n # Finally, construct the graph\n Qinv = sum( v.column() * v.row() for v in v_list ).inverse()\n\n # Was set to sparse = False, but there is a problem with Graph\n # backends. 
It should probably be set back to sparse = False as soon as\n # the backends are fixed.\n G = Graph(sparse=True)\n for i in range(0,len(v_list)):\n for j in range(i+1,len(v_list)):\n v_i = v_list[i]\n v_j = v_list[j]\n c_ij = rational_approximation( v_i * Qinv * v_j )\n G.add_edge(i,j, edge_label(i,j,c_ij))\n\n group, node_dict = G.automorphism_group(edge_labels=True, translation=True)\n\n # Relabel the permutation group\n perm_to_vertex = dict( (i,v+1) for v,i in node_dict.items() )\n group = PermutationGroup([ [ tuple([ perm_to_vertex[i] for i in cycle ])\n for cycle in generator.cycle_tuples() ]\n for generator in group.gens() ])\n\n self._restricted_automorphism_group = group\n return group", "def sequence_tunable(\n mol,\n OP_REMOVE_ISOTOPE=True, OP_NEUTRALISE_CHARGE=True,\n OP_REMOVE_STEREO=False, OP_COMMUTE_INCHI=False,\n OP_KEEP_BIGGEST=True, OP_ADD_HYDROGEN=True,\n OP_KEKULIZE=True, OP_NEUTRALISE_CHARGE_LATE=True\n ):\n F = Filters()\n # Always perform the basics..\n Cleanup(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n AssignStereochemistry(mol, cleanIt=True, force=True, flagPossibleStereoCenters=True) # Fix bug TD201904.01\n # \n if OP_REMOVE_ISOTOPE:\n mol = F.remove_isotope(mol)\n if OP_NEUTRALISE_CHARGE:\n mol = F.neutralise_charge(mol)\n if any([OP_REMOVE_ISOTOPE, OP_REMOVE_ISOTOPE]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n # \n if OP_REMOVE_STEREO:\n mol = F.remove_stereo(mol)\n OP_COMMUTE_INCHI = True\n if OP_COMMUTE_INCHI:\n mol = F.commute_inchi(mol)\n if OP_KEEP_BIGGEST:\n mol = F.keep_biggest(mol)\n if any([OP_REMOVE_STEREO, OP_COMMUTE_INCHI, OP_KEEP_BIGGEST]):\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_NEUTRALISE_CHARGE_LATE:\n mol = F.neutralise_charge(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n #\n if OP_ADD_HYDROGEN:\n mol = F.add_hydrogen(mol, addCoords=True)\n if OP_KEKULIZE:\n mol = F.kekulize(mol)\n #\n return mol", "def test_isomorphic_stripped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )", "def compose(\n # Left side positive filters\n chainLeftIn,resiNumLeftIn,resiNameLeftIn,atomSerialLeftIn,\n atomNameLeftIn,\n # Left side negative filters\n chainLeftOut,resiNumLeftOut,resiNameLeftOut, atomSerialLeftOut,\n atomNameLeftOut,\n # Right side positive filters\n chainRightIn,resiNumRightIn,resiNameRightIn,atomSerialRightIn,\n atomNameRightIn,\n # Right side negative filters\n chainRightOut,resiNumRightOut,resiNameRightOut,atomSerialRightOut,\n atomNameRightOut,\n # Contact Area\n contactAreaMin,contactAreaMax,\n # Minimal distance\n minimalDistanceMin,minimalDistanceMax,\n # Sequence separation\n seqSeparationMin,seqSeparationMax\n ):\n\n output=''\n\n match_first=''\n match_first=append_to_local_output(match_first, 'c', Generic(chainLeftIn))\n match_first=append_to_local_output(match_first, 'r', Generic(resiNumLeftIn))\n match_first=append_to_local_output(match_first, 'a', Generic(atomSerialLeftIn))\n match_first=append_to_local_output(match_first, 'R', 
Generic(resiNameLeftIn))\n match_first=append_to_local_output(match_first, 'A', Generic(atomNameLeftIn))\n output=append_to_global_output(output, '--match-first', match_first)\n\n match_first_not=''\n match_first_not=append_to_local_output(match_first_not, 'c', Generic(chainLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'r', Generic(resiNumLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'a', Generic(atomSerialLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'R', Generic(resiNameLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'A', Generic(atomNameLeftOut))\n output=append_to_global_output(output, '--match-first-not', match_first_not)\n\n match_second=''\n match_second=append_to_local_output(match_second, 'c', Generic(chainRightIn))\n match_second=append_to_local_output(match_second, 'r', Generic(resiNumRightIn))\n match_second=append_to_local_output(match_second, 'a', Generic(atomSerialRightIn))\n match_second=append_to_local_output(match_second, 'R', Generic(resiNameRightIn))\n match_second=append_to_local_output(match_second, 'A', Generic(atomNameRightIn))\n output=append_to_global_output(output, '--match-second', match_second)\n\n match_second_not=''\n match_second_not=append_to_local_output(match_second_not, 'c', Generic(chainRightOut))\n match_second_not=append_to_local_output(match_second_not, 'r', Generic(resiNumRightOut))\n match_second_not=append_to_local_output(match_second_not, 'a', Generic(atomSerialRightOut))\n match_second_not=append_to_local_output(match_second_not, 'R', Generic(resiNameRightOut))\n match_second_not=append_to_local_output(match_second_not, 'A', Generic(atomNameRightOut))\n output=append_to_global_output(output, '--match-second-not', match_second_not)\n\n output=append_to_global_output(output, '--match-min-area', Float(contactAreaMin))\n output=append_to_global_output(output, '--match-max-area', Float(contactAreaMax))\n\n output=append_to_global_output(output, '--match-min-dist', Float(minimalDistanceMin))\n output=append_to_global_output(output, '--match-max-dist', Float(minimalDistanceMax))\n\n output=append_to_global_output(output, '--match-min-seq-sep', Int(seqSeparationMin))\n output=append_to_global_output(output, '--match-max-seq-sep', Int(seqSeparationMax))\n\n return output", "def FWDecompose(scaffolds, mols, scores, decomp_params=default_decomp_params) -> FreeWilsonDecomposition:\n descriptors = [] # list of descriptors, one per matched molecules\n # descriptors are 1/0 if a sidechain is present\n matched_scores = [] # scores from the matching molecules\n rgroup_idx = {} # rgroup index into descriptor { smiles: idx }\n rgroups = defaultdict(list) # final list of rgrups/sidechains\n\n if len(mols) != len(scores):\n raise ValueError(f\"The number of molecules must match the number of scores #mols {len(mols)} #scores {len(scores)}\")\n # decompose the rgroups\n logger.info(f\"Decomposing {len(mols)} molecules...\")\n decomposer = rgd.RGroupDecomposition(scaffolds, decomp_params)\n matched = []\n matched_indices = []\n for i,(mol, score) in enumerate(tqdm(zip(mols, scores))):\n if decomposer.Add(mol) >= 0:\n matched_scores.append(score)\n matched.append(mol)\n matched_indices.append(i)\n\n decomposer.Process()\n logger.info(f\"Matched {len(matched_scores)} out of {len(mols)}\")\n if not(matched_scores):\n logger.error(\"No scaffolds matched the input molecules\")\n return\n\n decomposition = decomposer.GetRGroupsAsRows(asSmiles=True)\n\n logger.info(\"Get unique 
rgroups...\")\n blocker = rdBase.BlockLogs()\n rgroup_counts = defaultdict(int)\n num_reconstructed = 0\n for num_mols, (row, idx) in enumerate(zip(decomposition, matched_indices)):\n row_smiles = []\n for rgroup,smiles in row.items():\n row_smiles.append(smiles)\n rgroup_counts[smiles] += 1\n if smiles not in rgroup_idx:\n rgroup_idx[smiles] = len(rgroup_idx)\n rgroups[rgroup].append(RGroup(smiles, rgroup, 0, 0))\n row['original_idx'] = idx\n reconstructed = \".\".join(row_smiles)\n try:\n blocker = rdBase.BlockLogs()\n mol = molzip_smi(reconstructed)\n num_reconstructed += 1\n except:\n print(\"failed:\", Chem.MolToSmiles(matched[num_mols]), reconstructed)\n \n\n logger.info(f\"Descriptor size {len(rgroup_idx)}\")\n logger.info(f\"Reconstructed {num_reconstructed} out of {num_mols}\")\n\n # get the descriptors list, one-hot encoding per rgroup\n if num_reconstructed == 0:\n logging.warning(\"Could only reconstruct %s out of %s training molecules\",\n num_mols, num_reconstructed)\n\n for mol, row in zip(matched, decomposition):\n row['molecule'] = mol\n descriptor = [0] * len(rgroup_idx)\n descriptors.append(descriptor)\n for smiles in row.values():\n if smiles in rgroup_idx:\n descriptor[rgroup_idx[smiles]] = 1\n\n \n assert len(descriptors) == len(matched_scores), f\"Number of descriptors({len(descriptors)}) doesn't match number of matcved scores({len(matched_scores)})\"\n\n # Perform the Ridge Regression\n logger.info(\"Ridge Regressing...\")\n lm = Ridge()\n lm.fit(descriptors, matched_scores)\n preds = lm.predict(descriptors)\n r2 = r2_score(matched_scores, preds)\n logger.info(f\"R2 {r2}\")\n logger.info(f\"Intercept = {lm.intercept_:.2f}\")\n\n for sidechains in rgroups.values():\n for rgroup in sidechains:\n rgroup.count = rgroup_counts[rgroup.smiles]\n rgroup.coefficient = lm.coef_[rgroup_idx[rgroup.smiles]]\n rgroup.idx = rgroup_idx[rgroup.smiles]\n\n return FreeWilsonDecomposition(rgroups, rgroup_idx, lm,\n r2, descriptors, decomposition,\n num_mols, num_reconstructed)", "def split(complex, ligand_res_name='LIG'):\n\n # Set empty molecule containers\n prot = oechem.OEMol()\n lig = oechem.OEMol()\n wat = oechem.OEMol()\n other = oechem.OEMol()\n\n # Define the Filter options before the splitting\n opt = oechem.OESplitMolComplexOptions()\n\n # The protein filter is set to avoid that multiple\n # chains are separated during the splitting and peptide\n # molecules are recognized as ligands\n pf = oechem.OEMolComplexFilterFactory(oechem.OEMolComplexFilterCategory_Protein) \n peptide = oechem.OEMolComplexFilterFactory(oechem.OEMolComplexFilterCategory_Peptide)\n protein_filter = oechem.OEOrRoleSet(pf, peptide)\n opt.SetProteinFilter(protein_filter)\n \n # The ligand filter is set to recognize just the ligand\n lf = oechem.OEMolComplexFilterFactory(oechem.OEMolComplexFilterCategory_Ligand)\n not_protein_filter = oechem.OENotRoleSet(protein_filter)\n ligand_filter = oechem.OEAndRoleSet(lf, not_protein_filter)\n opt.SetLigandFilter(ligand_filter)\n\n # The water filter is set to recognize just water molecules\n wf = oechem.OEMolComplexFilterFactory(oechem.OEMolComplexFilterCategory_Water)\n opt.SetWaterFilter(wf)\n\n # Set Category\n cat = oechem.OEMolComplexCategorizer()\n cat.AddLigandName(ligand_res_name)\n opt.SetCategorizer(cat)\n\n # Splitting the system\n if not oechem.OESplitMolComplex(lig, prot, wat, other, complex, opt):\n raise ValueError('Unable to split the complex')\n \n # At this point prot contains the protein, lig contains the ligand,\n # wat contains the water and 
other contains the excipients\n\n return prot, lig, wat, other", "def test_ungrouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n self.group(s, uc2)\n\n self.ungroup(s, uc1)\n assert 0 == len(uc1.subject.subject)\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(1, len(s.subject.useCase))\n\n self.ungroup(s, uc2)\n assert 0 == len(uc2.subject.subject)\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(0, len(s.subject.useCase))", "def test_4_sgops(self):\n from celib.symmetry_module import get_spacegroup\n result = get_spacegroup(self.lattice, self.atomtypes, self.basis, True, 1e-10)\n #The FCC lattice has 48 point group operations; we want to check each of them\n #against the correct values.\n self.assertEqual(result.sg_op.shape, (3,3,48))\n #We have to load the correct symmetry operations from file to check against.\n sgops = path.join(self.fdir, \"sgops.dat\")\n with open(sgops) as f:\n lines = f.readlines()\n model = self._read_sgopgs(lines, 48)\n\n for i in range(48):\n self.assertTrue(allclose(model[i], result.sg_op[:,:,i]),\n \"\\n{}\\n{}\\n\\n{}\".format(i, model[i], result.sg_op[:,:,i]))", "def remove_clashes(self):\n dihe_parameters = self.myGlycosylator.builder.Parameters.parameters['DIHEDRALS']\n vwd_parameters = self.myGlycosylator.builder.Parameters.parameters['NONBONDED']\n \n static_glycans = None\n for k in self.original_glycanMolecules:\n if k not in self.linked_glycanMolecules:\n if static_glycans is not None:\n static_glycans += self.original_glycanMolecules[k].atom_group\n else:\n static_glycans = self.original_glycanMolecules[k].atom_group.copy()\n \n environment = self.myGlycosylator.protein.copy() \n environment += static_glycans\n \n #Build topology\n self.myGlycosylator.build_glycan_topology(glycanMolecules = self.linked_glycanMolecules, build_all = False)\n sampler = glc.Sampler(self.linked_glycanMolecules.values(), environment, dihe_parameters, vwd_parameters)\n sampler.remove_clashes_GA()", "def remove_as_subgroup(self, other_groups):\r\n symbols_to_exclude = reduce(lambda alphabet, cell: alphabet.union(cell.get_possible_symbols()),\r\n self.cells, set())\r\n my_cells = set(self.cells)\r\n\r\n for group in other_groups:\r\n if my_cells.issubset(group.cells) and self is not group:\r\n # Remove my cells from the other group\r\n for cell in self.cells:\r\n cell.remove_group(group)\r\n group.cells.remove(cell)\r\n\r\n # Update the alphabets in the other group\r\n for cell in group.cells:\r\n cell.remove_possible_symbols(symbols_to_exclude)", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = 
self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def sequence_rr_legacy(mol):\n F = Filters()\n Cleanup(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n AssignStereochemistry(mol, cleanIt=True, force=True, flagPossibleStereoCenters=True) # Fix bug TD201904.01\n mol = F.remove_isotope(mol)\n mol = F.neutralise_charge(mol)\n SanitizeMol(mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False)\n mol = F.keep_biggest(mol)\n mol = F.add_hydrogen(mol, addCoords=True)\n mol = F.kekulize(mol)\n return mol", "def blendShapeEnvelopeOff():\n obj = cmds.ls(selection = True)\n history = cmds.listHistory(obj)\n bsHistory = cmds.ls(history, type = 'blendShape')\n for bs in bsHistory:\n cmds.setAttr(bs+'.'+'envelope',0.0) #note not changing blend target weights", "def test_enlarge_2_add_reactive_species(self):\n m1 = Molecule(smiles='CC')\n spc1 = self.rmg.reaction_model.make_new_species(m1, label='C2H4')[0]\n self.rmg.reaction_model.enlarge(spc1)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 2)\n self.assertTrue(self.rmg.reaction_model.core.species[1].reactive)\n\n m2 = Molecule(smiles='[CH3]')\n spc2 = self.rmg.reaction_model.make_new_species(m2, label='CH3')[0]\n self.rmg.reaction_model.enlarge(spc2)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 3)\n self.assertTrue(self.rmg.reaction_model.core.species[2].reactive)", "def test_add_lone_pairs_by_atom_valance(self):\n adj1 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 S u0 p2 c0 {1,S} {3,S}\n3 H u0 p0 c0 {2,S}\"\"\"\n mol1 = Molecule().from_adjacency_list(adjlist=adj1)\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), '[N]S')\n mol1.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), 'N#S')\n\n adj2 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 N u0 p1 c0 {1,S} {3,S} {4,S}\n3 H u0 p0 c0 {2,S}\n4 H u0 p0 c0 {2,S}\"\"\"\n mol2 = Molecule().from_adjacency_list(adjlist=adj2)\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N]N')\n mol2.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N-]=[NH2+]')\n\n adj3 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S}\n2 C u0 p0 c0 {1,S} {3,S} {8,S} {9,S}\n3 C u2 p0 c0 {2,S} {4,S}\n4 H u0 p0 c0 {3,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {1,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {2,S}\"\"\"\n mol3 = Molecule().from_adjacency_list(adjlist=adj3)\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_smiles(), '[CH]CC')\n mol3.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_adjacency_list(), \"\"\"1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}\n3 C u0 p1 c0 {1,S} {9,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 
c0 {2,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {3,S}\n\"\"\")\n\n adj4 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {4,S} {5,S} {6,S}\n2 C u0 p0 c0 {1,S} {3,S} {7,S} {8,S}\n3 N u2 p1 c0 {2,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\"\"\"\n mol4 = Molecule().from_adjacency_list(adjlist=adj4)\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_smiles(), 'CC[N]')\n mol4.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_adjacency_list(), \"\"\"1 N u0 p2 c0 {3,S}\n2 C u0 p0 c0 {3,S} {4,S} {5,S} {6,S}\n3 C u0 p0 c0 {1,S} {2,S} {7,S} {8,S}\n4 H u0 p0 c0 {2,S}\n5 H u0 p0 c0 {2,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {3,S}\n8 H u0 p0 c0 {3,S}\n\"\"\")", "def test_group(self):\n # leave out particle 0\n group = hoomd.group.tags(1,2)\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, ( 0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (0,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))", "def substereomers(gra):\n _assigned = functools.partial(\n dict_.filter_by_value, func=lambda x: x is not None)\n\n known_atm_ste_par_dct = _assigned(atom_stereo_parities(gra))\n known_bnd_ste_par_dct = _assigned(bond_stereo_parities(gra))\n\n def _is_compatible(sgr):\n atm_ste_par_dct = _assigned(atom_stereo_parities(sgr))\n bnd_ste_par_dct = _assigned(bond_stereo_parities(sgr))\n _compat_atm_assgns = (set(known_atm_ste_par_dct.items()) <=\n set(atm_ste_par_dct.items()))\n _compat_bnd_assgns = (set(known_bnd_ste_par_dct.items()) <=\n set(bnd_ste_par_dct.items()))\n return _compat_atm_assgns and _compat_bnd_assgns\n\n sgrs = tuple(filter(_is_compatible, stereomers(gra)))\n return sgrs", "def test_isomorphic_striped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )\n\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert not Molecule.are_isomorphic(\n mol1,\n mol2,\n strip_pyrimidal_n_atom_stereo=False,\n atom_stereochemistry_matching=True,\n bond_stereochemistry_matching=True,\n )[0]", "def finalize_groups(self):\n merged_rdd = self.merged_rdd\n group_merge_map = self.group_merge_map\n sc = self.sc\n sqc = pyspark.sql.SQLContext(sc)\n\n nPartitions = sc.defaultParallelism*5\n\n nMinMembers = self.nMinMembers\n\n # we need to use the group merge map used in a previous step to see which \n # groups are actually spread across domain boundaries\n group_merge_map = self.group_merge_map\n \n\n def count_groups_local(i, iterator, nMinMembers):\n # the first element is the group mapping dictionary\n 
dist_groups = set(iterator.next().values())\n print len(dist_groups)\n print 'sizeof set in ', i, ' ', asizeof.asizeof(dist_groups)\n p_arrs = np.concatenate([p_arr for p_arr in iterator])\n gids, counts = np.unique(p_arrs['iGroup'], return_counts=True)\n print 'number of groups in partition ', i, ' = ', len(gids)\n return ((g,cnt) for (g,cnt) in zip(gids, counts) if (g in dist_groups) or (cnt >= nMinMembers))\n \n\n def filter_groups_by_map(rdd, not_in_map=False):\n def perform_filter(iterator, exclusive):\n # the first element after the union is the group mapping\n # here we have already remapped the groups so we need to just take the final group IDs\n dist_groups = set(iterator.next().values())\n return ((gid, count) for (gid,count) in iterator if (gid in dist_groups)^exclusive)\n return rdd.mapPartitions(lambda i: perform_filter(i,not_in_map), preservesPartitioning=True)\n\n def get_local_groups(rdd, map_rdd): \n return filter_groups_by_map(map_rdd + rdd, not_in_map=True)\n\n def get_distributed_groups(rdd, map_rdd):\n return filter_groups_by_map(map_rdd + rdd, not_in_map=False)\n\n # first, get rid of ghost particles\n no_ghosts_rdd = self.filter_ghosts(merged_rdd)\n\n # count up the number of particles in each group in each partition\n group_counts = (group_merge_map + no_ghosts_rdd).mapPartitionsWithIndex(lambda index,i: count_groups_local(index, i, nMinMembers), True).cache()\n\n # merge the groups that reside in multiple domains\n distributed_groups = get_distributed_groups(group_counts, group_merge_map)\n\n merge_group_counts = (distributed_groups.reduceByKey(lambda a,b: a+b, nPartitions)\n .filter(lambda (g,cnt): cnt>=nMinMembers)).cache()\n\n if self.DEBUG:\n print 'spark_fof DEBUG: non-merge groups = %d merge groups = %d'%(group_counts.count(), merge_group_counts.count()) \n\n # combine the group counts\n groups_rdd = (get_local_groups(group_counts, group_merge_map) + merge_group_counts).setName('groups_rdd')\n total_group_counts = groups_rdd.cache().count()\n \n print 'Total number of groups: ', total_group_counts\n\n self.total_group_counts = total_group_counts\n\n return groups_rdd", "def release_atoms(self):\r\n\t\thole_size = self.box_size/2\r\n\t\thole_left = self.box_size/2 - hole_size/2\r\n\t\thole_right = self.box_size/2 + hole_size/2\r\n\r\n\t\tx_vals = (self.pos.x > hole_left) & (self.pos.x < hole_right)\r\n\t\ty_vals = (self.pos.y > hole_left) & (self.pos.y < hole_right)\r\n\t\tindices = (self.pos.z < 0) & x_vals & y_vals\r\n\r\n\t\tescaped_count = np.sum(indices)\r\n\t\tlost_momentum = self.atom_mass*np.sum(self.vel.z)\r\n\r\n\t\t# this would look better as self.vel.values[:, indices] = ... 
, but that is actually noticeably slower\r\n\t\tself.pos.x[indices], self.pos.y[indices], self.pos.z[indices] = *generator.uniform(hole_left, hole_right, size=(2, escaped_count)), np.full(escaped_count, self.box_size)\r\n\t\tif self.change_velocities:\r\n\t\t\t# changing the velocity makes the temperature decrease over time\r\n\t\t\tself.vel.x[indices], self.vel.y[indices], self.vel.z[indices] = generator.uniform(0, self.box_size, size=(3, escaped_count))\r\n\r\n\t\treturn escaped_count, lost_momentum", "def collision_separate(self, arbiter, space, data):\n pass", "def build(self, X, Y, w=None):\n super(MorseSmaleComplex, self).build(X, Y, w)\n\n if self.debug:\n sys.stdout.write(\"Decomposition: \")\n start = time.perf_counter()\n\n stableManifolds = MorseComplex(debug=self.debug)\n unstableManifolds = MorseComplex(debug=self.debug)\n\n stableManifolds._build_for_morse_smale_complex(self, False)\n unstableManifolds._build_for_morse_smale_complex(self, True)\n\n self.min_indices = unstableManifolds.max_indices\n self.max_indices = stableManifolds.max_indices\n\n # If a degenerate point is both a minimum and a maximum, it\n # could potentially appear twice, but would be masked by the\n # minimum key which would wipe the maximum merge\n self.merge_sequence = stableManifolds.merge_sequence.copy()\n self.merge_sequence.update(unstableManifolds.merge_sequence)\n self.persistences = sorted(\n stableManifolds.persistences + unstableManifolds.persistences\n )\n\n self.base_partitions = {}\n base = np.array([[None, None]] * len(Y))\n for key, items in unstableManifolds.base_partitions.items():\n base[np.array(items), 0] = key\n for key, items in stableManifolds.base_partitions.items():\n base[np.array(items), 1] = key\n\n keys = set(map(tuple, base))\n for key in keys:\n self.base_partitions[key] = np.where(\n np.logical_and(base[:, 0] == key[0], base[:, 1] == key[1])\n )[0]\n\n if self.debug:\n end = time.perf_counter()\n sys.stdout.write(\"%f s\\n\" % (end - start))", "def build_reactive_complex(self, settings_manager: SettingsManager):\n import scine_database as db\n import scine_utilities as utils\n\n start_structure_ids = self._calculation.get_structures()\n start_structures = [db.Structure(sid, self._structures) for sid in start_structure_ids]\n self.save_initial_graphs_and_charges(settings_manager, start_structures)\n if len(start_structures) == 1:\n # For an intramolecular structure it is sufficient to provide one\n # structure that is both start structure and reactive complex\n structure = start_structures[0]\n atoms = structure.get_atoms()\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n if len(start_structures) == 2:\n # Intermolecular reactions require in situ generation of the reactive complex\n s0 = start_structures[0]\n s1 = start_structures[1]\n\n # Get coordinates\n atoms1 = s0.get_atoms()\n atoms2 = s1.get_atoms()\n elements1 = atoms1.elements\n elements2 = atoms2.elements\n coordinates1 = atoms1.positions\n coordinates2 = atoms2.positions\n # Calculate reactive center mean position\n if self.exploration_key + \"_lhs_list\" in self.settings[self.exploration_key]:\n sites1 = self.settings[self.exploration_key][self.exploration_key + \"_lhs_list\"]\n sites2 = self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"]\n self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"] = list(\n idx + len(elements1) for idx in sites2\n )\n elif \"nt_associations\" in 
self.settings[self.exploration_key]:\n sites1 = []\n sites2 = []\n nAtoms1 = len(atoms1.elements)\n for i in range(0, len(self.settings[self.exploration_key][\"nt_associations\"]), 2):\n at1 = self.settings[self.exploration_key][\"nt_associations\"][i]\n at2 = self.settings[self.exploration_key][\"nt_associations\"][i + 1]\n if at1 >= nAtoms1 > at2:\n sites1.append(at2)\n sites2.append(at1 - nAtoms1)\n if at2 >= nAtoms1 > at1:\n sites1.append(at1)\n sites2.append(at2 - nAtoms1)\n else:\n self.raise_named_exception(\n \"Reactive complex cannot be built: missing reactive atoms list(s).\"\n )\n reactive_center1 = np.mean(coordinates1[sites1], axis=0)\n reactive_center2 = np.mean(coordinates2[sites2], axis=0)\n # Place reactive center mean position into origin\n coord1 = coordinates1 - reactive_center1\n coord2 = coordinates2 - reactive_center2\n positions = self._orient_coordinates(coord1, coord2)\n atoms = utils.AtomCollection(elements1 + elements2, positions)\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n # should not be reachable\n self.raise_named_exception(\n \"Reactive complexes built from more than 2 structures are not supported.\"\n )", "def cull(self):", "def test_replace_group(self):\n pass", "def applyMorphologicalCleaning(self, image):", "def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))", "def test_enlarge_1_add_nonreactive_species(self):\n m0 = Molecule(smiles='[He]')\n spc0 = self.rmg.reaction_model.make_new_species(m0, label='He', reactive=False)[0]\n self.rmg.reaction_model.enlarge(spc0)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 1)\n self.assertFalse(self.rmg.reaction_model.core.species[0].reactive)", "def semigroup_generators(self):", "def group(seq):\n pass # replace with your solution", "def _with_gaps_removed(group_map, paired):\n gapped_groups = set()\n for group, elems in group_map.items():\n # Verify we're getting 1, 2, 3, ...\n 
expected_sequence = list(range(1, len(elems) + 1))\n if paired:\n fwd_nums = [\n int(pattern_multilane.search(se).group(1)) for se in [fwd for fwd, _ in elems]\n ]\n rev_nums = [\n int(pattern_multilane.search(se).group(1)) for se in [rev for _, rev in elems]\n ]\n if fwd_nums != expected_sequence or rev_nums != expected_sequence:\n gapped_groups.add(group)\n else:\n nums = [int(pattern_multilane.search(se).group(1)) for se in elems]\n if nums != expected_sequence:\n gapped_groups.add(group)\n\n return {group: elems for group, elems in group_map.items() if group not in gapped_groups}", "def remove_bad_singles( segs_beg_3 ):\n max_seg = segs_beg_3.shape[0]\n\n # get initial number of ramps having single-group segments\n tot_num_single_grp_ramps = len( np.where((segs_beg_3 == 1) &\n (segs_beg_3.sum(axis=0) > 1))[0])\n\n while( tot_num_single_grp_ramps > 0 ):\n # until there are no more single-group segments\n for ii_0 in range( max_seg ):\n slice_0 = segs_beg_3[ii_0,:,:]\n\n for ii_1 in range( max_seg ): # correctly includes EARLIER segments\n if ( ii_0 == ii_1 ): # don't compare with itself\n continue\n\n slice_1 = segs_beg_3[ii_1,:,:]\n\n # Find ramps of a single-group segment and another segment\n # either earlier or later\n wh_y, wh_x = np.where((slice_0 == 1) & (slice_1 > 0))\n\n if (len(wh_y) == 0):\n # Are none, so go to next pair of segments to check\n continue\n\n # Remove the 1-group segment\n segs_beg_3[ii_0:-1, wh_y, wh_x] = segs_beg_3[ii_0+1:, wh_y, wh_x]\n\n # Zero the last segment entry for the ramp, which would otherwise\n # remain non-zero due to the shift\n segs_beg_3[-1, wh_y, wh_x] = 0\n\n del wh_y, wh_x\n\n tot_num_single_grp_ramps = len( np.where((segs_beg_3 == 1) &\n (segs_beg_3.sum(axis=0) > 1))[0])\n\n return segs_beg_3", "def test_ungrouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n self.ungroup(n1, n2)\n\n assert n2.subject not in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode", "def reset(self):\n super(MorseSmaleComplex, self).reset()\n\n self.base_partitions = {}\n self.merge_sequence = {}\n\n self.persistences = []\n self.min_indices = []\n self.max_indices = []\n\n # State properties\n self.persistence = 0.0", "def test_replace_groups(self):\n pass", "def semigroup_generators(self):\n return self.ambient().semigroup_generators().map(self.retract)", "def cleaveSurfBond(entry,max_bonds=1,supercell=2,group_structs=True,prec=1E-4):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # Proceed only if the structure is classified as periodic\n # in all directions\n if results[0]=='conventional':\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(binary_matrix)))/2\n \n # Get dictionary of directional bonds in the system, \n # and the associated atomic species\n bond_dir = getBondVectors(struct,entry[1]-1,prec)\n\n \n # Create the list of bonds to be broken\n all_structs=[]\n combos=[]\n for s1 in bond_dir:\n for s2 in bond_dir[s1]:\n for cleave in bond_dir[s1][s2]: \n combos.append(cleave[1])\n \n # Create pairings of bonds to be broken, up to \n # max_bonds number of bonds\n \n final_combos=[]\n for i in range(1,max_bonds+1):\n for mix in list(itertools.combinations(combos,max_bonds)):\n final_combos.append(mix)\n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n for combo in final_combos:\n modified_matrix 
= np.array(binary_matrix)\n for sett in combo:\n for pair in sett:\n i,j = pair\n modified_matrix[i][j]=0\n modified_matrix[j][i]=0\n new_num_bonds=sum(sum(modified_matrix))/2\n \n # Number of bonds broken in the search. Not necessarily\n # the number of bonds broken to cleave the surface\n \n broken=int(og_num_bonds-new_num_bonds)\n \n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n\n else:\n print('Material does not have a 3D motif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def morphological_opening_to_remove_extra_objects(image, structuring_element):\n return opening(image, structuring_element)", "def test_mutation2(self):\n genotype = '0|0|2|0|0|2|0|0 1|0|0|1|1|0|0|0 0|1|0|0|0|0|2|1--1 7'\n search_space = {'dil_conv_3x3', 'dil_conv_5x5', 'dil_conv_7x7',\n 'skip_connect', 'clinc_3x3', 'clinc_7x7', 'avg_pool_3x3', 'max_pool_3x3'}\n\n mutator = Mutations(search_space, prob_mutation=0.8,\n prob_resize=0.99, prob_swap=0.99)\n mutated_g = mutator(genotype)\n a, s, d = get_conf(mutated_g)\n mutator.update_strat_good(a)", "def pulp_smash():", "def test_modeller_mutations():\n mol_id = 'Abl'\n abl_path = examples_paths()['abl']\n with mmtools.utils.temporary_directory() as tmp_dir:\n # Safety check: protein must have WT residue: THR at residue 85 in chain A\n has_wt_residue = False\n with open(abl_path, 'r') as f:\n for line in f:\n if (line[:6] == 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='THR'):\n has_wt_residue = True\n break\n assert has_wt_residue\n\n yaml_content = get_template_script(tmp_dir)\n exp_builder = ExperimentBuilder(yaml_content)\n output_dir = exp_builder._db.get_molecule_dir(mol_id)\n output_path = os.path.join(output_dir, 'Abl.pdb')\n\n # We haven't set the modeller options, so this shouldn't do anything\n exp_builder._db._setup_molecules(mol_id)\n assert not os.path.exists(output_path)\n\n # Calling modeller with WT creates a file (although the protein is not mutated).\n exp_builder._db.molecules[mol_id]['modeller'] = {\n 'apply_mutations': {\n 'chain_id': 'A',\n 'mutations': 'WT',\n }\n }\n setup_molecule_output_check(exp_builder._db, mol_id, output_path)\n os.remove(output_path) # Remove file for next check.\n\n\n # Reinitialize exp_builder\n exp_builder = ExperimentBuilder(yaml_content)\n\n # Now we set the modeller options and repeat for the mutant case\n exp_builder._db.molecules[mol_id]['modeller'] = {\n 'apply_mutations': {\n 'chain_id': 'A',\n 'mutations': 'T85I',\n }\n }\n setup_molecule_output_check(exp_builder._db, mol_id, output_path)\n\n # Safety check: protein must have mutated residue: ILE at residue 85 in chain A\n has_mut_residue = False\n with open(output_path, 'r') as f:\n for line in f:\n if (line[:6] 
== 'ATOM ') and (line[21] == 'A') and (int(line[22:26]) == 85) and (line[17:20]=='ILE'):\n has_mut_residue = True\n break\n assert has_mut_residue", "def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n c = self.create(ComponentItem, UML.Component)\n\n self.group(n, c)\n\n assert 1 == len(n.subject.ownedAttribute)\n assert 1 == len(n.subject.ownedConnector)\n assert 1 == len(c.subject.ownedAttribute)\n assert 2 == len(self.kindof(UML.ConnectorEnd))\n\n a1 = n.subject.ownedAttribute[0]\n a2 = c.subject.ownedAttribute[0]\n\n assert a1.isComposite\n assert a1 in n.subject.part\n\n connector = n.subject.ownedConnector[0]\n assert connector.end[0].role is a1\n assert connector.end[1].role is a2", "def braid_group_orbits(self):\n return [g.vertices() for g in self.braid_group_action()]", "def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp", "def test_strip_atom_stereochemistry(self):\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n nitrogen_idx = [\n atom.molecule_atom_index for atom in mol.atoms if atom.symbol == \"N\"\n ][0]\n\n # TODO: This fails with RDKitToolkitWrapper because it perceives\n # the stereochemistry of this nitrogen as None\n assert 
mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None\n\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None", "def neutralise(self):\n smi = self.smiles\n\n patts = [\n # Imidazoles\n ('[n+;H]','n'),\n # Amines\n ('[N+;!H0]','N'),\n # Carboxylic acids and alcohols\n ('[$([O-]);!$([O-][#7])]','O'),\n # Thiols\n ('[S-;X1]','S'),\n # Sulfonamides\n ('[$([N-;X2]S(=O)=O)]','N'),\n # Enamines\n ('[$([N-;X2][C,N]=C)]','N'),\n # Tetrazoles\n ('[n-]','[nH]'),\n # Sulfoxides\n ('[$([S-]=O)]','S'),\n # Amides\n ('[$([N-]C=O)]','N') ]\n\n reactions = [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]\n\n m = Chem.MolFromSmiles(smi)\n for i,(reactant, product) in enumerate(reactions):\n while m.HasSubstructMatch(reactant):\n rms = AllChem.ReplaceSubstructs(m, reactant, product)\n m = rms[0]\n\n # it doesn't matter is u choose to output a canonical smiles as the\n # sequence of atoms is changed calling `AllChem.ReplaceSubstructs\n self.smiles = Chem.MolToSmiles(m, isomericSmiles=False) #, canonical=False)", "def free_group(symbols):\n _free_group = FreeGroup(symbols)\n return (_free_group,) + tuple(_free_group.generators)", "def neutralise_raw(self):\n # kekulization has to be done, otherwise u will encounter\n # issues when assigning bond types later\n Chem.Kekulize(self.m)\n\n # get pairs of charged atoms\n self.get_charged_pairs()\n\n # eliminate the charges by rebuilding the molecule\n m = Chem.Mol()\n mc = Chem.EditableMol(m)\n for i, az in enumerate(self.zs):\n ai = Chem.Atom( az )\n ci = self.charges[i]\n if ci != 0:\n if ci == 1:\n filt = (self.cpairs[:,0] == i)\n if np.any(filt):\n ai.SetFormalCharge( 1 )\n elif ci == -1:\n filt = (self.cpairs[:,1] == i)\n if np.any(filt): ai.SetFormalCharge( -1 )\n else:\n print((' -- i, charges[i] = ', i, self.charges[i]))\n print(' #ERROR: abs(charge) > 1??')\n raise\n mc.AddAtom( ai )\n\n ijs = np.array( np.where( np.triu(self.bom) > 0 ) ).astype(np.int)\n nb = ijs.shape[1]\n for i in range(nb):\n i, j = ijs[:,i]\n mc.AddBond( i, j, bo2bt[ '%.1f'%self.bom[i,j] ] )\n\n m = mc.GetMol()\n m2 = assign_coords(m, self.coords)\n self.m = m2", "def checkSwapsAndClean( self, # For comparison the NRG tags and defaults on March 2nd, 2011 are presented.\n energy_abs_criterium = 0.1, # _Stereo_assign_list.Crit_abs_e_diff 0.100\n energy_rel_criterium = 0.0, # _Stereo_assign_list.Crit_rel_e_diff 0.000\n swapPercentage = 75.0, # _Stereo_assign_list.Crit_mdls_favor_pct 75.0\n singleModelCutoff = 1.0, # _Stereo_assign_list.Crit_sing_mdl_viol 1.000 (inclusive)\n multiModelCutoff = 0.5, # _Stereo_assign_list.Crit_multi_mdl_viol 0.500 (inclusive)\n multiModelPercentageCutoff = 50.0, # _Stereo_assign_list.Crit_multi_mdl_pct 50.0 (inclusive)\n method = 'SUM_AVERAGING', # TODO: code others.\n outputFileName = 'stereo_assign.str', # will be written to current directory if not an absolute path. Ignored if output type is custom\n debug = False, # Print debug info?\n useLowestAromaticViolation = False, # Check for lowest violation for single HD1/2 HE1/2 distance constraint items\n outputType = 'NMRSTAR' # Will write out NMR-STAR file. 
Can also be 'custom', will then only print info\n ):\n if not self.distanceConstraintLists or not self.structureEnsemble or not self.structureEnsemble.models:\n print \"Error: no constraint lists or no structures available! Aborting...\"\n return True\n\n #\n # Initialize... see parameters above for swapPercentage\n #\n # Set a dictionary with violationCodes (what is a large violation?)\n #\n # smallFloat = 0.000000000001 # same for cutoff distance and fraction\n\n negativeFraction = -999.9 # fraction set to always happen as it's under cut off.\n\n self.violationCodes = {}\n self.violationCodes['xl'] = {'violation': singleModelCutoff, 'fraction': negativeFraction}\n self.violationCodes['l'] = {'violation': multiModelCutoff, 'fraction': multiModelPercentageCutoff/100.}\n self.violationCodes[self.VIOLATION_CODE_REPORTINGX_STR] = {'violation': singleModelCutoff, 'fraction': negativeFraction}\n self.violationCodes[self.VIOLATION_CODE_REPORTINGL_STR] = {'violation': multiModelCutoff, 'fraction': negativeFraction}\n self.violationCodes[self.VIOLATION_CODE_REPORTINGS_STR] = {'violation': 0.0, 'fraction': negativeFraction}\n\n\n # JFD changed indentation here so that below statement is always executed.\n # Order in which they are checked, if found will abort so xl violation is prioritized\n self.violationCodeList = ['xl','l',\n self.VIOLATION_CODE_REPORTINGS_STR,\n self.VIOLATION_CODE_REPORTINGL_STR,\n self.VIOLATION_CODE_REPORTINGX_STR ]\n for violationCode in self.violationCodeList:\n if not self.violationCodes.has_key(violationCode):\n print 'ERROR: expected violationCode [%s] in StereoAssignmentCleanup.violationCodes ' % violationCode\n return True\n# print 'DEBUG: self.violationCode[%s] : %s' % ( violationCode, str(self.violationCodes[violationCode]))\n\n #\n # Initialise some variables\n #\n\n self.useLowestAromaticViolation = useLowestAromaticViolation\n\n #\n # Set the factor for calculating violations\n #\n\n self.method = method\n if self.method == 'SUM_AVERAGING':\n self.factor = 1.0/6.0\n\n #\n # Initialise resonance and 'triplet' information\n #\n\n print\n print \"Checking swap status and cleaning prochiral groups in constraint lists...\"\n print\n\n (self.resAtomDict,self.resAtomSetDict) = createResonanceAtomAndAtomSetDict(self.distanceConstraintLists[0].parent.fixedResonances)\n if self.verbose:\n print \"Made resAtomDict, resAtomSetDict\"\n\n # resAtomSwapDict is list of atoms associated with a resonance, prochiralResonancesDict links to (chainCode,seqId,prochiralChemAtomSet) tuple\n (self.resAtomSwapDict,self.prochiralResonancesDict) = createResAtomSwapDict(self.resAtomSetDict,compareWithWattos=self.compareWithWattos)\n if self.verbose:\n print \"Made resAtomSwapDict,prochiralResonancesDict\"\n\n self.triplets = {}\n\n # Generate a list of triplets, only for ones that have resonances - rest is dealt with later on.\n resList = self.prochiralResonancesDict.keys()\n resList.sort()\n\n for res in resList:\n atomTuple = self.resAtomDict[res]\n prochiralKey = self.prochiralResonancesDict[res]\n\n if not self.triplets.has_key(prochiralKey):\n self.triplets[prochiralKey] = {}\n\n if not self.triplets[prochiralKey].has_key(atomTuple):\n self.triplets[prochiralKey][atomTuple] = []\n\n self.triplets[prochiralKey][atomTuple].append(res)\n\n #\n # Now prioritise the triplets...\n #\n\n prochiralPriority = {}\n self.prochiralConstraints = {}\n\n prochiralKeys = self.triplets.keys()\n prochiralKeys.sort()\n Triplet_count = len(prochiralKeys)\n if Triplet_count < 1:\n print \"WARNING: expected 
at least one triplet. Are there SSA distance restraints available?\"\n return\n invalidTripletCount = 0 # Like 1a24 1 185 LEU CD* that is invalid and can easily be recognized because it gets no involved restraints.\n for prochiralKey in prochiralKeys:\n #print prochiralKey\n atomTuples = self.triplets[prochiralKey].keys()\n atomTuples.sort()\n connectedConstraints = []\n unambiguousStereoConstraints = [] # These are constraints where there is no additional stereo ambiguity in the constraint items involving the prochiral\n allResonancesSet = set()\n\n otherItems = {}\n\n for atomTuple in atomTuples:\n #print \"\",atomTuple,triplets[prochiralKey][atomTuple]\n for resonance in self.triplets[prochiralKey][atomTuple]:\n allResonancesSet.add(resonance) # Note will not add the same item twice, so this is fine!\n for constraintItem in resonance.pairwiseConstraintItems:\n constraint = constraintItem.constraint\n if not otherItems.has_key(constraint):\n otherItems[constraint] = {}\n\n # Track other resonance in the item for filtering out fully ambiguous restraints\n orderedResonances = list(constraintItem.orderedResonances)\n otherResonance = orderedResonances[not orderedResonances.index(resonance)]\n if otherResonance not in otherItems[constraint]: # Use this now for future Python3 compatibility\n otherItems[constraint][otherResonance] = set()\n otherItems[constraint][otherResonance].add(resonance)\n\n if constraint.className in ('DistanceConstraint','HBondConstraint'):\n if constraint not in connectedConstraints:\n connectedConstraints.append(constraint)\n # So only 'unambiguous' if the 'other' resonance in the item has a resonance assignment, is assigned to one atomSet, and is prochiral (so could be deassigned)\n if otherResonance.resonanceSet and len(otherResonance.resonanceSet.atomSets) == 1 and otherResonance in self.prochiralResonancesDict:\n #if self.resAtomDict[resonance][0].residue.seqId == 48:\n # print self.resAtomDict[resonance], self.resAtomDict[otherResonance], otherResonance.resonanceSet.atomSets\n unambiguousStereoConstraints.append(constraint)\n else:\n pass\n# print 'DEBUG: ambi in %s:\\n %s' % (prochiralKey, ccpnDistanceRestraintToString(constraint)) # JFD doesn't know how to easily show atoms here.\n\n #\n # Clean up restraints so that constraints that are already fully ambiguous for the prochiral resonances (and they point to exactly the same resonances) are not included in the list to check..\n #\n\n if len(allResonancesSet) > 1:\n for constraint in otherItems:\n allMatch = True\n for otherResonance in otherItems[constraint]:\n if allResonancesSet != otherItems[constraint][otherResonance]:\n allMatch = False\n break\n\n if allMatch:\n if constraint in connectedConstraints:\n connectedConstraints.pop(connectedConstraints.index(constraint))\n if constraint in unambiguousStereoConstraints:\n unambiguousStereoConstraints.pop(unambiguousStereoConstraints.index(constraint))\n\n #\n # Set their priority\n #\n\n chainIdCcpn = prochiralKey[0]\n resIdCcpn = prochiralKey[1]\n chemAtomSetName = prochiralKey[2].name\n priorityKey = (len(connectedConstraints),len(unambiguousStereoConstraints),chainIdCcpn,resIdCcpn,chemAtomSetName)\n# print \"DEBUG: priorityKey:\", priorityKey\n if not prochiralPriority.has_key(priorityKey):\n prochiralPriority[priorityKey] = []\n\n prochiralPriority[priorityKey].append(prochiralKey)\n\n connectedConstraints.sort()\n self.prochiralConstraints[prochiralKey] = connectedConstraints\n\n \n #\n # Sort by priority and reorganise...\n #\n \n priorityKeys = 
prochiralPriority.keys()\n\n ## custom sort needs to return an int.\n def tripletComparator(x, y):\n if x[0] != y[0]:\n return x[0] - y[0] # ascending connectedConstraints\n# if not self.compareWithWattos:\n if x[1] != y[1]:\n return y[1] - x[1] # ascending unambiguousStereoConstraints\n if x[2] != y[2]:\n if x[2] < y[2]: # descending chainIdCcpn character\n return 1\n else:\n return -1\n resIdX = int(x[3])\n resIdY = int(y[3])\n if resIdX != resIdY:\n return resIdY - resIdX # descending resIdCcpn\n if x[4] != y[4]:\n if x[4] < y[4]: # descending chemAtomSetName\n return 1\n else:\n return -1\n return 0\n # end def\n\n priorityKeys.sort(cmp=tripletComparator)\n priorityKeys.reverse()\n\n if debug:\n for pk in priorityKeys:\n for pck in prochiralPriority[pk]:\n print \"pck: \", pck\n for at in self.triplets[pck].keys():\n print \" at, self.triplets[pck][at]: \",at, self.triplets[pck][at]\n print\n\n #\n # Now calculate the total 'energy' for each constraint, and track whether there are any serious violations\n #\n # The 'energy' is the sum of the squared violations (over all models and restraints).\n #\n\n self.createAtomCoordDict() # This is static, fine to keep like this!\n\n # Corresponds to the indexes of avgLocalSums\n\n self.swapTypes = [self.SWAP_TYPE_ORG,'swapped']\n self.constraintItemsReset = []\n\n #\n # First only do swapping...\n #\n\n swapInfo = {}\n orgMaxViolation = {}\n orgViolationSingleModelCriteriumCount = {}\n orgViolationMultiModelCriteriumCount = {}\n\n Swap_count = 0 # Using captial to distinguish from original FC and use exact same as Wattos.\n Deassign_count = 0\n Total_e_low_states = 0.0\n Total_e_high_states = 0.0\n tripletIdx = 0\n for priorityKey in priorityKeys:\n for prochiralKey in prochiralPriority[priorityKey]:\n tripletIdx += 1\n if debug:\n print prochiralKey\n\n (prochiralViolationInfo,allConstraintItems) = self.checkProchiralKeyConstraints(prochiralKey,debug)\n\n # Find max violation of original assignment\n orgMaxViolation[ prochiralKey] = 0.0\n orgViolationSingleModelCriteriumCount[ prochiralKey] = 0\n orgViolationMultiModelCriteriumCount[ prochiralKey] = 0\n violResultTupleList = prochiralViolationInfo[self.SWAP_TYPE_ORG][self.REQUIRES_DEASSIGNMENT_STR]\n for violationCode, violationList in violResultTupleList:\n if violationCode == self.VIOLATION_CODE_REPORTINGS_STR: # Includes any possible violation.\n orgMaxViolation[prochiralKey] = max( orgMaxViolation[prochiralKey], max(violationList)) # a list of violations\n elif violationCode == self.VIOLATION_CODE_REPORTINGX_STR:\n orgViolationSingleModelCriteriumCount[prochiralKey] += self.numModels - violationList.count(0.0)\n elif violationCode == self.VIOLATION_CODE_REPORTINGL_STR:\n orgViolationMultiModelCriteriumCount[prochiralKey] += self.numModels - violationList.count(0.0)\n # end for violation results\n\n #\n # Now check whether needs to be swapped\n #\n\n doSwapCount = 0.0\n totalEnergyHighState = 0.0 # actual high state will be determined after next loop. For now assume state 0 (unswapped)\n totalEnergyLowState = 0.0\n for modelIndex in range(self.numModels):\n energyHighState = prochiralViolationInfo[self.swapTypes[0]]['energy'][modelIndex]\n energyLowState = prochiralViolationInfo[self.swapTypes[1]]['energy'][modelIndex]\n\n# totalEnergyDiff = prochiralViolationInfo[self.swapTypes[0]]['energy'][modelIndex] - prochiralViolationInfo[self.swapTypes[1]]['energy'][modelIndex] # this is a bug? 
Needs to be cumulative over models.\n totalEnergyHighState += energyHighState\n totalEnergyLowState += energyLowState\n if energyHighState > energyLowState: # swapping needed because for this model the assumption on the unswapped being the highest energy state was correct\n doSwapCount += 1.0\n# print \"DEBUG: tripletIdx,modelIndex,energyHighState,energyLowState: %s\" % str((tripletIdx,modelIndex,energyHighState,energyLowState))\n # end for model loop\n swappedFavouredFraction = doSwapCount / self.numModels\n\n # Adapted from Wattos\n totalEnergyHighState /= self.numModels # For criteria it's important to use one that can be compared over entries. Ensemble size should not influence result.\n totalEnergyLowState /= self.numModels\n if totalEnergyHighState < totalEnergyLowState: # Get this right before deciding on swapping.\n tmpEnergy = totalEnergyHighState\n totalEnergyHighState = totalEnergyLowState\n totalEnergyLowState = tmpEnergy\n # end if\n energyDifference = totalEnergyHighState - totalEnergyLowState # guaranteed positive or zero\n totalEnergyDiff = energyDifference # FC name\n percentageModelFavoured = 100.0 * swappedFavouredFraction\n if totalEnergyHighState > 0.0: # Strange in Wattos code there's no safety on totalEnergyHighState being zero. Added here.\n energyDifferencePercentage = 100.0 * energyDifference / totalEnergyHighState\n else:\n energyDifferencePercentage = 0.0\n if energyDifference > 0.0:\n energyDifferencePercentage = 100.0\n # end if/else\n\n # If any criteria is not met then the assignment will be maintained.\n swapAssignment = False\n if totalEnergyHighState <= totalEnergyLowState:\n msg = \"criterium not met: totalEnergyHighState > totalEnergyLowState: %.3f and %.3f\" % ( totalEnergyHighState, totalEnergyLowState )\n elif percentageModelFavoured < swapPercentage:\n msg = \"criterium not met: percentageModelFavoured >= swapPercentage: %.1f %.1f\" % ( percentageModelFavoured, swapPercentage)\n elif energyDifference < energy_abs_criterium: # If diff is close to zero do nothing.\n msg = \"criterium not met: energyDifference >= energy_abs_criterium: %.3f and %.3f\" % ( energyDifference, energy_abs_criterium )\n elif energyDifferencePercentage < energy_rel_criterium:\n msg = \"criterium not met: energyDifferencePercentage >= energy_rel_criterium: %.1f %.1f\" % ( energyDifferencePercentage, energy_rel_criterium)\n else:\n swapAssignment = True\n # end if/else\n if not swapAssignment:\n print \"DEBUG maintaining tripletIdx %s because %s\" % ( tripletIdx, msg)\n else:\n print \"DEBUG swapping tripletIdx %s\" % tripletIdx\n # end if\n finalSwapType = self.swapTypes[0]\n favouredPercent = (1 - swappedFavouredFraction) * 100.0\n if swapAssignment:\n finalSwapType = self.swapTypes[1]\n favouredPercent = 100.0 - favouredPercent\n Swap_count += 1\n\n Total_e_low_states += totalEnergyLowState\n Total_e_high_states += totalEnergyHighState\n swapInfo[prochiralKey] = (swapAssignment,finalSwapType,energyDifferencePercentage,totalEnergyDiff, totalEnergyHighState, totalEnergyLowState,\n favouredPercent,swappedFavouredFraction,tripletIdx)\n\n\n #\n # Now make changes in CCPN... 
deassignment gets priority over swapping.\n #\n\n if swapAssignment:\n\n prochiralResonances = []\n for resList in self.triplets[prochiralKey].values():\n for resonance in resList:\n if not resonance in prochiralResonances:\n prochiralResonances.append(resonance)\n\n #\n # Switch the assignments...\n #\n \n if debug:\n print\n print \"SWAPPING\", prochiralResonances\n print\n\n if len(prochiralResonances) == 2:\n\n resSet1 = prochiralResonances[0].resonanceSet\n atomSet1 = resSet1.sortedAtomSets()[0]\n resSet2 = prochiralResonances[1].resonanceSet\n atomSet2 = resSet2.sortedAtomSets()[0]\n\n resSet1.addAtomSet(atomSet2)\n resSet1.removeAtomSet(atomSet1)\n resSet2.addAtomSet(atomSet1)\n resSet2.removeAtomSet(atomSet2)\n\n # Reset some dictionaries as well - note that resAtomSwapDict gives atoms of the *other* prochiral, so below is correct!\n atomTuple1 = tuple(atomSet1.sortedAtoms())\n atomTuple2 = tuple(atomSet2.sortedAtoms())\n\n self.resAtomSwapDict[prochiralResonances[0]] = atomTuple2\n self.resAtomSwapDict[prochiralResonances[1]] = atomTuple1\n\n # Reset triplets info\n self.triplets[prochiralKey] = {}\n self.triplets[prochiralKey][atomTuple1] = [prochiralResonances[1]]\n self.triplets[prochiralKey][atomTuple2] = [prochiralResonances[0]]\n\n elif len(prochiralResonances) == 1:\n resSet = prochiralResonances[0].resonanceSet\n atomSet1 = resSet.sortedAtomSets()[0]\n\n otherAtoms = self.resAtomSwapDict[prochiralResonances[0]]\n\n otherAtomSet = otherAtoms[0].findFirstFixedAtomSet(nmrConstraintStore=self.nmrConstraintStore)\n if not otherAtomSet:\n otherAtomSet = self.nmrConstraintStore.newFixedAtomSet(atoms = otherAtoms)\n \n if otherAtomSet != atomSet1:\n resSet.addAtomSet(otherAtomSet)\n atomSet1.removeResonanceSet(resSet)\n\n # Reset some dictionaries as well - note that resAtomSwapDict gives atoms of the *other* prochiral, so below is correct!\n atomTuple1 = tuple(atomSet1.sortedAtoms())\n \n else:\n # Same atomSet, possible for HD1/2 HE1/2 aromatics\n atomTuple1 = otherAtoms\n\n self.resAtomSwapDict[prochiralResonances[0]] = atomTuple1\n\n # Reset triplets info\n self.triplets[prochiralKey] = {}\n self.triplets[prochiralKey][atomTuple1] = []\n self.triplets[prochiralKey][otherAtomSet] = [prochiralResonances[0]]\n\n #\n # Then do deassigning. 
and track info for final printout...\n #\n\n finalList = {}\n\n self.swapTypes = [self.SWAP_TYPE_ORG] # Swapped not necessary any more\n priorityCount = 0\n\n for priorityKey in priorityKeys:\n priorityCount += 1\n for prochiralKey in prochiralPriority[priorityKey]:\n\n if debug:\n print prochiralKey\n\n (prochiralViolationInfo,allConstraintItems) = self.checkProchiralKeyConstraints(prochiralKey,debug=debug)\n\n #\n # Now check whether needs to be deassigned\n #\n\n finalSwapType = self.SWAP_TYPE_ORG\n\n numViol = {}\n deassign = False\n\n violResultTupleList = prochiralViolationInfo[finalSwapType][self.REQUIRES_DEASSIGNMENT_STR]\n for violationCodeToTest in self.violationCodeList:\n if violationCodeToTest in self.VIOLATION_CODE_REPORTING_LIST:\n continue\n if deassign:\n continue\n fractionByViolationCode = self.violationCodes[violationCodeToTest]['fraction']\n# numViol[violationCodeToTest] = 0\n for violationCode, violationList in violResultTupleList:\n if violationCodeToTest != violationCode:\n continue\n # Look for every violationCodeToTest (a large single model cutoff and a smaller multi model cutoff) if fraction is met.\n numViol = self.numModels - violationList.count(0.0)\n fractionFound = ( 1.0 * numViol ) / self.numModels\n if fractionFound >= fractionByViolationCode: # inclusive\n if debug:\n print \"DEBUG: DEASSIGNING BASED ON %s %s\" % (violationCode, str(prochiralViolationInfo[finalSwapType][self.REQUIRES_DEASSIGNMENT_STR]))\n deassign = True\n Deassign_count += 1\n break # no need to look at other potentially qualifying restraints\n # end for\n # end for violationCodeToTest\n\n # Retrieve the swap info...\n (swapAssignment,finalSwapType,energyDifferencePercentage,totalEnergyDiff, totalEnergyHighState, totalEnergyLowState,\n favouredPercent,swappedFavouredFraction,tripletIdx) = swapInfo[prochiralKey]\n\n chainCode = prochiralKey[0]\n seqId = prochiralKey[1]\n chemAtomSetName = prochiralKey[2].name\n ccpCode = prochiralKey[2].chemComp.ccpCode\n totalConstraints = priorityKey[0]\n ambiguousConstraints = priorityKey[1]\n\n maximum_violation = orgMaxViolation[ prochiralKey]\n violation_single_model_criterium_count = orgViolationSingleModelCriteriumCount[prochiralKey]\n violation_multi_model_criterium_count = orgViolationMultiModelCriteriumCount[ prochiralKey]\n\n # chainCode, seqId, ccpCode, chemAtomSetName, swapAssignment, favouredPercent, totalEnergyDiff, totalConstraints, unambiguousStereoConstraints, deassign, numVeryLargeViol, numLargeViol\n# dummyIdxForComparisonWithWattos = '1' # TODO: reset to sensible output. 
chainCode\n# mapChainId2Idx = { 'A': '1', 'B': '2', 'C': '3' }\n# if mapChainId2Idx.has_key(chainCode):\n# dummyIdxForComparisonWithWattos = mapChainId2Idx[chainCode]\n pseudoNameKey = '%s,%s' % (ccpCode.upper(), chemAtomSetName)\n iupacPseudo = chemAtomSetName\n if self.mapCcpn2IupacPseudo.has_key(pseudoNameKey):\n iupacPseudo = self.mapCcpn2IupacPseudo[ pseudoNameKey ]\n lineItem = \"%1s %4d %5s %-10s\" % ( chainCode, seqId, ccpCode.upper(), iupacPseudo )\n lineItem += \" %3d %-3s %7.1f %7.1f %6.1f\" % ( tripletIdx, booleanPythonToJavaStr(swapAssignment), favouredPercent, energyDifferencePercentage, totalEnergyDiff )\n lineItem += \" %6.1f %6.1f %3d\" % ( totalEnergyHighState, totalEnergyLowState, totalConstraints )\n lineItem += \" %3d\" % ( ambiguousConstraints )\n lineItem += \" %-5s %7.3f\" % ( booleanPythonToJavaStr(deassign), maximum_violation )\n lineItem += \" %3d %3d\" % ( violation_single_model_criterium_count, violation_multi_model_criterium_count)\n \n if totalConstraints:\n finalList[(chainCode,seqId,chemAtomSetName)] = lineItem\n else:\n print \"warning skipping triplet without restraints: \" + lineItem\n invalidTripletCount += 1\n # end if\n \n #\n # Now make changes in CCPN... deassignment gets priority over swapping.\n #\n\n\n if deassign:\n\n violationCode = 'xxx'\n fractionViolated = 0.00\n\n prochiralResonances = []\n for resList in self.triplets[prochiralKey].values():\n for resonance in resList:\n if not resonance in prochiralResonances:\n prochiralResonances.append(resonance)\n\n self.resetConstraintItems(allConstraintItems,prochiralResonances, prochiralKey,violationCode,fractionViolated,verbose=False)\n\n #\n # Print out for checking\n #\n \n if outputType == 'custom':\n \n print \"\"\"# Columns below (* means new):\n# 1 chainCode\n# 2 seqId\n# 3 ccpCode\n# 4 chemAtomSetName\n# 5 priority (1 was handled first)\n# 6 swapAssignment\n# 7 favouredPercent (so for the swapped state if swapped!)\n# 8 energyDifferencePercentage (*)\n# 9 totalEnergyDiff ensemble averaged\n# 10 totalEnergyHighState ensemble averaged (*)\n# 11 totalEnergyLowState ensemble averaged (*)\n# 12 totalConstraints\n# 13 ambiguousConstraints (optional)\n# 14 deassign\n# 15 maximumViolation (pre processing)\n# 16 numVeryLargeViol (post processing TODO: check)\n# 17 numLargeViol (post processing TODO: check)\n\"\"\"\n\n finalIds = finalList.keys()\n finalIds.sort()\n\n meat = ''\n\n for finalId in finalIds:\n if outputType == 'custom':\n print finalList[finalId]\n else:\n meat += str( finalList[finalId] ) + '\\n'\n\n \n #\n # NMR-STAR Wattos type output\n #\n # meat = \"\"\"\n # A 4 Met HB* 82 False 100.0 0.000 2 0 False 0.000 0 0\n # A 5 Arg HD* 81 False 100.0 0.000 4 2 False 0.000 0 0\n # A 6 Leu HB* 23 False 90.0 14.328 26 7 True 1.812 11 0\n #\n # 1 6 LEU QB 22 no 90.0 78.6 8.803 11.204 2.402 26 10 yes 2.200 11 11\n # 1 6 LEU QD 8 no 5.0 0.0 0.000 1.649 1.649 34 14 yes 1.651 19 22\n # 1 9 GLU QG 96 no 100.0 0.0 0.000 0.000 0.000 10 0 no 0.000 0 0\n #\"\"\"\n\n if outputType == 'NMRSTAR':\n\n # Let's do the same with a STAR table.\n if invalidTripletCount:\n print \"Warning: found triplets without restraints.\"\n validTripletCount = Triplet_count - invalidTripletCount\n if validTripletCount < 1:\n print \"Error: found no triplets with restraints.\"\n return True\n validTripletCount2 = len(finalIds) # double check.\n if validTripletCount != validTripletCount2:\n print \"Error: found number of triplets with restraints %d but number of report list %d\" % ( validTripletCount, validTripletCount2)\n# 
return True\n \n Swap_percentage = ( 100.0 * Swap_count ) / validTripletCount\n Deassign_percentage = ( 100.0 * Deassign_count ) / validTripletCount\n Model_count = self.numModels\n Crit_abs_e_diff = energy_abs_criterium\n Crit_rel_e_diff = energy_rel_criterium\n Crit_mdls_favor_pct = swapPercentage\n Crit_sing_mdl_viol = self.violationCodes['xl']['violation']\n Crit_multi_mdl_viol = self.violationCodes['l']['violation']\n Crit_multi_mdl_pct = self.violationCodes['l']['fraction'] * 100.0\n\n header = \"\"\"data_entry\n\n\n save_assign_stereo\n _Stereo_assign_list.Sf_category stereo_assignments\n _Stereo_assign_list.Triplet_count %s\n _Stereo_assign_list.Swap_count %s\n _Stereo_assign_list.Swap_percentage %.1f\n _Stereo_assign_list.Deassign_count %s\n _Stereo_assign_list.Deassign_percentage %.1f\n _Stereo_assign_list.Model_count %s\n _Stereo_assign_list.Total_e_low_states %.1f\n _Stereo_assign_list.Total_e_high_states %.1f\n _Stereo_assign_list.Crit_abs_e_diff %.1f\n _Stereo_assign_list.Crit_rel_e_diff %.1f\n _Stereo_assign_list.Crit_mdls_favor_pct %.1f\n _Stereo_assign_list.Crit_sing_mdl_viol %.3f\n _Stereo_assign_list.Crit_multi_mdl_viol %.3f\n _Stereo_assign_list.Crit_multi_mdl_pct %.1f\"\"\" % (\n validTripletCount,\n Swap_count,\n Swap_percentage,\n Deassign_count,\n Deassign_percentage,\n Model_count,\n Total_e_low_states,\n Total_e_high_states,\n Crit_abs_e_diff,\n Crit_rel_e_diff,\n Crit_mdls_favor_pct,\n Crit_sing_mdl_viol,\n Crit_multi_mdl_viol,\n Crit_multi_mdl_pct\n )\n\n\n explanations = \"\"\"\n _Stereo_assign_list.Details\n;\n\nDescription of the tags in this list:\n* 1 * NMR-STAR 3 administrative tag\n* 2 * NMR-STAR 3 administrative tag\n* 3 * NMR-STAR 3 administrative tag\n* 4 * Number of triplets (atom-group pair and pseudo)\n* 5 * Number of triplets that were swapped\n* 6 * Percentage of triplets that were swapped\n* 7 * Number of deassigned triplets\n* 8 * Percentage of deassigned triplets\n* 9 * Number of models in ensemble\n* 10 * Energy of the states with the lower energies summed for all triplets (Ang.**2) ensemble averaged\n* 11 * Energy of the states with the higher energies summed for all triplets (Ang.**2) ensemble averaged\n* 12 * Item 9-8\n* 13 * Criterium for swapping assignment on the absolute energy difference (Ang.**2)\n* 14 * Criterium for swapping assignment on the relative energy difference (Ang.**2)\n* 15 * Criterium for swapping assignment on the percentage of models favoring a swap\n* 16 * Criterium for deassignment on a single model violation (Ang.)\n* 17 * Criterium for deassignment on a multiple model violation (Ang.)\n* 18 * Criterium for deassignment on a percentage of models\n* 19 * this tag\n\nDescription of the tags in the table below:\n* 1 * Chain identifier (can be absent if none defined)\n* 2 * Residue number\n* 3 * Residue name\n* 4 * Name of pseudoatom representing the triplet\n* 5 * Ordinal number of assignment (1 is assigned first)\n* 6 * 'yes' if assignment state is swapped with respect to restraint file\n* 7 * Percentage of models in which the assignment with the lowest\n overall energy is favored\n* 8 * Percentage of difference between lowest and highest overall energy\n with respect to the highest overall energy\n* 9 * Difference between lowest and highest overall energy ensemble averaged\n* 10 * Energy of the highest overall energy state (Ang.**2) ensemble averaged\n* 11 * Energy of the lowest overall energy state (Ang.**2) ensemble averaged\n* 12 * Number of restraints involved with the triplet. 
The highest ranking\n triplet on this number, is assigned first (optional)\n* 13 * Number of restraints involved with the triplet that are ambiguous\n besides the ambiguity from this triplet\n* 14 * 'yes' if restraints included in this triplet are deassigned\n* 15 * Maximum unaveraged violation before deassignment (Ang.)\n* 16 * Number of violated restraints above threshold for a single model\n before deassignment (given by Single_mdl_crit_count)\n* 17 * Number of violated restraints above threshold for multiple models\n before deassignment (given by Multi_mdl_crit_count)\n;\n\n\n loop_\n _Stereo_assign.Chain_ID\n _Stereo_assign.Comp_index_ID\n _Stereo_assign.Comp_ID\n _Stereo_assign.Pseudo_Atom_ID\n _Stereo_assign.Num\n _Stereo_assign.Swapped\n _Stereo_assign.Models_favoring_pct\n _Stereo_assign.Energy_difference_pct\n _Stereo_assign.Energy_difference\n _Stereo_assign.Energy_high_state\n _Stereo_assign.Energy_low_state\n _Stereo_assign.Constraint_count\n \"\"\"\n # if not self.compareWithWattos:\n explanations += \" _Stereo_assign.Constraint_ambi_count\\n\"\n # end if\n explanations += \"\"\" _Stereo_assign.Deassigned\n _Stereo_assign.Violation_max\n _Stereo_assign.Single_mdl_crit_count\n _Stereo_assign.Multi_mdl_crit_count\n\n\"\"\"\n\n footer = \"\"\" stop_\n\n save_\n\n \"\"\"\n\n\n star_text = header + explanations + meat + footer\n\n starFile = File()\n if starFile.read(text=star_text):\n print \"Error: reading STAR text by STAR api.\"\n return True\n if starFile.check_integrity():\n print \"Error: STAR text failed integrity check.\"\n return True\n starFile.filename = outputFileName\n if starFile.write():\n print \"Error: writing file %s\" % outputFileName\n return True\n if not os.path.exists(outputFileName):\n print \"Error: failed to find STAR file %s\" % outputFileName\n return True\n# print \"Written meta data to STAR file: %s\" % outputFileName # already printed by write()\n \n \n self.storeToAppData( star_text )\n # end def", "def test_enlarge_3_react_edge(self):\n self.rmg.reaction_model.enlarge(\n react_edge=True,\n unimolecular_react=np.array([0, 1, 0], bool),\n bimolecular_react=np.zeros((3, 3), bool),\n )\n\n self.assertEqual(len(self.rmg.reaction_model.edge.species), 2)\n smiles = set([spc.smiles for spc in self.rmg.reaction_model.edge.species])\n self.assertEqual(smiles, {'[H]', 'C[CH2]'})\n\n # We expect either C-C bond scission to be in the core and C-H bond scission to be in the edge\n self.assertEqual(len(self.rmg.reaction_model.core.reactions), 1)\n rxn = self.rmg.reaction_model.core.reactions[0]\n smiles = set([spc.smiles for spc in rxn.reactants + rxn.products])\n self.assertEqual(smiles, {'CC', '[CH3]'})\n\n self.assertEqual(len(self.rmg.reaction_model.edge.reactions), 1)\n rxn = self.rmg.reaction_model.edge.reactions[0]\n smiles = set([spc.smiles for spc in rxn.reactants + rxn.products])\n self.assertEqual(smiles, {'CC', 'C[CH2]', '[H]'})", "def extrude_balcony_grouped(bm, group, depth):\r\n\r\n def splitones(num):\r\n \"\"\"Return a list of numbers that add up to num where the largest value is one\"\"\"\r\n fract, intr = math.modf(num)\r\n result = [1 for _ in range(int(intr))]\r\n if fract > 0.0:\r\n result.append(fract)\r\n return result\r\n\r\n result = []\r\n inset_faces = group[:]\r\n valid_normals = [f.normal.to_tuple(3) for f in group]\r\n for num in splitones(depth):\r\n res = bmesh.ops.inset_region(\r\n bm, faces=inset_faces, depth=num, use_even_offset=True, use_boundary=True)[\"faces\"]\r\n bmesh.ops.dissolve_degenerate(\r\n bm, dist=0.001, 
edges=list({e for f in inset_faces for e in f.edges}))\r\n inset_faces = validate(inset_faces)\r\n inset_faces.extend([f for f in res if f.normal.to_tuple(3) in valid_normals])\r\n result.extend(res)\r\n return [f for f in validate(result) if f.normal.z > 0]", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")", "def EnumerateStereoisomers(m, options=..., verbose=...): # -> Generator[Unknown, None, None]:\n ...", "def space_group_irreps(self, *k: Array) -> Array:\n k = _ensure_iterable(k)\n # Wave vectors\n big_star_Cart = np.tensordot(self.point_group_.matrices(), k, axes=1)\n big_star = self.lattice.to_reciprocal_lattice(big_star_Cart) * (\n 2 * pi / self.lattice.extent\n )\n # Little-group-irrep factors\n # Conjugacy_table[g,p] lists p^{-1}gp, so point_group_factors[i,:,p]\n # of irrep #i for the little group of p(k) is the equivalent\n # Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))\n point_group_factors = self._little_group_irreps(k, divide=True)[\n :, self.point_group_.conjugacy_table\n ] * np.exp(\n -1j\n * np.tensordot(\n self.point_group_.translations(), big_star_Cart, axes=(-1, -1)\n )\n )\n # Translational factors\n trans_factors = []\n for axis in range(self.lattice.ndim):\n n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1\n factors = np.exp(-1j * np.outer(np.arange(n_trans), big_star[:, axis]))\n shape = (\n [1] * axis\n + [n_trans]\n + [1] * (self.lattice.ndim - 1 - axis)\n + [len(self.point_group_)]\n )\n trans_factors.append(factors.reshape(shape))\n trans_factors = reduce(np.multiply, trans_factors).reshape(\n -1, len(self.point_group_)\n )\n\n # Multiply the factors together and sum over the \"p\" PGSymmetry axis\n # Translations are more major than point group operations\n result = np.einsum(\n \"igp, tp -> itg\", point_group_factors, trans_factors\n ).reshape(point_group_factors.shape[0], -1)\n return prune_zeros(result)", "def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam): \n h, w, u = curImageInfo.originalImageResized.shape # shape after resize\n K = 1\n N = h * w * K\n gr = np.mean(curImageInfo.originalImageResized, 2) # not 3 as it is zero based :3\n grs = cv2.GaussianBlur(gr, (5, 5), 1)\n \n # One heuristic for converting depth to disparity\n disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;\n \n if conversionParam.spatialSmoothnessSwitch == True:\n # Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients\n # Vertical and Horizontal Edges\n dx = cv2.filter2D(grs, -1, np.transpose(np.array([[-1, 1, 0]])))\n dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))\n \n W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2 \n \n A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, \"csc\") \\\n + (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \\\n + (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))\n \n b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()\n \n [x, flag] = cg(A, b, 
np.transpose(disparity0).flatten(), 5e-1, 50)\n \n disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 1, K)\n else:\n disparity = disparity0\n \n curImageInfo.leftImage = curImageInfo.originalImage\n \n # The -ve sign to convert the white to black and black to white \n warpright = -disparity\n \n # only the warping interp2 is done on the original size image with no resizing to have good estimation\n warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]), \n interpolation=cv2.INTER_LINEAR)\n \n curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright), \n conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))\n \n return disparity", "def composite_scene(orig_scene, mask_seam, match_scene, dialation_mask, orig_scene1, method=\"paste\", repeat=1):\n avg_pixel = np.mean(orig_scene1[orig_scene1 != 0])\n \n output = np.zeros(orig_scene.shape)\n if method==\"seamlessclone\":\n width, height, _ = match_scene.shape\n center = (height/2, width/2)\n \n # create plain white mask\n mask = np.zeros(match_scene.shape, match_scene.dtype) + 255\n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = avg_pixel\n \n \n \n #image_to_compare\n output_blend = cv2.seamlessClone(match_scene.astype(np.uint8), \n orig_scene_impute.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n #implot(output_blend)\n # now reapply the mask with alpha blending to fix it up again.\n \n \"\"\"\n TO DO CHANGE IT FROM THE DILATION + MASK SEAM, NEED TO FIND THE INTERSECTION OF THESE TWO TO BE THE \n REAL MASK TO BLUR\n \"\"\"\n dilation_mask = mask_seam.copy()\n \n dilation_mask = cv2.GaussianBlur(dilation_mask, (101,101), 0) # blur mask and do a alpha blend... 
between the \n #implot(dilation_mask, 'gray')\n \n dilation_mask = dilation_mask/255.0\n \n \n \n # 0 is black, 1 is white\n #output = cv2.addWeighted(output_blend, dialation_mask, orig_scene, 1-dialation_mask)\n #print dialation_mask\n #print dialation_mask.shape\n #print output_blend.shape\n #a = cv2.multiply(output_blend.astype(np.float), dialation_mask)\n \n for _ in range(10):\n # some kind of layered alpha blend by the dilation mask values...\n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output = cv2.seamlessClone(match_scene.astype(np.uint8), \n output_blend.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n # complete blend with seamlessclone...\n \n \n # output = np.maximum(output_blend, orig_scene_impute)\n # or just darken...\n \n \n #if repeat == 1:\n # return output_blend\n #output = composite_scene(orig_scene_impute, mask_seam, output_blend, dialation_mask, method=\"paste\")\n \n\n\n elif method==\"paste\":\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n \n elif method==\"alphablend\":\n output_blend = output.copy()\n output_blend[mask_seam == 0] = orig_scene[mask_seam == 0]\n output_blend[mask_seam != 0] = match_scene[mask_seam != 0]\n \n \n \n \n else:\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n return output", "def _transform_spacegroup(self, spacegroup):\n assert isinstance(spacegroup, int), \"Input is not an integer!\"\n\n sg_features = list(expand_spacegroup(spacegroup))\n for i in np.arange(2, 0, -1):\n extra_channels = self.encoder[i-1][sg_features[i]]\n sg_features = self._replace_with_list_values(i, sg_features,\n extra_channels)\n return np.array(sg_features)", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n sign = 1\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency * sign <= 0:\n return True\n else:\n if line.amount_residual * sign <= 0:\n return True\n return False", "def split(system, ligand_res_name='LIG'):\n\n # Define Empty sets\n lig_set = set()\n prot_set = set()\n ca_prot_set = set()\n wat_set = set()\n excp_set = set()\n ion_set = set()\n # cofactor_set = set()\n # system_set = set()\n\n # Atom Bond Set vector used to contains the whole system\n frags = oechem.OEAtomBondSetVector()\n\n # Define Options for the Filter\n opt = oechem.OESplitMolComplexOptions()\n\n # The protein filter is set to avoid that multiple\n # chains are separated during the splitting and peptide\n # molecules are recognized as ligands\n pf = oechem.OEMolComplexFilterFactory(oechem.OEMolComplexFilterCategory_Protein)\n peptide = oechem.OEMolComplexFilterFactory(oechem.OEMolComplexFilterCategory_Peptide)\n protein_filter = oechem.OEOrRoleSet(pf, peptide)\n opt.SetProteinFilter(protein_filter)\n\n # The ligand filter is set to recognize just the ligand\n lf = 
oechem.OEMolComplexFilterFactory(oechem.OEMolComplexFilterCategory_Ligand)\n not_protein_filter = oechem.OENotRoleSet(protein_filter)\n ligand_filter = oechem.OEAndRoleSet(lf, not_protein_filter)\n opt.SetLigandFilter(ligand_filter)\n\n # The water filter is set to recognize just water molecules\n wf = oechem.OEMolComplexFilterFactory(oechem.OEMolComplexFilterCategory_Water)\n opt.SetWaterFilter(wf)\n\n # Set Category\n cat = oechem.OEMolComplexCategorizer()\n cat.AddLigandName(ligand_res_name)\n opt.SetCategorizer(cat)\n\n # Define the system fragments\n if not oechem.OEGetMolComplexFragments(frags, system, opt):\n raise ValueError('Unable to generate the system fragments')\n\n # Set empty OEMol containers\n prot = oechem.OEMol()\n lig = oechem.OEMol()\n wat = oechem.OEMol()\n excp = oechem.OEMol()\n\n # Split the protein from the system\n atommap = oechem.OEAtomArray(system.GetMaxAtomIdx())\n if not oechem.OECombineMolComplexFragments(prot, frags, opt, opt.GetProteinFilter(), atommap):\n raise ValueError('Unable to split the Protein')\n # Populate the protein set and the protein carbon alpha set\n pred = oechem.OEIsAlphaCarbon()\n for sys_at in system.GetAtoms():\n sys_idx = sys_at.GetIdx()\n at_idx = atommap[sys_idx]\n if at_idx:\n prot_set.add(sys_idx)\n at = system.GetAtom(oechem.OEHasAtomIdx(sys_idx))\n if pred(at):\n ca_prot_set.add(sys_idx)\n # print(sys_idx, '->', at_idx)\n\n # Split the ligand from the system\n atommap = oechem.OEAtomArray(system.GetMaxAtomIdx())\n if not oechem.OECombineMolComplexFragments(lig, frags, opt, opt.GetLigandFilter(), atommap):\n raise ValueError('Unable to split the Ligand')\n # Populate the ligand set\n for sys_at in system.GetAtoms():\n sys_idx = sys_at.GetIdx()\n at_idx = atommap[sys_idx]\n if at_idx:\n lig_set.add(sys_idx)\n # print(sys_idx, '->', at_idx)\n\n # Split the water from the system\n atommap = oechem.OEAtomArray(system.GetMaxAtomIdx())\n if not oechem.OECombineMolComplexFragments(wat, frags, opt, opt.GetWaterFilter(), atommap):\n raise ValueError('Unable to split the Water')\n # Populate the water set\n for sys_at in system.GetAtoms():\n sys_idx = sys_at.GetIdx()\n at_idx = atommap[sys_idx]\n if at_idx:\n wat_set.add(sys_idx)\n # print(sys_idx, '->', at_idx)\n\n # Split the excipients from the system\n atommap = oechem.OEAtomArray(system.GetMaxAtomIdx())\n if not oechem.OECombineMolComplexFragments(excp, frags, opt, opt.GetOtherFilter(), atommap):\n raise ValueError('Unable to split the Excipients')\n # Populate the excipient set\n for sys_at in system.GetAtoms():\n sys_idx = sys_at.GetIdx()\n at_idx = atommap[sys_idx]\n if at_idx:\n excp_set.add(sys_idx)\n # print(sys_idx, '->', at_idx)\n\n # Create the ions set\n for exc_idx in excp_set:\n atom = system.GetAtom(oechem.OEHasAtomIdx(exc_idx))\n if atom.GetDegree() == 0:\n ion_set.add(exc_idx)\n\n # Create the cofactor set\n cofactor_set = excp_set - ion_set\n\n # Create the system set\n system_set = prot_set | lig_set | excp_set | wat_set\n\n if len(system_set) != system.NumAtoms():\n raise ValueError(\"The total system atom number {} is different \"\n \"from its set representation {}\".format(system.NumAtoms(), system_set))\n\n # The dictionary is used to link the token keywords to the created molecule sets\n dic_set = {'ligand': lig_set, 'protein': prot_set, 'ca_protein': ca_prot_set,\n 'water': wat_set, 'ions': ion_set, 'cofactors': cofactor_set, 'system': system_set}\n\n return dic_set", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", 
"def voxelize4(self, materials):\n\t\tlayers = list()\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\n\t\tvolumeGeneral = list()\n\t\tm = 0\n\t\tfor i in self.slicePoints:\n\t\t\t#print self.boolResult[m].shape\n\t\t\ttupleResultR = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(self.boolLayers[m].shape, dtype=float))\n\t\t\t\n\t\t\tj = numpy.nditer(self.boolLayers[m], flags=['multi_index'], op_flags=['readwrite'])\n\t\t\twhile not j.finished:\n\t\t\t\tif j[0] == True:\n\t\t\t\t#tupleResult[j.multi_index] = round((i[direction] - minValue) * ratio)\n\t\t\t\t#tupleResult[j.multi_index] = 78\n\t\t\t\t\tprint type(j.multi_index)\n\t\t\t\t\tprint j.multi_index\n\t\t\t\t\t#tupleResult[j.multi_index] = planeWeight * math.fabs((j.multi_index[1] - planeOrigin[0]) * planeNormal[0] + (j.multi_index[0] - planeOrigin[1]) * planeNormal[1] + (i[2] - planeOrigin[2]) * planeNormal[2]) + pointWeight * math.sqrt(math.pow((j.multi_index[1]- pointValue[0]),2) + math.pow((j.multi_index[0] - pointValue[1]), 2)+math.pow((i[2] - pointValue[2]),2))\n\t\t\t\t\t\n\t\t\t\t\tdistanceList = []\n\t\t\t\t\ttotalDistance = 0.0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Plane\":\n\t\t\t\t\t\t\tGplane = math.fabs((j.multi_index[1] - materials[k].origin[0]) * materials[k].normal[0] + (j.multi_index[0] - materials[k].origin[1]) * materials[k].normal[1] + (i[2] - materials[k].origin[2]) * materials[k].normal[2])\n\t\t\t\t\t\t\tdistanceList.append(Gplane)\n\t\t\t\t\t\t\ttotalDistance += Gplane\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Point\":\n\t\t\t\t\t\t\tGpoint = (math.sqrt(math.pow((j.multi_index[1]- materials[k].point[0]),2) + math.pow((j.multi_index[0] - materials[k].point[1]), 2)+math.pow((i[2] - materials[k].point[2]),2)))\n\t\t\t\t\t\t\tdistanceList.append(Gpoint)\n\t\t\t\t\t\t\ttotalDistance += Gpoint\n\t\t\t\t\tfor k in range(len(distanceList)):\n\t\t\t\t\t\tdistanceList[k] = distanceList[k] / totalDistance\n\t\t\t\t\t\tdistanceList[k] = 1.0 - distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleResultR[j.multi_index] += materials[k].materialColor[0] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultG[j.multi_index] += materials[k].materialColor[1] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultB[j.multi_index] += materials[k].materialColor[2] * distanceList[k] * materials[k].weight\n\t\t\t\t\t#if(tupleResult[j.multi_index] > 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(tupleResult[j.multi_index]) \n\t\t\t\t\t#if(tupleResult[j.multi_index] == 0):\n\t\t\t\t\t#\t\ttupleResult[j.multi_index] = 1\n\t\t\t\t\t#if(tupleResult[j.multi_index] < 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(0 - tupleResult[j.multi_index]) \n\t\t\t\telse:\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = 
0.0\n\t\t\t\tj.iternext()\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor k in range(len(materials)):\n\t\t\t\tlayerMaterial[k].append(tupleMaterial[k])\n\t\t\t\t\n\t\t\tm = m + 1\n\t\tprint \"i got here\"\n\t\tvolumeR=numpy.array(layersR) # create the 3d volume\n\t\tvolumeG=numpy.array(layersG) \n\t\tvolumeB=numpy.array(layersB)\n\t\tfor k in range(len(materials)):\n\t\t\tself.volumeComposition[k] = numpy.array(layerMaterial[k])\n\t\t\n\t\tvolumeGeneral.append(volumeR)\n\t\tvolumeGeneral.append(volumeG)\n\t\tvolumeGeneral.append(volumeB)\n\t\treturn volumeGeneral", "def cyclic_reduction(self, removed=False):\n word = self.copy()\n g = self.group.identity\n while not word.is_cyclically_reduced():\n exp1 = abs(word.exponent_syllable(0))\n exp2 = abs(word.exponent_syllable(-1))\n exp = min(exp1, exp2)\n start = word[0]**abs(exp)\n end = word[-1]**abs(exp)\n word = start**-1*word*end**-1\n g = g*start\n if removed:\n return word, g\n return word", "def restart():\n for pig in pigs.copy():\n space.remove(pig.shape, pig.shape.body)\n pigs.remove(pig)\n for bird in birds.copy():\n space.remove(bird.shape, bird.shape.body)\n birds.remove(bird)\n for column in columns.copy():\n space.remove(column.shape, column.shape.body)\n columns.remove(column)\n for beam in beams.copy():\n space.remove(beam.shape, beam.shape.body)\n beams.remove(beam)", "def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()", "def test_ungrouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n self.ungroup(n, a)\n\n assert 0 == len(n.subject.deployment)\n assert 0 == len(self.kindof(UML.Deployment))", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def braid_group_orbit(self):\n from sage.graphs.digraph import DiGraph\n\n G = DiGraph(multiedges=True, loops=True)\n waiting = [self.relabel()]\n\n while waiting:\n c = waiting.pop()\n G.add_vertex(c)\n for i in 
range(self.length()):\n cc = self.braid_group_action(i).relabel()\n if cc not in G:\n waiting.append(cc)\n G.add_edge(c, cc, i)\n return G", "def mask(\n self, enc: SplitEncoding, random: bool = False, detach: bool = False\n ) -> tuple[SplitEncoding, SplitEncoding]:\n zs = enc.zs\n zy = enc.zy\n if detach:\n zs = zs.detach()\n zy = zy.detach()\n if random:\n zs_m = SplitEncoding(zs=torch.randn_like(zs), zy=zy)\n zy_m = SplitEncoding(zs=zs, zy=torch.randn_like(zy))\n else:\n zs_m = SplitEncoding(zs=torch.zeros_like(zs), zy=zy)\n zy_m = SplitEncoding(zs=zs, zy=torch.zeros_like(zy))\n return zs_m, zy_m", "def pick_grom_group(group, other, selected):\n\treturn Faction(over(group, selected), over(group + other, selected))", "def unsplit(self, variant_groups):\n for vargroup in variant_groups:\n self.variant_list.extend(vargroup.variant_list)\n self.pos = min([var.start for var in self.variant_list])\n self.end = max([var.end for var in self.variant_list])", "def separate_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.result[row] = self.matrix[row][-1]\r\n self.matrix[row].pop()", "def test_rplus_rminus(Group: Type[jaxlie.MatrixLieGroup]):\n T_wa = sample_transform(Group)\n T_wb = sample_transform(Group)\n T_ab = T_wa.inverse() @ T_wb\n\n assert_transforms_close(jaxlie.manifold.rplus(T_wa, T_ab.log()), T_wb)\n assert_arrays_close(jaxlie.manifold.rminus(T_wa, T_wb), T_ab.log())", "def group_remotion(a2_data, retained):\n for i in a2_data['I'].keys():\n for r in a2_data['I'][i]['R'].keys():\n for g in a2_data['I'][i]['R'][r].keys():\n if g not in retained:\n a2_data['I'][i]['R'][r].pop(g)\n return a2_data", "def _kill_group(self, x, y):\n if self[x, y] not in self.TURNS:\n raise BoardError('Can only kill black or white group')\n\n group = self.get_group(x, y)\n score = len(group)\n\n for x1, y1 in group:\n self[x1, y1] = self.EMPTY\n\n return score", "def test_strip_atom_stereochemistry(self):\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n nitrogen_idx = [\n atom.molecule_atom_index for atom in mol.atoms if atom.element.symbol == \"N\"\n ][0]\n\n assert mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None\n\n mol = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert mol.atoms[nitrogen_idx].stereochemistry == \"S\"\n mol.strip_atom_stereochemistry(smarts=\"[N+0X3:1](-[*])(-[*])(-[*])\")\n assert mol.atoms[nitrogen_idx].stereochemistry is None", "def split_by_similarity(dbsession, group):\n vectors = {}\n sorted_items = []\n current = group.items[0]\n vectors[current.id] = fill_vector(current)\n while len(sorted_items) < len(group.items):\n next_item = None\n next_sim = None\n for item in group.items:\n if item in sorted_items:\n continue\n if item.id not in vectors:\n vectors[item.id] = fill_vector(item)\n if not next_item or cosine(vectors[current.id], vectors[item.id]) > next_sim:\n next_item = item\n next_sim = cosine(vectors[current.id], vectors[item.id])\n if next_item:\n sorted_items.append(next_item)\n limit = len(group.items) / math.ceil(len(group.items) / 100)\n new_group = Group(value=group.value, label=group.label, parent=group, split=\"similar\")\n dbsession.add(new_group)\n count = 0\n for item in sorted_items:\n if count > limit:\n new_group = Group(value=group.value, label=group.label, parent=group, split=\"similar\")\n dbsession.add(new_group)\n count = 0\n item.group = new_group\n count = count + 1", "def sidechain(self):\n\n return 
self.atoms - self.backbone()", "def clean_morphs():\n blendshapes = cmds.ls(type=\"blendShape\")\n for blendShape in blendshapes:\n blend_target_list = cmds.listAttr(blendShape + '.w', m=True)\n\n for blend_target in blend_target_list:\n bs_fixed = blend_target.replace(\"head__eCTRL\", \"\")\n if (bs_fixed.find(\"__\") > 1):\n bs_split = bs_fixed.split(\"__\")\n bs_fixed = bs_fixed.replace(bs_split[0]+\"__\", \"\")\n bs_fixed = bs_fixed.replace(\"headInner__\", \"\")\n bs_fixed = bs_fixed.replace(\"head_eCTRL\", \"\")\n bs_fixed = bs_fixed.replace(\"head__\", \"\")\n bs_fixed = bs_fixed.replace(\"head_\", \"\")\n bs_fixed = bs_fixed.replace(\"PHM\", \"\")\n bs_fixed = bs_fixed.replace(\"CTRL\", \"\")\n bs_fixed = bs_fixed.replace(\"QT1\", \"\")\n bs_fixed = bs_fixed.replace(\"Shape\", \"\")\n\n oldMorph = blendShape + \".\" + blend_target\n try:\n # Rename Morphs (Blendshapes)\n cmds.aliasAttr(bs_fixed, oldMorph)\n except:\n pass", "def test_parameterize_mol_missing_stereo_openeye(self, force_field):\n toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def clean_copy(self):\n # this is a stub implementation\n #return Molecule(\"H2O\")\n m = self._gettokens()\n for t in self._gettokens():\n #if there is value errors or key errors, remove the invalid tokens\n if (t.isalpha() and t not in _atomic_mass) or (t not in \"()\" and not t.isalnum()):\n m.remove(t)\n str2 = \"\".join(m) \n return Molecule(str2)", "def _as_hidden_group(self):\n return self._to_hidden_group(self)", "def suspension(self):\n if not self:\n return self\n if self.arity is None or self.degree is None:\n raise TypeError('defined for homogeneous elements only')\n if self.convention != 'Berger-Fresse':\n raise NotImplementedError\n answer = self.zero()\n for k, v in self.items():\n nonzero = False\n try:\n p = SymmetricGroupElement(k[:self.arity])\n sign = p.sign\n nonzero = True\n except TypeError:\n pass\n if nonzero:\n answer += self.create({k[self.arity - 1:]: v * sign})\n return answer", "def add_synth_group(self, name=\"\"):\n return None" ]
[ "0.723399", "0.7145059", "0.7093139", "0.6711873", "0.5570379", "0.5568875", "0.54186857", "0.5407642", "0.53835154", "0.5234586", "0.5234586", "0.523138", "0.52302647", "0.5206368", "0.51776206", "0.5163662", "0.5118267", "0.5115464", "0.51110584", "0.510641", "0.5056884", "0.50111765", "0.49716237", "0.49452567", "0.4937246", "0.49324453", "0.4920834", "0.49179763", "0.4914563", "0.49032268", "0.49030703", "0.4897407", "0.48935497", "0.48865286", "0.48695698", "0.48614123", "0.4855507", "0.48545253", "0.48538658", "0.48475668", "0.4845796", "0.4844968", "0.48446786", "0.48435917", "0.48365226", "0.483541", "0.4828116", "0.4827048", "0.47986466", "0.47974715", "0.47936895", "0.47876415", "0.47846675", "0.4768204", "0.47675234", "0.4766108", "0.47624657", "0.47541133", "0.47535947", "0.47508886", "0.4748089", "0.4747878", "0.47474936", "0.47446093", "0.47436386", "0.47431025", "0.47403458", "0.47309902", "0.4723673", "0.47214556", "0.4704232", "0.4699997", "0.4699027", "0.46968415", "0.4693865", "0.46918985", "0.4691473", "0.468605", "0.4678256", "0.46733102", "0.46698138", "0.46683592", "0.46649408", "0.466359", "0.46622568", "0.46617067", "0.46587852", "0.46577564", "0.4655771", "0.4649117", "0.46443135", "0.46382752", "0.463278", "0.46289778", "0.46287033", "0.46268228", "0.46161976", "0.46150786", "0.46150208", "0.46104515" ]
0.7006468
3
If multiple copies of an atom in a StereoGroup show up in the product, they should all be part of the same product StereoGroup.
def test_reaction_copies_stereogroup(self): # Stereogroup atoms are in the reaction with multiple copies in the product products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]', 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|', 'CC(=O)C') # stereogroup manually checked, product SMILES assumed correct. self.assertEqual( products, 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|' ) # Stereogroup atoms are not in the reaction, but have multiple copies in the # product. products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]', 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|', 'CC(=O)C') # stereogroup manually checked, product SMILES assumed correct. self.assertEqual( products, 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n\n assert 1 == len(n.subject.deployment)\n assert n.subject.deployment[0].deployedArtifact[0] is a.subject", "def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)", "def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)", "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. 
Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_consistent_ids(self) -> None:\n bnode = BNode()\n g0_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Golan Trevize\")),\n (bnode, RDF.type, FOAF.Person),\n }\n bnode = BNode()\n g1_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Janov Pelorat\")),\n (bnode, RDF.type, FOAF.Person),\n }\n\n g0 = Graph()\n g0 += g0_ts\n cg0 = to_canonical_graph(g0)\n cg0_ts = GraphHelper.triple_set(cg0)\n\n g1 = Graph()\n g1 += g1_ts\n cg1 = to_canonical_graph(g1)\n cg1_ts = GraphHelper.triple_set(cg1)\n\n assert cg0_ts.issubset(\n cg1_ts\n ), \"canonical triple set cg0_ts should be a subset of canonical triple set cg1_ts\"", "def test_duplicate_as(bf: Session) -> None:\n peer_props = bf.q.bgpPeerConfiguration(nodes=SNAPSHOT_NODES_SPEC).answer().frame()\n as_groups = peer_props.groupby(\"Local_AS\")\n for local_as, as_group in as_groups:\n assert as_group[\"Node\"].nunique() == 1, \"ASN {} is duplicated on {}\".format(\n local_as, \", \".join(as_group[\"Node\"].unique()))", "def test_check_for_existing_reaction_keeps_identical_reactions_with_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=True)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=True)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertFalse(found, 'check_for_existing_reaction failed to identify duplicate template reactions')", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def test_check_for_existing_reaction_eliminates_identical_reactions_without_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=False)\n reaction_in_model.reactants.sort()\n 
reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=False)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to eliminate reactions without duplicate tag')", "def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n c = self.create(ComponentItem, UML.Component)\n\n self.group(n, c)\n\n assert 1 == len(n.subject.ownedAttribute)\n assert 1 == len(n.subject.ownedConnector)\n assert 1 == len(c.subject.ownedAttribute)\n assert 2 == len(self.kindof(UML.ConnectorEnd))\n\n a1 = n.subject.ownedAttribute[0]\n a2 = c.subject.ownedAttribute[0]\n\n assert a1.isComposite\n assert a1 in n.subject.part\n\n connector = n.subject.ownedConnector[0]\n assert connector.end[0].role is a1\n assert connector.end[1].role is a2", "def test_install_set_multi(self):\n expected = copy.deepcopy(test_xdata)\n for thing in expected.xpath(\"Children[@identical='true']/Thing\"):\n thing.text = \"same\"\n self._install(\n [lxml.etree.Element(\n \"SetMulti\", value=\"same\",\n base='Test/Children[#attribute/identical = \"true\"]',\n sub=\"Thing/#text\")],\n expected)", "def check_sane(group):\n attrs = None\n\n for info in group:\n dup_info = dict(info)\n\n # Remove lat and lon\n for prohib in ('lat', 'lon'):\n if prohib in dup_info:\n del dup_info[prohib]\n\n if attrs is None:\n # Use the first file as a reference\n attrs = dup_info\n else:\n # Do the sanity check\n if dup_info.items() != attrs.items():\n msg = \"File '{}' doesn't match '{}' in same group\".format(\n attrs, dup_info\n )\n raise ValueError(msg)", "def test_grouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n\n assert n2.subject in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode", "def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. 
Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp", "def test_products_ref_groups_put(self):\n pass", "def test_unique_genome(self):\n p1 = self.player()\n p2 = self.player()\n self.assertTrue(p1.genome is p2.genome)", "def test_grouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n assert 1 == len(uc1.subject.subject)\n self.group(s, uc2)\n assert 1 == len(uc2.subject.subject)\n\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(2, len(s.subject.useCase))", "def check_if_group_member(self, organism):\n for key, item in self.phen_dict.items():\n if organism in item:\n self.declare(Organism(name=key))", "def update_from(self, grp_names):\n import GEOM, SMESH\n mesh_types = {\n GEOM.VERTEX : SMESH.NODE,\n GEOM.EDGE : SMESH.EDGE,\n GEOM.WIRE : SMESH.EDGE,\n GEOM.FACE : SMESH.FACE,\n GEOM.SHELL : SMESH.FACE,\n GEOM.SOLID : SMESH.VOLUME,\n GEOM.COMPSOLID : SMESH.VOLUME,\n }\n smesh = self.get_smesh()\n\n\n smesh_grps_MA = []\n smesh_grps_NO = []\n for grp in smesh.GetGroups() :\n if str(grp.GetType()) == 'NODE' :\n smesh_grps_NO.append(grp.GetName())\n else :\n smesh_grps_MA.append(grp.GetName())\n\n print smesh_grps_MA,smesh_grps_NO\n done = False\n for geom in self.give_geom().get_children():\n grp_name = geom.read_name()\n #if grp_name in smesh_grps:\n # continue\n #Modif Fournier\n print grp_name\n if grp_name in grp_names[0]:\n if grp_name in smesh_grps_MA:\n pass\n else :\n mesh_type = mesh_types.get(geom.get_shape_type())\n if mesh_type:\n #smesh.CreateGroup(mesh_type, grp_name)\n smesh.CreateGroupFromGEOM(mesh_type,grp_name,geom.get_sgeom())\n done = True\n if grp_name in grp_names[1]:\n if grp_name in smesh_grps_NO:\n continue\n #smesh.CreateGroup(SMESH.NODE,grp_name)\n smesh.CreateGroupFromGEOM(SMESH.NODE,grp_name,geom.get_sgeom())\n done = True\n return done", "def _enforce_coupling(self):\n for body in self.bodies:\n if body.group_master:\n for body2 in 
self.bodies:\n if body.group == body2.group and not body2.group_master:\n body2.couple_variables(body)\n\n for scenario in self.scenarios:\n if scenario.group_master:\n for scenario2 in self.scenarios:\n if scenario.group == scenario2.group and not scenario2.group_master:\n scenario2.couple_variables(scenario)", "def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')", "def test_ungrouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n self.ungroup(n, a)\n\n assert 0 == len(n.subject.deployment)\n assert 0 == len(self.kindof(UML.Deployment))", "def _fix_genotypes_object(self, genotypes, variant_info):\n # Checking the name (if there were duplications)\n if self.has_index and variant_info.name != genotypes.variant.name:\n if not variant_info.name.startswith(genotypes.variant.name):\n raise ValueError(\"Index file not synced with IMPUTE2 file\")\n genotypes.variant.name = variant_info.name\n\n # Trying to set multi-allelic information\n if self.has_index and self._index_has_location:\n # Location was in the index, so we can automatically set the\n # multi-allelic state of the genotypes\n genotypes.multiallelic = variant_info.multiallelic\n\n else:\n # Location was not in the index, so we check one marker before and\n # after the one we found\n logging.warning(\"Multiallelic variants are not detected on \"\n \"unindexed files.\")", "def exclusive_arch(pathogen_groups_set, collapse_pathogen_groups):\n if len(pathogen_groups_set) == 1:\n return True\n\n # Only check pathogen grouping when the flag is on\n if collapse_pathogen_groups:\n if len(pathogen_groups_set) > 2:\n return False\n if 0 in pathogen_groups_set and 1 in pathogen_groups_set:\n return True\n if 3 in pathogen_groups_set and 4 in pathogen_groups_set:\n return True\n return False", "def addObjectsToGroup(self):\n\t\tmc.delete( self.objects, ch = True )\n\t\tmc.parent( self.objects, self.grp.name )\n\t\tmc.makeIdentity( self.objects, apply=True,t=1,r=1,s=1,n=2)\n\t\t#self.lockObjects()", "def mergable(self, frame):\n\t\tfor pos in self.srcList: \n\t\t\tif pos in frame.srcList:\n\t\t\t\treturn True\n\n\t\tfor pos in self.tgtList: \n\t\t\tif pos in frame.tgtList:\n\t\t\t\treturn True\n\n\t\treturn False", "def test_enlarge_2_add_reactive_species(self):\n m1 = Molecule(smiles='CC')\n spc1 = self.rmg.reaction_model.make_new_species(m1, label='C2H4')[0]\n self.rmg.reaction_model.enlarge(spc1)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 2)\n self.assertTrue(self.rmg.reaction_model.core.species[1].reactive)\n\n m2 = Molecule(smiles='[CH3]')\n spc2 = self.rmg.reaction_model.make_new_species(m2, label='CH3')[0]\n self.rmg.reaction_model.enlarge(spc2)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 
3)\n self.assertTrue(self.rmg.reaction_model.core.species[2].reactive)", "def test_does_not_return_duplicate_groups(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n GroupResolution.objects.create(\n group=self.group,\n release=self.release,\n type=GroupResolution.Type.in_release,\n )\n\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def specifc_site(rd_mols, mols, target, site_description=None):\n\n # look for molgroup with same target and description\n mol_group = search_for_molgroup_by_description(target=target.title,\n description=site_description)\n\n if not mol_group:\n mol_group = MolGroup()\n\n mol_group.group_type = \"MC\"\n mol_group.target_id = target\n centre = calc_site_centre(rd_mols)\n mol_group.x_com = centre[0]\n mol_group.y_com = centre[1]\n mol_group.z_com = centre[2]\n mol_group.description = site_description\n mol_group.save()\n\n # A molecule tag record may exist already, but won't the first time the\n # target is loaded.\n\n try:\n mol_tag = MoleculeTag.objects.get(tag=site_description,\n target_id=target.id)\n except MoleculeTag.DoesNotExist:\n mol_tag = None\n\n if not mol_tag:\n # New site/tag or the tag has been deleted\n mol_tag = MoleculeTag()\n mol_tag.tag = site_description\n mol_tag.category = TagCategory.objects.get(category='Sites')\n mol_tag.target = target\n mol_tag.mol_group = mol_group\n mol_tag.save()\n else:\n # Tag already exists\n # Apart from the new mol_group and molecules, we shouldn't be\n # changing anything.\n mol_tag.mol_group = mol_group\n mol_tag.save()\n\n ids = [m.id for m in mols]\n print([a['id'] for a in mol_group.mol_id.values()])\n\n for mol_id in ids:\n if mol_id not in [a['id'] for a in mol_group.mol_id.values()]:\n logger.debug(\"mol_group mol_id=%s\", mol_id)\n this_mol = Molecule.objects.get(id=mol_id)\n mol_group.mol_id.add(this_mol)\n\n if mol_id not in [a['id'] for a in mol_tag.molecules.values()]:\n logger.debug(\"mol_tag mol_id=%s\", mol_id)\n this_mol = Molecule.objects.get(id=mol_id)\n mol_tag.molecules.add(this_mol)", "def same_group(self,i,j):\n if self.group_number(i) == self.group_number(j):\n return True\n else:\n return False", "def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n 
rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))", "def speciate_genomes(self):\n for genome in self.genomes:\n species_found = False\n\n for species in self.species:\n compatibility = genome.compatibility(species.leader)\n\n if compatibility < self.config.compatibility_threshold:\n species.add_genome(genome)\n species_found = True\n break\n\n if not species_found:\n new_species = Species(self.species_id, genome, self.config, self.breeder)\n self.species.append(new_species)\n self.species_id += 1", "def test_share_distributor(self):\n\n # =================================================================\n # test: complete test, share distribution int\n # =================================================================\n\n sell_list = [StockOrderWrapper(self.order_2)]\n sell = sell_list[0]\n buy_orders = [StockOrderWrapper(self.order_3), StockOrderWrapper(self.order_4), StockOrderWrapper(self.order_7),\n StockOrderWrapper(self.order_8), StockOrderWrapper(self.order_11),\n StockOrderWrapper(self.order_12)]\n\n result = self.bidding_round_manager.share_distributor(sell_orders=sell_list, buy_orders=buy_orders)\n\n order_real_3 = result[0]\n order_real_4 = result[1]\n order_real_7 = result[2]\n order_real_8 = result[3]\n order_real_11 = result[4]\n order_real_12 = result[5]\n\n sell_order_dict_1 = {sell: 1}\n sell_order_dict_2 = {sell: 2}\n\n order_exp_3 = StockOrderWrapper(self.order_3)\n order_exp_4 = StockOrderWrapper(self.order_4)\n order_exp_7 = StockOrderWrapper(self.order_7)\n order_exp_8 = StockOrderWrapper(self.order_8)\n order_exp_11 = StockOrderWrapper(self.order_11)\n order_exp_12 = StockOrderWrapper(self.order_12)\n\n order_exp_3.transaction_dict = sell_order_dict_1\n order_exp_4.transaction_dict = sell_order_dict_2\n order_exp_7.transaction_dict = sell_order_dict_2\n order_exp_8.transaction_dict = sell_order_dict_2\n order_exp_11.transaction_dict = sell_order_dict_2\n order_exp_12.transaction_dict = sell_order_dict_1\n\n order_exp_3.shares_left = 4\n order_exp_4.shares_left = 8\n order_exp_7.shares_left = 8\n order_exp_8.shares_left = 8\n order_exp_11.shares_left = 8\n order_exp_12.shares_left = 4\n\n self.is_equal_order_wrapper(order_real_3, order_exp_3)\n self.is_equal_order_wrapper(order_real_4, order_exp_4)\n self.is_equal_order_wrapper(order_real_7, order_exp_7)\n self.is_equal_order_wrapper(order_real_8, order_exp_8)\n self.is_equal_order_wrapper(order_real_11, order_exp_11)\n self.is_equal_order_wrapper(order_real_12, order_exp_12)\n\n # 
=================================================================\n # test: share distribution float easy\n # =================================================================\n\n sell_list = [StockOrderWrapper(self.order_2)]\n sell = sell_list[0]\n\n buy_order_3 = StockOrderWrapper(self.order_3)\n buy_order_4 = StockOrderWrapper(self.order_4)\n buy_order_7 = StockOrderWrapper(self.order_7)\n buy_order_8 = StockOrderWrapper(self.order_8)\n buy_order_11 = StockOrderWrapper(self.order_11)\n buy_order_12 = StockOrderWrapper(self.order_12)\n\n sell.shares_left = 6\n buy_order_3.shares_left = 8 # 2.4 -> 3\n buy_order_4.shares_left = 4 # 1.2 -> 1\n buy_order_7.shares_left = 4 # 1.2 -> 1\n buy_order_8.shares_left = 4 # 1.2 -> 1\n buy_order_11.shares_left = 0 # 0.0 -> 0\n buy_order_12.shares_left = 0 # 0.0 -> 0\n\n buy_orders = [buy_order_3, buy_order_4, buy_order_7, buy_order_8, buy_order_11, buy_order_12]\n\n result = self.bidding_round_manager.share_distributor(sell_orders=sell_list, buy_orders=buy_orders)\n\n result_dict = {}\n\n for res in result:\n result_dict[res.stock_order.order_id] = res\n\n try:\n # noinspection PyUnusedLocal\n dummy = result[4]\n raise AssertionError(\"IndexError exception expected\")\n except IndexError:\n pass\n\n sell_order_dict_1 = {sell: 1}\n sell_order_dict_3 = {sell: 3}\n\n order_exp_3.transaction_dict = sell_order_dict_3\n order_exp_4.transaction_dict = sell_order_dict_1\n order_exp_7.transaction_dict = sell_order_dict_1\n order_exp_8.transaction_dict = sell_order_dict_1\n\n order_exp_3.shares_left = 5\n order_exp_4.shares_left = 3\n order_exp_7.shares_left = 3\n order_exp_8.shares_left = 3\n\n self.is_equal_order_wrapper(result_dict.get(3), order_exp_3)\n self.is_equal_order_wrapper(result_dict.get(4), order_exp_4)\n self.is_equal_order_wrapper(result_dict.get(7), order_exp_7)\n self.is_equal_order_wrapper(result_dict.get(8), order_exp_8)\n\n # =================================================================\n # test: share distribution float complex (multiple float iterations)\n # =================================================================\n\n sell_list = [StockOrderWrapper(self.order_2)]\n sell = sell_list[0]\n\n buy_order_3 = StockOrderWrapper(self.order_3)\n buy_order_4 = StockOrderWrapper(self.order_4)\n buy_order_7 = StockOrderWrapper(self.order_7)\n buy_order_8 = StockOrderWrapper(self.order_8)\n buy_order_11 = StockOrderWrapper(self.order_11)\n buy_order_12 = StockOrderWrapper(self.order_12)\n\n sell.shares_left = 18\n sell.stock_order.order_status = DEFINITIVE\n\n self.order_3.order_status = DEFINITIVE\n self.order_4.order_status = DEFINITIVE\n self.order_7.order_status = DEFINITIVE\n self.order_8.order_status = DEFINITIVE\n self.order_11.order_status = DEFINITIVE\n self.order_12.order_status = DEFINITIVE\n\n buy_order_3.shares_left = 6 # 3.6 -> 4\n buy_order_4.shares_left = 4 # 2.4 -> 2\n buy_order_7.shares_left = 2 # 1.2 -> 1\n buy_order_8.shares_left = 2 # 1.2 -> 1\n buy_order_11.shares_left = 8 # 4.8 -> 5\n buy_order_12.shares_left = 8 # 4.8 -> 5\n\n buy_orders = [buy_order_3, buy_order_4, buy_order_7, buy_order_8, buy_order_11, buy_order_12]\n\n result = self.bidding_round_manager.share_distributor(sell_orders=sell_list, buy_orders=buy_orders)\n\n result_dict = {}\n\n for res in result:\n result_dict[res.stock_order.order_id] = res\n\n try:\n # noinspection PyUnusedLocal\n dummy = result[6]\n raise AssertionError(\"IndexError exception expected\")\n except IndexError:\n pass\n\n sell_order_dict_1 = {sell: 1}\n sell_order_dict_2 = {sell: 
2}\n sell_order_dict_4 = {sell: 4}\n sell_order_dict_5 = {sell: 5}\n\n order_exp_3.transaction_dict = sell_order_dict_4\n order_exp_4.transaction_dict = sell_order_dict_2\n order_exp_7.transaction_dict = sell_order_dict_1\n order_exp_8.transaction_dict = sell_order_dict_1\n order_exp_11.transaction_dict = sell_order_dict_5\n order_exp_12.transaction_dict = sell_order_dict_5\n\n order_exp_3.shares_left = 2\n order_exp_4.shares_left = 2\n order_exp_7.shares_left = 1\n order_exp_8.shares_left = 1\n order_exp_11.shares_left = 3\n order_exp_12.shares_left = 3\n\n self.is_equal_order_wrapper(result_dict.get(3), order_exp_3)\n self.is_equal_order_wrapper(result_dict.get(4), order_exp_4)\n self.is_equal_order_wrapper(result_dict.get(7), order_exp_7)\n self.is_equal_order_wrapper(result_dict.get(8), order_exp_8)\n self.is_equal_order_wrapper(result_dict.get(11), order_exp_11)\n self.is_equal_order_wrapper(result_dict.get(12), order_exp_12)\n\n # =================================================================\n # test: share distribution date (two stocks left after one float iteration)\n # =================================================================\n\n sell_list = [StockOrderWrapper(self.order_2)]\n sell = sell_list[0]\n\n buy_order_3 = StockOrderWrapper(self.order_3)\n buy_order_4 = StockOrderWrapper(self.order_4)\n buy_order_7 = StockOrderWrapper(self.order_7)\n buy_order_8 = StockOrderWrapper(self.order_8)\n buy_order_11 = StockOrderWrapper(self.order_11)\n buy_order_12 = StockOrderWrapper(self.order_12)\n\n sell.shares_left = 19\n\n buy_order_3.shares_left = 4 # 2.4 -> 2 -> 3\n buy_order_4.shares_left = 4 # 2.4 -> 2 -> 3\n buy_order_7.shares_left = 4 # 2.4 -> 2 -> 2\n buy_order_8.shares_left = 2 # 1.2 -> 1 -> 1\n buy_order_11.shares_left = 8 # 4.8 -> 5 -> 5\n buy_order_12.shares_left = 8 # 4.8 -> 5 -> 5\n\n buy_orders = [buy_order_3, buy_order_4, buy_order_7, buy_order_8, buy_order_11, buy_order_12]\n\n result = self.bidding_round_manager.share_distributor(sell_orders=sell_list, buy_orders=buy_orders)\n\n result_dict = {}\n\n for res in result:\n result_dict[res.stock_order.order_id] = res\n\n try:\n # noinspection PyUnusedLocal\n dummy = result[6]\n raise AssertionError(\"IndexError exception expected\")\n except IndexError:\n pass\n\n sell_order_dict_1 = {sell: 1}\n sell_order_dict_2 = {sell: 2}\n sell_order_dict_3 = {sell: 3}\n sell_order_dict_5 = {sell: 5}\n\n order_exp_3.transaction_dict = sell_order_dict_3\n order_exp_4.transaction_dict = sell_order_dict_3\n order_exp_7.transaction_dict = sell_order_dict_2\n order_exp_8.transaction_dict = sell_order_dict_1\n order_exp_11.transaction_dict = sell_order_dict_5\n order_exp_12.transaction_dict = sell_order_dict_5\n\n order_exp_3.shares_left = 1\n order_exp_4.shares_left = 1\n order_exp_7.shares_left = 2\n order_exp_8.shares_left = 1\n order_exp_11.shares_left = 3\n order_exp_12.shares_left = 3\n\n order_exp_11.order_status = DEFINITIVE\n order_exp_12.order_status = DEFINITIVE\n\n self.is_equal_order_wrapper(result_dict.get(3), order_exp_3)\n self.is_equal_order_wrapper(result_dict.get(4), order_exp_4)\n self.is_equal_order_wrapper(result_dict.get(7), order_exp_7)\n self.is_equal_order_wrapper(result_dict.get(8), order_exp_8)\n self.is_equal_order_wrapper(result_dict.get(11), order_exp_11)\n self.is_equal_order_wrapper(result_dict.get(12), order_exp_12)\n\n # =================================================================\n # test: share distribution int sell shares left\n # 
=================================================================\n\n sell_list = [StockOrderWrapper(self.order_2)]\n sell = sell_list[0]\n\n sell.shares_left = 100\n\n buy_order_3 = StockOrderWrapper(self.order_3)\n buy_order_4 = StockOrderWrapper(self.order_4)\n buy_order_7 = StockOrderWrapper(self.order_7)\n buy_order_8 = StockOrderWrapper(self.order_8)\n buy_order_11 = StockOrderWrapper(self.order_11)\n buy_order_12 = StockOrderWrapper(self.order_12)\n\n buy_order_3.shares_left = 4\n buy_order_4.shares_left = 7\n buy_order_7.shares_left = 9\n buy_order_8.shares_left = 4\n buy_order_11.shares_left = 8\n buy_order_12.shares_left = 3\n\n buy_orders = [buy_order_3, buy_order_4, buy_order_7, buy_order_8, buy_order_11, buy_order_12]\n\n result = self.bidding_round_manager.share_distributor(sell_orders=sell_list, buy_orders=buy_orders)\n\n order_exp_2 = StockOrderWrapper(self.order_2)\n order_exp_2.shares_left = 65\n\n self.is_equal_order_wrapper(result[0], order_exp_2)\n\n sell_order_dict_3 = {sell: 3}\n sell_order_dict_4 = {sell: 4}\n sell_order_dict_7 = {sell: 7}\n sell_order_dict_8 = {sell: 8}\n sell_order_dict_9 = {sell: 9}\n\n order_exp_3.transaction_dict = sell_order_dict_4\n order_exp_4.transaction_dict = sell_order_dict_7\n order_exp_7.transaction_dict = sell_order_dict_9\n order_exp_8.transaction_dict = sell_order_dict_4\n order_exp_11.transaction_dict = sell_order_dict_8\n order_exp_12.transaction_dict = sell_order_dict_3\n\n order_exp_3.shares_left = 0\n order_exp_4.shares_left = 0\n order_exp_7.shares_left = 0\n order_exp_8.shares_left = 0\n order_exp_11.shares_left = 0\n order_exp_12.shares_left = 0\n\n order_exp_3.order_status = PROCESSED\n order_exp_4.order_status = PROCESSED\n order_exp_7.order_status = PROCESSED\n order_exp_8.order_status = PROCESSED\n order_exp_11.order_status = PROCESSED\n order_exp_12.order_status = PROCESSED\n\n self.is_equal_order_wrapper(buy_order_3, order_exp_3)\n self.is_equal_order_wrapper(buy_order_4, order_exp_4)\n self.is_equal_order_wrapper(buy_order_7, order_exp_7)\n self.is_equal_order_wrapper(buy_order_8, order_exp_8)\n self.is_equal_order_wrapper(buy_order_11, order_exp_11)\n self.is_equal_order_wrapper(buy_order_12, order_exp_12)", "def _multiple_entity_mutated(self, mut_dat, output, variant, item, translocations, fusions, all_except):\n if self.version == \"canc\":\n variant = \"gene\"\n else:\n variant = \"DepMap_ID\"\n\n out_dict = {\"names\": lambda x: list(set(x[self.by[self.version]])), #functions for returning specific data types\n \"dataframe\": lambda x: x}\n\n if output == \"dict\":\n out = {k:mut_dat[self.by[self.version]].loc[v].unique() for k,v in mut_dat.groupby(variant).groups.items()}\n else:\n out = out_dict[output](mut_dat)\n\n return out", "def test_merge_stitches_together_geometry_collections(self):\n topology = {\n \"type\": \"Topology\",\n \"objects\": {\n \"collection\": {\n \"type\": \"GeometryCollection\",\n \"geometries\": [\n {\"type\": \"Polygon\", \"arcs\": [[0, 1]]},\n {\"type\": \"Polygon\", \"arcs\": [[-1, 2]]},\n ],\n }\n },\n \"arcs\": [\n [[1, 1], [1, 0]],\n [[1, 0], [0, 0], [0, 1], [1, 1]],\n [[1, 1], [2, 1], [2, 0], [1, 0]],\n ],\n }\n self.assertDictEqual(\n {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [\n [[[1, 0], [0, 0], [0, 1], [1, 1], [2, 1], [2, 0], [1, 0]]]\n ],\n },\n self.merge(topology, [topology[\"objects\"][\"collection\"]]),\n )", "def __make_group_by_atom(self, group_name, name_list):\r\n pass", "def test_build_reference_dupes(self):\n items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n obs1, 
obs2 = build_reference(items, 3)\n self.assertEqual(len(obs1), 3)\n self.assertEqual(len(obs2), 7)\n #check that the ref and nonref are same\n finals = set([])\n for item in obs1:\n if item in finals:\n raise AssertionError(\"Duplicate in reference!\")\n finals.add(item)\n for item in obs2:\n if item in finals:\n raise AssertionError(\"Duplicate in nonreference!\")\n finals.add(item)", "def imprint_merge_each_group():\r\n \r\n G = cubit.get_entities(\"group\")\r\n for gid in G:\r\n vid = cubit.get_group_volumes(gid)\r\n if len(vid)>1:\r\n cubit.cmd(f\"imprint vol {list_to_str(vid)}\")\r\n cubit.cmd(f\"merge vol {list_to_str(vid)}\")", "def groupSingles(self):\n #todo: last transaction is lost\n group = []\n singles = self.unAttachedTrans(None)\n propTrade = self.proposeTrade(group)\n propTrade.symbol = singles[0].symbol\n itr = 0\n openTrades = self.getOpenTrades(None)\n if openTrades:\n openTrade = openTrades[itr]\n trend = False\n else:\n trend = True\n\n for t in singles:\n if propTrade.isReal():\n if TradeGroup.tranBelong(propTrade, group, t):\n group.append(t)\n else:\n self.addProposal(propTrade, group)\n group = []\n propTrade = self.proposeTrade(group)\n else:\n if not trend and TradeGroup.tranBelong(openTrade, [], t):\n self.addProposal(propTrade, group)\n group = [t]\n propTrade = openTrade\n else:\n itr += 1\n if itr < len(openTrades):\n openTrade = openTrades[itr]\n else:\n trend = True\n if TradeGroup.tranBelong(propTrade, group, t):\n group.append(t)\n propTrade.addTransaction(t, False)\n propTrade.setPlan()\n else:\n self.addProposal(propTrade, group)\n group = [t]\n propTrade = self.proposeTrade(group)\n self.addProposal(propTrade, group)", "def atomisticSphere (flag, filin, filout, max_distance = 15, analysis = 1, atom_central = \"mean_point\", debug = 1):\n \n list_atom_pocket = parsePDB.loadCoordSectionPDB(filin)\n dico_stock_count = tool.generateStructCompositionAtomistic (max_distance, 3)\n \n if atom_central == \"mean_point\" : \n central_point = generateMeansPointPocket (list_atom_pocket)\n # else append barycenter pocket calculated by RADI\n \n for atom in list_atom_pocket : \n distance = parsePDB.distanceTwoatoms(central_point, atom)\n # print distance\n element = atom[\"element\"]\n name_atom = atom[\"name\"]\n residue = tool.transformAA(atom[\"resName\"])\n \n for distance_key in dico_stock_count.keys() : \n if distance <= distance_key or distance > max_distance : \n dico_stock_count [distance_key] [\"atom\"] = dico_stock_count [distance_key] [\"atom\"] + 1\n if element == \"C\" : \n dico_stock_count [distance_key] [\"carbon\"] = dico_stock_count [distance_key] [\"carbon\"] + 1\n elif element == \"N\" : \n dico_stock_count [distance_key] [\"nitrogen\"] = dico_stock_count [distance_key] [\"nitrogen\"] + 1\n elif element == \"S\" : \n dico_stock_count [distance_key] [\"sulfur\"] = dico_stock_count [distance_key] [\"sulfur\"] + 1\n elif element == \"O\" : \n dico_stock_count [distance_key] [\"oxygen\"] = dico_stock_count [distance_key] [\"oxygen\"] + 1\n elif element == \"H\" : \n dico_stock_count [distance_key] [\"hydrogen\"] = dico_stock_count [distance_key] [\"hydrogen\"] + 1\n \n if residue in dico_Hacceptor.keys () : \n if name_atom in dico_Hacceptor[residue] : \n dico_stock_count [distance_key] [\"hbond_acceptor\"] = dico_stock_count [distance_key] [\"hbond_acceptor\"] + 1\n \n if residue in dico_atom_Car : \n if name_atom in dico_atom_Car[residue] : \n dico_stock_count [distance_key] [\"aromatic\"] = dico_stock_count [distance_key] [\"aromatic\"] + 1\n \n if 
residue in dico_atom_hydrophobic : \n if name_atom in dico_atom_hydrophobic[residue] : \n dico_stock_count [distance_key] [\"hydrophobic\"] = dico_stock_count [distance_key] [\"hydrophobic\"] + 1\n \n if residue in dico_atom_Carg : \n if name_atom in dico_atom_Carg[residue] : \n dico_stock_count [distance_key] [\"alcool\"] = dico_stock_count [distance_key] [\"alcool\"] + 1\n \n \n if residue in dico_Hdonor.keys () : \n if name_atom in dico_Hdonor[residue] : \n dico_stock_count [distance_key] [\"hbond_donor\"] = dico_stock_count [distance_key] [\"hbond_donor\"] + 1\n \n if name_atom == \"CA\" or name_atom == \"O\" or name_atom == \"C\" or name_atom == \"N\" or name_atom == \"H\" or name_atom == \"HA\" :\n dico_stock_count [distance_key] [\"main_chain\"] = dico_stock_count [distance_key] [\"main_chain\"] + 1\n else : \n dico_stock_count [distance_key] [\"side_chain\"] = dico_stock_count [distance_key] [\"side_chain\"] + 1\n \n for distance_key in dico_stock_count.keys () : \n nb_atom = float(dico_stock_count [distance_key] [\"atom\"])\n if nb_atom == 0 : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n \n else : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + str(nb_atom) + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + str (dico_stock_count [distance_key] [\"side_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"main_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"sulfur\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"carbon\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"nitrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"oxygen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_acceptor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] 
[\"hbond_donor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"alcool\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrophobic\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"aromatic\"] / nb_atom) + \"\\n\")", "def test_partially_one(self):\n setups = self.get_setup().decompress([\"dimensions.species_id\"])\n assert isinstance(setups, PlotSetupGroup)\n assert all(isinstance(setup, PlotSetup) for setup in setups)\n assert len(setups) == 2\n res = {\n (\n s.panels.collect_equal(\"dimensions\").variable,\n s.panels.collect_equal(\"dimensions\").species_id,\n s.panels.collect_equal(\"dimensions\").time,\n )\n for s in setups\n }\n sol = {\n ((\"dry_deposition\", \"wet_deposition\"), 1, (1, 2, 3)),\n ((\"dry_deposition\", \"wet_deposition\"), 2, (1, 2, 3)),\n }\n assert res == sol", "def sub_graph_merging(self):", "def test_add_lone_pairs_by_atom_valance(self):\n adj1 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 S u0 p2 c0 {1,S} {3,S}\n3 H u0 p0 c0 {2,S}\"\"\"\n mol1 = Molecule().from_adjacency_list(adjlist=adj1)\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), '[N]S')\n mol1.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), 'N#S')\n\n adj2 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 N u0 p1 c0 {1,S} {3,S} {4,S}\n3 H u0 p0 c0 {2,S}\n4 H u0 p0 c0 {2,S}\"\"\"\n mol2 = Molecule().from_adjacency_list(adjlist=adj2)\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N]N')\n mol2.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N-]=[NH2+]')\n\n adj3 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S}\n2 C u0 p0 c0 {1,S} {3,S} {8,S} {9,S}\n3 C u2 p0 c0 {2,S} {4,S}\n4 H u0 p0 c0 {3,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {1,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {2,S}\"\"\"\n mol3 = Molecule().from_adjacency_list(adjlist=adj3)\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_smiles(), '[CH]CC')\n mol3.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_adjacency_list(), \"\"\"1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}\n3 C u0 p1 c0 {1,S} {9,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {3,S}\n\"\"\")\n\n adj4 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {4,S} {5,S} {6,S}\n2 C u0 p0 c0 {1,S} {3,S} {7,S} {8,S}\n3 N u2 p1 c0 {2,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\"\"\"\n mol4 = Molecule().from_adjacency_list(adjlist=adj4)\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_smiles(), 'CC[N]')\n mol4.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_adjacency_list(), \"\"\"1 N u0 p2 c0 {3,S}\n2 C u0 p0 c0 {3,S} {4,S} {5,S} {6,S}\n3 C u0 p0 c0 {1,S} {2,S} {7,S} {8,S}\n4 H u0 p0 c0 {2,S}\n5 H u0 p0 c0 {2,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {3,S}\n8 H u0 p0 c0 {3,S}\n\"\"\")", "def group_handling(existing_uuids: Set[str]) -> None:", "def test_partially_update_device_group_by_id1(self):\n pass", "def generate_equivalent_content(self, base_content, 
base_osm):\n\t\tbase_product = base_content.product\n\t\tquantity = base_content.quantity\n\t\t# First, looking for a match\n\t\tmatch = base_product.productmatch_set.all()\n\t\tif len(match)>0:\n\t\t\tmatch = match[0]\n\t\t\tmathed_product = getattr(match, self.cart.osm+'_product') # Evil hack!! Or is it? I love Python :D\n\t\t\tif mathed_product is not None:\n\t\t\t\tequivalent_quantity = quantity # Same product, no need to generate equivalent quantity\n\t\t\t\tmatch_content = self.add_product(mathed_product, equivalent_quantity, is_user_added = False, is_match = True, is_suggested = False)\n\t\t\t\tsetattr(match_content, base_content.cart.osm+'_content', base_content)\n\t\t\t\tsetattr(base_content, match_content.cart.osm+'_content', match_content)\n\t\t\t\tequivalent_content = {\n\t\t\t\t\t'content': match_content,\n\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t'is_match': True,\n\t\t\t\t\t'is_suggested': False\n\t\t\t\t}\n\n\t\t\t\ttry:\n\t\t\t\t\tbase_content.save()\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\n\t\t\t\ttry:\n\t\t\t\t\tmatch_content.save()\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\n\t\t\t\t# print '\\tMatch : '+mathed_product.url\n\t\t\telse:\n\t\t\t\t# Look for similarities\n\t\t\t\tsimilarities = self.get_similarites(base_product, base_osm)\n\t\t\t\tif(len(similarities)>0):\n\t\t\t\t\t# Generate proper quantity\n\t\t\t\t\tequivalent_quantity = self.generate_equivalent_quantity(base_product, similarities[0][0], quantity)\n\t\t\t\t\tsim_content = self.add_product(similarities[0][0], equivalent_quantity, is_user_added = False, is_match = False, is_suggested = True, osm = base_osm)\n\t\t\t\t\tif sim_content is not None:\n\t\t\t\t\t\tsetattr(sim_content, base_content.cart.osm+'_content', base_content)\n\t\t\t\t\t\tsetattr(base_content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\t\tequivalent_content = {\n\t\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tbase_content.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\t\t\t\t\telse:\n\t\t\t\t\t\tequivalent_content = {\n\t\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tequivalent_content = {\n\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\n\n\t\telse:\n\t\t\t# Look for similarities\n\t\t\tsimilarities = self.get_similarites(base_product, base_osm)\n\t\t\tif(len(similarities)>0):\n\t\t\t\t# TODO : find proper quantity\n\t\t\t\tequivalent_quantity = self.generate_equivalent_quantity(base_product, similarities[0][0], quantity)\n\t\t\t\tsim_content = self.add_product(similarities[0][0], equivalent_quantity, is_user_added = False, is_match = False, is_suggested = True, osm = base_osm)\n\t\t\t\tif sim_content is not None:\n\t\t\t\t\tsetattr(sim_content, base_content.cart.osm+'_content', base_content)\n\t\t\t\t\tsetattr(base_content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\tequivalent_content = {\n\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': 
True\n\t\t\t\t\t}\n\t\t\t\t\ttry:\n\t\t\t\t\t\tbase_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\t\t\t\telse:\n\t\t\t\t\tequivalent_content = {\n\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\n\t\t\telse:\n\t\t\t\tequivalent_content = {\n\t\t\t\t\t'content': None,\n\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t'is_match': False,\n\t\t\t\t\t'is_suggested': True\n\t\t\t\t}\n\n\t\treturn equivalent_content", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def already_exists(sub, subs):\n exists = False\n em = iso.categorical_edge_match('label', 1)\n #nm = iso.categorical_node_match('type', \"edu\")\n for compsub in subs:\n if nx.is_isomorphic(compsub, sub, edge_match=em):\n exists = True\n break\n return exists", "def test_ungrouping(self):\n n = self.create(NodeItem, UML.Node)\n c = self.create(ComponentItem, UML.Component)\n\n self.group(n, c)\n self.ungroup(n, c)\n\n assert 0 == len(n.subject.ownedAttribute)\n assert 0 == len(c.subject.ownedAttribute)\n assert 0 == len(self.kindof(UML.Property))\n assert 0 == len(self.kindof(UML.Connector))\n assert 0 == len(self.kindof(UML.ConnectorEnd))", "def visible_objects_and_duplis():\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()", "def test_multiple_meshes(self):\n\n wrapper = Wrapper()\n wrapper.add_dataset(self.mesh)\n mesh2 = self.mesh\n mesh2.name = \"mesh2\"\n wrapper.add_dataset(mesh2)\n mesh_inside_wrapper1 = wrapper.get_dataset(self.mesh.name)\n mesh_inside_wrapper2 = wrapper.get_dataset(mesh2.name)\n\n self.assertEqual(\n sum(1 for _ in mesh_inside_wrapper1.iter(item_type=CUBA.POINT)),\n sum(1 
for _ in mesh_inside_wrapper2.iter(item_type=CUBA.POINT)))", "def component_clone ( same ) : \n if isinstance ( same , str ) \\\n and same.strip().lower() in ( 'clone' , 'cloned' , 'same' ) : return True \n return False", "def test_partially_update_device_group_by_id(self):\n pass", "def test_check_for_existing_reaction_eliminates_identical_reactions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction')", "def test_split_orphan(self):\r\n orphans = self.split_mongo.get_orphans(self.split_course_key)\r\n self.assertEqual(len(orphans), 3, \"Wrong # {}\".format(orphans))\r\n location = self.split_course_key.make_usage_key('chapter', 'OrphanChapter')\r\n self.assertIn(location, orphans)\r\n location = self.split_course_key.make_usage_key('vertical', 'OrphanVert')\r\n self.assertIn(location, orphans)\r\n location = self.split_course_key.make_usage_key('html', 'OrphanHtml')\r\n self.assertIn(location, orphans)", "def test_ungrouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n self.ungroup(n1, n2)\n\n assert n2.subject not in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode", "def test_no_shared_transformations():\n sdata = blobs()\n element_name = \"blobs_image\"\n test_space = \"test\"\n set_transformation(sdata.images[element_name], Identity(), to_coordinate_system=test_space)\n\n gen = sdata._gen_elements()\n for _, name, obj in gen:\n if name != element_name:\n assert test_space not in get_transformation(obj, get_all=True)\n else:\n assert test_space in get_transformation(obj, get_all=True)", "def test_distinct(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n for pkg in pkgs:\n self.db.save(pkg)\n saved_pkgs = self.db.distinct()\n\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))", "def check_comm(instance):\n\n comm = instance.__dict__.get(\"commodity\")\n\n accounted_comm = set()\n\n for c in comm:\n\n for r in instance.reactions:\n r_dict = r.__dict__\n\n for label, species in r_dict.items():\n\n if instance.__dict__.get(\"recombination\") == Recomb_1:\n product = r_dict.get(\"left2\")\n\n else:\n product = r_dict.get(\"right2\")\n\n if product == c:\n accounted_comm.add(c)\n\n if set(comm) == accounted_comm:\n return True\n else:\n print(\"Commodity:\", set(comm))\n print(\"Commodity products 
made:\", accounted_comm)\n return False", "def testSynonymDuplicate(self):\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\t\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\t\n\t\t\t\tsyn2 = spinner.Synonym.objects.get_single(two, one, True)\n\n\t\t\t\tassert syn == syn2\n\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()", "def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)", "def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s", "def test_register_duplicate_group(self):\n self._storage.register_group(\"group1\", [])\n with self.assertRaises(DuplicateGroupException):\n self._storage.register_group(\"group1\", [])", "def _check_sn_uniqueness(self):\n if self.product_tracking == 'serial' and self.lot_producing_id:\n sml = self.env['stock.move.line'].search_count([\n ('lot_id', '=', self.lot_producing_id.id),\n ('location_id.usage', '=', 'production'),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ])\n if sml:\n raise UserError(_('This serial number for product %s has already been produced', self.product_id.name))\n\n for move in self.move_finished_ids:\n if move.has_tracking != 'serial' or move.product_id == self.product_id:\n continue\n for move_line in move.move_line_ids:\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for byproduct %(product_name)s has already been produced',\n number=move_line.lot_id.name,\n product_name=move_line.product_id.name)\n co_prod_move_lines = self.move_finished_ids.move_line_ids.filtered(lambda ml: ml.product_id != self.product_id)\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_dest_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)\n\n for move in self.move_raw_ids:\n if move.has_tracking != 'serial':\n continue\n for move_line in move.move_line_ids:\n if float_is_zero(move_line.qty_done, precision_rounding=move_line.product_uom_id.rounding):\n continue\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for component %(component)s has already been consumed',\n 
number=move_line.lot_id.name,\n component=move_line.product_id.name)\n co_prod_move_lines = self.move_raw_ids.move_line_ids\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_dest_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)", "def test_enlarge_1_add_nonreactive_species(self):\n m0 = Molecule(smiles='[He]')\n spc0 = self.rmg.reaction_model.make_new_species(m0, label='He', reactive=False)[0]\n self.rmg.reaction_model.enlarge(spc0)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 1)\n self.assertFalse(self.rmg.reaction_model.core.species[0].reactive)", "def duplicate(self, other):\n return (self.uas_position.duplicate(other.uas_position) and\n self.uas_heading == other.uas_heading)", "def test_usearch_handles_unions(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n # Should detect and remove chimeric sequence based\r\n # during ref based detection\r\n\r\n exp_otu_ids = ['0', '1', '2']\r\n\r\n # will retain 'chimera' with union option.\r\n exp_clusters = [['Solemya', 'Solemya_seq2'], ['chimera'],\r\n ['usearch_ecoli_seq', 'usearch_ecoli_seq2']\r\n ]\r\n\r\n app = UsearchReferenceOtuPicker(\r\n params={'save_intermediate_files': False,\r\n 'db_filepath':\r\n self.tmp_ref_database,\r\n 'output_dir': self.temp_dir,\r\n 'remove_usearch_logs': True,\r\n 'reference_chimera_detection':\r\n True,\r\n 'de_novo_chimera_detection':\r\n True,\r\n 'cluster_size_filtering':\r\n False,\r\n 'minlen': 12,\r\n 'w': 12,\r\n 'minsize': 1,\r\n 'percent_id': 0.97,\r\n 'percent_id_err': 0.97,\r\n 'abundance_skew': 2,\r\n 'chimeras_retention': 'union'\r\n })\r\n\r\n obs = app(self.tmp_seq_filepath2, self.tmp_otu_ref_database)\r\n\r\n obs_otu_ids = sorted(obs.keys())\r\n obs_clusters = sorted(obs.values())\r\n # The relation between otu ids and clusters is abitrary, and\r\n # is not stable due to use of dicts when parsing clusters -- therefore\r\n # just checks that we have the expected group of each\r\n self.assertEqual(obs_otu_ids, exp_otu_ids)\r\n self.assertEqual(obs_clusters, exp_clusters)", "def complete_material_equivalences(self):\n for material in self.materials:\n material.geu = self\n for material_aux in self.materials:\n material.equivalent_materials.add(material_aux)", "def test_combine(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n fc1.merge(fc2)\n name = 'Weird Disjoint Regions'\n combined = fc1.combine(name)\n assert len(combined.features) == 1\n self.check_feature(combined.features[0], expected_name=name,\n expected_type='MultiPolygon')", "def set_equivalent_cart(self, base_cart):\n\t\t# Emptying cart\n\t\tself.empty()\n\n\t\t# Getting base cart content\n\t\tcontents = base_cart.cart_content_set.all()\n\t\tbase_osm = base_cart.osm\n\t\tequivalence_store = {}\n\n\t\tfor content in contents:\n\t\t\tbase_product = 
content.product\n\t\t\tquantity = content.quantity\n\n\t\t\t# First, looking for a match\n\t\t\tmatch = base_product.productmatch_set.all()\n\t\t\tif len(match)>0:\n\t\t\t\tmatch = match[0]\n\t\t\t\tmathed_product = getattr(match, self.cart.osm+'_product') # Evil hack!! Or is it? I love Python :D\n\t\t\t\tif mathed_product is not None:\n\t\t\t\t\tmatch_content = self.add_product(mathed_product, quantity, is_user_added = False, is_match = True, is_suggested = False)\n\t\t\t\t\tsetattr(match_content, content.cart.osm+'_content', content)\n\t\t\t\t\tsetattr(content, match_content.cart.osm+'_content', match_content)\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': match_content,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': True,\n\t\t\t\t\t\t'is_suggested': False\n\t\t\t\t\t}\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontent.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmatch_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\n\t\t\t\t\t# print '\\tMatch : '+mathed_product.url\n\t\t\t\telse:\n\t\t\t\t\t# Look for similarities\n\t\t\t\t\tsimilarities = self.get_similarites(base_product, base_osm)\n\t\t\t\t\tif(len(similarities)>0):\n\t\t\t\t\t\tsim_content = self.add_product(similarities[0][0], quantity, is_user_added = False, is_match = False, is_suggested = True)\n\t\t\t\t\t\tsetattr(sim_content, content.cart.osm+'_content', content)\n\t\t\t\t\t\tsetattr(content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tcontent.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\t\t\t\t\telse:\n\t\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\n\n\t\t\telse:\n\t\t\t\t# Look for similarities\n\t\t\t\tsimilarities = self.get_similarites(base_product, base_osm)\n\t\t\t\tif(len(similarities)>0):\n\t\t\t\t\tsim_content = self.add_product(similarities[0][0], quantity, is_user_added = False, is_match = False, is_suggested = True)\n\t\t\t\t\tsetattr(sim_content, content.cart.osm+'_content', content)\n\t\t\t\t\tsetattr(content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontent.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\t\t\t\telse:\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\n\n\t\treturn equivalence_store", "def test_distinct(self):\n pkgs = [\n make_package(factory=DynamoPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self._save_pkgs(*pkgs)\n 
saved_pkgs = self.db.distinct()\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))", "def makeGuiMultiAtomSet(residue,multiGuiName,guiSetsNames,elementSymbol,mappingType,chemAtomSet):\n \n if \"|\" in multiGuiName:\n return\n \n residueMapping = getResidueMapping(residue) \n molType = residue.molResidue.molType\n for guiName in guiSetsNames:\n atomSetMapping = residueMapping.findFirstAtomSetMapping(name=makeGuiName(guiName, elementSymbol, molType))\n if atomSetMapping is None:\n print \"Non-existent group error in makeGuiMultiAtomSet for\", residue.molResidue.ccpCode, residue.seqCode, guiName\n return\n #atomSet = atomSetMapping.atomSets[0]\n chemAtomSet1 = atomSetMapping.chemAtomSet\n \n for guiName2 in guiSetsNames:\n atomSetMapping2 = residueMapping.findFirstAtomSetMapping(name=makeGuiName(guiName2, elementSymbol, molType))\n if atomSetMapping2 is None:\n print \"Non-existent group error in makeGuiMultiAtomSet for\", residue.molResidue.ccpCode\n return\n #atomSet = atomSetMapping2.atomSets[0]\n chemAtomSet2 = atomSetMapping2.chemAtomSet\n if chemAtomSet2 and chemAtomSet1:\n if chemAtomSet1.isProchiral != chemAtomSet2.isProchiral:\n print \"Prochiratity error in makeGuiMultiAtomSet for\", residue.molResidue.ccpCode\n return\n if chemAtomSet1.isEquivalent != chemAtomSet2.isEquivalent:\n print \"Equivalent error in makeGuiMultiAtomSet for \", residue.molResidue.ccpCode\n return\n\n atomSets = []\n for guiName in guiSetsNames:\n name0 = makeGuiName(guiName, elementSymbol, molType)\n atomSetSerials = residueMapping.findFirstAtomSetMapping(name=name0).atomSetSerials\n for atom in residue.atoms:\n atomSet = atom.atomSet\n if atomSet:\n if atomSet.serial in atomSetSerials and atomSet not in atomSets:\n atomSets.append(atomSet)\n break\n \n if not residueMapping.findFirstAtomSetMapping(name=multiGuiName):\n atomSetMapping = makeAtomSetMapping(residueMapping, multiGuiName, atomSets,\n chemAtomSet, mappingType)\n \n return atomSetMapping", "def _check_group(self):\n if len(self.groups) != 2:\n raise ValueError(\"There have to be two groups!\")\n\n # Check the number of atoms in each group is the same\n n_group1 = 0\n for key, value in self.groups[0].items():\n n_group1 += value\n\n n_group2 = 0\n for key, value in self.groups[1].items():\n n_group2 += value\n\n if n_group1 != n_group2:\n f1 = self._group2formula(self.groups[0])\n f2 = self._group2formula(self.groups[1])\n msg = \"The two groups have to have the same number of atoms.\\n\"\n msg += \"Group 1: {} Group 2: {}\".format(f1, f2)\n raise ValueError(msg)", "def _merge_tags_into(self, target):\n for tag in self.tags.all():\n if target.tags.filter(tag=tag.tag).exists():\n tag.delete()\n else:\n tag.assignment_group = target\n tag.save()", "def __check_for_duplicates(self, point) -> bool:\n # Check all already published (active) dirt objects (stored and received from the goal_list)\n for dirt in self.active_dirt_list:\n if self.__comparing_points(point, dirt.pose.position):\n return True\n return False", "def test_ungrouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n self.group(s, uc2)\n\n self.ungroup(s, uc1)\n assert 0 == len(uc1.subject.subject)\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(1, len(s.subject.useCase))\n\n self.ungroup(s, uc2)\n assert 0 == len(uc2.subject.subject)\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(0, 
len(s.subject.useCase))", "def __group_alt_atoms__(self, atoms):\n def ordering_key(atoms):\n return atoms[0].alt_id\n alt_ids = coll.defaultdict(list)\n for atom in atoms:\n alt_ids[atom.alt_id].append(atom)\n\n if len(alt_ids) == 1:\n return list(alt_ids.values())\n\n if None in alt_ids:\n common = alt_ids.pop(None)\n for alt_id, specific_atoms in list(alt_ids.items()):\n for common_atom in common:\n copied = copy.deepcopy(common_atom)\n copied.alt_id = alt_id\n specific_atoms.append(copied)\n\n return sorted(list(alt_ids.values()), key=ordering_key)", "def test_equality(self):\n\n for name in TEST_NAMES:\n self.colorspace.setEqualityGroup(name)\n self.assertEqual(name, self.colorspace.getEqualityGroup())", "def test_4_sgops(self):\n from celib.symmetry_module import get_spacegroup\n result = get_spacegroup(self.lattice, self.atomtypes, self.basis, True, 1e-10)\n #The FCC lattice has 48 point group operations; we want to check each of them\n #against the correct values.\n self.assertEqual(result.sg_op.shape, (3,3,48))\n #We have to load the correct symmetry operations from file to check against.\n sgops = path.join(self.fdir, \"sgops.dat\")\n with open(sgops) as f:\n lines = f.readlines()\n model = self._read_sgopgs(lines, 48)\n\n for i in range(48):\n self.assertTrue(allclose(model[i], result.sg_op[:,:,i]),\n \"\\n{}\\n{}\\n\\n{}\".format(i, model[i], result.sg_op[:,:,i]))", "def group_group_collide(sprite_group, o_sprite_group):\n sprites = set(sprite_group)\n for sprite in sprites:\n if group_collide(o_sprite_group, sprite):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def uploaded_by_group(self) -> bool:\n return self.user_id == 100", "def test_duplicatesMerged(self):\n def feature(s):\n return self.project.child(s + '.feature')\n feature('5').copyTo(feature('15'))\n feature('5').copyTo(feature('16'))\n\n self.builder.build(\n self.project, self.project.child('NEWS'),\n 'Project Name 5.0')\n\n self.assertEquals(\n self.project.child('NEWS').getContent(),\n 'Project Name 5.0\\n'\n '================\\n'\n '\\n'\n 'Features\\n'\n '--------\\n'\n ' - We now support the web. (#5, #15, #16)\\n'\n ' - The widget is more robust. (#12)\\n'\n '\\n'\n 'Bugfixes\\n'\n '--------\\n'\n ' - Broken stuff was fixed. (#23)\\n'\n '\\n'\n 'Improved Documentation\\n'\n '----------------------\\n'\n ' - foo.bar.Baz.quux (#40)\\n'\n ' - writing Foo servers (#41)\\n'\n '\\n'\n 'Deprecations and Removals\\n'\n '-------------------------\\n'\n ' - Stupid stuff was deprecated. 
(#25)\\n'\n '\\n'\n 'Other\\n'\n '-----\\n'\n ' - #30, #35\\n'\n '\\n\\n'\n 'Here is stuff which was present previously.\\n')", "def test_parent_with_iterables(self):\n def makeCubesAndGrp():\n cmds.file(new=1, f=1)\n cubes = []\n for x in range(10):\n cubes.append(pm.polyCube()[0])\n group = pm.group(empty=True)\n return cubes, group\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[:4], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[:4] + [group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], cubes[2], cubes[3], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], [cubes[2], cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent([cubes[0], cubes[1]], cubes[2], [cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)", "def group_collide(sprite_group, other_object):\n sprites = set(sprite_group)\n for sprite in sprites:\n if sprite.collide(other_object):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n yield (obj, obj.matrix_world.copy())\n\n if obj.instance_type != 'NONE':\n obj.dupli_list_create(scene)\n for dob in obj.dupli_list:\n obj_dupli = dob.object\n if obj_dupli.type == 'MESH':\n yield (obj_dupli, dob.matrix.copy())\n\n obj.dupli_list_clear()", "def dupeIt(*args):\n\n sel=cmds.ls(sl=True, type=\"transform\", l=True)\n inputs = cmds.radioButtonGrp(\"inputsRBG\", q=True, sl=True)\n if sel:\n base=sel[0]\n if len(sel)>1:\n objs=sel[1:]\n transforms = {}\n x=0\n\n for obj in objs:\n #get pos, rot, scale\n pos = cmds.xform(obj, ws=True, q=True, t=True)\n rot = cmds.xform(obj, ws=True, q=True, ro=True)\n scal = cmds.getAttr(\"%s.scale\"%obj)[0]\n transforms[x] = [pos, rot, scal]\n\n #delete the obj\n cmds.delete(obj)\n x=x+1\n\n for key in transforms.keys():\n if inputs == 1:\n dupe = cmds.duplicate(base)[0]\n elif inputs == 3:\n dupe = cmds.duplicate(base, un=True, rr=True)[0]\n elif inputs == 2:\n dupe = cmds.duplicate(base, ic=True)[0]\n print dupe\n cmds.xform(dupe, ws=True, t=transforms[key][0])\n cmds.xform(dupe, ws=True, ro=transforms[key][1])\n cmds.setAttr(\"%s.scale\"%dupe, transforms[key][2][0], transforms[key][2][1], transforms[key][2][2])\n\n#TODO - checkbox to copy inputs on orig objects to corresponding inputs on top level of duplicates\n\n else:\n cmds.warning(\"You need to select more than one object in order to swap!\")\n else:\n cmds.warning(\"Please select some transform nodes to dupe!\")", "def subgroup(self, gens):\n\n if not all(g in self for g in gens):\n raise ValueError(\"The group does not contain the supplied generators\")\n\n G = PermutationGroup(gens)\n return G", "def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n 
self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))", "def test_isomorphism_match(data):\n\n reference = data.draw(ISO_BUILDER)\n nodes = data.draw(st.sets(st.sampled_from(list(reference.nodes)),\n max_size=len(reference)))\n graph = reference.subgraph(nodes)\n\n note((\"Reference nodes\", reference.nodes(data=True)))\n note((\"Reference edges\", reference.edges))\n note((\"Graph nodes\", graph.nodes(data=True)))\n note((\"Graph edges\", graph.edges))\n\n node_match = nx.isomorphism.categorical_node_match('element', None)\n matcher = nx.isomorphism.GraphMatcher(reference, graph, node_match=node_match)\n expected = make_into_set(matcher.subgraph_isomorphisms_iter())\n found = make_into_set(vermouth.graph_utils.isomorphism(reference, graph))\n\n note((\"Found\", found))\n note((\"Expected\", expected))\n\n if not expected:\n event(\"Not subgraphs\")\n if found == expected:\n event(\"Exact match\")\n\n assert found <= expected", "def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)", "def link_stone(self, index):\n stone = self.get(index)\n # Create a group of a single stone first,\n # this way every stone will have a group\n group = Group(self)\n group.add_member(index)\n stone.group = group\n # Add this new group to the list of all groups\n self.groups.append(group)\n # Now check for contiguous groups\n contiguous = self.contiguous_groups(index)\n # Add the single stone group to the list\n contiguous.append(group)\n # Link all of these together\n self.link_groups(contiguous)", "def makeResidueAtomSets(residue, aromaticsEquivalent=True):\n \n getResidueMapping(residue)\n \n equivalent = {}\n elementSymbolDict = {}\n nonequivalent = {}\n multiSet = {}\n chemAtomSetDict = {}\n inMultiSet = {}\n molType = residue.molResidue.molType\n \n for atom in residue.atoms: \n chemAtom = atom.chemAtom\n chemAtomSetDict[atom] = chemAtom\n elementSymbol = chemAtom.elementSymbol\n chemAtomSet = chemAtom.chemAtomSet\n\n if chemAtomSet is None:\n name = chemAtom.name\n makeAtomSet(name,(atom,),None,'simple')\n \n else:\n name = chemAtomSet.name\n elementSymbolDict[name] = elementSymbol\n chemAtomSetDict[name] = chemAtomSet\n if chemAtomSet.isEquivalent:\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and atom.atomSet and (len(atom.atomSet.atoms) > 1):\n # aromatic rotation prev set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and (not atom.atomSet) and aromaticsEquivalent:\n # aromatic rotation to be set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n else:\n if nonequivalent.get(name) is None:\n nonequivalent[name] = []\n nonequivalent[name].append(atom)\n \n if chemAtomSet.chemAtomSet is not None:\n multiName = chemAtomSet.chemAtomSet.name\n chemAtomSetDict[multiName] = chemAtomSet.chemAtomSet\n elementSymbolDict[multiName] = elementSymbol\n if multiSet.get(multiName) is None:\n multiSet[multiName] = 
{}\n multiSet[multiName][name] = 1\n inMultiSet[name] = multiName\n\n for groupName in equivalent.keys():\n atoms = equivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if len(atoms)==2:\n # not enough atoms for multi sets!\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n else:\n if inMultiSet.get(groupName):\n # e.g. for Val Hg1*\n makeAtomSet(groupName,atoms,chemAtomSet,'stereo')\n \n else:\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n\n for groupName in nonequivalent.keys():\n atoms = nonequivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n atomSetNames = []\n \n if len(atoms) == 1:\n atom = atoms[0]\n # not enough atoms for prochiral. Corrupt ChemComp\n makeAtomSet(atom.name, atoms, None, 'simple')\n continue\n \n for atom in atoms:\n name = chemAtomSetDict[atom].name\n makeAtomSet(name,(atom,),chemAtomSet,'stereo')\n atomSetNames.append(name)\n\n for n, atom in enumerate(atoms):\n \n #name = chemAtomSetDict[atom].name\n #name2 = makeNonStereoName(molType, name, n)\n # Shouldn't have to do this if non-equiv groups have paired names\n \n name2 = makeNonStereoName(molType, '%s%d' % (chemAtomSet.name[:-1], n), n)\n \n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n\n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)\n\n for groupName in multiSet.keys():\n atomSetNames = multiSet[groupName].keys()\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if \"|\" in groupName:\n # we don't do these pseudoatoms in Analysis\n continue\n\n # e.g. for Val Hga*\n for n, atomSetName in enumerate(atomSetNames):\n name2 = makeNonStereoName(molType, atomSetName, n)\n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n \n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)", "def test_make_new_species(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]')]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs))\n self.assertEquals(len(cerm.index_species_dict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]'),\n Species().from_smiles('CC')] # duplicate species\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs) - 1)\n self.assertEquals(len(cerm.index_species_dict), len(spcs) - 1)", "def test_duplicate_ids():\n assert query_row(db_conf, 'osm_buildings', 51001)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51001)['type'] == 'mp'\n assert query_row(db_conf, 'osm_buildings', 51011)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51011)['type'] == 'mp'", "def isduplicate(self, a, b):\n open(self.mybib, 'w').write(a)\n open(self.otherbib, 'w').write(b)\n res = sp.call('papers add {} --bibtex {} --update-key --mode r --debug'.format(self.otherbib, self.mybib), shell=True)\n return res != 0", "def _do_merge(ext, exts_other):\n for ext_other in exts_other:\n if not ext.is_duplicate(ext_other):\n return False\n return True", "def contract_skeletons_multi(\n g_multi: 
nx.MultiDiGraph, skeleton_groups: Iterable[Iterable[Skeleton]]\n) -> nx.MultiDiGraph:\n skid_mapping = dict()\n for group in skeleton_groups:\n if not isinstance(group, SkeletonGroup):\n group = SkeletonGroup(group)\n for skel in group:\n skid_mapping[skel.id] = group\n\n g_contracted = nx.MultiDiGraph()\n g_contracted.graph.update(deepcopy(g_multi.graph))\n\n for node, data in g_multi.nodes(data=True):\n group = skid_mapping.get(node)\n if group:\n g_contracted.add_node(\n group.id, skeleton_group=group, skeleton=None, obj=group\n )\n else:\n g_contracted.add_node(node, **data)\n\n for pre_skid, post_skid, data in g_multi.edges(data=True):\n pre_id = pre_skid if pre_skid not in skid_mapping else skid_mapping[pre_skid].id\n post_id = (\n post_skid if post_skid not in skid_mapping else skid_mapping[post_skid].id\n )\n\n crossings = [\n Crossing.from_sides(\n g_contracted.node[pre_id][\"obj\"].side,\n g_contracted.node[post_id][\"obj\"].side,\n )\n ]\n existing_crossing = data.get(\"crossing\")\n if existing_crossing:\n crossings.append(existing_crossing)\n\n g_contracted.add_edge(\n pre_id,\n post_id,\n area=data[\"area\"],\n crossing=Crossing.from_group(crossings, ignore_none=True),\n )\n\n return g_contracted" ]
[ "0.57665616", "0.5714965", "0.56087047", "0.5555653", "0.53248274", "0.5304287", "0.5222042", "0.520931", "0.5198424", "0.5198424", "0.5195729", "0.5162648", "0.5140849", "0.51147324", "0.5112582", "0.5106029", "0.50989425", "0.50513166", "0.5048671", "0.50138116", "0.50047845", "0.50035423", "0.5003025", "0.49802876", "0.49784875", "0.49448675", "0.49367747", "0.4920625", "0.4905566", "0.49040255", "0.48981592", "0.48940545", "0.4891991", "0.48881286", "0.48877743", "0.48755658", "0.48659295", "0.48437762", "0.4839538", "0.48355854", "0.48321676", "0.48252666", "0.4822933", "0.48181304", "0.48127013", "0.48086035", "0.4806441", "0.47954896", "0.47830552", "0.4780924", "0.47793138", "0.47771758", "0.47547373", "0.47436273", "0.47415432", "0.47374016", "0.47351936", "0.47340786", "0.47335666", "0.47333187", "0.47179374", "0.47178915", "0.4715488", "0.4713217", "0.47117037", "0.4710387", "0.47060674", "0.4703426", "0.46970332", "0.46936482", "0.4688557", "0.46877402", "0.46837685", "0.46791768", "0.46739402", "0.46736175", "0.46665436", "0.466408", "0.4660641", "0.4652001", "0.4645628", "0.46435538", "0.46431243", "0.4642526", "0.46419534", "0.46411508", "0.46394923", "0.46361774", "0.4635677", "0.46344703", "0.46326405", "0.46201056", "0.46165866", "0.46082568", "0.46057984", "0.4604283", "0.46036118", "0.4594637", "0.45925948", "0.4587282" ]
0.693719
0
initializes a new instance of the class
def __init__(self, infos, logger):
    super().__init__(infos, 'Delete Green Cloudformation Stack', logger, infos.green_infos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new(self):\n self._init()", "def initialize(cls):", "def init(self) -> None:", "def __init__ (self):\n pass", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def init(self) -> None:\n ...", "def __init__():", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __call__(cls, *args, **kwargs):\n obj = type.__call__(cls, *args, **kwargs)\n obj.initialize()\n return obj", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def init( self ):\n\t\treturn self", "def __init__(self):\n\n pass", "def __init__(self):\n\n pass", "def __init__(self):\n\n pass", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__ (self) :", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def _init(self):\n pass", "def _init(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError(\"This class cannot be instantiated!\")", "def __init__(self, **kwargs):\n raise NotImplementedError", "def init(self):", "def init(self):", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, **kwds):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n raise NotImplementedError", "def __init__(self):\r\n return", "def __init__(self):\r\n return", "def __init__(self):\r\n return", "def __init__(self):\r\n return", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def _init(self):", "def initialize(self, **kwargs):", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n 
pass", "def __init__(self):\n pass", "def __init__(self):\n pass", "def __init__(self):\n pass" ]
[ "0.81939584", "0.81770974", "0.78722274", "0.7813727", "0.7803766", "0.77187806", "0.75613856", "0.752962", "0.752962", "0.752962", "0.752962", "0.752962", "0.752962", "0.752962", "0.752962", "0.7493935", "0.7488739", "0.7488739", "0.7488739", "0.7488739", "0.7477393", "0.74687505", "0.74687505", "0.74687505", "0.74588406", "0.74588406", "0.74468803", "0.74468803", "0.74468803", "0.74437255", "0.7443459", "0.7443459", "0.74315", "0.7426786", "0.7415324", "0.7409065", "0.7398854", "0.7398854", "0.7363836", "0.7363836", "0.7363836", "0.7363836", "0.7359625", "0.7358158", "0.733319", "0.733319", "0.733319", "0.733319", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7332991", "0.7309834", "0.7290193", "0.727736", "0.727736", "0.727736", "0.727736", "0.727736", "0.727736", "0.727736", "0.727736", "0.727736", "0.727736", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727", "0.7260727" ]
0.0
-1
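The row above pairs the query "initializes a new instance of the class" with a constructor that does nothing but forward fixed arguments to a base-class initializer. A minimal, self-contained sketch of that delegation pattern follows; every class and attribute name in it is hypothetical and invented for illustration — the dataset row supplies only the argument shape (infos, a step title, logger, infos.green_infos).

# Minimal sketch of the super().__init__ delegation shown in the document
# field above. All class/attribute names are hypothetical; only the
# argument shape (infos, title, logger, infos.green_infos) comes from the row.
class Step:
    def __init__(self, infos, title, logger, target_infos):
        self.infos = infos                  # shared pipeline context
        self.title = title                  # human-readable step name
        self.logger = logger                # logger forwarded unchanged
        self.target_infos = target_infos    # environment this step acts on

class DeleteGreenStackStep(Step):
    def __init__(self, infos, logger):
        # Pin the title and select the "green" environment; forward the rest.
        super().__init__(infos, 'Delete Green Cloudformation Stack',
                         logger, infos.green_infos)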
Always print out typedefs for syntactic reasons in case of more passes.
def visit_Typedef(self, node): return str_node(node)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPrettyType(self):\n s = self.sym\n if self.sym == None:\n s = self.define\n return \"Typedef (alias type: %s)\" % s.getType()", "def typedefs(self):\n raise exceptions.NotImplementedError()", "def typedef(typedefs):\n\n\n for d in typedefs:\n\n\n type = map_type(d[\"type\"])\n typedef = d[\"typedef\"]\n\n MAPPINGS[typedef] = type", "def print_abs_type(self):\n pass", "def explore_type(name, datatype, is_child):\n actual_type = datatype.strip_typedefs()\n if is_child:\n print (\"The type of %s is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n else:\n print (\"The type '%s' is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n\n Explorer.explore_type(name, actual_type, is_child)\n return False", "def check_typedef_style(ctx, stmt):\n\n elemtype = stmt.search_one(\"type\")\n if elemtype is None:\n return\n\n # errors are appended to the context, such that we can just call the\n # base checks here.\n OCLintFunctions.check_enumeration_style(ctx, stmt)\n OCLintFunctions.check_bad_types(ctx, stmt)\n OCLintFunctions.check_posix_pattern_equal(ctx, stmt)", "def printListOfCalibTypes (self) :\n print '\\nprintListOfCalibTypes(): list_of_clib_types:' #, self.list_of_clib_types\n for type in self.list_of_clib_types : print ' ', type", "def missing_types():\n\n return ...", "def _parse_types(self):\n for root in self.roots:\n for types in root.iter('types'):\n for node in types.iter('type'):\n type_name = GLGenerator.get_name(node)\n text = GLGenerator.get_text(node).strip()\n if '*' in text and not text.startswith('struct'):\n self.pointer_types.append(type_name)", "def pe(t):\n print('__')\n print(t)\n print(type(t))\n sys.exit()", "def injectTypes (g):\n\tself=__module__\n\ts=g.symbols\n\tg.token('TYPE_VAR', '_|[A-Z][A-Z0-9]*')\n\tg.rule('TypeParameter', s.LSB, listOf(g.agroup(s.TYPE_VAR, s.FQNAME), s.COMMA, g), s.RSB)\n\tg.rule('TypeReference', s.FQNAME._as('name'), s.TypeParameter.optional()._as('parameters'))\n\tg.group('TypeValue')\n\tg.rule('TypeExpression')\n\tg.rule('TypeUnionSuffix', s.PIPE, s.TypeValue)\n\tg.group('TypePrefix', s.TypeReference)\n\tg.group('TypeSuffix', s.TypeUnionSuffix)\n\tg.rule('TypeExpression', s.TypePrefix, s.TypeSuffix.zeroOrMore())\n\tg.rule('TypeParens', s.LP, listOf(s.TypeExpression, s.COMMA, g), s.RP)\n\ts.TypeValue.set(s.TypeParens, s.TypeExpression)\n\tg.rule('TypeSlot', s.CheckIndent, g.aword('@slot'), s.NAME._as('name'), g.arule(s.COLON, s.TypeValue).optional()._as('value'), s.EOL, s.Documentation.optional()._as('documentation'))\n\tg.group('TypeLine', s.TypeSlot)\n\tg.group('TypeCode', s.COMMENT, s.TypeLine)\n\tg.rule('TypeBody', s.Indent, s.TypeCode.zeroOrMore(), s.Dedent)\n\tg.rule('Type', s.CheckIndent, g.aword('@type'), s.TypeReference._as('name'), g.arule(s.COLON, s.TypeValue).optional()._as('value'), s.EOL, s.Documentation.optional()._as('documentation'), s.TypeBody.optional())", "def ntypes(self): # -> None:\n ...", "def buildTypedefInfoDeclaration(self):\n if len(self.name) == 0:\n dec = \"<p>Aliases an anonymous %s.</p>\\n\" % (self.prettyType)\n else:\n dec = \"<p>Aliases a %s, called: '%s'.</p>\\n\" % (self.prettyType, self.name)\n dec += \"<p>\"+self.info+\"</p>\\n\"\n dec += \"<table border=1 cellpadding=5>\\n\"\n for mem in self.members:\n dec += mem.buildFullInfoDeclaration()\n dec += \"</table>\\n\"\n return dec", "def fortran_typedefs(self) -> str:\n result = ''\n public = ''\n if self.public is None:\n return result\n if self.public:\n public = ', public'\n for err_name, err_code in error_codes.items():\n result += 
' integer, parameter{} :: {}_{} = {}\\n'.format(\n public, self.f_prefix, err_name, err_code)\n result += '\\n'\n\n for kind_name, kind_def in kinds.items():\n result += ' integer, parameter{} :: {}_{} = {}\\n'.format(\n public, self.f_prefix, kind_name, kind_def)\n result += '\\n'\n\n for member in self.members:\n result += member.fortran_type_definition()\n if self.public:\n result += member.fortran_public_declarations()\n\n return result", "def type_():\n pass", "def show_type(type_):\n click.echo(format_type(type_))", "def print_type(obj: object) -> None:\n print(f'{type(obj)}')", "def make_typedefs(self):\n type_dict = self.python_madz_types_dict + self.mangled_namespace\n res = \"{} = {{}}\\n\".format(type_dict)\n\n for node in self.description.declarations():\n varname = self.python_madz_types + self.mangled_namespace + \"___\" + node.name\n # Hack to get self referential top level structs.\n if (node.type.node_type() == pdl.TypeStruct):\n self._is_top_level = varname\n res += self.gen_type_string(node.type)\n res += \"\\n\"\n else:\n res += \"{} = {}\\n\".format(varname, self.gen_type_string(node.type))\n res += \"{}['{}'] = {}\\n\".format(type_dict, node.name, varname)\n return res", "def validate_typedef(self, entry):\n\n check_fields(entry, ['params', 'name', 'edges', 'states'])\n name = entry['name']\n self.typedefs[name] = entry", "def deptype(self) -> str:", "def sitofp(self, typ):", "def _type(self) -> str:\n ...", "def dump_tokens(w):\r\n out = []\r\n lexer.input(w)\r\n while True:\r\n tok = lexer.token()\r\n if not tok: break\r\n out.append(tok.type)\r\n return ' '.join(out)", "def fixupTypedefs(self):\n import types\n # now iterate over looking to fix up the structure defines\n for sym in self.data:\n # was this a typedef, if so do we need to change the kalimba define\n if sym.getType() == \"typedef\":\n # did we know what the original is\n if sym.define.getType() == \"value\":\n # we didn't recognise it, is it something we actually know\n # about, first check if its a union or structure\n if type(sym.getDefineValue()) == types.UnicodeType:\n name = sym.getDefineValue().split()\n if len(name) == 1:\n if self.declare.has_symbol(name[0]):\n sym.setStruct(self.declare[name[0]])\n elif self.declare.has_symbol(\"typedef\"+name[0]):\n sym.setStruct(self.declare[\"typedef\"+name[0]])\n else:\n if self.declare.has_symbol(name[0]+name[1]):\n sym.setStruct(self.declare[name[0]+name[1]])\n else:\n if self.declare.has_symbol(sym.getDefineValue()):\n sym.setStruct(self.declare[sym.getDefineValue()])", "def generate_type_hierarchy(ctx):\n ctx.run(\"./env/bin/python -m puresnmp.types > doc/typetree.rst\")", "def p(t):\n print('__')\n print(t)\n print(type(t))", "def ntypes(self): # -> list[None]:\n ...", "def test_get_types(self):\n pass", "def show_type(self, arg):\n return (str(arg), str(type(arg)), arg)", "def print_warning_msgs():\n for err in TypeWarning.warnings:\n print err", "def print_types(data: bytearray):\n print(data.decode(\"ascii\"))\n print(\"\".join(\"^\" if is_lms(i, data) else \" \" for i in range(len(data))))", "def print_abs_type(self):\n return 'HI'", "def print_abs_type(self):\n return 'Generic'", "def buildTypedefInfoDeclaration(self):\n if len(self.name) == 0:\n dec = \"<p>Aliases an anonymous enumeration.</p>\\n\"\n else:\n dec = \"<p>Aliases a enumeration, called: '\"+self.name+\"'.</p>\\n\"\n dec += \"<p>\"+self.info+\"</p>\\n\"\n dec += \"<table border=1 cellpadding=5>\\n\"\n for entry in self.entries:\n dec += entry.buildFullInfoDeclaration()\n dec += 
\"</table>\\n\"\n return dec", "def get_check_types():", "def print_types(self):\n print type(self.posXposYposZ)\n print type(self.posXposYnegZ)\n print type(self.posXnegYposZ)\n print type(self.posXnegYnegZ)\n print type(self.negXposYposZ)\n print type(self.negXposYnegZ)\n print type(self.negXnegYposZ)\n print type(self.negXnegYnegZ)", "def ntypes(self): # -> list[str]:\n ...", "def _propagate_types(self):\n pass", "def test_typedef01201m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01201m/typeDef01201m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01201m/typeDef01201m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def reset():\n global typeMap, declList\n typeMap = {}\n declList = builtins[:]\n for t in builtins: typeMap[t.identifier] = t", "def type_check(self):\n self.link_all_refs()\n self.check_ast()", "def _type_repr(t):\n string = repr(t)\n for type_, alias in _TYPE_ABBREVIATIONS.items():\n string = string.replace(repr(type_), alias)\n string = re.sub(r\"<(class|type) '([\\w.]+)'>\", r\"\\2\", string)\n string = re.sub(r\"typecheck\\.(\\w+)\", r\"\\1\", string)\n return string", "def return_to_enclosing_type():\n print (\"\\nReturning to enclosing type...\\n\")", "def print_datatypes(model: nn.Module, model_name: str, sep: str = \"\\n\") -> None:\n log = model_name + \"'s datatypes:\" + sep\n log += sep.join(str(t) for t in model_utils.get_model_tensor_datatype(model))\n logger.info(log)", "def initialize_types():\n global VOID, VOID_P, VOID_PP\n global CHAR, CHAR_P, CHAR_PP\n global INT, INT_P, INT_108A\n global ULONG, UINT\n \n VOID = gdb.lookup_type(\"void\")\n VOID_P = VOID.pointer()\n VOID_PP = VOID_P.pointer()\n \n CHAR = gdb.lookup_type(\"char\")\n CHAR_P = CHAR.pointer()\n CHAR_PP = CHAR_P.pointer()\n \n INT = gdb.lookup_type(\"int\")\n INT_P = INT.pointer()\n INT_108A = INT.array(108)\n\n UINT = gdb.lookup_type(\"unsigned int\")\n ULONG = gdb.lookup_type(\"unsigned long\")", "def _expand_skipped_type(name, s_type, data, output, options):\n base_decl = s_type.get_declaration()\n base_decl_hash = base_decl.hash\n base_type_name = None\n # Ensure that the type we are typdefing wasn't skipped:\n if base_decl_hash in data.skipped_enums:\n del data.skipped_enums[base_decl_hash]\n base_type_name = name.replace('_', '-') + \"-enum\"\n _process_realized_enum(base_type_name, base_decl, output, options)\n else:\n decl_type = data.skipped_records.get(base_decl_hash)\n if decl_type:\n del data.skipped_records[base_decl_hash]\n base_type_str = name.replace('_', '-') + \"-record\"\n _process_record(base_type_str, decl_type[0], base_decl, output, options)\n if decl_type[0] == _ElaboratedType.UNION:\n base_type_name = f\"(:union {base_type_str})\"\n else:\n base_type_name = f\"(:struct {base_type_str})\"\n return base_type_name", "def _ptype(ptype_value, verbose=True):\n types = {\n 0:[\"PT_NULL\",\"Program header table entry unused\"],\n 1:[\"PT_LOAD\",\"Loadable program segment\"],\n 2:[\"PT_DYNAMIC\",\"Dynamic linking information\"],\n 3:[\"PT_INTERP\",\"Program interpreter\"],\n 4:[\"PT_NOTE\",\"Auxiliary information\"],\n 5:[\"PT_SHLIB\",\"Reserved\"],\n 6:[\"PT_PHDR\",\"Entry for header table itself\"],\n 7:[\"PT_TLS\",\"Thread-local storage segment\"],\n 8:[\"PT_NUM\",\"Number of defined types\"],\n 0x60000000:[\"PT_LOOS\",\"Start of OS-specific\"],\n 0x6474e550:[\"PT_GNU_EH_FRAME\",\"GCC 
.eh_frame_hdr segment\"],\n 0x6474e551:[\"PT_GNU_STACK\",\"Indicates stack executability\"],\n 0x6474e552:[\"PT_GNU_RELRO\",\"Read-only after relocation\"],\n 0x6ffffffa:[\"PT_SUNWBSS\",\"Sun Specific segment\"],\n 0x6ffffffb:[\"PT_SUNWSTACK\",\"Stack segment\"],\n 0x6fffffff:[\"PT_HIOS\",\"End of OS-specific\"],\n 0x70000000:[\"PT_LOPROC\",\"Start of processor-specific\"],\n 0x7fffffff:[\"PT_HIPROC\",\"End of processor-specific\"],\n }\n\n if types.get(ptype_value):\n value = types[ptype_value]\n if verbose:\n return \"%s (%s)\" % (value[0], value[1])\n else:\n return \"%s\" % value[0]\n else:\n return \"UNKNOWN\"", "def fortran_type_definition(self) -> str:\n result = 'type {}_{}\\n'.format(self.f_prefix, self.name)\n result += ' integer (c_intptr_t) :: ptr = 0\\n'\n ctn = ''.join(member.fortran_contains_definition() for member in self.members)\n if ctn:\n result += 'contains\\n' + ctn\n result += 'end type {}_{}\\n'.format(self.f_prefix, self.name)\n if self.public:\n result += 'public :: {}_{}\\n'.format(self.f_prefix, self.name)\n result += '\\n'\n return indent(result, 4*' ')", "def onlyTypes(self):\n\t\tself.collector = self.collector.WhereElementIsElementType()\n\t\treturn self", "def readOtherTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (\\w*) = (.*);\", self.data):\n typename, type_string = m.groups() \n if typename not in self.types.keys():\n types[typename] = type_string\n \n return types", "def test_typedef01301m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01301m/typeDef01301m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01301m/typeDef01301m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]", "def cython_nppytypes_nopred(self):\n if self._cython_nppytypes_nopred is None:\n npts_nopred = self.ts.cython_nptype(self.t_nopred, depth=1)\n npts_nopred = [npts_nopred] if isinstance(npts_nopred, basestring) \\\n else npts_nopred\n nppyts_nopred = _maprecurse(self.ts.cython_pytype, npts_nopred)\n self._cython_nppytypes_nopred = nppyts_nopred\n return self._cython_nppytypes_nopred", "def test_typedef01101m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01101m/typeDef01101m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01101m/typeDef01101m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_typedef01401m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01401m/typeDef01401m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01401m/typeDef01401m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def cython_nptypes_nopred(self):\n if self._cython_nptypes_nopred is None:\n self._cython_nptypes_nopred = self.ts.cython_nptype(self.t_nopred, depth=1)\n return self._cython_nptypes_nopred", "def display_file_types():\n\n print 'Available file types. Each line contains the file type and the list of extensions by those the file type is determined. To include FOOBAR file type to search use --FOOBAR, to exlude use --noFOOBAR. 
You can include and exclude a number of file types.'\n for ftype, extensions in TYPES().iteritems():\n print '%s: %s' % (ftype, ', '.join(extensions))", "def type_others():\n return \"<string>\"", "def resolveTypedef(type):\n for typedef in _typeDefsFixup:\n if typedef.id == type.id:\n return typedef.type\n\n return type", "def test_typedef00901m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00901m/typeDef00901m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00901m/typeDef00901m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def readSimpleTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\n typename, typetype = m.groups() \n if typetype in self.SIMPLETYPES:\n types[typename] = typetype\n \n return types", "def test_typedef00403m_type_def00403m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00403m/typeDef00403m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00403m/typeDef00403m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def repr_type(a: Annotation) -> str:\n return typing._type_repr(a)", "def cython_npcytypes_nopred(self):\n if self._cython_npcytypes_nopred is None:\n npts_nopred = self.ts.cython_nptype(self.t_nopred, depth=1)\n npts_nopred = [npts_nopred] if isinstance(npts_nopred, basestring) \\\n else npts_nopred\n npcyts_nopred = _maprecurse(self.ts.cython_cytype, npts_nopred)\n self._cython_npcytypes_nopred = npcyts_nopred\n return self._cython_npcytypes_nopred", "def explore_type(name, datatype, is_child):\n if datatype.code == gdb.TYPE_CODE_ENUM:\n if is_child:\n print (\"%s is of an enumerated type '%s'.\" %\n (name, str(datatype)))\n else:\n print (\"'%s' is an enumerated type.\" % name)\n else:\n if is_child:\n print (\"%s is of a scalar type '%s'.\" %\n (name, str(datatype)))\n else:\n print (\"'%s' is a scalar type.\" % name)\n\n if is_child:\n Explorer.return_to_enclosing_type_prompt()\n Explorer.return_to_enclosing_type()\n\n return False", "def display(self, type_get):\n data = self.build_data()\n for word_type, content in data.items():\n count_def = 1\n if type_get and self.word_type_dict[word_type] != type_get:\n continue\n pron = content[1]\n print(bcolors.BOLD + bcolors.YELLOW + \"%s /%s/ (%s)\" % (self.word, pron, self.word_type_dict[word_type])\n + bcolors.ENDC)\n for sense_dict in content[0]:\n type_def = self.get_type_of_def(sense_dict)\n if type_def:\n type_def = \" \" + type_def + \" \"\n print(\"%s.\" % str(count_def) +\n bcolors.ITALIC + bcolors.GREEN + \"%1s\" % type_def + bcolors.ENDC +\n \"%s\" % self.chunk_str(sense_dict['definitions'][0]))\n if 'examples' in sense_dict:\n self.display_examples(sense_dict['examples'])\n\n print(\"\\r\")\n\n if 'subsenses' in sense_dict:\n self.display_subsenses(sense_dict['subsenses'], count_def)\n\n print(\"\\r\")\n count_def += 1", "def get_type_list(cls):\n\n from pygments.lexers import get_all_lexers\n return [(name, aliases[0]) for name, aliases, filetypes, mimetypes in get_all_lexers()]", "def test_typedef00501m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00501m/typeDef00501m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00501m/typeDef00501m1_p.xml\",\n 
class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def adapt_none():\n frame = sys._getframe(1)\n frame.f_locals['__types__'] = []", "def use_types( self ) :\n return self._use_types", "def test_typedef00401m_type_def00401m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00401m/typeDef00401m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00401m/typeDef00401m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def terminal_types(self):\n return (self,)", "def _consume_type(self):\n try:\n self._consume(self.VARIABLE_TYPES)\n except CompilationEngineError:\n self._consume(TokenTypes.IDENTIFIER) # Class name", "def test_typedef00502m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00502m/typeDef00502m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00502m/typeDef00502m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def explain(symbol):\n if isinstance(symbol, Symbolic):\n print(symbol.source)\n else: \n print(symbol)", "def test_typedef00101m_type_def00101m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00101m/typeDef00101m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00101m/typeDef00101m1_p.xml\",\n class_name=\"Answer\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def etypes(self): # -> list[None]:\n ...", "def ugly():\n\n global _pretty\n _pretty = False", "def w_is_typed(tokens):\n return (\n 'type' in tokens or\n 'answerblock' in tokens or\n 'drawbox' in tokens or\n 'answerfigure' in tokens\n )", "def fpext(self, typ):", "def readTypes(self):\r\n types = {}\r\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\r\n typename, typetype = m.groups() \r\n if typetype in self.SIMPLETYPES:\r\n types[typename] = typetype\r\n else:\r\n types[typename] = \"#\" + typetype\r\n \r\n return types", "def readAggregatedSimpleTypes(self):\n types = {}\n # SETs\n for m in re.finditer(\"TYPE (\\w*) = SET (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'SET ' + typetype\n \n # BAGs\n for m in re.finditer(\"TYPE (\\w*) = BAG (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'BAG ' + typetype\n \n # LISTs\n for m in re.finditer(\"TYPE (\\w*) = LIST (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'LIST ' + typetype\n \n # ARRAYs\n for m in re.finditer(\"TYPE (\\w*) = ARRAY (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'ARRAY ' + typetype\n \n # STRING vectors\n for m in re.finditer(\"TYPE (\\w*) = STRING\\((.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'STRING(' + typetype\n \n return types", "def hook_type(self) ->str:", "def uitofp(self, typ):", "def etypes(self): # -> None:\n ...", "def test_no_source():\n assert get_type_hints(int) == {}", "def no_type_annotation(self, z):", "def fptosi(self, typ):", "def getTypeString(self):\n return '_'.join(self.types)", "def types_msg(instance, types):\r\n\r\n reprs = []\r\n for type in types:\r\n try:\r\n 
reprs.append(repr(type[\"name\"]))\r\n except Exception:\r\n reprs.append(repr(type))\r\n return \"%r is not of type %s\" % (instance, \", \".join(reprs))", "def test_typedef00301m_type_def00301m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00301m/typeDef00301m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00301m/typeDef00301m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def spelling(self):\r\n return conf.lib.clang_getTypeKindSpelling(self.value)", "def test_typedef00201m_type_def00201m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00201m/typeDef00201m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00201m/typeDef00201m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def provoke_and_handle_TypeError():\n try:\n print(\"loetungdusohn\" + 3)\n except TypeError as te:\n print(f\"Sorry! {te}\")", "def test_typedef00204m_type_def00204m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00204m/typeDef00204m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00204m/typeDef00204m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_typedef01202m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01202m/typeDef01202m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01202m/typeDef01202m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def type(name):", "def getImmediatelyAddableTypes(self, context=None):\n return self.getLocallyAllowedTypes()", "def DumpSchema(root):\n out = []\n if isinstance(root, type):\n root = root()\n _DumpSchema(root, out, [root.__class__.__name__])\n return '\\n'.join(sorted(out))", "def schemaCleanupTypes():\n libxml2mod.xmlSchemaCleanupTypes()" ]
[ "0.6419347", "0.6250635", "0.6137227", "0.6129789", "0.6000314", "0.5863036", "0.58271354", "0.57932997", "0.5779045", "0.5737586", "0.5731451", "0.5704002", "0.5692695", "0.5642191", "0.5641333", "0.56264526", "0.55807245", "0.55783653", "0.5577333", "0.556277", "0.55400676", "0.55122435", "0.5502504", "0.5487541", "0.54831356", "0.5473207", "0.545613", "0.5425239", "0.54082775", "0.54028195", "0.5394299", "0.53655773", "0.53592384", "0.53548867", "0.5354852", "0.53445566", "0.5320233", "0.53067476", "0.52956855", "0.52913934", "0.5221735", "0.5216449", "0.5215424", "0.5212927", "0.52099913", "0.5208334", "0.520518", "0.52041125", "0.5202557", "0.5174647", "0.5167531", "0.5167019", "0.5165243", "0.5162542", "0.5151388", "0.5150288", "0.5148327", "0.514736", "0.5134022", "0.5118681", "0.5109256", "0.5101959", "0.5101831", "0.50954616", "0.50948685", "0.50936913", "0.50830156", "0.508188", "0.50751185", "0.50705695", "0.5069565", "0.506643", "0.50643814", "0.5060551", "0.5055709", "0.50411564", "0.5041149", "0.50401545", "0.5034797", "0.50308967", "0.50276875", "0.5027658", "0.50239533", "0.5017669", "0.5016917", "0.5012345", "0.5008883", "0.5008522", "0.50078446", "0.500482", "0.5001128", "0.5000745", "0.5000338", "0.5000053", "0.49977502", "0.4997211", "0.4995637", "0.49955708", "0.49914134", "0.49826336" ]
0.5954348
5
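The visit_Typedef row above, and the visit_Struct row that follows, come from an AST-to-source generator written in the visitor style, where each visit_<NodeClass> method returns the regenerated source text for one node kind. A minimal sketch of that dispatch is below; str_node is assumed to render a node back to source text — its real implementation is not part of this dump, so a placeholder stands in for it.

# Minimal sketch of the visitor dispatch behind visit_Typedef / visit_Struct:
# route each node to visit_<ClassName>, falling back to a generic handler.
# str_node here is a placeholder; the real renderer is not shown in this dump.
def str_node(node):
    return repr(node)  # stand-in renderer for the sketch

class Generator:
    def visit(self, node):
        method = 'visit_' + node.__class__.__name__
        return getattr(self, method, self.generic_visit)(node)

    def generic_visit(self, node):
        return str_node(node)

    def visit_Typedef(self, node):
        # Typedefs are always emitted so later passes still see them.
        return str_node(node)

Calling Generator().visit(node) on any node then yields its regenerated source, with unhandled node kinds falling through to generic_visit.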
Always print out struct declarations for syntactic reasons in case of more passes.
def visit_Struct(self, node): return str_node(node)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_out_struct(self):\n args ={\n \"name\": self.python_madz_types + (\"OUTSTRUCT\" if self.namespace == \"\" else self._namespace_mangle(self.namespace)),\n \"fields\":\"\"\n }\n\n res = \\\n\"\"\"class {name}(Structure):\n _fields_ = [{fields}]\n\"\"\"\n for node in self.description.definitions():\n args['fields'] += self.gen_type_tuple_string(node.name, node.type) + \", \"\n\n return res.format(**args)", "def dumpAST(obj, ind=0):\n indChar = (\"\\t\" * ind) + \"-> \" if ind else \"\"\n print(indChar + \"[\" + obj.t + \"]\")\n if not obj.title == \"\":\n print(\"\\t\" + indChar + \"Title: \" + obj.title)\n if not obj.info == \"\":\n print(\"\\t\" + indChar + \"Info: \" + obj.info)\n if not obj.destination == \"\":\n print(\"\\t\" + indChar + \"Destination: \" + obj.destination)\n if obj.is_open:\n print(\"\\t\" + indChar + \"Open: \" + str(obj.is_open))\n if obj.last_line_blank:\n print(\n \"\\t\" + indChar + \"Last line blank: \" + str(obj.last_line_blank))\n if obj.start_line:\n print(\"\\t\" + indChar + \"Start line: \" + str(obj.start_line))\n if obj.start_column:\n print(\"\\t\" + indChar + \"Start Column: \" + str(obj.start_column))\n if obj.end_line:\n print(\"\\t\" + indChar + \"End line: \" + str(obj.end_line))\n if not obj.string_content == \"\":\n print(\"\\t\" + indChar + \"String content: \" + obj.string_content)\n if not obj.info == \"\":\n print(\"\\t\" + indChar + \"Info: \" + obj.info)\n if len(obj.strings) > 0:\n print(\"\\t\" + indChar + \"Strings: ['\" + \"', '\".join(obj.strings) +\n \"']\")\n if obj.c:\n if type(obj.c) is list:\n print(\"\\t\" + indChar + \"c:\")\n for b in obj.c:\n dumpAST(b, ind + 2)\n else:\n print(\"\\t\" + indChar + \"c: \"+obj.c)\n if obj.label:\n print(\"\\t\" + indChar + \"Label:\")\n for b in obj.label:\n dumpAST(b, ind + 2)\n if hasattr(obj.list_data, \"type\"):\n print(\"\\t\" + indChar + \"List Data: \")\n print(\"\\t\\t\" + indChar + \"[type] = \" + obj.list_data['type'])\n if hasattr(obj.list_data, \"bullet_char\"):\n print(\n \"\\t\\t\" + indChar + \"[bullet_char] = \" +\n obj.list_data['bullet_char'])\n if hasattr(obj.list_data, \"start\"):\n print(\"\\t\\t\" + indChar + \"[start] = \" + obj.list_data['start'])\n if hasattr(obj.list_data, \"delimiter\"):\n print(\n \"\\t\\t\" + indChar + \"[delimiter] = \" +\n obj.list_data['delimiter'])\n if hasattr(obj.list_data, \"padding\"):\n print(\n \"\\t\\t\" + indChar + \"[padding] = \" + obj.list_data['padding'])\n if hasattr(obj.list_data, \"marker_offset\"):\n print(\n \"\\t\\t\" + indChar + \"[marker_offset] = \" +\n obj.list_data['marker_offset'])\n if len(obj.inline_content) > 0:\n print(\"\\t\" + indChar + \"Inline content:\")\n for b in obj.inline_content:\n dumpAST(b, ind + 2)\n if len(obj.children) > 0:\n print(\"\\t\" + indChar + \"Children:\")\n for b in obj.children:\n dumpAST(b, ind + 2)\n if len(obj.attributes):\n print(\"\\t\" + indChar + \"Attributes:\")\n for key, val in obj.attributes.iteritems():\n print(\"\\t\\t\" + indChar + \"[{0}] = {1}\".format(key, val))", "def print_debug(self):\n print()\n print(\"Variable names ({} total):\".format(len(self.variable_names)))\n print()\n for variable in self.variable_names:\n print(variable)\n print()\n\n print(\"Clauses:\")\n print()\n for clause in self.abstract_clauses:\n print(clause)", "def dump(co):\n for attr in [\"name\", \"argcount\", \"posonlyargcount\",\n \"kwonlyargcount\", \"names\", \"varnames\",\n \"cellvars\", \"freevars\", \"nlocals\", \"flags\"]:\n print(\"%s: %s\" % (attr, getattr(co, \"co_\" + attr)))\n 
print(\"consts:\", tuple(consts(co.co_consts)))", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def print_header(self):\n print(\"\\nTesting {} - {}\\n\".format(self.__schema_name, self.__schema_path))", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def test_empty_structure():\n empty = SME_Struct()\n\n assert isinstance(empty.version, str)\n assert empty.teff is not None\n assert empty.logg is not None\n assert empty.vmic == 0\n assert empty.vmac == 0\n assert empty.vsini == 0\n\n assert empty.nseg == 0\n assert empty.wave is None\n assert empty.spec is None\n assert empty.uncs is None\n assert empty.synth is None\n assert empty.cont is None\n assert empty.mask is None\n assert empty.mask_good is None\n assert empty.mask_bad is None\n # assert empty.mask_line is None\n # assert empty.mask_continuum is None\n\n assert empty.cscale.shape == (0, 1)\n assert empty.vrad.shape == (0,)\n assert empty.cscale_flag == \"none\"\n assert empty.vrad_flag == \"none\"\n assert empty.cscale_degree == 0\n\n assert empty.mu is not None\n assert empty.nmu == 7\n\n # assert empty.md5 is not None\n\n assert empty.linelist is not None\n assert empty.species is not None\n assert len(empty.species) == 0\n assert empty.atomic is not None\n\n assert empty.monh == 0\n assert not np.isnan(empty[\"abund Fe\"])\n assert empty.abund[\"H\"] == 12\n assert not np.isnan(empty.abund()[\"Mg\"])\n\n assert empty.system_info is not None\n assert empty.system_info.arch == \"\"\n\n assert len(empty.fitparameters) == 0\n assert empty.fitresults is not None\n assert empty.fitresults.covariance is None\n\n assert empty.atmo is not None\n assert empty.atmo.depth is None\n\n assert empty.nlte is not None\n assert empty.nlte.elements == []", "def print_out():\n pass", "def ugly():\n\n global _pretty\n _pretty = False", "def print_parsed(specs):\n observed_types = set()\n for i in specs.values():\n observed_types.update(i['types'])\n observed_types = sorted(observed_types)\n\n s = ['# Observed types from the parsed document']\n s.append('TRACKTYPES = [')\n for i in observed_types:\n s.append(\" '{}',\".format(i))\n s.append(']')\n print('\\n'.join(s) + '\\n')\n\n data_types = specs['bigDataUrl']['types']\n\n s = ['# Tracks for which the definition specifies bigDataUrl']\n s.append('DATA_TRACKTYPES = [')\n for i in data_types:\n s.append(\" '{}',\".format(i))\n s.append(']')\n print('\\n'.join(s) + '\\n')\n print('param_defs = [')\n print()\n for k, v in sorted(specs.items()):\n print(\n (\n '''\n Param(\n name=\"{k}\",\n fmt={v[format]},\n types={v[types]},\n required={v[required]},\n validator=str),'''.format(**locals())\n )\n )", "def _print_structure(self):\n outstr = str(self._element) + '(' + str(self.get_height()) + ')['\n if self._leftchild:\n outstr = outstr + str(self._leftchild._element) + ' '\n else:\n outstr = outstr + '* '\n if self._rightchild:\n outstr = outstr + str(self._rightchild._element) + ']'\n else:\n outstr = outstr + '*]'\n if self._parent:\n outstr = outstr + ' -- ' + str(self._parent._element)\n else:\n outstr = outstr + ' -- *'\n print(outstr)\n if self._leftchild:\n self._leftchild._print_structure()\n if self._rightchild:\n self._rightchild._print_structure()", "def _get_summary_struct(self):\n _features = _precomputed_field(\n 
_internal_utils.pretty_print_list(self.get('features')))\n _exclude = _precomputed_field(\n _internal_utils.pretty_print_list(self.get('excluded_features')))\n fields = [\n (\"Features\", _features),\n (\"Excluded features\", _exclude),\n (\"Output column name\", 'output_column_name'),\n (\"Max categories per column\", 'max_categories'),\n ]\n section_titles = ['Model fields']\n\n return ([fields], section_titles)", "def print_object_details(obj: object) -> None:\n print_section(obj, 'Type', print_type)\n print_section(obj, 'Documentation', print_documentation)\n print_section(obj, 'Attributes', print_attributes)\n print_section(obj, 'Methods', print_methods)\n print_section_delimiter()", "def print_structure(file_path):\n pprint(read_or_exit(file_path), width=140)", "def _print_structure(self):\n if self._isthisapropertree() is False:\n print(\"ERROR: this is not a proper Binary Search Tree. ++++++++++\")\n outstr = str(self._element) + \" (hgt=\" + str(self._height) + \")[\"\n if self._leftchild is not None:\n outstr = outstr + \"left: \" + str(self._leftchild._element)\n else:\n outstr = outstr + \"left: *\"\n if self._rightchild is not None:\n outstr += \"; right: \" + str(self._rightchild._element) + \"]\"\n else:\n outstr = outstr + \"; right: *]\"\n if self._parent is not None:\n outstr = outstr + \" -- parent: \" + str(self._parent._element)\n else:\n outstr = outstr + \" -- parent: *\"\n print(outstr)\n if self._leftchild is not None:\n self._leftchild._print_structure()\n if self._rightchild is not None:\n self._rightchild._print_structure()", "def buildSimpleInfoDeclaration(self):\n return \"\"", "def debug_info_struct(fdt):\n # Traverse node tree in depth first\n depth = 0\n path = b''\n root = fdt.get_root_node()\n debug_node(fdt, root, depth, path)", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def dump_objects():\n pass", "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def describe():", "def odump(obj, no_dunder=True, whelp=False):\n import builtins\n builtin_types = [ty for ty in builtins.__dict__.values() if isinstance(ty, type)]\n print(type(obj))\n if type(obj) not in builtin_types and hasattr(obj, '__doc__') and getattr(obj, '__doc__'):\n print(getattr(obj, '__doc__'))\n print()\n for attr in dir(obj):\n if no_dunder and attr.startswith('__'):\n continue\n oattr = getattr(obj, attr)\n if hasattr(oattr, '__class__'):\n tdesc = f'({oattr.__class__.__name__})'\n else:\n tdesc = f'({str(type(attr))})'\n if callable(oattr):\n soattr = '<function or method>'\n tdesc = ''\n else:\n try:\n soattr = str(oattr)\n if not soattr:\n soattr = \"''\"\n except TypeError as exc:\n # Some objects return wrong (non-string) results for str() call,\n # (raising exception like \"TypeError: __str__ returned non-string (type list)\")\n soattr = f'ERROR: string representation of an attribute could not be computed ({exc}))'\n print(f'.{attr:20} = {soattr:5} {tdesc}', end='')\n if whelp and hasattr(oattr, '__doc__') and getattr(oattr, '__doc__'):\n if type(oattr) in builtin_types:\n print(f' (builtin)')\n else:\n print(f\"\\n 
{getattr(oattr, '__doc__')}\\n\")", "def show_symbol_table(st):\n print(st)\n # Dump the name lists get_*()\n if isinstance(st, symtable.Function):\n for nlist in _NAME_LISTS:\n names = getattr(st, \"get_\"+nlist)()\n if names:\n print(' {} : {!r}'.format(nlist, names))\n # Dump the properties as short names is_global -> global, etc..\n for s in st.get_symbols():\n scope = to_scope_name(s._Symbol__scope)\n props = [scope]\n for p in _NAME_PROPS:\n if getattr(s, \"is_\"+p)():\n props.append(p)\n print(' \"{}\" : {}'.format(s.get_name(), ', '.join(props)))", "def print_objects(self):\n print(\"Spaces: {}\".format([s.name for s in self.spaces]))\n print(\"Characters: {}\".format([c.name for c in self.characters]))\n print(\"Items: {}\".format([i.name for i in self.items]))", "def debug3(node):\n print \"%.01f obj: %r left: %r right: %r\" % (node.value, node.objects, node.left.objects, node.right.objects)", "def help_dump(self):\n print(DUMP)", "def print_state(self):\n print(self.type,\n self.persons[0].identifier,\n self.persons[1].identifier)", "def _get_summary_struct(self):\n model_fields = [\n ('Number of classes', 'num_classes'),\n ('Number of feature columns', 'num_features'),\n ('Input image shape', 'input_image_shape'),\n ]\n training_fields = [\n ('Number of examples', 'num_examples'),\n (\"Training loss\", 'training_loss'),\n (\"Training time (sec)\", 'training_time'),\n ]\n\n section_titles = ['Schema', 'Training summary']\n return([model_fields, training_fields], section_titles)", "def print_empty(self):\n ...", "def print_full(self):# pragma: no cover\n print('Will now print all optional output arrays - ')\n print(' yint_seg: ')\n print((self.yint_seg))\n print(' ')\n print(' slope_seg: ')\n print(self.slope_seg)\n print(' ')\n print(' sigyint_seg: ')\n print(self.sigyint_seg)\n print(' ')\n print(' sigslope_seg: ')\n print(self.sigslope_seg)\n print(' ')\n print(' inv_var_2d: ')\n print((self.inv_var_2d))\n print(' ')\n print(' firstf_int: ')\n print((self.firstf_int))\n print(' ')\n print(' ped_int: ')\n print((self.ped_int))\n print(' ')\n print(' cr_mag_seg: ')\n print((self.cr_mag_seg))", "def _get_summary_struct(self):\n\n model_fields = [\n (\"Number of reference examples\", 'num_examples')]\n\n training_fields = [\n (\"Method\", 'method'),\n (\"Total training time (seconds)\", 'training_time')]\n\n sections = [model_fields, training_fields]\n section_titles = ['Schema', 'Training']\n\n return (sections, section_titles)", "def dump_ast(self,node,annotate_fields=True,disabled_fields=None,\n include_attributes=False,indent=2\n ):\n \n #@+others\n #@+node:ekr.20121206222535.7800: *4* class AstDumper\n class AstDumper:\n \n def __init__(self,u,annotate_fields,disabled_fields,format,include_attributes,indent_ws):\n \n self.u = u\n self.annotate_fields = annotate_fields\n self.disabled_fields = disabled_fields\n self.format = format\n self.include_attributes = include_attributes\n self.indent_ws = indent_ws\n\n #@+others\n #@+node:ekr.20121119162429.4140: *5* dump\n def dump(self,node,level=0):\n sep1 = '\\n%s' % (self.indent_ws*(level+1))\n if isinstance(node,ast.AST):\n fields = [(a,self.dump(b,level+1)) for a,b in self.get_fields(node)]\n # ast.iter_fields(node)]\n if self.include_attributes and node._attributes:\n fields.extend([(a,self.dump(getattr(node,a),level+1))\n for a in node._attributes])\n aList = self.extra_attributes(node)\n if aList: fields.extend(aList)\n if self.annotate_fields:\n aList = ['%s=%s' % (a,b) for a,b in fields]\n else:\n aList = [b for a,b in 
fields]\n compressed = not any([isinstance(b,list) and len(b)>1 for a,b in fields])\n name = node.__class__.__name__\n if compressed and len(','.join(aList)) < 100:\n return '%s(%s)' % (name,','.join(aList))\n else:\n sep = '' if len(aList) <= 1 else sep1\n return '%s(%s%s)' % (name,sep,sep1.join(aList))\n elif isinstance(node,list):\n compressed = not any([isinstance(z,list) and len(z)>1 for z in node])\n sep = '' if compressed and len(node) <= 1 else sep1\n return '[%s]' % ''.join(\n ['%s%s' % (sep,self.dump(z,level+1)) for z in node])\n else:\n return repr(node)\n #@+node:ekr.20121119162429.4141: *5* extra_attributes & helpers\n def extra_attributes (self,node):\n \n '''Return the tuple (field,repr(field)) for all extra fields.'''\n \n d = {\n 'e': self.do_repr,\n # '_parent':self.do_repr,\n 'cache':self.do_cache_list,\n # 'ivars_dict': self.do_ivars_dict,\n 'reach':self.do_reaching_list,\n 'typ': self.do_types_list,\n }\n\n aList = []\n for attr in sorted(d.keys()):\n if hasattr(node,attr):\n val = getattr(node,attr)\n f = d.get(attr)\n s = f(attr,node,val)\n if s:\n aList.append((attr,s),)\n return aList\n #@+node:ekr.20121206222535.7802: *6* AstDumper.helpers\n def do_cache_list(self,attr,node,val):\n return self.u.dump_cache(node)\n \n # def do_ivars_dict(self,attr,node,val):\n # return repr(val)\n\n def do_reaching_list(self,attr,node,val):\n assert attr == 'reach'\n return '[%s]' % ','.join(\n [self.format(z).strip() or repr(z)\n for z in getattr(node,attr)])\n\n def do_repr(self,attr,node,val):\n return repr(val)\n\n def do_types_list(self,attr,node,val):\n assert attr == 'typ'\n return '[%s]' % ','.join(\n [repr(z) for z in getattr(node,attr)])\n #@+node:ekr.20121206222535.7803: *5* get_fields\n def get_fields (self,node):\n \n fields = [z for z in ast.iter_fields(node)]\n result = []\n for a,b in fields:\n if a not in self.disabled_fields:\n if b not in (None,[]):\n result.append((a,b),)\n return result\n #@+node:ekr.20121208161542.7875: *5* kind\n def kind(self,node):\n \n return node.__class__.__name__\n #@-others\n #@-others\n \n if isinstance(node,ast.AST):\n indent_ws = ' '*indent\n dumper = AstDumper(self,annotate_fields,disabled_fields or [],\n self.format,include_attributes,indent_ws)\n return dumper.dump(node)\n else:\n raise TypeError('expected AST, got %r' % node.__class__.__name__)", "def buildTypedefInfoDeclaration(self):\n if len(self.name) == 0:\n dec = \"<p>Aliases an anonymous %s.</p>\\n\" % (self.prettyType)\n else:\n dec = \"<p>Aliases a %s, called: '%s'.</p>\\n\" % (self.prettyType, self.name)\n dec += \"<p>\"+self.info+\"</p>\\n\"\n dec += \"<table border=1 cellpadding=5>\\n\"\n for mem in self.members:\n dec += mem.buildFullInfoDeclaration()\n dec += \"</table>\\n\"\n return dec", "def dump(self) :\n st = \"%s=%s, valid=%d, found=%d, type=%s stringValue=%s\" \\\n %(self.name_, str(self.value_), self.valid_, self.found_, \\\n self.type_, self.stringValue_)\n print st", "def pretty_print(self):\n output = \"Count: \"\n if self.soft:\n output += \"S\"\n output += str(self.count)\n if self.can_double:\n output += \", can double\"\n if self.can_split:\n output += \", can split\"\n print(output)", "def __repr__(self):\n\n\t\t# Preparing variables\n\t\tl_s_content = [\t\t# List containing the content to print\n\t\t\t\"> The structure object :\"\n\t\t]\n\n\t\t# PDB fields\n\t\tl_s_content.append(\"s_name : {}\".format(self.s_name))\n\n\t\t# Structural fields\n\t\tl_s_content.append(\"i_atom_count : {}\".format(self.i_atom_count))\n\t\tl_s_content.append(\"a_atoms 
: {}\".format(len(self.a_atoms)))\n\n\t\t# Grid fields\n\t\tl_s_content.append(\"b_loaded : {}\".format(self.b_loaded))\n\t\tl_s_content.append(\"a_grid : {}\".format(self.a_grid.size))\n\n\t\treturn \"\\n\".join(l_s_content)\t\t# Returns the content to show", "def display(self):\n print \"\\n\\n***********************\\n\"\n print \"Info about group %s, name=%s, path=%s\" % (self.sdef['id'], \n self.name, self.path)\n print \"sdef=\"\n pp.pprint(self.sdef)\n print \"expanded_def=\"\n pp.pprint (self.expanded_def)\n print \"includes=\"\n pp.pprint (self.includes)\n print \"parent_attributes=\"\n pp.pprint (self.parent_attributes)\n print \"attributes=\"\n pp.pprint (self.attributes)\n print \"mstats=\"\n pp.pprint (self.mstats)", "def print_attributes1(obj):\n\tfor attr in vars(obj):\n\t\tprint(attr, getattr(obj, attr))", "def info(self):\n\t\timport inspect\n\t\n\t\tmessage = \"All variables available for star ID %i\" % self.ID\t\t\n\t\tprint message\n\t\tprint '-'*len(message)\n\t\tattributes = inspect.getmembers(self, lambda a:not(inspect.isroutine(a)))\n\t\tfor a in attributes:\n\t\t\tif (a[0].startswith('__') and a[0].endswith('__')): continue\n\t\t\tprint a[0], \"=\", a[1]", "def dumpstruct(obj, data: bytes = None, offset: int = 0, color: bool = True, output: str = \"print\"):\n if output not in (\"print\", \"string\"):\n raise ValueError(f\"Invalid output argument: {output!r} (should be 'print' or 'string').\")\n\n if isinstance(obj, Instance):\n return _dumpstruct(obj._type, obj, obj.dumps(), offset, color, output)\n elif isinstance(obj, Structure) and data:\n return _dumpstruct(obj, obj(data), data, offset, color, output)\n else:\n raise ValueError(\"Invalid arguments\")", "def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for", "def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for", "def printGeneration(tree):\n for mod in tree:\n if mod.param != []:\n print(str(mod.symbol) + str(mod.param).replace(\"[\",\"(\").replace(\"]\",\")\"),end=\"\")\n else:\n print(str(mod.symbol),end=\"\")\n print(\"\")", "def print_abs_type(self):\n pass", "def report(self):\n print()\n print(\"%-15s %-25s %s\" % (\"Class\", \"Name\", \"File\"))\n print(\"%-15s %-25s %s\" % (\"-----\", \"----\", \"----\"))\n for m in sorted(self.flatten(), key=lambda n: n.identifier):\n print(\"%-15s %-25s %s\" % (type(m).__name__, m.identifier, m.filename or \"\"))", "def __print_header():\n __collen[\"id\"] = max(__collen[\"id\"], 2) # min is \"ID\"\n __collen[\"name\"] = 
max(__collen[\"name\"], 14) # min is \"Subvolume Name\"\n __collen[\"used_lim\"] = max(__collen[\"used_lim\"], 10) # min is \"Max (Used)\"\n __collen[\"excl_lim\"] = max(__collen[\"excl_lim\"], 11) # min is \"Max (Excl.)\"\n print(\"ID{:s} | Subvolume Name{:s} | {:s}Used | {:s}Max (Used) | {:s}Exclusive | {:s}Max (Excl.)\".format(\n \" \"*(__collen[\"id\"]-2),\n \" \"*(__collen[\"name\"]-14),\n \" \"*(MAX_SIZE-4),\n \" \"*(__collen[\"used_lim\"]-10),\n \" \"*(MAX_SIZE-9),\n \" \"*(__collen[\"excl_lim\"]-11)))", "def print_schemas(self):\n self.indent_depth += 1\n for i in self.definitions:\n def_name = i.split('/')[-1]\n self.write(f'.. _{def_name}:')\n self.write('')\n self.write(f'{def_name} Schema')\n self.write(f'{\"`\" * (len(def_name) + 7)}')\n self.write('')\n self.write('.. code-block:: json', self.indent_depth)\n self.indent_depth += 1\n self.write('')\n self.definition_rst(def_name)\n self.indent_depth -= 1\n self.write('')\n self.write('')\n self.indent_depth -= 1", "def print_help():\n parser = parsersetup()\n parser.print_help()", "def debug_info_header(header):\n print(colored(\"Header:\", 'cyan'), colored(\"Valid FDT magic value found\", \"green\", attrs=['bold']))\n print(colored(\"Header\", 'cyan'), \"-> Total Size of file: \",\n colored('{0:>8d} {0:>#8x}'.format(header.totalsize), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Struct Block: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_dt_struct), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_struct), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to String Block: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_dt_strings), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_strings), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Memory Reser: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_mem_rsvmap), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Version of DTB: \",\n colored('{0:>8d} {0:>#8x}'.format(header.version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Previous Version of DTB:\",\n colored('{0:>8d} {0:>#8x}'.format(header.last_comp_version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Boot CPU Number: \",\n colored('{0:>8d} {0:>#8x}'.format(header.boot_cpuid_phys), 'yellow'))\n print()", "def test_get_structure_fmt(self):\n fmt, block_size = get_structure_fmt(SlotHeaderStructure)\n self.assertEqual(fmt, self.fmt_slot_header)\n self.assertEqual(block_size, self.block_size_slot_header)", "def buildFullInfoDeclaration(self, title=0, depth=0):\n return \"\"", "def print_passed(self):\n if self.instance_type == \"FILE\":\n print(colored(\"PASS\", \"green\") + f\" | [{self.instance_type}] {self.instance_location}/{self.instance_name}\")\n\n if self.instance_type == \"HOST\":\n print(\n colored(\"PASS\", \"green\")\n + f\" | [{self.instance_type}] {self.instance_hostname} [SCHEMA ID] {self.schema_id}\"\n )", "def print_info(self):\n \n print \"\"\"version: %d\\t header_len: %d\\t tos: %s\\t total_len: %d\n id: %s\\t flags_reservedbit: %d\\t flags_dont_fragment: %d\\t flags_more_fragment: %d\n fragment_offset: %d\\t TTL: %d\\t protocol: %s\\t\n header_checksum: %s\\t\n src: %s\\t dst: %s\n opt_paddings: %s\"\"\" % (\n self.version, self.header_len, self.type_of_service, self.total_len, self.id, self.flags_reservedbit, \n self.flags_dont_fragment, self.flags_more_fragment, \n self.fragment_offset, self.TTL, self.protocol, self.header_checksum, self.src, self.dst, 
repr(self.opt_paddings))", "def structParser(lines):\n blc = 0 #blank line counter\n bc = 0 #block counter\n struct = []\n record = False\n for line in lines:\n if len(line) == 1:\n blc +=1\n record = False\n if blc == 2:\n blc = 0\n bc +=1\n record = True\n if record and bc < 3:\n struct.append(line)\n\n yield struct", "def dump(self):\r\n for (name, value) in self.__table__.items():\r\n print (name)\r\n print (value)", "def __post_init__(self) -> None:\n _validate_struct_class(self.struct_class)", "def __post_init__(self) -> None:\n _validate_struct_class(self.struct_class)", "def show_class_details(name, f):\n print '%s:' % name\n print '\\tobject:', f\n print '\\t__name__:', \n try:\n print f.__name__\n except AttributeError:\n print '(no __name__)'\n print '\\t__doc__', repr(f.__doc__)\n return", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def printSummary(self):\n pass", "def flush_structure(self):\n ...", "def pre_validate_struct(self, struct):\n pass", "def showSchema (self):\n\t\ts=[];add=s.append\n\t\tfor i in range(len(self.schema)):\n\t\t\tadd (\"%d. %s\" % (i+1, self.schema[i]))\n\t\treturn join (s, '\\n')", "def print_pairing_info(melon_types):\n\n # Fill in the rest", "def verbose(self, block: Block):\n print('\\n\\n==============================')\n print('Hash:\\t\\t', block.hash.hexdigest())\n print('Previous Hash:\\t', block.previous_hash.hexdigest())\n print('Nounce:\\t\\t', block.nonce)\n print('Data:\\t\\t', block.data)\n print('\\n\\n==============================')", "def PrettyPrint(self):\r\n print(self.data)\r\n return", "def print_help(self): # pylint: disable-msg=W0221\n pass", "def pprint(self):\n print(self.pprint_str())", "def print_state(state,indent=4):\n if state != False:\n for (name,val) in vars(state).items():\n if name != '__name__':\n for x in range(indent): sys.stdout.write(' ')\n sys.stdout.write(state.__name__ + '.' 
+ name)\n print(' =', val)\n else: print('False')", "def NeedsDebugInfo(self):\n return True", "def generate_ze_pretty_printers(header):\n printers = open(\"ze_printers.def\", \"w\")\n\n matches = re.finditer(r'typedef struct _ze_([_a-z]+)_callbacks_t\\n\\{\\n([a-zA-Z_;\\s\\n]+)\\n\\} ze_([_a-z]+)_callbacks_t;', header)\n\n for match in matches:\n api_domain = snake_to_camel(match.group(1))\n for l in match.group(2).splitlines():\n parts = l.split()\n api_match = re.match(r'ze_pfn([a-zA-Z]+)Cb_t', parts[0])\n api_name_tail = api_match.group(1)\n api_name = 'ze' + api_name_tail\n\n param_type = 'ze_' + camel_to_snake(api_name_tail) + '_params_t'\n\n search_str = \"typedef struct _{}\\n{{\\n([0-9\\sa-zA-Z_\\*;\\n]*)}}\".format(param_type)\n args = re.search(search_str, header)\n\n args = args.group(1).replace('\\n', '').strip().split(\";\")[:-1]\n\n printers.write(\"case static_cast<uint32_t>(ZEApiKind::{}): {{\\n\".format(api_name))\n printers.write(\"const auto *Args = reinterpret_cast<{}*>(Data->args_data);\\n\".format(param_type))\n for arg in args:\n arg_name = arg.strip().split(\" \")[-1].replace('*', '')\n arg_types = [ x.strip() for x in arg.strip().split(\" \")[:-1]]\n printers.write(\"PrintOffset();\\n\")\n scalar = [\"size_t*\", \"void**\", \"uint32_t*\", \"uint64_t*\"]\n if any(item in scalar for item in arg_types):\n printers.write('std::cout << \"{}: \" << *(Args->{}) << \"\\\\n\";\\n'.format(arg_name[1:], arg_name))\n else:\n printers.write(' std::cout << \"{}: \" << Args->{} << \"\\\\n\";\\n'.format(arg_name, arg_name))\n printers.write(\"break;\\n\")\n printers.write(\"}\\n\")\n\n printers.close()", "def printing_vars(self):\n print(\"Name is \", self.name)", "def buildFullInfoDeclaration(self, title, depth=0):\n # if this is the title make it look like it\n if title:\n dec = \"<a class=\\\"anchor\\\" name=\\\"\"+self.link+\"\\\"></a><h2><tt>typedef \"+self.name+\"</tt></h2>\"\n else:\n dec = \"<a class=\\\"anchor\\\" name=\\\"\"+self.link+\"\\\"></a><p>typedef \"+self.name+\"</p>\\n\"\n # iclude any information about this field\n dec += \"<p>\"+self.info+\"</p>\\n\"\n # if this aliases another symbol we know about, just link to that\n if self.sym != None:\n dec += \"<p>Aliases: <a href=\\\"#\"+self.sym.getLink()+\"\\\">\"+self.sym.getName()+\"</a></p>\\n\"\n else:\n # this may be an anonymous symbol, in which case print that out properly\n if self.define.getType() == \"value\":\n dec += \"<p>Aliases: \"+self.define.getValue()+\"</p>\\n\"\n elif self.define.getType() in [\"struct\", \"enum\", \"union\"]:\n dec += self.define.buildTypedefInfoDeclaration()\n else:\n dec += \"<p>Aliases: \"+self.define.getType()+\" \"+self.define.getName()+\"</p>\\n\"\n if title:\n dec += \"<hr>\\n\"\n return dec", "def output_debug_info(self):", "def DumpSchema(root):\n out = []\n if isinstance(root, type):\n root = root()\n _DumpSchema(root, out, [root.__class__.__name__])\n return '\\n'.join(sorted(out))", "def buildSimpleInfoDeclaration(self):\n if self.info == \"\":\n raise Exception(\"Info block empty in symbol: \"+self.name)\n # buid some html to show\n dec = \" <tr>\\n\"\n dec += \" <td><a href=\\\"#\"+self.link+\"\\\">\"+self.name+\"</a></td>\\n\"\n dec += \" <td>\"+self.getPrettyType()\n dec += \"</td>\\n\"\n dec += \" <td>\"+self.info+\"</td>\\n\"\n dec += \" </tr>\\n\"\n return dec", "def p(mess, obj):\n if hasattr(obj, 'shape'):\n print(mess, type(obj), obj.shape, \"\\n\", obj)\n else:\n print(mess, type(obj), \"\\n\", obj)", "def _print_attribute(self):\n print(vars(self))", "def 
print_info(self):\n print('Condition list: (Cell definitions)')\n if len(list(self.condition_dict.keys())) > 0:\n for ID in list(self.condition_dict.keys()):\n ident = self.condition_dict[ID][0]['identifier']\n print(\n f'\\t[{ident}]: {len(self.condition_dict[ID])} definition(s)')\n else:\n print('\\tNo instances.')\n print()\n print('Modcell types: (Cell mappings on module)')\n if len(list(self.modcells.keys())) > 0:\n for ident in list(self.modcells.keys()):\n print(f'\\t[{ident}]: {len(self.modcells[ident])} definition(s)')\n else:\n print('\\tNo instances.')\n print()\n print('String definitions (Series of modcells)')\n passed = True\n if len(list(self.string_cond.keys())) > 0:\n for str_key in self.string_cond:\n try:\n print(\n f\"\\t[{str_key}]: {len(self.multilevel_ivdata['string'][str_key]['V'])} definition(s)\")\n except:\n passed = False\n continue\n if not passed:\n print('String definitions are defined by deducing the combination of module definitions. So, for an accurate display of the string-level definitions, call this module after enacting .simulate()')\n else:\n print('\\tNo instances.')\n print()", "def print_summary(self, print_level = 0):\n\n print(\"==========================\")\n print(\"= FUNtoFEM model summary =\")\n print(\"==========================\")\n print(\"Model name:\", self.name)\n print(\"Number of bodies:\", len(self.bodies))\n print(\"Number of scenarios:\", len(self.scenarios))\n print(\" \")\n print(\"------------------\")\n print(\"| Bodies summary |\")\n print(\"------------------\")\n for body in self.bodies:\n print(\"Body:\", body.id, body.name)\n print(\" coupling group:\", body.group)\n print(\" transfer scheme:\", type(body.transfer))\n print(\" shape parameteration:\", type(body.shape))\n for vartype in body.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(body.variables[vartype]))\n if print_level >= 0:\n for var in body.variables[vartype]:\n print(' variable:', var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)\n\n print(\" \")\n print(\"--------------------\")\n print(\"| Scenario summary |\")\n print(\"--------------------\")\n for scenario in self.scenarios:\n print(\"scenario:\", scenario.id, scenario.name)\n print(\" coupling group:\", scenario.group)\n print(\" steps:\", scenario.steps)\n print(\" steady?:\", scenario.steady)\n for func in scenario.functions:\n print(' function:', func.name, ', analysis_type:', func.analysis_type)\n print(' adjoint?', func.adjoint)\n if not scenario.steady:\n print(' time range', func.start, ',', func.stop)\n print(' averaging', func.averaging)\n\n\n for vartype in scenario.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(scenario.variables[vartype]))\n if print_level >= 0:\n for var in scenario.variables[vartype]:\n print(' variable:', var.id, var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)", "def print_help(self):\r\n\r\n print (\"\"\"Show data values for assignment.\r\n\r\nUsage:\r\n cat <request or table path>\r\n cat --id <assignment_id> #Where assignment_id provided by 'vers <table path>' command\r\n\r\nFormatting flags:\r\n\r\n -c or --comments - Show comments on/off\r\n -nc or --no-comments\r\n\r\n -ph or --horizontal - Print table horizontally\r\n -pa or --vertical - Print table vertically\r\n (If no '--horizontal' or '--vertical' flag is given, 
the layout of table is determined automatically:\r\n vertical layout if table has only 1 row and more than 3 columns, horizontal otherwise)\r\n\r\n -b or --borders - Switch show borders on of off\r\n -nb or --no-borders\r\n\r\n -h or --header - Show header on/off\r\n -nh or --no-header\r\n\r\n -t or --time - Show time\r\n -nt or --no-time\r\n\r\nExamples:\r\n > cat /test/test_vars/test_table #print latest data for test_table\r\n > cat /test/test_vars/test_table::subtest #print latest data in subtest variation\r\n > cat /test/test_vars/test_table:::2012-08 #print data latest for august 2012\r\n\r\nSee also 'dump' command which is 'cat' formatted to save data to files. 'help dump'\r\n\r\n \"\"\")", "def defAsString(self):\n\n sdefs = []\n sdefs.append(\"typedef struct {\")\n for f in self.ctypes:\n sdefs.append(self._fieldDefAsString(f))\n \n sdefs.append(\"} %s;\" % (self.name))\n sdefs.append('')\n \n return '\\n'.join(sdefs)", "def print_me(self):\n \n print(\"CLASS {}\".format(self))\n print(\"Name: {}\".format(self.name))\n print(\"Class ID: {}\".format(self.id))\n print(\"Stereotype: {}\".format(self.stereotype))\n print(\"Inherits flag: {}\".format(self.inherits_flag))\n print(\"Depends flag: {}\".format(self.depends_flag))\n print(\"Inherits flag: {}\".format(self.inherits_flag))\n print(\"Realizes flag: {}\".format(self.realizes_flag))\n print(\"Atributes:\") \n for attr in self.attr_list:\n attr.print_me()\n \n print(\"Methods:\") \n for mtd in self.method_list:\n mtd.print_me()\n \n print(\"Associations:\")\n print(self.association_list)\n for assoc in self.association_list:\n assoc.print_me()\n \n print(\"###########################\\n\\n\")", "def print_brief_summary(self):\n print (\"Model {}\".format(self.modelName))\n print (\"Precision {}\".format(self.precision))\n print (\"Recall {}\".format(self.recall))\n print (\"f1 score {}\".format(self.f1))\n \n # work here\n print (\"\\nGold NER label counts:\")\n for ner in self.gold_cts.keys():\n print (\"{} : {} (tag{})\".format(self.gold_cts[ner], self.nerTags.ids_to_words([ner]), ner))\n print (\"\\nPredicted NER label counts:\")\n for ner in self.pred_cts.keys():\n print (\"{} : {} (tag{})\".format(self.pred_cts[ner], self.nerTags.ids_to_words([ner]), ner))", "def print_pointers(self):\n\n ### FILL IN ###", "def fullDebug():\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n print(\"___________________________\")\r\n print(\"\\n*Current Input:\", CurrentInput)\r\n print(\"*Current State: \", CurrentState)\r\n print(\"\\n*Response Options: \", RESPONSEOPTIONS)\r\n print(\"___________________________\")", "def declaration(self) -> global___Statement.Declaration:", "def printhelp():", "def NeedsDebugInfo(self):\n return False", "def init_structs(self):\n self.v6_struct = struct.Struct(V6_STRUCT_STRING)\n self.v5_struct = struct.Struct(V5_STRUCT_STRING)\n self.v4_struct = struct.Struct(V4_STRUCT_STRING)\n self.v3_struct = struct.Struct(V3_STRUCT_STRING)", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Demand/Load:')\n print(pre+' demands:', 
self._demands)\n print(pre+' penalty:', self._penalty)\n print(pre+' capacity:', self._capacity)", "def is_struct(self):\n return False", "def genStruct(self, typeinfo, typeName, alias):\n OutputGenerator.genStruct(self, typeinfo, typeName, alias)\n\n if alias:\n # Add name -> alias mapping\n self.addName(self.alias, typeName, alias)\n else:\n # May want to only emit definition on this branch\n True\n\n members = [member.text for member in typeinfo.elem.findall('.//member/name')]\n self.structs[typeName] = members\n memberTypes = [member.text for member in typeinfo.elem.findall('.//member/type')]\n for member_type in memberTypes:\n self.addMapping(typeName, member_type)", "def Structs():\n idx = idc.get_first_struc_idx()\n while idx != ida_idaapi.BADADDR:\n sid = idc.get_struc_by_idx(idx)\n yield (idx, sid, idc.get_struc_name(sid))\n idx = idc.get_next_struc_idx(idx)", "def dump_schemed_data(obj):\n out = OrderedDict()\n\n # Check all interfaces provided by the object\n ifaces = obj.__provides__.__iro__\n\n # Check fields from all interfaces\n for iface in ifaces:\n fields = zope.schema.getFieldsInOrder(iface)\n for name, field in fields:\n # ('header', <zope.schema._bootstrapfields.TextLine object at 0x1149dd690>)\n out[name] = getattr(obj, name, None)\n\n return out", "def nice(self):\n print(self.getName(), \":\", self.getLen())", "def print_tree(self):\n\t\tprint(self.__print_tree('', True, ''))" ]
[ "0.59062266", "0.5870518", "0.57124054", "0.56050116", "0.55951077", "0.55576754", "0.55329996", "0.550045", "0.546557", "0.5464527", "0.5442078", "0.5435242", "0.5433921", "0.5400498", "0.53979063", "0.5391494", "0.5334889", "0.5334051", "0.53326344", "0.53083193", "0.53073317", "0.5286873", "0.52846456", "0.5269945", "0.5267612", "0.52650416", "0.52574456", "0.5252534", "0.5249628", "0.5240753", "0.5231248", "0.521654", "0.52153915", "0.5211494", "0.5211096", "0.52040845", "0.5194274", "0.5192642", "0.518774", "0.518733", "0.51777303", "0.5175592", "0.5175592", "0.5167889", "0.5160526", "0.515947", "0.5153595", "0.5152362", "0.5145556", "0.5141514", "0.5135924", "0.5122209", "0.51137054", "0.5111137", "0.51104397", "0.5108345", "0.510727", "0.510727", "0.51071596", "0.5100749", "0.5100749", "0.51004815", "0.5091908", "0.5082973", "0.50748605", "0.5063387", "0.5062307", "0.50621605", "0.5053415", "0.50533533", "0.5049898", "0.50478065", "0.50467545", "0.50457895", "0.5045779", "0.5043018", "0.504216", "0.50399405", "0.5033593", "0.503024", "0.50294036", "0.50271654", "0.50229937", "0.5022032", "0.501903", "0.50186956", "0.5015517", "0.50140923", "0.50020903", "0.49950358", "0.49940598", "0.49910685", "0.4983827", "0.498004", "0.4967456", "0.49650374", "0.49631745", "0.4961801", "0.4954624", "0.49497283" ]
0.5306634
21
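Together, the fields above make up one complete retrieval record: a docstring query, its positive code document, a list of hard-negative documents, an index-aligned list of their similarity scores, and finally the positive document's own score and rank. A minimal sketch of how such a record could be consumed is shown below; the toy row is a stand-in with the same shape, not an excerpt from the data, and the index alignment of negatives and negative_scores is an assumption consistent with their matching lengths.

# Minimal reader sketch for one record of this dataset. The toy `row`
# below mimics the record shape; real rows come from the dataset file.
# `negatives` and `negative_scores` are assumed to be index-aligned.
row = {
    "query": "example natural-language docstring",
    "document": "def example(self, value):\n    self._value = value",
    "negatives": [
        "def other_a(self):\n    pass",
        "def other_b(self):\n    pass",
    ],
    "negative_scores": ["0.59", "0.53"],
    "document_score": "0.53",
    "document_rank": "21",
}

def ranked_negatives(record):
    # Pair each negative with its score and sort by similarity, descending.
    pairs = zip(record["negatives"], map(float, record["negative_scores"]))
    return sorted(pairs, key=lambda p: p[1], reverse=True)

for doc, score in ranked_negatives(row):
    print(f"{score:.4f}  {doc.splitlines()[0]}")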
Generation from a statement node. This method exists as a wrapper for individual visit_* methods to handle different treatment of some statements in this context.
def _generate_stmt(self, n, add_indent=False):
    typ = type(n)
    if add_indent:
        self.indent_level += 2
    indent = self._make_indent()
    if add_indent:
        self.indent_level -= 2

    if typ in (
            c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp,
            c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef,
            c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef,
            c_ast.ExprList):
        # These node types can also appear in an expression context, so
        # their visit_* methods never append a semicolon; it is added here
        # instead.
        #
        # Emit the expression only if it is part of the slice; otherwise
        # leave an empty block as a placeholder.
        if n.sliced:
            return indent + self.visit(n) + ';\n'
        else:
            return indent + '{}\n'
    elif typ in (c_ast.Compound,):
        # No extra indentation required before the opening brace of a
        # compound - because it consists of multiple lines, it has to
        # compute its own indentation.
        return self.visit(n)
    else:
        # Statements that terminate themselves (if, for, while, ...):
        # emit them only when they belong to the slice, drop them otherwise.
        if n.sliced:
            return indent + self.visit(n) + '\n'
        else:
            return ''
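The method above keeps the visit_* methods context-free and concentrates all statement-level decisions (terminator, slice placeholder, delegation) in one wrapper. A self-contained sketch of the same pattern with toy node classes follows; the class and method names are illustrative only and are not pycparser's c_ast API.

# Self-contained sketch of the wrapper pattern with toy node classes.
# The wrapper decides, per statement, whether to append ';', emit an
# empty placeholder block, or delegate, while visit_* stays context-free.
class Assignment:
    def __init__(self, text, sliced=True):
        self.text = text
        self.sliced = sliced

class Compound:
    def __init__(self, stmts):
        self.stmts = stmts
        self.sliced = True

class ToyGenerator:
    def __init__(self):
        self.indent_level = 0

    def visit(self, node):
        # Same dispatch convention: visit_<ClassName>.
        return getattr(self, 'visit_' + type(node).__name__)(node)

    def visit_Assignment(self, n):
        return n.text  # expression context: no semicolon added here

    def visit_Compound(self, n):
        indent = ' ' * self.indent_level
        body = ''.join(self._generate_stmt(s, add_indent=True)
                       for s in n.stmts)
        return indent + '{\n' + body + indent + '}\n'

    def _generate_stmt(self, n, add_indent=False):
        if add_indent:
            self.indent_level += 2
        indent = ' ' * self.indent_level
        if add_indent:
            self.indent_level -= 2
        if isinstance(n, Compound):
            return self.visit(n)  # computes its own indentation
        if n.sliced:
            return indent + self.visit(n) + ';\n'
        return indent + '{}\n'  # placeholder for statements outside the slice

print(ToyGenerator().visit(Compound([
    Assignment('x = 1'),
    Assignment('y = 2', sliced=False),
])))

Running the sketch prints a brace-delimited block in which the unsliced assignment has been replaced by an empty {} placeholder, mirroring the slice-aware behavior of the document above.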
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_stmt(self, statement):\n fn_map = {\n statements.If: self.gen_if,\n statements.While: self.gen_while,\n statements.DoWhile: self.gen_do_while,\n statements.For: self.gen_for,\n statements.Switch: self.gen_switch,\n statements.Goto: self.gen_goto,\n statements.Break: self.gen_break,\n statements.Continue: self.gen_continue,\n statements.Return: self.gen_return,\n statements.Label: self.gen_label,\n statements.Case: self.gen_case,\n statements.RangeCase: self.gen_range_case,\n statements.Default: self.gen_default,\n statements.Empty: self.gen_empty_statement,\n statements.Compound: self.gen_compound_statement,\n statements.InlineAssemblyCode: self.gen_inline_assembly,\n statements.ExpressionStatement: self.gen_expression_statement,\n statements.DeclarationStatement: self.gen_declaration_statement,\n }\n if type(statement) in fn_map:\n with self.builder.use_location(statement.location):\n fn_map[type(statement)](statement)\n else: # pragma: no cover\n raise NotImplementedError(str(statement))", "def _process_stmt(self, node: ast.stmt) -> None:\n if isinstance(node, (ast.ClassDef, ast.FunctionDef)):\n self._process_def(node)\n elif isinstance(node, ast.Assign):\n self._process_assign(node)\n elif isinstance(node, ast.Expr):\n self._process_expr(node)\n else:\n self.visit(node)", "def _analyse_stmt_Expr(self, statement: ast.Expr, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def gen_expression_statement(self, statement):\n self.gen_expr(statement.expression, rvalue=True)\n # TODO: issue a warning when expression result is non void?", "def _analyse_stmt_Pass(self, statement: ast.Pass, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next)", "def _analyse_statements(\n self, statements: List[ast.stmt], *, next: CFNode\n ) -> CFNode:\n for statement in reversed(statements):\n analyse = getattr(self, \"_analyse_stmt_\" + type(statement).__name__)\n next = analyse(statement, next=next)\n return next", "def visit(self, node):\n # inline self.node_context.add and the superclass's visit() for performance\n method = self._method_cache[type(node)]\n self.node_context.contexts.append(node)\n try:\n # This part inlines ReplacingNodeVisitor.visit\n if isinstance(node, ast.stmt):\n # inline qcore.override here\n old_statement = self.current_statement\n try:\n self.current_statement = node\n ret = method(node)\n finally:\n self.current_statement = old_statement\n else:\n ret = method(node)\n except node_visitor.VisitorError:\n raise\n except Exception as e:\n self.show_error(\n node,\n \"%s\\nInternal error: %r\" % (traceback.format_exc(), e),\n error_code=ErrorCode.internal_error,\n )\n ret = UNRESOLVED_VALUE\n finally:\n self.node_context.contexts.pop()\n if ret is None:\n ret = UNRESOLVED_VALUE\n if self.annotate:\n node.inferred_value = ret\n return ret", "def process_stmt(self, stmt):\n if isinstance(stmt, (ast.While, ast.If)):\n self.process_branch(stmt)\n elif isinstance(stmt, ast.Expr):\n self.process_expr(stmt)\n elif isinstance(stmt, ast.Assign):\n self.process_assign(stmt)\n elif isinstance(stmt, ast.Break):\n self.breaks.append(self.curr_block)\n elif isinstance(stmt, ast.Continue):\n self.continues.append(self.curr_block)\n else:\n # self.replacer.visit(stmt)\n # Append a normal statement to the current block\n self.curr_block.add(stmt)", "def Statement(self):\n if self.currtok[1].name == \"SEMI\":\n self.currtok = next(self.tg)\n return semicolon()\n if self.currtok[1].name == \"LCURLY\":\n return self.Block()\n if 
self.currtok[1].name == \"IDENT\":\n if self.functions.get(self.currtok[0]) is None:\n return self.Assignment()\n else:\n return self.FunctionCall()\n if self.currtok[1].name == \"if\":\n return self.IfStatement()\n if self.currtok[1].name == \"print\":\n return self.PrintStmt()\n if self.currtok[1].name == \"while\":\n return self.WhileStatement()\n if self.currtok[1].name == \"return\":\n return self.ReturnStmt()\n\n raise SLUCSyntaxError(\"ERROR: Unexpected token {0} on line {1}\".\n format(self.currtok[1], str(self.currtok[2] - 1)))", "def Statements(self):\n states = list()\n while self.currtok[1].name in {\"SEMI\", \"LCURLY\", \"IDENT\", \"if\", \"print\", \"while\", \"return\"}:\n state = self.Statement()\n states.append(state)\n return StatementsStmt(states)", "def generic_visit(self, node: ast.AST) -> None:", "def compile_statements(self):\n\t\n\t\tif self.tokenizer.get_token() == 'do':\n\t\t\tself.compile_do()\n\t\telif self.tokenizer.get_token() == 'let':\n\t\t\tself.compile_let()\n\t\telif self.tokenizer.get_token() == 'while':\n\t\t\tself.compile_while()\n\t\telif self.tokenizer.get_token() == 'return':\n\t\t\tself.compile_return()\n\t\telif self.tokenizer.get_token() == 'if':\n\t\t\tself.compile_if()", "def swap(self, node):\n new_node = self.choose_statement()\n\n if isinstance(new_node, ast.stmt):\n # The source `if P: X` is added as `if P: pass`\n if hasattr(new_node, 'body'):\n new_node.body = [ast.Pass()]\n if hasattr(new_node, 'orelse'):\n new_node.orelse = []\n if hasattr(new_node, 'finalbody'):\n new_node.finalbody = []\n\n # ast.copy_location(new_node, node)\n return new_node", "def run(self, statement):\n\t\tassert statement is not None\n\t\tif isinstance(statement, AST):\n\t\t\tstatement_code = to_source(statement)\n\t\ttry:\n\t\t\texec statement_code in self._globals, self._locals\n\t\texcept Exception, e:\n\t\t\tprint 'Statement raised error:', str(e), 'in', statement_code, 'in context', id(self)\n\t\t\treturn\n\t\tself._number_of_statements += 1\n\t\tself._number_of_lines += statement_code.count(newline)\n\t\treturn statement\n\t\t# TODO : number of lines", "def visit_expr_stmt(self: Parser, node: doc.Expr) -> None:\n\n res = self.eval_expr(node.value)\n if res is None:\n pass\n elif isinstance(res, Frame):\n res.add_callback(partial(res.__exit__, None, None, None))\n res.__enter__()\n elif isinstance(res, PrimExpr):\n T.evaluate(res)\n elif isinstance(res, (int, bool)):\n T.evaluate(tvm.tir.const(res))\n elif isinstance(res, tvm.relay.Call) and not res.args:\n # Using GlobalVar.__call__ with no arguments is ambiguous, as\n # each IR has a different function Call representation. 
If\n # this occurs, convert to the TIR representation.\n T.evaluate(tvm.tir.call_tir(res.op))\n elif isinstance(res, str):\n # Ignore docstrings\n pass\n else:\n self.report_error(node, f\"Parsing resulted in unexpected type {type(res)}\")", "def body(self, statements):\n for stmt in statements:\n self.current_context = None\n self.visit(stmt)\n if self.current_label is not None:\n # Create a noop statement to hold the last label:\n self.create_stmt(dast.NoopStmt, statements[-1], nopush=True)", "def gen_declaration_statement(self, statement):\n declaration = statement.declaration\n if isinstance(declaration, declarations.VariableDeclaration):\n if declaration.storage_class == \"static\":\n self.gen_local_static_variable(declaration)\n else:\n self.gen_local_variable(declaration)\n elif isinstance(declaration, declarations.FunctionDeclaration):\n # Ehm, okay, we have declared a function, no worries.\n # Just ensure that it does not have a body.\n assert not declaration.body\n else:\n raise NotImplementedError(str(declaration))", "def gen_compound_statement(self, statement) -> None:\n for inner_statement in statement.statements:\n self.gen_stmt(inner_statement)", "def statement( ): # parse rountin for statement that makes sure the token is one of the following, eventually there will be an error caught\n\n\ttok = tokens.peek( )\n\tif debug: print( \"statement: \", tok )\n\tif tok == \"if\":\n\t\tstat = parseIfStatement( )\n\t\treturn stat\n\telif tok == \"while\":\n\t\tstat = parseWhileStatement( )\n\t\treturn stat\n\telse: \n\t\tstat = parseAssign( )\n\t\treturn stat", "def visit_simple_stmt(self, node: Node) -> Iterator[Line]:\n prev_type: Optional[int] = None\n for child in node.children:\n if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child):\n wrap_in_parentheses(node, child, visible=False)\n prev_type = child.type\n\n is_suite_like = node.parent and node.parent.type in STATEMENT\n if is_suite_like:\n if (\n self.mode.is_pyi or Preview.dummy_implementations in self.mode\n ) and is_stub_body(node):\n yield from self.visit_default(node)\n else:\n yield from self.line(+1)\n yield from self.visit_default(node)\n yield from self.line(-1)\n\n else:\n if (\n not (self.mode.is_pyi or Preview.dummy_implementations in self.mode)\n or not node.parent\n or not is_stub_suite(node.parent)\n ):\n yield from self.line()\n yield from self.visit_default(node)", "def statement_eval(node, table):\n\n if node.kind == \"MOD_OP\":\n table = mod_op_eval(node, table)\n\n elif node.kind == \"SWAP_OP\":\n table = swap_op_eval(node, table)\n\n elif node.kind == \"FROM_LOOP\":\n block_node = node.block\n\n # TODO: check start condition\n\n while True:\n # Execute the block.\n table = block_eval(block_node, table)\n\n # Break if the end condition is satisfied.\n if expr_eval(node.end_condition, table):\n break\n\n elif node.kind == \"FOR_LOOP\":\n var_dec = node.var_declaration\n until_node = node.end_condition\n increment_node = node.increment_statement\n\n # Initialize the variable.\n table[var_dec.name] = expr_eval(var_dec.expr, table)\n\n while True:\n # Execute the block and increment statement.\n if not node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n \n table = block_eval(node.block, table)\n\n if node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n\n # Break if the end condition is satisfied.\n if table.refs[until_node.name] == expr_eval(until_node.expr, table):\n break\n\n table = var_condition_eval(until_node, table)\n\n elif node.kind == \"IF\":\n # Check the 
condition; if it fails, execute the\n # 'false' branch if it exists.\n\n if expr_eval(node.condition, table):\n table = block_eval(node.true, table)\n elif \"false\" in node.data:\n table = block_eval(node.false, table)\n\n elif node.kind == \"DO/UNDO\":\n # Do the action_block, then do the yielding block,\n # then undo the action block.\n table = block_eval(node.action_block, table)\n\n if \"yielding_block\" in node.data:\n table = block_eval(node.yielding_block, table)\n\n table = block_eval(inverter.unblock(node.action_block), table)\n\n elif node.kind == \"RESULT\":\n # Overwrites the variable 'result' with the given expression.\n table[\"result\"] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_DEC\":\n table[node.name] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_CONDITION\":\n table = var_condition_eval(node, table)\n\n elif node.kind == \"BLOCK\":\n table = block_eval(node, table)\n\n elif node.kind == \"FUNCTION_CALL\":\n # Call the function, then update table with the results.\n function = shared.program.functions[node.name]\n\n output = function.evaluate(\n node.backwards,\n node.ref_args,\n [expr_eval(arg, table) for arg in node.ref_args],\n [expr_eval(arg, table) for arg in node.const_args]\n )\n\n # After evaluating the function, the output table will\n # contain changed variables.\n table.update_refs(output)\n\n elif node.kind == \"UN\":\n inverted_node = inverter.unstatement(node.statement)\n table = statement_eval(inverted_node, table)\n\n elif node.kind == \"EXIT\":\n if expr_eval(node.condition, table):\n # We return by raising an exception.\n raise shared.ReturnException(expr_eval(node.value, table))\n\n elif node.kind == \"ENTER\":\n # Do nothing when we actually encounter these.\n pass\n\n return table", "def build_statement(\n user: AbstractUser,\n verb: Verb,\n obj: Activity,\n context: Context,\n statement_id: Optional[uuid.UUID] = None,\n) -> Optional[Statement]:\n timestamp = timezone.now().isoformat()\n actor = _get_actor_from_user(user)\n if statement_id is None:\n statement_id = uuid.uuid4()\n if actor is None:\n logger.warning(\"Unable to get an XAPI actor definition for user %s\", user.id)\n return None\n return Statement(\n actor=actor,\n context=context,\n id=statement_id,\n object=obj,\n timestamp=timestamp,\n verb=verb,\n )", "def link_stmt(self, stmt):\n if stmt.kind == PTN.EXPR_STMT:\n self.link_expr(stmt.expr)\n elif stmt.kind == PTN.COMP_STMT:\n self.link_comp_stmt(stmt)\n elif stmt.kind == PTN.IF_STMT:\n self.link_if_stmt(stmt)\n elif stmt.kind == PTN.WHILE_STMT:\n self.link_while_stmt(stmt)\n elif stmt.kind == PTN.RET_STMT:\n if stmt.val is not None:\n self.link_expr(stmt.val)\n elif stmt.kind == PTN.WRITE_STMT:\n self.link_expr(stmt.expr)", "def from_statement(cls, statement):\r\n return cls('\\n'.join(textwrap.dedent(statement).splitlines()[1:]))", "def _ast_node(self, statement: ast.AST, **edges: CFNode) -> CFNode:\n node = CFNode(ast_node=statement)\n self._graph.add_node(node, edges=edges)\n return node", "def statements(self):\n node = self.annotated_ast_node\n nodes_subtexts = list(_split_code_lines(node.body, self.text))\n if nodes_subtexts == [(self.ast_node.body, self.text)]:\n # This block is either all comments/blanks or a single statement\n # with no surrounding whitespace/comment lines. 
Return self.\n return (PythonStatement._construct_from_block(self),)\n cls = type(self)\n statement_blocks = [\n cls.__construct_from_annotated_ast(subnodes, subtext, self.flags)\n for subnodes, subtext in nodes_subtexts]\n # Convert to statements.\n statements = []\n for b in statement_blocks:\n statement = PythonStatement._construct_from_block(b)\n statements.append(statement)\n # Optimization: set the new sub-block's ``statements`` attribute\n # since we already know it contains exactly one statement, itself.\n assert 'statements' not in b.__dict__\n b.statements = (statement,)\n return tuple(statements)", "def visit(self, node):", "def visit(self, node):", "def process_statement (lx,wlist,fb):\n # Grammar for the statement language is:\n # S -> P is AR Ns | P is A | P Is | P Ts P\n # AR -> a | an\n # We parse this in an ad hoc way.\n msg = add_proper_name (wlist[0],lx)\n if (msg == ''):\n if (wlist[1] == 'is'):\n if (wlist[2] in ['a','an']):\n lx.add (wlist[3],'N')\n fb.addUnary ('N_'+wlist[3],wlist[0])\n else:\n lx.add (wlist[2],'A')\n fb.addUnary ('A_'+wlist[2],wlist[0])\n else:\n stem = verb_stem(wlist[1])\n if (len(wlist) == 2):\n lx.add (stem,'I')\n fb.addUnary ('I_'+stem,wlist[0])\n else:\n msg = add_proper_name (wlist[2],lx)\n if (msg == ''):\n lx.add (stem,'T')\n fb.addBinary ('T_'+stem,wlist[0],wlist[2])\n return msg", "def process_statement (lx,wlist,fb):\n # Grammar for the statement language is:\n # S -> P is AR Ns | P is A | P Is | P Ts P\n # AR -> a | an\n # We parse this in an ad hoc way.\n msg = add_proper_name (wlist[0],lx)\n if (msg == ''):\n if (wlist[1] == 'is'):\n if (wlist[2] in ['a','an']):\n lx.add (wlist[3],'N')\n fb.addUnary ('N_'+wlist[3],wlist[0])\n else:\n lx.add (wlist[2],'A')\n fb.addUnary ('A_'+wlist[2],wlist[0])\n else:\n stem = verb_stem(wlist[1])\n if (len(wlist) == 2):\n lx.add (stem,'I')\n fb.addUnary ('I_'+stem,wlist[0])\n else:\n msg = add_proper_name (wlist[2],lx)\n if (msg == ''):\n lx.add (stem,'T')\n fb.addBinary ('T_'+stem,wlist[0],wlist[2])\n return msg", "def Statement(self):\n t = self.token\n if t.stmt_begin:\n self._advance()\n return t.std()\n ex = self.expression(0)\n self._advance([\"NEWLINE\", \"END\", \"DEDENT\"])\n return ex", "def stmts_to_stmt(statements):\n if len(statements) == 1:\n return statements[0]\n array = FakeArray(statements, arr_type=pr.Array.NOARRAY)\n return FakeStatement([array])", "def irgen_stmt(stmt, builder, table):\n irgen = dict({\"ret\" : irgen_ret,\n \"continue\" : irgen_continue,\n \"break\" : irgen_break,\n \"call\" : irgen_proc,\n \"assign\" : irgen_assign,\n \"skip\" : irgen_skip,\n })\n \n operator = stmt.operator\n irgen[operator](stmt, builder, table)", "def dfs_visit(self, node):\n super(MutantGenerator, self).generic_visit(node)", "def onStatement(self, match):\n\t\treturn self.process(match[0])", "def create_stmt(self, stmtcls, ast, params=None, nopush=False):\n if params is None:\n stmtobj = stmtcls(parent=self.current_parent, ast=ast)\n else:\n stmtobj = stmtcls(parent=self.current_parent, ast=ast, **params)\n stmtobj.label = self.current_label\n self.current_label = None\n\n if self.current_block is None or self.current_parent is None:\n self.error(\"Statement not allowed in this context.\", ast)\n else:\n self.current_block.append(stmtobj)\n if not nopush:\n self.push_state(stmtobj)\n return stmtobj", "def _analyse_stmt_Global(self, statement: ast.Global, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next)", "def interpret(self):\n tree = self.parser.parse()\n if tree is 
None:\n return ''\n self.visit(tree)", "def _analyse_stmt_For(self, statement: ast.For, *, next: CFNode) -> CFNode:\n return self._analyse_loop(statement, next=next)", "def __post_init__(self) -> None:\n self.current_line = Line(mode=self.mode)\n\n v = self.visit_stmt\n Ø: Set[str] = set()\n self.visit_assert_stmt = partial(v, keywords={\"assert\"}, parens={\"assert\", \",\"})\n self.visit_if_stmt = partial(\n v, keywords={\"if\", \"else\", \"elif\"}, parens={\"if\", \"elif\"}\n )\n self.visit_while_stmt = partial(v, keywords={\"while\", \"else\"}, parens={\"while\"})\n self.visit_for_stmt = partial(v, keywords={\"for\", \"else\"}, parens={\"for\", \"in\"})\n self.visit_try_stmt = partial(\n v, keywords={\"try\", \"except\", \"else\", \"finally\"}, parens=Ø\n )\n self.visit_except_clause = partial(v, keywords={\"except\"}, parens={\"except\"})\n self.visit_with_stmt = partial(v, keywords={\"with\"}, parens={\"with\"})\n self.visit_classdef = partial(v, keywords={\"class\"}, parens=Ø)\n self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)\n self.visit_return_stmt = partial(v, keywords={\"return\"}, parens={\"return\"})\n self.visit_import_from = partial(v, keywords=Ø, parens={\"import\"})\n self.visit_del_stmt = partial(v, keywords=Ø, parens={\"del\"})\n self.visit_async_funcdef = self.visit_async_stmt\n self.visit_decorated = self.visit_decorators\n\n # PEP 634\n self.visit_match_stmt = self.visit_match_case\n self.visit_case_block = self.visit_match_case", "def gen_empty_statement(self, statement) -> None:\n pass", "def generic_visit(self, node):\n\n visit_method_name = 'visit_' + node.__class__.__name__\n if hasattr(self, visit_method_name):\n method = getattr(self, visit_method_name)\n method(node)\n\n return node", "def visit(self, node):\n method_name = 'visit_' + type(node).__name__\n visit_method = getattr(self, method_name, self.generic_visit)\n return visit_method(node)", "def verbatim(self, stmt, suppress=False):\n if not suppress:\n self.statements.append(stmt)\n\n return stmt", "def dfs_ast(func):\n def wrapper(self, node):\n new_node = func(self, node)\n for child in ast.iter_child_nodes(new_node):\n self.visit(child)\n return new_node\n\n return wrapper", "def statements(self):\n\n while self.token.value not in ('EOF', 'else', 'end'):\n\n with self.resync('\\n', consume=True):\n self.statement()\n\n if not self.match(Tokens.SYMBOL, \";\"):\n self.error(\"expected ';' after statement \", token=self.prev_token, after_token=True)\n\n # consume the 'end' token if there is one\n self.match(Tokens.KEYWORD, 'end')", "def compile_else(self):\n\n\t\txml = self.tokenizer.keyword() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\txml = '</statements>\\n' + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)", "def transform_statements(f, body):\n def rec(tree):\n # TODO: brittle, may need changes as the Python AST evolves. 
Better ways to do this?\n if type(tree) in (FunctionDef, AsyncFunctionDef, ClassDef, With, AsyncWith):\n tree.body = rec(tree.body)\n elif type(tree) in (If, For, While, AsyncFor):\n tree.body = rec(tree.body)\n tree.orelse = rec(tree.orelse)\n elif type(tree) is Try:\n tree.body = rec(tree.body)\n tree.orelse = rec(tree.orelse)\n tree.finalbody = rec(tree.finalbody)\n for handler in tree.handlers:\n handler.body = rec(handler.body)\n elif type(tree) is list: # multiple-statement body in AST\n return [output_stmt for input_stmt in tree for output_stmt in rec(input_stmt)]\n # A single statement. Transform it.\n replacement = f(tree)\n if not isinstance(replacement, list):\n raise TypeError(\"`f` must return a list of statements, got {} with value '{}'\".format(type(replacement), replacement)) # pragma: no cover\n return replacement\n return rec(body)", "def _analyse_stmt_With(self, statement: ast.With, *, next: CFNode) -> CFNode:\n return self._analyse_with(statement, next=next)", "def _build_statement(self, document, statement, homepage, user=None, user_id=None):\n\n if re.match(r\"^http(s?):\\/\\/.*\", homepage) is None:\n homepage = f\"http://{homepage}\"\n\n statement = self.build_common_statement_properties(\n statement, homepage, user=user, user_id=user_id\n )\n\n statement[\"context\"].update(\n {\"contextActivities\": {\"category\": [{\"id\": \"https://w3id.org/xapi/lms\"}]}}\n )\n\n statement[\"object\"] = {\n \"definition\": {\n \"type\": \"http://id.tincanapi.com/activitytype/document\",\n \"name\": {self.get_locale(): document.title},\n },\n \"id\": f\"uuid://{document.id}\",\n \"objectType\": \"Activity\",\n }\n\n return statement", "def parseStmt(line):\n print(\"Statement\")\n index=0\n if line[0] == 's':\n print(\"Set\")\n index += 4\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseSet(cmds)\n elif line[0] == 'h':\n exit()\n elif line[0] == 'j':\n index += 5\n if line[index] == ' ':\n print(\"Jumpt\")\n index += 1\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseJumpt(cmds)\n else:\n print(\"Jump\")\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseJump(cmds)\n else:\n print(\"Invalid Operation\")", "def _analyse_stmt_Import(self, statement: ast.Import, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def _ExecStatement(self, stmt):\n assert isinstance(stmt, pymake.parserdata.Statement)\n if isinstance(stmt, pymake.parserdata.Rule):\n name = stmt.targetexp.resolvestr(self._makefile, self._makefile.variables)\n value = stmt.depexp.resolvestr(self._makefile, self._makefile.variables)\n self._dependencies[name] = value\n elif (isinstance(stmt, pymake.parserdata.StaticPatternRule) or\n isinstance(stmt, pymake.parserdata.Command) or\n isinstance(stmt, pymake.parserdata.EmptyDirective)):\n pass # Ignore commands\n elif isinstance(stmt, pymake.parserdata.Include):\n pass # Ignore includes\n elif isinstance(stmt, pymake.parserdata.SetVariable):\n stmt.execute(self._makefile, None)\n elif isinstance(stmt, pymake.parserdata.ConditionBlock):\n for cond, children in stmt:\n if cond.evaluate(self._makefile):\n for s in children:\n self._ExecStatement(s)\n break\n else:\n assert False, 'Unknown type of statement %s' % stmt", "def generate(self, tnode, indent=\" \", extra_indent=\" \"):\n\n s = \"\"\n\n if isinstance(tnode, ast.Comment):\n s += indent + \"/*\" + tnode.text + \"*/\\n\"\n\n elif isinstance(tnode, ast.LitExp):\n s += str(tnode.val).encode(\"string-escape\")\n\n elif isinstance(tnode, ast.IdentExp):\n 
s += str(tnode.name)\n\n elif isinstance(tnode, ast.ArrayRefExp):\n s += self.generate(tnode.exp, indent, extra_indent)\n s += \"[\" + self.generate(tnode.sub, indent, extra_indent) + \"]\"\n\n elif isinstance(tnode, ast.CallExp):\n s += self.generate(tnode.exp, indent, extra_indent) + \"(\"\n s += \",\".join(\n map(lambda x: self.generate(x, indent, extra_indent), tnode.args)\n )\n s += \")\"\n\n elif isinstance(tnode, ast.CastExp):\n s += \"(\" + self.generate(tnode.castto, indent, extra_indent) + \")\"\n s += self.generate(tnode.exp, indent, extra_indent)\n\n elif isinstance(tnode, ast.UnaryExp):\n s += self.generate(tnode.exp, indent, extra_indent)\n if tnode.op_type == tnode.PLUS:\n s = \"+\" + s\n elif tnode.op_type == tnode.MINUS:\n s = \"-\" + s\n elif tnode.op_type == tnode.LNOT:\n s = \"!\" + s\n elif tnode.op_type == tnode.BNOT:\n s = \"~\" + s\n elif tnode.op_type == tnode.PRE_INC:\n s = \" ++\" + s\n elif tnode.op_type == tnode.PRE_DEC:\n s = \" --\" + s\n elif tnode.op_type == tnode.POST_INC:\n s += \"++ \"\n elif tnode.op_type == tnode.POST_DEC:\n s += \"-- \"\n elif tnode.op_type == tnode.DEREF:\n s = \"*\" + s\n elif tnode.op_type == tnode.ADDRESSOF:\n s = \"&\" + s\n elif tnode.op_type == tnode.SIZEOF:\n s = \"sizeof \" + s\n else:\n g.err(\n __name__\n + \": internal error: unknown unary operator type: %s\"\n % tnode.op_type\n )\n\n elif isinstance(tnode, ast.BinOpExp):\n s += self.generate(tnode.lhs, indent, extra_indent)\n if tnode.op_type == tnode.PLUS:\n s += \"+\"\n elif tnode.op_type == tnode.MINUS:\n s += \"-\"\n elif tnode.op_type == tnode.MULT:\n s += \"*\"\n elif tnode.op_type == tnode.DIV:\n s += \"/\"\n elif tnode.op_type == tnode.MOD:\n s += \"%\"\n elif tnode.op_type == tnode.LT:\n s += \"<\"\n elif tnode.op_type == tnode.GT:\n s += \">\"\n elif tnode.op_type == tnode.LE:\n s += \"<=\"\n elif tnode.op_type == tnode.GE:\n s += \">=\"\n elif tnode.op_type == tnode.EE:\n s += \"==\"\n elif tnode.op_type == tnode.NE:\n s += \"!=\"\n elif tnode.op_type == tnode.LOR:\n s += \"||\"\n elif tnode.op_type == tnode.LAND:\n s += \"&&\"\n elif tnode.op_type == tnode.EQ:\n s += \"=\"\n elif tnode.op_type == tnode.PLUSEQ:\n s += \"+=\"\n elif tnode.op_type == tnode.MINUSEQ:\n s += \"-=\"\n elif tnode.op_type == tnode.MULTEQ:\n s += \"*=\"\n elif tnode.op_type == tnode.DIVEQ:\n s += \"/=\"\n elif tnode.op_type == tnode.MODEQ:\n s += \"%=\"\n elif tnode.op_type == tnode.COMMA:\n s += \",\"\n elif tnode.op_type == tnode.BOR:\n s += \"|\"\n elif tnode.op_type == tnode.BAND:\n s += \"&\"\n elif tnode.op_type == tnode.BXOR:\n s += \"^\"\n elif tnode.op_type == tnode.BSHL:\n s += \"<<\"\n elif tnode.op_type == tnode.BSHR:\n s += \">>\"\n elif tnode.op_type == tnode.BSHLEQ:\n s += \"<<=\"\n elif tnode.op_type == tnode.BSHREQ:\n s += \">>=\"\n elif tnode.op_type == tnode.BANDEQ:\n s += \"&=\"\n elif tnode.op_type == tnode.BXOREQ:\n s += \"^=\"\n elif tnode.op_type == tnode.BOREQ:\n s += \"|=\"\n elif tnode.op_type == tnode.DOT:\n s += \".\"\n elif tnode.op_type == tnode.SELECT:\n s += \"->\"\n else:\n g.err(\n __name__\n + \": internal error: unknown binary operator type: %s\"\n % tnode.op_type\n )\n s += self.generate(tnode.rhs, indent, extra_indent)\n\n elif isinstance(tnode, ast.TernaryExp):\n s += self.generate(tnode.test, indent, extra_indent) + \"?\"\n s += self.generate(tnode.true_exp, indent, extra_indent) + \":\"\n s += self.generate(tnode.false_exp, indent, extra_indent)\n\n elif isinstance(tnode, ast.ParenExp):\n s += \"(\" + self.generate(tnode.exp, indent, 
extra_indent) + \")\"\n\n elif isinstance(tnode, ast.CompStmt):\n s += indent + \"{\\n\"\n for stmt in tnode.kids:\n s += self.generate(stmt, indent + extra_indent, extra_indent)\n s += indent + \"}\\n\"\n\n elif isinstance(tnode, ast.ExpStmt):\n s += indent + self.generate(tnode.exp, indent, extra_indent) + \";\\n\"\n\n elif isinstance(tnode, ast.IfStmt):\n s += (\n indent + \"if (\" + self.generate(tnode.test, indent, extra_indent) + \") \"\n )\n if isinstance(tnode.true_stmt, ast.CompStmt):\n tstmt_s = self.generate(tnode.true_stmt, indent, extra_indent)\n s += tstmt_s[tstmt_s.index(\"{\") :]\n if tnode.false_stmt:\n s = s[:-1] + \" else \"\n else:\n s += \"\\n\"\n s += self.generate(tnode.true_stmt, indent + extra_indent, extra_indent)\n if tnode.false_stmt:\n s += indent + \"else \"\n if tnode.false_stmt:\n if isinstance(tnode.false_stmt, ast.CompStmt):\n tstmt_s = self.generate(tnode.false_stmt, indent, extra_indent)\n s += tstmt_s[tstmt_s.index(\"{\") :]\n else:\n s += \"\\n\"\n s += self.generate(\n tnode.false_stmt, indent + extra_indent, extra_indent\n )\n\n elif isinstance(tnode, ast.ForStmt):\n s += indent + \"for (\"\n if tnode.init:\n s += self.generate(tnode.init, indent, extra_indent)\n s += \"; \"\n if tnode.test:\n s += self.generate(tnode.test, indent, extra_indent)\n s += \"; \"\n if tnode.iter:\n s += self.generate(tnode.iter, indent, extra_indent)\n s += \") \"\n if isinstance(tnode.stmt, ast.CompStmt):\n stmt_s = self.generate(tnode.stmt, indent, extra_indent)\n s += stmt_s[stmt_s.index(\"{\") :]\n else:\n s += \"\\n\"\n s += self.generate(tnode.stmt, indent + extra_indent, extra_indent)\n\n elif isinstance(tnode, ast.WhileStmt):\n s += (\n indent\n + \"while (\"\n + self.generate(tnode.test, indent, extra_indent)\n + \") \"\n )\n if isinstance(tnode.stmt, ast.CompStmt):\n stmt_s = self.generate(tnode.stmt, indent, extra_indent)\n s += stmt_s[stmt_s.index(\"{\") :]\n else:\n s += \"\\n\"\n s += self.generate(tnode.stmt, indent + extra_indent, extra_indent)\n\n elif isinstance(tnode, ast.VarDec):\n if not tnode.isnested:\n s += indent\n s += \" \".join(tnode.type_name) + \" \"\n s += \", \".join(\n map(lambda x: self.generate(x, indent, extra_indent), tnode.var_inits)\n )\n if not tnode.isnested:\n s += \";\\n\"\n\n elif isinstance(tnode, ast.ParamDec):\n s += indent + str(tnode.ty) + \" \" + str(tnode.name)\n\n elif isinstance(tnode, ast.FunDec):\n s += indent + str(tnode.return_type) + \" \" + str(tnode.modifiers)\n s += tnode.name + \"(\"\n s += \", \".join(\n map(lambda x: self.generate(x, indent, extra_indent), tnode.params)\n )\n s += \")\" + self.generate(tnode.body, indent, extra_indent)\n\n elif isinstance(tnode, ast.Pragma):\n s += indent + \"#pragma \" + str(tnode.pstring) + \"\\n\"\n\n elif isinstance(tnode, ast.TransformStmt):\n g.err(\n __name__\n + \": internal error: a transformation statement is never generated as an output\"\n )\n\n else:\n g.err(\n __name__\n + \": internal error: unrecognized type of AST: %s\"\n % tnode.__class__.__name__\n )\n\n return s", "def __startTiling(self, stmt, tile_level, int_vars):\n\n # expression statement\n if isinstance(stmt, ast.ExpStmt):\n return stmt\n\n # compound statement\n elif isinstance(stmt, ast.CompStmt):\n tstmts = []\n for s in stmt.stmts:\n ts = self.__startTiling(s, tile_level, int_vars)\n if isinstance(ts, ast.CompStmt):\n tstmts.extend(ts.stmts)\n else:\n tstmts.append(ts)\n stmt.stmts = tstmts\n return stmt\n\n # if statement\n elif isinstance(stmt, ast.IfStmt):\n stmt.true_stmt = 
self.__startTiling(stmt.true_stmt, tile_level, int_vars)\n if stmt.false_stmt:\n stmt.false_stmt = self.__startTiling(\n stmt.false_stmt, tile_level, int_vars\n )\n return stmt\n\n # for loop statement\n elif isinstance(stmt, ast.ForStmt):\n\n # apply loop tiling on this loop\n tiling_results = self.__tile(stmt, tile_level, [], [], None, int_vars)\n\n # return the tiled AST\n t_stmts = []\n for is_tiled, stmts in tiling_results:\n if self.use_boundary_tiling and not is_tiled:\n new_tile_level = min(tile_level - 1, self.recursive_tile_level)\n if new_tile_level > 0:\n stmts = [\n self.__startTiling(s, new_tile_level, int_vars)\n for s in stmts\n ]\n t_stmts.extend(stmts)\n tiled_ast = ast.CompStmt(t_stmts)\n\n # return the tiled AST\n return tiled_ast\n\n # unknown statement\n else:\n err(\n \"orio.module.ortil.transformation internal error: unknown type of statement: %s\"\n % stmt.__class__.__name__\n )", "def visit_Node(self, node):\n pass", "def render_statement(statement, bind=None):\n\n if isinstance(statement, Query):\n if bind is None:\n bind = statement.session.get_bind(statement._mapper_zero_or_none())\n\n statement = statement.statement\n\n elif bind is None:\n bind = statement.bind\n\n class Compiler(bind.dialect.statement_compiler):\n\n def visit_bindparam(self, bindparam, *args, **kwargs):\n return self.render_literal_value(bindparam.value, bindparam.type)\n\n def render_literal_value(self, value, type_):\n if isinstance(value, six.integer_types):\n return str(value)\n\n elif isinstance(value, (datetime.date, datetime.datetime)):\n return \"'%s'\" % value\n\n return super(Compiler, self).render_literal_value(value, type_)\n\n return Compiler(bind.dialect, statement).process(statement)", "def gen_for(self, stmt: statements.For) -> None:\n condition_block = self.builder.new_block()\n body_block = self.builder.new_block()\n final_block = self.builder.new_block()\n iterator_block = self.builder.new_block()\n self.break_block_stack.append(final_block)\n self.continue_block_stack.append(iterator_block)\n\n # Initialization:\n if stmt.init:\n if isinstance(stmt.init, declarations.VariableDeclaration):\n self.gen_local_variable(stmt.init)\n else:\n self.gen_expr(stmt.init, rvalue=True)\n self.builder.emit_jump(condition_block)\n\n # Condition:\n self.builder.set_block(condition_block)\n if stmt.condition:\n self.gen_condition(stmt.condition, body_block, final_block)\n else:\n self.builder.emit_jump(body_block)\n\n # Body:\n self.builder.set_block(body_block)\n self.gen_stmt(stmt.body)\n self.builder.emit_jump(iterator_block)\n\n # Iterator part:\n self.builder.set_block(iterator_block)\n if stmt.post:\n self.gen_expr(stmt.post, rvalue=True)\n self.builder.emit_jump(condition_block)\n\n # Continue here:\n self.builder.set_block(final_block)\n self.break_block_stack.pop()\n self.continue_block_stack.pop()", "def gen_grammar_visitor(\n node: ParseTreeNode,\n string: str,\n left_offset: int,\n run_data: ObjectParserRun,\n result: ObjectParserResult\n) -> VisitorReturnType:\n #print(\"visiting\", node, node.rule_name, \" :: \", string)\n if node.rule_name == \"arg_identifier\":\n slice_to_parse = (left_offset, left_offset+len(string))\n arg = run_data.get_arg_by_name(node.value)\n if arg is None:\n raise ValueError(f\"Trying to parse grammar with arg {node.value} but not found\")\n delegation = run_data.left_fill_arg(arg, slice_to_parse)\n parse_return = yield delegation\n if not parse_return.parse_success:\n parse_return = parse_return.add_fail(f\"Stack Message: Fail on arg 
'{node.value}'\")\n return parse_return, v(parse_return)\n elif node.rule_name == \"str_match\":\n return _visit_str_match(node, string, left_offset, result), v()\n elif node.rule_name == \"sufix\":\n out_return, things_to_accept = yield from _visit_sufix(\n node, string, left_offset, run_data, result)\n return out_return, things_to_accept\n else:\n remaining_string = string\n new_left_offset = left_offset\n acceptables = []\n if isinstance(node, arpeggio.NonTerminal):\n for child in node:\n visitv = yield from gen_grammar_visitor(\n child, remaining_string, new_left_offset, run_data, result)\n parse_return, new_acceptables = visitv\n if not parse_return.parse_success:\n #print(\"FAIL on\", child, string)\n return parse_return, v()\n acceptables.extend(new_acceptables)\n remaining_string = parse_return.remaining_string\n new_left_offset += parse_return.remaining_right_starti\n # TODO (DNGros): Figure out what to put in as what_parsed here\n new_return = ParseDelegationReturnMetadata.create_from_substring(\n None, string, remaining_string, left_offset)\n return new_return, acceptables", "def to_source(node, indent_with=' ' * 4, add_line_information=False):\r\n generator = SourceGenerator(indent_with, add_line_information)\r\n generator.visit(node)\r\n return ''.join(generator.result)", "def visit(self, node):\n name = 'visit_%s' % node.__class__.__name__\n try:\n method = getattr(self, name)\n except AttributeError:\n method = self.default_visit\n method(node)", "def visit(self, node):\n name = 'visit_%s' % node.__class__.__name__\n try:\n method = getattr(self, name)\n except AttributeError:\n method = self.default_visit\n method(node)", "def visit_async_stmt(self, node: Node) -> Iterator[Line]:\n yield from self.line()\n\n children = iter(node.children)\n for child in children:\n yield from self.visit(child)\n\n if child.type == token.ASYNC or child.type == STANDALONE_COMMENT:\n # STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async\n # line.\n break\n\n internal_stmt = next(children)\n if Preview.improved_async_statements_handling in self.mode:\n yield from self.visit(internal_stmt)\n else:\n for child in internal_stmt.children:\n yield from self.visit(child)", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def parseStatements(inputFile):\n lex = Lexor(inputFile)\n while lex.peek() != '':\n parseStmt(lex.next())", "def insert(self, node):\n new_node = self.choose_statement()\n\n if isinstance(new_node, ast.stmt) and hasattr(new_node, 'body'):\n # Inserting `if P: X` as `if P:`\n new_node.body = [node]\n if hasattr(new_node, 'orelse'):\n new_node.orelse = []\n if hasattr(new_node, 'finalbody'):\n new_node.finalbody = []\n # ast.copy_location(new_node, node)\n return new_node\n\n # Only insert before `return`, not after it\n if isinstance(node, ast.Return):\n if isinstance(new_node, ast.Return):\n return new_node\n else:\n return [new_node, node]\n\n return [node, new_node]", "def run(self, statement):\n\n # Remove spaces and EOL\n statement = statement.strip()\n if not statement: # Empty string\n yield (None, None, None, None)\n\n # Split the sql into separate queries and run each one.\n # Unless it's saving a favorite query, in which case we\n # want to save them all together.\n if statement.startswith('\\\\fs'):\n components = [statement]\n\n else:\n components = sqlparse.split(statement)\n\n for sql in components:\n # Remove spaces, eol and semi-colons.\n sql = 
sql.rstrip(';')\n\n # \\G is treated specially since we have to set the expanded output.\n if sql.endswith('\\\\G'):\n special.set_expanded_output(True)\n sql = sql[:-2].strip()\n try: # Special command\n _logger.debug('Trying a dbspecial command. sql: %r', sql)\n cur = self.conn.cursor()\n for result in special.execute(cur, sql):\n yield result\n except special.CommandNotFound: # Regular SQL\n yield self.execute_normal_sql(sql)", "def generic_visit(self, n):\n self._add_ast_elem(n)\n for c_name, c in n.children():\n self.visit(c)", "def interpret(self):\n tree = self.parser.parse()\n if tree is None:\n return ''\n return self.visit(tree)", "def generic_visit(self, node):\n raise Exception('No visit_{} method'.format(type(node).__name__))", "def _(self, node: ModelReturn):\n val = self.visit(node.value)\n return f\"( return {val} )\"", "def compile_statements(self) -> None:\n while self._get_current_token() != '}':\n if self._get_current_token() in self.STATEMENT_TOKENS:\n getattr(self, 'compile_' + self._get_current_token())()\n else:\n raise CompilationEngineError(f\"{self._get_current_token()} is an expected token at this point\")", "def translate_call_to_sql(self, query, clause, state):\n node1 = clause[0]\n rel = clause[1]\n if rel.labels is None:\n return\n\n # load here so we get the uniquified name registered with the connection:\n self.load()\n old_graph = node1._graph_table\n old_graph_alias = node1._graph_alias\n new_graph = self.get_name()\n # create a new alias (which is fine given we have a unique table name),\n # this will transparently handle qualified graph table names:\n new_graph_alias = state.get_table_aliases(new_graph, new_graph + '_c')[0]\n node1._graph_table = new_graph\n node1._graph_alias = new_graph_alias\n # TO DO: support this in query.py:\n #state.unregister_table_alias(old_graph, old_graph_alias)\n state.register_table_alias(new_graph, new_graph_alias)\n # prevent the generation of a label restriction based on the virtual graph name:\n rel.labels = None\n # now finish translation with standard translator:\n query.pattern_clause_to_sql(clause, new_graph_alias, state)", "def _analyse_stmt_Break(self, statement: ast.Break, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=self._context[_BREAK])", "def __new__(cls, value: object, *pos_args: Any, **kw_args: Any) -> 'Statement':\n stmt = super().__new__(cls, value)\n return stmt", "def PrintStmt(self):\n args = list()\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n arg = self.PrintArg()\n args.append(arg)\n while self.currtok[1].name == \"COMMA\":\n self.currtok = next(self.tg)\n arg = self.PrintArg()\n args.append(arg)\n\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"SEMI\":\n return printstmtStmt(args)\n raise SLUCSyntaxError(\"ERROR: Missing right semicolon line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing right paren or a comma line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def process(self, statement):\n input_text = statement.text\n\n # Use the result cached by the process method if it exists\n if input_text in self.cache:\n cached_result = self.cache[input_text]\n self.cache = {}\n return cached_result\n\n # Getting the mathematical terms within the input statement\n expression = str(self.simplify_chunks(self.normalize(input_text)))\n\n response 
= Statement(text=expression)\n\n try:\n response.text += '= ' + str(\n eval(expression, {f: self.functions[f] for f in self.functions})\n )\n\n # Replace '**' with '^' for evaluated exponents\n response.text = response.text.replace('**', '^')\n\n # The confidence is 1 if the expression could be evaluated\n response.confidence = 1\n except:\n response.confidence = 0\n\n return response", "def visit(self, node, params={}):\n if not node: return\n if isinstance(node,list):\n for item in list:\n self.visit(item)\n try:\n if isinstance(node, ast.NumLitExp):\n pass\n\n elif isinstance(node, ast.StringLitExp):\n pass\n\n elif isinstance(node, ast.IdentExp):\n if params.get('in') == 'loop_header':\n if not node.name in list(self.loop_bounds.keys()):\n self.vars.add(node.name)\n\n elif isinstance(node, ast.ArrayRefExp):\n self.visit(node.exp) # array variable\n self.visit(node.sub_exp) # array index\n\n elif isinstance(node, ast.FunCallExp):\n self.visit(node.args)\n\n elif isinstance(node, ast.UnaryExp):\n self.visit(node.exp) # the operand\n\n elif isinstance(node, ast.BinOpExp):\n self.visit(node.lhs,params)\n self.visit(node.rhs,params)\n\n elif isinstance(node, ast.ParenthExp):\n self.visit(node.exp)\n\n elif isinstance(node, ast.Comment):\n pass\n\n elif isinstance(node, ast.ExpStmt):\n self.visit(node.exp)\n\n elif isinstance(node, ast.GotoStmt):\n pass\n\n elif isinstance(node, ast.CompStmt):\n for s in node.stmts:\n self.visit(s)\n\n elif isinstance(node, ast.IfStmt):\n self.visit(node.test)\n self.visit(node.true_stmt)\n self.visit(node.false_stmt)\n\n elif isinstance(node, ast.ForStmt):\n self._nest += 1\n self.loop_bounds[node.init.lhs.name] = (str(node.init.rhs),str(node.test.rhs))\n self.visit(node.init)\n self.visit(node.test,params={'in':'loop_header'})\n self.visit(node.iter,params={'in':'loop_header'})\n self.visit(node.stmt,params={'in':'loop_header'})\n if self._nest > self.maxnest: self.maxnest = self._nest\n self._nest -= 1\n\n\n elif isinstance(node, ast.TransformStmt):\n pass\n\n elif isinstance(node, ast.VarDecl):\n pass\n\n elif isinstance(node, ast.VarDeclInit):\n self.visit(node.init_exp)\n\n elif isinstance(node, ast.Pragma):\n pass\n\n elif isinstance(node, ast.Container):\n self.visit(node.ast)\n\n elif isinstance(node, ast.DeclStmt):\n for decl in node.decls:\n self.visit(decl)\n else:\n err('internal error: unrecognized type of AST: %s' % node.__class__.__name__, self)\n except Exception as e:\n err(\"Exception in node %s: %s\" % (node.__class__, e), self)", "def _analyse_stmt_FunctionDef(\n self, statement: ast.FunctionDef, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def accept(visitor):", "def accept(visitor):", "def _build_statement(self, video, statement, homepage, user=None, user_id=None):\n if re.match(r\"^http(s?):\\/\\/.*\", homepage) is None:\n homepage = f\"http://{homepage}\"\n\n statement = self.build_common_statement_properties(\n statement, homepage, user=user, user_id=user_id\n )\n\n category_id = (\n \"https://w3id.org/xapi/lms\"\n if statement[\"verb\"][\"id\"] == \"http://id.tincanapi.com/verb/downloaded\"\n else \"https://w3id.org/xapi/video\"\n )\n\n statement[\"context\"].update(\n {\"contextActivities\": {\"category\": [{\"id\": category_id}]}}\n )\n\n statement[\"object\"] = {\n \"definition\": {\n \"type\": self._get_activity_type(video),\n \"name\": {self.get_locale(): video.title},\n },\n \"id\": f\"uuid://{video.id}\",\n \"objectType\": \"Activity\",\n }\n\n return statement", "def 
parse_statements(script):\n # pylint: disable=too-many-branches\n stmt = ''\n quote = None\n for char in script:\n if quote != '--':\n stmt += char\n if quote is None:\n if char == ';':\n yield stmt.strip()\n stmt = ''\n elif char == \"'\":\n quote = \"'\"\n elif char == '\"':\n quote = '\"'\n elif char == '$':\n quote = '$'\n elif char == '-':\n quote = '-'\n elif quote in ('\"', \"'\"):\n if quote == char:\n quote = None\n elif quote == '-':\n if char == '-':\n quote = '--'\n stmt = stmt[:-2]\n else:\n quote = None\n elif quote == '--':\n if char == '\\n':\n quote = None\n elif quote.startswith('$'):\n if quote != '$' and quote.endswith('$'):\n if stmt.endswith(quote):\n quote = None\n else:\n quote += char\n stmt = stmt.strip()\n if stmt:\n yield stmt", "def complete_statement(self, line):\n if not line or (not pyparsing.Or(self.commentGrammars).setParseAction(lambda x: '').transformString(line)):\n raise EmptyStatement()\n statement = self.parsed(line)\n while statement.parsed.multilineCommand and (statement.parsed.terminator == ''):\n statement = '%s\\n%s' % (statement.parsed.raw,\n self.pseudo_raw_input(self.continuation_prompt))\n statement = self.parsed(statement)\n if not statement.parsed.command:\n raise EmptyStatement()\n return statement", "def doctest_DKBCCCsvStatementParser():", "def gen_case(self, stmt: statements.Case) -> None:\n block = self.builder.new_block()\n assert self.switch_options is not None\n value = self.context.eval_expr(stmt.value)\n if value in self.switch_options:\n self.error(\"Case defined multiple times\", stmt.location)\n self.switch_options[value] = block\n self.builder.emit_jump(block) # fall through\n self.builder.set_block(block)\n self.gen_stmt(stmt.statement)", "def _analyse_stmt_Delete(self, statement: ast.Delete, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def split_sub_statement(stream, node_types):\n \n if isinstance(stream, Node):\n stream = stream.get_inner_body()\n \n current_node = None\n \n try:\n while True:\n \n token = next(stream)\n #print('current token ', token)\n \n matched = False\n \n for node_type in node_types:\n match = Node.match_begin(node_type, token, stream)\n if match:\n \n matched = True\n previous_node = current_node\n \n # build current node\n current_node = node_type()\n current_node.children += match\n \n if previous_node:\n yield previous_node\n \n # stop looking for a match \n break\n \n # non matching token \n if not matched:\n \n if current_node:\n current_node.children.append(token)\n else:\n yield token\n except:\n pass\n\n if current_node: \n yield current_node", "def prepare(self, connection, stmt):\n return Statement(connection, stmt)", "def prepare(self, connection, stmt):\n return Statement(connection, stmt)", "def generic_visit(self, node):\n \n node.map_subcalls(self.visit)", "def _complete_statement(self, line):\n if not line or (not pyparsing.Or(self.commentGrammars).setParseAction(lambda x: '').transformString(line)):\n raise EmptyStatement()\n statement = self.parser_manager.parsed(line)\n while statement.parsed.multilineCommand and (statement.parsed.terminator == ''):\n statement = '%s\\n%s' % (statement.parsed.raw,\n self.pseudo_raw_input(self.continuation_prompt))\n statement = self.parser_manager.parsed(statement)\n if not statement.parsed.command:\n raise EmptyStatement()\n return statement", "def process_expression_ast(stmt_ast: ast.Expr, stmt_ast_parent_block):\n # first, add a reference from stmt_ast to its parent block\n 
stmt_ast.parent_block = stmt_ast_parent_block\n logger.log.info(f\"Instantiating a symbolic state for AST instance stmt_ast = {stmt_ast}\")\n # initialise empty list of symbols\n all_symbols: list = []\n # walk the ast to find the symbols used\n for walked_ast in ast.walk(stmt_ast):\n # extract information according to type\n if type(walked_ast) is ast.Name:\n all_symbols.append(walked_ast.id)\n \n # instantiate symbolic state\n logger.log.info(f\"Instantiating new StatementSymbolicState instance with symbols {all_symbols}\")\n symbolic_state: SymbolicState = StatementSymbolicState(all_symbols, stmt_ast)\n return symbolic_state", "def visit_Declaration(self, node):\n name = self.name_gen.next()\n extend_ops = self.extend_ops\n self.push_name(name)\n base_code = compile(node.base.py_ast, self.filename, mode='eval')\n extend_ops([\n # f_globals = globals()\n (LOAD_GLOBAL, 'globals'),\n (CALL_FUNCTION, 0x0000),\n (STORE_FAST, 'f_globals'),\n\n # eval_ = eval\n (LOAD_GLOBAL, 'eval'),\n (STORE_FAST, 'eval_'),\n\n # foo_cls = eval('Window', toolkit, f_globals)\n # foo = foo_cls.__enaml_call__(identifiers, toolkit)\n (LOAD_FAST, 'eval_'),\n (LOAD_CONST, base_code),\n (LOAD_FAST, 'toolkit'),\n (LOAD_FAST, 'f_globals'),\n (CALL_FUNCTION, 0x0003),\n (LOAD_ATTR, '__enaml_call__'),\n (LOAD_FAST, 'identifiers'),\n (LOAD_FAST, 'toolkit'),\n (CALL_FUNCTION, 0x0002),\n (STORE_FAST, name),\n ])\n\n if node.identifier:\n extend_ops([\n # identifiers['foo'] = foo\n (LOAD_FAST, name),\n (LOAD_FAST, 'identifiers'),\n (LOAD_CONST, node.identifier),\n (STORE_SUBSCR, None),\n ])\n \n visit = self.visit\n for item in node.body:\n visit(item)\n \n extend_ops([\n # return foo\n (LOAD_FAST, name),\n (RETURN_VALUE, None),\n ])\n\n self.pop_name()", "def visit_Compound(self, node):\n for statement in node.statements:\n self.visit(statement)", "def compile_statements(self):\r\n tok_type = self.tokenizer.token_type()\r\n while tok_type == JackTokenizer.KEYWORD_T:\r\n key = self.tokenizer.key_word()\r\n if key == \"let\":\r\n self.compile_let()\r\n elif key == \"do\":\r\n self.compile_do()\r\n elif key == \"while\":\r\n self.compile_while()\r\n elif key == \"return\":\r\n self.compile_return()\r\n else:\r\n self.compile_if()\r\n tok_type = self.tokenizer.token_type()\r\n continue\r\n self.tokenizer.advance() # ignore ';' symbol\r\n tok_type = self.tokenizer.token_type()", "def generate_return(self):\n if self._delete:\n statement = self._generate_delete_return()\n elif self._cache:\n statement = self._generate_cache_return()\n elif self._set_properties:\n statement = self._generate_set_properties_return()\n elif self._set_labels:\n statement = self._generate_set_labels_return()\n elif self._remove_labels:\n statement = self._generate_remove_labels_return()\n elif self._aggregate:\n statement = self._generate_aggregate_return()\n else:\n statement = self._generate_distinct_return()\n return statement", "def _process_expr(self, node: ast.Expr) -> None:\n if isinstance(node.value, ast.Call):\n self._process_call(node.value)\n elif isinstance(node.value, ast.Constant):\n self._process_constant(node.value)\n else:\n self.visit(node)", "def _(self, node: BinaryOp):\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n return f\"( {node.op} {left} {right} )\"", "def compile_statements(self):\r\n while self.__tokenizer.token_type() == TYPES_DIC[\"KEYWORD\"]:\r\n if self.__tokenizer.keyword() == TYPES_DIC[\"LET\"]:\r\n self.compile_let()\r\n elif self.__tokenizer.keyword() == TYPES_DIC[\"DO\"]:\r\n 
self.compile_do()\r\n elif self.__tokenizer.keyword() == TYPES_DIC[\"WHILE\"]:\r\n self.compile_while()\r\n elif self.__tokenizer.keyword() == TYPES_DIC[\"RETURN\"]:\r\n self.compile_return()\r\n elif self.__tokenizer.keyword() == TYPES_DIC[\"IF\"]:\r\n self.compile_if()" ]
[ "0.6982189", "0.6952451", "0.6714583", "0.6317924", "0.6317603", "0.6093038", "0.6031703", "0.6027475", "0.5918272", "0.5912852", "0.58984697", "0.58264", "0.58131385", "0.5812323", "0.5779691", "0.5761809", "0.57449067", "0.57259613", "0.567997", "0.5676137", "0.5661649", "0.56575805", "0.55972964", "0.55552006", "0.5507355", "0.5478496", "0.5473227", "0.5473227", "0.547289", "0.547289", "0.5464989", "0.54423773", "0.5436206", "0.5414224", "0.54126483", "0.53991157", "0.5353603", "0.53523475", "0.5348914", "0.53417736", "0.5341412", "0.53103703", "0.5305896", "0.52789736", "0.5241544", "0.5228468", "0.5209972", "0.5194691", "0.5192071", "0.5189394", "0.51890063", "0.5160249", "0.5151477", "0.5146846", "0.514615", "0.5145797", "0.51443535", "0.51416475", "0.51052463", "0.5096944", "0.50954896", "0.50954896", "0.5082283", "0.50690764", "0.50631666", "0.5062218", "0.50543624", "0.5047421", "0.504207", "0.5041155", "0.5025315", "0.50223845", "0.50074923", "0.50022614", "0.5001381", "0.49956387", "0.4993638", "0.49860743", "0.49840385", "0.4967963", "0.4967963", "0.49614072", "0.494849", "0.4943136", "0.4938408", "0.49302393", "0.4920642", "0.49181935", "0.49176317", "0.49176317", "0.49142286", "0.49058932", "0.4905169", "0.4902199", "0.48919344", "0.4870879", "0.48230034", "0.48139858", "0.4811197", "0.48105305" ]
0.64626193
3
Test the fast update code against a for loop.
def test_update():\n    learner = optlearner.VolatilityLearner()\n    for reward in [0, 1]:\n        slow_pIk = slow_update(learner, reward)\n        learner._update(reward)\n        yield npt.assert_array_equal, slow_pIk, learner.pIk\n        learner.reset()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testHasForLoop(self):\n no_foreach = build_code(['x=1'], [], ['x=3'], concise=False)\n foreach = build_code(['x=1'], ['x=2'], ['x=3'], concise=False)\n self.assertNotIn('for', no_foreach)\n self.assertIn('for', foreach)", "def test_run_loop_success(self):\n found = False\n pyint = Interpreter(limit=15)\n try:\n pyint.run(code=BF_CODE_LOOP_TWICE)\n except SystemExit: \n found = True\n self.assertFalse(found)", "def _do_iteration(self):\n return True", "def _sideffect_test_23(self, arg):\n if self.iter < 3:\n self.iter += 1\n return False\n else:\n return True", "def check_all():\r\n i = 100000\r\n while i <= 999996:\r\n if check(i):\r\n print(i)\r\n i = i + 1", "def multiple_eval_for_loops_v2():", "def test_40_for(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\tfor a:=false to 1 do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: For(Id(a)BooleanLiteral(False),IntLiteral(1),True,[])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,440))", "def multiple_eval_for_loops_v1():", "def set_test_loop(self , _iter):\n\t\tself.__test=_iter", "def algorithm_loop(self):", "def loop():\n global loop_idx\n sys.stdout.write('loop index %d/%d\\r\\n' % (loop_idx, _LOOPS))\n time.sleep(0.5)\n loop_idx += 1\n return loop_idx > _LOOPS", "def performance():\n\tn = 1024\n\twhile n < 5000000: \n\t\tsorted = range(n)\n\t\tnow = time()\n\n\t\t# Code whose performance is to be evalutated\n\t\tbs_contains(sorted, -1)\n\n\t\tdone = time()\n\n\t\tprint n, (done-now)*10000\n\t\tn *= 2", "def _loop_raw(raw):\n raise NotImplemented", "def test_39_for(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\tfor a:=1+1.5 to -2 do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: For(Id(a)BinaryOp(+,IntLiteral(1),FloatLiteral(1.5)),UnaryOp(-,IntLiteral(2)),True,[])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,439))", "def main():\n for i in range(5):\n check_row()\n put_beeeper_if_not()\n go_next_row()", "def sim_alternating():\n catches = 0\n for _ in range(100000):\n j = np.random.uniform()*1000\n # j = np.random.exponential(500)\n t_i = 0\n i = 0\n while t_i < j+100:\n if i % 2 == 0:\n t_i += 10\n else:\n t_i += 20\n if j < t_i and t_i < j+1:\n catches += 1\n i += 1\n print(catches/100000)", "def mystery1(input_val):\n global counter\n for index in range(input_val):\n for dummy_index in range(5):\n counter += 1", "def test_f(self):\n ans = [0, 0, 1, 7, 2, 5, 8, 16, 3, 19, 6]\n for i in range(1, 11):\n print(i)\n self.assertEqual(f(i), ans[i])", "def test_37_for(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\tfor a:=1 to 2 do\n\t\twith a:integer;b:boolean; do begin b:=true; break; end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: For(Id(a)IntLiteral(1),IntLiteral(2),True,[With([VarDecl(Id(a),IntType),VarDecl(Id(b),BoolType)],[AssignStmt(Id(b),BooleanLiteral(True)),Break])])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,437))", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def uniqueCheckLoop(aList):\r\n n = len(aList)\r\n for i in range(n-1):\r\n for j in range(i+1, n):\r\n if aList[i] == aList[j]:\r\n return True\r\n return False", "def test_template_forloop(self):\n print \"Running: %s - %s\" % 
(self.id(), self.shortDescription())\n # compare sequence items\n for i in range(0, 3):\n input_item = g.config['templates']['sequence'][i]\n output_item = self.output_config['out_templates']['sequence'][i]\n\n self.assertEqual(input_item, output_item,\n 'sequence items (%i) do not match' % i)", "def test_spin_loop(self):\n\n # Spin data.\n select = [1, 0]\n\n # Loop over the spins.\n i = 0\n for spin in mol_res_spin.spin_loop('@N5'):\n # Test the selection.\n self.assertEqual(spin.select, select[i])\n\n # Test the spin names.\n self.assertEqual(spin.name, 'N5')\n\n # Increment i.\n i = i + 1\n\n # Test loop length.\n self.assertEqual(i, 2)", "def method1(automaton, level):\r\n\r\n old_bad_twin = automaton\r\n i = 1\r\n while i <= level:\r\n new_bad_twin = generate_bad_twin(old_bad_twin, i)\r\n good_twin = generate_good_twin(new_bad_twin)\r\n synchronized, ambiguous_transitions = synchronize_1(new_bad_twin, good_twin)\r\n for src_name, dst_name in ambiguous_transitions:\r\n states = synchronized.get_states()\r\n if find_loops(states[dst_name], {src_name}):\r\n return i - 1\r\n old_bad_twin = new_bad_twin\r\n i += 1\r\n return True", "def test_fleur_relax_continue_converged(self, run_with_cache, mock_code_factory):\n assert False", "def test_ascending_smoke(fastonly):\n word_length = 5\n while word_length < 65:\n run_ascending_test(word_length, 3, 1000 if fastonly else 100000)\n word_length += 1", "def mbieLoop (self) :\n self.iterCnt = 0\n while self.iterCnt < 5000:\n s = self.mdp.s0\n for h in range(self.H) :\n self.QUpper = QBoundsSolver(self.mdp, self.PHat, self.QUpper, self.Ntotal, 0.1, True, self.stop)\n a = np.argmax(self.QUpper[s])\n s_, self.R[s,a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)\n s = s_\n\n if self.iterCnt % 10 == 0: \n print(self.iterCnt)\n print(self.QUpper)\n\n self.iterCnt += 1", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.Fu\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.Fu\"] = False\n\n EKFSLAM.EKFSLAM.Fu(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.Fu\"], \"The function uses the solution\"", "def skip_test(n):\n return k > 0 and magic * n * k**0.5 >= t4_ref", "def test_stress(self):\n primorial100 = 4711930799906184953162487834760260422020574773409675520188634839616415335845034221205289256705544681972439104097777157991804380284218315038719444943990492579030720635990538452312528339864352999310398481791730017201031090\n for i in range(10000):\n self.assertEqual(primorial(100), primorial100)", "def _used_after(t_, u_, i_):\n is_retained_snapshot = t_ < T - 1 and s[t_ + 1, u_] == 1\n is_used_by_successor = not all([r[t_, v] == 0 or v <= i_ for v in g.successors(u_)])\n return is_retained_snapshot or is_used_by_successor", "def trial(test):\n clear, index, byte = test\n assert len(clear) == blocksize\n\n # Handle general case\n tmp = rand(index)\n pad = padding(tmp, blocksize)\n tmp = xor(tmp + pad, clear)\n tmp[index] = byte\n assert len(tmp) == blocksize\n if not query(tmp + block):\n return False\n\n # Handle cases like above\n if index == 0:\n return True\n tmp[index - 1] ^= 0xff\n return query(tmp + block)", "def test_correct_value(self):\n self.assertTrue(py_function(6) == 36)\n self.assertFalse(py_function(5) == 9)\n for i in range(0, 10):\n self.assertTrue(py_function(i) == i**2 if i != 0 else 100)", "def test_for_statement():\n r = convert_code(\n \"{foreach $foo as $bar}content{/foreach}\")\n assert r == \"{% for bar in foo %}content{% endfor %}\"", "def 
run_tests(test_count=1000, buyer_count=10):\n\n found_error = False\n\n for i in range(test_count):\n\n bp, sp, bw = get_preferences(buyer_count)\n matches = get_matches(bp, sp, bw)\n\n if not check_stability(bp, sp, matches):\n print('ERROR!!!')\n found_error = True\n\n if not found_error:\n print('Executed {} tests without errors'.format(test_count))", "def mystery3(input_val):\n global counter\n for index in range(input_val):\n for dummy_index in range(int(1.1 ** index)):\n counter += 1", "def method2(automaton, level):\r\n\r\n old_bad_twin = automaton\r\n i = 1\r\n while i <= level:\r\n new_bad_twin = generate_bad_twin(old_bad_twin, i)\r\n c2 = condition_C2(new_bad_twin)\r\n c3 = condition_C3(new_bad_twin)\r\n if not(c2 or c3):\r\n good_twin = generate_good_twin(new_bad_twin)\r\n synchronized, ambiguous_transitions = synchronize_1(new_bad_twin, good_twin)\r\n c1 = condition_C1(ambiguous_transitions)\r\n if not c1:\r\n for src_name, dst_name in ambiguous_transitions:\r\n states = synchronized.get_states()\r\n if find_loops(states[dst_name], {src_name}):\r\n return i - 1\r\n old_bad_twin = new_bad_twin\r\n i += 1\r\n return True", "def test_random_smoke(fastonly):\n word_length = 5\n while word_length < 65:\n run_random_test(word_length, 3, 1000 if fastonly else 100000, word_length)\n word_length += 1", "def loop_run(self, loops):\n self.loop_seek(self.num_loops + loops)", "def loop(func, n):\n for i in range(n):\n func()", "def optimize(self, ngen):\n res = 0\n for res in self(ngen):\n pass\n return res", "def test_old_for_statement():\n r = convert_code(\n \"{foreach item=bar from=foo}content{/foreach}\")\n assert r == \"{% for bar in foo %}content{% endfor %}\"", "def verifyFasta(head,seq,pred):\n\treturn True", "def test_skip_next_if_vx_equals_kk(self, cpu):\n for x in range(0x0, 0xF):\n for v in range(0x0, 0xFF):\n cpu.V_register[x] = v\n for kk in range(0x0, 0xFF):\n cpu.opcode = 0x3000 | (x << 8) | kk\n for pc in [cpu.memory_start, cpu.memory_size - 4]:\n cpu.program_counter = pc\n cpu.skip_next_if_vx_equals_kk()\n if v == kk:\n assert(cpu.program_counter == pc + 2)\n else:\n assert(cpu.program_counter == pc)", "def _analyse_stmt_AsyncFor(\n self, statement: ast.AsyncFor, *, next: CFNode\n ) -> CFNode:\n return self._analyse_loop(statement, next=next)", "def test_it(self):\n self.n += 1\n if self.n >= 5:\n self.fail(\"eventually failing\")", "def testIntcodeProgram():\n\n testData = [\n {\n \"input\": [1, 0, 0, 0, 99],\n \"output\": [2, 0, 0, 0, 99]\n },\n {\n \"input\": [2, 3, 0, 3, 99],\n \"output\": [2, 3, 0, 6, 99]\n },\n {\n \"input\": [2, 4, 4, 5, 99, 0],\n \"output\": [2, 4, 4, 5, 99, 9801]\n },\n {\n \"input\": [1, 1, 1, 4, 99, 5, 6, 0, 99],\n \"output\": [30, 1, 1, 4, 2, 5, 6, 0, 99]\n },\n ]\n\n overallSuccess = True\n\n for test in testData:\n input = test['input']\n expectedResult = test['output']\n\n result = runIntcode(input.copy())\n\n if result == expectedResult:\n print (\"Testing\", input, \"... ok\")\n else:\n print (\"Testing\", input, \"... 
fail, got \", result)\n overallSuccess = False\n\n return overallSuccess", "def test(all=False):\n\n # Do the import internally, so that this function doesn't increase total\n # import time\n from iptest import run_iptestall\n run_iptestall(inc_slow=all)", "def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n _fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)", "def test_analog_in_out_loop(self):\n for v in range(0, 6, 1):\n self.l.output(ao0=v, ao1=v)\n r = self.l.input(channels=(8,8,9,9), gains=(1,4,1,4))[0]\n for i in r:\n self.assertTrue(abs(v-i) < .1,\n \"measured %g for %g\" % (i, v))", "def factorial_loop(n):\n\n pass # @todo -fix this", "def preLoopFunctions(self):\n\t\treturn", "def vectorized(self):\n return False", "def test_u_statistic(self):\n for seed in range(5):\n\n random_state = np.random.RandomState(seed)\n\n for i in range(4, self.test_max_size + 1):\n arr1 = random_state.rand(i, 1)\n arr2 = random_state.rand(i, 1)\n\n u_stat = dcor_internals._u_distance_correlation_sqr_naive(\n arr1, arr2)\n u_stat_fast = dcor_internals._u_distance_correlation_sqr_fast(\n arr1, arr2)\n\n self.assertAlmostEqual(u_stat, u_stat_fast)", "def check_pile(self, iteration):\n\n for r in range(0, self.number_radial, 1):\n for theta in range(0, self.number_angles, 1):\n\n if self.array[r][theta] < self.max_peak:\n self.array[r][theta] = self.array[r][theta]\n\n else:\n if self.force:\n self.topple_sumulate_centrifugal_force(r, theta, iteration)\n else:\n self.topple(r, theta, iteration)", "def quick_run(node_list, seeds):\n all_nodes = node_list.copy()\n for node in all_nodes:\n node.heal()\n for seed_num in seeds:\n all_nodes[seed_num].infect()\n return sum([1 if node.infected else 0 for node in all_nodes])", "def __test_set(fill, memap):\n x = datetime.now()\n for i in range(fill):\n memap.boolean_set((\"yumm\" + str(i)), i)\n y = datetime.now()\n sec = (y - x).total_seconds()\n sec_per_set = sec / fill\n micsec = sec_per_set * 1000000\n print(\"It took {} microseconds to set values set in {} seconds\".format(fill, micsec))\n return micsec", "def judge(genA: typing.Iterator[int], genB: typing.Iterator[int], steps: int) -> int:\n res = 0\n for na, nb in it.islice(zip(genA, genB), steps):\n la, lb = lower16(na), lower16(nb)\n if la == lb:\n res += 1\n return res", "def testCounting(self):\n \n clk = Signal(0)\n clock_gen = ClkDriver(clk, period=4)\n \n for i in range(1, 6):\n #print \"Testing\", i, \"bits\"\n out = Signal(intbv(0)[i:])\n prev_out = Signal(intbv(2**i - 1)[i:])\n counter = Counter(out, clk, Signal(1))\n \n # make sure it increments and wraps at modulo 2^n\n @always(clk.posedge)\n def test():\n #print out, prev_out\n self.assertEqual(int(out), int((prev_out + 1) % 2**(len(prev_out))))\n prev_out.next = out\n \n sim = Simulation(counter, clock_gen, test)\n sim.run(12 * 2**i, quiet=1)", "def _optimized(self):\n return False", "def bookkeep(self) :\n\t\tself.loopiter += 1", "def test_rand_func2(self):\n for i in range(0, 100000):\n num = random.randint(0, 32535143990)\n func2_comp(num)", "def loop(self):\n pass", "def quick_test():\n if PERIOD < 2:\n return False\n if SIZE % PERIOD != 0:\n return False\n return True", "def epidemic_finish(states, iteration):\n return np.sum(states) == 0 and iteration > 10", "def test_skip_next_if_vx_not_equals_kk(self, cpu):\n for x in range(0x0, 0xF):\n for v in range(0x0, 0xFF):\n cpu.V_register[x] = v\n for kk in range(0x0, 0xFF):\n cpu.opcode = 0x4000 | (x << 8) | kk\n for pc 
in [cpu.memory_start, cpu.memory_size - 4]:\n cpu.program_counter = pc\n cpu.skip_next_if_vx_not_equals_kk()\n if v != kk:\n assert(cpu.program_counter == pc + 2)\n else:\n assert(cpu.program_counter == pc)", "def fuzz():\n if FUZZ:\n time.sleep(random.random())", "def test_isLucky():\n\n # Test a big range of numbers.\n for x in range(-1000, 1000):\n\n # The value of 'x' is lucky number seven.\n if x == 7:\n assert isLucky(x)\n # We're not lucky.\n else:\n assert not isLucky(x)", "def test_unsized(self):\n cache = FIFOCache()\n for i in range(500):\n cache[i] = i\n for i in range(500):\n assert i in cache\n assert cache[i] == i", "def corrected_clump_tester(clump):\n tester = True\n for block in clump:\n if len(block) >= 3: # Fixed block!\n tester = False\n break\n return tester", "async def checkNewLoop(self):\n pass", "def _enable_scan_single_bytecode(code, name):\n bc = bytecode.Bytecode.from_code(code)\n Instr = bytecode.Instr\n\n # Updates LOAD_GLOBAL to LOAD_FAST when arg is name\n for instr in bc:\n if isinstance(instr, Instr) \\\n and instr.name == \"LOAD_GLOBAL\" and instr.arg == name:\n instr.set(\"LOAD_FAST\", name)\n\n # Some needed information from the first/main FOR_ITER and the heading\n # \"filter\" part of the generator expression or list/set comprehension\n for_idx = next(idx for idx, instr in enumerate(bc)\n if getattr(instr, \"name\", None) == \"FOR_ITER\")\n for_instr = bc[for_idx]\n begin_label_idx = for_idx - 1\n try:\n filter_last_idx = last(idx for idx, instr in enumerate(bc)\n if isinstance(instr, Instr)\n and instr.is_cond_jump()\n and instr.arg == begin_label_idx)\n except StopIteration:\n filter_last_idx = for_idx\n\n # Adds the block before the loop (i.e., first label) to append/add/yield\n # the first input directly from FOR_ITER and save the first \"prev\"\n # accordingly\n heading_instructions = [(\"DUP_TOP\",),\n (\"STORE_FAST\", name)] + {\n \"<listcomp>\": [(\"LIST_APPEND\", 2)],\n \"<setcomp>\": [(\"SET_ADD\", 2)],\n \"<genexpr>\": [(\"YIELD_VALUE\",),\n (\"POP_TOP\",)]\n }[bc.name]\n bc[begin_label_idx:begin_label_idx] = (\n [instr.copy() for instr in bc[for_idx:filter_last_idx + 1]] +\n [Instr(*args) for args in heading_instructions]\n )\n\n # Adds ending block that stores the result to prev before a new iteration\n loop_instructions = [\"SET_ADD\", \"LIST_APPEND\", \"YIELD_VALUE\"]\n ending_idx = next(-idx for idx, instr in enumerate(reversed(bc), 1)\n if isinstance(instr, Instr)\n and instr.name in loop_instructions)\n ending_instructions = [(\"DUP_TOP\",),\n (\"STORE_FAST\", name)]\n bc[ending_idx:ending_idx] = \\\n [Instr(*args) for args in ending_instructions]\n\n return bc.to_code()", "def testfunction(expr,n):\n \n if expr == g_plus: init, expr = R1d.SpinUp, g_plus\n elif expr == g_minus: init, expr = R1d.SpinDown, g_minus\n else: return \"error\"\n \n a = McLaurin(expr, n) \n \n bool_list = []\n for n in range(5):\n for i in range(-n,n+1):\n bool_list.append(Coef(a,n,i) == R1d.a(n,i, init))\n if bool_list[-1] == False:\n print(\"Step: \", n, \" pos: \", i)\n return all(bool_list)", "def mystery2(input_val):\n global counter\n for index in range(input_val):\n for dummy_index in range(index / 2, index):\n counter += 1", "def test_full_house_flush_ind(self):", "def _optimise(self):\n pass", "def vectorized_loops(self, data):\n\n # TODO: finish this.\n return np.add(np.multiply(data,data), data)", "def test_for_loop(self, modes, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"for int m in {}\\n\\tMeasureFock() | 
m\".format(modes)\n )\n assert np.all(\n bb._forvar[\"m\"] == np.array(modes)\n )\n assert bb.operations == [\n {'op': 'MeasureFock', 'args': [], 'kwargs': {}, 'modes': [modes[0]]},\n {'op': 'MeasureFock', 'args': [], 'kwargs': {}, 'modes': [modes[1]]},\n {'op': 'MeasureFock', 'args': [], 'kwargs': {}, 'modes': [modes[2]]}\n ]", "def test_add(self):\n\n for i in range(1, 200 + 1):\n\n for j in range(1, 200 + 1):\n\n for k in range(1, 200 + 1):\n\n value = i + j + k\n assert value == add(i, j, k)", "def test_foo(self):\n self.ran = True\n 1 / 0", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.f\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.f\"] = False\n\n EKFSLAM.EKFSLAM.f(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.f\"], \"The function uses the solution\"", "def test_unsized(self):\n cache = LRUCache()\n for i in range(500):\n cache[i] = i\n for i in range(500):\n assert i in cache\n assert cache[i] == i", "def postLoopFunctions(self):\n\t\treturn", "def oneIteration(self):\n\t\traise NotImplementedError", "def eliminating_loop_example():\n\n totals_comp = [sum(row) for row in poke_stats]\n\n return(totals_comp)", "def quick_check(self):\n #loop three times and moce the servo \n for ang in range(self.MIDPOINT - 115, self.MIDPOINT+116, 115):\n self.servo(ang)\n time.sleep(.05)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False\n #if the three-part check didn't freak out\n return True", "def test_run_loop_exc(self):\n with self.assertRaises(ExecutionException):\n pyint = Interpreter(limit=1)\n pyint.run(code=BF_CODE_LOOP_TWICE)", "def every_n_iters(self, runner: Runner, n: int):\n if runner.iter < self.start_iter:\n return True\n return (runner.iter + 1 - self.start_iter) % n == 0 if n > 0 else False", "def test_remainder(self):\n alp = list(range(5))\n targets = generate_targets(alp, 12)\n\n counts = Counter(targets)\n for item in alp:\n self.assertGreaterEqual(counts[item], 2)\n self.assertLessEqual(counts[item], 3)", "def _fp_evaluate(sequence, iteration, tolerance):\n return np.abs(sequence[iteration] - sequence[iteration - 1]) < tolerance", "def slow(newETM): #Status: Done, not tested\r\n pass", "def test_stress_change_trigger_carefully(self):\n for _i in range(config.nstress):\n self.test_change_trigger_carefully()", "def faster():\n try:\n ttsEng.faster()\n except Exception, e:\n logging.error(e)", "def verify(self, k, code, counter = -1, window=30, allowed_steps=2):\n # if counter == -1:\n # verifycode = self.hotp(k, counter)\n # else:\n for i in range(0, allowed_steps + 1):\n c = hex(int((time.time() - i * window) // window))[2:]\n while len(c) < 16:\n c = \"0\" + c\n\n verifycode = self.totp(k, c, window=window)\n if code == verifycode:\n return True\n return False", "async def handle(self, iteration):\n if not (self.ai.floating_buildings_bm and self.ai.supply_used >= 199):\n for queen in self.queens.idle:\n if self.enemies.closer_than(10, queen.position):\n self.ai.add_action(queen.attack(self.enemies.closest_to(queen.position)))\n continue\n selected = self.hatchery.closest_to(queen.position)\n if queen.energy >= 25 and not selected.has_buff(QUEENSPAWNLARVATIMER):\n self.ai.add_action(queen(EFFECT_INJECTLARVA, selected))\n continue\n elif queen.energy >= 25:\n await self.ai.place_tumor(queen)\n\n for hatch in self.hatchery.ready.noqueue:\n if not self.queens.closer_than(4, hatch):\n for queen in self.queens.idle:\n if not self.ai.townhalls.closer_than(4, queen):\n 
self.ai.add_action(queen.move(hatch.position))\n break\n\n return True", "def test_burst_loop(self):\n chans, gains, scans, rate = (10,10,10,10), (1,2,4,5), 1024, 2048\n v = [v[0] for v in self.l.burst_sync(\n channels=chans, gains=gains,\n num_scans=scans, rate=rate)]\n for vi in v:\n for r in vi:\n self.assertTrue(abs(r-2.5) < .1,\n \"%s should be cal, 2.5v\" % vi[0])", "def main():\n for i in range(4):\n fix_tower()\n if front_is_clear():\n move_to_next()", "def dance(self):\n if not self.safe_to_dance():\n return False #shutdown\n for x in range(4): \n self.shuffle()\n self.skipp()\n self.spin_dizzy()\n self.for_back()\n self.break_neck()\n self.swiggly()\n self.break_neck()\n self.backward_shimmey()", "def _cal_core_loop(tik_instance, num_data_one_loop, core_loop, ub_ori):\n align_loop = tik_instance.Scalar(\"uint64\")\n align_loop.set_as((ub_ori + num_data_one_loop - 1) // num_data_one_loop)\n with tik_instance.if_scope((align_loop - 1) * core_loop *\n num_data_one_loop >= ub_ori):\n align_loop.set_as(align_loop - 1)\n\n remainder = tik_instance.Scalar(\"uint64\")\n remainder.set_as(core_loop % align_loop)\n with tik_instance.if_scope(remainder == 0):\n remainder.set_as(align_loop)\n\n return align_loop, remainder", "def is_not_used(code):\n return 0 <= code <= 999" ]
[ "0.627312", "0.62058866", "0.61405146", "0.6024515", "0.5982735", "0.59628016", "0.5885802", "0.58731663", "0.57872266", "0.5761439", "0.5710765", "0.56505996", "0.5533442", "0.5478586", "0.54322", "0.5400814", "0.53748524", "0.5355801", "0.52825946", "0.5275815", "0.5266114", "0.5252527", "0.5249735", "0.5241025", "0.52404034", "0.5225804", "0.52198577", "0.5200932", "0.5196754", "0.51901054", "0.5159903", "0.5158452", "0.51463413", "0.5141352", "0.51397985", "0.5104258", "0.50957525", "0.5078117", "0.50774413", "0.5058233", "0.50556356", "0.5044632", "0.5032771", "0.50292546", "0.50228333", "0.5019328", "0.50109553", "0.5009949", "0.5008233", "0.49957818", "0.49943432", "0.49930772", "0.49867314", "0.49816936", "0.49717334", "0.49717063", "0.49710786", "0.49685392", "0.49633935", "0.49585044", "0.49470556", "0.49468112", "0.49453875", "0.4943481", "0.49417794", "0.49381483", "0.493767", "0.4931684", "0.49306536", "0.49299297", "0.49148062", "0.49110404", "0.49094313", "0.49051586", "0.4894071", "0.4891548", "0.48915404", "0.4889072", "0.48880666", "0.48857576", "0.4885506", "0.48801303", "0.48797977", "0.48780394", "0.48779288", "0.48692438", "0.48681083", "0.48673093", "0.48654956", "0.48652852", "0.48616365", "0.4860736", "0.4860069", "0.4858616", "0.48567498", "0.48517707", "0.4848536", "0.48462367", "0.48448652", "0.48323873" ]
0.48848116
81
This is a more or less literal translation of the original code.
def slow_update(learner, reward):\n    pIk = learner.pIk.copy()\n    k_grid = learner.k_grid\n    I_grid = learner.I_grid\n    p_grid = learner.p_grid\n    Ip1gIk = learner._I_trans\n    pp1gpIp1 = learner._p_trans\n\n    for k in xrange(k_grid.size):\n        # 1) Multiply pIk by Ip1gIk and integrate out I. This will give pIp1k\n        pIp1k = np.zeros((p_grid.size, I_grid.size))\n        for Ip1 in xrange(I_grid.size):\n            for p in xrange(p_grid.size):\n                pIp1k[p, Ip1] = np.sum(Ip1gIk[Ip1, :, k] * pIk[p, :, k])\n\n        # 2) Multiply pIp1k by pp1gpIp1 and integrate out p.\n        pp1Ip1k = np.zeros((p_grid.size, I_grid.size))\n        for Ip1 in xrange(I_grid.size):\n            for pp1 in xrange(p_grid.size):\n                pp1Ip1k[pp1, Ip1] = np.sum(pIp1k[:, Ip1] * pp1gpIp1[pp1, :, Ip1].T)\n\n        # 3) Place pp1Ip1k into pIk (belief that is carried to the next trial)\n        pIk[:, :, k] = pp1Ip1k\n\n    if reward:\n        for k in xrange(k_grid.size):\n            for p in xrange(p_grid.size):\n                pIk[p, :, k] *= p_grid[p]\n    else:\n        for k in xrange(k_grid.size):\n            for p in xrange(p_grid.size):\n                pIk[p, :, k] *= 1 - p_grid[p]\n\n    # Normalization\n    pIk /= pIk.sum()\n    return pIk
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def code():", "def _fix_up(self, cls, code_name):", "def exercise_b2_106():\r\n pass", "def retranslate(self):\r\n pass", "def retranslate(self):\r\n pass", "def exercise_b2_107():\r\n pass", "def exercise_b2_113():\r\n pass", "def test_fix_code_typical_code():\r\n\r\n pass", "def exercise_b2_53():\r\n pass", "def exercise_b2_27():\r\n pass", "def exercise_b2_39():\r\n pass", "def exercise_b2_93():\r\n pass", "def exercise_b2_82():\r\n pass", "def literal():\n return Literal()", "def exercise_b2_26():\r\n pass", "def question_new_translate():", "def polish(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def regular(self):", "def exercise_b2_98():\r\n pass", "def exercise_b2_69():\r\n pass", "def exercise_b2_52():\r\n pass", "def g():", "def unpolish(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def _transform(self, original, coder):\n msg = list(original)\n for k in range(len(msg)):\n if 0x40 < ord(msg[k]) < 0x5a:\n msg[k] = coder[msg[k]]\n return u\"\".join(msg)", "def _transform(self, original, coder):\n msg = list(original)\n for k in range(len(msg)):\n if 0x590 < ord(msg[k]) < 0xfb50:\n msg[k] = coder[msg[k]]\n return u\"\".join(msg)", "def _str_converter(self, code):\r\n\t\treturn \"\"\"{}\"\"\".format(code)", "def __call__(self,thing):\n return self.compiled(thing)", "def exercise_b2_56():\r\n pass", "def translate():\n pass", "def to_code(self, ipt_args_in_construct: str, variable_name: str, output_var: str, code_fragment):", "def exercise_b2_43():\r\n pass", "def _repr_(self):\n return \"Extended code coming from %s\" % self.original_code()", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def lro(self) -> global___Snippet.Lro:", "def reconst(U,s,VT):\n return (U * s) @ VT", "def _latex_(self):\n return \"\\\\textnormal{Extended code coming from %s}\" % self.original_code()", "def exercise_b2_86():\r\n pass", "def pre_processor(self):", "def clean_code(code):\n return code", "def __init__(self):\n super(OperatorCodegen, self).__init__()", "def __call__(value):", "def _translate(self, data):\n pass\n return [i*2 for i in data]", "def map(self) -> global___Statement.Declaration:", "def exo2():", "def t_STR_LITER(t):\n return t", "def simple():", "def simple():", "def exercise_b2_70():\r\n pass", "def compile(expression):", "def translate(self):\n pass", "def substantiate():", "def gen_c_code(self, comp, dest, jump):\r\n return '111' + self.comp(comp) + self.dest(dest) + self.jump(jump)", "def base():", "def _format_instruction_code_23E(self, val):\n return val", "def v(w,s):\n return w", "def evaluateCode(lang, code):", "def change_code(code):\n if len(str(code)) == 9:\n code = str(0) + str(code)\n else:\n code = str(code)\n return code", "def CL(self):", "def problem_298():\n pass", "def _wrap_code(self, inner):\r\n yield 0, \"<code>\"\r\n for tup in inner:\r\n yield tup\r\n yield 0, \"</code>\"", "def lab8_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def gen_build_str_def():\n\treturn \"\"", "def cython_literal(self, lit):\n if isinstance(lit, Number):\n cy_lit = str(lit).replace('-', 'Neg').replace('+', 'Pos')\\\n .replace('.', 'point')\n elif isinstance(lit, basestring):\n cy_lit = repr(lit)\n return cy_lit", "def _regr_basic():", "def cpp_function(self):", "def build(c):", "def _latex_(self):\n return \"\\\\textnormal{Decoder of } %s \\\\textnormal{ through } %s\" % (self.code(), self.original_decoder())", "def 
non_pythranizable(arg):\n return arg", "def compileInstruction(self, ins):\n pass", "def test_function_statement3():\n r = convert_code(\n \"{foo arg1=bar[1]|modifier arg2=foo.bar.foo arg3=foo.bar[3]|modifier:array[0]:\\\"hello $foo \\\" arg4=foo.bar.awesome[3]|modifier2:7:'hello':\\\"hello\\\":\\\"`$apple.banana`\\\"}\")\n assert r == \"{{ {\\'arg1\\': bar[1]|modifier, \\'arg2\\': foo.bar.foo, \\'arg3\\': foo.bar[3]|modifier(array[0], \\\"hello ${foo} \\\"), \\'arg4\\': foo.bar.awesome[3]|modifier2(7, \\'hello\\', \\\"hello\\\", \\\"${apple.banana}\\\")}|foo }}\"", "def t_ICONST(t):\n return t", "def intern(string): # real signature unknown; restored from __doc__\n return \"\"", "def lab8_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def string_reverser(our_string):\\\\\n\\\n # TODO: Write your solution here\\", "def test_function_statement2():\n r = convert_code(\n \"{foo arg1=bar[1] arg2=foo.bar.foo arg3=foo.bar[3] arg4=foo.bar.awesome[3] }\")\n assert r == \"{{ {'arg1': bar[1], 'arg2': foo.bar.foo, 'arg3': foo.bar[3], 'arg4': foo.bar.awesome[3]}|foo }}\"", "def lab7_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab8_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def sth():", "def transbrl (arg):\r\n return n.translate(p.translate(arg))", "def code(self):\n raise NotImplementedError()", "def gen_code(self, template, dec_id, i_alt):\n v = self.get_alt(dec_id, i_alt)\n\n # assuming the placeholder var is always at the end\n # which is true given how we chop up the chunks\n return template + str(v), str(v)", "def __init__(self, name, code):\n self.name_in_source = name\n if isinstance(name, text_type):\n strip_symbols_re = compile_re('-|_')\n self.canonical_name = strip_symbols_re.sub('', name.lower())\n else:\n self.canonical_name = name\n self.code = code", "def cx():", "def _boilerplate_to_python(indent):\n indent_str = \" \" * indent\n boilerplate = indent_str + \"import core.vba_library\\n\"\n boilerplate = indent_str + \"import core.vba_context\\n\"\n boilerplate += indent_str + \"from core.utils import safe_print\\n\"\n boilerplate += indent_str + \"from core.utils import safe_str_convert\\n\"\n boilerplate += indent_str + \"from core.utils import plus\\n\"\n boilerplate += indent_str + \"from core.utils import eq\\n\"\n boilerplate += indent_str + \"from core.utils import neq\\n\"\n boilerplate += indent_str + \"from core.utils import lt\\n\"\n boilerplate += indent_str + \"from core.utils import lte\\n\"\n boilerplate += indent_str + \"from core.utils import gt\\n\"\n boilerplate += indent_str + \"from core.utils import gte\\n\"\n boilerplate += indent_str + \"import core.utils\\n\"\n boilerplate += indent_str + \"from core.python_jit import update_array\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_num\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_str\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int_list\\n\\n\"\n boilerplate += indent_str + \"try:\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context\\n\"\n boilerplate += indent_str + \"except (NameError, UnboundLocalError):\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context = context\\n\"\n return boilerplate", "def make_codes(self):\n\t\troot = heapq.heappop(self.heap)#obtenemos la raiz del arbol\n\t\tcurrent_code = \"\"\n\t\tself.make_codes_helper(root, current_code)", "def transform(self):", 
"def map():", "def lab9_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lower_case_really():", "def fortran_c_wrapper(self) -> str:\n return ''", "def make_codes_helper(self, root, current_code):\n\t\tif (root == None):\n\t\t\t\treturn\n\t\tif(root.char != None):\n\t\t\tself.codes[root.char] = current_code#guardamos el codigo binario en un diccionario\n\t\t\tself.reverse_mapping[current_code] = root.char#guardamos el caracter en un diccionario donde el 'key' sera el codigo binario.\n\t\t\treturn\n\n\t\tself.make_codes_helper(root.left, current_code + \"0\") #avanzamos recursivamente\n\t\tself.make_codes_helper(root.right, current_code + \"1\")", "def __repr__(self):\n return '<{0}>'.format(self.code)", "def lab7_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def transform():", "def lab7_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def gencode(self):\n\n fullcode = self.code_cfg\n variables = codeconfig_getvars(fullcode)\n if len(variables) > 0:\n variables.sort(key=lambda x: len(x), reverse=True)\n for va in variables:\n if eval(\"self.\" + va[1:]) is None or eval(\"self.\" + va[1:]) == \"\":\n warnings.warn(\"Inserting None for \" + va, UserWarning)\n fullcode = fullcode.replace(va, str(eval(\"self.\" + va[1:])))\n # testing for sintax errors\n #compile(fullcode, \"<Test ModelCall Code>\\n\" + fullcode, 'exec')\n tmpfile = os.path.join(gettempdir(), \"TestModelCallCode.\" + self.complexity + \".py\")\n with open(tmpfile, \"w\") as f: f.write(fullcode)\n compile(fullcode, tmpfile, 'exec')\n return fullcode", "def compile(self):\n\t\traise NotImplemented()" ]
[ "0.6567136", "0.62829435", "0.5988698", "0.59373957", "0.59373957", "0.5923645", "0.5897334", "0.58716434", "0.58613706", "0.58475685", "0.5832422", "0.5757656", "0.57205373", "0.56826997", "0.5669326", "0.5593282", "0.55611753", "0.556074", "0.5530139", "0.54967827", "0.5487342", "0.54844946", "0.5474367", "0.5467053", "0.54644275", "0.54604375", "0.544546", "0.5431496", "0.54048586", "0.5393732", "0.53889155", "0.53479546", "0.53464764", "0.53464764", "0.53464764", "0.53464764", "0.53464764", "0.53398705", "0.5335051", "0.5333635", "0.5327044", "0.5324164", "0.53226525", "0.5318026", "0.52966684", "0.5295363", "0.52796197", "0.5271038", "0.526976", "0.52469194", "0.52469194", "0.5245855", "0.52456564", "0.5227088", "0.52235895", "0.5214243", "0.52118295", "0.5211373", "0.5197234", "0.519547", "0.51855904", "0.518287", "0.5170321", "0.5166885", "0.5158347", "0.5155707", "0.51382536", "0.5134773", "0.51280195", "0.51250434", "0.51204526", "0.5119628", "0.51195866", "0.5113997", "0.51120365", "0.51035386", "0.5101512", "0.51002514", "0.50990766", "0.50959146", "0.50851", "0.5081959", "0.50817716", "0.507505", "0.5070908", "0.50702035", "0.5059353", "0.50562567", "0.50542474", "0.5044446", "0.5037944", "0.5035409", "0.503535", "0.5028019", "0.5025988", "0.5021327", "0.50207126", "0.50167906", "0.5015874", "0.5015051", "0.50137144" ]
0.0
-1
Tests the install workflow using the built-in workflows.
def tests_pull_workflow(self):\n    daemon_client = {}\n    client = self.get_client(daemon_client)\n    for container in client.containers(all=True):\n        if 'test-container' in ''.join([name for name in container.get('Names')]):\n            client.remove_container('test-container')\n    if ['{0}:latest'.format(TEST_IMAGE)] in [i.get('RepoTags') for i in client.images()]:\n        client.remove_image(TEST_IMAGE, force=True)\n    # execute install workflow\n    self.env.execute('install', task_retries=0)\n    container_instance = {}\n    for instance in self.env.storage.get_node_instances():\n        if 'container_id' in instance.runtime_properties.keys():\n            container_instance = instance\n    self.assertTrue(container_instance is not None, 'Failed getting container.')\n    container_id = container_instance.runtime_properties.get('container_id')\n    containers = client.containers(all=True)\n    self.assertTrue(container_id in [c.get('Id') for c in containers])\n    self.env.execute('uninstall', task_retries=3)\n    repotags = []\n    for i in client.images():\n        repotags.append(i.get('RepoTags'))\n    self.assertFalse(TEST_IMAGE in [tag for tag in repotags])\n    if ['{0}:latest'.format(TEST_IMAGE)] in [i.get('RepoTags') for i in client.images()]:\n        client.remove_image(TEST_IMAGE, force=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_install(self):\n pass", "def test_deploy_workflow_definition(self):\n pass", "def test_installed(self):\n # OSA script should have been installed in setUp function\n self.assertTrue(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))\n # Clean up install\n self.run_function(\"assistive.remove\", [OSA_SCRIPT])\n # Installed should now return False\n self.assertFalse(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))", "def test_install_empty(self):\n ctx, graph = self._make_ctx_and_graph()\n pr = LifecycleProcessor(graph)\n with current_workflow_ctx.push(ctx):\n pr.install()\n assert graph.tasks == []", "def this_needs_work_test_hook_install(self):\n self.do_test_hook_install(testee.install_setup, False)", "def test_after_install(self):\n self.run_test_suites(self.after_install_test_suite_list)", "def test_workflows_get(self):\n pass", "def test_workflows_get(self):\n pass", "def test_install(self):\n self.userbase('install')\n self.assertImplements(self.store, IRealm)\n self.assertImplements(self.store, ICredentialsChecker)", "def test_install(self):\n self.installer._run_command = Mock()\n self.installer._pretty_print = Mock()\n self.installer.verify_installation = Mock()\n stdout = Mock()\n stderr = Mock()\n self.installer._run_command.return_value = (stdout, stderr)\n stdout.channel.recv_exit_status.return_value = 0\n self.installer.verify_installation.return_value = \"Success\"\n self.installer.install()\n # self.installer.verify_installation.return_value = \"Fail\"\n # with self.assertRaises(Exception):\n # self.installer.install()\n self.installer.verify_installation.return_value = \"Success\"\n stdout.channel.recv_exit_status.return_value = -1\n with self.assertRaises(Exception):\n self.installer.install()\n self.installer._run_command.side_effect = SSHException\n with self.assertRaises(SSHException):\n self.installer.install()", "def setUp(self):\n # Let's install a bundle to use in tests\n self.run_function(\"assistive.install\", [OSA_SCRIPT, True])", "def test_install_packages():\n\n\tassert packaging.install_packages(pkgs) == None", "def test_installed(self):\n self.assertTrue(self.qi.isProductInstalled(PROJECTNAME))", "def test_install(ctx):\n ctx.run(\"pip uninstall {PROJECT_NAME} --yes\".format(PROJECT_NAME=PROJECT_NAME), warn=True)\n ctx.run(\"pip install --no-cache-dir --no-index --find-links=file:./dist {PROJECT_NAME}\".format(PROJECT_NAME=PROJECT_NAME))\n ctx.run(\"pip uninstall {PROJECT_NAME} --yes\".format(PROJECT_NAME=PROJECT_NAME))", "def test_workflows_list(self):\n pass", "def previewinstall(self, installed=[]):\n\n if( self.mode == \"install\"):\n \n # resolve circular dependencies\n if( self.name in installed ):\n return\n else:\n installed.append( self.name )\n \n print \"\\n\" + 20*'-' + \" Starting \" + self.name + \" Installation Test \" + 20*'-' + '\\n'\n \n # additional modules\n mods = self.optmodules + self.reqmodules + self.reqmodules_external + self.reqmodules_buildonly\n if( len(mods) > 0 ):\n for modname in mods:\n mod = self.parent.module(modname)\n if( mod.mode == \"install\" and not mod.name in installed ):\n print \"+ \" + self.name + \" will launch installation of \" + mod.name\n mod.previewinstall(installed)\n print \"+ \"+ self.name + \" using \" + mod.name + \" at [ \" + mod.installPath + \" ]\"\n\n print \"\\n+ Environment Settings used for building \" + self.name + \":\"\n # print environment settings recursively\n self.setEnv(self, [], True )\n\n if( self.hasCMakeBuildSupport ):\n #self.setCMakeVars(self, [])\n print 
\"\\n+ Generated CMake command for building \" + self.name + \":\"\n print ' $ ',self.genCMakeCmd()\n \n print \"\\n+ \" + self.name + \" installation finished.\"\n print '\\n' + 20*'-' + \" Finished \" + self.name + \" Installation Test \" + 20*'-' + '\\n'", "def testGetWorkflow(self):\n assert self.dummySubscription['workflow'] == self.dummyWorkFlow, \\\n 'Couldn\\'t add Workflow to Subscription'", "def test_install(self):\n\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.install(TOOLNAME,adminuser,adminpass)", "def test_installments_get(self):\n pass", "def _run_ci_integration_test():\n _run_install(False)\n _run_integration_tests_on_github(False)", "def test_launch_deployment(self):\n pass", "def test_website_companies_install_additions(self):\n pass", "def requires_setup(step, setup_names):\r\n pass", "def test_website_companies_install_applications(self):\n pass", "def test_call(self):\n output, _error = self.executor.pip('install', 'attrs').batch()\n self.assertEqual(output, 'attrs installed')", "def test_install_tools(self):\n dummy_installation_path = BUILD_DIR + \"selenium-taurus\"\n base_link = \"file:///\" + RESOURCES_DIR + \"selenium\"\n\n shutil.rmtree(dirname(dummy_installation_path), ignore_errors=True)\n\n selenium_server_link = java.SELENIUM_DOWNLOAD_LINK\n junit_link = java.JUNIT_DOWNLOAD_LINK\n junit_mirrors = java.JUNIT_MIRRORS_SOURCE\n hamcrest_link = java.HAMCREST_DOWNLOAD_LINK\n try:\n java.SELENIUM_DOWNLOAD_LINK = base_link + \"/selenium-server-standalone-2.46.0.jar\"\n java.JUNIT_DOWNLOAD_LINK = base_link + \"/junit-4.12.jar\"\n java.JUNIT_MIRRORS_SOURCE = base_link + \"unicode_file\"\n java.HAMCREST_DOWNLOAD_LINK = base_link + \"/hamcrest-core-1.3.jar\"\n\n self.assertFalse(exists(dummy_installation_path))\n\n self.obj.settings.merge({\n \"selenium-server\": join(dummy_installation_path, \"selenium-server.jar\"),\n \"hamcrest-core\": join(dummy_installation_path, \"tools\", \"junit\", \"hamcrest-core.jar\"),\n \"path\": join(dummy_installation_path, \"tools\", \"junit\", \"junit.jar\")\n })\n\n self.obj.execution.merge({\n \"scenario\": {\n \"script\": RESOURCES_DIR + \"selenium/junit/jar/\"},\n \"runner\": \"junit\"})\n self.obj.install_required_tools()\n self.obj.prepare()\n self.assertIsInstance(self.obj, JUnitTester)\n self.assertTrue(exists(join(dummy_installation_path, \"selenium-server.jar\")))\n self.assertTrue(exists(join(dummy_installation_path, \"tools\", \"junit\", \"junit.jar\")))\n self.assertTrue(\n exists(join(dummy_installation_path, \"tools\", \"junit\", \"hamcrest-core.jar\")))\n finally:\n java.SELENIUM_DOWNLOAD_LINK = selenium_server_link\n java.JUNIT_DOWNLOAD_LINK = junit_link\n java.HAMCREST_DOWNLOAD_LINK = hamcrest_link\n java.JUNIT_MIRRORS_SOURCE = junit_mirrors", "def test_workflows_restart(self):\n pass", "async def test_full_user_flow_multiple_installations(\n hass: HomeAssistant,\n mock_setup_entry: AsyncMock,\n mock_verisure_config_flow: MagicMock,\n) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result.get(\"step_id\") == \"user\"\n assert result.get(\"type\") == FlowResultType.FORM\n assert result.get(\"errors\") == {}\n\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n \"email\": \"verisure_my_pages@example.com\",\n \"password\": \"SuperS3cr3t!\",\n },\n )\n await hass.async_block_till_done()\n\n assert 
result2.get(\"step_id\") == \"installation\"\n assert result2.get(\"type\") == FlowResultType.FORM\n assert result2.get(\"errors\") is None\n\n result3 = await hass.config_entries.flow.async_configure(\n result2[\"flow_id\"], {\"giid\": \"54321\"}\n )\n await hass.async_block_till_done()\n\n assert result3.get(\"type\") == FlowResultType.CREATE_ENTRY\n assert result3.get(\"title\") == \"descending (54321th street)\"\n assert result3.get(\"data\") == {\n CONF_GIID: \"54321\",\n CONF_EMAIL: \"verisure_my_pages@example.com\",\n CONF_PASSWORD: \"SuperS3cr3t!\",\n }\n\n assert len(mock_verisure_config_flow.login.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1", "def test_managed_install(visualstudio, tmp_path):\n assert not visualstudio.managed_install", "def test_integrations(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_pytest_integrations(session)", "def test_kite_install(qtbot):\n install_manager = KiteInstallationThread(None)\n installation_statuses = []\n\n def installation_status(status):\n installation_statuses.append(status)\n\n def error_msg(error):\n # Should not enter here\n assert False\n\n def download_progress(progress, total):\n assert total != 0\n\n def finished():\n if sys.platform.startswith(\"linux\"):\n expected_installation_status = [\n DOWNLOADING_SCRIPT,\n DOWNLOADING_INSTALLER,\n INSTALLING,\n FINISHED]\n else:\n expected_installation_status = [\n DOWNLOADING_INSTALLER,\n INSTALLING,\n FINISHED]\n\n # This status can be obtained the second time our tests are run\n if not installation_statuses == ['Installation finished']:\n assert installation_statuses == expected_installation_status\n\n install_manager.sig_installation_status.connect(installation_status)\n install_manager.sig_error_msg.connect(error_msg)\n install_manager.sig_download_progress.connect(download_progress)\n install_manager.finished.connect(finished)\n with qtbot.waitSignal(install_manager.finished, timeout=INSTALL_TIMEOUT):\n install_manager.install()\n\n # Check that kite was installed and is running\n qtbot.waitUntil(\n lambda: check_if_kite_installed() and check_if_kite_running(),\n timeout=5000)", "def test_all_python_versions_deploy():\n pass", "def test_packages_present(self):\n packages = [\"ca-certificates\", \"sudo\", \"wget\", \"unzip\"]\n for pkg in packages:\n with self.subTest(package=pkg):\n self.assertTrue(self.host.package(pkg).is_installed)", "def test_execute_xia_automated_workflow(self, mock_run):\n self.assert_(execute_xia_automated_workflow.run())\n\n self.assert_(execute_xia_automated_workflow.run())\n self.assertEqual(mock_run.call_count, 2)\n\n self.assert_(execute_xia_automated_workflow.run())\n self.assertEqual(mock_run.call_count, 3)", "def test_before(self):\n\n support.create_project(self, 'candice')\n support.add_step(self)\n support.add_step(self, position='0')\n\n project = cauldron.project.get_internal_project()\n steps = project.steps\n\n self.assertTrue(steps[0].filename.startswith('S01'))\n self.assertTrue(steps[1].filename.startswith('S02'))", "def setUp(self):\n trytond.tests.test_tryton.install_module('nereid_webshop')", "def test_create_workflow(self):\n pipeline_id = 2\n nodes = [\n {\"piping_id\": 3, \"compute_resource_name\": \"host\",\n \"plugin_parameter_defaults\": [{\"name\": \"prefix\", \"default\": \"test\"},\n {\"name\": \"dummyInt\", \"default\": 3}]},\n {\"piping_id\": 4, \"compute_resource_name\": \"host\"},\n {\"piping_id\": 5, \"compute_resource_name\": \"host\"}\n ]\n data = {\n 'title': 
'Workflow1',\n 'previous_plugin_inst_id': 1,\n 'nodes_info': json.dumps(nodes)\n }\n response = self.client.create_workflow(pipeline_id, data)\n workflow_title = response['title']\n self.assertEqual(workflow_title, data['title'])\n workflow_id = response['id']\n response = self.client.get_workflow_plugin_instances(workflow_id, data)\n self.assertEqual(response['total'], 3)", "def install(self):\n # This installs the packages defined in self.packages\n super().install()\n # Do any other installation work that is needed. If a license key is\n # required then use the custom_assess_status_check() function below to\n # determine whether it is needed.\n # This assess_status() will determine what status the charm is at after\n # install.\n self.assess_status()", "def test_install(self):\n # This call should not throw an exception\n checked_subprocess_run(f\"{self.python} -m pip install .\")\n\n # Check the version number from `pip info`\n info, _ = checked_subprocess_run(f\"{self.python} -m pip show {PACKAGE_NAME}\")\n\n # The info section from pip is formatted as a RFC 2882 mail header.\n parser = HeaderParser()\n data = parser.parsestr(info)\n version = data[\"version\"]\n\n # Version should be set, should not be the default 0.0.0, and should\n # match __version__ set in the package.\n self.assertTrue(version)\n self.assertNotEqual(version, \"0.0.0\")\n self.assertEqual(version, __version__)", "def test_verify_installation(self):\n self.installer._pretty_print = Mock()\n self.installer._run_command = Mock()\n stdout = Mock()\n stderr = Mock()\n self.installer._run_command.return_value = (stdout, stderr)\n stderr.read().splitlines.return_value = []\n self.assertEqual(\"Success\", self.installer.verify_installation())\n stderr.read().splitlines.return_value = [\"error\"]\n self.assertEqual(\"Fail\", self.installer.verify_installation())", "def _install(self):\n\n pass", "def init_workflow():\n pass", "def test_install(self):\n self.assertIn('kser', [x.key for x in pkg_resources.working_set])", "def test_install_build_single(build_all):\n build_all.run(\"install --requires=foobar/1.0@user/testing --build=foo/*\")\n build_all.assert_listed_binary({\"bar/1.0@user/testing\": (bar_id, \"Cache\"),\n \"foo/1.0@user/testing\": (foo_id, \"Build\"),\n \"foobar/1.0@user/testing\": (foobar_id, \"Cache\"),\n })\n assert \"foo/1.0@user/testing: Forced build from source\" in build_all.out\n assert \"bar/1.0@user/testing: Forced build from source\" not in build_all.out\n assert \"foobar/1.0@user/testing: Forced build from source\" not in build_all.out\n assert \"No package matching\" not in build_all.out", "def test_workflows_post(self):\n pass", "def test_install_with_batch(self):\n parsed_targets = (\n OrderedDict(((\"gettext-runtime\", None), (\"p5-Mojolicious\", None))),\n \"repository\",\n )\n pkg_cmd = MagicMock(return_value={\"retcode\": 0})\n patches = {\n \"cmd.run_all\": pkg_cmd,\n \"pkg_resource.parse_targets\": MagicMock(return_value=parsed_targets),\n }\n with patch.dict(pkgng.__salt__, patches):\n with patch(\"salt.modules.pkgng.list_pkgs\", ListPackages()):\n added = pkgng.install(batch=True)\n expected = {\n \"gettext-runtime\": {\"new\": \"0.20.1\", \"old\": \"\"},\n \"p5-Mojolicious\": {\"new\": \"8.40\", \"old\": \"\"},\n }\n self.assertDictEqual(added, expected)\n pkg_cmd.assert_called_with(\n [\"pkg\", \"install\", \"-y\", \"gettext-runtime\", \"p5-Mojolicious\"],\n output_loglevel=\"trace\",\n python_shell=False,\n env={\"BATCH\": \"true\", \"ASSUME_ALWAYS_YES\": \"YES\"},\n )", "def 
test_install_create_operation(self):\n ctx, graph = self._make_ctx_and_graph()\n\n pr = self._make_lifecycle_processor(\n ctx, graph,\n nodes=[self._make_node(\n operations={\n 'cloudify.interfaces.lifecycle.create':\n self._make_operation()\n },\n plugins=[self._make_plugin()]\n )],\n instances=[self._make_instance()]\n )\n with current_workflow_ctx.push(ctx):\n pr.install()\n assert any(\n task.name == 'plugin1.op1'\n for task in graph.tasks\n )\n assert any(\n task.name == 'SetNodeInstanceStateTask'\n and task.info == 'started'\n for task in graph.tasks\n )", "def test_pip_install(salt_call_cli):\n dep = \"PyGithub\"\n repo = \"https://github.com/saltstack/salt.git\"\n\n try:\n install = salt_call_cli.run(\"--local\", \"pip.install\", dep)\n assert install.returncode == 0\n\n use_lib = salt_call_cli.run(\"--local\", \"github.get_repo_info\", repo)\n assert \"Authentication information could\" in use_lib.stderr\n finally:\n ret = salt_call_cli.run(\"--local\", \"pip.uninstall\", dep)\n assert ret.returncode == 0\n use_lib = salt_call_cli.run(\"--local\", \"github.get_repo_info\", repo)\n assert \"The github execution module cannot be loaded\" in use_lib.stderr", "def test_manual_install_1(monkeypatch):\n\n monkeypatch.setattr(platform, 'system', lambda: 'Linux')\n monkeypatch.setattr(platform, 'machine', lambda: 'x86_64')\n monkeypatch.setattr(tempfile, 'mkdtemp', lambda: '/tmp/tempdir')\n monkeypatch.setattr(shutil, 'rmtree', lambda path: True)\n monkeypatch.setattr(shutil, 'copyfileobj', lambda src, dest: True)\n monkeypatch.setattr(os, 'listdir', lambda path: [\n 'terraform-provider-terraform_v0.11.2_x4', 'pkg1', 'pkg2'])\n monkeypatch.setattr(os, 'chmod', lambda path, permissions: True)\n\n def mp_zip_file(dest, mode):\n class MockedZipFile:\n def extractall(self, dest):\n return True\n\n def close(self):\n return True\n\n return MockedZipFile()\n\n def mp_url_open(url):\n class MockedUrlOpen:\n def __enter__(self):\n return 'content'\n\n def __exit__(self, type, value, traceback):\n pass\n\n return MockedUrlOpen()\n\n def mp_open(file, mode):\n class MockedOpen:\n def __enter__(self):\n return 'content'\n\n def __exit__(self, type, value, traceback):\n pass\n\n return MockedOpen()\n\n monkeypatch.setattr(urllib.request, 'urlopen', mp_url_open)\n monkeypatch.setattr(builtins, 'open', mp_open)\n\n monkeypatch.setattr(zipfile, 'ZipFile', mp_zip_file)\n\n manual_install(['pkg1@1.0.2', 'pkg2'], '/tmp/stone-burner_plugins')", "def run_install_tutorial_check(self):\n\n src = join_path(self.test_suite.current_test_cache_dir, self.smoke_test_src)\n cc_exe = os.environ[\"CC\"]\n cc_options = [\n \"-o\",\n self.smoke_test,\n src,\n \"-I{0}\".format(self.prefix.include),\n \"-I{0}\".format(self.spec[\"numactl\"].prefix.include),\n \"-L{0}\".format(self.prefix.lib),\n \"-laml\",\n \"-lexcit\",\n \"-lpthread\",\n ]\n\n self.run_test(\n cc_exe, cc_options, purpose=\"test: compile {0} tutorial\".format(self.smoke_test)\n )\n self.run_test(self.smoke_test, purpose=\"test: run {0} tutorial\".format(self.smoke_test))", "def run_tests(session):\n set_environment_variables(PYBAMM_ENV, session=session)\n session.run_always(\"pip\", \"install\", \"-e\", \".[all]\")\n if sys.platform == \"linux\" or sys.platform == \"darwin\":\n session.run_always(\"pip\", \"install\", \"-e\", \".[odes]\")\n session.run_always(\"pip\", \"install\", \"-e\", \".[jax]\")\n session.run(\"python\", \"run-tests.py\", \"--all\")", "def test_install_without_args(self):\n parsed_targets = (\n OrderedDict(((\"gettext-runtime\", 
None), (\"p5-Mojolicious\", None))),\n \"repository\",\n )\n pkg_cmd = MagicMock(return_value={\"retcode\": 0})\n patches = {\n \"cmd.run_all\": pkg_cmd,\n \"pkg_resource.parse_targets\": MagicMock(return_value=parsed_targets),\n }\n with patch.dict(pkgng.__salt__, patches):\n with patch(\"salt.modules.pkgng.list_pkgs\", ListPackages()):\n added = pkgng.install()\n expected = {\n \"gettext-runtime\": {\"new\": \"0.20.1\", \"old\": \"\"},\n \"p5-Mojolicious\": {\"new\": \"8.40\", \"old\": \"\"},\n }\n self.assertDictEqual(added, expected)\n pkg_cmd.assert_called_with(\n [\"pkg\", \"install\", \"-y\", \"gettext-runtime\", \"p5-Mojolicious\"],\n output_loglevel=\"trace\",\n python_shell=False,\n env={},\n )", "async def test_full_user_flow_single_installation(\n hass: HomeAssistant,\n mock_setup_entry: AsyncMock,\n mock_verisure_config_flow: MagicMock,\n) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result.get(\"step_id\") == \"user\"\n assert result.get(\"type\") == FlowResultType.FORM\n assert result.get(\"errors\") == {}\n\n mock_verisure_config_flow.get_installations.return_value = {\n k1: {k2: {k3: [v3[0]] for k3, v3 in v2.items()} for k2, v2 in v1.items()}\n for k1, v1 in mock_verisure_config_flow.get_installations.return_value.items()\n }\n\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n \"email\": \"verisure_my_pages@example.com\",\n \"password\": \"SuperS3cr3t!\",\n },\n )\n await hass.async_block_till_done()\n\n assert result2.get(\"type\") == FlowResultType.CREATE_ENTRY\n assert result2.get(\"title\") == \"ascending (12345th street)\"\n assert result2.get(\"data\") == {\n CONF_GIID: \"12345\",\n CONF_EMAIL: \"verisure_my_pages@example.com\",\n CONF_PASSWORD: \"SuperS3cr3t!\",\n }\n\n assert len(mock_verisure_config_flow.login.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1", "def run_integration(session):\n set_environment_variables(PYBAMM_ENV, session=session)\n session.run_always(\"pip\", \"install\", \"-e\", \".[all]\")\n if sys.platform == \"linux\":\n session.run_always(\"pip\", \"install\", \"-e\", \".[odes]\")\n session.run(\"python\", \"run-tests.py\", \"--integration\")", "def _run_ci_publish():\n _run_install(False)\n _run_tests(False)\n _run_publish(True)", "def test_install_app(tmpdir):\n basedir = os.path.join(tmpdir, 'test')\n Flowserv(\n basedir=basedir,\n database=TEST_URL,\n workers={'python': {}},\n open_access=True,\n run_async=True,\n s3bucket='test',\n clear=True\n )", "def test_is_installed():\n assert _is_installed('coverage') is True # regular dependency\n assert _is_installed('pytest') is True # dev dependency\n assert _is_installed('missing') is False # missing dependency", "def test_dependencies_are_installed(self):\n installed = [p['id'] for p in self.qi.listInstalledProducts()]\n self.assertIn('plone.restapi', installed)\n self.assertIn('plone.app.contenttypes', installed)\n self.assertIn('plone.app.multilingual', installed)", "def task_installTestData(self):\n if env.get('environment') == 'production':\n abort(\"Don't use installTestData in production.\")\n\n if postgres.tableExists('trac', 'system'):\n abort(\"Existing Trac tables found.\")\n\n with settings(user=self.serviceUser):\n # Run trac initenv to create the postgresql database tables, but use\n # a throwaway trac-env directory because that comes from\n # https://github.com/twisted-infra/trac-config/tree/master/trac-env\n try:\n 
run('~/virtualenv/bin/trac-admin '\n '/tmp/trac-init initenv TempTrac postgres://@/trac git \"\"')\n finally:\n run(\"rm -rf /tmp/trac-init\")\n\n # Run an upgrade to add plugin specific database tables and columns.\n run('~/virtualenv/bin/trac-admin config/trac-env upgrade --no-backup')", "def test_find_workflow_definitions(self):\n pass", "def test_basic_workflow(self):\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n sys.path.append(test_data_dir)\n import basic\n tmpdir = tempfile.mkdtemp()\n package_path = os.path.join(tmpdir, 'workflow.tar.gz')\n try:\n compiler.Compiler().compile(basic.save_most_frequent_word, package_path)\n with open(os.path.join(test_data_dir, 'basic.yaml'), 'r') as f:\n golden = yaml.load(f)\n compiled = self._get_yaml_from_tar(package_path)\n\n self.maxDiff = None\n # Comment next line for generating golden yaml.\n self.assertEqual(golden, compiled)\n finally:\n # Replace next line with commented line for gathering golden yaml.\n shutil.rmtree(tmpdir)\n # print(tmpdir)", "def setup_package():\n\n global TEST_WORKSPACE\n TEST_WORKSPACE = env.get_workspace('authentication')\n\n os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE\n\n test_config = {}\n\n # Setup environment variables for the test cases.\n host_port_cfg = {'viewer_host': 'localhost',\n 'viewer_port': env.get_free_port(),\n 'viewer_product': 'authentication'}\n\n test_env = env.test_env(TEST_WORKSPACE)\n\n codechecker_cfg = {\n 'check_env': test_env,\n 'workspace': TEST_WORKSPACE,\n 'checkers': []\n }\n\n codechecker_cfg.update(host_port_cfg)\n\n codechecker_cfg['run_names'] = []\n\n test_config['codechecker_cfg'] = codechecker_cfg\n\n # Export configuration for the tests.\n env.export_test_cfg(TEST_WORKSPACE, test_config)\n\n # Enable authentication and start the CodeChecker server.\n env.enable_auth(TEST_WORKSPACE)\n print(\"Starting server to get results\")\n _start_server(codechecker_cfg, test_config, False)", "def this_needs_work_test_hook_upgrade(self):\n self.do_test_hook_install(testee.upgrade_setup, True)", "def test_update_resumed_install(self):\n ctx, graph = self._make_ctx_and_graph()\n\n node = self._make_node(\n operations={\n 'cloudify.interfaces.lifecycle.create':\n self._make_operation(operation='plugin1.op1'),\n 'cloudify.interfaces.lifecycle.delete':\n self._make_operation(operation='plugin1.op2')\n },\n plugins=[{'name': 'plugin1', 'package_name': 'plugin1'}]\n )\n instance = self._make_instance()\n pr = self._make_lifecycle_processor(\n ctx, graph,\n nodes=[node],\n instances=[instance]\n )\n with current_workflow_ctx.push(ctx):\n pr.install()\n\n # after creating the install graph, resume it - it should first\n # delete the instance, before re-installing it\n ctx.resume = True\n instance['state'] = 'creating'\n pr._update_resumed_install(graph)\n\n delete_task_index = None\n install_task_index = None\n for ix, task in enumerate(graph.linearize()):\n if task.name == 'plugin1.op1':\n install_task_index = ix\n elif task.name == 'plugin1.op2':\n delete_task_index = ix\n\n assert install_task_index is not None\n assert delete_task_index is not None\n assert delete_task_index < install_task_index", "def test_install_and_remove(self):\n new_bundle = \"com.smileonmymac.textexpander\"\n self.assertTrue(self.run_function(\"assistive.install\", [new_bundle]))\n self.assertTrue(self.run_function(\"assistive.remove\", [new_bundle]))", "def setUp(self):\n self.framework = FrameworkFactory.get_framework()\n self.framework.start()\n self.ipopo = 
install_ipopo(self.framework)\n\n # Install the test bundle\n self.module = install_bundle(self.framework)", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'functional',\n 'manifest.ini')\n TestRun.run_tests(self)", "def test_00_setup(self):\n with mock_api(magento_base_responses):\n import_batch(self.session, 'magento.website', self.backend_id)\n import_batch(self.session, 'magento.store', self.backend_id)\n import_batch(self.session, 'magento.storeview', self.backend_id)\n import_record(self.session, 'magento.res.partner.category',\n self.backend_id, 1)", "def test_execute_deployment(self):\n pass", "def test_product_is_installed(self):\n try:\n result = self.installer.is_product_installed(PROJECT_NAME)\n except AttributeError:\n result = self.installer.isProductInstalled(PROJECT_NAME)\n self.assertTrue(result)", "def test_github_systers_automated_testing(self):\n automated_testing_url = self.base_url + \"/systers/automated-testing\"\n readme_url = automated_testing_url + \"/blob/master/README.md\"\n driver = self.driver\n driver.get(\"{0}/systers/automated-testing\".format(self.base_url))\n wiki_elem = driver.find_element_by_partial_link_text(\"webdriveriki\")\n wiki_elem.send_keys(Keys.ENTER)\n time.sleep(2)\n readme_elem = driver.find_element_by_link_text(\"Setup Steps\")\n readme_elem.send_keys(Keys.ENTER)\n time.sleep(2)\n assert driver.current_url == readme_url", "def test_quick_build(self):\n pass", "def test_3_full_pipeline(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"110106_FC70BUKAAXX\"),\n os.path.join(data_dir, \"run_info.yaml\")]\n subprocess.check_call(cl)", "def test_workflow(system_conf):\n check_call(f\"mpirun -n 3 pyrate conv2tif -f {system_conf}\", shell=True)\n check_call(f\"mpirun -n 3 pyrate prepifg -f {system_conf}\", shell=True)\n check_call(f\"mpirun -n 3 pyrate process -f {system_conf}\", shell=True)\n check_call(f\"mpirun -n 3 pyrate merge -f {system_conf}\", shell=True)\n\n # assert logs generated in the outdir\n params = Configuration(system_conf).__dict__\n for stage in ['conv2tif', 'prepifg', 'process', 'merge']:\n log_file_name = 'pyrate.log.' 
+ stage\n files = list(Path(params[cf.OUT_DIR]).glob(log_file_name + '.*'))\n assert len(files) == 1", "def prepare_run(self, **kwargs):\n assert self.cloud\n LOGGER.debug('Validating run tests...')\n for test in kwargs.get('tests', self.stests):\n if test in self.stests:\n self.tests.append(test)\n else:\n raise Exception(f\"Test name '{test}' is invalid\")\n\n if not os.path.exists(self.task_dir):\n os.makedirs(self.task_dir)\n\n task = os.path.join(self.rally_dir, 'task.yaml')\n if not os.path.exists(task):\n LOGGER.error(\"Task file '%s' does not exist.\", task)\n raise Exception(f\"Task file '{task}' does not exist.\")\n self.task_file = os.path.join(self.task_dir, 'task.yaml')\n shutil.copyfile(task, self.task_file)\n\n task_macro = os.path.join(self.rally_dir, 'macro')\n if not os.path.exists(task_macro):\n LOGGER.error(\"Task macro dir '%s' does not exist.\", task_macro)\n raise Exception(f\"Task macro dir '{task_macro}' does not exist.\")\n macro_dir = os.path.join(self.task_dir, 'macro')\n if os.path.exists(macro_dir):\n shutil.rmtree(macro_dir)\n shutil.copytree(task_macro, macro_dir)\n\n self.update_keystone_default_role()\n self.compute_cnt = self.count_hypervisors()\n self.network_extensions = self.cloud.get_network_extensions()\n self.flavor_alt = self.create_flavor_alt()\n self.services = [service.name for service in\n functest_utils.list_services(self.cloud)]\n\n LOGGER.debug(\"flavor: %s\", self.flavor_alt)", "def _get_install_steps(self):\n\n content = self._get_yaml_content()\n\n return self._fix_install_steps(content['install'])", "def test_installed(self):\n script = which('parsefin')\n if not script:\n raise SkipTest(\"Not installed\")\n script = script[0]\n\n return self.runScript(script)", "def test_apps(self):\n ## List the dirs in PATH\n apps = []\n for path in self.paths:\n apps.extend(os.listdir(path))\n \n for app in self.expected_executables:\n assert app in apps", "def test_initialization(self):\n self.assertEqual(self.installer.host_name, \"ec2.amazon.com\")\n self.assertEqual(self.installer.user_name, \"ec2\")\n self.assertEqual(self.installer.os, \"ubuntu\")\n self.assertEqual(self.installer.key_path, \"./ODFEAMIInstanceKey.pem\")\n self.assertEqual(self.installer.RPM_package_version, \"1.4.0\")\n self.assertEqual(self.installer.APT_OSS_version, \"7.4.2\")", "def test_installed(self):\n check_output('unity --help', shell=True)", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def test_reinstall_packages():\n\tassert packaging.install_packages(pkgs) == None", "def run_tests(self):\n # Charm does not defer hooks so that test is not included.\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'ovn-central',\n 'ovn-central')", "def test_ifPythonModuleIsInstalled():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"pyModule\" in testConfig.config:\n print \"pyModule: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfPythonModuleIsInstalled, testConfig.config", "def setUp(self):\n self.hass = get_test_home_assistant()\n self.assertTrue(setup_component(self.hass, remote.DOMAIN, {'remote': {\n 'platform': 'demo',\n }}))", "def test_install_with_local(self):\n parsed_targets = (\n OrderedDict(((\"gettext-runtime\", None), (\"p5-Mojolicious\", None))),\n \"repository\",\n )\n pkg_cmd = MagicMock(return_value={\"retcode\": 0})\n patches = {\n \"cmd.run_all\": pkg_cmd,\n \"pkg_resource.parse_targets\": 
MagicMock(return_value=parsed_targets),\n }\n with patch.dict(pkgng.__salt__, patches):\n with patch(\"salt.modules.pkgng.list_pkgs\", ListPackages()):\n added = pkgng.install(local=True)\n expected = {\n \"gettext-runtime\": {\"new\": \"0.20.1\", \"old\": \"\"},\n \"p5-Mojolicious\": {\"new\": \"8.40\", \"old\": \"\"},\n }\n self.assertDictEqual(added, expected)\n pkg_cmd.assert_called_with(\n [\"pkg\", \"install\", \"-yU\", \"gettext-runtime\", \"p5-Mojolicious\"],\n output_loglevel=\"trace\",\n python_shell=False,\n env={},\n )", "def test_install_build_double(build_all):\n build_all.run(\"install --requires=foobar/1.0@user/testing --build=foo/* --build=bar/*\")\n build_all.assert_listed_binary({\"bar/1.0@user/testing\": (bar_id, \"Build\"),\n \"foo/1.0@user/testing\": (foo_id, \"Build\"),\n \"foobar/1.0@user/testing\": (foobar_id, \"Cache\"),\n })\n assert \"foo/1.0@user/testing: Forced build from source\" in build_all.out\n assert \"bar/1.0@user/testing: Forced build from source\" in build_all.out\n assert \"foobar/1.0@user/testing: Forced build from source\" not in build_all.out\n assert \"No package matching\" not in build_all.out", "def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def do_workflow(self, arg=None):\n\n def add_steps_to_workflow(curr_flow):\n while True:\n cmd_call = simple_input('Please choose a command to add to the workflow.', cmds, True)\n if cmd_call not in ['DONE', 'EXIT']:\n if self.is_output_cmd(cmd_call):\n curr_flow.add_output(cmd_call)\n else:\n curr_flow.add_step(cmd_call)\n cmds.pop(cmds.index(cmd_call))\n\n _conf = simple_input('Do you want to configure this command?', ['Y','N'], True) if self.is_configureable(cmd) else None\n if _conf == 'Y':\n curr_flow.configure_step(cmd_call)\n\n elif cmd_call == 'DONE':\n break\n else:\n return\n return curr_flow.has_steps()\n\n def confirm_workflow(curr_flow):\n checks = [('START', 'Start workflow?'), ('ADD', 'Do you want to add more steps?'),\n ('RESTART', 'Do you want to start over?')]\n curr_flow.draw_steps()\n for check in checks:\n _continue = simple_input(check[1], ['Y', 'N', 'EXIT'])\n if _continue == 'Y':\n return check[0]\n if 
_continue == 'EXIT':\n return 'EXIT'\n return 'INVALID'\n\n print('Preparing Workflow Wizard...')\n options = sorted(self.cmds + self.output_cmds)\n from smores.workflow import Workflow\n workflow = Workflow(self)\n target, load_type = self.validate_args('', 'file')\n if target:\n _l = True if target in self.inputs['files'].keys() else False\n workflow.add_target(target, load_type, _l)\n print('Please choose the commands you would like to add to the workflow.'\n '\\nCommands will be executed in the order in which they are added.'\n '\\n\\nPlease note that some commands have dependencies that must be satisfied. An overview of '\n 'command dependencies is available on the main SMOREs wiki on Github')\n print('\\nAvailable Commands for WorkFlow')\n cmds = []\n for i, _o in enumerate(options):\n print('{1}'.format(i, _o))\n cmds.append(_o)\n cmds.append('DONE')\n steps_added = add_steps_to_workflow(workflow)\n while steps_added:\n _run = confirm_workflow(workflow)\n if _run == 'START':\n break\n elif _run == 'ADD':\n _ = add_steps_to_workflow(workflow)\n elif _run == 'RESTART':\n self.do_workflow('')\n else:\n return\n workflow.run()\n print('Workflow has completed.')\n return\n\n else:\n print('Workflows currently have to be setup without the file already being loaded.')\n return", "def install_experiment(self):\n # read git credentials configuration\n try:\n with open('GlobalConfigurations/tokens.json', 'r') as tokens_file:\n data = json.load(tokens_file)\n username = data['GitHub']['user']\n password = data['GitHub']['password']\n\n except EnvironmentError:\n print('Cannot open tokens file')\n\n protocol_name = self.protocol_config['protocol']\n working_directory = self.protocol_config['workingDirectory']\n cp = list(self.protocol_config['CloudProviders'].keys())\n git_address = self.protocol_config['CloudProviders'][cp[0]]['git']['gitAddress']\n git_branch = self.protocol_config['CloudProviders'][cp[0]]['git']['gitBranch']\n\n for idx in range(len(working_directory)):\n os.system(f'fab -f Execution/fabfile.py install_git_project:{username},{password},{git_branch[idx]},'\n f'{git_address[idx]},{working_directory[idx]} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def test_integration():\n\n p = pipeline.Pipeline(\n FX_TRAIN,\n FX_TEST,\n FX_LOOKUP,\n RESULTS_DIR\n )\n\n p.run()\n\n assert os.path.isfile(p.path(pipeline.PRED_FILE))", "def test_update_software_asset_install_script(self):\n pass", "async def test_full_user_flow_multiple_installations_with_mfa(\n hass: HomeAssistant,\n mock_setup_entry: AsyncMock,\n mock_verisure_config_flow: MagicMock,\n) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result.get(\"step_id\") == \"user\"\n assert result.get(\"type\") == FlowResultType.FORM\n assert result.get(\"errors\") == {}\n\n mock_verisure_config_flow.login.side_effect = VerisureLoginError(\n \"Multifactor authentication enabled, disable or create MFA cookie\"\n )\n\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n \"email\": \"verisure_my_pages@example.com\",\n \"password\": \"SuperS3cr3t!\",\n },\n )\n await hass.async_block_till_done()\n\n assert result2.get(\"type\") == FlowResultType.FORM\n assert result2.get(\"step_id\") == \"mfa\"\n\n mock_verisure_config_flow.login.side_effect = None\n\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n \"code\": \"123456\",\n },\n )\n await 
hass.async_block_till_done()\n\n assert result3.get(\"step_id\") == \"installation\"\n assert result3.get(\"type\") == FlowResultType.FORM\n assert result3.get(\"errors\") is None\n\n result4 = await hass.config_entries.flow.async_configure(\n result3[\"flow_id\"], {\"giid\": \"54321\"}\n )\n await hass.async_block_till_done()\n\n assert result4.get(\"type\") == FlowResultType.CREATE_ENTRY\n assert result4.get(\"title\") == \"descending (54321th street)\"\n assert result4.get(\"data\") == {\n CONF_GIID: \"54321\",\n CONF_EMAIL: \"verisure_my_pages@example.com\",\n CONF_PASSWORD: \"SuperS3cr3t!\",\n }\n\n assert len(mock_verisure_config_flow.login.mock_calls) == 1\n assert len(mock_verisure_config_flow.request_mfa.mock_calls) == 1\n assert len(mock_verisure_config_flow.validate_mfa.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1", "def test_0030_check_workflow_repository(self):\n repository = self.test_db_util.get_repository_by_name_and_owner(workflow_repository_name, common.test_user_1_name)\n strings_displayed = ['Workflows', 'New workflow for 0060_filter', '0.1']\n strings_not_displayed = ['Valid tools', 'Invalid tools']\n self.display_manage_repository_page(repository, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def testRecipeSetupArgs(self):\n # We want to access the tool's state object to load recipes and go through\n # modules.\n # pylint: disable=protected-access\n self.tool._state = dftw_state.DFTimewolfState(config.Config)\n\n for recipe in self.tool._recipes_manager.GetRecipes():\n self.tool._state.LoadRecipe(recipe.contents, dftimewolf_recipes.MODULES)\n modules = recipe.contents['modules']\n preflights = recipe.contents.get('preflights', [])\n for module in modules + preflights:\n runtime_name = module.get('runtime_name', module['name'])\n setup_func = self.tool.state._module_pool[runtime_name].SetUp\n expected_args = set(inspect.getfullargspec(setup_func).args)\n expected_args.remove('self')\n provided_args = set(module['args'])\n\n self.assertEqual(\n expected_args,\n provided_args,\n f'Error in {recipe.name}:{runtime_name}')", "def test_change_workflow_definition(self):\n pass" ]
[ "0.7448684", "0.6557498", "0.65304166", "0.65078044", "0.6446686", "0.63318056", "0.6329271", "0.6329271", "0.63074094", "0.63003325", "0.62722313", "0.62695414", "0.6268539", "0.62517077", "0.62472796", "0.62345684", "0.62330306", "0.62122643", "0.62017137", "0.61228365", "0.60967636", "0.60955435", "0.6092244", "0.60470665", "0.60467124", "0.602744", "0.60023284", "0.59837157", "0.5982973", "0.5973623", "0.59631354", "0.5962251", "0.5958869", "0.5948059", "0.592943", "0.59101105", "0.58784693", "0.5878416", "0.58687425", "0.586416", "0.5861653", "0.585079", "0.58493656", "0.584301", "0.5842035", "0.5825325", "0.5817417", "0.5803581", "0.57856745", "0.5784312", "0.5757813", "0.57449514", "0.5738575", "0.57347393", "0.5728287", "0.57237977", "0.57182115", "0.5706946", "0.57057106", "0.5698571", "0.5695916", "0.5691275", "0.5689725", "0.5685521", "0.5684203", "0.5680807", "0.56762254", "0.56719965", "0.56719345", "0.5660518", "0.566006", "0.5655014", "0.56378984", "0.5633859", "0.5629682", "0.5627534", "0.5617764", "0.56152296", "0.5610822", "0.56101996", "0.5609474", "0.5606824", "0.56033325", "0.55988675", "0.55931276", "0.5589551", "0.5587859", "0.5586737", "0.5584237", "0.5581172", "0.5578185", "0.5578185", "0.5576427", "0.5568562", "0.5560159", "0.55600166", "0.55559283", "0.55527353", "0.55498534", "0.55487895", "0.554695" ]
0.0
-1
Rename a file or folder
def rename_file(self, file_id, new_name): func = f"setRenameFile(Token: $Token, FileRenames: $FileRenames)" query = f"mutation SetRenameFile($Token: String!, $FileRenames: [FileRenameInfo]!) {{ {func} }}" request = {"operationName": "SetRenameFile", "variables": { "Token": self.KEYS["Token"], "FileRenames": [{ "ID": file_id, "NewName": new_name }] }, "query": query } header = {"x-api-key": self.KEYS["x-api-key"]} response = requests.post(URL_API, headers=header, data=json.dumps(request)) if response.ok: rd = json.loads(response.text) if "errors" in rd: messages = [] for error in rd["errors"]: messages.append(error["message"]) message = '\n'.join(messages) raise DegooError(f"getUserInfo failed with: {message}") else: return rd["data"]['setRenameFile'] else: raise DegooError(f"renameFile failed with: {response}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RenameFile(self, oldname: str, newname: str) -> None:\n ...", "def rename(path, new_path):\n fs.rename(path, new_path)", "def rename(self, src, dst):\n os.rename(src, dst)", "def rename_file(path, old_name, new_name):\n \n old_file = os.path.join(path, old_name)\n new_file = os.path.join(path, new_name)\n os.rename(old_file, new_file)", "def rename(path_file_folder, new_name):\n old_name = path_file_folder[path_file_folder.rfind('/') + 1:] if '/' in path_file_folder else path_file_folder\n if old_name == new_name:\n raise DegooError(f\"rename: Old name and new name \\\"{new_name}\\\" cannot be the same\")\n\n if isinstance(path_file_folder, int):\n file_id = path_file_folder\n elif isinstance(path_file_folder, str):\n file_id = path_id(path_file_folder)\n else:\n raise DegooError(f\"rm: Illegal file: {path_file_folder}\")\n\n return api.rename_file(file_id, new_name)", "def rename_file (self):\n\t\tassert self.__filename, \"Renaming could not complete because the new filename could not be determined, one or more needed arguments is empty!\"\n\t\tos.rename( self._file.path, self.__filename )\n\t\t\n\t\tif self.verbose and self.log :\tself.log.info( 'File renamed from %s to %s' % (self._file.path, self.__filename))", "def rename(oldname, newname):", "def rename(project, project_dir, files_dir, recursive, offset):\n project = NamingProject(project, project_dir=project_dir)\n renamer = FileRename(project, files_dir, recursive=recursive, offset=offset)\n renamer.run()", "def auto_rename(file_path, new_name):\n \n # Return if no file given\n if not file_path:\n return ''\n else:\n file_path = file_path\n \n # Get the new name\n new_path = change_basename(file_path, new_name)\n \n \n # Changed?\n if new_path != file_path:\n # Try to rename\n try:\n shutil.move(os.path.join(settings.MEDIA_ROOT, file_path), os.path.join(settings.MEDIA_ROOT, new_path))\n except IOError:\n # Error? 
Restore original name\n new_path = file_path\n \n # Return the new path replacing backslashes (for Windows)\n return new_path", "def rename_file(self, path, new_name):\n try:\n self.rename_narrative(self._parse_path(path), self.get_userid(), new_name)\n except WorkspaceError as err:\n raise HTTPError(err.http_code, err.message)\n except Exception as err:\n raise HTTPError(\n 500, \"An error occurred while renaming your Narrative: {}\".format(err)\n )", "def rename(self, target):\n target = os.fspath(target)\n return error.checked_call(os.rename, self.strpath, target)", "def rename_file(source, oldname, newname):\n #source = client_variables.output_folder\n renamefiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for renamefile in renamefiles:\n if renamefile.endswith(ext):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)\n elif renamefile.startswith(oldname):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)", "def rename(old, new):", "def rename(old, new):", "def fileRename(current_file,num,digits):\n # Key, value pairs of what to replace.\n dictobj = {\n '<num>': get_numbering_format(digits, num),\n '<datetaken>': date_to_string(get_date_taken(current_file),'%Y%m%d__%H_%M'),\n '<dname>': dirname\n }\n # Rename\n new_filename = multi_replace(filename_pattern, dictobj)\n shutil.move(current_file, new_filename)", "def rename(path, new_name):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n new_name = encode(new_name, True)\r\n try:\r\n samba.rename(os.path.basename(path), new_name, os.path.dirname(path))\r\n except:\r\n import traceback\r\n logger.info(\r\n \"deportesalacarta.core.filetools mkdir: Error al renombrar el archivo o carpeta\" + traceback.format_exc())\r\n platformtools.dialog_notification(\"Error al renombrar\", path)\r\n return False\r\n else:\r\n new_name = encode(new_name, False)\r\n try:\r\n os.rename(path, os.path.join(os.path.dirname(path), new_name))\r\n except OSError:\r\n import traceback\r\n logger.info(\r\n \"deportesalacarta.core.filetools mkdir: Error al renombrar el archivo o carpeta\" + traceback.format_exc())\r\n platformtools.dialog_notification(\"Error al renombrar\", path)\r\n return False\r\n\r\n return True", "def base_rename(self, new_name):\n\n new_path = join(dirname(self.fspath), new_name)\n\n return self.rename(new_path)", "def fileRenameandReplace(filename,newfilename):\n try:\n os.rename(filename,newfilename)\n logging.info(\"Json file renamed in PD path\")\n except Exception as er:\n print (\"Not able to rename the json file \")\n return False", "def os_rename(self, source, destination):\n cmd = ['/bin/mv', source, destination]\n process = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n returncode = subprocess.Popen.wait(process)\n return returncode", "def rename_file(old_path, new_path):\n if os.path.exists(new_path):\n raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST),\n old_path, new_path)\n os.rename(old_path, new_path)", "def _renameFile(fileToRename, newName):\n\ttry:\n\t\tos.rename(str(fileToRename), newName)\n\texcept OSError as err:\n\t\tmsgBox = QtGui.QMessageBox()\n\t\tmsgBox.setText(\"Unable to rename file.\\n Error: %s\" % err)\n\t\tmsgBox.exec_()", "def re_name(name,new_name):\n\n try:\n 
os.rename(config_tools.full_dest+name,config_tools.full_dest+new_name)\n except OSError:\n print(f\"Не удалось переименовать {name}\")\n else:\n print(f\"{name} успешно переименновавано в {new_name}\")", "def rename(self, old_path, new_path):\n self.rename_file(old_path, new_path)\n self.checkpoints.rename_all_checkpoints(old_path, new_path)", "def name(self, new_name):\n self.rename(new_name)", "def file_rename(old_path, image_class):\n\tdirectory, filename, extension = filename_split(old_path)\n\tnew_filename = filename_generate(image_class)\n\tnew_path = os.path.join(directory, new_filename + extension)\n\tos.rename(old_path, new_path)\n\treturn new_path", "def rrename(filepath, namelength = 15):\n\tfilepath = os.path.abspath(filepath)\n\tif not os.path.isfile(filepath):\n\t\traise ValueError(\"The path is not a valid file path: \"+str(filepath))\n\t\n\tpath, name = os.path.split(filepath)\n\tname, ext = os.path.splitext(name)\n\tos.rename(filepath, os.path.join(path, rname(namelength)+ext)) #We replace name by the random name and reconstruct the path", "def change_dir(filename):", "def test_6a_change_file_name(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.rename_file_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare renaming test.\")\n self.dismiss_dialogs()\n function = js_func[\"rename\"] % (GST.gs_file_paths[\"file_to_rename_path\"], GST.gs_file_paths[\"after_rename_path\"])\n try:\n self.send_request(function, \"rename()\")\n except Exception as e:\n raise RenameException(\"Failed to rename the file: \" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise RenameException(\"Failed to rename the file: \" + response)", "def rename(self, name=None, destination=None):\n raise NotImplementedError\n return None", "def mv(*args):\n filenames = _glob(args)\n nfilenames = len(filenames)\n if nfilenames < 2:\n print 'Need at least two arguments'\n elif nfilenames == 2:\n try:\n os.rename(filenames[0], filenames[1])\n except os.error, detail:\n print \"%s: %s\" % (detail[1], filenames[1])\n else:\n for filename in filenames[:-1]:\n try:\n dest = filenames[-1] + '/' + filename\n if not os.path.isdir(filenames[-1]):\n print 'Last argument needs to be a directory'\n return\n os.rename(filename, dest)\n except os.error, detail:\n print \"%s: %s\" % (detail[1], filename)", "def ChangeName(self, newName):\n if newName != \"\":\n newPath = self.format + os.sep + \"playlists\" + os.sep + newName + \".txt\"\n os.replace(self.path, newPath)\n self.path = newPath", "def rename_file(file_path, pattern, replacement):\n old_file_name = os.path.basename(file_path)\n new_file_name = re.sub(pattern, replacement, old_file_name)\n return new_file_name", "def __rename_file(filename, suffix):\n filename = PDFWorkshop.__clean_filename(filename)\n return PDFWorkshop.__add_filename_suffix(filename, suffix)", "def rename_file(f: pathlib.Path) -> str:\n m = mutagen.File(f)\n if m is None: return\n new_name_parts = []\n if \"tracknumber\" in m:\n if \"discnumber\" in m:\n new_name_parts.append(pad_num_str(m[\"discnumber\"][0]) + \".\")\n new_name_parts.append(pad_num_str(m[\"tracknumber\"][0]) + \" - \")\n new_name_parts.append(m[\"title\"][0].replace(\"/\", \"_\"))\n if \"version\" in m:\n new_name_parts.append(\" - \" + \" - \".join(m[\"version\"]).replace(\"/\", \"_\"))\n return 
\"\".join(new_name_parts)", "def _rename(person_folder: str):\n all_image_paths = iglob(os.path.join(person_folder, \"*.*\"))\n all_image_paths = sorted([image for image in all_image_paths if image.endswith(\n \".jpg\") or image.endswith(\".png\") or image.endswith(\".jpeg\")])\n person_name = os.path.basename(os.path.normpath(person_folder))\n concat_name = '_'.join(person_name.split())\n for index, image_path in enumerate(all_image_paths):\n image_name = concat_name + '_' + '%04d' % (index + 1)\n file_ext = pathlib.Path(image_path).suffix\n new_image_path = os.path.join(person_folder, image_name + file_ext)\n os.rename(image_path, new_image_path)\n os.rename(person_folder, person_folder.replace(person_name, concat_name))", "def rename(old, new):\n\ttry:\n\t\tos.rename(old, new)\n\texcept OSError as e:\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise\n\t\tos.remove(old)", "def rename_file(source, destination, alog):\n\n # Some error checking against a legitimate source & destination.\n if not type(source) is str:\n raise CoreError('Source is not of str type.')\n elif not type(destination) is str:\n raise CoreError('Destination is not of str type.')\n elif not os.path.isfile(source):\n raise CoreError(source + ' is not a valid file.')\n\n head, tail = os.path.split(destination)\n if not os.path.isdir(head + '/'):\n try:\n os.makedirs(head + '/')\n except:\n raise CoreError('Failed to create new directory: '\n + (head + '/'))\n\n for i in range(0, len(MuzikArkive.illegal_name_characters)):\n if MuzikArkive.illegal_name_characters[i] in tail:\n tail = tail.replace(MuzikArkive.illegal_name_characters[i], '_')\n alog.rlog = MuzikArkive.illegal_name_characters[i] \\\n + ' was removed from ' + destination\n\n if not os.path.isfile(destination):\n try:\n os.rename(source, destination)\n except:\n raise CoreError('os.rename() Failed.')\n else:\n head, tail = destination.rsplit('.', 1)\n rname = True\n i = 1\n while rname:\n addon = '[' + str(i) + '].'\n if not os.path.isfile(head + addon + tail):\n try:\n os.rename(source, (head + addon + tail))\n except:\n raise CoreError('os.rename() Failed.')\n else:\n rname = False\n else:\n i += 1", "def change_nm(src,dst):\n\timport os\n\ttry:\n\t\tos.rename(src,dst)\n\texcept:\n\t\tprint \"this is a mistake\"\n\t\treturn -1\n\n\treturn 0", "def RenameFile(path, org, new):\n\n cont = zip(org, new)\n if not path.endswith((\"/\", \"\\\\\")):\n path = path + \"\\\\\"\n for file in cont:\n if os.path.isfile(path + file[0]):\n os.rename(path + file[0], path + file[1])\n\n return len(org) - len(new)", "def mv(src_path, dest_path):\n try:\n os.rename(src_path, dest_path)\n except OSError:\n # this will happen on windows\n os.remove(dest_path)\n os.rename(src_path, dest_path)", "def drename(filepath, namelength = 15):\n\tfilepath = os.path.abspath(filepath)\n\tif not os.path.isfile(filepath):\n\t\traise ValueError(\"The path is not a valid file path: \"+str(filepath))\n\t\n\tpath, name = os.path.split(filepath)\n\tname, ext = os.path.splitext(name)\n\tos.rename(filepath, os.path.join(path, dname(namelength)+ext)) #We replace name by the random name and reconstruct the path", "def rename(self, name):\n return self.parent.rename(self, name)", "def rename(self, name):\n return self.client.api.rename(self.id, name)", "def change_filepath_name(filepath: str, new_name: str, new_ext: Optional[str] = None) -> str:\n ext = new_ext or filepath_ext(filepath)\n return str(pathlib.Path(filepath).with_name(new_name).with_suffix(ext))", "def projectFileRenamed(self, oldfn, newfn):\n 
editor = self.getOpenEditor(oldfn)\n if editor:\n editor.fileRenamed(newfn)", "def RenameFile(self, file_id, new_title):\n f = self.service.files().update(fileId=file_id, body={\"title\":new_title}).execute()\n return f[\"id\"]", "def do_mv(self, args):\n if args:\n args = args.split()\n\n if not args or len(args) < 2:\n print('Usage: mv source_file target_file')\n return\n\n src = args[0]\n dst = args[1]\n if not (src.startswith('shared/') and dst.startswith('shared/')\n or self._user):\n print('login required for specifying non-shared file with mv')\n return\n\n try:\n new_name = self._qm.rename_file(self._user, src, dst)\n print('renamed file', src, 'to', new_name)\n except Exception as e:\n print('ERROR renaming %s: %s' % (src, e), file=sys.stderr)\n return", "def change_file_name(self, n):\n if type(n) != str or n is None:\n raise TypeError(\"Wrong type! Please pass 'n' as a string!\")\n self.name = n", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def rename(img):\n ext = splitext(img)[1].lower()\n name = get_date(open(img))\n if name is not None:\n name = name + ext\n return copy(img, name)", "def renaming(directory, folder):\r\n\t# rename every file in the directory\r\n\tfor file in sorted(os.listdir(directory)):\r\n\t\t# rename the file\r\n\t\tos.rename(os.path.join(directory, file), os.path.join(directory, str(folder) + '_' + str(file)))\r\n\tprint(\"Finished folder \" + str(folder)) # progress track\r", "def hmove(src_path, res_path):\n os.rename(src_path, res_path)", "def _rename(self, id: str, name: str) -> RenameFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.rename\n request_obj: RenameFolderRequestModel = endpoint.load_request(name=name)\n response: RenameFolderResponseModel = endpoint.perform_request(\n http=self.auth.http, request_obj=request_obj, id=id\n )\n return response", "def _renameDir(self) -> None:\n try:\n path = self._currPath.joinpath(self._editItemNameBefore)\n nameAfter = self._editItem.text()\n pathTo = self._currPath.joinpath(nameAfter)\n path.rename(pathTo)\n self._listDirectories()\n renamedItem = self._model.findItems(nameAfter)\n index = self._model.indexFromItem(renamedItem[0])\n self._mainFileView.scrollTo(index)\n self._mainFileView.setCurrentIndex(index)\n except FileExistsError:\n self._statusBar.showMessage('File/folder with that name already exists!', 3000)\n self._listDirectories()", "def rename_file(original, content_type, condo_name):\n condo_name = sanitize_filename(condo_name)\n original_file = os.path.join(DOWNLOAD_PATH, original)\n new_name = os.path.join(DOWNLOAD_PATH, content_type + \"\\\\\" + condo_name + \".txt\")\n extracted_file = os.path.join(DOWNLOAD_PATH, unzip_file(original_file))\n if os.path.exists(new_name):\n os.remove(new_name)\n os.renames(extracted_file, new_name)\n os.remove(original_file)", "def MoveFile(path, new_path):\n try:\n RemoveFile(new_path)\n os.rename(path, new_path)\n except OSError, e:\n if e.errno != errno.ENOENT:\n raise", "def rename_files():\n folder_dir = r\"C:\\Users\\keithmoore1.AD\\Desktop\\HAFB\\prankOrig\"\n files = os.listdir(folder_dir)\n save_path = os.getcwd() # current working directory\n for file in files:\n #remove digits from name\n new_file = file.lstrip(\"0123456789\")\n print(file, \" - \", new_file)\n # rename filename\n os.chdir(folder_dir)\n os.rename(file,new_file)\n # get back home\n os.chdir(save_path)", "def rename(self,oldName,newName):\n #--Update references\n fileInfo = self[oldName]\n self[newName] = self[oldName]\n del self[oldName]\n 
self.table.moveRow(oldName,newName)\n #--FileInfo\n fileInfo.name = newName\n #--File system\n newPath = os.path.join(fileInfo.dir,newName)\n oldPath = os.path.join(fileInfo.dir,oldName)\n renameFile(oldPath,newPath)\n #--Done\n fileInfo.madeBackup = False", "def move_file(self, old_file: str, new_sub_dir: str):\n full_old_path = os.path.join(self.root, old_file)\n full_new_path = os.path.join(self.root, new_sub_dir, old_file)\n os.rename(full_old_path, full_new_path)", "def remoteTestsFileRenamed(self, projectId, filePath, fileName, fileExtension, newName):\n if len(filePath) > 0:\n complete_path = \"%s/%s.%s\" % (filePath, fileName, fileExtension)\n else:\n complete_path = \"%s.%s\" % ( fileName, fileExtension)\n tabId = self.checkAlreadyOpened(path = complete_path, \n remoteFile=True, \n repoType=UCI.REPO_TESTS, \n project=projectId)\n if tabId is not None:\n doc = self.tab.widget(tabId)\n self.tab.setCurrentIndex(tabId)\n buttons = QMessageBox.Yes | QMessageBox.No \n answer = QMessageBox.question(self, Settings.instance().readValue( key = 'Common/name' ), \n self.tr(\"This file has been renamed.\\nDo you want to update the name ?\") , buttons)\n if answer == QMessageBox.Yes:\n doc.updateFilename( filename=newName )\n doc.setUnmodify()\n elif answer == QMessageBox.No:\n doc.unSaved()\n doc.setModify()", "def rename_file(file_path, equipt_nr):\n work_tuples = parse_columns()\n # Regex used to get differents parts of the file path\n path_regex = re.compile(r'(?P<path>[\\w\\\\:]*)\\\\(?P<filename>[\\w]*).(?P<extension>[\\w].)')\n # Match object containing the different parts of the file path\n match = path_regex.search(file_path)\n\n # Getting the right file to rename\n associated_nr = 0\n for ii in work_tuples:\n if match.group('filename') == ii[0]:\n associated_nr = ii[equipt_nr+1]\n\n # Renaming the file\n os.rename(file_path, match.group('path')+'\\\\'+associated_nr+'.'+match.group('extension'))", "def change_file_name():\n path = \"/etc/atuned/webserver/\"\n file_list = os.listdir(path)\n file_list.sort(key=lambda fn: os.path.getmtime(path + fn))\n if len(file_list) > 0 and re.match(r'\\S*-\\d{17}\\S*', file_list[-1]) is None:\n old_name = file_list[-1].split(\".\")[0]\n curr_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n new_name = old_name + \"-\" + str(curr_time) + str(random.randint(100, 999))\n os.rename(path + old_name + \".txt\", path + new_name + \".txt\")", "def copy_rename_file(source_file_path: str, target_dir: str, new_name: str) -> str:\n shutil.copy2(source_file_path, target_dir)\n target_path = os.path.join(target_dir, os.path.basename(source_file_path))\n new_file_name = new_name + get_extension(source_file_path)\n new_file_path = os.path.join(target_dir, new_file_name)\n os.rename(target_path, new_file_path)\n return new_file_path", "def rename(self, name):\n self.name = name", "def rename(self,oldName,newName):\n isLoaded = self.isLoaded(oldName)\n if isLoaded: self.unload(oldName)\n FileInfos.rename(self,oldName,newName)\n self.refreshDoubleTime()\n if isLoaded: self.load(newName)", "def mv_file(file_name: str, path: str) -> None:\n global number_of_files\n if file_name.startswith(\".\"):\n pass\n else:\n for extensions in file_formats_list:\n if file_.endswith(extensions):\n shutil.move(desktop + \"/\" + file_, path)\n print(f\"moving {colored(file_name, 'yellow')} to {path}\")\n number_of_files += 1\n else:\n pass", "def rename(ctx, input_file, output_file):\n ctx.ensure_object(dict)\n ctx.obj[\"reader\"] = PFBReader(input_file)\n ctx.obj[\"writer\"] = 
PFBWriter(output_file)", "def renameFile(oldPath,newPath,makeBack=False):\n if os.path.exists(newPath): \n if makeBack:\n backPath = newPath+'.bak'\n if os.path.exists(backPath):\n os.remove(backPath)\n os.rename(newPath,backPath)\n else:\n os.remove(newPath)\n os.rename(oldPath,newPath)", "def file_name(self, new_file_name):\n self._file_name = os.path.abspath(new_file_name).replace(\"\\\\\", \"/\")", "def prefix_file(filename, prefix):\n path, file_or_dir = os.path.split(filename)\n new_filename = os.path.join(path, prefix + file_or_dir)\n os.rename(filename, new_filename)", "def move(self,fileName,destDir):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,fileName)\n renameFile(srcPath,destPath)\n self.refresh()", "def main():\n print(\"Current directory is\", os.getcwd())\n os.chdir('Lyrics/Lyrics')\n\n for dir_name, dir_list, file_list in os.walk(\".\"):\n for filename in file_list:\n file_path = dir_name + \"\\\\\" + filename\n new_name = get_fixed_filename(file_path)\n os.rename(file_path, new_name)", "def rename(self, src, dst, preserve=False):\n self.connect()\n if preserve:\n self._write('RENAMENX %s %s\\r\\n' % (src, dst))\n return self._get_numeric_response()\n else:\n self._write('RENAME %s %s\\r\\n' % (src, dst))\n return self._get_simple_response().strip()", "def rename_files(files: list, new_file_name: str) -> bool:\n if len(files) == 0:\n print(\"list of files was empty. Could not rename files.\")\n return False\n\n path = None\n for index, item in enumerate(files, start=1):\n path = Path(rf\"{item}\")\n\n if path.exists():\n # Path class takes care of path slashes depending on system\n new_path = Path(str(path.parent) + \"/\" + new_file_name +\n str(index) + path.suffix)\n path.replace(new_path)\n\n else:\n print(\"Path did not exist. 
Check file path for errors.\")\n return False\n return True", "def rename_folder(self, name: str, folder: Folder) -> None:\n folder.title = name\n self._save_feeds()", "def rename_file(fname):\n x,y = load_file(fname)\n date=y[0].split(\".\")\n if len(y[2])<20:\n title=y[2]\n else:\n title=y[2][0:20]\n title=title.replace(\" \",\"_\")\n \n new_name=\"{}{}{}{}.csv\".format(date[2],date[1],date[0],title)\n new_appendix=rename_appendix(y[10],new_name)\n os.rename(fname,new_name)\n replace_line(new_name,10,'Anhang;\"{}\"'.format(new_appendix))\n return new_name", "def rename(cls, client, resource, new_name) :\n\t\ttry :\n\t\t\trenameresource = rewriteaction()\n\t\t\tif type(resource) == cls :\n\t\t\t\trenameresource.name = resource.name\n\t\t\telse :\n\t\t\t\trenameresource.name = resource\n\t\t\treturn renameresource.rename_resource(client,new_name)\n\t\texcept Exception as e :\n\t\t\traise e", "def rename(self, target):\r\n py.process.cmdexec(\"svn move --force %s %s\" %(str(self), str(target)))", "def renameid(target_folder):\n\tfor filepath in iglob(target_folder, recursive=True):\n\t\tp = Path(filepath)\n\t\tnew = f\"{p.stem[0:6]}{p.suffix}\"\n\t\ttry:\n\t\t\tp.rename(Path(p.parent, new))\n\t\t\tlogger.info(new)\n\t\texcept Exception as e:\n\t\t\tlogger.error(e)\n\t\t\tcontinue", "def _rename_file(self, old_path, new_path):\n if not self.mount():\n return False\n _log(\"AnnexGvfsBackend._rename_file(%r -> %r)\" % (old_path, new_path))\n old_dir_uri = self.path_to_uri(os.path.dirname(old_path))\n new_dir_uri = self.path_to_uri(os.path.dirname(new_path))\n old_uri = self.path_to_uri(old_path)\n new_uri = self.path_to_uri(new_path)\n try:\n if not self.gvfs.create_dir_p(new_dir_uri):\n raise IOError()\n if not self.gvfs.rename_file(old_uri, new_uri):\n raise IOError()\n except IOError:\n return False\n else:\n return True", "def rename(self, name: str):\n self.doc['name'] = name", "def main(root, filelist):\n #print \"got %s: %s\" % (root, filelist)\n rename(root, filelist)", "def changeFilenames(speciesfolder, species):\n\tfor filename in os.listdir(speciesfolder):\n\t\tif filename.startswith(\"generic\"):\n\t\t\tnewname = filename.replace(\"generic\", species)\n\t\t\tos.rename(os.path.join(speciesfolder, filename), os.path.join(speciesfolder, newname))", "def renamefile(filename):\n new_data_list = []\n with open(filename, 'r') as f:\n data_list = f.read().split('\\n')\n\n print('Generating new data list..')\n for data in tqdm(data_list):\n if len(data) == 0:\n continue\n data_info = data.split(' ')\n\n #data_info[0] = data_info[0].replace('jpg', 'png')\n #data_info[1] = data_info[1].replace('jpg', 'png')\n for it, name in enumerate(data_info):\n data_info[it] = '/'.join(name.split('/')[1:])\n if data_info[2].find('extras') == -1:\n new_data_list.append(' '.join(data_info))\n\n with open(filename, 'w') as f:\n print('writing new data names..')\n\n for it, data in tqdm(enumerate(new_data_list)):\n if len(data) == 0:\n continue\n\n if it == len(new_data_list)-1:\n f.write(data)\n else:\n f.write(data+'\\n')\n\n print('Done.')", "def _rename_ondisk(self):\n if not self.has_moved or not self.renames_remaining:\n return\n\n try:\n os.rename(self.rename_phase_src, self.rename_phase_dst)\n except Exception:\n sys.stderr.write(\"Failed to renamed '%s' to '%s'\\n\" %\n (self.rename_phase_src,\n self.rename_phase_dst))\n raise\n\n self._rename_phase += 1", "def rename(self, name, overwrite=False):\n return _image.image_rename(self, name, overwrite)", "def rename(file, format_spec, 
dir=DIR()):\n\tfile = pathlib.Path(file)\n\t\n\tprint(\"Parsing {name}...\".format(name=file.name))\n\t\n\tarticle = Article(file.read_bytes())\n\t\n\tnew_file = format_spec.format(\n\t\tarticle = article,\n\t\ttitle = dir.getTitle(file) or article.getTitle(),\n\t\tauthor = article.getAuthor() or dir.getAuthor(file),\n\t\tboard = article.getBoard(),\n\t\ttime = article.getTime() or dir.getTime(file) or format_dummy\n\t)\n\tnew_file = safe_file_name(new_file)\n\tnew_file = file.with_name(new_file)\n\t\n\tif file == new_file:\n\t\tprint(\"Same file name!\\n\")\n\t\treturn\n\t\n\tif new_file.exists():\n\t\tnum = 2\n\t\t\n\t\twhile True:\n\t\t\ttemp_file = \"{name} ({num}){ext}\".format(\n\t\t\t\tnum = num,\n\t\t\t\tname = new_file.stem,\n\t\t\t\text = new_file.suffix\n\t\t\t)\n\t\t\ttemp_file = new_file.with_name(temp_file)\n\t\t\t\n\t\t\tif file == temp_file:\n\t\t\t\tprint(\"Same file name!\\n\")\n\t\t\t\treturn\n\t\t\t\t\n\t\t\tif not temp_file.exists():\n\t\t\t\tnew_file = temp_file\n\t\t\t\tbreak\n\t\t\t\t\n\t\t\tnum += 1\n\t\n\tprint(\"Rename to {name}...\\n\".format(name=new_file.name))\n\t\n\tfile.rename(new_file)", "def rename_in_dir(directory, names, name_only=False):\n for curfile in get_mp3_files(directory):\n parent, curname = split(curfile)\n index = splitext(curname)[0]\n basename = names[curname]\n newname = \"%s%s.mp3\" % (\n '' if name_only else str(index)+' - ', basename)\n newname = join(parent, newname)\n try:\n rename(curfile, newname)\n except FileNotFoundError:\n continue", "def remoteAdaptersFileRenamed(self, filePath, fileName, fileExtension, newName):\n if len(filePath) > 0:\n complete_path = \"%s/%s.%s\" % (filePath, fileName, fileExtension)\n else:\n complete_path = \"%s.%s\" % ( fileName, fileExtension)\n tabId = self.checkAlreadyOpened(path = complete_path, \n remoteFile=True, \n repoType=UCI.REPO_ADAPTERS, \n project=0)\n if tabId is not None:\n doc = self.tab.widget(tabId)\n self.tab.setCurrentIndex(tabId)\n buttons = QMessageBox.Yes | QMessageBox.No \n answer = QMessageBox.question(self, Settings.instance().readValue( key = 'Common/name' ), \n self.tr(\"This file has been renamed.\\nDo you want to update the name ?\") , buttons)\n if answer == QMessageBox.Yes:\n doc.updateFilename( filename=newName )\n doc.setUnmodify()\n elif answer == QMessageBox.No:\n doc.unSaved()\n doc.setModify()", "def test_rename_python_api(self):\n\n rename.rename([NEW_APP_NAME, NEW_DOMAIN])\n self.assertTrue(os.path.exists(RENAMED_PROJECT_DIR))", "def rename(self,newName):\n self.userName = newName", "def fs_rename_entry(self, oldPath, newPath):\n\t\treturn Job(SDK.PrlSrv_FsRenameEntry(self.handle, oldPath, newPath)[0])", "def rename_site(site_name, newname):\n siteid = _get_site_id(site_name)\n if siteid is None: # or not path.exists():\n raise FileNotFoundError\n cur = conn.cursor(cursor_factory=pgx.RealDictCursor)\n querystring = 'update sites set name = %s where id = %s;'\n result = execute_query(querystring, (newname, siteid))\n # create the physical destination (mirror) so that css and images can be moved there\n path = WEBROOT / site_name\n newpath = WEBROOT / newname\n path.rename(newpath)", "def renameFile(oldName,newName):\n \n thisFunc = inspect.currentframe().f_code.co_name\n if oldName == newName:\n return True\n else:\n try:\n os.rename(oldName,newName)\n return True\n except Exception as e:\n print(f\"{thisFunc} issue: {e}\")\n return False", "def renamed(self, source, dest):\r\n self.__close_and_reload(source, new_filename=dest)", "def 
ren_mosaic(mosaic_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Original/', \r\n file_pattern='*stitch.jpg'): \r\n \r\n \r\n if not os.path.exists(mosaic_dir):\r\n sys.exit('input folder does not exist')\r\n \r\n mosaics = []\r\n for root, dirnames, filenames in os.walk(mosaic_dir):\r\n for filename in fnmatch.filter(filenames, file_pattern):\r\n mosaics.append(os.path.join(root, filename).replace('\\\\','/'))\r\n \r\n s = 0\r\n r = 0\r\n for m in mosaics:\r\n dir_name = os.path.dirname(m).split('/')[-1]\r\n new_name = os.path.dirname(m) + '/' + dir_name + '.jpg'\r\n if os.path.exists(new_name):\r\n print('skipping: %s' % m)\r\n s+=1\r\n else:\r\n os.rename(m, new_name)\r\n print('renamed: %s' % new_name)\r\n r+=1\r\n \r\n print('renamed total of %i files' % r)\r\n print('skipped total of %i files' % s)", "def userRenamed(self, oldname, newname):\n # Send messasge to Server bot.\n self.data_in(text=\"\", type=\"renamed\", oldname=oldname, newname=newname)", "def mv(path_file_folder, new_path):\n if not is_folder(new_path):\n raise DegooError(f\"mv: The target path is not a folder\")\n\n source_path = path_file_folder if is_folder(path_file_folder) else path_file_folder[:path_file_folder.rfind('/')]\n\n if source_path == new_path:\n raise DegooError(f\"mv: The target path cannot be the same as the source path\")\n\n if isinstance(path_file_folder, int):\n file_id = path_file_folder\n elif isinstance(path_file_folder, str):\n file_id = path_id(path_file_folder)\n else:\n raise DegooError(f\"rm: Illegal file: {path_file_folder}\")\n\n if isinstance(new_path, int):\n new_parent_id = new_path\n elif isinstance(new_path, str):\n new_parent_id = path_id(new_path)\n else:\n raise DegooError(f\"rm: Illegal destination folder: {new_path}\")\n\n return api.mv(file_id, new_parent_id)", "def _download_rename(filename):\n url_loc = \"http://www.stsci.edu/~kgordon/beast/\"\n fname_dld = download_file(\"%s%s\" % (url_loc, filename))\n extension = filename.split(\".\")[-1]\n fname = \"%s.%s\" % (fname_dld, extension)\n os.rename(fname_dld, fname)\n return fname", "def rename(self, name):\n return _coconut_tail_call(self.__class__, name)" ]
[ "0.7906396", "0.7777854", "0.77087426", "0.763373", "0.755172", "0.752285", "0.74998015", "0.74556607", "0.73914254", "0.7262698", "0.71969265", "0.7161511", "0.70347023", "0.70347023", "0.70059544", "0.699431", "0.69706184", "0.69470656", "0.6944914", "0.69139314", "0.690678", "0.6895383", "0.6836334", "0.6759159", "0.6758383", "0.66953576", "0.6692369", "0.66760796", "0.6631283", "0.6598985", "0.65902644", "0.6589315", "0.65879625", "0.65762246", "0.6564192", "0.65625", "0.65452", "0.65241337", "0.6520967", "0.6510135", "0.6495447", "0.64804715", "0.6477491", "0.6463269", "0.6455275", "0.645133", "0.64508396", "0.6435965", "0.64357495", "0.6434677", "0.6422475", "0.6420993", "0.63922477", "0.63774306", "0.63617945", "0.6356483", "0.63391465", "0.6336046", "0.6331424", "0.63249457", "0.63212776", "0.63180065", "0.63056314", "0.630361", "0.63011396", "0.62961215", "0.62741417", "0.62692577", "0.6267169", "0.6251954", "0.6240164", "0.6228311", "0.62247205", "0.6207256", "0.6196744", "0.619262", "0.61749816", "0.6154889", "0.61319", "0.6124154", "0.6115063", "0.61100465", "0.61018884", "0.6093815", "0.60911745", "0.6090507", "0.606767", "0.6051891", "0.6046319", "0.60349137", "0.6031384", "0.6022493", "0.60031706", "0.59924173", "0.5989763", "0.5984434", "0.5969955", "0.596817", "0.5968144", "0.59469587" ]
0.6451277
46
Move a file or folder to a new destination
def mv(self, file_id, new_parent_id):
    # Build the GraphQL mutation that moves the given file IDs under a new parent folder.
    func = f"setMoveFile(Token: $Token, Copy: $Copy, NewParentID: $NewParentID, FileIDs: $FileIDs)"
    query = f"mutation SetMoveFile($Token: String!, $Copy: Boolean, $NewParentID: String!, $FileIDs: [String]!) {{ {func} }}"

    request = {"operationName": "SetMoveFile",
               "variables": {
                   "Token": self.KEYS["Token"],
                   "NewParentID": new_parent_id,
                   "FileIDs": [file_id]
               },
               "query": query
               }

    header = {"x-api-key": self.KEYS["x-api-key"]}

    response = requests.post(URL_API, headers=header, data=json.dumps(request))

    if response.ok:
        rd = json.loads(response.text)

        if "errors" in rd:
            # Collect every error message the API reports into one exception.
            messages = []
            for error in rd["errors"]:
                messages.append(error["message"])
            message = '\n'.join(messages)
            raise DegooError(f"setMoveFile failed with: {message}")
        else:
            return rd["data"]['setMoveFile']
    else:
        raise DegooError(f"setMoveFile failed with: {response}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_file(source, destination):\n shutil.move(source, destination)", "def moveFile(source, dest):\n try:\n shutil.move(source, dest) \n except IOError as e:\n print (\"Unable to move file. %s\" %(e))", "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def move(self, destination, **kwargs):\n assert _os.path.exists(self.__str__()) == True\n _shutil.move(self.__str__(), destination, **kwargs)", "def act_move_file(self, file_source, file_target):\n try:\n if not os.path.isfile(file_source):\n return\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.move(file_source, file_target)\n #shutil.copy2(file_source, file_target)\n #os.remove(file_source)\n self.logger.debug('%s: Action: <move> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file move: %s -> %s', file_source, file_target)", "def relocate(self, source, destination):\n destination_dir = os.path.dirname(destination)\n if not os.path.exists(destination_dir):\n self.subdir(destination_dir)\n os.rename(source, destination)", "def file_move(self, from_path, to_path):\n params = {'root': self.session.root,\n 'from_path': format_path(from_path),\n 'to_path': format_path(to_path)}\n\n url, params, headers = self.request(\"/fileops/move\", params)\n\n return self.rest_client.POST(url, params, headers)", "def move_file(file, dest_path):\n if os.path.isdir(dest_path):\n shutil.move(file, dest_path)\n else:\n os.mkdir(dest_path)\n shutil.move(file, dest_path)", "def _move(self, in_file, dest):\n dest = os.path.abspath(dest)\n _, in_base_name = os.path.split(in_file)\n dest_parent_dir, _ = os.path.split(dest)\n if os.path.exists(dest):\n out_file = os.path.join(dest, in_base_name)\n else:\n if not os.path.exists(dest_parent_dir):\n os.makedirs(dest_parent_dir)\n out_file = dest\n shutil.move(in_file, dest)\n\n return out_file", "def mv(cur_path, new_path):\n cur_abs = navigate.get_abs_path(cur_path)\n new_abs = navigate.get_abs_path(new_path)\n cur_parent, cur_name = navigate.split_path(cur_abs)\n new_parent, new_name = navigate.split_path(new_abs)\n up_parent, up_name = navigate.split_path(new_parent)\n if not db.file_exists(cur_parent, cur_name):\n print \"Error: '\" + cur_name + \"' does not exist.\"\n elif up_parent is not None and not db.directory_exists(up_parent, up_name):\n print \"Error: '\" + new_parent + \"' is not a valid directory.\"\n elif db.file_exists(new_parent, new_name):\n print \"Error: '\" + new_name + \"' already exists at that location.\"\n else:\n cur_dbox_path = '/' + cur_name\n new_dbox_path = '/' + new_name\n access_token = db.get_access_to_file(cur_parent, cur_name)\n client = dropbox.client.DropboxClient(access_token)\n client.file_move(cur_dbox_path, new_dbox_path)\n db.move_file(cur_parent, cur_name, new_parent, new_name)", "def move_file(path):\n new_path = os.path.join(TEST_DIR, TEST_FILE)\n command = ['mv', TEST_FILE, new_path]\n file_operation(path, command)", "def 
mv(path_file_folder, new_path):\n if not is_folder(new_path):\n raise DegooError(f\"mv: The target path is not a folder\")\n\n source_path = path_file_folder if is_folder(path_file_folder) else path_file_folder[:path_file_folder.rfind('/')]\n\n if source_path == new_path:\n raise DegooError(f\"mv: The target path cannot be the same as the source path\")\n\n if isinstance(path_file_folder, int):\n file_id = path_file_folder\n elif isinstance(path_file_folder, str):\n file_id = path_id(path_file_folder)\n else:\n raise DegooError(f\"rm: Illegal file: {path_file_folder}\")\n\n if isinstance(new_path, int):\n new_parent_id = new_path\n elif isinstance(new_path, str):\n new_parent_id = path_id(new_path)\n else:\n raise DegooError(f\"rm: Illegal destination folder: {new_path}\")\n\n return api.mv(file_id, new_parent_id)", "def movefile(destpath,filename,sourcepath):\n\n\tcommand = 'mv ' + filename + ' ' + destpath\n\t\n\ttry :\n\t\tst = commands.getstatusoutput(command)\n\texcept Exception:\n\t\traise", "def move(self,fileName,destDir):\n self.unload(fileName)\n FileInfos.move(self,fileName,destDir)", "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def move(self, source, target, force=False):\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def move(self,fileName,destDir):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,fileName)\n renameFile(srcPath,destPath)\n self.refresh()", "def move(self, name, source, dest):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n self._run(name, ['move', source, dest])\n self.m.path.mock_copy_paths(source, dest)\n self.m.path.mock_remove_paths(source)", "def MoveFile(path, new_path):\n try:\n RemoveFile(new_path)\n os.rename(path, new_path)\n except OSError, e:\n if e.errno != errno.ENOENT:\n raise", "def move_to(self, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/move\" % self.project_folder_id, params=params)", "def MovePath(options, src, dst):\n # if the destination is not an existing directory, then overwrite it\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n\n # If the destination exists, the remove it\n if os.path.exists(dst):\n if options.force:\n Remove(['-vfr', dst])\n if os.path.exists(dst):\n raise OSError('mv: FAILED TO REMOVE ' + dst)\n else:\n raise OSError('mv: already exists ' + dst)\n for _ in range(5):\n try:\n os.rename(src, dst)\n break\n except OSError as error:\n print('Failed on %s with %s, retrying' % (src, error))\n time.sleep(5)\n else:\n print('Gave up.')\n raise OSError('mv: ' + error)", "def mv(self, src: int, dest: int) -> bool:\n url = 'https://webapi.115.com/files/move'\n result = self.s.post(url, data={'pid': dest, 'fid[0]': src}, headers={'Origin': origin['webapi'], 'Referer': referer['115'].format(self.default_dir)}).json()\n if result['errno'] == '':\n _ = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs) # TODO: need to test\n self._dirs_lookup[src] = self._dirs_lookup[dest].append(dest)\n parent = functools.reduce(dict.__getitem__, self._dirs_lookup[src], 
self.dirs)\n if src not in parent:\n parent.update({src: _})\n else:\n parent.get(src).update(_)\n return True", "def move(source, destination):\n logger.info(\"Move: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.move(source, destination)\n return True\n except Exception:\n logger.exception(\"Failed to Move: %s -> %s\" % (source, destination))\n return False", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def move(self, target):\n if target.relto(self):\n raise error.EINVAL(target, \"cannot move path into a subdirectory of itself\")\n try:\n self.rename(target)\n except error.EXDEV: # invalid cross-device link\n self.copy(target)\n self.remove()", "def move_file(self, old_file: str, new_sub_dir: str):\n full_old_path = os.path.join(self.root, old_file)\n full_new_path = os.path.join(self.root, new_sub_dir, old_file)\n os.rename(full_old_path, full_new_path)", "def _move_to_inserted_directory(file_path: str):\n parts = list(Path(file_path).parts)\n parts.insert(-1, 'inserted')\n move(file_path, str(Path(*parts)))", "def move_to(self, file_name, to_dir, change_name_to=None):\n self._check_filename(file_name)\n src = posixpath.join(server_setup.LOCAL_DIR, file_name)\n file_name = file_name if change_name_to is None else change_name_to\n dest = posixpath.join(self.root, to_dir, file_name)\n print(f\"--> Moving file {src} to {dest}\")\n self._check_file_exists(dest, should_exist=False)\n self.copy(src, dest)\n self.remove(src)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def move_item(dataobj_id, new_path):\n file = get_by_id(dataobj_id)\n data_dir = get_data_dir()\n out_dir = (data_dir / new_path).resolve()\n if not file:\n raise FileNotFoundError\n if (out_dir / file.parts[-1]).exists():\n raise FileExistsError\n elif is_relative_to(out_dir, data_dir) and out_dir.exists(): # check file isn't\n return shutil.move(str(file), f\"{get_data_dir()}/{new_path}/\")\n return False", "def move(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n pass\n else: # Something exists here.\n if not overwrite:\n raise ValueError(\"Something exists at %s\" % remote.uri)\n # There's no way to copy and overwrite at the same time,\n # so delete the existing file first.\n # Note that this can delete folders too.\n remote.delete()\n\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_move_v2, self.path, dest)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n\n pdbox.info(\"Moved %s to %s\" % (self.path, dbx_uri(dest)))\n if not pdbox._args.get(\"dryrun\"): # Return the newly created object.\n return 
get_remote(None, meta=result.metadata)", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def move_file(self, ctx):\n pass", "def move_files(file: str, destination: str):\n\n try:\n result = _process_files(\"mv\", \"-v\", file, destination)\n except FileNotFoundError:\n print(\"ERROR: '{}' does not exist.\".format(file))\n except FolderNotFoundError:\n print(\n \"ERROR: '{}' destination does not exist.\".format(destination)\n )\n except InsufficientRightsError:\n print(\"ERROR: Insufficient rights to destination '{}'.\".format(\n destination)\n )\n else:\n print(result)", "def move_from_temp_directory(self):", "def mv(src_path, dest_path):\n try:\n os.rename(src_path, dest_path)\n except OSError:\n # this will happen on windows\n os.remove(dest_path)\n os.rename(src_path, dest_path)", "def move_file(source_file, dest_file, sudo=True):\n LOG.info(\"Copy file and preserve attributes\")\n cmd = \"mv {} {}\".format(source_file, dest_file)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def moveImage(image, dest):\n if not os.path.exists(dest):\n os.mkdir(dest)\n move(image, dest)", "def file_move(session, dc_ref, src_file, dst_file):\n LOG.debug(\"Moving file from %(src)s to %(dst)s.\",\n {'src': src_file, 'dst': dst_file})\n vim = session._get_vim()\n move_task = session._call_method(\n session._get_vim(),\n \"MoveDatastoreFile_Task\",\n vim.get_service_content().fileManager,\n sourceName=src_file,\n sourceDatacenter=dc_ref,\n destinationName=dst_file,\n destinationDatacenter=dc_ref)\n session._wait_for_task(move_task)\n LOG.debug(\"File moved\")", "def moveAsset(self, src, dst):\n if not self.exists( self.dirname(dst) ):\n self.makedirs( self.dirname(dst) )\n self.move(src, dst)\n\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return \n\n cache_dst = self.cache_path(dst)\n if not os.path.exists( os.path.dirname(cache_dst) ):\n os.makedirs( os.path.dirname(cache_dst) )\n shutil.move(cache_src, cache_dst)", "def move_file(path_from, filename):\n finaldir = getormakedir(settings.UPLOAD_DEST_DIR, filename)\n\n path_to = os.path.join(finaldir, filename)\n\n if not os.path.exists(path_to):\n shutil.copyfile(path_from, path_to)\n if settings.REMOVE_UPLOAD_FILES:\n remove_file(path_from)\n\n return path_to", "def move_to(self, file_name, to_dir, change_name_to=None):\n raise NotImplementedError", "def move_file(host, source_fqpath, dest_fqpath):\n command = \"mv %s %s\" % (source_fqpath, dest_fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('mv failed: %s' % rerr)\n return False", "def move(self, new_path):\n assert isinstance(new_path, str)\n if not new_path.startswith('/'):\n new_path = '/' + new_path\n if new_path.endswith('/'):\n self.filename = new_path + self.name\n else:\n try:\n self.items.get(filepath=new_path, is_dir=True)\n self.filename = new_path + '/' + self.name\n except exceptions.NotFound:\n self.filename = new_path\n\n return self.update(system_metadata=True)", "def move_dirs(args):\n src = args[0]\n dst = args[1]\n print(\"Moving from: {}\".format(src))\n print(\" to: {}\".format(dst))\n shutil.move(src, dst)\n return", "def move(src, dst, ignore=False, force=False):\n copy(src, dst, ignore, force)\n remove(src)", "def move_file(src, dst):\n # Sanity checkpoint\n src = re.sub('[^\\w/\\-\\.\\*]', '', src)\n dst = re.sub('[^\\w/\\-\\.\\*]', '', dst)\n if len(re.sub('[\\W]', '', src)) < 5 or len(re.sub('[\\W]', '', dst)) < 5:\n debug.log(\"Error: Moving file 
failed. Provided paths are invalid! src='%s' dst='%s'\"%(src, dst))\n else:\n # Check destination\n check = False\n if dst[-1] == '/':\n if os.path.exists(dst):\n check = True # Valid Dir\n else:\n debug.log(\"Error: Moving file failed. Destination directory does not exist (%s)\"%(dst)) #DEBUG\n elif os.path.exists(dst):\n if os.path.isdir(dst):\n check = True # Valid Dir\n dst += '/' # Add missing slash\n else:\n debug.log(\"Error: Moving file failed. %s exists!\"%dst)\n elif os.path.exists(os.path.dirname(dst)):\n check = True # Valid file path\n else:\n debug.log(\"Error: Moving file failed. %s is an invalid distination!\"%dst)\n if check:\n # Check source\n files = glob.glob(src)\n if len(files) != 0:\n debug.log(\"Moving File(s)...\", \"Move from %s\"%src, \"to %s\"%dst)\n for file_ in files:\n # Check if file contains invalid symbols:\n invalid_chars = re.findall('[^\\w/\\-\\.\\*]', os.path.basename(file_))\n if invalid_chars:\n debug.graceful_exit((\"Error: File %s contains invalid \"\n \"characters %s!\"\n )%(os.path.basename(file_), invalid_chars))\n continue\n # Check file exists\n if os.path.isfile(file_):\n debug.log(\"Moving file: %s\"%file_)\n shutil.move(file_, dst)\n else:\n debug.log(\"Error: Moving file failed. %s is not a regular file!\"%file_)\n else: debug.log(\"Error: Moving file failed. No files were found! (%s)\"%src)", "def move_to(self, file_name, to_dir=None, change_name_to=None):\n self._check_filename(file_name)\n from_path = os.path.join(self.local_root, file_name)\n\n if not os.path.isfile(from_path):\n raise FileNotFoundError(\n f\"{file_name} not found in {self.local_root} on local machine\"\n )\n\n file_name = file_name if change_name_to is None else change_name_to\n to_dir = \"\" if to_dir is None else to_dir\n to_path = posixpath.join(self.root, to_dir, file_name)\n self.makedir(to_dir)\n self._check_file_exists(to_path, should_exist=False)\n\n with self.ssh.open_sftp() as sftp:\n print(f\"Transferring {from_path} to server\")\n sftp.put(from_path, to_path)\n\n print(f\"--> Deleting {from_path} on local machine\")\n os.remove(from_path)", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def move_character(character, dest):\n character_path = dirname(character.path)\n shutil.move(character_path, dest)", "def mv(self, source: str, filename: str) -> None:\n\n self.cp(source, filename)\n self.rm(source)", "def hmove(src_path, res_path):\n os.rename(src_path, res_path)", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def move_file_in_dir(name_file, desten):\n\n if os.path.isfile(config_tools.full_dest+name_file):\n try:\n shutil.move(config_tools.full_dest + name_file, config_tools.full_dest + desten)\n except OSError:\n print(f\"Не удалось переместить {name_file} в папку:{desten}\")\n else:\n 
print(f\"Файл {name_file} находиться в папке {desten}\")", "def move(self, dst, src): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def transfer_files(src: str, dst: str, move_src_data: bool = False):\n if move_src_data:\n logger.info('Move {0} to {1}'.format(src, dst))\n shutil.move(src, dst)\n else:\n logger.info('Copy {0} to {1}'.format(src, dst))\n copy_tree(src, dst)", "def os_rename(self, source, destination):\n cmd = ['/bin/mv', source, destination]\n process = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n returncode = subprocess.Popen.wait(process)\n return returncode", "def move_file():\n # print(\"\\n\".join(os.listdir(filepath)))\n # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]\n # print(filepath + \":\\n \" + \"\\n \".join(folders))\n folders = filter(os.path.isdir, os.listdir(u\".\"))\n # print(\"Sub-folders: \", u\"\\n\".join(folders))\n for folder in folders:\n files = [os.path.join(folder, fn) for fn in os.listdir(folder)]\n files = filter(os.path.isfile, files)\n for fn in files:\n _, filename = os.path.split(fn)\n shutil.move(fn, filename)\n assert 0 == len(os.listdir(folder))", "def MoveFile(from_path, to_path, check_conflicts=True):\n from_path = from_path.replace(\"/\", \"\\\\\")\n if check_conflicts and not os.path.isfile(from_path): # Don't move non-existant path\n raise FileNotFoundError(\"Path {} does not exist!\".format(from_path))\n to_path = to_path.replace(\"/\", \"\\\\\")\n if check_conflicts and not os.path.isdir(os.path.dirname(to_path)): # Don't move to non-existant dir\n raise FileNotFoundError(\"Path {} does not exist to move to!\".format(os.path.dirname(to_path)))\n values = __get_current_values()\n if check_conflicts and os.path.isfile(to_path): # Don't move to already-existing destination unless it will be deleted/moved\n values.reverse()\n try:\n to_path_index = values.index(\"\\\\??\\\\\" + to_path)\n except ValueError:\n to_path_index = -1\n if to_path_index % 2 == 0 or to_path_index == -1:\n raise FileExistsError(\"Path {} already exists and isn't already being deleted/moved!\".format(to_path))\n values.reverse()\n values.append(\"\\\\??\\\\\" + from_path)\n values.append(\"\\\\??\\\\\" + to_path)\n __set_registry(values)", "def on_moved(self, event):\n super(myEventHandler,self).on_moved(event)\n #moveto events from external folders have no src_path\n source = event.src_path\n dest = event.dest_path\n if event.is_directory:\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n pass\n #file = splitpath[1]\n #pathtoonedir = self.onedir.getonedirrectory()\n #oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n #newpath = splitdest[0].replace(pathtoonedir ,\"\")\n #if oldpath is \"\":\n # oldpath = os.path.sep\n #self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! 
\" + e\n exit(1)\n else:\n #rename!!!!!!!!\n oldname = source\n newname = dest\n pathtoonedir = self.onedir.getonedirrectory()\n oldname = oldname.replace(pathtoonedir ,\"\")\n newname = newname.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(oldname,newname)\n else:\n #if it comes from outside the folder structure\n if source is None:\n try:\n #use os.path.split to get file name and path\n splitpath = split(dest)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! \" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n else:\n #file was moved!\n #check if name stays the same i.e. it's a move not a rename!\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n newpath = splitdest[0].replace(pathtoonedir ,\"\")\n if oldpath is \"\":\n oldpath = os.path.sep\n self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n file = splitpath[1]\n newname = splitdest[1]\n pathtoonedir = self.onedir.getonedirrectory()\n path = splitpath[0].replace(pathtoonedir ,\"\")\n if path is \"\":\n path = os.path.sep\n else:\n path = path[1:]\n self.onedir.rename(file,path,newname)", "def move_file_to_dir(f, dest_dir):\n ls = list_files(dest_dir)\n if f not in ls:\n shutil.move(f, dest_dir)", "def move_file_to_directory(base_path, file_name, directory_name):\n path = FileUtils.full_path\n\n full_file_path = path(base_path, file_name)\n full_dir_path = path(base_path, directory_name)\n full_new_path = path(full_dir_path, file_name)\n try:\n os.rename(full_file_path, full_new_path)\n except FileNotFoundError:\n pass\n # pass for now", "def moveFile(sourceFullPath,targetDir):\n\n thisFunc = inspect.currentframe().f_code.co_name\n try:\n shutil.move(sourceFullPath,targetDir)\n return True\n except Exception as e:\n print(f\"{thisFunc} issue: {e}\")\n return False", "def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0", "def move(self, *args, **kw):\n return self.execute_action('move', *args, **kw)", "def mv(self, item, destination, execute=False):\n file = self.drive.files().update(\n fileId=item[\"id\"],\n addParents=destination[\"id\"],\n removeParents=\",\".join(item[\"parents\"]),\n fields=\"id, name, parents\",\n supportsAllDrives=self.shared_drive[0],\n )\n if execute:\n file = file.execute()\n return file", "def _move_self_to(self, new_dir=None, new_name=None):\n if self.is_downloaded:\n if new_dir and not new_name:\n shutil.move(self._download_path, os.path.join(new_dir, self.download_filename))\n elif new_name and not new_dir:\n shutil.move(self._download_path, os.path.join(self.download_dir, new_name))\n elif new_name and new_dir:\n shutil.move(self._download_path, os.path.join(new_dir, new_name))", "def SshMoveFile(host, src_path, dest_path):\n command = ['ssh', host, 'test', '-e', src_path]\n result = 
RunCommand(command)\n if result:\n # Nothing to do if src_path doesn't exist.\n return result\n\n command = ['ssh', host, 'mv', src_path, dest_path]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh mv \"%s\" -> \"%s\" on \"%s\" (%s)' %\n (src_path, dest_path, host, result))", "def move_recursively(src, dst, overwrite=False, changed_only=True):\n if os.path.isdir(src):\n movetree(src, dst, overwrite, changed_only)\n else:\n movefile(src, dst, overwrite, changed_only)", "def move(name, other, newname=None):", "def mv(self, mv_from, mv_to, **kwargs):\n return self.exec_command('mv %s %s' % (mv_from, mv_to), **kwargs)", "def moveTo(self, newFolder):\n moveURI = self.metaData.getLink(\"move\")\n parent = self.metaData.getLinkIndex('parent')\n\n assert parent != -1\n assert moveURI is not None\n if not hasattr(newFolder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n if not hasattr(newFolder, \"selfLink\"): raise TypeError(\"Your newFolder does not have a self link\")\n\n self.metaData.jsonObj['links'][parent] = {'href' : newFolder.selfLink, 'rel' : 'parent'}\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n response = self._adapter.putRequest(moveURI,header, json.dumps(self.metaData.jsonObj))\n\n newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])\n return Folder(self._client, newLink)", "def move_media(items, dest):\n for file in items:\n filename = os.path.basename(file)\n os.rename(file, dest + '\\\\' + filename)", "def _move_item(self, src, dst):\n \"Does nothing\"", "def copyfile(source, dest, newname=None):\n\n if not os.path.exists(source):\n #print 'no such file %s' %source\n return False\n shutil.copy(source, newname)\n dest = os.path.join(dest, newname)\n if os.path.exists(dest):\n os.remove(dest)\n shutil.move(newname, dest)\n return True", "def move_file(self, file_name:str, new_dir:str)->bool:\n ret_value = True \n new_dir = os.path.expanduser(os.path.expandvars(new_dir)) \n try: \n os.rename(file_name, new_dir+\"/\"+file_name.rsplit(\"/\", 1)[-1]) \n except: \n print(\"Failed to move file (%s) to publisher (%s)\" % (file_name, new_dir))\n ret_value = False \n return ret_value", "def move_file_on_datastore(content, datastore_name, datacenter_name, source, destination):\n datacenter = get_obj(content, [vim.Datacenter], datacenter_name)\n datastore = get_obj(content, [vim.Datastore], datastore_name)\n task = vim.FileManager.MoveDatastoreFile_Task(\n content.fileManager,\n '[{0}] {1}'.format(datastore_name, source),\n datacenter,\n '[{0}] {1}'.format(datastore_name, destination),\n datacenter,\n True\n )\n wait_for_task(task)", "def safe_move(src: str, dst: str) -> None:\n try:\n os.rename(src, dst)\n except OSError as err:\n\n if err.errno == errno.EXDEV:\n # Generate a unique ID, and copy `<src>` to the target directory\n # with a temporary name `<dst>.<ID>.tmp`. Because we're copying\n # across a filesystem boundary, this initial copy may not be\n # atomic. 
We intersperse a random UUID so if different processes\n # are copying into `<dst>`, they don't overlap in their tmp copies.\n copy_id = uuid4()\n tmp_dst = \"%s.%s.tmp\" % (dst, copy_id)\n shutil.copyfile(src, tmp_dst)\n\n # Then do an atomic rename onto the new name, and clean up the\n # source image.\n os.rename(tmp_dst, dst)\n os.unlink(src)\n else:\n raise", "def move(model, origin, dest):\n model.move(origin, dest)", "def move(model, origin, dest):\n model.move(origin, dest)", "def move_to(self, path: str) -> None:\n self._new_path = os.path.join(path, self.annot_type, os.path.basename(self._file_path))\n os.rename(self._file_path, self._new_path)\n self._file_was_moved = True", "def move_files(src, dst, filenames):\n for filename in filenames:\n os.rename(os.path.join(src, filename), os.path.join(dst, filename))", "def _move_mount(original_root, mount_entry):\n target = mount_entry.target[len(original_root):]\n _LOGGER.info('Mount move %r => %s', mount_entry, target)\n\n try:\n fs_linux.mount_move(target, mount_entry.target)\n except FileNotFoundError as err:\n _LOGGER.warning('missing mountpoint %r: %s',\n mount_entry.target, err)", "def move(self, dest_fqpath):\n ret = move_file(self._host, self._fqpath, dest_fqpath)\n\n if ret:\n # TODO: change this to use a setter/getter for heavy lifting once\n # and can reset everything from one place\n self._previous_fqpath = self._fqpath\n self._fqpath = dest_fqpath\n\n return True\n\n return False", "def mv(ctx, fromname, toname):\n\n # create local copies of ctx vaiables for easy access\n path = ctx.obj[\"path\"]\n gitCommand = ctx.obj[\"gitCommand\"]\n\n if(not isdir(path)):\n print(\"No notes directory found at \" + path)\n\n else:\n dir_name, _ = split(path + \"/\" + toname)\n makedirs(dir_name, exist_ok=True)\n\n move(path + \"/\" + fromname, path + \"/\" + toname)\n system(gitCommand + \"add .\")\n system(gitCommand + \"commit -m 'moved \" + fromname + \"to\" + toname + \"'\")", "def _move(self, id: str, parent_id: str) -> MoveFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.move\n request_obj: MoveFolderRequestModel = endpoint.load_request(parent_id=parent_id)\n response: MoveFolderResponseModel = endpoint.perform_request(\n http=self.auth.http,\n request_obj=request_obj,\n id=id,\n )\n return response", "def move_file(original_path,final_path,max_attempts=30):\n assert_is_string(original_path)\n assert_is_string(final_path)\n\n attempt_counter = 0\n while attempt_counter < max_attempts:\n attempt_counter += 1\n if attempt_counter > 1:\n # Pause if something went wrong, (yt-dl is a suspect, might not be closing files?)\n time.sleep(attempt_counter)\n logging.debug(\"Attempt \"+repr(attempt_counter)+\" to move \"+repr(original_path)+\" to \"+repr(final_path))\n try:\n # Make sure output folder exists\n output_dir = os.path.dirname(final_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n assert(os.path.exists(output_dir))\n # Move file\n shutil.move(original_path, final_path)\n assert(not os.path.exists(original_path))\n assert(os.path.exists(final_path))\n return\n except WindowsError, err:\n logging.exception(err)\n logging.error(\"Failed to move file: \"+repr(original_path)+\" to \"+repr(final_path))\n continue\n # If we get here we already have an exception to re-raise\n logging.critical(\"move_file() Too many failed attempts to move a file!\")\n logging.critical(\"move_file()\"+repr(locals()))\n raise", "def move_file(self, from_path: str, to_path: str, force: bool = False) -> Dict:\n 
raise NotImplementedError", "def move(self, newPath):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.move(newPath)\n\t\telse:\n\t\t\tsuper( textureFile, self ).move( newPath )", "def move(mover, backup, regular_expressions, capture_groups):\r\n find, move = regular_expressions\r\n mover.find_files(find)\r\n mover.move_files(move, capture_groups)\r\n backup.write_to_json()", "def _process_file_movement(src:str, dest:str, is_move=False)->bool:\n debug_str = \"move\" if (is_move) else \"copy\"\n \n objects = _list_objects(src) # list objects\n for obj in objects:\n if _is_dir(dest) or _is_dir(src):\n temp_dest = _append_object(dest, _get_dest_obj_name(src, obj))\n else:\n temp_dest = dest\n \n if _is_s3(src) and _is_s3(dest): #s3 to s3\n src_bucket, _ = _extract_bucket_key(src)\n dest_bucket, dest_key = _extract_bucket_key(temp_dest)\n print(f\"{debug_str} file s3://{src_bucket}/{obj} to {temp_dest}\")\n status = _copy_s3_to_s3(src_bucket, obj, dest_bucket, dest_key)\n if status and is_move:\n aws_s3_rm(f\"s3://{src_bucket}/{obj}\")\n elif _is_s3(src): # s3 to local\n src_bucket, _ = _extract_bucket_key(src)\n _create_local_dir(temp_dest) # create dir if doesn't exist\n print(f\"{debug_str} file s3://{src_bucket}/{obj} to {temp_dest}\")\n status = _copy_s3_to_local(src_bucket, obj, temp_dest)\n if status and is_move:\n aws_s3_rm(f\"s3://{src_bucket}/{obj}\")\n elif _is_s3(dest): # local to s3\n dest_bucket, dest_key = _extract_bucket_key(temp_dest)\n print(f\"{debug_str} file {obj} to {temp_dest}\")\n status = _copy_local_to_s3(obj, dest_bucket, dest_key)\n if status and is_move:\n os.remove(obj) \n \n if not status:\n raise Error(f\"S3 {debug_str} failed.\")\n return True", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def do_mv(self, args):\n if args:\n args = args.split()\n\n if not args or len(args) < 2:\n print('Usage: mv source_file target_file')\n return\n\n src = args[0]\n dst = args[1]\n if not (src.startswith('shared/') and dst.startswith('shared/')\n or self._user):\n print('login required for specifying non-shared file with mv')\n return\n\n try:\n new_name = self._qm.rename_file(self._user, src, dst)\n print('renamed file', src, 'to', new_name)\n except Exception as e:\n print('ERROR renaming %s: %s' % (src, e), file=sys.stderr)\n return", "def move_back(self) -> None:\n if self._file_was_moved:\n os.rename(self._new_path, self._file_path)\n pass", "def movefile(src, dst, overwrite=False, changed_only=True, link=False):\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n if os.path.exists(dst) and not overwrite:\n return False\n if samefile(src, dst):\n return False\n if not os.path.exists(dst):\n dstdir = dirname(dst)\n if not os.path.exists(dstdir):\n os.makedirs(dstdir)\n else:\n # `dst` exists, check for changes\n if changed_only:\n sstat = os.stat(src)\n dstat = os.stat(dst)\n if (sstat.st_size ==\n dstat.st_size and sstat.st_mtime <= dstat.st_mtime):\n # same size and destination more recent, do not move\n return False\n try:\n shutil.move(src, dst)\n except WindowsError:\n pass\n 
return True", "def findfif2move(self, source, destination, foldername):\n import glob\n import shutil\n\n os.chdir(source)\n mainfolders = os.listdir(u'.')\n\n for fname in mainfolders:\n try:\n if fname[:2] == foldername:\n subjectdir = os.path.join(source, fname)\n os.chdir(subjectdir)\n subfolders = os.listdir(u'.')\n \n # for each subject in the provided subfolders \n for s in subfolders:\n if s[0] == 's':\n sessiondir = os.path.join(subjectdir, s)\n os.chdir(sessiondir)\n file = glob.glob(\"*.fif\") # find files to move\n\n for files in file: \n shutil.copy(os.path.join(sessiondir,files),\n destination + fname[1:])\n except Exception:\n print(\"Something went wrong while copying the data >>>\", fname)\n pass\n os.chdir(source)" ]
[ "0.836858", "0.77318025", "0.7662355", "0.7631933", "0.7586252", "0.7535129", "0.7519395", "0.75076956", "0.74930936", "0.74675715", "0.74577117", "0.74045736", "0.7384608", "0.7289181", "0.7248294", "0.72342104", "0.72236645", "0.71997166", "0.7152438", "0.71472096", "0.7135161", "0.71248496", "0.7086715", "0.70857054", "0.7081649", "0.7071994", "0.70670146", "0.7057347", "0.7026951", "0.7022701", "0.69901067", "0.69901067", "0.6960929", "0.6924839", "0.69231266", "0.6922818", "0.6920797", "0.69072384", "0.689624", "0.6854582", "0.6838575", "0.68164694", "0.6772632", "0.676135", "0.67549825", "0.6752511", "0.6711055", "0.6705086", "0.67039824", "0.6698963", "0.6684778", "0.6682905", "0.6659238", "0.6654517", "0.66487205", "0.6608142", "0.65995055", "0.65993685", "0.65982896", "0.6523938", "0.6523083", "0.6503699", "0.6482178", "0.6470362", "0.64680797", "0.64535093", "0.64526737", "0.64351404", "0.642383", "0.63965327", "0.6362592", "0.6359773", "0.6347062", "0.63444054", "0.632441", "0.63161665", "0.63115484", "0.630341", "0.6297658", "0.62970823", "0.62819254", "0.6270646", "0.6270646", "0.6267944", "0.6257132", "0.6231779", "0.6229399", "0.6211866", "0.62025166", "0.6200217", "0.6175744", "0.6173233", "0.6163678", "0.6160654", "0.61578465", "0.6154873", "0.6141696", "0.61182475", "0.6115137", "0.6113358" ]
0.62955916
80
Rename a file or folder
def rename(path_file_folder, new_name):
    # The current name is everything after the last '/' in the path (or the whole string).
    old_name = path_file_folder[path_file_folder.rfind('/') + 1:] if '/' in path_file_folder else path_file_folder

    if old_name == new_name:
        raise DegooError(f"rename: Old name and new name \"{new_name}\" cannot be the same")

    if isinstance(path_file_folder, int):
        file_id = path_file_folder
    elif isinstance(path_file_folder, str):
        file_id = path_id(path_file_folder)
    else:
        raise DegooError(f"rename: Illegal file: {path_file_folder}")

    return api.rename_file(file_id, new_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RenameFile(self, oldname: str, newname: str) -> None:\n ...", "def rename(path, new_path):\n fs.rename(path, new_path)", "def rename(self, src, dst):\n os.rename(src, dst)", "def rename_file(path, old_name, new_name):\n \n old_file = os.path.join(path, old_name)\n new_file = os.path.join(path, new_name)\n os.rename(old_file, new_file)", "def rename_file (self):\n\t\tassert self.__filename, \"Renaming could not complete because the new filename could not be determined, one or more needed arguments is empty!\"\n\t\tos.rename( self._file.path, self.__filename )\n\t\t\n\t\tif self.verbose and self.log :\tself.log.info( 'File renamed from %s to %s' % (self._file.path, self.__filename))", "def rename(oldname, newname):", "def rename(project, project_dir, files_dir, recursive, offset):\n project = NamingProject(project, project_dir=project_dir)\n renamer = FileRename(project, files_dir, recursive=recursive, offset=offset)\n renamer.run()", "def auto_rename(file_path, new_name):\n \n # Return if no file given\n if not file_path:\n return ''\n else:\n file_path = file_path\n \n # Get the new name\n new_path = change_basename(file_path, new_name)\n \n \n # Changed?\n if new_path != file_path:\n # Try to rename\n try:\n shutil.move(os.path.join(settings.MEDIA_ROOT, file_path), os.path.join(settings.MEDIA_ROOT, new_path))\n except IOError:\n # Error? Restore original name\n new_path = file_path\n \n # Return the new path replacing backslashes (for Windows)\n return new_path", "def rename_file(self, path, new_name):\n try:\n self.rename_narrative(self._parse_path(path), self.get_userid(), new_name)\n except WorkspaceError as err:\n raise HTTPError(err.http_code, err.message)\n except Exception as err:\n raise HTTPError(\n 500, \"An error occurred while renaming your Narrative: {}\".format(err)\n )", "def rename(self, target):\n target = os.fspath(target)\n return error.checked_call(os.rename, self.strpath, target)", "def rename_file(source, oldname, newname):\n #source = client_variables.output_folder\n renamefiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for renamefile in renamefiles:\n if renamefile.endswith(ext):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)\n elif renamefile.startswith(oldname):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)", "def rename(old, new):", "def rename(old, new):", "def fileRename(current_file,num,digits):\n # Key, value pairs of what to replace.\n dictobj = {\n '<num>': get_numbering_format(digits, num),\n '<datetaken>': date_to_string(get_date_taken(current_file),'%Y%m%d__%H_%M'),\n '<dname>': dirname\n }\n # Rename\n new_filename = multi_replace(filename_pattern, dictobj)\n shutil.move(current_file, new_filename)", "def rename(path, new_name):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n new_name = encode(new_name, True)\r\n try:\r\n samba.rename(os.path.basename(path), new_name, os.path.dirname(path))\r\n except:\r\n import traceback\r\n logger.info(\r\n \"deportesalacarta.core.filetools mkdir: Error al renombrar el archivo o carpeta\" + traceback.format_exc())\r\n platformtools.dialog_notification(\"Error al renombrar\", path)\r\n return False\r\n else:\r\n new_name = encode(new_name, False)\r\n try:\r\n os.rename(path, 
os.path.join(os.path.dirname(path), new_name))\r\n except OSError:\r\n import traceback\r\n logger.info(\r\n \"deportesalacarta.core.filetools mkdir: Error al renombrar el archivo o carpeta\" + traceback.format_exc())\r\n platformtools.dialog_notification(\"Error al renombrar\", path)\r\n return False\r\n\r\n return True", "def base_rename(self, new_name):\n\n new_path = join(dirname(self.fspath), new_name)\n\n return self.rename(new_path)", "def fileRenameandReplace(filename,newfilename):\n try:\n os.rename(filename,newfilename)\n logging.info(\"Json file renamed in PD path\")\n except Exception as er:\n print (\"Not able to rename the json file \")\n return False", "def os_rename(self, source, destination):\n cmd = ['/bin/mv', source, destination]\n process = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n returncode = subprocess.Popen.wait(process)\n return returncode", "def rename_file(old_path, new_path):\n if os.path.exists(new_path):\n raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST),\n old_path, new_path)\n os.rename(old_path, new_path)", "def _renameFile(fileToRename, newName):\n\ttry:\n\t\tos.rename(str(fileToRename), newName)\n\texcept OSError as err:\n\t\tmsgBox = QtGui.QMessageBox()\n\t\tmsgBox.setText(\"Unable to rename file.\\n Error: %s\" % err)\n\t\tmsgBox.exec_()", "def re_name(name,new_name):\n\n try:\n os.rename(config_tools.full_dest+name,config_tools.full_dest+new_name)\n except OSError:\n print(f\"Не удалось переименовать {name}\")\n else:\n print(f\"{name} успешно переименновавано в {new_name}\")", "def rename(self, old_path, new_path):\n self.rename_file(old_path, new_path)\n self.checkpoints.rename_all_checkpoints(old_path, new_path)", "def name(self, new_name):\n self.rename(new_name)", "def file_rename(old_path, image_class):\n\tdirectory, filename, extension = filename_split(old_path)\n\tnew_filename = filename_generate(image_class)\n\tnew_path = os.path.join(directory, new_filename + extension)\n\tos.rename(old_path, new_path)\n\treturn new_path", "def rrename(filepath, namelength = 15):\n\tfilepath = os.path.abspath(filepath)\n\tif not os.path.isfile(filepath):\n\t\traise ValueError(\"The path is not a valid file path: \"+str(filepath))\n\t\n\tpath, name = os.path.split(filepath)\n\tname, ext = os.path.splitext(name)\n\tos.rename(filepath, os.path.join(path, rname(namelength)+ext)) #We replace name by the random name and reconstruct the path", "def change_dir(filename):", "def test_6a_change_file_name(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.rename_file_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare renaming test.\")\n self.dismiss_dialogs()\n function = js_func[\"rename\"] % (GST.gs_file_paths[\"file_to_rename_path\"], GST.gs_file_paths[\"after_rename_path\"])\n try:\n self.send_request(function, \"rename()\")\n except Exception as e:\n raise RenameException(\"Failed to rename the file: \" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise RenameException(\"Failed to rename the file: \" + response)", "def rename(self, name=None, destination=None):\n raise NotImplementedError\n return None", "def mv(*args):\n filenames = _glob(args)\n nfilenames = len(filenames)\n if nfilenames < 2:\n print 'Need at least two arguments'\n elif nfilenames == 2:\n try:\n 
os.rename(filenames[0], filenames[1])\n except os.error, detail:\n print \"%s: %s\" % (detail[1], filenames[1])\n else:\n for filename in filenames[:-1]:\n try:\n dest = filenames[-1] + '/' + filename\n if not os.path.isdir(filenames[-1]):\n print 'Last argument needs to be a directory'\n return\n os.rename(filename, dest)\n except os.error, detail:\n print \"%s: %s\" % (detail[1], filename)", "def ChangeName(self, newName):\n if newName != \"\":\n newPath = self.format + os.sep + \"playlists\" + os.sep + newName + \".txt\"\n os.replace(self.path, newPath)\n self.path = newPath", "def rename_file(file_path, pattern, replacement):\n old_file_name = os.path.basename(file_path)\n new_file_name = re.sub(pattern, replacement, old_file_name)\n return new_file_name", "def __rename_file(filename, suffix):\n filename = PDFWorkshop.__clean_filename(filename)\n return PDFWorkshop.__add_filename_suffix(filename, suffix)", "def rename_file(f: pathlib.Path) -> str:\n m = mutagen.File(f)\n if m is None: return\n new_name_parts = []\n if \"tracknumber\" in m:\n if \"discnumber\" in m:\n new_name_parts.append(pad_num_str(m[\"discnumber\"][0]) + \".\")\n new_name_parts.append(pad_num_str(m[\"tracknumber\"][0]) + \" - \")\n new_name_parts.append(m[\"title\"][0].replace(\"/\", \"_\"))\n if \"version\" in m:\n new_name_parts.append(\" - \" + \" - \".join(m[\"version\"]).replace(\"/\", \"_\"))\n return \"\".join(new_name_parts)", "def _rename(person_folder: str):\n all_image_paths = iglob(os.path.join(person_folder, \"*.*\"))\n all_image_paths = sorted([image for image in all_image_paths if image.endswith(\n \".jpg\") or image.endswith(\".png\") or image.endswith(\".jpeg\")])\n person_name = os.path.basename(os.path.normpath(person_folder))\n concat_name = '_'.join(person_name.split())\n for index, image_path in enumerate(all_image_paths):\n image_name = concat_name + '_' + '%04d' % (index + 1)\n file_ext = pathlib.Path(image_path).suffix\n new_image_path = os.path.join(person_folder, image_name + file_ext)\n os.rename(image_path, new_image_path)\n os.rename(person_folder, person_folder.replace(person_name, concat_name))", "def rename(old, new):\n\ttry:\n\t\tos.rename(old, new)\n\texcept OSError as e:\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise\n\t\tos.remove(old)", "def rename_file(source, destination, alog):\n\n # Some error checking against a legitimate source & destination.\n if not type(source) is str:\n raise CoreError('Source is not of str type.')\n elif not type(destination) is str:\n raise CoreError('Destination is not of str type.')\n elif not os.path.isfile(source):\n raise CoreError(source + ' is not a valid file.')\n\n head, tail = os.path.split(destination)\n if not os.path.isdir(head + '/'):\n try:\n os.makedirs(head + '/')\n except:\n raise CoreError('Failed to create new directory: '\n + (head + '/'))\n\n for i in range(0, len(MuzikArkive.illegal_name_characters)):\n if MuzikArkive.illegal_name_characters[i] in tail:\n tail = tail.replace(MuzikArkive.illegal_name_characters[i], '_')\n alog.rlog = MuzikArkive.illegal_name_characters[i] \\\n + ' was removed from ' + destination\n\n if not os.path.isfile(destination):\n try:\n os.rename(source, destination)\n except:\n raise CoreError('os.rename() Failed.')\n else:\n head, tail = destination.rsplit('.', 1)\n rname = True\n i = 1\n while rname:\n addon = '[' + str(i) + '].'\n if not os.path.isfile(head + addon + tail):\n try:\n os.rename(source, (head + addon + tail))\n except:\n raise CoreError('os.rename() Failed.')\n else:\n rname = False\n 
else:\n i += 1", "def change_nm(src,dst):\n\timport os\n\ttry:\n\t\tos.rename(src,dst)\n\texcept:\n\t\tprint \"this is a mistake\"\n\t\treturn -1\n\n\treturn 0", "def RenameFile(path, org, new):\n\n cont = zip(org, new)\n if not path.endswith((\"/\", \"\\\\\")):\n path = path + \"\\\\\"\n for file in cont:\n if os.path.isfile(path + file[0]):\n os.rename(path + file[0], path + file[1])\n\n return len(org) - len(new)", "def mv(src_path, dest_path):\n try:\n os.rename(src_path, dest_path)\n except OSError:\n # this will happen on windows\n os.remove(dest_path)\n os.rename(src_path, dest_path)", "def drename(filepath, namelength = 15):\n\tfilepath = os.path.abspath(filepath)\n\tif not os.path.isfile(filepath):\n\t\traise ValueError(\"The path is not a valid file path: \"+str(filepath))\n\t\n\tpath, name = os.path.split(filepath)\n\tname, ext = os.path.splitext(name)\n\tos.rename(filepath, os.path.join(path, dname(namelength)+ext)) #We replace name by the random name and reconstruct the path", "def rename(self, name):\n return self.parent.rename(self, name)", "def rename(self, name):\n return self.client.api.rename(self.id, name)", "def change_filepath_name(filepath: str, new_name: str, new_ext: Optional[str] = None) -> str:\n ext = new_ext or filepath_ext(filepath)\n return str(pathlib.Path(filepath).with_name(new_name).with_suffix(ext))", "def projectFileRenamed(self, oldfn, newfn):\n editor = self.getOpenEditor(oldfn)\n if editor:\n editor.fileRenamed(newfn)", "def RenameFile(self, file_id, new_title):\n f = self.service.files().update(fileId=file_id, body={\"title\":new_title}).execute()\n return f[\"id\"]", "def rename_file(self, file_id, new_name):\n\n func = f\"setRenameFile(Token: $Token, FileRenames: $FileRenames)\"\n query = f\"mutation SetRenameFile($Token: String!, $FileRenames: [FileRenameInfo]!) {{ {func} }}\"\n\n request = {\"operationName\": \"SetRenameFile\",\n \"variables\": {\n \"Token\": self.KEYS[\"Token\"],\n \"FileRenames\": [{\n \"ID\": file_id,\n \"NewName\": new_name\n }]\n },\n \"query\": query\n }\n\n header = {\"x-api-key\": self.KEYS[\"x-api-key\"]}\n\n response = requests.post(URL_API, headers=header, data=json.dumps(request))\n\n if response.ok:\n rd = json.loads(response.text)\n\n if \"errors\" in rd:\n messages = []\n for error in rd[\"errors\"]:\n messages.append(error[\"message\"])\n message = '\\n'.join(messages)\n raise DegooError(f\"getUserInfo failed with: {message}\")\n else:\n return rd[\"data\"]['setRenameFile']\n else:\n raise DegooError(f\"renameFile failed with: {response}\")", "def do_mv(self, args):\n if args:\n args = args.split()\n\n if not args or len(args) < 2:\n print('Usage: mv source_file target_file')\n return\n\n src = args[0]\n dst = args[1]\n if not (src.startswith('shared/') and dst.startswith('shared/')\n or self._user):\n print('login required for specifying non-shared file with mv')\n return\n\n try:\n new_name = self._qm.rename_file(self._user, src, dst)\n print('renamed file', src, 'to', new_name)\n except Exception as e:\n print('ERROR renaming %s: %s' % (src, e), file=sys.stderr)\n return", "def change_file_name(self, n):\n if type(n) != str or n is None:\n raise TypeError(\"Wrong type! 
Please pass 'n' as a string!\")\n self.name = n", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def rename(img):\n ext = splitext(img)[1].lower()\n name = get_date(open(img))\n if name is not None:\n name = name + ext\n return copy(img, name)", "def renaming(directory, folder):\r\n\t# rename every file in the directory\r\n\tfor file in sorted(os.listdir(directory)):\r\n\t\t# rename the file\r\n\t\tos.rename(os.path.join(directory, file), os.path.join(directory, str(folder) + '_' + str(file)))\r\n\tprint(\"Finished folder \" + str(folder)) # progress track\r", "def hmove(src_path, res_path):\n os.rename(src_path, res_path)", "def _rename(self, id: str, name: str) -> RenameFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.rename\n request_obj: RenameFolderRequestModel = endpoint.load_request(name=name)\n response: RenameFolderResponseModel = endpoint.perform_request(\n http=self.auth.http, request_obj=request_obj, id=id\n )\n return response", "def _renameDir(self) -> None:\n try:\n path = self._currPath.joinpath(self._editItemNameBefore)\n nameAfter = self._editItem.text()\n pathTo = self._currPath.joinpath(nameAfter)\n path.rename(pathTo)\n self._listDirectories()\n renamedItem = self._model.findItems(nameAfter)\n index = self._model.indexFromItem(renamedItem[0])\n self._mainFileView.scrollTo(index)\n self._mainFileView.setCurrentIndex(index)\n except FileExistsError:\n self._statusBar.showMessage('File/folder with that name already exists!', 3000)\n self._listDirectories()", "def rename_file(original, content_type, condo_name):\n condo_name = sanitize_filename(condo_name)\n original_file = os.path.join(DOWNLOAD_PATH, original)\n new_name = os.path.join(DOWNLOAD_PATH, content_type + \"\\\\\" + condo_name + \".txt\")\n extracted_file = os.path.join(DOWNLOAD_PATH, unzip_file(original_file))\n if os.path.exists(new_name):\n os.remove(new_name)\n os.renames(extracted_file, new_name)\n os.remove(original_file)", "def MoveFile(path, new_path):\n try:\n RemoveFile(new_path)\n os.rename(path, new_path)\n except OSError, e:\n if e.errno != errno.ENOENT:\n raise", "def rename_files():\n folder_dir = r\"C:\\Users\\keithmoore1.AD\\Desktop\\HAFB\\prankOrig\"\n files = os.listdir(folder_dir)\n save_path = os.getcwd() # current working directory\n for file in files:\n #remove digits from name\n new_file = file.lstrip(\"0123456789\")\n print(file, \" - \", new_file)\n # rename filename\n os.chdir(folder_dir)\n os.rename(file,new_file)\n # get back home\n os.chdir(save_path)", "def rename(self,oldName,newName):\n #--Update references\n fileInfo = self[oldName]\n self[newName] = self[oldName]\n del self[oldName]\n self.table.moveRow(oldName,newName)\n #--FileInfo\n fileInfo.name = newName\n #--File system\n newPath = os.path.join(fileInfo.dir,newName)\n oldPath = os.path.join(fileInfo.dir,oldName)\n renameFile(oldPath,newPath)\n #--Done\n fileInfo.madeBackup = False", "def move_file(self, old_file: str, new_sub_dir: str):\n full_old_path = os.path.join(self.root, old_file)\n full_new_path = os.path.join(self.root, new_sub_dir, old_file)\n os.rename(full_old_path, full_new_path)", "def remoteTestsFileRenamed(self, projectId, filePath, fileName, fileExtension, newName):\n if len(filePath) > 0:\n complete_path = \"%s/%s.%s\" % (filePath, fileName, fileExtension)\n else:\n complete_path = \"%s.%s\" % ( fileName, fileExtension)\n tabId = self.checkAlreadyOpened(path = complete_path, \n remoteFile=True, \n repoType=UCI.REPO_TESTS, \n project=projectId)\n if tabId is not 
None:\n doc = self.tab.widget(tabId)\n self.tab.setCurrentIndex(tabId)\n buttons = QMessageBox.Yes | QMessageBox.No \n answer = QMessageBox.question(self, Settings.instance().readValue( key = 'Common/name' ), \n self.tr(\"This file has been renamed.\\nDo you want to update the name ?\") , buttons)\n if answer == QMessageBox.Yes:\n doc.updateFilename( filename=newName )\n doc.setUnmodify()\n elif answer == QMessageBox.No:\n doc.unSaved()\n doc.setModify()", "def rename_file(file_path, equipt_nr):\n work_tuples = parse_columns()\n # Regex used to get differents parts of the file path\n path_regex = re.compile(r'(?P<path>[\\w\\\\:]*)\\\\(?P<filename>[\\w]*).(?P<extension>[\\w].)')\n # Match object containing the different parts of the file path\n match = path_regex.search(file_path)\n\n # Getting the right file to rename\n associated_nr = 0\n for ii in work_tuples:\n if match.group('filename') == ii[0]:\n associated_nr = ii[equipt_nr+1]\n\n # Renaming the file\n os.rename(file_path, match.group('path')+'\\\\'+associated_nr+'.'+match.group('extension'))", "def change_file_name():\n path = \"/etc/atuned/webserver/\"\n file_list = os.listdir(path)\n file_list.sort(key=lambda fn: os.path.getmtime(path + fn))\n if len(file_list) > 0 and re.match(r'\\S*-\\d{17}\\S*', file_list[-1]) is None:\n old_name = file_list[-1].split(\".\")[0]\n curr_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n new_name = old_name + \"-\" + str(curr_time) + str(random.randint(100, 999))\n os.rename(path + old_name + \".txt\", path + new_name + \".txt\")", "def copy_rename_file(source_file_path: str, target_dir: str, new_name: str) -> str:\n shutil.copy2(source_file_path, target_dir)\n target_path = os.path.join(target_dir, os.path.basename(source_file_path))\n new_file_name = new_name + get_extension(source_file_path)\n new_file_path = os.path.join(target_dir, new_file_name)\n os.rename(target_path, new_file_path)\n return new_file_path", "def rename(self, name):\n self.name = name", "def rename(self,oldName,newName):\n isLoaded = self.isLoaded(oldName)\n if isLoaded: self.unload(oldName)\n FileInfos.rename(self,oldName,newName)\n self.refreshDoubleTime()\n if isLoaded: self.load(newName)", "def mv_file(file_name: str, path: str) -> None:\n global number_of_files\n if file_name.startswith(\".\"):\n pass\n else:\n for extensions in file_formats_list:\n if file_.endswith(extensions):\n shutil.move(desktop + \"/\" + file_, path)\n print(f\"moving {colored(file_name, 'yellow')} to {path}\")\n number_of_files += 1\n else:\n pass", "def rename(ctx, input_file, output_file):\n ctx.ensure_object(dict)\n ctx.obj[\"reader\"] = PFBReader(input_file)\n ctx.obj[\"writer\"] = PFBWriter(output_file)", "def renameFile(oldPath,newPath,makeBack=False):\n if os.path.exists(newPath): \n if makeBack:\n backPath = newPath+'.bak'\n if os.path.exists(backPath):\n os.remove(backPath)\n os.rename(newPath,backPath)\n else:\n os.remove(newPath)\n os.rename(oldPath,newPath)", "def file_name(self, new_file_name):\n self._file_name = os.path.abspath(new_file_name).replace(\"\\\\\", \"/\")", "def prefix_file(filename, prefix):\n path, file_or_dir = os.path.split(filename)\n new_filename = os.path.join(path, prefix + file_or_dir)\n os.rename(filename, new_filename)", "def move(self,fileName,destDir):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,fileName)\n renameFile(srcPath,destPath)\n self.refresh()", "def main():\n print(\"Current directory is\", 
os.getcwd())\n os.chdir('Lyrics/Lyrics')\n\n for dir_name, dir_list, file_list in os.walk(\".\"):\n for filename in file_list:\n file_path = dir_name + \"\\\\\" + filename\n new_name = get_fixed_filename(file_path)\n os.rename(file_path, new_name)", "def rename(self, src, dst, preserve=False):\n self.connect()\n if preserve:\n self._write('RENAMENX %s %s\\r\\n' % (src, dst))\n return self._get_numeric_response()\n else:\n self._write('RENAME %s %s\\r\\n' % (src, dst))\n return self._get_simple_response().strip()", "def rename_files(files: list, new_file_name: str) -> bool:\n if len(files) == 0:\n print(\"list of files was empty. Could not rename files.\")\n return False\n\n path = None\n for index, item in enumerate(files, start=1):\n path = Path(rf\"{item}\")\n\n if path.exists():\n # Path class takes care of path slashes depending on system\n new_path = Path(str(path.parent) + \"/\" + new_file_name +\n str(index) + path.suffix)\n path.replace(new_path)\n\n else:\n print(\"Path did not exist. Check file path for errors.\")\n return False\n return True", "def rename_folder(self, name: str, folder: Folder) -> None:\n folder.title = name\n self._save_feeds()", "def rename_file(fname):\n x,y = load_file(fname)\n date=y[0].split(\".\")\n if len(y[2])<20:\n title=y[2]\n else:\n title=y[2][0:20]\n title=title.replace(\" \",\"_\")\n \n new_name=\"{}{}{}{}.csv\".format(date[2],date[1],date[0],title)\n new_appendix=rename_appendix(y[10],new_name)\n os.rename(fname,new_name)\n replace_line(new_name,10,'Anhang;\"{}\"'.format(new_appendix))\n return new_name", "def rename(cls, client, resource, new_name) :\n\t\ttry :\n\t\t\trenameresource = rewriteaction()\n\t\t\tif type(resource) == cls :\n\t\t\t\trenameresource.name = resource.name\n\t\t\telse :\n\t\t\t\trenameresource.name = resource\n\t\t\treturn renameresource.rename_resource(client,new_name)\n\t\texcept Exception as e :\n\t\t\traise e", "def rename(self, target):\r\n py.process.cmdexec(\"svn move --force %s %s\" %(str(self), str(target)))", "def renameid(target_folder):\n\tfor filepath in iglob(target_folder, recursive=True):\n\t\tp = Path(filepath)\n\t\tnew = f\"{p.stem[0:6]}{p.suffix}\"\n\t\ttry:\n\t\t\tp.rename(Path(p.parent, new))\n\t\t\tlogger.info(new)\n\t\texcept Exception as e:\n\t\t\tlogger.error(e)\n\t\t\tcontinue", "def _rename_file(self, old_path, new_path):\n if not self.mount():\n return False\n _log(\"AnnexGvfsBackend._rename_file(%r -> %r)\" % (old_path, new_path))\n old_dir_uri = self.path_to_uri(os.path.dirname(old_path))\n new_dir_uri = self.path_to_uri(os.path.dirname(new_path))\n old_uri = self.path_to_uri(old_path)\n new_uri = self.path_to_uri(new_path)\n try:\n if not self.gvfs.create_dir_p(new_dir_uri):\n raise IOError()\n if not self.gvfs.rename_file(old_uri, new_uri):\n raise IOError()\n except IOError:\n return False\n else:\n return True", "def rename(self, name: str):\n self.doc['name'] = name", "def main(root, filelist):\n #print \"got %s: %s\" % (root, filelist)\n rename(root, filelist)", "def changeFilenames(speciesfolder, species):\n\tfor filename in os.listdir(speciesfolder):\n\t\tif filename.startswith(\"generic\"):\n\t\t\tnewname = filename.replace(\"generic\", species)\n\t\t\tos.rename(os.path.join(speciesfolder, filename), os.path.join(speciesfolder, newname))", "def renamefile(filename):\n new_data_list = []\n with open(filename, 'r') as f:\n data_list = f.read().split('\\n')\n\n print('Generating new data list..')\n for data in tqdm(data_list):\n if len(data) == 0:\n continue\n data_info = data.split(' 
')\n\n #data_info[0] = data_info[0].replace('jpg', 'png')\n #data_info[1] = data_info[1].replace('jpg', 'png')\n for it, name in enumerate(data_info):\n data_info[it] = '/'.join(name.split('/')[1:])\n if data_info[2].find('extras') == -1:\n new_data_list.append(' '.join(data_info))\n\n with open(filename, 'w') as f:\n print('writing new data names..')\n\n for it, data in tqdm(enumerate(new_data_list)):\n if len(data) == 0:\n continue\n\n if it == len(new_data_list)-1:\n f.write(data)\n else:\n f.write(data+'\\n')\n\n print('Done.')", "def _rename_ondisk(self):\n if not self.has_moved or not self.renames_remaining:\n return\n\n try:\n os.rename(self.rename_phase_src, self.rename_phase_dst)\n except Exception:\n sys.stderr.write(\"Failed to renamed '%s' to '%s'\\n\" %\n (self.rename_phase_src,\n self.rename_phase_dst))\n raise\n\n self._rename_phase += 1", "def rename(self, name, overwrite=False):\n return _image.image_rename(self, name, overwrite)", "def rename(file, format_spec, dir=DIR()):\n\tfile = pathlib.Path(file)\n\t\n\tprint(\"Parsing {name}...\".format(name=file.name))\n\t\n\tarticle = Article(file.read_bytes())\n\t\n\tnew_file = format_spec.format(\n\t\tarticle = article,\n\t\ttitle = dir.getTitle(file) or article.getTitle(),\n\t\tauthor = article.getAuthor() or dir.getAuthor(file),\n\t\tboard = article.getBoard(),\n\t\ttime = article.getTime() or dir.getTime(file) or format_dummy\n\t)\n\tnew_file = safe_file_name(new_file)\n\tnew_file = file.with_name(new_file)\n\t\n\tif file == new_file:\n\t\tprint(\"Same file name!\\n\")\n\t\treturn\n\t\n\tif new_file.exists():\n\t\tnum = 2\n\t\t\n\t\twhile True:\n\t\t\ttemp_file = \"{name} ({num}){ext}\".format(\n\t\t\t\tnum = num,\n\t\t\t\tname = new_file.stem,\n\t\t\t\text = new_file.suffix\n\t\t\t)\n\t\t\ttemp_file = new_file.with_name(temp_file)\n\t\t\t\n\t\t\tif file == temp_file:\n\t\t\t\tprint(\"Same file name!\\n\")\n\t\t\t\treturn\n\t\t\t\t\n\t\t\tif not temp_file.exists():\n\t\t\t\tnew_file = temp_file\n\t\t\t\tbreak\n\t\t\t\t\n\t\t\tnum += 1\n\t\n\tprint(\"Rename to {name}...\\n\".format(name=new_file.name))\n\t\n\tfile.rename(new_file)", "def rename_in_dir(directory, names, name_only=False):\n for curfile in get_mp3_files(directory):\n parent, curname = split(curfile)\n index = splitext(curname)[0]\n basename = names[curname]\n newname = \"%s%s.mp3\" % (\n '' if name_only else str(index)+' - ', basename)\n newname = join(parent, newname)\n try:\n rename(curfile, newname)\n except FileNotFoundError:\n continue", "def remoteAdaptersFileRenamed(self, filePath, fileName, fileExtension, newName):\n if len(filePath) > 0:\n complete_path = \"%s/%s.%s\" % (filePath, fileName, fileExtension)\n else:\n complete_path = \"%s.%s\" % ( fileName, fileExtension)\n tabId = self.checkAlreadyOpened(path = complete_path, \n remoteFile=True, \n repoType=UCI.REPO_ADAPTERS, \n project=0)\n if tabId is not None:\n doc = self.tab.widget(tabId)\n self.tab.setCurrentIndex(tabId)\n buttons = QMessageBox.Yes | QMessageBox.No \n answer = QMessageBox.question(self, Settings.instance().readValue( key = 'Common/name' ), \n self.tr(\"This file has been renamed.\\nDo you want to update the name ?\") , buttons)\n if answer == QMessageBox.Yes:\n doc.updateFilename( filename=newName )\n doc.setUnmodify()\n elif answer == QMessageBox.No:\n doc.unSaved()\n doc.setModify()", "def test_rename_python_api(self):\n\n rename.rename([NEW_APP_NAME, NEW_DOMAIN])\n self.assertTrue(os.path.exists(RENAMED_PROJECT_DIR))", "def rename(self,newName):\n self.userName = newName", "def 
fs_rename_entry(self, oldPath, newPath):\n\t\treturn Job(SDK.PrlSrv_FsRenameEntry(self.handle, oldPath, newPath)[0])", "def rename_site(site_name, newname):\n siteid = _get_site_id(site_name)\n if siteid is None: # or not path.exists():\n raise FileNotFoundError\n cur = conn.cursor(cursor_factory=pgx.RealDictCursor)\n querystring = 'update sites set name = %s where id = %s;'\n result = execute_query(querystring, (newname, siteid))\n # create the physical destination (mirror) so that css and images can be moved there\n path = WEBROOT / site_name\n newpath = WEBROOT / newname\n path.rename(newpath)", "def renameFile(oldName,newName):\n \n thisFunc = inspect.currentframe().f_code.co_name\n if oldName == newName:\n return True\n else:\n try:\n os.rename(oldName,newName)\n return True\n except Exception as e:\n print(f\"{thisFunc} issue: {e}\")\n return False", "def renamed(self, source, dest):\r\n self.__close_and_reload(source, new_filename=dest)", "def ren_mosaic(mosaic_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Original/', \r\n file_pattern='*stitch.jpg'): \r\n \r\n \r\n if not os.path.exists(mosaic_dir):\r\n sys.exit('input folder does not exist')\r\n \r\n mosaics = []\r\n for root, dirnames, filenames in os.walk(mosaic_dir):\r\n for filename in fnmatch.filter(filenames, file_pattern):\r\n mosaics.append(os.path.join(root, filename).replace('\\\\','/'))\r\n \r\n s = 0\r\n r = 0\r\n for m in mosaics:\r\n dir_name = os.path.dirname(m).split('/')[-1]\r\n new_name = os.path.dirname(m) + '/' + dir_name + '.jpg'\r\n if os.path.exists(new_name):\r\n print('skipping: %s' % m)\r\n s+=1\r\n else:\r\n os.rename(m, new_name)\r\n print('renamed: %s' % new_name)\r\n r+=1\r\n \r\n print('renamed total of %i files' % r)\r\n print('skipped total of %i files' % s)", "def userRenamed(self, oldname, newname):\n # Send messasge to Server bot.\n self.data_in(text=\"\", type=\"renamed\", oldname=oldname, newname=newname)", "def mv(path_file_folder, new_path):\n if not is_folder(new_path):\n raise DegooError(f\"mv: The target path is not a folder\")\n\n source_path = path_file_folder if is_folder(path_file_folder) else path_file_folder[:path_file_folder.rfind('/')]\n\n if source_path == new_path:\n raise DegooError(f\"mv: The target path cannot be the same as the source path\")\n\n if isinstance(path_file_folder, int):\n file_id = path_file_folder\n elif isinstance(path_file_folder, str):\n file_id = path_id(path_file_folder)\n else:\n raise DegooError(f\"rm: Illegal file: {path_file_folder}\")\n\n if isinstance(new_path, int):\n new_parent_id = new_path\n elif isinstance(new_path, str):\n new_parent_id = path_id(new_path)\n else:\n raise DegooError(f\"rm: Illegal destination folder: {new_path}\")\n\n return api.mv(file_id, new_parent_id)", "def _download_rename(filename):\n url_loc = \"http://www.stsci.edu/~kgordon/beast/\"\n fname_dld = download_file(\"%s%s\" % (url_loc, filename))\n extension = filename.split(\".\")[-1]\n fname = \"%s.%s\" % (fname_dld, extension)\n os.rename(fname_dld, fname)\n return fname", "def rename(self, name):\n return _coconut_tail_call(self.__class__, name)" ]
[ "0.7906396", "0.7777854", "0.77087426", "0.763373", "0.752285", "0.74998015", "0.74556607", "0.73914254", "0.7262698", "0.71969265", "0.7161511", "0.70347023", "0.70347023", "0.70059544", "0.699431", "0.69706184", "0.69470656", "0.6944914", "0.69139314", "0.690678", "0.6895383", "0.6836334", "0.6759159", "0.6758383", "0.66953576", "0.6692369", "0.66760796", "0.6631283", "0.6598985", "0.65902644", "0.6589315", "0.65879625", "0.65762246", "0.6564192", "0.65625", "0.65452", "0.65241337", "0.6520967", "0.6510135", "0.6495447", "0.64804715", "0.6477491", "0.6463269", "0.6455275", "0.645133", "0.6451277", "0.64508396", "0.6435965", "0.64357495", "0.6434677", "0.6422475", "0.6420993", "0.63922477", "0.63774306", "0.63617945", "0.6356483", "0.63391465", "0.6336046", "0.6331424", "0.63249457", "0.63212776", "0.63180065", "0.63056314", "0.630361", "0.63011396", "0.62961215", "0.62741417", "0.62692577", "0.6267169", "0.6251954", "0.6240164", "0.6228311", "0.62247205", "0.6207256", "0.6196744", "0.619262", "0.61749816", "0.6154889", "0.61319", "0.6124154", "0.6115063", "0.61100465", "0.61018884", "0.6093815", "0.60911745", "0.6090507", "0.606767", "0.6051891", "0.6046319", "0.60349137", "0.6031384", "0.6022493", "0.60031706", "0.59924173", "0.5989763", "0.5984434", "0.5969955", "0.596817", "0.5968144", "0.59469587" ]
0.755172
4
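The record that closes above (rank 4) centers on os.rename-based move/rename helpers; several of its negatives wrap the bare rename in a fallback, because os.rename fails on Windows when the destination already exists and fails across filesystem boundaries on POSIX. A minimal stdlib-only sketch of that fallback pattern — the helper name replace_or_move is my own, not taken from any negative:

import os
import shutil

def replace_or_move(src: str, dst: str) -> None:
    """Move src to dst, tolerating an existing dst file and cross-device moves."""
    try:
        os.replace(src, dst)   # atomic overwrite when src and dst share a filesystem
    except OSError:
        # Falls back to copy-then-delete (what shutil.move does when rename
        # crosses a filesystem boundary). Note: if dst is an existing
        # directory, shutil.move places src *inside* it instead.
        shutil.move(src, dst)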
Move a file or folder
def mv(path_file_folder, new_path): if not is_folder(new_path): raise DegooError(f"mv: The target path is not a folder") source_path = path_file_folder if is_folder(path_file_folder) else path_file_folder[:path_file_folder.rfind('/')] if source_path == new_path: raise DegooError(f"mv: The target path cannot be the same as the source path") if isinstance(path_file_folder, int): file_id = path_file_folder elif isinstance(path_file_folder, str): file_id = path_id(path_file_folder) else: raise DegooError(f"rm: Illegal file: {path_file_folder}") if isinstance(new_path, int): new_parent_id = new_path elif isinstance(new_path, str): new_parent_id = path_id(new_path) else: raise DegooError(f"rm: Illegal destination folder: {new_path}") return api.mv(file_id, new_parent_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_file(source, destination):\n shutil.move(source, destination)", "def moveFile(source, dest):\n try:\n shutil.move(source, dest) \n except IOError as e:\n print (\"Unable to move file. %s\" %(e))", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def act_move_file(self, file_source, file_target):\n try:\n if not os.path.isfile(file_source):\n return\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.move(file_source, file_target)\n #shutil.copy2(file_source, file_target)\n #os.remove(file_source)\n self.logger.debug('%s: Action: <move> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file move: %s -> %s', file_source, file_target)", "def move_file(file, dest_path):\n if os.path.isdir(dest_path):\n shutil.move(file, dest_path)\n else:\n os.mkdir(dest_path)\n shutil.move(file, dest_path)", "def file_move(self, from_path, to_path):\n params = {'root': self.session.root,\n 'from_path': format_path(from_path),\n 'to_path': format_path(to_path)}\n\n url, params, headers = self.request(\"/fileops/move\", params)\n\n return self.rest_client.POST(url, params, headers)", "def move_file(path):\n new_path = os.path.join(TEST_DIR, TEST_FILE)\n command = ['mv', TEST_FILE, new_path]\n file_operation(path, command)", "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def movefile(destpath,filename,sourcepath):\n\n\tcommand = 'mv ' + filename + ' ' + destpath\n\t\n\ttry :\n\t\tst = commands.getstatusoutput(command)\n\texcept Exception:\n\t\traise", "def move(self, source, target, force=False):\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def move(self, name, source, dest):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n self._run(name, ['move', source, dest])\n self.m.path.mock_copy_paths(source, dest)\n self.m.path.mock_remove_paths(source)", "def move(self,fileName,destDir):\n self.unload(fileName)\n FileInfos.move(self,fileName,destDir)", "def _move(self, in_file, dest):\n dest = os.path.abspath(dest)\n _, in_base_name = os.path.split(in_file)\n dest_parent_dir, _ = os.path.split(dest)\n if os.path.exists(dest):\n out_file = os.path.join(dest, in_base_name)\n else:\n if not os.path.exists(dest_parent_dir):\n os.makedirs(dest_parent_dir)\n out_file = dest\n shutil.move(in_file, dest)\n\n return out_file", "def move_files(file: str, destination: str):\n\n try:\n result = _process_files(\"mv\", \"-v\", file, destination)\n except FileNotFoundError:\n print(\"ERROR: '{}' does not exist.\".format(file))\n except FolderNotFoundError:\n print(\n \"ERROR: '{}' destination does not exist.\".format(destination)\n )\n except InsufficientRightsError:\n print(\"ERROR: Insufficient rights to destination '{}'.\".format(\n destination)\n )\n else:\n print(result)", "def move(self, destination, **kwargs):\n assert _os.path.exists(self.__str__()) == True\n _shutil.move(self.__str__(), destination, **kwargs)", "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n 
self.communicate(command % (source, target))", "def move_file():\n # print(\"\\n\".join(os.listdir(filepath)))\n # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]\n # print(filepath + \":\\n \" + \"\\n \".join(folders))\n folders = filter(os.path.isdir, os.listdir(u\".\"))\n # print(\"Sub-folders: \", u\"\\n\".join(folders))\n for folder in folders:\n files = [os.path.join(folder, fn) for fn in os.listdir(folder)]\n files = filter(os.path.isfile, files)\n for fn in files:\n _, filename = os.path.split(fn)\n shutil.move(fn, filename)\n assert 0 == len(os.listdir(folder))", "def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def MoveFile(path, new_path):\n try:\n RemoveFile(new_path)\n os.rename(path, new_path)\n except OSError, e:\n if e.errno != errno.ENOENT:\n raise", "def move_file(self, ctx):\n pass", "def move(self, *args, **kw):\n return self.execute_action('move', *args, **kw)", "def move(source, destination):\n logger.info(\"Move: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.move(source, destination)\n return True\n except Exception:\n logger.exception(\"Failed to Move: %s -> %s\" % (source, destination))\n return False", "def move(self,fileName,destDir):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,fileName)\n renameFile(srcPath,destPath)\n self.refresh()", "def mv(self, src: int, dest: int) -> bool:\n url = 'https://webapi.115.com/files/move'\n result = self.s.post(url, data={'pid': dest, 'fid[0]': src}, headers={'Origin': origin['webapi'], 'Referer': referer['115'].format(self.default_dir)}).json()\n if result['errno'] == '':\n _ = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs) # TODO: need to test\n self._dirs_lookup[src] = self._dirs_lookup[dest].append(dest)\n parent = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs)\n if src not in parent:\n parent.update({src: _})\n else:\n parent.get(src).update(_)\n return True", "def moveFile(sourceFullPath,targetDir):\n\n thisFunc = inspect.currentframe().f_code.co_name\n try:\n shutil.move(sourceFullPath,targetDir)\n return True\n except Exception as e:\n print(f\"{thisFunc} issue: {e}\")\n return False", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def mv(cur_path, new_path):\n cur_abs = navigate.get_abs_path(cur_path)\n new_abs = navigate.get_abs_path(new_path)\n cur_parent, cur_name = navigate.split_path(cur_abs)\n new_parent, new_name = navigate.split_path(new_abs)\n up_parent, up_name = navigate.split_path(new_parent)\n if not db.file_exists(cur_parent, cur_name):\n print \"Error: '\" + cur_name + \"' does not exist.\"\n elif 
up_parent is not None and not db.directory_exists(up_parent, up_name):\n print \"Error: '\" + new_parent + \"' is not a valid directory.\"\n elif db.file_exists(new_parent, new_name):\n print \"Error: '\" + new_name + \"' already exists at that location.\"\n else:\n cur_dbox_path = '/' + cur_name\n new_dbox_path = '/' + new_name\n access_token = db.get_access_to_file(cur_parent, cur_name)\n client = dropbox.client.DropboxClient(access_token)\n client.file_move(cur_dbox_path, new_dbox_path)\n db.move_file(cur_parent, cur_name, new_parent, new_name)", "def move(self, target):\n if target.relto(self):\n raise error.EINVAL(target, \"cannot move path into a subdirectory of itself\")\n try:\n self.rename(target)\n except error.EXDEV: # invalid cross-device link\n self.copy(target)\n self.remove()", "def move_item(dataobj_id, new_path):\n file = get_by_id(dataobj_id)\n data_dir = get_data_dir()\n out_dir = (data_dir / new_path).resolve()\n if not file:\n raise FileNotFoundError\n if (out_dir / file.parts[-1]).exists():\n raise FileExistsError\n elif is_relative_to(out_dir, data_dir) and out_dir.exists(): # check file isn't\n return shutil.move(str(file), f\"{get_data_dir()}/{new_path}/\")\n return False", "def move_file(src, dst):\n # Sanity checkpoint\n src = re.sub('[^\\w/\\-\\.\\*]', '', src)\n dst = re.sub('[^\\w/\\-\\.\\*]', '', dst)\n if len(re.sub('[\\W]', '', src)) < 5 or len(re.sub('[\\W]', '', dst)) < 5:\n debug.log(\"Error: Moving file failed. Provided paths are invalid! src='%s' dst='%s'\"%(src, dst))\n else:\n # Check destination\n check = False\n if dst[-1] == '/':\n if os.path.exists(dst):\n check = True # Valid Dir\n else:\n debug.log(\"Error: Moving file failed. Destination directory does not exist (%s)\"%(dst)) #DEBUG\n elif os.path.exists(dst):\n if os.path.isdir(dst):\n check = True # Valid Dir\n dst += '/' # Add missing slash\n else:\n debug.log(\"Error: Moving file failed. %s exists!\"%dst)\n elif os.path.exists(os.path.dirname(dst)):\n check = True # Valid file path\n else:\n debug.log(\"Error: Moving file failed. %s is an invalid distination!\"%dst)\n if check:\n # Check source\n files = glob.glob(src)\n if len(files) != 0:\n debug.log(\"Moving File(s)...\", \"Move from %s\"%src, \"to %s\"%dst)\n for file_ in files:\n # Check if file contains invalid symbols:\n invalid_chars = re.findall('[^\\w/\\-\\.\\*]', os.path.basename(file_))\n if invalid_chars:\n debug.graceful_exit((\"Error: File %s contains invalid \"\n \"characters %s!\"\n )%(os.path.basename(file_), invalid_chars))\n continue\n # Check file exists\n if os.path.isfile(file_):\n debug.log(\"Moving file: %s\"%file_)\n shutil.move(file_, dst)\n else:\n debug.log(\"Error: Moving file failed. %s is not a regular file!\"%file_)\n else: debug.log(\"Error: Moving file failed. No files were found! 
(%s)\"%src)", "def MovePath(options, src, dst):\n # if the destination is not an existing directory, then overwrite it\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n\n # If the destination exists, the remove it\n if os.path.exists(dst):\n if options.force:\n Remove(['-vfr', dst])\n if os.path.exists(dst):\n raise OSError('mv: FAILED TO REMOVE ' + dst)\n else:\n raise OSError('mv: already exists ' + dst)\n for _ in range(5):\n try:\n os.rename(src, dst)\n break\n except OSError as error:\n print('Failed on %s with %s, retrying' % (src, error))\n time.sleep(5)\n else:\n print('Gave up.')\n raise OSError('mv: ' + error)", "def move_file(host, source_fqpath, dest_fqpath):\n command = \"mv %s %s\" % (source_fqpath, dest_fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('mv failed: %s' % rerr)\n return False", "def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0", "def relocate(self, source, destination):\n destination_dir = os.path.dirname(destination)\n if not os.path.exists(destination_dir):\n self.subdir(destination_dir)\n os.rename(source, destination)", "def file_move(session, dc_ref, src_file, dst_file):\n LOG.debug(\"Moving file from %(src)s to %(dst)s.\",\n {'src': src_file, 'dst': dst_file})\n vim = session._get_vim()\n move_task = session._call_method(\n session._get_vim(),\n \"MoveDatastoreFile_Task\",\n vim.get_service_content().fileManager,\n sourceName=src_file,\n sourceDatacenter=dc_ref,\n destinationName=dst_file,\n destinationDatacenter=dc_ref)\n session._wait_for_task(move_task)\n LOG.debug(\"File moved\")", "def move_dirs(args):\n src = args[0]\n dst = args[1]\n print(\"Moving from: {}\".format(src))\n print(\" to: {}\".format(dst))\n shutil.move(src, dst)\n return", "def move_file(source_file, dest_file, sudo=True):\n LOG.info(\"Copy file and preserve attributes\")\n cmd = \"mv {} {}\".format(source_file, dest_file)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def move_to(self, file_name, to_dir, change_name_to=None):\n self._check_filename(file_name)\n src = posixpath.join(server_setup.LOCAL_DIR, file_name)\n file_name = file_name if change_name_to is None else change_name_to\n dest = posixpath.join(self.root, to_dir, file_name)\n print(f\"--> Moving file {src} to {dest}\")\n self._check_file_exists(dest, should_exist=False)\n self.copy(src, dest)\n self.remove(src)", "def mv(src_path, dest_path):\n try:\n os.rename(src_path, dest_path)\n except OSError:\n # this will happen on windows\n os.remove(dest_path)\n os.rename(src_path, dest_path)", "def move_file_in_dir(name_file, desten):\n\n if os.path.isfile(config_tools.full_dest+name_file):\n try:\n shutil.move(config_tools.full_dest + name_file, config_tools.full_dest + desten)\n except OSError:\n print(f\"Не удалось переместить {name_file} в папку:{desten}\")\n else:\n print(f\"Файл {name_file} находиться в папке {desten}\")", "def move_character(character, dest):\n character_path = dirname(character.path)\n shutil.move(character_path, dest)", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n 
newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def move(src, dst, ignore=False, force=False):\n copy(src, dst, ignore, force)\n remove(src)", "def _move(self, id: str, parent_id: str) -> MoveFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.move\n request_obj: MoveFolderRequestModel = endpoint.load_request(parent_id=parent_id)\n response: MoveFolderResponseModel = endpoint.perform_request(\n http=self.auth.http,\n request_obj=request_obj,\n id=id,\n )\n return response", "def move_file(self, old_file: str, new_sub_dir: str):\n full_old_path = os.path.join(self.root, old_file)\n full_new_path = os.path.join(self.root, new_sub_dir, old_file)\n os.rename(full_old_path, full_new_path)", "def _move_to_inserted_directory(file_path: str):\n parts = list(Path(file_path).parts)\n parts.insert(-1, 'inserted')\n move(file_path, str(Path(*parts)))", "def move_to(self, file_name, to_dir, change_name_to=None):\n raise NotImplementedError", "def move_to(self, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/move\" % self.project_folder_id, params=params)", "def transfer_files(src: str, dst: str, move_src_data: bool = False):\n if move_src_data:\n logger.info('Move {0} to {1}'.format(src, dst))\n shutil.move(src, dst)\n else:\n logger.info('Copy {0} to {1}'.format(src, dst))\n copy_tree(src, dst)", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def MoveFile(from_path, to_path, check_conflicts=True):\n from_path = from_path.replace(\"/\", \"\\\\\")\n if check_conflicts and not os.path.isfile(from_path): # Don't move non-existant path\n raise FileNotFoundError(\"Path {} does not exist!\".format(from_path))\n to_path = to_path.replace(\"/\", \"\\\\\")\n if check_conflicts and not os.path.isdir(os.path.dirname(to_path)): # Don't move to non-existant dir\n raise FileNotFoundError(\"Path {} does not exist to move to!\".format(os.path.dirname(to_path)))\n values = __get_current_values()\n if check_conflicts and os.path.isfile(to_path): # Don't move to already-existing destination unless it will be deleted/moved\n values.reverse()\n try:\n to_path_index = values.index(\"\\\\??\\\\\" + to_path)\n except ValueError:\n to_path_index = -1\n if to_path_index % 2 == 0 or to_path_index == -1:\n raise FileExistsError(\"Path {} already exists and isn't already being deleted/moved!\".format(to_path))\n values.reverse()\n values.append(\"\\\\??\\\\\" + from_path)\n values.append(\"\\\\??\\\\\" + to_path)\n __set_registry(values)", "def move_file(path_from, filename):\n finaldir = getormakedir(settings.UPLOAD_DEST_DIR, filename)\n\n path_to = os.path.join(finaldir, filename)\n\n if not os.path.exists(path_to):\n shutil.copyfile(path_from, path_to)\n if settings.REMOVE_UPLOAD_FILES:\n remove_file(path_from)\n\n return path_to", "def move(queue: Queue,\n from_path: list,\n to_path: str\n ) -> None:\n if len(from_path) > 1: # In case files were 
got with mask\n for single_path in from_path:\n file = os.path.basename(os.path.normpath(single_path))\n files_location = os.path.commonpath(from_path)\n queue.put(file)\n Thread(target=move_file, args=(queue, files_location, to_path)).start()\n print('Files have been moved.')\n else: # In other cases there will be just one item in array\n source_location = from_path[0]\n if os.path.isdir(from_path[0]):\n files = os.listdir(source_location)\n folder_name = os.path.basename(os.path.normpath(source_location))\n path_to_folder = os.path.join(to_path, folder_name)\n\n if not os.path.exists(path_to_folder):\n os.mkdir(path_to_folder)\n\n threads = []\n for file in files:\n # Each file we put to a queue that has limited number of items.\n # And than it creates a separate thread for each file.\n queue.put(file)\n move_thread = Thread(target=move_file, args=(queue, source_location, path_to_folder))\n threads.append(move_thread)\n move_thread.start()\n # Make sure that all our thread are finished before removing original folder\n for thread in threads:\n thread.join()\n\n os.rmdir(source_location)\n print('Folder has been moved.')\n elif os.path.isfile(from_path[0]): # If it's a file we just copy it without any threads\n file_location = from_path[0]\n file_name = os.path.basename(os.path.normpath(file_location))\n if not os.path.exists(file_name):\n shutil.move(file_location, to_path)\n print(f'File {file_name} has been moved.')\n else:\n print(f'File {file_name} already exists')\n elif not os.path.exists(from_path[0]):\n raise NameError('No such files or folders.')", "def moveImage(image, dest):\n if not os.path.exists(dest):\n os.mkdir(dest)\n move(image, dest)", "def move(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n pass\n else: # Something exists here.\n if not overwrite:\n raise ValueError(\"Something exists at %s\" % remote.uri)\n # There's no way to copy and overwrite at the same time,\n # so delete the existing file first.\n # Note that this can delete folders too.\n remote.delete()\n\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_move_v2, self.path, dest)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n\n pdbox.info(\"Moved %s to %s\" % (self.path, dbx_uri(dest)))\n if not pdbox._args.get(\"dryrun\"): # Return the newly created object.\n return get_remote(None, meta=result.metadata)", "def on_moved(self, event):\n super(myEventHandler,self).on_moved(event)\n #moveto events from external folders have no src_path\n source = event.src_path\n dest = event.dest_path\n if event.is_directory:\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n pass\n #file = splitpath[1]\n #pathtoonedir = self.onedir.getonedirrectory()\n #oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n #newpath = splitdest[0].replace(pathtoonedir ,\"\")\n #if oldpath is \"\":\n # oldpath = os.path.sep\n #self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! 
\" + e\n exit(1)\n else:\n #rename!!!!!!!!\n oldname = source\n newname = dest\n pathtoonedir = self.onedir.getonedirrectory()\n oldname = oldname.replace(pathtoonedir ,\"\")\n newname = newname.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(oldname,newname)\n else:\n #if it comes from outside the folder structure\n if source is None:\n try:\n #use os.path.split to get file name and path\n splitpath = split(dest)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! \" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n else:\n #file was moved!\n #check if name stays the same i.e. it's a move not a rename!\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n newpath = splitdest[0].replace(pathtoonedir ,\"\")\n if oldpath is \"\":\n oldpath = os.path.sep\n self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n file = splitpath[1]\n newname = splitdest[1]\n pathtoonedir = self.onedir.getonedirrectory()\n path = splitpath[0].replace(pathtoonedir ,\"\")\n if path is \"\":\n path = os.path.sep\n else:\n path = path[1:]\n self.onedir.rename(file,path,newname)", "def move_from_temp_directory(self):", "def move_file_to_dir(f, dest_dir):\n ls = list_files(dest_dir)\n if f not in ls:\n shutil.move(f, dest_dir)", "def SshMoveFile(host, src_path, dest_path):\n command = ['ssh', host, 'test', '-e', src_path]\n result = RunCommand(command)\n if result:\n # Nothing to do if src_path doesn't exist.\n return result\n\n command = ['ssh', host, 'mv', src_path, dest_path]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh mv \"%s\" -> \"%s\" on \"%s\" (%s)' %\n (src_path, dest_path, host, result))", "def move_to(self, file_name, to_dir=None, change_name_to=None):\n self._check_filename(file_name)\n from_path = os.path.join(self.local_root, file_name)\n\n if not os.path.isfile(from_path):\n raise FileNotFoundError(\n f\"{file_name} not found in {self.local_root} on local machine\"\n )\n\n file_name = file_name if change_name_to is None else change_name_to\n to_dir = \"\" if to_dir is None else to_dir\n to_path = posixpath.join(self.root, to_dir, file_name)\n self.makedir(to_dir)\n self._check_file_exists(to_path, should_exist=False)\n\n with self.ssh.open_sftp() as sftp:\n print(f\"Transferring {from_path} to server\")\n sftp.put(from_path, to_path)\n\n print(f\"--> Deleting {from_path} on local machine\")\n os.remove(from_path)", "def os_rename(self, source, destination):\n cmd = ['/bin/mv', source, destination]\n process = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n returncode = subprocess.Popen.wait(process)\n return returncode", "def move(self, dst, src): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def hmove(src_path, res_path):\n os.rename(src_path, res_path)", "def move(name, other, newname=None):", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n 
shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def moveAsset(self, src, dst):\n if not self.exists( self.dirname(dst) ):\n self.makedirs( self.dirname(dst) )\n self.move(src, dst)\n\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return \n\n cache_dst = self.cache_path(dst)\n if not os.path.exists( os.path.dirname(cache_dst) ):\n os.makedirs( os.path.dirname(cache_dst) )\n shutil.move(cache_src, cache_dst)", "def move_file_to_directory(base_path, file_name, directory_name):\n path = FileUtils.full_path\n\n full_file_path = path(base_path, file_name)\n full_dir_path = path(base_path, directory_name)\n full_new_path = path(full_dir_path, file_name)\n try:\n os.rename(full_file_path, full_new_path)\n except FileNotFoundError:\n pass\n # pass for now", "def safe_move(src: str, dst: str) -> None:\n try:\n os.rename(src, dst)\n except OSError as err:\n\n if err.errno == errno.EXDEV:\n # Generate a unique ID, and copy `<src>` to the target directory\n # with a temporary name `<dst>.<ID>.tmp`. Because we're copying\n # across a filesystem boundary, this initial copy may not be\n # atomic. We intersperse a random UUID so if different processes\n # are copying into `<dst>`, they don't overlap in their tmp copies.\n copy_id = uuid4()\n tmp_dst = \"%s.%s.tmp\" % (dst, copy_id)\n shutil.copyfile(src, tmp_dst)\n\n # Then do an atomic rename onto the new name, and clean up the\n # source image.\n os.rename(tmp_dst, dst)\n os.unlink(src)\n else:\n raise", "def move_file(self, file_name:str, new_dir:str)->bool:\n ret_value = True \n new_dir = os.path.expanduser(os.path.expandvars(new_dir)) \n try: \n os.rename(file_name, new_dir+\"/\"+file_name.rsplit(\"/\", 1)[-1]) \n except: \n print(\"Failed to move file (%s) to publisher (%s)\" % (file_name, new_dir))\n ret_value = False \n return ret_value", "def _process_file_movement(src:str, dest:str, is_move=False)->bool:\n debug_str = \"move\" if (is_move) else \"copy\"\n \n objects = _list_objects(src) # list objects\n for obj in objects:\n if _is_dir(dest) or _is_dir(src):\n temp_dest = _append_object(dest, _get_dest_obj_name(src, obj))\n else:\n temp_dest = dest\n \n if _is_s3(src) and _is_s3(dest): #s3 to s3\n src_bucket, _ = _extract_bucket_key(src)\n dest_bucket, dest_key = _extract_bucket_key(temp_dest)\n print(f\"{debug_str} file s3://{src_bucket}/{obj} to {temp_dest}\")\n status = _copy_s3_to_s3(src_bucket, obj, dest_bucket, dest_key)\n if status and is_move:\n aws_s3_rm(f\"s3://{src_bucket}/{obj}\")\n elif _is_s3(src): # s3 to local\n src_bucket, _ = _extract_bucket_key(src)\n _create_local_dir(temp_dest) # create dir if doesn't exist\n print(f\"{debug_str} file s3://{src_bucket}/{obj} to {temp_dest}\")\n status = _copy_s3_to_local(src_bucket, obj, temp_dest)\n if status and is_move:\n aws_s3_rm(f\"s3://{src_bucket}/{obj}\")\n elif _is_s3(dest): # local to s3\n dest_bucket, dest_key = _extract_bucket_key(temp_dest)\n print(f\"{debug_str} file {obj} to {temp_dest}\")\n status = _copy_local_to_s3(obj, dest_bucket, dest_key)\n if status and is_move:\n os.remove(obj) \n \n if not status:\n raise Error(f\"S3 {debug_str} failed.\")\n return True", "def move(self, source, dest, dry_run=False, after=False, force=False,\n include=None, exclude=None):\n eh = SimpleErrorHandler()\n self._client.execute('move', source, dest, n=dry_run, A=after,\n f=force, I=include, X=exclude, eh=eh)\n\n return bool(eh)", "def move_recursively(src, dst, overwrite=False, 
changed_only=True):\n if os.path.isdir(src):\n movetree(src, dst, overwrite, changed_only)\n else:\n movefile(src, dst, overwrite, changed_only)", "def move_media(items, dest):\n for file in items:\n filename = os.path.basename(file)\n os.rename(file, dest + '\\\\' + filename)", "def mv(self, source: str, filename: str) -> None:\n\n self.cp(source, filename)\n self.rm(source)", "def move_file(self, from_path: str, to_path: str, force: bool = False) -> Dict:\n raise NotImplementedError", "def move(model, origin, dest):\n model.move(origin, dest)", "def move(model, origin, dest):\n model.move(origin, dest)", "def _move_item(self, src, dst):\n \"Does nothing\"", "def move_file(original_path,final_path,max_attempts=30):\n assert_is_string(original_path)\n assert_is_string(final_path)\n\n attempt_counter = 0\n while attempt_counter < max_attempts:\n attempt_counter += 1\n if attempt_counter > 1:\n # Pause if something went wrong, (yt-dl is a suspect, might not be closing files?)\n time.sleep(attempt_counter)\n logging.debug(\"Attempt \"+repr(attempt_counter)+\" to move \"+repr(original_path)+\" to \"+repr(final_path))\n try:\n # Make sure output folder exists\n output_dir = os.path.dirname(final_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n assert(os.path.exists(output_dir))\n # Move file\n shutil.move(original_path, final_path)\n assert(not os.path.exists(original_path))\n assert(os.path.exists(final_path))\n return\n except WindowsError, err:\n logging.exception(err)\n logging.error(\"Failed to move file: \"+repr(original_path)+\" to \"+repr(final_path))\n continue\n # If we get here we already have an exception to re-raise\n logging.critical(\"move_file() Too many failed attempts to move a file!\")\n logging.critical(\"move_file()\"+repr(locals()))\n raise", "def move_delete(dir_path, filename):\n # Get path, name from filename\n path, name = os.path.split(filename)\n # Normalize with destination considerations\n nf = os.path.join(dir_path, increment_file_number(dir_path, name))\n move_file(filename, nf)", "def Move(args):\n\n parser = argparse.ArgumentParser(usage='mv [Options] sources... 
dest',\n description=Move.__doc__)\n parser.add_argument(\n '-v', '--verbose', dest='verbose', action='store_true',\n default=False,\n help='verbose output.')\n parser.add_argument(\n '-f', '--force', dest='force', action='store_true',\n default=False,\n help='force, do not error it files already exist.')\n parser.add_argument('srcs', nargs='+')\n parser.add_argument('dest')\n\n options = parser.parse_args(args)\n\n if options.verbose:\n print('mv %s %s' % (' '.join(options.srcs), options.dest))\n\n for src in options.srcs:\n MovePath(options, src, options.dest)\n return 0", "def mv(self, mv_from, mv_to, **kwargs):\n return self.exec_command('mv %s %s' % (mv_from, mv_to), **kwargs)", "def mv(ctx, fromname, toname):\n\n # create local copies of ctx vaiables for easy access\n path = ctx.obj[\"path\"]\n gitCommand = ctx.obj[\"gitCommand\"]\n\n if(not isdir(path)):\n print(\"No notes directory found at \" + path)\n\n else:\n dir_name, _ = split(path + \"/\" + toname)\n makedirs(dir_name, exist_ok=True)\n\n move(path + \"/\" + fromname, path + \"/\" + toname)\n system(gitCommand + \"add .\")\n system(gitCommand + \"commit -m 'moved \" + fromname + \"to\" + toname + \"'\")", "def test_move_file_new_path(self, mock_message, mock_move):\n\n volume_path = os.path.join('the', 'volume', 'path')\n file_path_1 = os.path.join('my_dir', 'my_file.txt')\n file_path_2 = os.path.join('my_dir', 'my_file.json')\n full_path_file_1 = os.path.join(volume_path, file_path_1)\n full_path_file_2 = os.path.join(volume_path, file_path_2)\n\n file_1 = storage_test_utils.create_file(file_path=file_path_1, workspace=self.old_workspace)\n file_2 = storage_test_utils.create_file(file_path=file_path_2, workspace=self.old_workspace)\n file_ids = [file_1.id, file_2.id]\n\n # Call function\n move_files(file_ids, new_workspace=None, new_file_path='/test/path')\n\n # Check results\n mock_move.assert_called()", "def _move_mount(original_root, mount_entry):\n target = mount_entry.target[len(original_root):]\n _LOGGER.info('Mount move %r => %s', mount_entry, target)\n\n try:\n fs_linux.mount_move(target, mount_entry.target)\n except FileNotFoundError as err:\n _LOGGER.warning('missing mountpoint %r: %s',\n mount_entry.target, err)", "def mv(self, file_id, new_parent_id):\n func = f\"setMoveFile(Token: $Token, Copy: $Copy, NewParentID: $NewParentID, FileIDs: $FileIDs)\"\n query = f\"mutation SetMoveFile($Token: String!, $Copy: Boolean, $NewParentID: String!, $FileIDs: [String]!) 
{{ {func} }}\"\n\n request = {\"operationName\": \"SetMoveFile\",\n \"variables\": {\n \"Token\": self.KEYS[\"Token\"],\n \"NewParentID\": new_parent_id,\n \"FileIDs\": [\n file_id\n ]\n },\n \"query\": query\n }\n\n header = {\"x-api-key\": self.KEYS[\"x-api-key\"]}\n\n response = requests.post(URL_API, headers=header, data=json.dumps(request))\n\n if response.ok:\n rd = json.loads(response.text)\n\n if \"errors\" in rd:\n messages = []\n for error in rd[\"errors\"]:\n messages.append(error[\"message\"])\n message = '\\n'.join(messages)\n raise DegooError(f\"getUserInfo failed with: {message}\")\n else:\n return rd[\"data\"]['setMoveFile']\n else:\n raise DegooError(f\"renameFile failed with: {response}\")", "def move_files(src, dst, filenames):\n for filename in filenames:\n os.rename(os.path.join(src, filename), os.path.join(dst, filename))", "def do_move(self, rel=True):\n cmd = self.MGMSG_MOT_MOVE_ABSOLUTE\n if rel:\n cmd = self.MGMSG_MOT_MOVE_RELATIVE\n self.__send_short(cmd, self.__chan, 0x00)", "def move_file_on_datastore(content, datastore_name, datacenter_name, source, destination):\n datacenter = get_obj(content, [vim.Datacenter], datacenter_name)\n datastore = get_obj(content, [vim.Datastore], datastore_name)\n task = vim.FileManager.MoveDatastoreFile_Task(\n content.fileManager,\n '[{0}] {1}'.format(datastore_name, source),\n datacenter,\n '[{0}] {1}'.format(datastore_name, destination),\n datacenter,\n True\n )\n wait_for_task(task)", "def do_mv(self, args):\n if args:\n args = args.split()\n\n if not args or len(args) < 2:\n print('Usage: mv source_file target_file')\n return\n\n src = args[0]\n dst = args[1]\n if not (src.startswith('shared/') and dst.startswith('shared/')\n or self._user):\n print('login required for specifying non-shared file with mv')\n return\n\n try:\n new_name = self._qm.rename_file(self._user, src, dst)\n print('renamed file', src, 'to', new_name)\n except Exception as e:\n print('ERROR renaming %s: %s' % (src, e), file=sys.stderr)\n return", "def convert_and_move_file (filename, origpath, wavpath, mp4path, mono):\n name, ext = path.splitext(filename)\n if ext == \".mp4\":\n print(filename)\n convert_to_wav (filename, name, origpath, wavpath, mono)\n\n if not path.exists(mp4path):\n makedirs(mp4path)\n oldlocation = path.join(origpath, filename)\n newlocation = path.join(mp4path, filename)\n shutil.move(oldlocation, newlocation)", "def move(self, dir, logs=None) -> str:\n if logs is not None:\n logs.append((self.id, ValidActions.MOVE, dir))\n return \"m {} {}\".format(self.id, dir)", "def _move_image(self, label, ind):\r\n root, file_name = os.path.split(self.df.sorted_in_folder[ind])\r\n # two lines below check if the filepath contains as an ending a folder with the name of one of the labels\r\n # if so, this folder is being cut out of the path\r\n if os.path.split(root)[1] in labels:\r\n root = os.path.split(root)[0]\r\n# output_path = os.path.join(root, label, file_name)\r\n output_path = self.label_dir + '/' + label + '/' + file_name\r\n print(\"file_name =\",file_name)\r\n print(\" %s --> %s\" % (file_name, label))\r\n move(self.df.sorted_in_folder[ind], output_path)\r\n \r\n # keep track that the image location has been changed by putting the new location-path in sorted_in_folder \r\n self.df.loc[ind,'sorted_in_folder'] = output_path\r\n \r\n #####\r", "def aws_s3_mv(src:str, dest:str)->bool:\n if _is_s3(src) or _is_s3(dest):\n status = _process_file_movement(src, dest, True)\n else:\n raise Error(\"None of the src/dest is an s3 filesystem. 
Use local file utils to move.\")\n if not status:\n raise Error(\"S3 move failed.\")\n return True", "def move(self, move):\n raise NotImplementedError()", "def mv_file(file_name: str, path: str) -> None:\n global number_of_files\n if file_name.startswith(\".\"):\n pass\n else:\n for extensions in file_formats_list:\n if file_.endswith(extensions):\n shutil.move(desktop + \"/\" + file_, path)\n print(f\"moving {colored(file_name, 'yellow')} to {path}\")\n number_of_files += 1\n else:\n pass", "def move(owner_id=None, target_album_id=None, photo_id=None):\n params = {\n 'owner_id': owner_id,\n 'target_album_id': target_album_id,\n 'photo_id': photo_id\n }\n result = call('photos.move', **params)\n return parse_response(result)", "def move(mover, backup, regular_expressions, capture_groups):\r\n find, move = regular_expressions\r\n mover.find_files(find)\r\n mover.move_files(move, capture_groups)\r\n backup.write_to_json()" ]
[ "0.83455", "0.7957065", "0.7644527", "0.76268214", "0.75139415", "0.74997723", "0.74345535", "0.74257374", "0.73716736", "0.7324468", "0.7222057", "0.7205844", "0.72037476", "0.71912545", "0.7157873", "0.7147585", "0.7131787", "0.7130521", "0.71214455", "0.71214455", "0.71021426", "0.7076626", "0.7056545", "0.70539224", "0.70270497", "0.7010007", "0.70044804", "0.6918016", "0.6903254", "0.68890315", "0.6875991", "0.68396103", "0.68098575", "0.676809", "0.6758894", "0.6745995", "0.6739574", "0.6703446", "0.67027336", "0.6687395", "0.6686262", "0.66766536", "0.6663754", "0.6663103", "0.66555065", "0.65762424", "0.655628", "0.65443397", "0.65157914", "0.6513807", "0.65096086", "0.6474707", "0.64675033", "0.64446115", "0.64261043", "0.64215463", "0.64195496", "0.6412945", "0.64115286", "0.64017034", "0.6395142", "0.6387441", "0.63864475", "0.6373944", "0.63724816", "0.63646275", "0.634617", "0.63385504", "0.6336498", "0.6330803", "0.63277227", "0.6323149", "0.63206166", "0.6257816", "0.6241463", "0.62344044", "0.62178314", "0.62035537", "0.62035537", "0.6202593", "0.6191869", "0.6186198", "0.6175804", "0.6157353", "0.61507237", "0.6149133", "0.61480504", "0.61457944", "0.614205", "0.612537", "0.61180043", "0.61133575", "0.61033624", "0.60982543", "0.6082693", "0.60775965", "0.60741186", "0.6062835", "0.6062756", "0.60554564" ]
0.69655865
27
Get the versions from GitHub tags
def get_versions(self): # They randomly use and don't use 'r' prefix so we have to sort # versions manually versions = list(self._get_github_tags()) versions.sort( key=operator.attrgetter('base_version'), reverse=True, ) return versions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_github_chandra_models_version_info():\n with urlopen('https://api.github.com/repos/sot/chandra_models/tags') as url:\n response = url.read()\n tags = json.loads(response.decode('utf-8'))\n\n with urlopen('https://api.github.com/repos/sot/chandra_models/branches') as url:\n response = url.read()\n branches = json.loads(response.decode('utf-8'))\n\n all_versions_info = {t[\"name\"]: t for t in tags}\n all_versions_info.update({b[\"name\"]: b for b in branches})\n return all_versions_info", "def _select_version_tags(tags):\n return [t for t in tags if VERSION_REGEX.match(t)]", "def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def versions(self):\n versions = (t.lstrip('v') for t in self.tags)\n return filter(version_is_valid, versions)", "def show_git_versions(ctx):\n\n ws = get_workspace(config)\n\n exp = Experiment(ws, config[\"experiment_name\"])\n\n versions = [\n (run.id, run.get_properties()[\"azureml.git.commit\"]) for run in exp.get_runs()\n ]\n\n print(tabulate(versions, headers=[\"Run ID\", \"Git Version\"]))", "def versions():\n result = timeline.versions()\n if result:\n click.echo('\\n'.join(result))", "def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())", "def get_repository_tags(repository_name):\n tags_query = \"SELECT * FROM release_tag where repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def get_stack_versions(stack_root):\n stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)\n code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))\n versions = []\n if 0 == code:\n for line in out.splitlines():\n versions.append(line.rstrip('\\n'))\n if not versions:\n versions = get_versions_from_stack_root(stack_root)\n return versions", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def get_versions(self):\n raise NotImplementedError", "async def manage_version():\n\n try:\n repo = git.Repo(search_parent_directories=True)\n version = repo.git.describe('--tags')\n except Exception:\n version = \"v0.0.0\"\n\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n creation_time = time.ctime(os.path.getmtime(base_dir))\n\n response = {'version': version, 'deployedOn': creation_time}\n return OK(response)", "def getVersions(self):\n logger.debug(\"Func: getVersions\")\n\n try:\n return self._currentSceneInfo[\"Versions\"]\n except:\n return []", "def get_version_from_git(opts):\n\tstdout = opts.tag or Popen(gitargs, stdout=PIPE).communicate()[0].rstrip('\\n')\n\n\tversion, gitmeta = process_git_tag(opts.regex, stdout)\n\n\treturn version, gitmeta", "def get_latest_tags(self):\n\n start = len(self.tags) - self.num_comparisons\n tags = self.tags\n latest = []\n for i in xrange(len(tags)):\n if i >= start:\n parts = tags[i]['ref'].split('/')\n release_num = parts[2]\n sha = tags[i]['object']['sha']\n tag = [release_num, sha]\n latest.append(tag)\n return 
latest", "def get_linked_versions(version='current'):\n version = check_version_str(version)\n chapters = [10, 9, 8]\n version_page = 'https://research.cs.wisc.edu/htcondor/manual/{ver}/{chapter}_Version_History.html'\n r = requests.get(version_page.format(ver=version, chapter=chapters[0]))\n if r.status_code == 404:\n # Try different chapter numbers, as it changes for different versions\n i = 1\n while r.status_code == 404 and i < len(chapters):\n r = requests.get(version_page.format(ver=version, chapter=chapters[i]))\n i += 1\n if r.status_code == 404:\n return []\n soup_vers = bs4.BeautifulSoup(r.text, 'lxml')\n versions = [x.text.replace('Version ', '')\n for x in soup_vers.find_all('a')\n if x.text.startswith('Version')]\n return versions", "def versions(self) -> List['RadsProjectVersion']:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\")\n return [RadsProjectVersion(self, RadsVersion(l)) for l in listing.splitlines()]", "def find_branches(versions):\n\n versions = map(LooseVersion, versions)\n\n # group versions by (major, minor) parts\n major_minor = lambda item: item.version[:2]\n versions.sort()\n tip = last(versions)\n grouped = groupby(versions, key=major_minor)\n\n chunks = (tuple(value) for key, value in grouped)\n\n # we only take versions which has patches\n chunks = (versions for versions in chunks if len(versions) > 1)\n\n # and we only need latest patch releases\n result = map(last, chunks)\n\n # we also add the last version bacause it is a tip\n if last(result) is not tip:\n result.append(tip)\n\n return [item.vstring for item in result]", "def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))", "def unsafe_get_stack_versions():\n stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)\n code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))\n versions = []\n if 0 == code:\n for line in out.splitlines():\n versions.append(line.rstrip('\\n'))\n return (code, out, versions)", "def get_tags(self):\n return self.get_url_data(self.api_url + 'refs/tags')", "def get_versions():\n ret_obj = {'versions': picard_versions(current_app)}\n return make_response(jsonify(ret_obj), 200)", "def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")", "def ListVersions(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_version():\n parent_dir = os.path.dirname(os.path.realpath(__file__))\n while True:\n if '.git' in os.listdir(parent_dir):\n break\n parent_dir = os.path.dirname(parent_dir)\n git_log = os.path.join(parent_dir,'.git','logs','HEAD')\n handle = open(git_log,'r')\n 
log_lines = [l.split('\\t') for l in handle.readlines()]\n #now get latest github commit\n url = 'https://api.github.com/repos/thomasvangurp/epiGBS/commits'\n context = ssl._create_unverified_context()\n result = json.load(urllib.urlopen(url,context=context))\n print('')", "def _get_semver_versions(self, versions):\n semver = []\n for ver in versions:\n semver.append(api.to_semver(ver))\n return semver", "def get_tags_list(url, auth_token, repo_name):\n response, _ = get_response(url + '/v2/' + repo_name + '/tags/list',\n auth_token)\n result = response.get('tags', [])\n return result", "def get_versions(start='current'):\n start = check_version_str(start)\n versions = get_linked_versions(start)\n\n results = versions[:]\n while results:\n results = get_linked_versions(results[-1])\n print results\n if results:\n versions.extend(results)\n\n versions = [x for x in set(versions) if check_manual_exists(x)]\n return sort_versions(versions, reverse=True)", "def versions(self) -> Dict[str, str]:\n self.__logger.debug('Eva.versions called')\n return self.__http_client.api_versions()", "def identifyVersions(self, logger):\n results = []\n # extract the version from the copyright string\n for work_str in self._version_strings:\n results.append(self.extractVersion(work_str, start_index=work_str.find(self.VERSION_STRING) + len(self.VERSION_STRING)))\n if len(results) == 0 and self._sanity_exists:\n return [self.VERSION_UNKNOWN]\n # return the result\n return results", "def test_basic_version_seek(self):\n version_prefix = 'v'\n tags = []\n for i in range(15):\n tags.append(_TagInfo('%s1.0.%s' % (version_prefix, i),\n 'commit%s' % i,\n version_prefix))\n for i in range(15):\n shuffle(tags)\n self.assertEqual(_seek_last_semver_tag(tags).name, 'v1.0.14')\n self.assertEqual(_seek_last_semver_tag(tags, '1.0.14').name,\n 'v1.0.13')", "def image_versions(self, image_name):\n # TODO: Expand to read all tags locally, not just a fixed list\n try:\n return {\"latest\": self.image_version(image_name, \"latest\")}\n except ImageNotFoundException:\n return {}", "def version(self):\n return tuple(int(x) for x in self.tag.split('.'))", "def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)", "def do_list_tags(cs, args):\n resp, tags = cs.repositories.list_tags(args.repository)\n tags = [{\"Tag\": t} for t in tags]\n utils.print_list(tags, [\"Tag\"], sortby=\"Tag\")", "def get_semver_versions(self, versions):\n semver = []\n for ver in versions:\n semver.append(api.to_semver(ver))\n return semver", "def svn_tag_range(tag):\n # rev last is the actual tag rev\n rev_last = svn_tag_rev_map()[tag]\n \n # rev_start is either the prev release\n # or its the beginning of the rc branch\n t_val = SVNTag(tag)\n if t_val.patch() == 0:\n rc = svn_tag_to_rc(tag)\n rev_start = svn_rc_creation_map()[rc]\n else:\n ok = False\n prev_tag = \"%d.%d.%d\" % (t_val.major(), t_val.minor(), t_val.patch() -1 )\n # fix missing release 2.0.1 doesn't exist, we only have\n # 2.0.0 and 2.0.2\n if prev_tag == \"2.0.1\":\n rc = svn_tag_to_rc(tag)\n rev_start = svn_rc_creation_map()[rc]\n else:\n rev_start = svn_tag_rev_map()[prev_tag]\n return rev_start, rev_last", "def get_tags(directory=None):\n out = check_output('git tag -l', shell=True, cwd=directory)\n return [l.strip() for l in out.splitlines()]", "def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n 
try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date", "def versions(self):\n return self._versions", "def get_ver():\n import subprocess\n\n proc = subprocess.run(\n [\"git\", \"describe\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n if not proc.returncode == 0:\n return\n v = proc.stdout.decode().strip()\n if \"-\" not in v:\n ret = v\n else:\n csum = v[v.rindex(\"-\") + 1 :]\n base = v[: v.rindex(\"-\")]\n count = base[base.rindex(\"-\") + 1 :]\n tag = base[: base.rindex(\"-\")]\n ret = f\"{tag}.post{count}+{csum}\"\n return ret", "def git_list_tags(tags=None,\n tagrgx=TAGRGX_DEFAULT,\n append_tags=None,\n git_cmd=git_cmd,\n heading_level=heading_level,\n include_cmd=include_cmd,\n ):\n git_list_tags_cmd = git_cmd[:] + ['tag', '-l']\n\n if tags is None:\n\n if True:\n git_get_first_rev_cmd = [\n 'rev-list', '--all', '--reverse', '--abbrev-commit'] #|head -n 1\n cmd = git_cmd + git_get_first_rev_cmd\n first_rev_output = subprocess.check_output(cmd).splitlines()\n if not first_rev_output:\n raise Exception(('no first revision found:',\n ('cmd', cmd),\n ('output', first_rev_output)))\n else:\n yield first_rev_output[0].rstrip()\n\n tag_output = subprocess.check_output(git_list_tags_cmd).splitlines()\n logging.debug(('tag_output', tag_output))\n\n # import semantic_version\n versiontags = []\n for x in tag_output:\n x = str(x)\n if re.match(tagrgx, x):\n if x.startswith('v'):\n _x = x[1:]\n elif x.startswith('release/'):\n _x = x[7:]\n else:\n _x = x\n ver = semantic_version.Version(_x.rstrip())\n versiontags.append((ver, x))\n for version, _tag in sorted(versiontags):\n yield _tag\n if append_tags:\n for _tag in append_tags:\n yield _tag", "def _sort_latest_tag(self, versions: List[dict], tag_key: str) -> Dict:\n return next(\n iter(\n sorted(\n versions,\n reverse=True,\n key=lambda s: list(\n map(\n int,\n filter(None, re.sub(r\"[^0-9.]+\", \"\", s.get(tag_key), re.I).split(\".\")),\n )\n )\n if \".\" in s.get(tag_key)\n else [-1],\n )\n )\n )", "def get_last_tag_by_version(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n tags = []\n versions = []\n for line in output.splitlines():\n tags.append(line.strip())\n ver = re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", line)\n if ver:\n versions.append(ver)\n return tags[versions.index(max(versions))] if versions else ''", "def get_github_library_version(name, url):\n while True:\n # For the release, make sure the default versions do not include \"-dev\"\n version = raw_input(\"Version of %s?: \" % name)\n if not url_exists(\"%s/releases/tag/%s\" % (url, version)):\n print_warning(\"The version of %s is not valid. 
Ensure you've chosen a correct value by checking the \"\n \"GitHub releases for exact naming at \"\n \"%s/releases before you continue.\" % (name, url))\n return version", "def all(self):\r\n if self._versions is None or \\\r\n len(self._versions) == 0:\r\n url = \"%s/versions\" % self._url\r\n params = {'f':'json'}\r\n res = self._con.get(url, params)\r\n self._versions = []\r\n if 'versions' in res:\r\n for v in res['versions']:\r\n guid = v['versionGuid'][1:-1]\r\n vurl = \"%s/versions/%s\" % (self._url, guid)\r\n self._versions.append(Version(url=vurl,\r\n flc=self._flc,\r\n gis=self._gis))\r\n return self._versions\r\n return self._versions", "def versions(self, name):\n if not len(self):\n self.update()\n return [version for version in self if os.path.basename(version) == name]", "def _get_versions(self, package):\n raise NotImplementedError(self, \"_get_versions\")", "def test_none_version_return_if_all_excluded(self): # pylint: disable=invalid-name\n version_prefix = 'v'\n tags = [_TagInfo('v1.0.1', 'commit1', version_prefix),\n _TagInfo('notsemver', 'commit2', version_prefix),\n _TagInfo('v1.0.v2', 'commit2', version_prefix)]\n self.assertEqual(_seek_last_semver_tag(tags, '1.0.1'), None)", "def _get_cache_tags(self):\n try:\n project = self._get_project()\n version = self._get_version()\n except Exception:\n log.warning(\n \"Error while retrieving project or version for this view.\",\n exc_info=True,\n )\n return []\n\n tags = []\n if project:\n tags.append(project.slug)\n if project and version:\n tags.append(get_cache_tag(project.slug, version.slug))\n if project and self.project_cache_tag:\n tags.append(get_cache_tag(project.slug, self.project_cache_tag))\n return tags", "def test_ls_returns_sorted_versions():\n with pipeline.fixture(assets=[\"Asset1\"], subsets=[\"animRig\"], versions=1):\n for asset in pipeline.ls():\n previous_version = 0\n for subset in asset[\"subsets\"]:\n for version in subset[\"versions\"]:\n version = version[\"version\"]\n assert version > previous_version\n previous_version = version", "def test_none_version_return(self):\n version_prefix = 'v'\n tags = []\n for i in range(15):\n tags.append(_TagInfo('1.0.' 
+ str(i),\n 'commit' + str(i),\n version_prefix))\n for i in range(15):\n shuffle(tags)\n self.assertEqual(_seek_last_semver_tag(tags, version_prefix), None)", "def get_releases(is_vertebrate: bool):\n url = \"http://ftp.ensemblgenomes.org/pub?\"\n if is_vertebrate:\n url = \"http://ftp.ensembl.org/pub?\"\n ret = retry(requests.get, 3, url)\n # sort releases new to old\n releases = sorted(\n [int(i) for i in re.findall(r'\"release-(\\d+)/\"', ret.text)],\n reverse=True,\n )\n if is_vertebrate:\n # ignore immature releases\n releases = [r for r in releases if r > 46]\n return releases", "def select_versions(self):\n return []", "def select_latest_micro_versions(versions):\n seen_minors = set()\n res = []\n\n for ver, _ in sorted(\n versions.items(),\n # Sort by (minor_version, upload_time) in descending order\n key=lambda x: (Version(x[0]).release[:2], x[1]),\n reverse=True,\n ):\n minor_ver = Version(ver).release[:2]\n\n if minor_ver not in seen_minors:\n seen_minors.add(minor_ver)\n res.insert(0, ver)\n\n return res", "def get_releases(repo, quiet=False, per_page=None) -> List[str]:\n req_url = f\"https://api.github.com/repos/{owner}/{repo}/releases\"\n\n params = {}\n if per_page is not None:\n if per_page < 1 or per_page > 100:\n raise ValueError(\"per_page must be between 1 and 100\")\n params[\"per_page\"] = per_page\n\n request = get_request(req_url, params=params)\n num_tries = 0\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code in (404, 503) and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n print(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n releases = json.loads(result.decode())\n if not quiet:\n print(f\"found {len(releases)} releases for {owner}/{repo}\")\n\n avail_releases = [\"latest\"]\n avail_releases.extend(release[\"tag_name\"] for release in releases)\n return avail_releases", "def get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]:\n\n @retry_multi(5)\t# retry at most 5 times\n def execute_request(path):\n \"\"\"!\n @brief Performs a GET request with the given path. 
To be used with Github's REST API.\n @returns If successful, returns a .JSON object\n \"\"\"\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\"\n }\n url = \"https://api.github.com\" + path\n\n # GET https://api.github.com/<path> Accept: \"application/vnd.github.v3+json\"\n\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n\n response.raise_for_status() # Raise a RequestException if we failed, and trigger retry\n\n return response.json()\n\n build_group_regex = re.compile(\"fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*\") # regex for matching binary .zip's and .7z's\n source_file_regex = re.compile(\"fs2_open_.*-source-([^.]*)?.*\") # regex for matching source .zip's and .7z's\n\n # Get the github release metadata of the given tag name\n response = execute_request(\n \"/repos/{}/releases/tags/{}\".format(config[\"github\"][\"repo\"], tag_name))\n\n # Extract the binary and source files from the response[\"asset\"] metadata\n binary_files = []\n source_files = {}\n for asset in response[\"assets\"]:\n url = asset[\"browser_download_url\"]\n name = asset[\"name\"]\n\n group_match = build_group_regex.match(name)\n\n if group_match is not None:\n platform = group_match.group(1)\n # x64 is the Visual Studio name but for consistency we need Win64\n if platform == \"x64\":\n platform = \"Win64\"\n\n binary_files.append(ReleaseFile(name, url, platform, group_match.group(3)))\n else:\n group_match = source_file_regex.match(name)\n\n if group_match is None:\n continue\n\n group = group_match.group(1)\n\n source_files[group] = SourceFile(name, url, group)\n\n binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)\n\n return binary_files, source_files", "def get_version():\n try:\n return check_output(\n \"git describe --tags\".split(\" \")\n ).decode('utf-8').strip()\n except CalledProcessError:\n return check_output(\n \"git rev-parse --short HEAD\".split(\" \")\n ).decode('utf-8').strip()", "def version():\n import inspect\n import shlex\n import subprocess\n\n def output(command):\n path = os.path.realpath(os.path.dirname(inspect.stack(0)[0][1]))\n return subprocess.check_output(shlex.split(command), cwd=path).strip()\n\n return (\n output(\"git rev-parse --show-toplevel\"),\n output(\"git remote get-url origin\"),\n output(\"git describe --always\"),\n )", "def do_list_versions(**kwargs):\n mle = MLEngineHook()\n model_name = kwargs['dag_run'].conf.get('model_name')\n model_versions = mle.list_versions(PROJECT, model_name)\n kwargs['ti'].xcom_push(key='model_versions', value=model_versions)", "def git_changelog(\n path=None,\n tags=None,\n append_tags=None,\n git_bin=None,\n format='rst',\n heading_char=None,\n heading_level=2,\n include_cmd=True):\n\n git_bin = (\n distutils.spawn.find_executable('git') if git_bin is None else git_bin)\n git_cmd = [git_bin]\n if path:\n git_cmd.extend(['-R', path])\n\n _format = format.lower()\n if heading_char is None:\n if _format == 'rst':\n heading_char = '^'\n elif _format == 'md':\n heading_char = '#'\n\n def git_list_tags(tags=None,\n tagrgx=TAGRGX_DEFAULT,\n append_tags=None,\n git_cmd=git_cmd,\n heading_level=heading_level,\n include_cmd=include_cmd,\n ):\n \"\"\"List git tag pairs which match a regex\n\n Keyword Arguments:\n tags (list): empty list of addition tags\n tagrgx (``rawstr``): default: ``'v?\\d+.*'``\n append_tags (list or None): additional tags to append\n git_cmd (list): list of command strings\n heading_level (int): heading level 2 = '##'\n include_cmd=True,\n\n Yields:\n str: tag name\n\n \"\"\"\n 
git_list_tags_cmd = git_cmd[:] + ['tag', '-l']\n\n if tags is None:\n\n if True:\n git_get_first_rev_cmd = [\n 'rev-list', '--all', '--reverse', '--abbrev-commit'] #|head -n 1\n cmd = git_cmd + git_get_first_rev_cmd\n first_rev_output = subprocess.check_output(cmd).splitlines()\n if not first_rev_output:\n raise Exception(('no first revision found:',\n ('cmd', cmd),\n ('output', first_rev_output)))\n else:\n yield first_rev_output[0].rstrip()\n\n tag_output = subprocess.check_output(git_list_tags_cmd).splitlines()\n logging.debug(('tag_output', tag_output))\n\n # import semantic_version\n versiontags = []\n for x in tag_output:\n x = str(x)\n if re.match(tagrgx, x):\n if x.startswith('v'):\n _x = x[1:]\n elif x.startswith('release/'):\n _x = x[7:]\n else:\n _x = x\n ver = semantic_version.Version(_x.rstrip())\n versiontags.append((ver, x))\n for version, _tag in sorted(versiontags):\n yield _tag\n if append_tags:\n for _tag in append_tags:\n yield _tag\n\n tagsiter = git_list_tags(tags=tags,\n append_tags=append_tags,\n git_cmd=git_cmd)\n tags = list(tagsiter)\n logging.debug(('tags', tags))\n\n def git_get_rev_date(revstr, git_cmd=git_cmd):\n git_get_rev_date_cmd = ['log', '-n1', revstr, '--format=%ci']\n cmd = git_cmd + git_get_rev_date_cmd\n return subprocess.check_output(cmd).strip()\n\n def iter_tag_pairs(tags,\n format='rst',\n heading_char='^',\n heading_level=2,\n include_cmd=True,\n encoding='utf-8'\n ):\n \"\"\"Iterate over 2-tuple tag pairs e.g. ``[(tag1, tag2), ]``\n\n Args:\n tags (list\n \"\"\"\n tagdates = collections.OrderedDict()\n tagpairsiter = _izip(tags,\n itertools.islice(tags, 1, None))\n tagpairs = list(tagpairsiter)\n logging.debug(('tagpairs', tagpairs))\n\n _format = format.lower()\n if _format not in ['rst', 'md']:\n raise ValueError(('format unsupported', _format))\n\n def iter_release_data(tagpairs, git_cmd):\n for (tag1, tag2) in tagpairs[::-1]:\n data = {}\n tag1 = tag1.decode(encoding) if hasattr(tag1, 'decode') else tag1\n #tag1date = tagdates.setdefault(tag1, git_get_rev_date(tag1))\n tag2date = tagdates.setdefault(tag2, git_get_rev_date(tag2))\n data['tag2date'] = tag2date\n heading = rst_escape(\"%s (%s)\" % (tag2, tag2date.decode(encoding))) # TODO: date\n data['heading'] = heading\n logpath = \"%s..%s\" % (tag1, tag2)\n data['logpath'] = logpath\n changelog_cmd = ['log', '--reverse', '--pretty=format:* %s [%h]', logpath]\n data['changelog_cmd'] = changelog_cmd\n changelog_cmdstr = \"log --reverse --pretty=format:'* %s [%h]' \" + logpath\n data['changelog_cmdstr'] = changelog_cmdstr\n cmd = git_cmd + changelog_cmd\n data['cmd'] = cmd\n logging.debug(cmd)\n logging.debug(('cmdstr*', ' '.join(cmd)))\n output = subprocess.check_output(cmd)\n data['_output'] = output\n data['output_rst'] = rst_escape(output)\n yield data\n\n #\n tag1 = tag2\n\n def template_as_rst(tagpairs,\n git_cmd,\n heading_char=heading_char,\n include_cmd=True):\n for data in iter_release_data(tagpairs, git_cmd):\n # RST heading\n yield ''\n yield ''\n yield data['heading']\n yield heading_char * len(data['heading'])\n if include_cmd:\n yield \"::\"\n yield \"\"\n yield \" git %s\" % (data['changelog_cmdstr'])\n yield \"\"\n yield data['output_rst']\n\n def template_as_md(tagpairs,\n git_cmd,\n heading_char='#',\n heading_level=2,\n include_cmd=True):\n for data in iter_release_data(tagpairs, git_cmd):\n # RST heading\n yield ''\n yield ''\n if heading_level:\n yield \"%s %s\" % ((heading_level * heading_char), data['heading'])\n if include_cmd:\n yield \"```bash\"\n yield \"$ git 
%s\" % (data['changelog_cmdstr'])\n yield \"```\"\n yield \"\"\n yield data['output_rst']\n\n if _format == 'rst':\n return template_as_rst(\n tagpairs,\n git_cmd,\n heading_char=heading_char,\n include_cmd=include_cmd)\n elif _format == 'md':\n return template_as_md(\n tagpairs,\n git_cmd,\n heading_char=heading_char,\n heading_level=heading_level,\n include_cmd=include_cmd)\n\n\n for line in iter_tag_pairs(tags,\n format=format,\n heading_char=heading_char,\n heading_level=heading_level,\n include_cmd=include_cmd):\n yield line", "def getTags(number=None):", "def get_tags(self):\n tags = []\n for image in self.client.images.list():\n for tag in image.tags:\n if tag.startswith(self.repository_name):\n tokens = tag.split(':')\n tags.append(tokens[1])\n return tags", "def tags(self):\r\n url = self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))", "def get_released_versions(package_name):\n url = \"https://pypi.python.org/pypi/{}/json\".format(package_name)\n data = json.load(urllib.request.urlopen(url))\n\n versions = {\n # We can actually select any element in `dist_files` because all the distribution files\n # should have almost the same upload time.\n version: dist_files[0][\"upload_time\"]\n for version, dist_files in data[\"releases\"].items()\n # If len(dist_files) = 0, this release is unavailable.\n # Example: https://pypi.org/project/xgboost/0.7\n #\n # > pip install 'xgboost==0.7'\n # ERROR: Could not find a version that satisfies the requirement xgboost==0.7\n if len(dist_files) > 0 and (not dist_files[0].get(\"yanked\", False))\n }\n return versions", "def get_version():\n import subprocess\n proc = subprocess.Popen(\n 'hg log -r tip --template \"{latesttagdistance}\"',\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n pending, _ = proc.communicate()\n return \"%(tag)sd%(pending)s\" % dict(tag=config.TAG, pending=pending)", "def certifiVersions():\n log = logger.new(function='certifiVersions')\n r = yield treq.get('https://pypi.python.org/pypi/certifi/json', timeout=5)\n log.msg(\"got certifi versions!\")\n data = yield r.json()\n\n # Note: this takes advantage of the fact that certifi's releases have the\n # same version number sort order as lexicographical. If that changes,\n # this will break.\n releases = sorted(data[u'releases'].keys())\n\n first_release = releases.index('14.05.14')\n target_versions = releases[first_release:]\n\n result = []\n for version in target_versions:\n files = data[u'releases'][version]\n\n # Find the .tar.gz release.\n for file in files:\n if file[u'filename'].endswith(u'.tar.gz'):\n break\n else:\n continue\n\n log.msg(\"new release located\", version=version, tarball=file[u'url'])\n result.append((version, file[u'url']))\n\n returnValue(result)", "def listNoteVersions(self, authenticationToken, noteGuid):\r\n pass", "def make_semver(repo_root, build_number):\n branch_name, sha, tags = parse_describe(repo_root)\n if tags:\n # There are git tags to consider. 
Parse them all then choose the one that is latest (sorted by semver rules)\n return sorted([make_version_number(branch_name, build_number, tag, sha) for tag in tags])[-1]\n else:\n return make_version_number(branch_name, build_number, None, sha)", "def version_check():\n try:\n with open('git.json', 'r') as fp:\n git_md = json.loads(fp.read())\n except IOError:\n # In the event that there is no git metadata, just print null values\n # twice.\n print \"null\"\n print \"null\"\n return\n\n if git_md['GitHub']:\n if git_md['GitHubUser'] is not None and git_md[\n 'GitHubRepo'] is not None:\n latest_release = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/releases/latest\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n latest_tag = latest_release['tag_name']\n\n # Go through all of the tags to see if this commit matches a tag.\n tags = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/git/refs/tags\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n\n current_tag = \"Unreleased\"\n for tag in tags:\n if tag['object']['sha'] == git_md['GitSHA']:\n current_tag = tag['ref'].split('/')[-1]\n\n print current_tag\n print latest_tag\n else:\n print \"MissingGitHubDetails\"\n print \"MissingGitHubDetails\"\n else:\n # In the event that there is a git file, but it doesn't indicate GitHub\n # then just print some stuff indicating that.\n print \"NonGitHub\"\n print \"NonGitHub\"", "def tags():", "def getversion_git(path=None):\n _program_dir = path or _get_program_dir()\n cmd = 'git'\n try:\n subprocess.Popen([cmd], stdout=subprocess.PIPE).communicate()\n except OSError:\n # some Windows git versions provide git.cmd instead of git.exe\n cmd = 'git.cmd'\n\n with open(os.path.join(_program_dir, '.git/config')) as f:\n tag = f.read()\n # Try 'origin' and then 'gerrit' as remote name; bail if can't find either.\n remote_pos = tag.find('[remote \"origin\"]')\n if remote_pos == -1:\n remote_pos = tag.find('[remote \"gerrit\"]')\n if remote_pos == -1:\n tag = '?'\n else:\n s = tag.find('url = ', remote_pos)\n e = tag.find('\\n', s)\n tag = tag[(s + 6):e]\n t = tag.strip().split('/')\n tag = f\"[{t[0][:-1]}] {'-'.join(t[3:])}\"\n dp = subprocess.Popen([cmd, '--no-pager',\n 'log', '-1',\n '--pretty=format:\"%ad|%an|%h|%H|%d\"',\n '--abbrev-commit',\n '--date=iso'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n info, _ = dp.communicate()\n info = info.decode(config.console_encoding).split('|')\n date = info[0][:-6]\n date = time.strptime(date.strip('\"'), '%Y-%m-%d %H:%M:%S')\n dp = subprocess.Popen([cmd, 'rev-list', 'HEAD'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n rev, stderr = dp.communicate()\n rev = f'g{len(rev.splitlines())}'\n hsh = info[3] # also stored in '.git/refs/heads/master'\n if (not date or not tag or not rev) and not path:\n raise VersionParseError\n return (tag, rev, date, hsh)", "def get_revision_list(self):\n response = self._get_request(\n DeckhandClient.get_path(DeckhandPaths.REVISION_LIST)\n )\n self._handle_bad_response(response)\n revisions = yaml.safe_load(response.text)\n return revisions.get('results', [])", "def do_version_tag(args, image_name_tag, image_name):\n if args.versiontag is True:\n date_stamp = \"{:%Y%m%d%H%M%S}\".format(datetime.now())\n version_tag = args.tag + '-' + date_stamp\n image_name_version_tag = f\"{image_name}:{version_tag}\"\n return_code = tag(image_name_tag, image_name_version_tag)\n if return_code == 0:\n push(args, image_name_version_tag)", "def get_versions(self):\n versions = 
TextVersion.objects.filter(text__exact=self).order_by('-created')\n # TODO: use new postgresql 8.4 row_number as extra select to do that\n #for index in xrange(len(versions)):\n # v = versions[index]\n # # version_number is 1-based\n # setattr(v, 'version_number', len(versions) - index)\n return versions", "def github_svn_rev2hash(tag: str, rev): # pragma: no cover\n uri = f'https://github.com/wikimedia/{tag}/!svn/vcc/default'\n request = fetch(uri, method='PROPFIND',\n data=\"<?xml version='1.0' encoding='utf-8'?>\"\n '<propfind xmlns=\\\"DAV:\\\"><allprop/></propfind>',\n headers={'label': str(rev),\n 'user-agent': 'SVN/1.7.5 {pwb}'})\n dom = xml.dom.minidom.parse(BytesIO(request.content))\n hsh = dom.getElementsByTagName('C:git-commit')[0].firstChild.nodeValue\n date = dom.getElementsByTagName('S:date')[0].firstChild.nodeValue\n date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')\n return hsh, date", "def GetRevisionsSample():\n client = CreateClient()\n for entry in client.GetResources(limit=55).entry:\n revisions = client.GetRevisions(entry)\n for revision in revisions.entry:\n print revision.publish, revision.GetPublishLink()", "def _get_version():\n try:\n code, output = _run_cmd('git', 'describe', '--tags')\n if code:\n return 'unknown'\n output = output.decode('utf8').strip().split('-')\n if len(output) != 3:\n return 'unknown'\n version = '%s+%s' % (output[0], output[2])\n\n code, _ = _run_cmd('git', 'diff', '--quiet')\n if code:\n version += '+dirty'\n\n return version\n except OSError:\n return 'unknown'", "def parse_versions(self, source):\n config = VersionsConfigParser()\n has_read = config.read(source)\n\n if not has_read:\n logger.warning(\"'%s' cannot be read.\", source)\n return []\n\n try:\n versions = config.items('versions')\n except NoSectionError:\n logger.debug(\n \"'versions' section not found in %s.\",\n source\n )\n return []\n\n logger.info(\n '- %d versions found in %s.',\n len(versions), source\n )\n\n return versions", "def getversion_onlinerepo(path: str = 'branches/master'):\n # Gerrit API responses include )]}' at the beginning,\n # make sure to strip it out\n buf = fetch(\n 'https://gerrit.wikimedia.org/r/projects/pywikibot%2Fcore/' + path,\n headers={'user-agent': '{pwb}'}).text[4:]\n try:\n return json.loads(buf)['revision']\n except Exception as e:\n raise VersionParseError(f'{e!r} while parsing {buf!r}')", "def releases():\n result = run('ls %(releases_dir)s' % env)\n releases_list = re.split('\\s+', result)\n releases_list.sort(reverse=True)\n return releases_list", "def get_pypi_versions(pins):\n versions = {}\n for pkgname in pins:\n versions[pkgname] = get_pypi_version(pkgname)\n return versions", "def semver_from(changelog: Path) -> Version:\n with open(changelog) as f:\n matches = SEMVER_RE.finditer(f.read())\n versions: List[Version] = []\n is_unreleased = False\n for match in matches:\n version = match.groupdict()[\"version\"]\n if version.lower() == \"unreleased\":\n is_unreleased = True\n else:\n versions.append(Version.parse(version))\n\n versions.sort()\n latest = versions[-1]\n print(latest)\n return latest.bump_prerelease() if is_unreleased else latest", "def get_object_tagging(Bucket=None, Key=None, VersionId=None):\n pass", "def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")", "def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")", "def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")", "def 
api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")", "def list_versions(self):\n version_url = self._get_base_version_url()\n\n resp, body = self.raw_request(version_url, 'GET')\n # NOTE: We need a raw_request() here instead of request() call because\n # \"list API versions\" API doesn't require an authentication and we can\n # skip it with raw_request() call.\n self._error_checker(resp, body)\n\n body = json.loads(body)\n self.validate_response(schema.list_versions, resp, body)\n return rest_client.ResponseBody(resp, body)", "def process_git_tag(regex, inputtag):\n\ttry: \n\t\tgitre = re.compile(regex)\n\t\tmatch = gitre.search(inputtag)\n\t\tgroups = match.groupdict()\n\t\tversion = groups.get('version', '.unknown')\n\t\tdate = groups.get('date', '')\n\t\tgitmeta = groups.get('gitmeta', '')\n\t\tif date:\n\t\t\tversion = '.'.join([version, ''.join(date.split('-'))])\n\texcept (AttributeError, EnvironmentError, OSError):\n\t\tversion, gitmeta = '.unknown', ''\n\n\treturn version, gitmeta", "def list_versions(quartus_versions):\n for key in quartus_versions.keys():\n print(key)", "def get_artefactversions(self, artefact):\n\n if self.url == 'test':\n artefactversionlist = [artefact + '-1.0.0-80.x86_64.rpm', artefact + '-1.0.0-81.x86_64.rpm']\n else:\n if 'fk-' in artefact:\n tmp = artefact.split('fk-')\n leverable = tmp[1].split('_')[0]\n else:\n leverable = 'tools'\n\n artefactversionlist = []\n try:\n response = urlopen(\n 'http://' + self.url + '/nexus/service/local/lucene/search?repositoryId=rpm-dev&g=fk.rpm.'\n + leverable + '&a=' + artefact)\n except (HTTPError, URLError) as e:\n logger.error(e)\n return ['Error getting artefactversions!!!']\n\n metadata_root = elementTree.parse(response)\n for data in metadata_root.iter('artifact'):\n extension = 'x86_64.rpm'\n for ext in data.findall('.//extension'):\n if 'rpm' in ext.text:\n extension = ext.text\n artefactversionlist.append(artefact + '-' + '.' 
+ extension + '.rpm')\n # artefactversiondict[data.find('version').text] = extension\n\n return artefactversionlist", "def show_tags(config, args):\n for item in lib.input_json_lines():\n yield config.repo.tag(item)", "def versions(self, stored=False) -> List['RadsSolutionVersion']:\n\n if stored:\n fspath = self.storage.fspath(self.path)\n if not os.path.isdir(fspath):\n return [] # solution not in storage\n listing = []\n for path in os.listdir(fspath):\n if not os.path.isdir(os.path.join(fspath, path)):\n continue\n listing.append(path)\n else:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\").splitlines()\n return sorted(RadsSolutionVersion(self, RadsVersion(l)) for l in listing)", "def get(self) -> Iterable[instarepo.github.Repo]:\n return self._filter_pushed_after(\n self._filter_pushed_before(\n self._filter_language(\n self._filter_prefix(\n self._filter_forks(\n self._filter_archived(\n self.github.get_all_repos(self.sort, self.direction)\n )\n )\n )\n )\n )\n )", "def versionHistory(self):\n url = self.metaData().getLink(\"version-history\")\n assert url is not None\n\n header = self._baseHeader.copy()\n response = self._adapter.getRequest(url, header)\n\n return json.loads(response['Body'])", "def get_versions(self, async = False):\n\n\t\tself._send_message(\"VERSION\", \"\\x00\")\n\n\t\tif not async:\n\t\t\treturn EndpointSync(self, \"VERSION\").get_data()", "def get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release", "def test_sort_git_master_and_latest(self):\n identifiers = [\"latest\", \"master\", \"1.0\", \"2.0\", \"1.1\", \"1.9\", \"1.10\"]\n self.project.repo_type = REPO_TYPE_GIT\n self.project.save()\n self.project.versions.get(slug=LATEST).delete()\n\n for identifier in identifiers:\n get(\n Version,\n project=self.project,\n type=BRANCH,\n identifier=identifier,\n verbose_name=identifier,\n slug=identifier,\n )\n\n versions = list(Version.objects.filter(project=self.project))\n self.assertEqual(\n 
[\"master\", \"latest\", \"2.0\", \"1.10\", \"1.9\", \"1.1\", \"1.0\"],\n [v.slug for v in sort_version_aware(versions)],\n )", "def test_master_versions(self):\n m = self.d.master(4242)\n r = self.d.release(79)\n v = m.versions\n\n self.assertEqual(len(v), 2)\n self.assertTrue(r in v)\n self.assertEqual(r.master, m)\n\n r2 = self.d.release(3329867)\n self.assertTrue(r2.master is None)" ]
[ "0.6975342", "0.67174757", "0.66325366", "0.6590542", "0.6541449", "0.64341235", "0.64160585", "0.6401173", "0.6249824", "0.61892974", "0.61882895", "0.618634", "0.61683893", "0.61543375", "0.6109142", "0.60987633", "0.608993", "0.6088227", "0.6088083", "0.6067755", "0.6048255", "0.6031801", "0.59784067", "0.5967782", "0.59651834", "0.59623706", "0.5916952", "0.59006125", "0.5888183", "0.5876248", "0.5844022", "0.5800758", "0.5795267", "0.57910717", "0.5773356", "0.57729566", "0.57677305", "0.57672817", "0.5761738", "0.57571834", "0.57547903", "0.5741839", "0.5718486", "0.57183444", "0.5709966", "0.5700895", "0.5676435", "0.5662947", "0.5660371", "0.5657027", "0.5646151", "0.5645501", "0.5603453", "0.55960965", "0.5589112", "0.5587741", "0.5585738", "0.55855507", "0.55822796", "0.557985", "0.5576967", "0.55713457", "0.5569728", "0.5546848", "0.5534729", "0.55334675", "0.55315983", "0.55195874", "0.551248", "0.5504377", "0.549565", "0.5488155", "0.5481631", "0.5448921", "0.5447098", "0.5441589", "0.5440423", "0.54365057", "0.54350597", "0.5432983", "0.54296243", "0.5428936", "0.5428663", "0.5424152", "0.5423924", "0.5423924", "0.5423924", "0.5423924", "0.5423101", "0.54152274", "0.54108214", "0.5405856", "0.54014295", "0.5388774", "0.5383467", "0.53809303", "0.53801286", "0.53787875", "0.5376756", "0.5372467" ]
0.7527406
0
Get all attribute values.
def __str__(self): status = "height = {}\n".format(self.height) status += "width = {}\n".format(self.width) status += "channels = {}\n".format(self.channels) status += "classes = {}\n".format(self.classes) status += "batch_size = {}\n".format(self.batch_size) status += "epochs = {}\n".format(self.epochs) status += "save_step = {}\n".format(self.save_step) status += "learning_rate = {}\n".format(self.learning_rate) status += "momentum = {}\n".format(self.momentum) return status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_attribute(self):\n for attr, value in self.__dict__.items():\n print(attr, value)", "def values(self):\n return self.attrs.values()", "def values(self):\n return [getattr(self, a.name) for a in self.__attrs_attrs__]", "def getAttributes(self):\n pass", "def getAttributes(self):\n return self.attributes", "def getAttributes(self):\n return self.attributes", "def get_attributes(self):\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return {}\n\n return results['data']\n # tr = {}\n # for attribute in results['data']:\n # tr[attribute['attribute']['name']] = attribute['value']\n # return tr", "def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)", "def getAllAttributes(self, limit=None):\n return self.getAttributeRange(limit=limit)", "def GetAttributes(self):\r\n\r\n return self._attr", "def get_attributes(self):\n return self.attributes", "def all(self):\r\n return self.attr_dict.keys()", "def all_attributes(self):\n\n attributes = []\n for level in self.levels:\n attributes.extend(level.attributes)\n\n return attributes", "def get_attributes(self):\n return self._attributes_cache", "def get_attributes(cls):\r\n return []", "def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;", "def attrib(self) -> Any:\n return self.attributes", "def attributes(self):\n\n return list(self._attributes.values())", "def get_attribute_list(self):\n return self.dp.get_attribute_list()", "def get_attr_values_with_name(self, name):\n return [attr.value for attr in self.get_attrs_with_name(name)]", "def get_attributes(cls):\n return cls._attributes", "def _get_all_attributes(self):\n all_attributes = self.__dict__.copy()\n all_attributes.update(self.class_attributes)\n return all_attributes", "def _get_all_attributes(self):\n\n attributes= []\n for shader in self._verts:\n attributes.extend(shader.attributes)\n # No attribute in fragment shaders\n attributes = list(set(attributes))\n return attributes", "def getAttributes(self):\n return _libsbml.XMLToken_getAttributes(self)", "def Values(self):\r\n\t\treturn self._get_attribute('values')", "def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list", "def get_attributes(self) -> Dict[str, str]:\n pass", "def read_global_attributes(self):\n return self._attrs.keys()", "def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes", "def _get_all_attributes(self) -> Dict[str, Any]:\n all_attributes = self.__dict__.copy()\n all_attributes.update(self.class_attributes)\n return all_attributes", "def attributes(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")", "def getComputedAttributes(self) -> list:\n if self.loggingEnabled:\n self.logger.debug(f\"Starting getComputedAttributes\")\n path = \"/config/computedAttributes\"\n res = self.connector.getData(self.endpoint + path)\n data = res[\"children\"]\n nextPage = res[\"_page\"].get(\"next\", \"\")\n # while nextPage != \"\":\n # res = self.connector.getData(self.endpoint+path,\n # params=params, 
headers=self.header)\n # data += res['children']\n # nextPage = res['_page'].get('next','')\n return res", "def get_attributes(self):\n\t\treturn dict(list(self.__element.items()))", "def getattrs(self, attrlist):\n\t\treturn np.array([getattr(self, attr) for attr in attrlist])", "def iter_attributes(self):\n return iteritems(self.schema)", "def ListAttributes(self):\n\n ListAttributes(self)", "def get_attributes(cls):\r\n return [\r\n Attribute('height', None),\r\n Attribute('width', None),\r\n Attribute('parts', None),\r\n Attribute('analyses', None),\r\n Attribute('initial_value', None),\r\n Attribute('submit_analyses', None),\r\n Attribute('label', ''),\r\n ]", "def listglobal(self):\n return list(self.attributes.keys())", "def attributes(self):\n\n attributes = []\n\n for member in self.members:\n if member.attribute:\n attributes.append(member)\n\n return attributes", "def get_attributes(self):\n attrs = list()\n syms = list()\n for item in self.gradual_items:\n gi = item.as_integer()\n attrs.append(gi[0])\n syms.append(gi[1])\n return attrs, syms", "def iterate(self):\n if self.output_info:\n return self.output_info.iteritems()\n else:\n return self.attributes.iteritems()", "def getPropertiesAll():", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def attributes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"attributes\")", "def get_attributes(doc):\n\treturn doc.keys()", "def attributes(self):\n return self.__dict.keys()", "def getAttributes(self, keys):\n return self.graph._readExtendedAttributes(dbKeys)", "def get_all_attributes(dataset):\n return dataset.flatMap(lambda x: x.keys()).distinct().collect()", "def attributes(self):", "def get_attributes(cls, entity):\n return entity.category.facts.all()", "def all_values(cls) -> List[str]:\n return list(member.value for member in cls.__members__.values())", "def get_attributes_from_amount_of_elements(self, key):\n l_of_attr_val = []\n els = self.driver.find_elements(self.by, self.value)\n for i in range(len(els)):\n el = els[0].find_elements(self.by, self.value)[i].get_attribute(key)\n l_of_attr_val.append(el)\n logging.getLogger(__name__).info(\n \"Attributes from amount of elements: {}\\nby = {}\\nvalue = {}\".format(l_of_attr_val, self.by, self.value))\n return l_of_attr_val", "def get_attributes(cls):\r\n return [Attribute('size', '20'),\r\n Attribute('label', ''), ]", "def values(self):\r\n return self.__values", "def _values(self):\n return self.__values", "def values(self):\n return iter(util.annotate(v) for _, v in self._pairs())", "def getAttributes(self, convertToString = False):\n d = self.__dict__\n list = []\n \n # loop through list given return values in proper format\n for item in self.defaultAttributes:\n if d.has_key(item):\n if convertToString:\n list.append(str(d[item]))\n else:\n list.append(d[item])\n return list", "def get_all_values(self):\n return self.display_table.get_all_values(root=self.display_table_root,include=self.params)", "def attributes(self) -> Optional[Mapping[str, Sequence[str]]]:\n return pulumi.get(self, \"attributes\")", "def GetAttributes(self):\n return dict(self._attrs)", "def getValues(self):\n return self.__get('values')", "def values():", "def load_attrs(self):\n return loads(self.get_attr().GetObject()) or {}", "def _getAttrMap(self):\r\n if not 
getattr(self, 'attrMap'):\r\n self.attrMap = {}\r\n for (key, value) in self.attrs:\r\n self.attrMap[key] = value\r\n return self.attrMap", "def attributes(self) -> typing.Iterator[typing.Tuple[str]]:\n minimize = self.lattice._context._minimize(self._extent, self._intent)\n return (i.members() for i in minimize)", "def values(cls):\n if cls._values is None:\n cls._values = tuple(getattr(cls, attr) for attr in cls.keys())\n return cls._values", "def get_attributes(cls):\r\n return [\r\n Attribute('size', '20'),\r\n Attribute('inline', False),\r\n Attribute('label', ''),\r\n ]", "def attributes(self, *args):\n kwargs = {}\n if args:\n kwargs[\"attributenames\"] = args\n\n r = self._token_id_request(urljoin(self._url, Client._attribute_resource), **kwargs)\n\n # parse contennt looking for all attributes\n attributes = []\n for line in r.text.splitlines():\n r = re.match(\"(userdetails\\.attribute\\.name)=(.*)\", line)\n if r:\n name = r.groups()[1]\n attributes.append([name, None])\n continue # next line\n\n r = re.match(\"(userdetails\\.attribute\\.value)=(.*)\", line)\n if r:\n value = r.groups()[1]\n # last name parsed is where it has to\n # be stacked\n if attributes[-1][1] == None:\n attributes[-1][1] = value\n if isinstance(attributes[-1][1], list):\n attributes[-1][1].append(value)\n else:\n # cast to list\n attributes[-1].append([attributes[-1][1], value])\n\n return dict([(item[0], item[1]) for item in attributes])", "def valuerefs(self):\r\n return self.data.values()", "def values(self):\n return [ self[x] for x in self ]", "def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]", "def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]", "def GetValues(self):", "def values(self):\n\t\treturn self.myVals", "def values(self):\n return self._ctx.values()", "def values(self):\r\n return [self[k] for k in self]", "def get_values(self):\n return map(lambda x: x.value(),self)", "def attrs(self):\n return self.__dict__", "def attributes(self):\n ...", "def values(self):\n values = []\n for key in self.keys():\n values.append(self[key])\n return values", "def values (self):\n return self._values", "def values (self):\n return self._values", "def readAttributes(self, *args):\n return _libsbml.ASTBasePlugin_readAttributes(self, *args)", "def view_values(self, attr_list): # DONE\n values = {}\n for attr in attr_list:\n values[attr] = list(self.data[attr].values)\n return json.dumps(values)", "def _attrlist(self,obj, attrs):\n vlist = [obj.__getattribute__(attr) for attr in attrs]\n return vlist", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self[\"values\"]", "def values(self):\n return self[\"values\"]", "def GetAttributeValuesString(self):\n attributes = []\n for attribute_name, attribute_value in sorted(self.__dict__.items()):\n # Not using startswith to improve performance.\n if attribute_name[0] == '_' or attribute_value is None:\n continue\n\n if isinstance(attribute_value, bytes):\n raise TypeError(\n 'Attribute: {0:s} value of type bytes not supported.'.format(\n attribute_name))\n\n if isinstance(attribute_value, dict):\n raise TypeError(\n 'Attribute: {0:s} value of type dict not supported.'.format(\n attribute_name))\n\n attribute_string = '{0:s}: {1!s}'.format(attribute_name, attribute_value)\n attributes.append(attribute_string)\n\n return ', 
'.join(attributes)", "def values(self):\n return [p.value for p in self]", "def getattrs(self):\n # ICAT 4.5.0 also lists the meta attributes as attributes in\n # the entity info. Need to remove them here, as they should\n # not be added to InstAttr.\n return self.getfieldnames('ATTRIBUTE') - Entity.MetaAttr", "def values(self):\n return [self[name] for name in self.keys()]", "def getAttributes(self, name):\r\n ent = self.entities[name]\r\n\r\n attrs = []\r\n while ent != None:\r\n this_ent_attrs = copy.copy(ent[\"attributes\"])\r\n this_ent_attrs.reverse()\r\n attrs.extend(this_ent_attrs)\r\n ent = self.entities.get(ent[\"supertype\"], None)\r\n\r\n attrs.reverse()\r\n return attrs", "def get_attribute_values_from_log(log, attribute_name):\n attributes = attributes_filter.get_attribute_values(log, attribute_name)\n return attributes", "def values(cls):\n return cls._values", "def attributes(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")" ]
[ "0.7782514", "0.7658524", "0.75126153", "0.7339408", "0.7190724", "0.7190724", "0.71009326", "0.7089321", "0.7067784", "0.7013646", "0.7010554", "0.6959679", "0.6932175", "0.687121", "0.6866942", "0.6840013", "0.6839982", "0.68385345", "0.6838188", "0.67684263", "0.67606723", "0.67578924", "0.67535", "0.6718302", "0.67153895", "0.6693746", "0.6693646", "0.66679907", "0.6660982", "0.66444445", "0.6616469", "0.6598152", "0.65589565", "0.6489182", "0.646971", "0.6442366", "0.64413476", "0.64396834", "0.6436057", "0.6416528", "0.63791585", "0.6373064", "0.6321512", "0.6321512", "0.6321512", "0.63130164", "0.63068336", "0.62967134", "0.62876576", "0.6276588", "0.62684643", "0.6255983", "0.62382644", "0.62316406", "0.61985487", "0.61669344", "0.6164574", "0.61609817", "0.61531335", "0.61516505", "0.6143543", "0.612968", "0.61278564", "0.61223686", "0.610757", "0.6103805", "0.60668665", "0.6062784", "0.6062464", "0.6059799", "0.6054859", "0.6052001", "0.6039108", "0.6039108", "0.60357594", "0.6032907", "0.60299164", "0.6020301", "0.6010628", "0.6010612", "0.60006917", "0.59912056", "0.59879804", "0.59879804", "0.5983913", "0.59765106", "0.5976109", "0.597478", "0.597478", "0.597478", "0.597478", "0.5969098", "0.5969098", "0.59666413", "0.5965913", "0.59639686", "0.5963501", "0.59619415", "0.59483457", "0.59430414", "0.5935401" ]
0.0
-1
Get all attribute values.
def __str__(self):
    status = "height = {}\n".format(self.height)
    status += "width = {}\n".format(self.width)
    status += "channels = {}\n".format(self.channels)
    status += "input_dim = {}\n".format(self.input_dim)
    status += "architecture = {}\n".format(self.architecture)
    status += "activations = {}\n".format(self.activations)
    status += "batch_size = {}\n".format(self.batch_size)
    status += "epochs = {}\n".format(self.epochs)
    status += "save_step = {}\n".format(self.save_step)
    status += "learning_rate = {}\n".format(self.learning_rate)
    status += "momentum = {}\n".format(self.momentum)
    return status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_attribute(self):\n for attr, value in self.__dict__.items():\n print(attr, value)", "def values(self):\n return self.attrs.values()", "def values(self):\n return [getattr(self, a.name) for a in self.__attrs_attrs__]", "def getAttributes(self):\n pass", "def getAttributes(self):\n return self.attributes", "def getAttributes(self):\n return self.attributes", "def get_attributes(self):\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return {}\n\n return results['data']\n # tr = {}\n # for attribute in results['data']:\n # tr[attribute['attribute']['name']] = attribute['value']\n # return tr", "def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)", "def getAllAttributes(self, limit=None):\n return self.getAttributeRange(limit=limit)", "def GetAttributes(self):\r\n\r\n return self._attr", "def get_attributes(self):\n return self.attributes", "def all(self):\r\n return self.attr_dict.keys()", "def all_attributes(self):\n\n attributes = []\n for level in self.levels:\n attributes.extend(level.attributes)\n\n return attributes", "def get_attributes(self):\n return self._attributes_cache", "def get_attributes(cls):\r\n return []", "def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;", "def attrib(self) -> Any:\n return self.attributes", "def attributes(self):\n\n return list(self._attributes.values())", "def get_attribute_list(self):\n return self.dp.get_attribute_list()", "def get_attr_values_with_name(self, name):\n return [attr.value for attr in self.get_attrs_with_name(name)]", "def get_attributes(cls):\n return cls._attributes", "def _get_all_attributes(self):\n all_attributes = self.__dict__.copy()\n all_attributes.update(self.class_attributes)\n return all_attributes", "def _get_all_attributes(self):\n\n attributes= []\n for shader in self._verts:\n attributes.extend(shader.attributes)\n # No attribute in fragment shaders\n attributes = list(set(attributes))\n return attributes", "def getAttributes(self):\n return _libsbml.XMLToken_getAttributes(self)", "def Values(self):\r\n\t\treturn self._get_attribute('values')", "def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list", "def get_attributes(self) -> Dict[str, str]:\n pass", "def read_global_attributes(self):\n return self._attrs.keys()", "def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes", "def _get_all_attributes(self) -> Dict[str, Any]:\n all_attributes = self.__dict__.copy()\n all_attributes.update(self.class_attributes)\n return all_attributes", "def attributes(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")", "def getComputedAttributes(self) -> list:\n if self.loggingEnabled:\n self.logger.debug(f\"Starting getComputedAttributes\")\n path = \"/config/computedAttributes\"\n res = self.connector.getData(self.endpoint + path)\n data = res[\"children\"]\n nextPage = res[\"_page\"].get(\"next\", \"\")\n # while nextPage != \"\":\n # res = self.connector.getData(self.endpoint+path,\n # params=params, 
headers=self.header)\n # data += res['children']\n # nextPage = res['_page'].get('next','')\n return res", "def get_attributes(self):\n\t\treturn dict(list(self.__element.items()))", "def getattrs(self, attrlist):\n\t\treturn np.array([getattr(self, attr) for attr in attrlist])", "def iter_attributes(self):\n return iteritems(self.schema)", "def ListAttributes(self):\n\n ListAttributes(self)", "def get_attributes(cls):\r\n return [\r\n Attribute('height', None),\r\n Attribute('width', None),\r\n Attribute('parts', None),\r\n Attribute('analyses', None),\r\n Attribute('initial_value', None),\r\n Attribute('submit_analyses', None),\r\n Attribute('label', ''),\r\n ]", "def listglobal(self):\n return list(self.attributes.keys())", "def attributes(self):\n\n attributes = []\n\n for member in self.members:\n if member.attribute:\n attributes.append(member)\n\n return attributes", "def get_attributes(self):\n attrs = list()\n syms = list()\n for item in self.gradual_items:\n gi = item.as_integer()\n attrs.append(gi[0])\n syms.append(gi[1])\n return attrs, syms", "def iterate(self):\n if self.output_info:\n return self.output_info.iteritems()\n else:\n return self.attributes.iteritems()", "def getPropertiesAll():", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def attributes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"attributes\")", "def get_attributes(doc):\n\treturn doc.keys()", "def attributes(self):\n return self.__dict.keys()", "def getAttributes(self, keys):\n return self.graph._readExtendedAttributes(dbKeys)", "def get_all_attributes(dataset):\n return dataset.flatMap(lambda x: x.keys()).distinct().collect()", "def attributes(self):", "def get_attributes(cls, entity):\n return entity.category.facts.all()", "def all_values(cls) -> List[str]:\n return list(member.value for member in cls.__members__.values())", "def get_attributes_from_amount_of_elements(self, key):\n l_of_attr_val = []\n els = self.driver.find_elements(self.by, self.value)\n for i in range(len(els)):\n el = els[0].find_elements(self.by, self.value)[i].get_attribute(key)\n l_of_attr_val.append(el)\n logging.getLogger(__name__).info(\n \"Attributes from amount of elements: {}\\nby = {}\\nvalue = {}\".format(l_of_attr_val, self.by, self.value))\n return l_of_attr_val", "def get_attributes(cls):\r\n return [Attribute('size', '20'),\r\n Attribute('label', ''), ]", "def values(self):\r\n return self.__values", "def _values(self):\n return self.__values", "def values(self):\n return iter(util.annotate(v) for _, v in self._pairs())", "def getAttributes(self, convertToString = False):\n d = self.__dict__\n list = []\n \n # loop through list given return values in proper format\n for item in self.defaultAttributes:\n if d.has_key(item):\n if convertToString:\n list.append(str(d[item]))\n else:\n list.append(d[item])\n return list", "def get_all_values(self):\n return self.display_table.get_all_values(root=self.display_table_root,include=self.params)", "def attributes(self) -> Optional[Mapping[str, Sequence[str]]]:\n return pulumi.get(self, \"attributes\")", "def GetAttributes(self):\n return dict(self._attrs)", "def getValues(self):\n return self.__get('values')", "def values():", "def load_attrs(self):\n return loads(self.get_attr().GetObject()) or {}", "def _getAttrMap(self):\r\n if not 
getattr(self, 'attrMap'):\r\n self.attrMap = {}\r\n for (key, value) in self.attrs:\r\n self.attrMap[key] = value\r\n return self.attrMap", "def attributes(self) -> typing.Iterator[typing.Tuple[str]]:\n minimize = self.lattice._context._minimize(self._extent, self._intent)\n return (i.members() for i in minimize)", "def values(cls):\n if cls._values is None:\n cls._values = tuple(getattr(cls, attr) for attr in cls.keys())\n return cls._values", "def get_attributes(cls):\r\n return [\r\n Attribute('size', '20'),\r\n Attribute('inline', False),\r\n Attribute('label', ''),\r\n ]", "def attributes(self, *args):\n kwargs = {}\n if args:\n kwargs[\"attributenames\"] = args\n\n r = self._token_id_request(urljoin(self._url, Client._attribute_resource), **kwargs)\n\n # parse contennt looking for all attributes\n attributes = []\n for line in r.text.splitlines():\n r = re.match(\"(userdetails\\.attribute\\.name)=(.*)\", line)\n if r:\n name = r.groups()[1]\n attributes.append([name, None])\n continue # next line\n\n r = re.match(\"(userdetails\\.attribute\\.value)=(.*)\", line)\n if r:\n value = r.groups()[1]\n # last name parsed is where it has to\n # be stacked\n if attributes[-1][1] == None:\n attributes[-1][1] = value\n if isinstance(attributes[-1][1], list):\n attributes[-1][1].append(value)\n else:\n # cast to list\n attributes[-1].append([attributes[-1][1], value])\n\n return dict([(item[0], item[1]) for item in attributes])", "def valuerefs(self):\r\n return self.data.values()", "def values(self):\n return [ self[x] for x in self ]", "def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]", "def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]", "def GetValues(self):", "def values(self):\n\t\treturn self.myVals", "def values(self):\n return self._ctx.values()", "def values(self):\r\n return [self[k] for k in self]", "def get_values(self):\n return map(lambda x: x.value(),self)", "def attrs(self):\n return self.__dict__", "def attributes(self):\n ...", "def values(self):\n values = []\n for key in self.keys():\n values.append(self[key])\n return values", "def values (self):\n return self._values", "def values (self):\n return self._values", "def readAttributes(self, *args):\n return _libsbml.ASTBasePlugin_readAttributes(self, *args)", "def view_values(self, attr_list): # DONE\n values = {}\n for attr in attr_list:\n values[attr] = list(self.data[attr].values)\n return json.dumps(values)", "def _attrlist(self,obj, attrs):\n vlist = [obj.__getattribute__(attr) for attr in attrs]\n return vlist", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self[\"values\"]", "def values(self):\n return self[\"values\"]", "def GetAttributeValuesString(self):\n attributes = []\n for attribute_name, attribute_value in sorted(self.__dict__.items()):\n # Not using startswith to improve performance.\n if attribute_name[0] == '_' or attribute_value is None:\n continue\n\n if isinstance(attribute_value, bytes):\n raise TypeError(\n 'Attribute: {0:s} value of type bytes not supported.'.format(\n attribute_name))\n\n if isinstance(attribute_value, dict):\n raise TypeError(\n 'Attribute: {0:s} value of type dict not supported.'.format(\n attribute_name))\n\n attribute_string = '{0:s}: {1!s}'.format(attribute_name, attribute_value)\n attributes.append(attribute_string)\n\n return ', 
'.join(attributes)", "def values(self):\n return [p.value for p in self]", "def getattrs(self):\n # ICAT 4.5.0 also lists the meta attributes as attributes in\n # the entity info. Need to remove them here, as they should\n # not be added to InstAttr.\n return self.getfieldnames('ATTRIBUTE') - Entity.MetaAttr", "def values(self):\n return [self[name] for name in self.keys()]", "def getAttributes(self, name):\r\n ent = self.entities[name]\r\n\r\n attrs = []\r\n while ent != None:\r\n this_ent_attrs = copy.copy(ent[\"attributes\"])\r\n this_ent_attrs.reverse()\r\n attrs.extend(this_ent_attrs)\r\n ent = self.entities.get(ent[\"supertype\"], None)\r\n\r\n attrs.reverse()\r\n return attrs", "def get_attribute_values_from_log(log, attribute_name):\n attributes = attributes_filter.get_attribute_values(log, attribute_name)\n return attributes", "def values(cls):\n return cls._values", "def attributes(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")" ]
[ "0.7782514", "0.7658524", "0.75126153", "0.7339408", "0.7190724", "0.7190724", "0.71009326", "0.7089321", "0.7067784", "0.7013646", "0.7010554", "0.6959679", "0.6932175", "0.687121", "0.6866942", "0.6840013", "0.6839982", "0.68385345", "0.6838188", "0.67684263", "0.67606723", "0.67578924", "0.67535", "0.6718302", "0.67153895", "0.6693746", "0.6693646", "0.66679907", "0.6660982", "0.66444445", "0.6616469", "0.6598152", "0.65589565", "0.6489182", "0.646971", "0.6442366", "0.64413476", "0.64396834", "0.6436057", "0.6416528", "0.63791585", "0.6373064", "0.6321512", "0.6321512", "0.6321512", "0.63130164", "0.63068336", "0.62967134", "0.62876576", "0.6276588", "0.62684643", "0.6255983", "0.62382644", "0.62316406", "0.61985487", "0.61669344", "0.6164574", "0.61609817", "0.61531335", "0.61516505", "0.6143543", "0.612968", "0.61278564", "0.61223686", "0.610757", "0.6103805", "0.60668665", "0.6062784", "0.6062464", "0.6059799", "0.6054859", "0.6052001", "0.6039108", "0.6039108", "0.60357594", "0.6032907", "0.60299164", "0.6020301", "0.6010628", "0.6010612", "0.60006917", "0.59912056", "0.59879804", "0.59879804", "0.5983913", "0.59765106", "0.5976109", "0.597478", "0.597478", "0.597478", "0.597478", "0.5969098", "0.5969098", "0.59666413", "0.5965913", "0.59639686", "0.5963501", "0.59619415", "0.59483457", "0.59430414", "0.5935401" ]
0.0
-1
Get all attribute values.
def __str__(self):
    status = "height = {}\n".format(self.height)
    status += "width = {}\n".format(self.width)
    status += "channels = {}\n".format(self.channels)
    status += "architecture = {}\n".format(self.architecture)
    status += "activations = {}\n".format(self.activations)
    status += "conv_activations = {}\n".format(self.conv_activations)
    status += "conv_architecture = {}\n".format(self.conv_architecture)
    status += "kernel_sizes = {}\n".format(self.kernel_sizes)
    status += "pool_kernel = {}\n".format(self.pool_kernel)
    status += "batch_size = {}\n".format(self.batch_size)
    status += "epochs = {}\n".format(self.epochs)
    status += "save_step = {}\n".format(self.save_step)
    status += "learning_rate = {}\n".format(self.learning_rate)
    status += "momentum = {}\n".format(self.momentum)
    return status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_attribute(self):\n for attr, value in self.__dict__.items():\n print(attr, value)", "def values(self):\n return self.attrs.values()", "def values(self):\n return [getattr(self, a.name) for a in self.__attrs_attrs__]", "def getAttributes(self):\n pass", "def getAttributes(self):\n return self.attributes", "def getAttributes(self):\n return self.attributes", "def get_attributes(self):\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return {}\n\n return results['data']\n # tr = {}\n # for attribute in results['data']:\n # tr[attribute['attribute']['name']] = attribute['value']\n # return tr", "def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)", "def getAllAttributes(self, limit=None):\n return self.getAttributeRange(limit=limit)", "def GetAttributes(self):\r\n\r\n return self._attr", "def get_attributes(self):\n return self.attributes", "def all(self):\r\n return self.attr_dict.keys()", "def all_attributes(self):\n\n attributes = []\n for level in self.levels:\n attributes.extend(level.attributes)\n\n return attributes", "def get_attributes(self):\n return self._attributes_cache", "def get_attributes(cls):\r\n return []", "def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;", "def attrib(self) -> Any:\n return self.attributes", "def attributes(self):\n\n return list(self._attributes.values())", "def get_attribute_list(self):\n return self.dp.get_attribute_list()", "def get_attr_values_with_name(self, name):\n return [attr.value for attr in self.get_attrs_with_name(name)]", "def get_attributes(cls):\n return cls._attributes", "def _get_all_attributes(self):\n all_attributes = self.__dict__.copy()\n all_attributes.update(self.class_attributes)\n return all_attributes", "def _get_all_attributes(self):\n\n attributes= []\n for shader in self._verts:\n attributes.extend(shader.attributes)\n # No attribute in fragment shaders\n attributes = list(set(attributes))\n return attributes", "def getAttributes(self):\n return _libsbml.XMLToken_getAttributes(self)", "def Values(self):\r\n\t\treturn self._get_attribute('values')", "def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list", "def get_attributes(self) -> Dict[str, str]:\n pass", "def read_global_attributes(self):\n return self._attrs.keys()", "def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes", "def _get_all_attributes(self) -> Dict[str, Any]:\n all_attributes = self.__dict__.copy()\n all_attributes.update(self.class_attributes)\n return all_attributes", "def attributes(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")", "def getComputedAttributes(self) -> list:\n if self.loggingEnabled:\n self.logger.debug(f\"Starting getComputedAttributes\")\n path = \"/config/computedAttributes\"\n res = self.connector.getData(self.endpoint + path)\n data = res[\"children\"]\n nextPage = res[\"_page\"].get(\"next\", \"\")\n # while nextPage != \"\":\n # res = self.connector.getData(self.endpoint+path,\n # params=params, 
headers=self.header)\n # data += res['children']\n # nextPage = res['_page'].get('next','')\n return res", "def get_attributes(self):\n\t\treturn dict(list(self.__element.items()))", "def getattrs(self, attrlist):\n\t\treturn np.array([getattr(self, attr) for attr in attrlist])", "def iter_attributes(self):\n return iteritems(self.schema)", "def ListAttributes(self):\n\n ListAttributes(self)", "def get_attributes(cls):\r\n return [\r\n Attribute('height', None),\r\n Attribute('width', None),\r\n Attribute('parts', None),\r\n Attribute('analyses', None),\r\n Attribute('initial_value', None),\r\n Attribute('submit_analyses', None),\r\n Attribute('label', ''),\r\n ]", "def listglobal(self):\n return list(self.attributes.keys())", "def attributes(self):\n\n attributes = []\n\n for member in self.members:\n if member.attribute:\n attributes.append(member)\n\n return attributes", "def get_attributes(self):\n attrs = list()\n syms = list()\n for item in self.gradual_items:\n gi = item.as_integer()\n attrs.append(gi[0])\n syms.append(gi[1])\n return attrs, syms", "def iterate(self):\n if self.output_info:\n return self.output_info.iteritems()\n else:\n return self.attributes.iteritems()", "def getPropertiesAll():", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def attributes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"attributes\")", "def get_attributes(doc):\n\treturn doc.keys()", "def attributes(self):\n return self.__dict.keys()", "def getAttributes(self, keys):\n return self.graph._readExtendedAttributes(dbKeys)", "def get_all_attributes(dataset):\n return dataset.flatMap(lambda x: x.keys()).distinct().collect()", "def attributes(self):", "def get_attributes(cls, entity):\n return entity.category.facts.all()", "def all_values(cls) -> List[str]:\n return list(member.value for member in cls.__members__.values())", "def get_attributes_from_amount_of_elements(self, key):\n l_of_attr_val = []\n els = self.driver.find_elements(self.by, self.value)\n for i in range(len(els)):\n el = els[0].find_elements(self.by, self.value)[i].get_attribute(key)\n l_of_attr_val.append(el)\n logging.getLogger(__name__).info(\n \"Attributes from amount of elements: {}\\nby = {}\\nvalue = {}\".format(l_of_attr_val, self.by, self.value))\n return l_of_attr_val", "def get_attributes(cls):\r\n return [Attribute('size', '20'),\r\n Attribute('label', ''), ]", "def values(self):\r\n return self.__values", "def _values(self):\n return self.__values", "def values(self):\n return iter(util.annotate(v) for _, v in self._pairs())", "def getAttributes(self, convertToString = False):\n d = self.__dict__\n list = []\n \n # loop through list given return values in proper format\n for item in self.defaultAttributes:\n if d.has_key(item):\n if convertToString:\n list.append(str(d[item]))\n else:\n list.append(d[item])\n return list", "def get_all_values(self):\n return self.display_table.get_all_values(root=self.display_table_root,include=self.params)", "def attributes(self) -> Optional[Mapping[str, Sequence[str]]]:\n return pulumi.get(self, \"attributes\")", "def GetAttributes(self):\n return dict(self._attrs)", "def getValues(self):\n return self.__get('values')", "def values():", "def load_attrs(self):\n return loads(self.get_attr().GetObject()) or {}", "def _getAttrMap(self):\r\n if not 
getattr(self, 'attrMap'):\r\n self.attrMap = {}\r\n for (key, value) in self.attrs:\r\n self.attrMap[key] = value\r\n return self.attrMap", "def attributes(self) -> typing.Iterator[typing.Tuple[str]]:\n minimize = self.lattice._context._minimize(self._extent, self._intent)\n return (i.members() for i in minimize)", "def values(cls):\n if cls._values is None:\n cls._values = tuple(getattr(cls, attr) for attr in cls.keys())\n return cls._values", "def get_attributes(cls):\r\n return [\r\n Attribute('size', '20'),\r\n Attribute('inline', False),\r\n Attribute('label', ''),\r\n ]", "def attributes(self, *args):\n kwargs = {}\n if args:\n kwargs[\"attributenames\"] = args\n\n r = self._token_id_request(urljoin(self._url, Client._attribute_resource), **kwargs)\n\n # parse contennt looking for all attributes\n attributes = []\n for line in r.text.splitlines():\n r = re.match(\"(userdetails\\.attribute\\.name)=(.*)\", line)\n if r:\n name = r.groups()[1]\n attributes.append([name, None])\n continue # next line\n\n r = re.match(\"(userdetails\\.attribute\\.value)=(.*)\", line)\n if r:\n value = r.groups()[1]\n # last name parsed is where it has to\n # be stacked\n if attributes[-1][1] == None:\n attributes[-1][1] = value\n if isinstance(attributes[-1][1], list):\n attributes[-1][1].append(value)\n else:\n # cast to list\n attributes[-1].append([attributes[-1][1], value])\n\n return dict([(item[0], item[1]) for item in attributes])", "def valuerefs(self):\r\n return self.data.values()", "def values(self):\n return [ self[x] for x in self ]", "def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]", "def attrs(self):\n return [name for name in self.traits() if self.trait_metadata(name, \"attr\")]", "def GetValues(self):", "def values(self):\n\t\treturn self.myVals", "def values(self):\n return self._ctx.values()", "def values(self):\r\n return [self[k] for k in self]", "def get_values(self):\n return map(lambda x: x.value(),self)", "def attrs(self):\n return self.__dict__", "def attributes(self):\n ...", "def values(self):\n values = []\n for key in self.keys():\n values.append(self[key])\n return values", "def values (self):\n return self._values", "def values (self):\n return self._values", "def readAttributes(self, *args):\n return _libsbml.ASTBasePlugin_readAttributes(self, *args)", "def view_values(self, attr_list): # DONE\n values = {}\n for attr in attr_list:\n values[attr] = list(self.data[attr].values)\n return json.dumps(values)", "def _attrlist(self,obj, attrs):\n vlist = [obj.__getattribute__(attr) for attr in attrs]\n return vlist", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self[\"values\"]", "def values(self):\n return self[\"values\"]", "def GetAttributeValuesString(self):\n attributes = []\n for attribute_name, attribute_value in sorted(self.__dict__.items()):\n # Not using startswith to improve performance.\n if attribute_name[0] == '_' or attribute_value is None:\n continue\n\n if isinstance(attribute_value, bytes):\n raise TypeError(\n 'Attribute: {0:s} value of type bytes not supported.'.format(\n attribute_name))\n\n if isinstance(attribute_value, dict):\n raise TypeError(\n 'Attribute: {0:s} value of type dict not supported.'.format(\n attribute_name))\n\n attribute_string = '{0:s}: {1!s}'.format(attribute_name, attribute_value)\n attributes.append(attribute_string)\n\n return ', 
'.join(attributes)", "def values(self):\n return [p.value for p in self]", "def getattrs(self):\n # ICAT 4.5.0 also lists the meta attributes as attributes in\n # the entity info. Need to remove them here, as they should\n # not be added to InstAttr.\n return self.getfieldnames('ATTRIBUTE') - Entity.MetaAttr", "def values(self):\n return [self[name] for name in self.keys()]", "def getAttributes(self, name):\r\n ent = self.entities[name]\r\n\r\n attrs = []\r\n while ent != None:\r\n this_ent_attrs = copy.copy(ent[\"attributes\"])\r\n this_ent_attrs.reverse()\r\n attrs.extend(this_ent_attrs)\r\n ent = self.entities.get(ent[\"supertype\"], None)\r\n\r\n attrs.reverse()\r\n return attrs", "def get_attribute_values_from_log(log, attribute_name):\n attributes = attributes_filter.get_attribute_values(log, attribute_name)\n return attributes", "def values(cls):\n return cls._values", "def attributes(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")" ]
[ "0.7782514", "0.7658524", "0.75126153", "0.7339408", "0.7190724", "0.7190724", "0.71009326", "0.7089321", "0.7067784", "0.7013646", "0.7010554", "0.6959679", "0.6932175", "0.687121", "0.6866942", "0.6840013", "0.6839982", "0.68385345", "0.6838188", "0.67684263", "0.67606723", "0.67578924", "0.67535", "0.6718302", "0.67153895", "0.6693746", "0.6693646", "0.66679907", "0.6660982", "0.66444445", "0.6616469", "0.6598152", "0.65589565", "0.6489182", "0.646971", "0.6442366", "0.64413476", "0.64396834", "0.6436057", "0.6416528", "0.63791585", "0.6373064", "0.6321512", "0.6321512", "0.6321512", "0.63130164", "0.63068336", "0.62967134", "0.62876576", "0.6276588", "0.62684643", "0.6255983", "0.62382644", "0.62316406", "0.61985487", "0.61669344", "0.6164574", "0.61609817", "0.61531335", "0.61516505", "0.6143543", "0.612968", "0.61278564", "0.61223686", "0.610757", "0.6103805", "0.60668665", "0.6062784", "0.6062464", "0.6059799", "0.6054859", "0.6052001", "0.6039108", "0.6039108", "0.60357594", "0.6032907", "0.60299164", "0.6020301", "0.6010628", "0.6010612", "0.60006917", "0.59912056", "0.59879804", "0.59879804", "0.5983913", "0.59765106", "0.5976109", "0.597478", "0.597478", "0.597478", "0.597478", "0.5969098", "0.5969098", "0.59666413", "0.5965913", "0.59639686", "0.5963501", "0.59619415", "0.59483457", "0.59430414", "0.5935401" ]
0.0
-1
Address for the XBee where the response originated.
def address(self):
    return self._address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAddress(self):\r\n return self._endpoint.getAddress()", "def addr(self):\r\n return self._addr", "def getAddress(self):\r\n raise NotImplementedError('Endpoint can not be used directly.')", "def address(self):\n ...", "def address(self) -> str:\n return self._backend.address", "def address(self):\n\n return self._address", "def _get_address(self):\n return self.__address", "def address(self) -> object:\n return self._address", "def address(self):\n return self._ref_address", "def address(self):\n \n return self.__ip", "def address(self) -> str:\n return self._address", "def address(self) -> str:\n return self._address", "def address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"address\")", "def address(self):\n return self.data.get('address')", "def address(self) -> str:\n return pulumi.get(self, \"address\")", "def address(self) -> str:\n return pulumi.get(self, \"address\")", "def address(self) -> str:\n return pulumi.get(self, \"address\")", "def get_current_address(self):\n pass", "def getAddress(self):\r\n return self._container.getAddress()", "def address(self):\n\n return self.config.dict[\"fhdhr\"][\"address\"]", "def get_address(self):\n \n return self._addr", "def get_address(self):\n \n return self._addr", "def get_address(self):\n return logic.address(self.get_program())", "def get_address(self):\n if self.address:\n return self.address", "def address(self) -> int:\n return self._address", "def address(self, name):\n return self.query(name).response.answer[0].items[0].address", "def get_address(self, ):\n return self.get_parameter('address')", "def getAddress(self) -> int:\n ...", "def remote_getWebsocketAddress(self):\r\n return self._extAddress", "def get_address(self):\n self.rs485.clear_buffers()\n self.rs485.write_command('#00?0')\n response = self.rs485.read_response()\n pattern = '\\$.*? (.*?) 
\\r\\n'\n hexval = re.findall(pattern,response).pop()\n address = int(hexval,16)\n return address", "def endpoint_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_address\")", "def address(self):\n return str(self._address)", "def address(self):\n return \"%s:%s\" % (self.ip, self.port)", "def getWebsocketAddress(self):\r\n return self.callRemote('getWebsocketAddress')", "def fromaddr(self):\n return self._from", "def address_string(self):\n\n if self.server.log_ip_activated:\n host = self.client_address[0]\n else:\n host = '127.0.0.1'\n if self.server.resolve_clients:\n return socket.getfqdn(host)\n else:\n return host", "def _getAddress(self, result):\r\n ((serverReady, _), (clientReady, _)) = result\r\n\r\n if not (serverReady and clientReady):\r\n # There was a problem in making the server/client ready for the\r\n # connection attempt\r\n # TODO: What should we do here?\r\n return Failure(InternalError('Server/Client could not be prepared '\r\n 'for connection attempt.'))\r\n\r\n return self._serverEndpoint.getAddress()", "def Address(self) -> _n_5_t_0:", "def get_address(self):\n \n if self._addr == None:\n return self._socket.getpeername()\n return self._addr", "def _get_bus_address(self):\n if 'PULSE_DBUS_SERVER' in os.environ:\n address = os.environ['PULSE_DBUS_SERVER']\n else:\n bus = dbus.SessionBus()\n server_lookup = bus.get_object(\"org.PulseAudio1\", \"/org/pulseaudio/server_lookup1\")\n address = server_lookup.Get(\"org.PulseAudio.ServerLookup1\", \"Address\",\n dbus_interface=\"org.freedesktop.DBus.Properties\")\n return address", "def peer_address(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"peer_address\")", "def get_self_address(self):\n return self.self_host, self.self_port", "def get_address(self):\r\n return \"iDigi\"", "def discovery_address(self):\n\n return self.config.dict[\"fhdhr\"][\"discovery_address\"]", "def address(self) -> Optional[str]:\n return pulumi.get(self, \"address\")", "def address(self) -> Optional[str]:\n return pulumi.get(self, \"address\")", "def address(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"address\")", "def address(self):\n zone = '/' + self.zone if self.zone else ''\n if self._bundle is not None:\n return self._bundle.address() + zone\n elif self.sent_id is not None:\n return self.sent_id + zone\n else:\n return '?' 
+ zone", "def address(self):\n if self.con_strategy == \"local\":\n return self.address_local()\n if self.con_strategy == \"remote\":\n return self.address_remote()\n return None", "def server_address(self):\n return self._server_address", "def get_addr(self):\n return Server.t_addresses.get(threading.get_ident())", "def getAddress(self):\n return self.hostname, self.port", "def get_reply_address(self):\n\t\trequest = self.context.get('request')\n\t\tif request and request.venue:\n\t\t\treturn request.venue.support_email_address\n\n\t\treturn self.get_default_reply_address", "def ip_address(self):\n return self.address", "def get_infoblox_address_connection(self):\n return self.m_connection.iblox_a_records", "def get_client_address(self,environ):\n try:\n return environ['HTTP_X_FORWARDED_FOR'].split(',')[-1].strip()\n except KeyError:\n return environ['REMOTE_ADDR']", "def reply_to(self):\n return self.receiver.remote_source.address", "def get_address(self) -> Optional[str]:\n raise NotImplementedError()", "def address_1(self):\n return self._address_1", "def remote_addr(self):\n fwd = self.environ.get('HTTP_X_FORWARDED_FOR', None)\n if fwd is None:\n return self.environ.get('REMOTE_ADDR')\n # sometimes x-forwarded-for contains multiple addresses,\n # actual client is first, rest are proxy\n fwd = fwd.split(',')[0]\n return fwd", "def address(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.ADDRESS_INPUT)\n\t\treturn element.element_value", "def address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address\")", "def address(self):\n return f\"{self._type}.{self._id}\"", "def instant_messaging_address(self) -> str:\n return self._instant_messaging_address", "def LocalAddress(self) -> _n_5_t_0:", "def get_address(self):\n return self.get_ipv4_address()", "def sender(self) -> Address:\n return self._sender", "def ip_addr(self):\n return self.ip_addresses[0]", "def _get_addr(self, protocol, address):\n if address:\n return address[0]\n else:\n return protocol.transport.getPeer().host", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def receive_address(self):\n url = self.base_url + 'account/receive_address'\n self.session.headers.update(self.sign(url))\n resp =self.session.get(url)\n return pd.Series(resp.json())", "def get_address(xpub):\n return xpub.to_address() # p2pkh", "def remote_addr(self):\r\n route = self.remote_route\r\n return route[0] if route else None", "def get_information_object_address(self):\n return self.information_object_address", "def _get_address(self):\n return utf82unicode(pn_terminus_get_address(self._impl))", "def get_address(self):\n \n return tuple('localhost',self._port)", "def get_address(self):\n \n return tuple('localhost',self._port)", "def address(self):\n return f'Address = {self._peer.address}/{self._peer.subnet.prefixlen}'", "def get_remit_to_address(self): \n return self.remit_to_address", "def endpoint_sub_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_sub_address\")", "def lan_address(self):\n _, port = self._socket.getsockname()\n return (\"127.0.0.1\", port)", "def wan_address(self):\n return self._wan_address", "def sender(self):\n key, alt = ('Sender', 'From') if not self.resent else \\\n ('Resent-Sender', 'Resent-From')\n value = self.get(key) or self.get(alt)\n _, addr = getaddresses([value])[0]\n return addr", "def endpoint_address(self) -> Optional[pulumi.Input[str]]:\n 
return pulumi.get(self, \"endpoint_address\")", "def endpoint_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_address\")", "def wan_address(self):\n if self._community.dispersy:\n host = self._community.dispersy.wan_address[0]\n\n if host == \"0.0.0.0\":\n host = self._community.dispersy.lan_address[0]\n\n else:\n host = \"0.0.0.0\"\n\n _, port = self._socket.getsockname()\n return (host, port)", "def intranet_address(self) -> str:\n return pulumi.get(self, \"intranet_address\")", "def internet_address(self) -> str:\n return pulumi.get(self, \"internet_address\")", "def full_out_address(self) -> str:\n return f'{self.host}:{self.port_out}'", "def getWebsocketAddress(self):\r\n return self._endpoint.getWebsocketAddress()", "def remote_addr(self):\r\n return self._environ.get('REMOTE_ADDR', '0.0.0.0')", "def fax(self):\n return self._fax", "def getSicxAddress(self) -> Address:\n return self._sICX_address.get()", "def external_IP(self):\r\n return self._external_ip", "def full_address(self) -> str:\n return f'{self.host}:{self.port}'", "def address(self, json):\n if json['Response'] != None and json['Response']['View'] != None and len(json['Response']['View'])>0:\n location = json['Response']['View'][0]['Result'][0]['Location']\n self._address = location.get('Address')\n return self._address.get('Label')\n else:\n logging.error(\"Problem with JSON Response, Json Dump %s, fetched using %s!\", json, self._getName())\n self._address = None\n return None" ]
[ "0.7066411", "0.6962745", "0.6875606", "0.68373436", "0.67819583", "0.67610395", "0.6751729", "0.6742783", "0.6728146", "0.6693614", "0.665993", "0.665993", "0.6601143", "0.6547315", "0.65288603", "0.65288603", "0.65288603", "0.6516686", "0.6515416", "0.6487318", "0.6476641", "0.6476641", "0.6471611", "0.64041483", "0.63869673", "0.63837004", "0.6381342", "0.63761353", "0.6305225", "0.62778234", "0.6251322", "0.62352073", "0.6202982", "0.61932635", "0.6187248", "0.6181417", "0.61678827", "0.6167602", "0.6160381", "0.61481816", "0.60977024", "0.60815257", "0.60804427", "0.6079974", "0.6077609", "0.6077609", "0.60607654", "0.60566974", "0.6045705", "0.6032161", "0.59868586", "0.59580785", "0.59490514", "0.59486383", "0.5926675", "0.5919045", "0.5918834", "0.5911286", "0.58885336", "0.5866717", "0.58373266", "0.58357894", "0.583211", "0.582289", "0.582068", "0.5820192", "0.5788966", "0.5787424", "0.5785771", "0.57796156", "0.57780695", "0.57626224", "0.57558644", "0.5750556", "0.5746193", "0.5746084", "0.5746084", "0.5715472", "0.57087845", "0.5703109", "0.5686394", "0.5686066", "0.5674813", "0.56675106", "0.56675106", "0.5663283", "0.5653372", "0.5650459", "0.5643043", "0.5637113", "0.5636925", "0.5634127", "0.56280243", "0.56238174", "0.5620297", "0.56164736" ]
0.6983551
5
Fill the packet's data properties.
def fill_data(self, data):
    self._data = data
    self._data_length = data[1:3]
    self._frame_id = data[4]
    self._address = XbeeAddress(data[5:9], data[9:13], data[13:15])
    self._at_command = data[15:17]
    self._command_status = data[17]
    try:
        self._command_data = data[18:21]
        self._checksum = data[22]
    except IndexError:
        self._command_data = None
        self._checksum = data[18]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initAttributes(self):\n CCSDS.DU.DataUnit.initAttributes(self)\n self.dataFieldHeaderFlag = 0\n self.setPacketLength()", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def set_properties(self):\n\n # assign feed entries from the root of the parsed data\n if hasattr(self.parsed_data, \"entries\"):\n self.items = self.parsed_data.entries\n\n # check if it is a feed root or feed element\n if hasattr(self.parsed_data, \"feed\"):\n source_data = self.parsed_data.feed\n else:\n source_data = self.parsed_data\n\n # assign available properties not listed in keymap\n self.title = source_data.title\n self.link = source_data.link\n\n for key in self.parsed_data.keymap.keys():\n if hasattr(self, key) and not getattr(self, key):\n attr_value = source_data.get(key)\n if isinstance(attr_value, struct_time):\n attr_value = self.serialize_datetime(attr_value)\n\n setattr(self, key, attr_value)", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def prepare_data(self):", "def get_data(self):\n self.data = dict()\n # list to save all the attributes we are going to create\n self.attr = []\n # list to save all the groups available in the incomming input\n self.groups.extend(self.values.keys())\n # Grouping\n self.parse_data()", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def populate_data_from_message(self, msg):\n for field in self:\n try:\n setattr(field, 'data', getattr(msg, field.name))\n except:\n continue", "def initData(self):\n self.checksum = 0\n return self._writeMessage(0, [], 'initData')", "def populate_data(self):\r\n # Importing StationData with the standard imports causes a redundancy\r\n # problem, so it is imported here only when it is needed.\r\n from stationData import StationData\r\n # Find data requirements from all plumes.\r\n requirements = describe.PLUMES\r\n # Loop over plumes and define parameters to be used for pulling data.\r\n grib_file = pygrib.open(self.grib_file_path)\r\n for req in requirements:\r\n (plume,data_types,grid_level_type,grid_level,unused) = req\r\n selected = grib_file.select(shortName=data_types,\r\n typeOfLevel=grid_level_type,\r\n level=grid_level)\r\n for i, message in enumerate(selected):\r\n if i % 20 == 0:\r\n print '%s %s/%s Grib messages processed for %s' %\\\r\n (PRETEXT, i + 1, len(selected), req[0])\r\n for sdo in StationData.instances:\r\n if sdo.grib_i is None:\r\n StationData.populate_grid_information(message,\r\n self.config)\r\n sdo.add_data(plume,self.member_name,message)\r\n grib_file.close()\r\n return", "def __init__(self, data: dict):\n super().__init__(data)\n self._supports_validation = False\n self._ping_data_raw = data['pingData']", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 
0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def __init__(self, data):\n\t\tself.protocol_version, self.le_state, self.playback_state, \\\n\t\t self.source, self.le_flags, self.playback_flags, \\\n\t\t self.source_flags, self.fullness, self.point_rate, \\\n\t\t self.point_count = \\\n\t\t\tstruct.unpack(\"<BBBBHHHHII\", data)", "def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()", "def __init__(self):\n \n self.packetType = DATA\n self.types = [BYTE, # Packet type\n FLOAT, # Battery voltage\n FLOAT, FLOAT, FLOAT, FLOAT, # Temperature readings\n FLOAT, FLOAT, # Pressure and humidity readings\n BYTE, BYTE, BYTE, # GPS Year, month, date (sensor computer)\n BYTE, BYTE, BYTE, # GPS Hour, minute, second (sensor computer)\n LONG, LONG, LONG, # GPS latitude, longitude, altitude (sensor computer)\n ULONG, UINT, BYTE, # GPS speed, heading, num satellites (sensor computer)\n FLOAT, FLOAT, FLOAT, # IMU data (accelerometer)\n FLOAT, FLOAT, FLOAT, # IMU data (gyroscope)\n FLOAT, FLOAT, FLOAT, # IMU data (magnetometer)\n FLOAT, FLOAT, FLOAT, # Attitude data\n ULONG, # Time since reset\n BOOL, UINT, # Data logging\n ULONG, # Time since last data arrival\n ULONG, # Relay states\n BYTE, BYTE, BYTE, # GPS Year, month, date (comm computer)\n BYTE, BYTE, BYTE, # GPS Hour, minute, second (comm computer)\n LONG, LONG, LONG # GPS latitude, longitude, altitude (comm computer)\n ] \n\n self.values = [0]*len(self.types)\n self.values[0] = DATA", "def prepare_data(self, config: TreeConfigParser) -> None:\n self.data = Data(config)\n self.data.prepare_input()\n self.data.prepare_output()", "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')", "def reinit_data(self):\n self.if_name_map, \\\n self.if_alias_map, \\\n self.if_id_map, \\\n self.oid_name_map = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_interface_tables, self.db_conn)\n\n self.update_data()", "def initAttributes(self):\n Packet.initAttributes(self)\n self.packetType = TM_PACKET_TYPE", "def _loadData(self, data):\n self._data = data\n self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))\n self.email = data.attrib.get('email')\n self.friend = utils.cast(bool, data.attrib.get('friend'))\n self.friendlyName = data.attrib.get('friendlyName')\n self.home = utils.cast(bool, data.attrib.get('home'))\n self.id = utils.cast(int, data.attrib.get('id'))\n self.server = utils.cast(bool, data.attrib.get('server'))\n self.servers = self.findItems(data, MyPlexServerShare)\n self.thumb = data.attrib.get('thumb')\n self.username = data.attrib.get('username', '')\n for server in self.servers:\n server.accountID = self.id", "def _init_net_delay_data(self):\n if self._net_delay_raw_data is None:\n return\n\n json_data = json_util.load_content(self._net_delay_raw_data)\n for row in json_data:\n app_id = int(row['app'])\n src_node_id = int(row['src_node'])\n dst_node_id = int(row['dst_node'])\n net_delay = float(row['net_delay'])\n 
self._net_delay_data[app_id][src_node_id][dst_node_id].append(net_delay)", "def __fill_data_variables(self):\n data_vars = []\n for data_var in self.ts.data.data_vars:\n data_vars.append(data_var)\n\n self.data_vars = Dropdown(\n options=data_vars,\n value=data_vars[0],\n description='Data variables:',\n disabled=False,\n style = {'description_width': 'initial'},\n layout={'width': '400px'},\n )\n\n self.data_vars.observe(self.on_data_vars_change)", "def initAttributes(self):\n Packet.initAttributes(self)\n self.packetType = TC_PACKET_TYPE", "def fillData(self):\n self.graphColors = c.getGraphColors()\n self._tupleListToStrings()\n self.colorlist.SetSelection(0)\n self.delayvalue.SetValue(str(c.getGraphDelay()))\n self._updateButtons(None)", "def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen': data.hlen,\n 'plen': data.plen,\n 'oper': data.oper,\n 'sha': data.sha,\n 'spa': data.spa,\n 'tha': data.tha,\n 'tpa': data.tpa,\n 'payload': cls._make_payload(data),\n }", "def __udp_initialize_packet(self, seq):\n packet_payload, packet_size = self.__get_file_chunk()\n self.packets_status.update(\n {seq: {\"status\": 1, \"payload\": packet_payload, \"size\": packet_size}})", "def fillData(self):\n self.textexpt.SetValue(c.getExperimentFolder(self._user))\n self.textfold.SetValue(c.getDataFolder(self._user))\n self.textfile.SetValue(c.getDataFile(self._user))\n self.prependscan.SetValue(c.getPrependScan(self._user))", "def __init__(self, data):\n # add play_guid as it sometimes doesn't exist\n if 'play_guid' not in data:\n data['play_guid'] = ''\n # loop through data\n for x in data:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])", "def setParameters(self) -> None:\n # get a list of the header and data files in the folder\n self.headerF = glob.glob(os.path.join(self.dataPath, \"*.XTR\"))\n if len(self.headerF) == 0:\n self.headerF = glob.glob(os.path.join(self.dataPath, \"*.XTRX\"))\n self.dataF = glob.glob(os.path.join(self.dataPath, \"*.RAW\"))\n # data byte information might be different for each file\n # so it is a dictionary\n self.dataByteOffset: Dict = {}\n self.recChannels = {}\n self.dataByteSize = 4\n # data type\n self.dtype = np.float32\n # get the number of data files and header files - this should be equal\n self.numHeaderFiles: int = len(self.headerF)\n self.numDataFiles: int = len(self.dataF)", "def _finalize_data(self):\n\n if isinstance(self.node_data, np.ndarray): # SR workflow\n self.node_data = da.from_array(self.node_data)\n elif isinstance(self.node_data, list): # vr workflow\n struct_data = np.empty(len(self.node_data), dtype=self.data.dtype)\n datavals = np.array(self.node_data)\n for cnt, varname in enumerate(self.data.dtype.names):\n struct_data[varname] = datavals[:, cnt]\n self.node_data = da.from_array(struct_data)\n if isinstance(self.data, np.ndarray):\n self.data = da.from_array(self.data)", "def initialize(self):\n self.data = None\n self.errors = []", "def fillData(self):\n self.textname.SetValue(c.getUserName())\n self.textphon.SetValue(str(c.getPhone()))\n self.textcarr.SetValue(c.getCarrier())\n self.smsfin.SetValue(c.getSmsFinished())\n self.smserr.SetValue(c.getSmsError())", "def _init_empty(self):\n self._data = []", "def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True", "def 
set_properties(struct):", "def set_data(self,pdata):\n self.uid.data=pdata[0]\n self.pid.data=pdata[1]\n self.pName.data=pdata[2]\n self.pAge.data=pdata[3]\n self.dateOfSubmission.data=pdata[4]\n self.bedType.data=pdata[5]\n self.address.data=pdata[6]\n self.city.data=pdata[7]\n self.state.data=pdata[8]\n self.status.data=pdata[9]", "def assign_values(self, data):\n\n for key in self.__dict__.keys():\n if key in data.keys():\n setattr(self, key, data[key]) # handy built-in function", "def _setData(self):\n\n if not self.stationId:\n return\n \"\"\" \n # get the ressource url and adjust lat and lon from data portal\n query = sparqls.stationResource(self.stationId)\n key, val = RunSparql(query, 'array').run()\n if val: \n self.url = val[0][0]\n self.lat = float(val[0][2])\n self.lon = float(val[0][3])\n \"\"\"\n\n # it is possible, that a station id has multiple URI\n # ask for all URI\n query = sparqls.stationData(self.uri, 'all')\n data = RunSparql(query, 'pandas').run()\n\n if not data.empty:\n self._data = data\n else:\n self._data = 'no data available'\n\n # check if data is available and extract the 'unique' data products\n if isinstance(self._data, pd.DataFrame):\n p = self._data['specLabel'].unique()\n self._products = pd.DataFrame(p)\n\n # replace samplingheight=None with empty string\n self._data.samplingheight.replace(to_replace=[None], value=\"\", inplace=True)\n else:\n self._products = 'no data available'", "def setData(self,data):\n self.data = data\n self.size = len(data)", "def setData(self,data):\n self.data = data\n self.size = len(data)", "def setPacket(self, packet):\n\t\tself.clear()\n\t\tself.packet = packet\n\t\t\n\t\tfields = self.fields\n\t\t\n\t\tfields.append(['Reception time', '%s:%s:%s.%s' % tuple(packet.time), None])\n\t\t\n\t\tif self.packet.isInvalid:\n\t\t\treturn\n\t\t\n\t\tfields.append(['Transmission info', 'CRC passed: %s, LQI: %s, RSSI: %s' % (packet.CRCOk, packet.LQI, packet.RSSI), None])\n\t\tfields.append(['PHY fields', '', None])\n\t\tphy = len(fields) - 1\n\t\tfields.append(['Frame length', len(packet.load), phy])\n\t\t\n\t\tfields.append(['MAC fields', '', None])\n\t\tmac = len(fields) - 1\n\t\tfields.append(['Frame control', packet.frameControl, mac])\n\t\tfields.append(['Frame Type', packet.frameType, mac])\n\t\tfields.append(['Security enabled', packet.securityEnabled, mac])\n\t\tfields.append(['Frame pending', packet.framePending, mac])\n\t\tfields.append(['Ack. request', packet.ackRequest, mac])\n\t\tfields.append(['Intra-PAN', packet.intraPAN, mac])\n\t\tfields.append(['Dest. 
addressing mode', packet.dstAddrMode, mac])\n\t\tfields.append(['Source addressing mode', packet.srcAddrMode, mac])\n\t\tfields.append(['Sequence number', packet.seqNumber, mac])\n\t\t\n\t\tif hasattr(packet, 'dstPANID'):\n\t\t\tfields.append(['Destination PAN-ID', packet.dstPANID, mac])\n\t\t\n\t\tif hasattr(packet, 'dstAddr'):\n\t\t\tfields.append(['Destination address', packet.dstAddr, mac])\n\t\t\n\t\tif hasattr(packet, 'srcPANID'):\n\t\t\tfields.append(['Source PAN-ID', packet.srcPANID, mac])\n\t\t\t\n\t\tif hasattr(packet, 'srcAddr'):\n\t\t\tfields.append(['Source address', packet.srcAddr, mac])\n\t\t\t\n\t\tif hasattr(packet, 'payload'):\n\t\t\tfields.append(['Payload', packet.payload, mac])\n\t\t\n\t\tif hasattr(packet, 'commandType'):\n\t\t\tfields.append(['Command type', packet.commandType, mac])\n\t\t\n\t\tif hasattr(packet, 'commandPayload'):\n\t\t\tfields.append(['Command payload', packet.commandPayload, mac])\n\t\t\n\t\tif hasattr(packet, 'superFrameSpec'):\n\t\t\tfields.append(['Superframe specification', packet.superFrameSpec, mac])\n\t\t\tsfs = len(fields) - 1\n\t\t\tfields.append(['Beacon order', packet.beaconOrder, sfs])\n\t\t\tfields.append(['Superframe order', packet.superFrameOrder, sfs])\n\t\t\tfields.append(['finalCAPSlot', packet.finalCAPSlot, sfs])\n\t\t\tfields.append(['Batt. life extension', packet.battLifeExt, sfs])\n\t\t\tfields.append(['PAN Coordinator', packet.PANCoord, sfs])\n\t\t\tfields.append(['Association permit', packet.assocPermit, sfs])\n\t\t\n\t\tif hasattr(packet, 'GTS'):\n\t\t\tfields.append(['GTS specification', packet.GTS, mac])\n\t\t\tgts = len(fields) - 1\n\t\t\tfields.append(['GTS descriptor count', packet.GTSDescrCount, gts])\n\t\t\tfields.append(['GTS permit', packet.GTSPermit, gts])\n\t\t\tif int(packet.GTSDescrCount, 16) > 0:\n\t\t\t\tfields.append(['GTS directions', packet.GTSDirections, gts])\n\t\t\t\tfields.append(['GTS descriptors list', '', gts])\n\t\t\t\tdscList = len(fields) - 1\n\t\t\t\tfor i in xrange(int(packet.GTSDescrCount, 16)):\n\t\t\t\t\tfields.append(['Descriptor #'+str(i), '', dscList])\n\t\t\t\t\td = len(fields) - 1\n\t\t\t\t\tfields.append(['Device short address', packet.GTSDescriptors[i].deviceShortAddr, d])\n\t\t\t\t\tfields.append(['GTS starting slot', packet.GTSDescriptors[i].GTSStartingSlot, d])\n\t\t\t\t\tfields.append(['GTS length', packet.GTSDescriptors[i].GTSLength, d])\n\t\t\t\n\t\t\tfields.append(['Pending addresses list', '', gts])\n\t\t\tpnd = len(fields) - 1\n\t\t\tif int(packet.numShortAddrPnd, 16) > 0 or int(packet.numShortAddrPnd, 16) > 0:\n\t\t\t\tfor i in xrange(int(self.numShortAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Short addr. #%i' % i, packet.shortAddrPndList[i], pnd])\n\n\t\t\t\tfor i in xrange(int(self.numLongAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Long addr. 
#%i' % i, packet.longAddrPndList[i], pnd])\n\t\t\n\t\tif hasattr(packet, 'bcnPayload'):\n\t\t\tfields.append(['Beacon payload', packet.bcnPayload, mac])\n\t\t\n\t\tself.beginInsertRows(QModelIndex(), 0, len(self.fields)+1)\n\t\tself.endInsertRows()\n\t\tfor field in fields:\n\t\t\tprint field", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def setupPacket(self):\n return None", "def __post_init__(self):\n # Only do this if source_data already exists (not during its own initialization)\n if \"SOURCE_DATA\" in globals():\n for data_field in fields(self):\n setattr(self, data_field.name, getattr(SOURCE_DATA, data_field.name))", "def __init__(self, data: list):\n self.__data = copy.deepcopy(data)", "def __init__(self, data: bytes):\n super().__init__()\n self._expected_packet_type = MessageType.MAIN\n self._expected_data_size = 34\n self._data_raw = b''\n self._packet_type = MessageType.UNDEFINED\n self._packet_number = 0\n self.time_stamp_1MHz = 0\n self.accelerometer_x = 0\n self.accelerometer_y = 0\n self.accelerometer_z = 0\n self.magnetometer_x = 0\n self.magnetometer_y = 0\n self.magnetometer_z = 0\n self.gyroscope_x = 0\n self.gyroscope_y = 0\n self.gyroscope_z = 0\n self.quaternion_q0 = 0\n self.quaternion_q1 = 0\n self.quaternion_q2 = 0\n self.quaternion_q3 = 0\n self.flags = 0\n self.shield_and_kinetis_byte = 0\n self._is_valid = False\n self._parse_data(data)", "def parse_data(self):\n\n msg = self.xml['dom'].childNodes[0]\n self.data = xml_to_dicts(msg, False)\n\n # Get some metadata together\n self.id = \"%s:%s\" % (self.data['src']['name']['#cdata'], self.data['src']['id']['#cdata'])\n self.temp = self.data['tmpr']['#cdata']\n self.watts = self.data['ch1']['watts']['#cdata']\n\n # Time - CurrentCost and local\n self.date = {}\n self.date['cc'] = [ int(self.data['date'][k]['#cdata']) for k in ('dsb','hr','min','sec') ]\n self.date['now'] = localtime()", "def __init__(self, data):\n # check if dataset contains time information\n # (fetched from bootloader storage)\n if len(data) == 61:\n (_, seconds, minutes, hours, days, months, years) = struct.unpack(\n '<55sBBBBBB', data)\n self.date = datetime(2000 + years, months, days, hours, minutes,\n seconds)\n\n # Only parse preceding data\n data = data[:55]\n power = [0, 0]\n kWh = [0, 0]\n MWh = [0, 0]\n (_, digital, speed, active, power[0], kWh[0], MWh[0], power[1], kWh[1],\n MWh[1]) = struct.unpack('<32sH4sBLHHLHH', data)\n\n analog = struct.unpack(\n '<{}{}'.format('H' * 16, 'x' * (len(data) - 32)), data)\n\n self.analog = {}\n for channel in range(0, 16):\n self.analog[channel + 1] = round(\n self._convert_analog(analog[channel]), 3)\n\n self.digital = {}\n for channel in range(0, 16):\n self.digital[channel + 1] = self._convert_digital(digital, channel)\n\n '''\n self.speed = {}\n for channel in range(0, 4):\n self.speed[channel + 1] = round(\n self._convert_speed(speed[channel]), 3)\n \n\n self.energy = {}\n for channel in range(0, 2):\n self.energy[channel + 1] = round(\n self._convert_energy(MWh[channel], kWh[channel], active,\n channel), 3)\n \n\n self.power = {}\n for channel in range(0, 2):\n self.power[channel + 1] = round(\n self._convert_power(power[channel], active, channel), 3)\n '''", "def __init__(self,\n resp_data,\n ):\n self.raw_data = resp_data.dict()\n\n # Packet parsed for host db processing\n 
self.parsed_data = {'insert': {'product': self.get_product_pack(),\n 'selling_status': self.get_selling_status_pack(),\n 'shipping_info': self.get_shipping_info_pack(),\n 'listing_info': self.get_listing_info_pack(),\n },\n 'items_received': resp_data.dict()['searchResult']['_count']}", "def initDataParms(self):\n self.xpos = self.pltw.curvelist[self.blkno].xvinfo.vidx\n self.data = self.pltw.blklst[self.blkno] # original data block\n self.idata = None # interpolated data\n (self.nvec, self.npt) = self.data.shape\n self.xmin = (self.data[self.xpos]).min()\n self.xmax = (self.data[self.xpos]).max()\n self.xspan = self.xmax - self.xmin\n if self.parent.test:\n self.dx = self.xspan / (self.npt * 5)", "def setInfoData(self):\n self.infoData['id'] = str(self.id)\n self.infoData['name'] = str(self.name)\n self.infoData['space'] = str(self.space.id)\n self.infoData['size'] = str(self.size)\n self.infoData['ndof'] = str(self.ndof)\n self.infoData['type'] = str(self.type)\n self.infoData['operator'] = str(self.operator)\n self.infoData['operande_id'] = str(self.operande_id)\n self.infoData['func_arguments'] = str(self.func_arguments)\n self.infoData['nderiv'] = str(self.nderiv)\n self.infoData['parameval'] = str(self.parameval)\n self.infoData['paramevalfunc'] = str(self.paramevalfunc)\n self.infoData['loc_id'] = str(self.loc_id)", "def setData(self,data):\n self.data = struct.pack(\"!d\",data)", "def package_data(self, data):\n pass", "def __init__(self):\n\n self.records = {}\n self.port_obj = None", "def to_data(self) -> dict:\n return {'pingData': {'challenge': self.ping_challenge}}", "def __init__(self, data={}):\n self._update_(data)", "def data(self, data):\n self.__data = data", "def setData(self,data):\n self.data = struct.pack(\"!I\",data)", "def __init__(self):\n self.data = []\n self.record = {}", "def _get_data(self) -> dict:\n LOGGER.debug(f\"Setting data property for {self.dirname}\")\n data = {}\n for axis in range(1, 4):\n # Subsample by 8 since this does not vary quickly\n data[f\"aoatter{axis}\"] = (\n self.tlm[f\"aoatter{axis}\"].vals[::ATT_ERR_SUBSAMP].astype(np.float32)\n )\n data[\"aokalstr\"] = self.tlm[\"aokalstr\"].vals\n # fmt: off\n data[\"npnt_kalm\"] = (\n (self.tlm[\"aopcadmd\"].vals == \"NPNT\")\n & (self.tlm[\"aoacaseq\"].vals == \"KALM\")\n )\n # fmt: on\n for slot in range(8):\n data[f\"aca_track{slot}\"] = self.tlm[f\"aoacfct{slot}\"].vals == \"TRAK\"\n data[f\"aca_ir{slot}\"] = self.tlm[f\"aoaciir{slot}\"].vals == \"ERR\"\n data[\"times\"] = self.tlm[\"aokalstr\"].times\n data[\"perigee_times\"] = self.tlm.perigee_times.astype(np.float32)\n data[\"perigee\"] = self.perigee.date\n data[\"rad_entry\"] = self.rad_entry.date\n data[\"rad_exit\"] = self.rad_exit.date\n data[\"obss\"] = self.obss.as_array()\n\n return data", "def __init__(self, data: dict):\n self._data = {\n '': 'Location', # this is required\n 'street': '',\n 'suburb': '',\n 'location': '',\n 'stop': ''\n }\n\n self._data.update(data)", "def set_data(self, data):\n\n pass", "def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0", "def set_message_data(self) -> None:\n if PrimaryFlight.MESSAGETYPE == self.type:\n self.message_data = PrimaryFlight(self.data, self.config)\n elif GPS.MESSAGETYPE == self.type:\n self.message_data = GPS(self.data, self.config)\n elif Attitude.MESSAGETYPE == self.type:\n self.message_data = Attitude(self.data, self.config)\n elif EngineData.MESSAGETYPE == self.type:\n self.message_data = EngineData(self.data, self.config)\n else:\n 
self.message_data = MessageData(self.data, self.config)", "def get_data(self):\n data = {\n \"ts\": self.drone.pos[0][0],\n \"drone\": self.drone,\n \"subject\": self.subject,\n \"peds\": self.peds, # can be None\n \"objs\": self.objs # can be None\n }\n self.empty_bag()\n return data", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def set_up_all(self):\n # Based on h/w type, choose how many ports to use\n self.dut_ports = self.dut.get_ports(self.nic)\n # Verify that enough ports are available\n self.verify(len(self.dut_ports) >= 1, \"Insufficient ports\")\n\n localPort = self.tester.get_local_port(self.dut_ports[0])\n self.tester_itf = self.tester.get_interface(localPort)\n self.tester_mac = self.tester.get_mac(localPort)\n self.pf_interface = self.dut.ports_info[self.dut_ports[0]]['intf']\n self.pf_mac = self.dut.get_mac_address(0)\n self.pf_pci = self.dut.ports_info[self.dut_ports[0]]['pci']\n self.pmdout = PmdOutput(self.dut)\n self.cores = \"1S/2C/1T\"\n self.pkt1 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/SCTP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.pkt2 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/UDP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt3 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.3')/TCP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt4 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/('X'*48)\" % self.pf_mac\n self.pkt5 = \"Ether(dst='%s')/IPv6(src='2001::1',dst='2001::2',nh=132)/SCTP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.pkt6 = \"Ether(dst='%s')/IPv6(src='2001::1',dst='2001::2')/UDP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt7 = \"Ether(dst='%s')/IPv6(src='2001::2',dst='2001::3')/TCP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt8 = \"Ether(dst='%s')/IPv6(src='2001::2',dst='2001::3')/('X'*48)\" % self.pf_mac\n self.prio_pkt1 = \"Ether(dst='%s')/Dot1Q(prio=1)/IP(src='10.0.0.1',dst='192.168.0.2')/TCP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.prio_pkt2 = \"Ether(dst='%s')/Dot1Q(prio=2)/IP(src='10.0.0.1',dst='192.168.0.2')/TCP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.prio_pkt3 = \"Ether(dst='%s')/Dot1Q(prio=3)/IP(src='10.0.0.1',dst='192.168.0.2')/TCP(dport=80, sport=80)/('X'*48)\" % self.pf_mac", "def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct(packet):\n\t\t\t\t# this populates packet.data with {name: value}\n\t\t\t\tpacket.data[name] = self.unpack(datatype)\n\n\t\t\t# I believe the following are packet-type specific fixes for variable-length packets.\n\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tpacket.data['x2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['y2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['z2'] = self.unpack('short')\n\t\t\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data'] = 
self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = self.unpack_array_fast('short', packet.data['data_size'])\n#\t\t\t\tbtype = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tmetadata = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tpacket.data[\"blocks\"] = []\n#\t\t\t\tfor i in zip(coords, btype, metadata):\n#\t\t\t\t\tblock = {}\n#\t\t\t\t\tblock[\"x\"] =\t\ti[0] >> 12\n#\t\t\t\t\tblock[\"z\"] = 0x0F & i[0] >> 8\n#\t\t\t\t\tblock[\"y\"] = 0xFF & i[0]\n#\t\t\t\t\tblock[\"type\"] = i[1]\n#\t\t\t\t\tblock[\"metadata\"] = i[2]\n#\t\t\t\t\tpacket.data[\"blocks\"].append(block)\n#\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\trecords = self.unpack_array_fast('byte', packet.data['data_size']*3)\n\t\t\t\ti = 0\n\t\t\t\tpacket.data[\"blocks\"] = []\n\t\t\t\twhile i < packet.data['data_size']*3:\n\t\t\t\t\tpacket.data[\"blocks\"].append(dict(zip(('x','y','z'), records[i:i+3])))\n\t\t\t\t\ti+=3\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data[\"slots_data\"] = self.unpack_array('slot', packet.data[\"data_size\"])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\t#0x82:\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tpacket.data[\"text\"] = []\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"text\"].append(packet.data[\"line_%s\" % (i+1)])\n\t\t\t\t\t\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data[\"data\"] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\n\t\t\t# Sets packet.original to the byte string that the packet was decoded from.\n\t\t\tpacket.original = backup[:len(backup) - len(self.buff)]\n\n\t\t\treturn packet\n\n\t\texcept IncompleteData:\n\t\t\tself.buff = backup\n\t\t\treturn None\n\t\texcept Exception, ex:\n\t\t\tself.buff = backup\n\t\t\tex.args += (self.buff[20:],)\n\t\t\traise", "def setData(self, data):\n self.data = struct.pack(\"!I\",data)", "def _setInfo(self):\n\n if len(self.data.shape)==1:\n self.numChannels = 1\n self.totalSamples = len(self.data)\n else:\n self.numChannels = self.data.shape[1]\n self.totalSamples = self.data.shape[0]\n \n self.duration = float(self.totalSamples)/self.rate # [sec]\n self.dataType = str(self.data.dtype)", "def __init__(self, dat):\n self.data = dat", "def __config_attributes(self):\n self.__name = self.__data[self.__code][\"airportName\"]\n self.__country = Country(name=self.__data[self.__code][\"countryName\"],\n code=self.__data[self.__code][\"countryCode\"])\n try:\n self.__city = self.__data[self.__code][\"city\"]\n except Exception:\n self.__city = ''", "def setData(self,data):\n self.data = struct.pack(\"!Q\",data)", "def setData(self,data):\n self.data = struct.pack(\"!Q\",data)", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexSession._loadData(self, data)", "def _loadData(self, data):\n self._data = data\n self.id = utils.cast(int, data.attrib.get('id'))\n self.accountID = utils.cast(int, data.attrib.get('accountID'))\n self.serverId = utils.cast(int, data.attrib.get('serverId'))\n self.machineIdentifier = data.attrib.get('machineIdentifier')\n self.name = data.attrib.get('name')\n self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))\n self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))\n self.allLibraries = utils.cast(bool, 
data.attrib.get('allLibraries'))\n self.owned = utils.cast(bool, data.attrib.get('owned'))\n self.pending = utils.cast(bool, data.attrib.get('pending'))", "def __initializeData():\n\tdata = OrderedDict()\n\tdata['Saved_LIVE'] = False\n\tdata['Saved_POST'] = False\n\tdata['Time_Written_POST'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\tdata['Time_Written_LIVE'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\treturn data", "def setData(self,data):\n self.data = struct.pack(\"!f\",data)", "def _loadData(self, data):\n self._data = data\n self.friend = self._initpath == self.key\n self.allowCameraUpload = utils.cast(bool, data.attrib.get('allowCameraUpload'))\n self.allowChannels = utils.cast(bool, data.attrib.get('allowChannels'))\n self.allowSync = utils.cast(bool, data.attrib.get('allowSync'))\n self.email = data.attrib.get('email')\n self.filterAll = data.attrib.get('filterAll')\n self.filterMovies = data.attrib.get('filterMovies')\n self.filterMusic = data.attrib.get('filterMusic')\n self.filterPhotos = data.attrib.get('filterPhotos')\n self.filterTelevision = data.attrib.get('filterTelevision')\n self.home = utils.cast(bool, data.attrib.get('home'))\n self.id = utils.cast(int, data.attrib.get('id'))\n self.protected = utils.cast(bool, data.attrib.get('protected'))\n self.recommendationsPlaylistId = data.attrib.get('recommendationsPlaylistId')\n self.restricted = data.attrib.get('restricted')\n self.thumb = data.attrib.get('thumb')\n self.title = data.attrib.get('title', '')\n self.username = data.attrib.get('username', '')\n self.servers = self.findItems(data, MyPlexServerShare)\n for server in self.servers:\n server.accountID = self.id", "def prep_data():\n loader = DLoader()\n cap = loader.visitor_cnt\n\n pass", "def set_data(self, data):\n data_len = len(data)\n if data_len > 8:\n raise ValueError(\"Data length is {} but must not be larger than 8\".format(data_len))\n self.dlc = data_len\n self.data = data.ljust(8, b'\\x00')", "def __init__(self, data):\n # loop through data\n for x in data:\n # create pitches list if attribute name is pitches\n if x == 'pitches':\n self.pitches = []\n for y in data[x]:\n self.pitches.append(Pitch(y))\n else:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])", "def pack(self):\n data = {\n 'name': self._name,\n 'piece': self._piece,\n 'pos': self._pos,\n 'cash': self._cash,\n 'properties': []\n }\n\n for i in self._properties:\n data['properties'].append({'name': i.name, 'value': i.value})\n\n return data", "def payload_data(self, pkts):\n\n\t\t#Get all the payload bytes exchanged over MPTCP connections\n\t\tpayload_bytes = 0\n\t\tprint \"Determining the number of payload bytes excluding headers....\"\n\t\t#DSS = 0x2\n\t\tfor i in range(len(pkts)):\n\t\t\tif(TCPOption_MP in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 2 and Raw in pkts[i]):\n\t\t\t\tpayload_bytes += len(pkts[i][Raw].load)\n\t\t\t\t#print(\"DSN: %s; subflow_seqnum: %s; Data(bytes): %s\" % (pkts[i][TCPOption_MP].mptcp.dsn, pkts[i][TCPOption_MP].mptcp.subflow_seqnum, len(pkts[i][Raw].load)))\n\n\t\tprint \"Total Number of payload bytes in the file (entire MPTCP connections) excluding headers): %s\" % (payload_bytes)\n\t\t#MPTCP WITH SUBFLOW CONNECTIONS\n\t\t#MPTCP_JOINs = 0x1\n\t\tprint \"============================================================\"\n\t\tprint \"SUBFLOW Connections with their respective MPTCP connection (identified by connectionID)\"\n\t\tfor i in range(len(pkts)):\n\n\t\t\t#Initial Join 
Message\n\t\t\t#rcv_token Identifies the connection to which the subflow belongs: connectionID\n\t\t\tif(MPTCP_JoinSYN in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 1):\n\t\t\t\tprint(\"New subflow: connectionID: %s; src: %s; dest: %s; snd_nonce: %s\" % (pkts[i][TCPOption_MP].mptcp.rcv_token, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_nonce))\n\n\t\t#TODO: Now Need to track per-connection and per-subflow state", "def to_data(self, *, defaults = False):\n data = {}\n put_approximate_online_count_into(self.approximate_online_count, data, defaults)\n put_approximate_user_count_into(self.approximate_user_count, data, defaults)\n put_description_into(self.description, data, defaults)\n type(self).discovery_splash.put_into(self.discovery_splash, data, defaults)\n put_emojis_into(self.emojis, data, defaults)\n put_features_into(self.features, data, defaults)\n type(self).icon.put_into(self.icon, data, defaults)\n put_id_into(self.id, data, defaults)\n type(self).invite_splash.put_into(self.invite_splash, data, defaults)\n put_stickers_into(self.stickers, data, defaults)\n put_name_into(self.name, data, defaults)\n return data", "def __init__(self, data_size):\n try:\n self.data_size = int(data_size)\n except ValueError as exc:\n raise ValueError(\"Exepected arg 'size' to be int: \" + str(exc))\n self.packet = bytearray()\n self.in_data = False\n self.header_pos = 0\n self.transport = None", "def post_process(self, packet: 'dict[str, Any]') -> 'Schema':\n # for Pad1 option, length is always 0\n if self.type == Enum_Option.Pad1:\n self.len = 0\n return self", "def post_process(self, packet: 'dict[str, Any]') -> 'MPTCP':\n ret = self.data\n\n ret.option = Enum_Option.Multipath_TCP\n ret.length = self.test['length']\n ret.subtype = Enum_MPTCPOption.get(packet['test']['subtype'])\n\n return ret", "def reset_data(self):\n self.data = []", "def initialize_packet(self, packet):\n self._packet = packet\n self._pacbra = packet\n\n # Adapt the quadrature nodes and weights\n eps = self._packet.get_eps()\n self._nodes = self.transform_nodes(self._packet.get_parameters(), eps)\n self._weights = self._QR.get_weights()\n\n # Force a call of 'preprare'\n self._values = None\n self._bases = None\n self._coeffs = None" ]
[ "0.6564761", "0.6411371", "0.62188", "0.6214954", "0.61160105", "0.60499305", "0.592144", "0.5917901", "0.58758026", "0.5839473", "0.58239967", "0.58188593", "0.5807109", "0.57820135", "0.57779455", "0.5756668", "0.57558346", "0.57392687", "0.57246864", "0.57196575", "0.57140046", "0.5703263", "0.56832993", "0.5676967", "0.5621933", "0.5619268", "0.5612158", "0.5608776", "0.56012183", "0.5590031", "0.5586473", "0.5582122", "0.5575587", "0.5568616", "0.5562546", "0.5561093", "0.55575836", "0.5554885", "0.55348426", "0.55110824", "0.5508872", "0.550696", "0.550696", "0.54869133", "0.54636335", "0.54564637", "0.5449526", "0.5432733", "0.54318047", "0.5430736", "0.54220927", "0.54154295", "0.5407501", "0.5403473", "0.53992194", "0.5396198", "0.53959477", "0.53923607", "0.5391051", "0.5390968", "0.5374441", "0.5371333", "0.5361004", "0.53589195", "0.53448", "0.53358823", "0.5334823", "0.5333856", "0.53249085", "0.53249085", "0.53249085", "0.53249085", "0.53249085", "0.53249085", "0.532189", "0.532189", "0.5314253", "0.53087354", "0.5300227", "0.529885", "0.5295016", "0.52933264", "0.5293083", "0.5293083", "0.5289149", "0.52839303", "0.527901", "0.5278517", "0.5276621", "0.52720875", "0.52698493", "0.5268379", "0.52664953", "0.5225165", "0.5224343", "0.52179134", "0.5211881", "0.5209725", "0.5203397", "0.5198819" ]
0.6753045
0
Call me before using any of the tables or classes in the model.
def init_model(connection):
    db = connection
    for obj in common.__dict__.itervalues():
        if type(obj) == type and issubclass(obj, common.Model) and hasattr(obj, '__tablename__'):
            tablename = getattr(obj, '__tablename__')
            obj._object_store = Domain(db, tablename)
            collection_to_class[obj._object_store] = obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_model(self):\n pass", "def prepare_model(self, **kwargs):\n pass", "def initialize_model(self):\n pass", "def setUp(self):\n create_table(self.DATABASE_PATH)\n self.model = model.CodeReviewDatabase(self.DATABASE_PATH)", "def _before_execute(self, db):\n pass", "def init_model(engine):\n ## Reflected tables must be defined and mapped here\n #global reflected_table\n #reflected_table = sa.Table(\"Reflected\", meta.metadata, autoload=True,\n # autoload_with=engine)\n #orm.mapper(Reflected, reflected_table)\n #\n meta.Session.configure(bind=engine)\n meta.engine = engine", "def _tables(self):\n assert False, \"subclass responsibility\"", "def setup_models(self):\n pass", "def setUp(self):\n self.model = sqlite_model()\n self.model.create_new(':memory:')", "def init_models(self):\n from ron import Application\n from ron.models.basemodel import BaseModel\n if self.models == None or not Application().db:\n return\n models_namespace = self.__namespace + \".models\" # TODO: allow customize this\n try:\n models_package = import_module(models_namespace)\n except:\n models_package = None\n if models_package:\n models_modules = self._get_package_modules(models_package)\n for model_name in models_modules:\n imported_model = import_module('.' + model_name, package=models_namespace)\n for i in dir(imported_model):\n attribute = getattr(imported_model, i)\n if inspect.isclass(attribute) and issubclass(attribute, BaseModel):\n self.models.append(attribute)\n Application().db().database.create_tables(self.models)", "def initialize(self, model):\n pass", "def setUp(self):\n super().setUp()\n self.database.datamodels.find_one.return_value = self.DATA_MODEL", "def _prepare(cls):\n # the dbmodel is either the proxy base or ourselves\n dbmodel = cls._meta.concrete_model if cls._meta.proxy else cls\n cls.__dbclass__ = dbmodel\n if not hasattr(dbmodel, \"__instance_cache__\"):\n # we store __instance_cache__ only on the dbmodel base\n dbmodel.__instance_cache__ = {}\n super()._prepare()", "def db_table(self):", "def _pre_setup(self):\n apps.clear_cache()\n call_command('migrate', interactive=False, verbosity=0)\n call_command('loaddata', 'initial_data', verbosity=0)\n super(DatatableViewTestCase, self)._pre_setup()", "def setUpClass(self):\n\n base_model = BaseModel()", "def __init__(self):\n self.conf = None\n self.section = None\n self._engine = None\n self._session = None\n self.base_model = declarative_base()", "def before_request():\n g.db = models.DB\n g.db.connect()", "def __post_init_check(self):\n try:\n t = self.time\n m = self.metadata\n except AttributeError as e:\n clsname = self.__class__.__name__\n raise TypeError(f\"Model not initialized. 
Please call 'SupernovaModel.__init__' within the '{clsname}.__init__'\") from e", "def setUp(self):\n self.model = ModelBase(\n '__TestModel__' + self.mixin.__name__,\n (self.mixin,),\n {'__module__': self.mixin.__module__}\n )\n\n with connection.schema_editor() as schema_editor:\n schema_editor.create_model(self.model)", "def _connectModel(self):\n pass", "def test_model_class(self):\n db = Alchy(self.app)\n\n self.assertEquals(\n db.Model.__dict__['__init__'], alchy.model.ModelBase.__init__)\n self.assertIsInstance(\n db.Model.__dict__['query'], alchy.query.QueryProperty)", "def create_models( self ):", "def _before_execute(self, db, entity):\n pass", "def _before_execute(self, db, entity):\n pass", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()", "def _pre_mcs_init(cls):\n # technically you could also put a @classmethod with the same name on\n # the Model class, if you prefer that approach", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def test_table_definition(self):\r\n create_table(LowercaseKeyModel)\r\n create_table(CapitalizedKeyModel)\r\n\r\n delete_table(LowercaseKeyModel)\r\n delete_table(CapitalizedKeyModel)", "def __init__(self):\n engine = db_connect()\n create_reals_table(engine)\n self.Session = sessionmaker(bind=engine)", "def before_model_run(self, data_handle=None):\n pass", "def __init__(self, *args):\n self.engine = db.create_engine('mysql+pymysql://root:''@127.0.0.1:3306/northwind', echo=True)\n self.connection = self.engine.connect()\n self.metadata = db.MetaData()\n self.tables = db.Table(*args, self.metadata, autoload=True, autoload_with=self.engine)", "def setUp(self):\n self.db_handler = DynamoDBHandler(ModelTests.TABLE_NAME)\n self.init_table()\n self.items = {}\n self.init_items()\n self.populate_table()", "def test_table_definition(self):\n create_table(LowercaseKeyModel)\n create_table(CapitalizedKeyModel)\n\n delete_table(LowercaseKeyModel)\n delete_table(CapitalizedKeyModel)", "def test_override_model_class(self):\n class MyModelBase(object):\n def testing(self):\n return 'testing'\n\n Model = declarative_base(cls=MyModelBase)\n\n class Foo(Model):\n __tablename__ = 'foo'\n _id = Column(types.Integer(), primary_key=True)\n name = Column(types.String())\n\n db = Alchy(self.app, Model=Model)\n\n self.assertTrue(issubclass(db.Model, MyModelBase),\n 'db.Model should be a subclass of MyModelBase')\n\n db.create_all()\n\n self.assertEquals(db.session.query(Foo).all(), Foo.query.all(),\n 'Model classes should have a query property')\n\n record = Foo(name='Name')\n\n self.assertEquals(record.testing(), 'testing')", "def setUp(self):\n self.base1 = BaseModel()", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n 
TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados", "def load_model(self):\n pass", "def setUp(self):\n try:\n # Get default data from medical_forum_data_dump.sql, populate tables and connect to DB\n ENGINE.populate_tables()\n self.connection = ENGINE.connect()\n\n # In case of error/exception in populating tables, clear all tables data\n except Exception as exception:\n print(exception)\n ENGINE.clear()", "def _pre_setup(self, *args, **kwargs):\n get_user_model().objects.all().delete()\n super()._pre_setup(*args, **kwargs)", "def setUp(self):\n\n # Connect to articles database\n self.db = sqlite3.connect(Utils.FILE + \"/models/articles.sqlite\")\n\n # Create database cursor\n self.cur = self.db.cursor()", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_ResNet_Models:\", self._testMethodName)\n\t\t\t\n\t\tself.models = [models.resnet18(), models.resnet18().state_dict()]\n\t\tself.model_names = [\"resnet18()\", \"resnet18().state_dict()\"]", "def setUp(self):\n self.data = DatabaseIntermediary()", "def __init__(self):\n\t\tDBHelper.initialize() #initiate dababase helper", "def initialize(self):\r\n if not self.context:\r\n self.context = SQLContext(self.url, self.connection, self.schema)\r\n if self.table is None:\r\n self.table = self.context.table(self.table_name)\r\n if not self.fields:\r\n self.read_fields()\r\n self.field_names = self.fields.names()", "def strict_startup(self):\n self.load_up_initial_db(TIMESTAMP_PARSE_DICT)\n self.clean()\n self.add_numeric_cols()", "def setup_method(self):\n MANAGER._tables = {}\n MANAGER._views = {}", "def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # make our sqlalchemy tables", "def setUp(self):\n super(InfiniteFeedTests, self).setUp()\n self.db_set_up()", "def setUp(self):\n user = Users.query.first()", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def init_db():\n global app\n YourResourceModel.init_db(app)", "def before_request():\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user", "def before_request():\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user", "def setUp(self):\n init_db()\n self.client = Client(schema)", "def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.Session = sessionmaker(bind=engine)", "def init_model(engine):\n meta.Session.configure(bind=engine)\n meta.engine = engine", "def setUp(self):\n self.minerals = Mineral.objects.all()\n self.mineral = Mineral.objects.get(pk=12)", "def setUp(self):\n self.minerals = Mineral.objects.all()\n self.mineral = Mineral.objects.get(pk=12)", "def before_request():\n g.db = models.DATABASE\n g.db.connect()\n g.user=current_user", "def load_model(self) -> Any:", "def start(self):\n \n self.db.session.add(self.sql_model_instance)\n self.db.session.commit()", "def setup_schema(self):\n models.Base.metadata.create_all(self.session.bind)", "def __init__(self, model):\n\t\tself.model = model", "def postLoad(self):\n pass", "def 
_execute(self, model_obj):", "def init_database(self):\n # init_database(self.engine)", "def setUp(self):\n super(PermissionsTestCase, self).setUp()\n self._set_model_to(True)", "def __init__(self):\n Model.__init__(self)\n self.rellenar_lista()", "def setup(cls):\n super().setup()\n cls.db = DBCommunication()", "def _during_execute(self, db):\n pass", "def __init__(self):\n self.model = None", "def __init__(self):\n self.model = None", "def setUp(self):\n super(MaintenanceModeMiddlewareTestCase, self).setUp()\n self._set_model_to(False)", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "def setUp(self):\n self.mock_model = Mock()", "def setUpClass(cls):\n cls.database_connection = DatabaseHandler(database_path)\n cls.database_connection.connect()\n processing.create_table_if_not_exist(cls.database_connection, table_name)\n cls.database_connection.close()", "def __init__(self):\n engine = connect_to_db()\n create_lyrics_table(engine) #declarative base stuff\n self.Session = sessionmaker(bind=engine)\n\n # self.create_connection()\n # self.create_table()", "def sync_tables():\n sync_table(ShoppingList)\n sync_table(User)\n sync_table(Category)\n sync_table(Feed)\n sync_table(News)\n sync_table(Photo)\n sync_table(Profile)\n sync_table(Video)\n sync_type(FeedPhoto)\n sync_type(NewsPhoto)", "def setUpClass(cls):\n print(\"Testing started for: \", cls.__name__)\n ENGINE.remove_database()\n # Create all DB tables\n ENGINE.create_tables()", "def boot(self):\n self._columns = ()\n self._creates = {}\n\n self._sql = \"\"\n self._sql_binding = \"\"\n self._bindings = ()\n\n self._updates = ()\n\n self._wheres = ()\n self._order_by = ()\n self._group_by = ()\n self._joins = ()\n self._having = ()\n\n self._aggregates = ()\n\n self._limit = False\n self._offset = False\n self.set_action(\"select\")" ]
[ "0.67817503", "0.6613017", "0.6608557", "0.6572093", "0.6526641", "0.6456574", "0.6391493", "0.63803416", "0.63562936", "0.63505673", "0.63026184", "0.62730545", "0.624931", "0.6238614", "0.62380886", "0.6188223", "0.6165031", "0.6163278", "0.61397433", "0.61384445", "0.6124946", "0.6114949", "0.6108393", "0.6089702", "0.6089702", "0.60856104", "0.60856104", "0.60856104", "0.60856104", "0.60856104", "0.60817367", "0.6079621", "0.60138893", "0.6008646", "0.59875923", "0.5966888", "0.5966721", "0.59620166", "0.594673", "0.59287643", "0.59223896", "0.59223896", "0.59223896", "0.59223896", "0.59223896", "0.59223896", "0.59223896", "0.59223896", "0.59223896", "0.59223896", "0.59223896", "0.59173846", "0.5912364", "0.59089524", "0.59048593", "0.58952075", "0.58936936", "0.5888525", "0.5880778", "0.5874688", "0.586827", "0.5866327", "0.586394", "0.5856933", "0.58473307", "0.584372", "0.584372", "0.584372", "0.584372", "0.584372", "0.58436835", "0.5834643", "0.5834643", "0.58247495", "0.5820413", "0.5818563", "0.5802374", "0.5802374", "0.580124", "0.5791257", "0.57896036", "0.57887155", "0.57826024", "0.5777233", "0.5765144", "0.57641727", "0.5760538", "0.57561576", "0.57553977", "0.5739142", "0.5731074", "0.5731074", "0.57242215", "0.5716806", "0.5716806", "0.57117194", "0.57040274", "0.57031935", "0.5702405", "0.56997836", "0.56939995" ]
0.0
-1
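A minimal usage sketch for the `init_model` document above, assuming a connection object compatible with `Domain`; `get_connection` and `User` are hypothetical names that do not appear in the snippet:

# Hypothetical bootstrap around init_model(); get_connection() and
# common.User are illustrative assumptions, not part of the snippet.
connection = get_connection()   # whatever Domain() expects, e.g. a SimpleDB-style handle
init_model(connection)          # binds every Model subclass to its Domain

# Only after init_model() returns may the tables or classes be used:
record = common.User.get('some-id')      # hypothetical Model subclass lookup
store_count = len(collection_to_class)   # one Domain per mapped class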
Sets up the database session
def setup():
    global connection
    connection = MySQLdb.connect(host=config.get('mysql.host'),
                                 user=config.get('mysql.user'),
                                 passwd=config.get('mysql.password'),
                                 db=config.get('mysql.db'),
                                 ssl={'ca': config.get('mysql.cert')})
    init_model(connection)
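A minimal caller sketch for the `setup` document above, assuming `config` exposes the mysql.* keys shown; the query itself is illustrative only:

# Hypothetical use of setup(); relies only on the module-global
# `connection` that setup() assigns and the standard MySQLdb cursor API.
setup()                         # open the TLS MySQL connection, wire the models
cursor = connection.cursor()
cursor.execute('SELECT 1')
print(cursor.fetchone())        # sanity check that the session is live
cursor.close()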
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_db_session():\n g.s = database.db_session()", "def setup_session():\n print(\"Setting up session\")\n engine = setup_engine()\n Base.metadata.bin = engine\n\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n\n return session", "def init_database(self):\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()", "def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)", "def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.Session = sessionmaker(bind=engine)", "def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()", "def init_db(self):\n\n # The user can provide a custom string\n if self.database is None:\n self.logger.error(\"You must provide a database url, exiting.\")\n sys.exit(1)\n\n self.engine = create_engine(self.database, convert_unicode=True)\n self.session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=self.engine)\n )\n\n # Database Setup\n Base.query = self.session.query_property()\n\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. Otherwise\n # you will have to import them first before calling init_db()\n import expfactory.database.models\n\n self.Base = Base\n self.Base.metadata.create_all(bind=self.engine)", "def setup_database(self):\n self.db.setup_database()", "def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)\n\n if self.sqlite_file is not None:\n dbname = 'sqlite:///%s' % self.sqlite_file\n self.sqlite_engine = create_engine(dbname, echo=False)\n self.sqlite_session = scoped_session(sessionmaker(bind=self.sqlite_engine))\n DB_Base.metadata.create_all(self.sqlite_engine)\n logger.info('Using SQLite %s' % self.sqlite_engine)", "def __init__(self):\n engine = db_connect()\n self.Session = sessionmaker(bind=engine)", "def setup(db_file):\n global session\n\n db_conn = \"sqlite:///%s\" % db_file\n logger.info(\"DB Connection: %s\" % db_conn)\n engine = create_engine(db_conn, connect_args={'check_same_thread':False})\n engine.Echo = True\n Base.metadata.create_all(engine)\n\n Session = scoped_session(sessionmaker(bind=engine))\n session = Session()\n print \"DB Connection: %s\" % db_conn", "def init_database(self):\n # init_database(self.engine)", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.session = sessionmaker(bind=engine)", "def __init__(self):\n engine = create_engine(\"postgresql://postgres:1@localhost:5432/postgres\")\n session_class = sessionmaker(bind=engine)\n self.session = session_class()", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = 
sessionmaker(bind=engine)", "def __init__(self):\n engine = db_connect()\n create_reals_table(engine)\n self.Session = sessionmaker(bind=engine)", "def init_database(self):\n init_database(self.engine)", "def setup_db():\n logger.info('Setting up db')\n setup_all_db()\n setup_emails()", "def prepare_database(config):\n global Session\n engine = sqlalchemy.create_engine(config.db_string)\n session_factory = sqlalchemy.orm.sessionmaker(bind=engine)\n Session = sqlalchemy.orm.scoped_session(session_factory)", "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "def _initTestingDB(): \n from sqlalchemy import create_engine\n engine = create_engine('sqlite://')\n from .models import (\n Base,\n TodoUser,\n )\n DBSession.configure(bind=engine)\n Base.metadata.create_all(engine)\n \n return DBSession", "def setup_db():\n engine = create_engine(settings.DATABASE)\n ModelBase.metadata.bind = engine\n ModelBase.metadata.create_all(engine)\n\n return scoped_session(sessionmaker(bind=engine))", "def initialize_database():\n\n global DatabaseSession\n from config import config\n\n engine = create_engine(\n config[saq.CONFIG['global']['instance_type']].SQLALCHEMY_DATABASE_URI, \n **config[saq.CONFIG['global']['instance_type']].SQLALCHEMY_DATABASE_OPTIONS)\n\n DatabaseSession = sessionmaker(bind=engine)\n saq.db = scoped_session(DatabaseSession)", "def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')", "def init_database(cls):\n conn = config.db_connection_string(Settings)\n cls.Engine = create_engine(conn, echo=Settings.get('DEBUG'))\n cls.Session = sessionmaker(bind=cls.Engine)\n return cls", "def connectDb(self):\n self.db = Database('sqlite',self.settings.sqlitefilename)\n self.db.user = self.session.getAttribute(self.settings.authenvar)\n self.db.settings = self.settings\n self.db.logger = self.logger\n self.db.cgiparam = self.cgiparam\n self.db.writelog = self.writelog", "def setup_db(app):\n\twith app.app_context():\n\t\t@app.teardown_appcontext\n\t\tdef shutdown_session(response_or_exc):\n\t\t\tdb_session.remove()\n\t\t\treturn response_or_exc", "def setup_user_db():\n with create_app().app_context():\n sess = GlobalDB.db().session\n insert_codes(sess)\n sess.commit()", "def init_db(connection, echo):\r\n\r\n # create the database tables as defined\r\n engine = create_engine(connection, echo=echo)\r\n Base.metadata.create_all(engine)\r\n\r\n # create a session\r\n Base.metadata.bind = engine\r\n BaseSession = sessionmaker(bind=engine)\r\n session = BaseSession()\r\n\r\n # set the shared Model session\r\n Model.use_session(session)\r\n\r\n return (engine, session)", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n 
self.Session = sessionmaker(bind=engine)\n logging.info(\"****DuplicatesPipeline: database connected****\")", "def setup_db_conn():\n # TODO update so DB does not have to be hard coded\n # Currently DB is hardcoded", "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n logging.info(\"****SaveRestaurantsPipeline: database connected****\")", "def initialize_database():\n # Create the schema\n Base.metadata.create_all(engine)\n\n # Create a connection/database session\n session = Session()\n\n # Now, create a few restaurants:\n cupcake = Restaurant(name=\"Cupcakes\")\n five_guys = Restaurant(name=\"Five Guys\")\n ihop = Restaurant(name=\"IHOP\")\n\n # And a few users:\n mike = User(name=\"Mike\")\n ryan = User(name=\"Ryan\")\n\n # And finally a few votes:\n mike.preferences.append(Preference(vote=\"+1\", restaurant=five_guys))\n ryan.preferences.append(Preference(vote=\"+0\", restaurant=five_guys))\n ryan.preferences.append(Preference(vote=\"-0\", restaurant=cupcake))\n\n session.add(mike)\n session.add(ryan)\n session.add(ihop)\n\n session.commit()\n\n session.close()", "def __init__(self, db_session):\n self.db_session = db_session", "def init_db():\n return SQLAlchemy(app, session_options={\n 'expire_on_commit': False,\n })", "def init_db():\n db.drop_all()\n db.create_all()\n\n print(\"Initialized Connect 4 Database.\")", "def _setup_db(app: FastAPI) -> None:\n engine = create_async_engine(str(settings.db_url), echo=settings.db_echo)\n session_factory = async_scoped_session(\n sessionmaker(\n engine,\n expire_on_commit=False,\n class_=AsyncSession,\n ),\n scopefunc=current_task,\n )\n app.state.db_engine = engine\n app.state.db_session_factory = session_factory", "def database_session():\n if \"CI\" in os.environ:\n con = psycopg2.connect(\n host=os.environ[\"POSTGRES_HOST\"],\n port=os.environ[\"POSTGRES_PORT\"],\n user=os.environ[\"PGUSER\"],\n password=os.environ[\"PGPASSWORD\"],\n )\n else:\n con = psycopg2.connect(host=\"127.0.0.1\", port=\"5432\")\n # Setup\n con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cursor = con.cursor()\n cursor.execute(f'create database \"{DB}\";')\n session = Meta.init(CONN_STRING).Session()\n yield session\n\n # Teardown\n engine = session.get_bind()\n session.close()\n engine.dispose()\n Meta.engine = None\n\n cursor.execute(f'drop database \"{DB}\";')\n cursor.close()\n con.close()", "def initdb():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def __init__(self):\n engine = connect_to_db()\n create_lyrics_table(engine) #declarative base stuff\n self.Session = sessionmaker(bind=engine)\n\n # self.create_connection()\n # self.create_table()", "def _create_db_session(self):\r\n session = Session()\r\n try:\r\n yield session\r\n session.commit()\r\n except Exception:\r\n session.rollback()\r\n raise\r\n finally:\r\n session.close()", "def init_db():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def init_db():\n\tdb.drop_all()\n\tdb.create_all()\n\n\tprint(\"Initialized Database.\")\n\treturn", "def create_db_session(self):\n mysql_conn_str = f\"mysql+pymysql://{self.DB_USER}:{self.DB_PSWD}@{self.DB_HOST}:{self.DB_PORT}/{self.DB_NAME}\"\n engine = create_engine(mysql_conn_str)\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n self.create_tables(engine)\n return 
session", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()", "def setup_session(self, transaction_retries=3):\n\n if not self.is_enabled(Subsystem.database):\n raise RuntimeError(\"Database subsystem was not enabled\")\n\n self.Session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=self.engine))\n\n self.conflict_resolver = ConflictResolver(self.open_session, self.transaction_retries)\n\n for name, coin in self.coins.all():\n coin.wallet_model.backend = coin.backend\n coin.address_model.backend = coin.backend\n coin.transaction_model.backend = coin.backend\n coin.account_model.backend = coin.backend", "def init_db():\n current_app.logger.info('Creating database...')\n db.drop_all()\n db.create_all()\n db.session.commit()", "def __init__(self, session):\n self.session = session\n self.dbi = DBInterface(self.session)", "def setdb():\n\n if not database_exists(DB_URL):\n print('Creating database.')\n create_database(DB_URL)\n\n print('Creating tables.')\n db.create_all()\n print('Shiny!')", "def __init__(self):\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.\n format(getenv('HBNB_MYSQL_USER'),\n getenv('HBNB_MYSQL_PWD'),\n getenv('HBNB_MYSQL_HOST'),\n getenv('HBNB_MYSQL_DB')),\n pool_pre_ping=True)\n\n if getenv('HBNB_ENV') == 'test':\n \"\"\" Drop all tables\"\"\"\n Base.metadata.drop_all(self.__engine)\n\n Base.metadata.create_all(self.__engine)\n Session = sessionmaker(self.__engine)\n self.__session = Session()", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def initDb():\n createDb()\n admin = User(\n name=\"faby\",\n lastname=\"star\",\n username=\"faby\",\n email=\"star._faby@hotmail.com\",\n isAdmin=True,\n cellphone=\"0983856136\",\n )\n admin.onSetPassord(\"faby123\")\n db.session.add(admin)\n db.session.commit()", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def setup(self):\n self.load_connection_info(self.ini_filename)\n if self.conn_info:\n self.logger.info('Load connection info of Postgres')\n\n psql_connection_info = f\"dbname={self.conn_info['dbname']} \" \\\n f\"user={self.conn_info['user']} \" \\\n f\"password={self.conn_info['password']} \" \\\n f\"port={self.conn_info['port']}\" \n \n check_db = self.create_db(psql_connection_info)\n\n connection = psycopg2.connect((\n f\"dbname=password_manager \" \\\n f\"user={self.conn_info['user']} \" \\\n f\"password={self.conn_info['password']} \" \\\n f\"port={self.conn_info['port']}\")) \n cursor = connection.cursor()\n\n if check_db:\n self.logger.info('Database has been created')\n\n check_tables = self.create_tables(connection, \n cursor, \n self.sql_query_table_person, \n self.sql_query_table_login_data)\n \n if check_tables:\n self.logger.info('Tables have been created')\n else:\n self.logger.info('Tables do not exist')\n else:\n self.logger.info('Database does not exist')\n \n connection.close()\n cursor.close()\n else:\n self.logger.info('Connection to Postgres could not esablished')", "def init_session(connection_string=None, drop=False):\n if connection_string is None:\n engine = create_engine('sqlite://',\n echo=False,\n connect_args={'check_same_thread':False},\n poolclass=StaticPool)\n else:\n engine = create_engine(connection_string)\n\n from database.model import Base\n\n global session\n\n if drop:\n try:\n old_session = session\n 
Base.metadata.drop_all(bind=old_session.bind)\n except:\n pass\n\n db_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\n Base.metadata.create_all(bind=engine)\n\n session = db_session", "def set_db_session(cls, session) -> scoped_session or Session:\n cls._db_session = session", "def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()", "def setUp(self):\n\n self.client = server.app.test_client()\n server.app.config['TESTING'] = True\n server.app.config['SECRET_KEY'] = \"123\"\n\n # Connect to test database\n model.connect_to_db(server.app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n model.db.create_all()\n # example_data()\n\n with self.client as c:\n with c.session_transaction() as session:\n session['user_id'] = 1\n session['username'] = 'j'\n session['name'] = 'l'\n session['cal_id'] = 1", "def init_database():\n exists = Agent.query.all()\n if exists is None or len(exists) == 0:\n # Setting up agent\n agent = Agent(name='OpenCampus',\n about=\"Este es el chabot de Open Campus capaz de resolver dudas sobre los diferentes cursos de la oferta actual de Open Campus\")\n\n db.session.add(agent)\n\n # Setting upd properties\n\n description_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/description\")\n begin_date_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/beginDate\")\n end_date_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/endDate\")\n requirement_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/requirement\")\n duration_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/duration\")\n cost_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/cost\")\n teacher_name_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/teacherName\")\n content_name_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/content\")\n course_name_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/courseName\")\n\n # db.session.add(description_prop)\n # db.session.add(begin_date_prop)\n # db.session.add(end_date_prop)\n # db.session.add(requirement_prop)\n # db.session.add(duration_prop)\n # db.session.add(cost_prop)\n # db.session.add(teacher_name_prop)\n # db.session.add(content_name_prop)\n # db.session.add(course_name_prop)\n\n # Setting up answers\n\n ObtenerInformacionAnswer = Answer(uri=\"ObtenerInformacionAnswer\", answer_template=\"{%description%}\",\n properties=[description_prop])\n\n # db.session.add(ObtenerInformacionAnswer)\n ObtenerFechasAnswer = Answer(uri=\"ObtenerFechasAnswer\",\n answer_template=\"Las fechas importantes del curso son {%beginDate%} y termina el dia {%endDate%}\",\n properties=[begin_date_prop, end_date_prop])\n\n # db.session.add(ObtenerFechasAnswer)\n ObtenerFechasInicioAnswer = Answer(uri=\"ObtenerFechasInicioAnswer\",\n answer_template=\"El curso inicia el dia {%beginDate%}\",\n properties=[begin_date_prop])\n # db.session.add(ObtenerFechasInicioAnswer)\n ObtenerFechasFinAnswer = Answer(uri=\"ObtenerFechasFinAnswer\",\n answer_template=\"El curso finaliza el dia {%endDate%}\",\n properties=[end_date_prop])\n # db.session.add(ObtenerFechasFinAnswer)\n ObtenerPrerequisitosAnswer = Answer(uri=\"ObtenerPrerequisitosAnswer\",\n answer_template=\"Los prerequisitos del curso son {%requirement%}\",\n properties=[requirement_prop])\n # db.session.add(ObtenerPrerequisitosAnswer)\n ObtenerDuracionAnswer = 
Answer(uri=\"ObtenerDuracionAnswer\",\n answer_template=\"El curso tiene una duracion de {%duration%}\",\n properties=[duration_prop])\n # db.session.add(ObtenerDuracionAnswer)\n ObtenerPrecioAnswer = Answer(uri=\"ObtenerPrecioAnswer\", answer_template=\"{%cost%}\", properties=[cost_prop])\n # db.session.add(ObtenerPrecioAnswer)\n ObtenerDocenteAnswer = Answer(uri=\"ObtenerDocenteAnswer\",\n answer_template=\"El docente encargado del curso es {%teacherName%}\",\n properties=[teacher_name_prop],\n refers_to=\"http://127.0.0.1/ockb/course/ontology/hasTeacher\")\n # db.session.add(ObtenerDocenteAnswer)\n ObtenerContenidosAnswer = Answer(uri=\"ObtenerContenidosAnswer\",\n answer_template=\"Los contenidos a tratar en el curso son {%content%}\",\n properties=[content_name_prop],\n refers_to=\"http://127.0.0.1/ockb/course/ontology/hasContenido\")\n # db.session.add(ObtenerContenidosAnswer)\n ListarCursosAnswer = Answer(uri=\"ListarCursosAnswer\",\n answer_template=\"Los cursos de la oferta actual son: {%courseName%}\",\n properties=[course_name_prop],\n refers_to=\"http://127.0.0.1/ockb/course/ontology/hasCourse\",\n answer_from=\"http://127.0.0.1/ockb/resources/OpenCampusFebrero-Julio\")\n\n # Setting up resolution\n ObtenerInformacionResolution = Resolution(uri=\"ObtenerInformacionResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerFechasResolution = Resolution(uri=\"ObtenerFechasResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerFechasInicioResolution = Resolution(uri=\"ObtenerFechasInicioResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerFechasFinResolution = Resolution(uri=\"ObtenerFechasFinResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerPrerequisitosResolution = Resolution(uri=\"ObtenerPrerequisitosResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerDuracionResolution = Resolution(uri=\"ObtenerDuracionResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerPrecioResolution = Resolution(uri=\"ObtenerPrecioResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerDocenteResolution = Resolution(uri=\"ObtenerDocenteResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerContenidosResolution = Resolution(uri=\"ObtenerContenidosResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n\n # Setting up Entity\n\n curso_entity = Entity(name=\"http://127.0.0.1/ockb/course/resource/Course\")\n\n # setting up synonyms:\n Synonym(name=\"Mooc\", entity=curso_entity)\n Synonym(name=\"Taller\", entity=curso_entity)\n Synonym(name=\"Curso\", entity=curso_entity)\n Synonym(name=\"Open Course\", entity=curso_entity)\n\n # Setting up intents\n\n ObtenerInformacion = Intent(name=\"ObtenerInformacion\", agent=agent,\n description=\"Obtener una breve descripcion del curso\",\n answer=ObtenerInformacionAnswer, resolution=ObtenerInformacionResolution,\n entities=[curso_entity])\n ObtenerFechas = Intent(name=\"ObtenerFechas\", agent=agent,\n description=\"Obtener 
las fechas importantes del curso\",\n answer=ObtenerFechasAnswer, resolution=ObtenerFechasResolution, entities=[curso_entity])\n ObtenerFechasInicio = Intent(name=\"ObtenerFechasInicio\", agent=agent,\n description=\"Obtener las fechas de inicio del curso\",\n answer=ObtenerFechasInicioAnswer, resolution=ObtenerFechasInicioResolution,\n entities=[curso_entity])\n ObtenerFechasFin = Intent(name=\"ObtenerFechasFin\", agent=agent,\n description=\"Obtener las fechas de finalizacion del curso\",\n answer=ObtenerFechasFinAnswer, resolution=ObtenerFechasFinResolution,\n entities=[curso_entity])\n ObtenerPrerequisitos = Intent(name=\"ObtenerPrerequisitos\", agent=agent,\n description=\"Obtener prerequisitos del curso\",\n answer=ObtenerPrerequisitosAnswer,\n resolution=ObtenerPrerequisitosResolution)\n ObtenerDuracion = Intent(name=\"ObtenerDuracion\", agent=agent,\n description=\"Obtener la duracion del curso\", answer=ObtenerDuracionAnswer,\n resolution=ObtenerDuracionResolution, entities=[curso_entity])\n ObtenerPrecio = Intent(name=\"ObtenerPrecio\", agent=agent, description=\"Obtener el precio del curso\",\n answer=ObtenerPrecioAnswer, resolution=ObtenerPrecioResolution, entities=[curso_entity])\n ObtenerDocente = Intent(name=\"ObtenerDocente\", agent=agent,\n description=\"Obtener los nombres de los docentes del curso\",\n answer=ObtenerDocenteAnswer, resolution=ObtenerDocenteResolution,\n entities=[curso_entity])\n ObtenerContenidos = Intent(name=\"ObtenerContenidos\", agent=agent,\n description=\"Obtener los contenidos del curso\",\n answer=ObtenerContenidosAnswer, resolution=ObtenerDocenteResolution,\n entities=[curso_entity])\n ListarCursos = Intent(name=\"ListarCursos\", agent=agent,\n description=\"Presentar la oferta actual de cursos\", answer=ListarCursosAnswer,\n resolution=ObtenerContenidosResolution)\n # Setting up sentences\n\n Sentence(intent=ObtenerInformacion, sentence=\"De que trata el mooc?\")\n Sentence(intent=ObtenerInformacion, sentence=\"Quiero informacion del curso de emprendimiento\")\n Sentence(intent=ObtenerInformacion, sentence=\"Muestrame un resumen del mooc?\")\n Sentence(intent=ObtenerInformacion, sentence=\"Breve introducción al curso\")\n Sentence(intent=ObtenerInformacion, sentence=\"que es emprendimiento\")\n Sentence(intent=ObtenerInformacion, sentence=\"De que se trata el curso?\")\n Sentence(intent=ObtenerInformacion, sentence=\"De qué va el curso?\")\n Sentence(intent=ObtenerInformacion, sentence=\"Me ayudas con información acerca del curso?\")\n Sentence(intent=ObtenerFechas, sentence=\"Cuáles son las fechas importantes del curso?\")\n Sentence(intent=ObtenerFechas, sentence=\"Fechas clave del curso\")\n Sentence(intent=ObtenerFechas, sentence=\"Que fechas debo tomar en cuenta\")\n Sentence(intent=ObtenerFechas, sentence=\"fechas de inicio y fin\")\n Sentence(intent=ObtenerFechas, sentence=\"Cuándo comienza el curso?\")\n Sentence(intent=ObtenerFechas, sentence=\"Fechas importantes del curso de inteligencia artificial\")\n Sentence(intent=ObtenerFechas, sentence=\"Cuáles son las fechas importantes del curso de emprendimiento\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Cuándo inicia el curso de emprendimiento\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Cuándo empiezan los cursos ?\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Fecha de inicio de los moocs?\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Día de inicio de los moocs ?\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"En que fecha inician los moocs?\")\n 
Sentence(intent=ObtenerFechasInicio, sentence=\"A partir de que fecha empiezan los mooc?\")\n Sentence(intent=ObtenerFechasFin, sentence=\"Cuando finaliza el curso?\")\n Sentence(intent=ObtenerFechasFin, sentence=\"En que fecha termina el curso?\")\n Sentence(intent=ObtenerFechasFin, sentence=\"Cuando termina el curso?\")\n Sentence(intent=ObtenerPrerequisitos,\n sentence=\"Cuáles son los requisitos necesarios para el curso de emprendimiento\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Cuáles son los prerequisitos?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Requisitos previos de ingreso al curso\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Dame a conocer los prerequisitos\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Me puedes indicar los prerequistos necesarios?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Que necesito saber antes de iniciar el curso\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"que se necesita saber para este curso\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Los pre requisitos cuales son?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué se necesita?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué debería saber para tomar el curso?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué conocimientos previos debo tener?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué tengo que saber?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Requisitos previos\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Conocimientos previos\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuanto dura el curso de empendimiento\")\n Sentence(intent=ObtenerDuracion, sentence=\"Duración del curso\")\n Sentence(intent=ObtenerDuracion, sentence=\"Número de horas del mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"En cuántas semanas se realiza el curso?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuanto dura el curso\")\n Sentence(intent=ObtenerDuracion, sentence=\"Tiempo que dura un curso?\")\n Sentence(intent=ObtenerDuracion, sentence=\"cuanto puede durar un curso mooc\")\n Sentence(intent=ObtenerDuracion, sentence=\"cuanto dura el curso?\")\n Sentence(intent=ObtenerDuracion, sentence=\"cual es la duracion de psicologia social?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuánto tiempo dura el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"De cuántas semanas es el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuántas horas dura el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuánto tiempo dura el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"De cuántas semanas es el mooc?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cual es el precio del curso de emprendimiento\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuál es el precio?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuánto vale?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Valor del curso?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Costo\")\n Sentence(intent=ObtenerPrecio, sentence=\"Inversión total del curso\")\n Sentence(intent=ObtenerPrecio, sentence=\"cual es el valor de los componentes?\")\n Sentence(intent=ObtenerPrecio, sentence=\"costo de los cursos?\")\n Sentence(intent=ObtenerPrecio, sentence=\"cuanto cuesta los cursos\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuál es precio del curso?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuánto cuesta el mooc de Administración Empresarial?\")\n Sentence(intent=ObtenerPrecio, sentence=\"tiene algun valor los cursos?\")\n 
Sentence(intent=ObtenerPrecio, sentence=\"Cuanto cuesta el curso Método Toyota?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuanto cuesta un curso?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Que vale el curso ?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Es gratis?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuanto vale el mooc?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Precio\")\n Sentence(intent=ObtenerDocente, sentence=\"Cual es el docente del curso de emprendimiento\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién es mi profesor en el curso?\")\n Sentence(intent=ObtenerDocente, sentence=\"Docente del mooc?\")\n Sentence(intent=ObtenerDocente, sentence=\"Qué docente imparte el mooc?\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién es el docente encargado de la materia?\")\n Sentence(intent=ObtenerDocente, sentence=\"Nombre del docente del mooc\")\n Sentence(intent=ObtenerDocente, sentence=\"Que profesor esta a cargo del curso\")\n Sentence(intent=ObtenerDocente, sentence=\"cual es mi docente del mooc\")\n Sentence(intent=ObtenerDocente, sentence=\"información del docente\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién es el docente encargado?\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién va a dar el MOOC\")\n Sentence(intent=ObtenerDocente, sentence=\"Que docente acompaña al estudiante?\")\n Sentence(intent=ObtenerDocente, sentence=\"Cual es el profe de Salud Sexual y Reproductiva\")\n Sentence(intent=ObtenerContenidos, sentence=\"Cuáles son los contenidos a tratar en el curos de emprendimiento\")\n Sentence(intent=ObtenerContenidos, sentence=\"Contenido del curso\")\n Sentence(intent=ObtenerContenidos, sentence=\"Cuál es la temática de cada curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"Qué temas se van a tratar en cada curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"De que se tratan los moocs\")\n Sentence(intent=ObtenerContenidos, sentence=\"Cuáles son las temas del curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"Que se va a tratar en este curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"Qué se va a dar en el curso?\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos hay\")\n Sentence(intent=ListarCursos, sentence=\"Muestrame los cursos\")\n Sentence(intent=ListarCursos, sentence=\"Cual es la oferta actual\")\n Sentence(intent=ListarCursos, sentence=\"Cuentame que cursos tienes\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos me ofreces\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos estan disponibles\")\n Sentence(intent=ListarCursos, sentence=\"Listame los cursos\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos tiene\")\n\n # db.session.add(intent_obtenerinformacion)\n # db.session.add(intent_obtenerfechas)\n # db.session.add(intent_obtenerfechasinicio)\n # db.session.add(intent_obtenerfechasfin)\n # db.session.add(intent_obtenerprerequisitos)\n # db.session.add(intent_obtenerduracion)\n # db.session.add(intent_obtenerprecio)\n # db.session.add(intent_obtenerdocente)\n # db.session.add(intent_obtenercontenidos)\n # db.session.add(intent_listarCursos)\n\n db.session.commit()", "def setup_db(config):\n # create the URL from the configuration\n db_url = URL(**config)\n db = create_engine(db_url)\n Session = sessionmaker(db)\n # Link the relational model to the database\n Model.metadata.create_all(db)\n\n return db, Session", "def setUp(self):\n\n self.client = server.app.test_client()\n server.app.config['TESTING'] = True\n server.app.config['SECRET_KEY'] = \"123\"\n\n # 
Connect to test database\n model.connect_to_db(server.app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n model.db.create_all()\n # example_data()\n\n with self.client as c:\n with c.session_transaction() as session:\n session['user_id'] = 33\n session['username'] = 'balloonicorn'\n session['name'] = 'balloonicorn'", "def __init_database(self):\n from admin.database import init_db\n init_db()", "def setup_db():\n\n engine = config['tg.app_globals'].sa_engine\n # model.init_model(engine)\n # model.metadata.create_all(engine)", "def connect(cls):\n engine = create_engine(cls._build_uri(), echo=False)\n cls.Session = sessionmaker()\n cls.Session.configure(bind=engine)", "def setUp(self):\n db.create_all()\n user = User(\"eschoppik\", \"secret\", \"Elie S\", \"elie@elie.com\")\n db.session.add(user)\n db.session.commit()", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def init(self):\n self.db.connect()\n try:\n self.db.create_tables([JambiModel], safe=True)\n JambiModel.create(ref='0')\n self.logger.info('Database initialized')\n except IntegrityError:\n self.logger.info('Database was already initialized')\n self.db.close()", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def set_up(self, drop=False):\n\n # todo extract database name from engine url and report for brevity\n engine = self.__orm.engine\n if database_exists(engine.url):\n print(\"Database {} already exists.\".format(engine.url))\n if drop:\n print(\"Dropping old database {}\".format(engine.url))\n drop_database(engine.url)\n with possibly_talking_action(\"Re-creating database...\"):\n create_database(engine.url)\n else:\n with possibly_talking_action(\"Creating database...\"):\n create_database(engine.url)\n\n with possibly_talking_action(\"Creating tables...\"):\n Base.metadata.create_all(engine)\n\n print(\"Database {} created successfully\".format(engine.url))", "def bootstrap(self):\n\n self.db = connection_manager.get(DbConnection, host=self.ip, port=3306, user=self.user, password=self.password)\n\n self.connected = True", "def init_session(self):\n pass", "def init_session(self):\n pass", "def init():\n print(\"Executing initialization\")\n print(db.dsn)\n cursor = yield momoko.Op(\n db.execute,\n \"\"\"\n DROP SCHEMA public CASCADE;\n CREATE SCHEMA public;\n CREATE TABLE game\n (\n game_id text PRIMARY KEY,\n players integer,\n state bytea,\n timestamp timestamp\n );\n CREATE UNIQUE INDEX ix_game_id\n ON game\n (game_id);\n CREATE INDEX ix_timestamp\n ON game\n (timestamp);\n \"\"\")\n try:\n print(cursor.fetchall())\n except psycopg2.ProgrammingError:\n pass\n io = ioloop.IOLoop.instance()\n io.stop()", "def setUp(self):\n\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"ABC\"\n self.client = app.test_client()\n\n # Connect to test database\n connect_to_db(app)\n db.drop_all()\n db.create_all()\n load_test()\n\n # Put user1 into session.\n with self.client as c:\n with c.session_transaction() as sess:\n sess[\"current_user\"] = 1", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"Capstone\"\n 
self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app , self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n\n self.db.create_all()", "def setup_db(self) -> None:\n conn = mysql.connector.connect(\n user=self.app.config[\"DATABASE_USER\"], password=self.app.config[\"DATABASE_PASSWORD\"],\n host=self.app.config[\"DATABASE_HOST\"], port=self.app.config[\"DATABASE_PORT\"], raise_on_warnings=True\n )\n try:\n cursor = conn.cursor()\n cursor.execute(\n \"CREATE DATABASE IF NOT EXISTS {} CHARACTER SET utf8\".format(self.app.config[\"DATABASE_NAME\"])\n )\n conn.commit()\n except:\n raise\n else:\n with self.DBManager(self.app) as connection:\n for model in sorted(lib.get_subclasses(lib.models.Model), key=lambda x: x.index):\n model.setup_table(connection=connection)\n finally:\n conn.close()", "def initialize():\n \n db.connect()\n db.create_tables([Product], safe=True)", "def database_session_wide(app) -> SQLAlchemy:\n if os.path.exists(get_test_database_path()):\n os.unlink(get_test_database_path())\n\n from pipwatch_api.datastore.models import DATABASE\n DATABASE.init_app(app=app)\n DATABASE.create_all()\n yield DATABASE\n\n DATABASE.drop_all()\n DATABASE.session.close()\n\n os.unlink(get_test_database_path())", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def connect():\n # global ENGINE\n # global Session\n\n # ENGINE = create_engine(\"sqlite:///ratings.db\", echo=True)\n # Session = sessionmaker(bind=ENGINE)\n\n # return Session()\n pass", "def init_database(use_mysql=False, dbname=\"sbs\"):\n #engine = create_engine('sqlite:///:memory:', echo=False)\n # \"mysql+mysqldb://{user}:{password}@{host}:{port}/{dbname}\"\n if use_mysql:\n db_setup = dict(user=os.environ.get('MYSQL_LOGIN'),\n password=os.environ.get('MYSQL_PASSWORD'),\n host=\"127.0.0.1\",\n port=os.environ.get('MYSQL_PORT', 3006),\n dbname=dbname\n )\n mysql_setup = \"mysql+mysqldb://{user}:{password}@{host}:{port}/{dbname}?charset=utf8\".format(**db_setup)\n engine = create_engine(mysql_setup, echo=False)\n else:\n engine = create_engine('sqlite:///data.sqlite', echo=False)\n event.Base.metadata.create_all(engine)\n gameinfomodel.Base.metadata.create_all(engine)\n playerinfo.Base.metadata.create_all(engine)\n teaminfomodel.Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n return session", "def setup_db():\n db = TinyDB('db.json')\n chats = db.table('chats')\n members = db.table('members')\n chats.insert({'id': -231128423}) # Kolab chat group\n members.insert({'id': 235493361})", "def init_db(self):\n print(\"Initializing database...\", end='')\n self.cursor.execute(\"DROP DATABASE %s\" % self.db.database)\n self.__init__(self.db_name)\n self.cursor.execute(\"USE %s\" % self.db.database)\n\n # Book\n self.cursor.execute(\n \"\"\"CREATE TABLE Book (\n ISBN VARCHAR(13),\n title VARCHAR(300) COLLATE utf8_general_ci,\n publisher VARCHAR(100) COLLATE utf8_general_ci,\n lang VARCHAR(40),\n publicationDate DATE,\n pageCount SMALLINT CHECK(pageCount >= 0),\n stock SMALLINT CHECK(stock >= 0),\n price DECIMAL(5,2),\n subject VARCHAR(100),\n avg_rating DECIMAL(4,2) CHECK(avg_rating <= 10.00),\n total_rating_score INT DEFAULT 0,\n num_ratings INT DEFAULT 0,\n PRIMARY KEY (ISBN))\"\"\")\n\n # 
Author\n self.cursor.execute(\n \"\"\"CREATE TABLE Author (\n ID INT AUTO_INCREMENT,\n name VARCHAR(200) COLLATE utf8_general_ci,\n lang VARCHAR(40),\n PRIMARY KEY (ID))\"\"\")\n\n # CustomerPersonal\n self.cursor.execute(\n \"\"\"CREATE TABLE CustomerPersonal (\n phone CHAR(10),\n address VARCHAR(300) NOT NULL,\n PRIMARY KEY (phone))\"\"\")\n\n # CustomerCredentials\n self.cursor.execute(\n \"\"\"CREATE TABLE CustomerCredentials (\n loginID VARCHAR(30),\n firstName VARCHAR(50) NOT NULL,\n lastName VARCHAR(50) NOT NULL,\n salt VARBINARY(32) NOT NULL,\n pass_key VARBINARY(32) NOT NULL,\n phone CHAR(10) NOT NULL,\n PRIMARY KEY (loginID),\n FOREIGN KEY (phone) REFERENCES CustomerPersonal(phone)\n ON UPDATE CASCADE ON DELETE RESTRICT)\"\"\")\n\n # ManagerPersonal\n self.cursor.execute(\n \"\"\"CREATE TABLE ManagerPersonal (\n phone CHAR(10),\n address VARCHAR(300) NOT NULL,\n PRIMARY KEY (phone))\"\"\")\n\n # ManagerCredentials\n self.cursor.execute(\n \"\"\"CREATE TABLE ManagerCredentials (\n loginID VARCHAR(30),\n managerID INT UNIQUE NOT NULL AUTO_INCREMENT,\n firstName VARCHAR(50),\n lastName VARCHAR(50),\n salt VARBINARY(32) NOT NULL,\n pass_key VARBINARY(32) NOT NULL,\n phone CHAR(10) NOT NULL,\n PRIMARY KEY (loginID),\n FOREIGN KEY (phone) REFERENCES ManagerPersonal(phone)\n ON UPDATE CASCADE ON DELETE RESTRICT)\"\"\")\n\n # Comment\n self.cursor.execute(\n \"\"\"CREATE TABLE Comment (\n commentID INT AUTO_INCREMENT,\n ISBN VARCHAR(13) NOT NULL,\n loginID VARCHAR(30) NOT NULL,\n score TINYINT NOT NULL,\n message TEXT,\n veryUseful INT DEFAULT 0,\n useful INT DEFAULT 0,\n useless INT DEFAULT 0,\n avg_usefulness DECIMAL (3,2),\n commentDate DATETIME,\n PRIMARY KEY (commentID),\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE,\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # OrderLog\n self.cursor.execute(\n \"\"\"CREATE TABLE OrderLog (\n orderNumber INT AUTO_INCREMENT,\n loginID VARCHAR(30) NOT NULL,\n orderDate DATE,\n PRIMARY KEY (orderNumber),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # Return Request\n self.cursor.execute(\n \"\"\"CREATE TABLE ReturnRequest (\n requestID INT AUTO_INCREMENT,\n orderNumber INT NOT NULL,\n requestDate DATE,\n ISBN VARCHAR(13) NOT NULL,\n quantity SMALLINT,\n status VARCHAR(25) DEFAULT 'PENDING',\n PRIMARY KEY (requestID),\n FOREIGN KEY (orderNumber) REFERENCES OrderLog(orderNumber)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # # HasKeyword\n # self.cursor.execute(\n # \"\"\"CREATE TABLE HasKeyword (\n # ISBN VARCHAR(13),\n # word VARCHAR(50) COLLATE utf8_general_ci,\n # PRIMARY KEY (ISBN, word),\n # FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n # ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # Wrote\n self.cursor.execute(\n \"\"\"CREATE TABLE Wrote (\n authorID INT,\n ISBN VARCHAR(13),\n PRIMARY KEY (authorID, ISBN),\n FOREIGN KEY (authorID) REFERENCES Author(ID)\n ON UPDATE RESTRICT ON DELETE RESTRICT,\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # ProductOf\n self.cursor.execute(\n \"\"\"CREATE TABLE ProductOf (\n ISBN VARCHAR(13),\n orderNumber INT,\n quantity SMALLINT CHECK(quantity > 0),\n PRIMARY KEY (ISBN, orderNumber),\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE,\n FOREIGN KEY (orderNUmber) REFERENCES OrderLog(orderNumber)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # Trusts\n 
self.cursor.execute(\n \"\"\"CREATE TABLE Trusts (\n loginID VARCHAR(30),\n otherLoginID VARCHAR(30) CHECK(loginID<>otherLoginID),\n trustStatus VARCHAR(9) CHECK(trustStatus = 'TRUSTED' OR trustStatus = 'UNTRUSTED'),\n PRIMARY KEY (loginID, otherLoginID),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (otherLoginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # Rates\n self.cursor.execute(\n \"\"\"CREATE TABLE Rates (\n loginID VARCHAR(30),\n commentID INT,\n rating VARCHAR(10) NOT NULL,\n PRIMARY KEY (loginID, commentID),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (commentID) REFERENCES Comment(commentID)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\"\n )\n\n print(\"done\")", "def main():\n settings = {}\n settings['sqlalchemy.url'] = os.environ['DATABASE_URL']\n config = Configurator(settings=settings)\n config.include('TechLurker.models')\n SessionFactory = config.registry[\"dbsession_factory\"]\n session = SessionFactory()\n return session", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgres://{}:{}@{}/{}\".format('student',\n 'student', 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def db_session(request, config):\n from h import db\n engine = db.make_engine(config.registry.settings)\n session = db.Session(bind=engine)\n try:\n yield session\n finally:\n session.close()\n engine.dispose()", "def setup_database():\n\n user = 'bard'\n password = 'STORY'\n database = 'story'\n DSN = f\"postgresql://{user}:{password}@postgres:5432/{database}\"\n engine = create_engine(DSN)\n register_tables(engine)\n return engine", "def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()", "def setup():\n\tfrom webnotes.db import Database\n\twebnotes.conn = Database()\n\twebnotes.session = {'user':'Administrator', 'profile':'Administrator'}", "def __init__(self, config):\n\n engine = self.__my_create_engine(config)\n\n if not engine:\n raise Exception(\"No engine created\")\n\n engine.connect()\n #metadata = MetaData(bind=engine)\n Session = sessionmaker(bind=engine)\n\n # Set the objects to work with\n self.session = Session()", "def before_request():\n\tg.db = sql.connect(host=cfg.dbhost, port=cfg.dbport, user=cfg.user,\\\n\t\tpasswd=cfg.password, db=cfg.database,\\\n\t\tcharset=cfg.charset)", "async def initialize(\n self,\n url: str,\n password: str | None,\n *,\n isolation_level: Optional[str] = None,\n ) -> None:\n if self._override_engine:\n self._session = await create_async_session(self._override_engine)\n else:\n self._engine = create_database_engine(\n url, password, isolation_level=isolation_level\n )\n self._session = await create_async_session(self._engine)", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"databasename\"\n self.database_path = \"postgresql://postgres:usman@{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n 
self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia\"\n self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n self.database_path = \"postgres://{}:{}@{}/{}\".format('postgres', 'postgres', 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True\n\n #To test sessions we need to set Secret key \n app.config['SECRET_KEY'] = 'key'\n\n # Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n db.create_all()\n users()\n reviews()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = 1", "def dbsession(cls):\n sqlahelper = cls.dbsqlahelper\n return sqlahelper.getmake_session()", "def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados", "def create_db_session():\n engine = engine = create_engine('sqlite:///budget.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n return session" ]
[ "0.7948405", "0.7780912", "0.74744767", "0.7390211", "0.7389746", "0.7370456", "0.73018765", "0.7287071", "0.7270587", "0.72254795", "0.721942", "0.7173008", "0.71630186", "0.7160225", "0.7144433", "0.7144433", "0.71353585", "0.7134453", "0.7132723", "0.71059585", "0.7102454", "0.7028066", "0.7022645", "0.69925594", "0.69357485", "0.69328105", "0.69127566", "0.6897379", "0.6876068", "0.68313557", "0.67996657", "0.6776066", "0.6771628", "0.6759701", "0.673852", "0.6734616", "0.6730911", "0.67249876", "0.6719293", "0.6708544", "0.67065537", "0.6701767", "0.6699347", "0.6692281", "0.6688752", "0.66885966", "0.66558707", "0.6655771", "0.6653758", "0.6647695", "0.6633899", "0.663318", "0.6627643", "0.6612512", "0.6587622", "0.6577988", "0.65684986", "0.6568295", "0.656377", "0.6562465", "0.6556413", "0.65514535", "0.6540226", "0.65374506", "0.65243524", "0.6524186", "0.65230876", "0.65091276", "0.64947504", "0.64897984", "0.64877754", "0.6474348", "0.6472242", "0.6472242", "0.6470293", "0.6465339", "0.6458714", "0.6453708", "0.6443898", "0.64368117", "0.64364713", "0.64320475", "0.643186", "0.6428526", "0.6425893", "0.64243007", "0.6412113", "0.6412089", "0.64070475", "0.63956994", "0.6393384", "0.63916695", "0.63899183", "0.63876265", "0.6381304", "0.637753", "0.6376238", "0.637372", "0.63737154", "0.6364913" ]
0.66753596
46
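Note: the document in the row above seeds Spanish intent-classification sentences and persists them through a SQLAlchemy session (`db.session.add(...)` / `db.session.commit()`). As a hedged illustration only — the `Intent` and `Sentence` models, their columns, and the `db` handle are assumptions inferred from the snippet, not a confirmed API — a minimal Flask-SQLAlchemy seeding pattern looks like this:

# Minimal sketch of the seeding pattern implied by the row above.
# Model names and columns are illustrative assumptions; calling seed()
# requires an active Flask application context.
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class Intent(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True, nullable=False)
    sentences = db.relationship("Sentence", backref="intent")

class Sentence(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    sentence = db.Column(db.String(300), nullable=False)
    intent_id = db.Column(db.Integer, db.ForeignKey("intent.id"))

def seed(intent_name, texts):
    # Attaching the Sentence objects to the relationship lets the session
    # cascade the inserts, so a single add() + commit() persists everything.
    intent = Intent(name=intent_name,
                    sentences=[Sentence(sentence=t) for t in texts])
    db.session.add(intent)
    db.session.commit()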
Order the results. type should be ASC or DESC
def order(self, column, type):
    self._order = (column, type)
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort_results(self):\n pass", "def orderby():\n pass", "def order_by(self, results, key_, direction=\"ASC\"):\n\n return sorted(results, key=lambda x: x.get(key_), reverse=direction==\"DESC\")", "def sort_results(results_list, sorting_type):\n if sorting_type == \"Oldest\":\n return sort_results_by_date(results_list, False)\n elif sorting_type == \"Description A-Z\":\n return sort_results_alphabetically_on_description(results_list, False)\n elif sorting_type == \"Description Z-A\":\n return sort_results_alphabetically_on_description(results_list, True)\n else:\n return sort_results_by_date(results_list, True)", "def sort_results(self, sort_option):\r\n self.model.sort_data(sort_option)", "def get_sort_query(self, kind, order, is_number):\n pass", "def sortby(self):\n ...", "def order(self):\n raise NotImplementedError()", "def sort_by_type(self):\n # sort_by_type_sitem = self.locator_finder_by_idx(self.sort_by_type_id, 30)\n # sort_by_type_sitem = sort_by_type_sitem.find_element_by_xpath(\"./..\")\n # while True:\n # try:\n # sort_by_type_sitem.click()\n # break\n # except ElementNotInteractableException:\n # time.sleep(1) \n if self.current_package_version() == semver.VersionInfo.parse(\"3.8.0\"):\n sort_by_type = '//*[@id=\"collectionsDropdown\"]/ul[3]/li[3]/a/label'\n sort_by_type_sitem = self.locator_finder_by_xpath(sort_by_type)\n else:\n sort_by_type_sitem = self.locator_finder_by_xpath(self.sort_by_type_id)\n\n sort_by_type_sitem.click()\n time.sleep(2)", "def order_query(self, query):\n\n direction = desc if self.direction == 'desc' else asc\n if self.order in inspect(self.model_class).columns.keys():\n attribute = getattr(self.model_class, self.order)\n elif self.order == 'group.name':\n attribute = func.coalesce(UserGroup.name, '')\n elif self.order == 'user.realname':\n attribute = func.coalesce(User.realname, '')\n elif self.order == 'user.username':\n attribute = func.coalesce(User.username, '')\n elif self.order == 'user.name':\n attribute = func.coalesce(User.realname, User.username, '')\n else:\n attribute = self.model_class.first_issue\n\n return query.order_by(None).order_by(direction(attribute))", "def ordering(self):\r\n if hasattr(self, \"queryset\"):\r\n aliases = {}\r\n for bound_column in self.table.columns:\r\n aliases[bound_column.order_by_alias] = bound_column.order_by\r\n try:\r\n return next(segment(self.queryset.query.order_by, aliases))\r\n except StopIteration:\r\n pass", "def post_sort(self, qs):\n return qs", "def order_answers(self, queryset):\n if self.answer_order == 'content':\n return queryset.order_by('content')\n if self.answer_order == 'random':\n return queryset.order_by('?')\n if self.answer_order == 'none':\n return queryset.order_by('None')", "def order_by(cls, *args):\n return cls.query.order_by(*args)", "def orderList(dataSource,**kwargs):\n\treturn sorted(dataSource)", "def test_order_direction(self):\n threads = [make_minimal_cs_thread()]\n self.register_get_user_response(self.user)\n self.register_get_threads_response(threads, page=1, num_pages=1)\n self.client.get(\n self.url,\n {\n \"course_id\": str(self.course.id),\n \"order_direction\": \"desc\",\n }\n )\n self.assert_last_query_params({\n \"user_id\": [str(self.user.id)],\n \"course_id\": [str(self.course.id)],\n \"sort_key\": [\"activity\"],\n \"page\": [\"1\"],\n \"per_page\": [\"10\"],\n })", "def pre_sort(self, qs):\n return qs", "def get_ordering(self):\n self.ordering = \"-fecha_vista\"\n return self.ordering", "def get_queryset(self):\n\n search_str = 
self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n if search_str:\n a = Q(name__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = self.model.objects.filter(a | b).distinct()\n\n else:\n objects = OrganizationType.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def sortResults( self, results, unique=None, **kw ):\n #logger.info('sortResults len results:%s, sort_on: %s, sort_order: %s' % ( len(results), kw.get('sort_on'), kw.get('sort_order') ))\n catalog = self._catalog\n\n if unique:\n results = self.uniqueResults(results)\n\n if not hasattr( catalog, '_getSortIndex' ):\n # Zope 2.5.x\n return results\n\n index = catalog._getSortIndex( kw )\n if index is None:\n return results\n index_name = index.getId()\n\n limit = catalog._get_sort_attr( 'limit', kw )\n order = catalog._get_sort_attr( 'order', kw )\n reverse = order and order.lower() in ('reverse', 'descending') and 1 or 0\n #logger.info('sortResults len results:%s, index:%s' % ( len(results), index_name ) )\n\n if index_name == 'Creator':\n membership = getToolByName( self, 'portal_membership', None )\n if membership is not None:\n results = list(results)\n results.sort( lambda x, y, f=membership.getMemberName: cmp( f(x['Creator']), f(y['Creator']) ) )\n if reverse:\n results.reverse()\n\n return results", "def test_order_by(self):\n manifestb = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='2.0.0')\n job_type1b = job_test_utils.create_seed_job_type(manifest=manifestb)\n job_test_utils.create_job(job_type=job_type1b, status='RUNNING')\n\n manifestc = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='3.0.0')\n job_type1c = job_test_utils.create_seed_job_type(manifest=manifestc)\n job_test_utils.create_job(job_type=job_type1c, status='RUNNING')\n\n url = '/%s/jobs/?is_superseded=false&order=job_type__name&order=-job_type__version' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n\n self.assertEqual(result['results'][0]['job_type']['id'], job_type1c.id)\n self.assertEqual(result['results'][1]['job_type']['id'], job_type1b.id)\n self.assertEqual(result['results'][2]['job_type']['id'], self.job_type1.id)\n self.assertEqual(result['results'][3]['job_type']['id'], self.job_type2.id)", "def get_queryset(self):\n # get original query set\n qs = super(ExamList, self).get_queryset()\n # order by title\n return qs.order_by('title')", "def order_by(self, order):\n new_table = self.select()\n new_table.rows.sort(key=order)\n return new_table", "def sorted(self): \n pass", "def products_view(request):\n from_database = Product.objects.all()\n\n sort_type = request.POST.get('sort-selector')\n print(sort_type)\n if sort_type is None:\n from_database = from_database.order_by('title')\n my_context = {\n 'from_database': from_database,\n 'sort_type': sort_type,\n }\n return render(request, \"products.html\", my_context)\n if sort_type == 'name_asc':\n from_database = from_database.order_by('title')\n my_context = {\n 'from_database': from_database,\n 'sort_type': sort_type,\n }\n return render(request, \"products.html\", my_context)\n if 
sort_type == 'name_desc':\n from_database = from_database.order_by('-title')\n my_context = {\n 'from_database': from_database,\n 'sort_type': sort_type,\n }\n return render(request, \"products.html\", my_context)\n if sort_type == 'price_asc':\n from_database = from_database.order_by('price')\n my_context = {\n 'from_database': from_database,\n 'sort_type': sort_type,\n }\n return render(request, \"products.html\", my_context)\n if sort_type == 'price_desc':\n from_database = from_database.order_by('-price')\n my_context = {\n 'from_database': from_database,\n 'sort_type': sort_type,\n }\n return render(request, \"products.html\", my_context)", "def dataSort(self, collectionName, catagory, method='ASCENDING'):\n if method == 'ASCENDING':\n results = collectionName.find().sort(catagory, pymongo.ASCENDING)\n elif method == 'DESCENDING':\n results = collectionName.find().sort(catagory, pymongo.DESCENDING)\n return results", "def get_queryset(self):\n rs = super(BaseQuerysetMixin, self).get_queryset()\n if self.request.GET.get(\"ordering\") is None:\n rs = rs.order_by(\"id\")\n return rs", "def sort(self):\n\n self.models.sort(key=methodcaller('get_age'))", "def data_for_sorting() -> NoReturn:\n raise NotImplementedError", "def data_for_sorting() -> NoReturn:\n raise NotImplementedError", "def test_query_sort_default_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(sorted(data)):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def test_query_sort_nondefault_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\",\n sort_order=\"desc\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(reversed(sorted(data))):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def sortChoices(self):\n self.formatList.sort()", "def sort(self, column, order=Qt.AscendingOrder):\n if(column == Columns.Date):\n self.sorting = Sorting.Date\n elif(column == Columns.Code):\n self.sorting = Sorting.Code\n elif(column == Columns.User):\n self.sorting = Sorting.User\n elif(column == Columns.Tags):\n self.sorting = Sorting.Priviledges\n elif(column == Columns.TimesRequested):\n self.sorting = Sorting.TimesRequested\n\n if(order == Qt.DescendingOrder):\n self.sorting |= Sorting.Reversed\n\n 
self._reset_view()", "def order_log_results(self, log_search_order):\n raise errors.Unimplemented()", "def sort_position_data(pos,type='A'):\n ranking_type = __position_ranking[type]\n return sorted(pos,key=lambda player: ranking_type[player[2]])", "def asc(self):\n self.get_output = sorted((value, key) for (key, value) in self.get_output.items())", "def order_log_entry_results(self, log_entry_search_order):\n raise errors.Unimplemented()", "def _sort_results(self, results: dict) -> List:\n return [results[url][\"display_name\"] for url in self.urls_list]", "def sort(self, name, start=None, num=None, by=None, get=None,\r\n desc=False, alpha=False, store=None):\r\n if (start is not None and num is None) or \\\r\n (num is not None and start is None):\r\n raise RedisError(\"``start`` and ``num`` must both be specified\")\r\n \r\n pieces = [name]\r\n if by is not None:\r\n pieces.append('BY %s' % by)\r\n if start is not None and num is not None:\r\n pieces.append('LIMIT %s %s' % (start, num))\r\n if get is not None:\r\n pieces.append('GET %s' % get)\r\n if desc:\r\n pieces.append('DESC')\r\n if alpha:\r\n pieces.append('ALPHA')\r\n if store is not None:\r\n pieces.append('STORE %s' % store)\r\n return self.format_inline('SORT', *pieces)", "def filter_and_order(cls, *args, **kwargs):\n return cls.query.filter_by(**kwargs).order_by(*args)", "def orderPodListbox(orderMethod):\n\tvalidOrderMethods=[\"NAME\",\"TYPE\"]\n\torderMethod=str(orderMethod).upper()\n\tnewList=[]\n\t#Add the current data dictionary into list for sorting\n\tfor item in podListbox.data.keys():\n\t\tnewList.append(item)\n\n\t#Check the order method is valid\n\tif orderMethod in validOrderMethods:\n\t\tif orderMethod == \"NAME\":\n\t\t\tnewList=orderList(newList)\n\n\t\telif orderMethod == \"TYPE\":\n\t\t\tprint(\"Can not sort by type yet\")\n\n\t\t#Add the original data\n\t\tpodListbox.delete(0,END)\n\t\tfor item in newList:\n\t\t\tpodListbox.addExisting(item)", "def sort(self, order):\r\n params = base.get_params(None, locals())\r\n url = '{0}/sort'.format(self.get_url())\r\n\r\n request = http.Request('PUT', url, params)\r\n\r\n return request, parsers.parse_json", "def sort(request):\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n select = request.GET['sort']\n if select == 'LtoH':\n results = Product.objects.order_by('price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'HtoL':\n results = Product.objects.order_by('-price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'AtoZ':\n results = Product.objects.order_by('name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'ZtoA':\n results = Product.objects.order_by('-name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})", "def test_orderby(self):\n\n # TODO: make a unit test out of these various combinations\n #m = mapper(User, users, order_by=desc(users.c.user_name))\n mapper(User, users, order_by=None)\n #mapper(User, users)\n\n #l = create_session().query(User).select(order_by=[desc(users.c.user_name), asc(users.c.user_id)])\n l = create_session().query(User).all()\n #l = create_session().query(User).select(order_by=[])\n #l = create_session().query(User).select(order_by=None)", "def get_sorted_results(self):\n results = self.results.values()\n return sorted(results, key=lambda r: r.rank(), reverse=True)", "def 
_get_order_bys(self, record_class, sorts, convert_key_names_func):\n result = list()\n for sort in sorts:\n attr_name = convert_key_names_func(sort.attr)\n if attr_name is not None and hasattr(record_class, attr_name):\n if sort.direction == \"ASC\":\n result.append(getattr(record_class, attr_name).asc())\n else:\n result.append(getattr(record_class, attr_name).desc())\n else:\n raise AttributeError(\"Invalid attribute.\")\n return result", "def sort(self, name, by=None, get=None, start=None, num=None, desc=False, alpha=False):\n stmt = ['SORT', name]\n if by:\n stmt.append(\"BY %s\" % by)\n if start and num:\n stmt.append(\"LIMIT %s %s\" % (start, num))\n if get is None:\n pass\n elif isinstance(get, basestring):\n stmt.append(\"GET %s\" % get)\n elif isinstance(get, list) or isinstance(get, tuple):\n for g in get:\n stmt.append(\"GET %s\" % g)\n else:\n raise RedisError(\"Invalid parameter 'get' for Redis sort\")\n if desc:\n stmt.append(\"DESC\")\n if alpha:\n stmt.append(\"ALPHA\")\n self.connect()\n self._write(' '.join(stmt + [\"\\r\\n\"]))\n return self._get_multi_response()", "def ordering(self, qs):\n request = self.request\n # Number of columns that are used in sorting\n try:\n i_sorting_cols = int(request.REQUEST.get('iSortingCols', 0))\n except ValueError:\n i_sorting_cols = 0\n\n order = []\n order_columns = self.get_order_columns()\n for i in range(i_sorting_cols):\n # sorting column\n try:\n i_sort_col = int(request.REQUEST.get('iSortCol_%s' % i))\n except ValueError:\n i_sort_col = 0\n # sorting order\n s_sort_dir = request.REQUEST.get('sSortDir_%s' % i)\n\n sdir = '-' if s_sort_dir == 'desc' else ''\n\n sortcol = order_columns[i_sort_col]\n if isinstance(sortcol, list):\n for sc in sortcol:\n order.append('%s%s' % (sdir, sc))\n else:\n order.append('%s%s' % (sdir, sortcol))\n if order:\n return qs.order_by(*order)\n return qs", "def get_queryset(self):\n search_str = self.request.GET.get('search')\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(name__icontains=search_str)\n b = Q(administrator__first_name__icontains = search_str)\n c = Q(administrator__last_name__icontains = search_str)\n d = Q(administrator__username__icontains = search_str)\n e = Q(types__name__icontains = search_str)\n f = Q(description__icontains = search_str)\n objects = Organization.objects.filter(a | b | c | d | e | f).distinct()\n\n else: # SORTING BY COL_NM\n if col_nm in ['name', 'description'] :\n objects = Organization.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n elif col_nm =='administrator__first_name':\n objects=Organization.objects.filter().order_by(col_nm)\n if sort_order == \"DESC\":\n objects = objects.reverse()\n else:\n objects=Organization.objects.extra(select=\n {'name':'lower(name)'}).order_by('name')\n\n\n return objects", "def order_by(self, *fields):\n self._evaluated = False\n if self._order is None:\n self._order = []\n\n for field in fields:\n direction = \"asc\"\n if field.startswith('-'):\n direction = \"desc\"\n field = field[1:]\n\n self._order.append({ field : direction })\n\n return self", "def get_search_query_ordering(\n self, database_engine_type: BaseDatabaseEngine,\n ) -> Tuple[str, Tuple]:\n if database_engine_type == PostgresEngine:\n # We order by rank and then if a user has 
profile info.\n # This ranking algorithm is hand tweaked for \"best\" results. Broadly\n # the idea is that a higher weight is given to exact matches.\n # The array of numbers are the weights for the various part of the\n # search: (domain, _, display name, localpart)\n sql = \"\"\"\n (CASE WHEN d.user_id IS NOT NULL THEN 4.0 ELSE 1.0 END)\n \"\"\"\n\n args = ()\n if self.weighted_display_name_like is not None:\n sql += \"\"\"\\\n * (CASE WHEN display_name LIKE ? THEN 2.0 ELSE 1.0 END)\\\n \"\"\"\n args += (\"%\" + self.weighted_display_name_like + \"%\",)\n\n sql += \"\"\"\n * (CASE WHEN avatar_url IS NOT NULL THEN 1.2 ELSE 1.0 END)\n * (\n 3 * ts_rank_cd(\n '{0.1, 0.1, 0.9, 1.0}',\n vector,\n to_tsquery('simple', ?),\n 8\n )\n + ts_rank_cd(\n '{0.1, 0.1, 0.9, 1.0}',\n vector,\n to_tsquery('simple', ?),\n 8\n )\n )\n DESC,\n display_name IS NULL,\n avatar_url IS NULL\n \"\"\"\n return sql, args\n elif database_engine_type == Sqlite3Engine:\n # We order by rank and then if a user has profile info.\n return (\n \"\"\"\n rank(matchinfo(user_directory_search)) DESC,\n display_name IS NULL,\n avatar_url IS NULL\n \"\"\",\n (),\n )\n else:\n raise Exception(\"Received an unrecognized database engine\")", "def sort(self,desc):\n\tself.__sort(\"\",\"\",desc)", "def add_action_type_to_order(action_type: ActionType) -> None:\n if action_type.order_index is not None:\n # action type already has a place in the current sort order\n return\n action_types_before: typing.List[ActionType] = []\n action_types_after: typing.List[ActionType] = []\n action_types = [\n ActionType.from_database(other_action_type)\n for other_action_type in models.ActionType.query.filter(models.ActionType.order_index != db.null()).order_by(models.ActionType.order_index).all()\n ]\n if action_types:\n # check if local action types are listed before imported action types\n local_action_types = [\n other_action_type\n for other_action_type in action_types\n if (other_action_type.fed_id is None)\n ]\n imported_action_types = [\n other_action_type\n for other_action_type in action_types\n if (other_action_type.fed_id is not None)\n ]\n local_before_imported = max(\n typing.cast(int, other_action_type.order_index)\n for other_action_type in local_action_types\n ) < min(\n typing.cast(int, other_action_type.order_index)\n for other_action_type in imported_action_types\n )\n if local_before_imported:\n if action_type.fed_id is None:\n action_types = local_action_types\n action_types_after = imported_action_types + action_types_after\n else:\n action_types_before = action_types_before + local_action_types\n action_types = imported_action_types\n if action_types:\n # check if action types are listed in order of their english names\n english_names = [\n other_action_type.name.get('en', '').lower()\n for other_action_type in action_types\n ]\n english_lexicographical_order = english_names == sorted(english_names)\n if english_lexicographical_order:\n english_name = action_type.name.get('en', '').lower()\n action_types_before = action_types_before + [\n other_action_type\n for other_action_type in action_types\n if other_action_type.name.get('en', '').lower() < english_name\n ]\n action_types_after = [\n other_action_type\n for other_action_type in action_types\n if other_action_type.name.get('en', '').lower() > english_name\n ] + action_types_after\n action_types = [\n other_action_type\n for other_action_type in action_types\n if other_action_type.name.get('en', '').lower() == english_name\n ]\n # update order indices\n index_list = [\n 
other_action_type.id\n for other_action_type in (\n action_types_before + action_types + [action_type] + action_types_after\n )\n ]\n set_action_types_order(index_list)", "def _create_order_by_list(cls, search: Search):\n\n cls.__class_validation()\n order_by_list = []\n for searchBy in search.OrderBy.split(\",\"):\n order_by = cls._get_column_from_name(searchBy)\n if search.OrderDesc and order_by is not None:\n order_by = order_by.desc()\n order_by_list.append(order_by)\n\n return order_by_list", "def get_sort_field(self, kind, order, is_number):\n pass", "def test_order_by(self, http_query, cc_query):\n threads = [make_minimal_cs_thread()]\n self.register_get_user_response(self.user)\n self.register_get_threads_response(threads, page=1, num_pages=1)\n self.client.get(\n self.url,\n {\n \"course_id\": str(self.course.id),\n \"order_by\": http_query,\n }\n )\n self.assert_last_query_params({\n \"user_id\": [str(self.user.id)],\n \"course_id\": [str(self.course.id)],\n \"page\": [\"1\"],\n \"per_page\": [\"10\"],\n \"sort_key\": [cc_query],\n })", "def get_queryset(self):\n qs = super(SortForm, self).get_queryset()\n\n qs = self.pre_sort(qs)\n\n # Ensure that the form is valid\n if not self.is_valid():\n return qs\n\n # Do Sorting\n sorts = self.cleaned_data.get('sort', [])\n order_by = []\n for sort in sorts:\n param = self.HEADERS[abs(sort) - 1]['column']\n if sort < 0:\n param = '-' + param\n order_by.append(param)\n\n if order_by:\n qs = qs.order_by(*order_by)\n\n qs = self.post_sort(qs)\n\n return qs", "def _sort_torrents(ctx, torrent_list, sort_type):\n\n if sort_type == 'seeders':\n return sorted(torrent_list, key=lambda t: t['seeders'], reverse=True)", "def order_by(self, field_paths, order=None):\n raise NotImplementedError(\"This should have been implemented.\")", "def order_data(self, data, order):\n return data", "def sort_by_default(self):\n self.data.sort()", "def Order(self) -> int:", "def orderby(cls, field, desc=False):\n cls.runtime.set_orderby((field, desc))\n return cls", "def order_by(self, columns, reverse=False):\n\n return self._get(\"order\", (columns, reverse), Table)", "def order ( self ) :\n return self.__order", "def _reorder_collected(self, data):\n priority = {\n 'post': 1,\n 'get': 2,\n 'put': 2,\n 'patch': 2,\n 'head': 2,\n 'options': 2,\n 'delete': 3,\n }\n data = sorted(\n data,\n key=lambda x: priority.get(getattr(x, 'name', ''), 4))\n return data", "def sorter(Plugin):\n return Plugin.order", "def test_results_are_sorted(self, data_flow_api_client):\n response = data_flow_api_client.get(self.view_url)\n assert response.status_code == status.HTTP_200_OK\n\n results = response.json()['results']\n\n assert results == sorted(results, key=lambda t: t['id'])", "def get_queryset(self):\n queryset = super().get_queryset()\n today = datetime.datetime.today()\n return queryset.annotate(\n relevance=models.Case(\n models.When(date__gte=today, then=1),\n models.When(date__lt=today, then=2),\n output_field=models.IntegerField(),\n )).order_by('relevance', 'date')", "def order_agent_results(self, agent_search_order):\n raise errors.Unimplemented()", "def reversesort(self):\n ...", "def order_by(self, *fields):\n self.query = self.query.sort(self._parse_order_spec(fields))\n return self", "def set_trec_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:(x.get_score(),x.get_doc()),reverse=True)\n for r in self._run[k]:\n print r.get_str()", "def sort(self):\n sorted_entries = [] # type: list[MSBModel]\n for entry_subtype in MSBModelSubtype:\n 
sorted_entries += list(sorted(self.get_entries(entry_subtype), key=lambda m: m.name))\n self._entries = sorted_entries", "def sort(self, desc):\n self.__sortByIndex(0, desc)", "def testSortAscending(self):\n self.request.GET['sort'] = \"name,objid\"\n self.datagrid.load_state()\n\n self.assertEqual(self.datagrid.sort_list, [\"name\", \"objid\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 01\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 02\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 03\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()", "def order(self, order=0):\n # type: (int) -> Entity\n self.type_def['order'] = order\n\n return self", "def order(self):\n return self.__order", "def _sort_by_type(self, x, y):\n\n x_key, x_value = x\n y_key, y_value = y\n\n # if both values are map or list, sort by their keys\n if ((isinstance(x_value, schema.Map) or\n isinstance(x_value, schema.List)) and\n (isinstance(y_value, schema.Map) or\n isinstance(y_value, schema.List))):\n return (x_key > y_key) - (x_key < y_key)\n\n # show simple types before maps or list\n if (isinstance(x_value, schema.Map) or\n isinstance(x_value, schema.List)):\n return 1\n\n if (isinstance(y_value, schema.Map) or\n isinstance(y_value, schema.List)):\n return -1\n\n return (x_key > y_key) - (x_key < y_key)", "def get_sorted(self, collection, xmlFormat):\n\t\treturn {\n\t\t\t\"verb\": \"Search\",\n\t\t\t\"xmlFormat\": xmlFormat,\n\t\t\t'sortDescending' : '/text//itemRecord/metaMetadata/dateInfo/@lastModified',\n\t\t\t\"ky\": collection\n\t\t\t}", "def order(self, data):\n order = self.request.GET.get('order', None)\n if order:\n return self.order_data(data, order)\n\n return data", "def _get_search_order(self, post):\n return 'website_published desc, %s' % \\\n self.order_by.get(post.get('order', ''), { 'query': 'website_sequence desc' })['query']", "def sort_key(self):\n ...", "def get_type_order(group):\n cnx, cursor = connect_db()\n query = \"\"\"select a.name, a.`order` from types a, types b\n where a.parent=b.guid and\n b.name='{0}'\"\"\".format(group)\n cursor.execute(query)\n result = cursor.fetchall()\n result = pd.DataFrame(result, columns=['type', 'order'])\n cnx.close()\n return result", "def order_by(self, *fields):\n doc = []\n for field in fields:\n if field.startswith('-'):\n doc.append((field.strip('-'), pymongo.DESCENDING))\n else:\n doc.append((field, pymongo.ASCENDING))\n return self.sort(doc)", "def sort_column(self, column):\n if column == 1: # type\n self.sorted_keys = sorted(self.data_dict.keys(),\n key=lambda x: (self.data_dict[x]['type']),\n reverse=self.sorted_type_top)\n # Invert sorting method\n self.sorted_type_top = not self.sorted_type_top\n\n elif column == 2: # Score\n self.sorted_keys = sorted(self.data_dict.keys(),\n key=lambda x: (float(self.data_dict[x]['score'])),\n reverse=self.sorted_score_top)\n # Invert sorting method\n self.sorted_score_top = not self.sorted_score_top\n\n elif column == 4: # Duration\n d = dict()\n for k in self.sorted_keys:\n duration_string = self.data_dict[k]['duration']\n\n # Get amount of episodes\n if 'episode' in duration_string:\n if 'Some' in duration_string:\n episodes = 0\n else:\n episodes = int(duration_string.split(' episodes')[0])\n else:\n episodes = 1\n\n # Get the duration in minutes\n minutes = 0\n if 'min' in duration_string:\n minutes = int(re.findall('([0-9]+)min', 
duration_string)[0])\n if 'h' in duration_string:\n minutes += int(re.findall('([0-9]+)h', duration_string)[0]) * 60\n\n # Get total duration of the whole show\n minutes *= episodes\n\n # Store it for sorting\n d[k] = minutes\n\n # Sort titles based on duration\n self.sorted_keys = sorted(d.keys(),\n key=lambda x: d[x],\n reverse=self.sorted_duration_top)\n # Invert sorting method\n self.sorted_duration_top = not self.sorted_duration_top\n\n elif column == 5: # release year\n self.sorted_keys = sorted(self.data_dict.keys(),\n key=lambda x: (float(self.data_dict[x]['released'])),\n reverse=self.sorted_year_top)\n # Invert sorting method\n self.sorted_year_top = not self.sorted_year_top\n\n if column != 2:\n # Make sure next time we click to sort by score,\n # the highest score is on top\n self.sorted_score_top = True\n\n # Redraw the table\n self.setup_tableview()", "def bob_sort_url(query, field, sort_variable_name, type):\n query = query.copy()\n if type == 'desc':\n query[sort_variable_name] = '-' + field\n elif type == 'asc':\n query[sort_variable_name] = field\n return query.urlencode()", "def get_queryset(self):\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', 'title')\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', 'ASC')\n self.sort_ordr=sort_order\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(title__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = Designation.objects.filter(a | b).distinct()\n else: # SORTING BY COL_NM\n objects = Designation.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def order_by_alias(self):\r\n order_by = OrderBy((self.table.order_by or {}).get(self.name, self.name))\r\n order_by.next = order_by.opposite if self.is_ordered else order_by\r\n return order_by", "def sort_object_info(results, sortkey):\n\n if sortkey == \"unsorted\":\n return results\n elif sortkey == \"name\":\n return sorted(results, key=lambda r: r[\"name\"])\n elif sortkey == \"ext\":\n def _get_ext(n):\n # Get extension for sorting\n if n[\"type\"] == \"dataobject\":\n return n[\"name\"].split(\".\")[-1]\n else:\n # Use name for sorting collections\n return n[\"name\"]\n\n return sorted(results, key=_get_ext)\n elif sortkey == \"size\":\n return sorted(results, key=lambda k: k.get(\"size\", 0))\n elif sortkey == \"date\":\n return sorted(results, key=lambda k: k.get(\"modify_time\", 0))\n else:\n exit_with_error(\"Sort option {} not supported.\".format(sortkey))", "def object_list(self):\n\n def _sort(ob, ol):\n reverse = ob.startswith(\"-\")\n ob = ob[1:] if reverse else ob\n for column in self.columns:\n if column.sort_key_fn is not None and column.name == ob:\n return sorted(ol, key=column.sort_key_fn, reverse=reverse)\n if self._meta.order_by and hasattr(ol, \"order_by\"):\n return ol.order_by(*self._meta.order_by.split(\"|\"))\n return ol\n\n ol = self._object_list\n ob = self._meta.order_by\n if not ob: return ol\n if isinstance(ob, basestring):\n return _sort(ob, ol)\n elif isinstance(ob, list):\n ob.reverse()\n for fn in ob:\n ol = _sort(fn, ol)\n return ol", "def order_queryset_by_sort_order(get, qs):\n\n def get_string_from_tuple_list(lstTuples, number):\n \"\"\"Get the string value corresponding to a number in a list of number-string tuples\"\"\"\n sBack = [tup[1] for tup in lstTuples if tup[0] == number]\n return sBack\n\n # Helper: 
order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\n def order_queryset_by_tuple_list(qs, sOrder, sListName):\n \"\"\"Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\"\"\"\n\n # Get a list of tuples for this sort-order\n tpList = build_choice_list(sListName)\n # Determine sort order: ascending is default\n bReversed = False\n if (sOrder[0:1] == '-'):\n # A starting '-' sign means: descending order\n sOrder = sOrder[1:]\n bReversed = True\n\n # Order the list of tuples alphabetically\n # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)\n tpList = sorted(tpList, key=operator.itemgetter(1))\n # Order by the string-values in the tuple list\n return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)\n\n # Set the default sort order\n sOrder = 'woord' # Default sort order if nothing is specified\n # See if the form contains any sort-order information\n if ('sortOrder' in get and get['sortOrder'] != ''):\n # Take the user-indicated sort order\n sOrder = get['sortOrder']\n\n # The ordering method depends on the kind of field:\n # (1) text fields are ordered straightforwardly\n # (2) fields made from a choice_list need special treatment\n if (sOrder.endswith('handedness')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handedness\")\n elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handshape\")\n elif (sOrder.endswith('locprim')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Location\")\n else:\n # Use straightforward ordering on field [sOrder]\n ordered = qs.order_by(sOrder)\n\n # return the ordered list\n return ordered", "def sort_contacts(self, method, order):\n \n method_l = method.lower()\n order_l = order.lower()\n \n if method_l == 'name' and order_l == 'asc':\n name_sort = sorted(self.contacts, key=lambda x: x[0])\n for x in name_sort:\n print(x)\n return name_sort\n elif method_l == 'name' and order_l == 'desc':\n name_sort = sorted(self.contacts, key=lambda x: x[0], reverse=True)\n for x in name_sort:\n print(x)\n return name_sort \n \n elif method_l == 'zipcode' and order_l == 'asc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3])\n for x in zip_sort:\n print(x)\n return zip_sort\n elif method_l == 'zipcode' and order_l == 'desc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3],reverse=True)\n for x in zip_sort:\n print(x)\n return zip_sort", "def calculate_items_on_page(self):\n if self.after_id or self.before_id:\n # Execute if 'after' or 'before' query param is present in URL\n model_id = self.after_id if self.after_id else self.before_id\n model = self.iterable(id=model_id).first()\n if self.sort_direction == 'ascending':\n query_operator = f'{self.sort_by}__gt' if self.after_id else f'{self.sort_by}__lt'\n items_kwargs = {\n query_operator: getattr(model, f'{self.sort_by}')\n }\n items = self.iterable(**items_kwargs).order_by(f'{self.sort_by}')\n if self.after_id:\n items = items.limit(self.page_limit)\n else:\n # If 'before' query param present get last 'self.page_limit' (count) from items.\n if len(items) > self.page_limit:\n items = items.skip(len(items) - self.page_limit)\n else:\n query_operator = f'{self.sort_by}__lt' if self.after_id else f'{self.sort_by}__gt'\n items_kwargs = {\n query_operator: getattr(model, f'{self.sort_by}')\n }\n items = 
self.iterable(**items_kwargs).order_by(f'-{self.sort_by}').limit(self.page_limit)\n else:\n # Execute if neither 'before' nor 'after' query params are present in URL\n sort_order = self.sort_by if self.sort_direction == 'ascending' else f'-{self.sort_by}'\n items = self.iterable.order_by(sort_order).limit(self.page_limit)\n return items", "def order_by(self):\r\n if self.column.order_by is not None:\r\n order_by = self.column.order_by\r\n else:\r\n # default to using column accessor as data source sort key\r\n order_by = OrderByTuple((self.accessor, ))\r\n return order_by.opposite if self.order_by_alias.is_descending else order_by", "def addPrintOrder(self, type):\n self.printOrder.append(type)", "def _sort_records(self):\n self.records.sort(reverse=True, key=lambda record: record.timestamp)", "def order_by(self, name, *selectors) :\n\n temp = []\n for row in self :\n r = row.as_dict()\n keys = tuple([s(r) for s in selectors])\n temp.append((keys, r))\n\n temp.sort(key=lambda x : x[0])\n\n ct = [(s.get_name(), s.get_type()) for s in selectors]\n new_table = self.factory.new_table(name, ct)\n new_table.add_rows([x[1] for x in temp])\n return new_table", "def get_queryset(self):\n return Question.objects.all().order_by(\"-allVote\") #แสดงคำถาม" ]
[ "0.7437501", "0.6612537", "0.6586749", "0.65457374", "0.65006554", "0.6475219", "0.6381626", "0.6325047", "0.6201697", "0.6086321", "0.60466444", "0.59852207", "0.59614545", "0.59498024", "0.59177685", "0.5870425", "0.58585554", "0.58449817", "0.5832564", "0.57793987", "0.5759833", "0.57454866", "0.57432544", "0.5723462", "0.5707863", "0.56963986", "0.5691974", "0.5689471", "0.5675043", "0.5675043", "0.5674279", "0.56734", "0.56732", "0.56706023", "0.5664239", "0.5653549", "0.5650129", "0.5643349", "0.56138", "0.5598378", "0.55947244", "0.5586863", "0.55813277", "0.55750144", "0.55639976", "0.55611795", "0.55538535", "0.55525374", "0.5543824", "0.5543096", "0.55364233", "0.5530575", "0.5525308", "0.5521295", "0.5520383", "0.55136895", "0.5508936", "0.55022657", "0.54958", "0.5494845", "0.54941875", "0.5493115", "0.54856503", "0.5466849", "0.54466045", "0.5443674", "0.5443483", "0.5433151", "0.54324526", "0.54279035", "0.5426479", "0.542269", "0.5410464", "0.5406934", "0.5404428", "0.5397934", "0.5386059", "0.5378538", "0.53710866", "0.53708524", "0.5370834", "0.5370163", "0.5369015", "0.5367357", "0.5366536", "0.536327", "0.5361178", "0.53597254", "0.5359528", "0.53486276", "0.53465796", "0.53461826", "0.5340483", "0.5338615", "0.5334964", "0.5329852", "0.5318905", "0.53177935", "0.53143775", "0.53092647" ]
0.6021243
11
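For readers skimming this record, a minimal runnable sketch of the sort-by-choice-list idea in the document above. Every helper here (build_choice_list, get_string_from_tuple_list) and the plain-list stand-in for the queryset are assumptions for illustration, not the source project's code.

import operator

# Hypothetical stand-ins for the Django-side helpers the document calls
# (assumed shapes, not the source implementations).
def build_choice_list(sListName):
    return [(0, '-'), (1, 'N/A'), (2, 'right'), (3, 'left')]

def get_string_from_tuple_list(tpList, number):
    # Map a stored number to its human-readable label.
    return dict(tpList).get(number, '')

class Gloss:
    def __init__(self, handedness):
        self.handedness = handedness

qs = [Gloss(3), Gloss(2), Gloss(1)]  # plain-list stand-in for a queryset
tpList = sorted(build_choice_list("Handedness"), key=operator.itemgetter(1))
ordered = sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, "handedness")))
print([g.handedness for g in ordered])  # [1, 3, 2] -> labels 'N/A', 'left', 'right'

The point of the two-step sort is that records hold numeric codes, so ordering must go through the code-to-label mapping rather than the raw numbers.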
Transform an object retrieved from the database
def transform_outgoing(self, son):
    if 'type' in son:
        klass = common.classify(son['type'])
        return klass.demongofy(son)
    else:
        try:
            return collection_to_class[self.domain].demongofy(son)
        except KeyError:
            return son
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform(self):", "def transform():", "def transform_one(self, obj: Any):\n return obj", "def transform(self, data):", "def Transform(self, record):\n pass", "def _from_db_object(boar, db_boar):\n foreign_key = ['category', 'dormitory', 'source']\n for field in boar.fields:\n if field in ['birthday', 'entryday']:\n boar[field] = db_boar[field].strftime(\"%Y-%m-%d\")\n elif field not in foreign_key:\n boar[field] = db_boar[field]\n elif field == 'category' and db_boar.category:\n boar[field] = db_boar.category.name\n elif field == 'dormitory' and db_boar.dormitory:\n boar[field] = db_boar.dormitory.name\n elif field == 'source' and db_boar.source:\n boar[field] = db_boar.source.name\n boar.obj_reset_changes()\n return boar", "def transform_bson(self, value):\n return value", "def instance_to_model(self):\n pass", "def transform(self, item):\n return self.transformer(item)", "def convert(self):\n return", "def transform():\n pass", "def to_entity(cls, model_obj: \"SqlalchemyModel\"):\n item_dict = {}\n for field_name in attributes(cls.meta_.entity_cls):\n item_dict[field_name] = getattr(model_obj, field_name, None)\n return cls.meta_.entity_cls(item_dict)", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def _transform(obj):\n\n if isinstance(obj, date) or isinstance(obj, time) or isinstance(obj, datetime):\n return str(obj)\n if isinstance(obj, decimal):\n return str(float(obj))\n if obj == None: \n return 'null'\n return str(obj)", "def __convert( source ):\n # Just in case things get this far but we don't know about the record\n if source['recordType'] not in definitions.RECORDS:\n return {\n 'rec_type': source['recordType']\n }\n\n # Create a flat wrapper\n record = estreamer.common.Flatdict( source )\n\n # Transform\n output = __selectWithNewKeys( record )\n\n return output", "def to_dto(cls, obj):\n new_dto = cls()\n\n # Grab DTO from db if exists\n if cls == EpisodeDTO:\n existing = EpisodeDAO().find(number=obj.number).first()\n if existing:\n new_dto = existing\n\n for key, value in obj.__dict__.iteritems():\n if key == 'transcripts':\n setattr(new_dto, key, [DTOConverter.to_dto(TranscriptDTO, t) for t in value])\n elif key == 'acts' and cls == TranscriptDTO:\n setattr(new_dto, key, [DTOConverter.to_dto(ActDTO, a) for a in value])\n elif key == 'subtitles':\n setattr(new_dto, key, [DTOConverter.to_dto(SubtitleDTO, s) for s in value])\n else:\n setattr(new_dto, key, value)\n\n return new_dto", "def db_to_class(cls, record):\n raise NotImplementedError()", "def _transform(self, document):\n pass", "def _to_document(self, document):\n obj = self.document()\n obj._set_from_db(document)\n return obj", "def map_data(self, obj: object):\n pass", "def convert_to_model(self, *args):", "def serialize(self, obj):\n return obj", "def novabase_simplify(self, obj, skip_complex_processing=False):\n\n if not self.already_processed(obj):\n\n obj.update_foreign_keys()\n key = self.get_cache_key(obj)\n\n if self.simple_cache.has_key(key):\n simplified_object = self.simple_cache[key]\n else:\n novabase_classname = str(obj.__class__.__name__)\n if novabase_classname == \"LazyReference\":\n novabase_classname = obj.resolve_model_name()\n if isinstance(obj, dict) and \"novabase_classname\" in obj:\n novabase_classname = obj[\"novabase_classname\"]\n tmp = {\n \"simplify_strategy\": \"novabase\",\n \"tablename\": 
obj.__tablename__,\n \"novabase_classname\": novabase_classname,\n \"id\": obj.id,\n \"pid\": extract_adress(obj)\n }\n if hasattr(tmp, \"user_id\"):\n tmp = merge_dicts(obj, {\"user_id\": obj.user_id})\n if hasattr(tmp, \"project_id\"):\n tmp = merge_dicts(tmp, {\"project_id\": obj.project_id})\n if not key in self.simple_cache:\n self.simple_cache[key] = tmp\n self.target_cache[key] = obj\n\n simplified_object = tmp\n\n if skip_complex_processing:\n return simplified_object\n\n key = self.get_cache_key(obj)\n if not key in self.simple_cache:\n self.simple_cache[key] = simplified_object\n self.target_cache[key] = obj\n\n complex_object = self.extract_complex_object(obj)\n\n metadata_class_name = novabase_classname\n complex_object[\"metadata_novabase_classname\"] = metadata_class_name\n complex_object[\"pid\"] = extract_adress(obj)\n complex_object[\"rid\"] = str(self.request_uuid)\n\n if not key in self.complex_cache:\n self.complex_cache[key] = complex_object\n else:\n key = self.get_cache_key(obj)\n simplified_object = self.simple_cache[key]\n return simplified_object", "def transform(self, X):\n return self.transformer.transform(X)", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def transform(self, df):\n\t\tdf = self.__parse_json(df)\n\t\tdf = self.__fillnan(df)\n\t\tdf = self.__parse_dates(df)\n\t\tdf['budget'] = df['budget'].apply(lambda x: self.missing_budget_imputing if int(x) == 0 else x)\n\t\tdf['has_collections'] = df['belongs_to_collection'].isna().astype(int)\n\t\tdf['homepage'] = df['homepage'].isna().astype(int)\n\t\tdf['is_en'] = df['original_language'].apply(lambda x: 1 if x == 'en' else 0)\n\t\tdf = self.__encode_genre_transform(df)\n\t\tdf = self.__top_countries_and_companies_transform(df)\n\t\tdf = self.__bin_columns_transform(df)\n\t\tdf.drop(\n\t\t\t['release_date', 'original_language', 'production_countries', 'production_companies', 'id', 'backdrop_path',\n\t\t\t 'imdb_id', 'poster_path', 'video', 'belongs_to_collection', 'status', 'runtime',\n\t\t\t 'original_title', 'overview', 'tagline', 'title'], axis=1, inplace=True)\n\t\treturn df", "def class_to_db(self):", "def to_obj(self, entity):\n return {k: conv(getattr(entity, k, None)) for k, conv in self.cols_to_obj.items()}", "def to_legacy(self) -> object:\n pass", "def change_to_object(column, data):\n data[column] = data[column].astype('object')", "def _convert_db_to_type(self, db_entity, to_type):\n if isinstance(to_type, list):\n to_type = to_type[0]\n\n def _convert(db_obj):\n api_type = to_type.from_data_model(db_obj)\n if to_type == lb_types.LoadBalancerResponse:\n api_type.vip = lb_types.VIP.from_data_model(db_obj.vip)\n elif (to_type == pool_types.PoolResponse\n and db_obj.session_persistence):\n api_type.session_persistence = (\n pool_types.SessionPersistenceResponse.from_data_model(\n db_obj.session_persistence))\n return api_type\n if isinstance(db_entity, list):\n converted = [_convert(db_obj) for db_obj in db_entity]\n else:\n converted = _convert(db_entity)\n return converted", "def convert(obj, unit, axis):\r\n return obj", "def transform(self, xs, field='user', inverse=False):\n if inverse:\n if field == 'user':\n _dict = self.user_inverse_dict\n elif field == 'item':\n _dict = self.item_inverse_dict\n elif field == 'genre':\n _dict = self.genre_inverse_dict\n else:\n if field == 'user':\n _dict = self.user_dict\n elif field == 'item':\n _dict = self.item_dict\n elif field == 'genre':\n _dict = self.genre_dict\n\n 
return [_dict[x] for x in xs]", "def _deconstruct_object(cls, obj):\n if not hasattr(obj, 'deconstruct'):\n raise NotImplementedError(\n '%s.deconstruct() is not available on this version of '\n 'Django. Subclases of the serializer should override '\n '_deconstruct_object to support this.')\n\n return obj.deconstruct()", "def __object_demapper(self, data: list) -> pd.DataFrame:\n data = pd.DataFrame.from_records([s.to_dict() for s in data])\n\n return data", "def get_model(self, payload):\n return super(BulkEntryTransformer, self).to_model(payload)", "def to_base(self, obj):\n if hasattr(obj, \"to_base\"):\n return obj.to_base()\n return obj", "def to_object(cls, query_dict: Dict):\n pass", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def restore(self, obj):\n return obj", "def _from_db_object(nodegroup, db_nodegroup):\n for field in nodegroup.fields:\n nodegroup[field] = db_nodegroup[field]\n\n nodegroup.obj_reset_changes()\n return nodegroup", "def GetDataAsObject(self):", "def to_python(self, value):\n # Composite types are serialized as JSON blobs. If BaseField.to_python\n # is called with a string, assume it was produced by value_to_string\n # and decode it\n if isinstance(value, str):\n try:\n value = json.loads(value)\n except ValueError as exc:\n raise ValidationError(\n self.error_messages[\"bad_json\"],\n code=\"bad_json\",\n ) from exc\n\n return self.Meta.model(\n **{\n name: field.to_python(value.get(name))\n for name, field in self.Meta.fields\n }\n )\n\n return super().to_python(value)", "def convertTo( self, cls, data=True, keys=True ):\n return self.g.convertTo( cls, data=data, keys=keys )", "def _convert_id_object(self, origin):\n if isinstance(origin, str):\n return ObjectId(origin)\n elif isinstance(origin, (list, set)):\n return [ObjectId(item) for item in origin]\n elif isinstance(origin, dict):\n for key, value in origin.items():\n origin[key] = self._convert_id_object(value)\n return origin", "def _from_db_object(user, db_user):\n foreign_key = ['project']\n for field in user.fields:\n if field not in foreign_key:\n user[field] = db_user[field]\n elif field == 'project' and db_user.project:\n user['project'] = db_user.project.name\n\n user.obj_reset_changes()\n return user", "def simplify(self, obj):\n\n result = self.process_object(obj, False)\n return result", "def _from_dict_transform(cls: Type[TPrimaryElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n data = super()._from_dict_transform(data)\n\n if 'primary' in data:\n data['is_primary'] = data.pop('primary')\n\n return data", "def TransformObject(object_id, matrix, copy=False):\n rc = TransformObjects(object_id, matrix, copy)\n if rc: return rc[0]\n return scriptcontext.errorhandler()", "def convert(self, sm):\n return self.visit(sm)", "def cast(self):\n if self.validate():\n if 'blueprint' in self.data:\n # A single blueprint\n obj = Blueprint.Blueprint()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n elif 'blueprint-book' in self.data:\n # A book of blueprints\n obj = BlueprintBook.BlueprintBook()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n else:\n # Unknown datatype. 
Just return the object\n return self\n \n else:\n # Broken validation means just return the object\n return self", "def translate_db_fields(cls, data):\r\n dst_data = data.copy()\r\n for name, col in cls._columns.items():\r\n key = col.db_field or name\r\n if key in dst_data:\r\n dst_data[name] = dst_data.pop(key)\r\n\r\n return dst_data", "def _handle_object(self, node):\n # Look up the model using the model loading mechanism. If this fails,\n # bail.\n Model = self._get_model_from_node(node, \"resname\")\n\n # Start building a data dictionary from the object.\n # If the node is missing the pk set it to None\n bits = node.getAttribute(\"resname\").split(\".\")\n keytype = node.getAttribute(\"d:keytype\") or 'pk'\n if len(bits) == 3:\n pk = bits[2]\n else:\n pk = None\n\n data = {}\n\n if keytype == 'pk':\n data[Model._meta.pk.attname] = Model._meta.pk.to_python(pk)\n else:\n try:\n data[Model._meta.pk.attname] = Model.objects.get_by_natural_key(pk).pk\n except (Model.DoesNotExist, AttributeError):\n pass\n\n # Also start building a dict of m2m data (this is saved as\n # {m2m_accessor_attribute : [list_of_related_objects]})\n m2m_data = defaultdict(list)\n\n # Create a reference for genericForeignKeys, if necessary\n virtual_fields = dict([(x.name, x) for x in Model._meta.virtual_fields])\n\n # Deseralize each field.\n for field_node in node.getElementsByTagName(\"trans-unit\"):\n # If the field is missing the name attribute, bail (are you\n # sensing a pattern here?)\n field_name = field_node.getAttribute(\"resname\")\n if not field_name:\n raise base.DeserializationError(\"<trans-unit> node is missing the 'resname' attribute\")\n\n # Get the field from the Model. This will raise a\n # FieldDoesNotExist if, well, the field doesn't exist, which will\n # be propagated correctly.\n try:\n field = Model._meta.get_field(field_name)\n except:\n if field_name in virtual_fields:\n field = virtual_fields[field_name]\n else:\n raise\n\n # As is usually the case, relation fields get the special treatment.\n if isinstance(field, GenericForeignKey):\n data[field.name] = self._handle_gfk_field_node(field_node, field)\n elif field.rel and isinstance(field.rel, models.ManyToManyRel):\n # There can be multiple instances since each relation has its own tag\n m2m_data[field.name].append(self._handle_m2m_field_node(field_node, field))\n elif field.rel and isinstance(field.rel, models.ManyToOneRel):\n data[field.attname] = self._handle_fk_field_node(field_node, field)\n else:\n if field_node.getElementsByTagName('None'):\n value = None\n else:\n tag = field_node.getElementsByTagName('target')\n if len(tag) == 0:\n tag = field_node.getElementsByTagName('source')\n if len(tag) != 0:\n value = field.to_python(getInnerText(tag[0]).strip())\n else:\n value = None\n data[field.name] = value\n\n # Return a DeserializedObject so that the m2m data has a place to live.\n return base.DeserializedObject(Model(**data), m2m_data)", "def from_dto(cls, obj):\n if obj is None:\n return None\n\n if not hasattr(obj, '_data'):\n return None\n\n new_model = cls()\n\n for key in obj._data:\n if key == 'transcripts':\n setattr(new_model, key, [DTOConverter.from_dto(Transcript, t) for t in obj._data[key]])\n elif key == 'acts' and cls == Transcript:\n setattr(new_model, key, [DTOConverter.from_dto(Act, a) for a in obj._data[key]])\n elif key == 'subtitles':\n setattr(new_model, key, [DTOConverter.from_dto(Subtitle, s) for s in obj._data[key]])\n else:\n if key != 'id':\n setattr(new_model, key, obj._data[key])\n\n return new_model", 
"def transform(self, X):\n\n X = super().transform(X)\n\n X[self.columns] = self.value\n\n return X", "def transform(self, X):\n raise NotImplementedError()", "def database_to_python(self, value):\r\n return value", "def to_model(self, obj):\n if obj is None:\n raise UnprocessableEntity(\"expected data in request, was empty\", what=BAD_VALUE)\n \n if not isinstance(obj, Mapping):\n raise UnprocessableEntity(\"expected data object in request\", what=BAD_VALUE)\n \n return {k: self.cols_to_model[k](v) for k, v in obj.items() if k in self.cols_to_model}", "def foreign_translate(object):\n if object == None:\n return Null\n if isinstance(object, (int, long, float)):\n return Decimal(object)\n return object", "def from_db_value(self, value, *args, **kwargs):\n return self.to_python(value)", "def transform_bson(self, value: Dict) -> Entity:\n try:\n return from_dict(\n data_class=self.python_type,\n config=self._config,\n data=value\n )\n except MissingValueError as e:\n raise e\n except DefaultValueNotFoundError as e:\n raise e", "def alchemyencoder(obj):\r\n if isinstance(obj, datetime.date):\r\n return obj.isoformat()\r\n else:\r\n return object_as_dict(obj)", "def to_model(cls, obj):\n\n new_model = cls()\n\n for key, value in obj.iteritems():\n if value:\n if key == 'transcripts':\n setattr(new_model, key, [ModelConverter.to_model(Transcript, t) for t in value])\n elif key == 'acts' and cls == Transcript:\n setattr(new_model, key, [ModelConverter.to_model(Act, a) for a in value])\n elif key == 'subtitles':\n setattr(new_model, key, [ModelConverter.to_model(Subtitle, s) for s in value])\n else:\n setattr(new_model, key, value)\n\n return new_model", "def from_db_value(self, value, *args):\n if value is not None:\n try:\n value = dbsafe_decode(value, self.compress)\n except Exception:\n # If the value is a definite pickle; and an error is raised in\n # de-pickling it should be allowed to propogate.\n if isinstance(value, PickledObject):\n raise\n else:\n if isinstance(value, _ObjectWrapper):\n return value._obj\n return value", "def alchemyencoder(obj):\n #By default python can't even serialize it's own datetime & decimal classes\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, decimal.Decimal):\n return float(obj)", "def to_orm(self):\n data = {}\n for key, value in self:\n # If field is Type[ORMModel],\n # recursively convert to an ORM object.\n if hasattr(value, \"__orm__\"):\n data[key] = value.to_orm()\n # If the field is a dictionary, iterate over\n # values and convert any ORM models to ORM objects\n # else leave them alone.\n elif isinstance(value, dict):\n nested_data = {}\n for nested_key, nested_value in value:\n if hasattr(nested_value, \"__orm__\"):\n nested_data[key] = nested_value.to_orm()\n else:\n nested_data[key] = value\n data[key] = nested_data\n # If the field is an iterable, iterate through list\n # and convert ORM Models to ORM objects.\n #\n # There has to be a better way to write this conditional...\n elif (\n isinstance(value, collections.Iterable) and\n type(value) not in (str, bytearray, bytes)\n ):\n nested_data = []\n for nested_value in value:\n if hasattr(nested_value, \"__orm__\"):\n nested_data.append(nested_value.to_orm())\n else:\n nested_data.append(nested_value)\n # Convert iterable to the appropriate type at the\n # end.\n data[key] = type(value)(nested_data)\n # Leave the value alone if its not an ORMModel\n else:\n data[key] = value\n return self.__orm__(**data)", "def transform(self, x):", "def map(self, obj):\n if 
isinstance(obj, np.ndarray) and obj.ndim >= 2 and obj.shape[0] in (2,3):\n return fn.transformCoordinates(self, obj)\n else:\n return QtGui.QMatrix4x4.map(self, obj)", "def reconstruct(self, x):\n return self.inverse_transform(self.transform(x))", "def transform(self, data: pd.DataFrame):\n raise NotImplementedError", "def transform(self, X):\n return super().transform(X)", "def transform_record(self, pid, record, links_factory=None, **kwargs):\n context = kwargs.get(\"marshmallow_context\", {})\n context.setdefault(\"pid\", pid)\n context.setdefault(\"record\", record)\n return self.dump(\n self.preprocess_record(pid, record, links_factory=links_factory, **kwargs),\n context,\n )", "def preprocess_record(record):\n automatic_fields = ['created_at', 'modified_at']\n record = serialize_fields(filter_out_dict_keys(record, automatic_fields))\n\n return record", "def _Transform(obj, jac=None, offset=(0.,0.), flux_ratio=1.):\n ret = Transformation.__new__(Transformation)\n ret._gsparams = obj.gsparams\n ret._propagate_gsparams = True\n ret._jac = jac\n ret._dx, ret._dy = offset\n if isinstance(obj, Transformation):\n if obj._has_offset:\n if jac is None:\n dx1, dy1 = obj._dx, obj._dy\n else:\n dx1, dy1 = ret._fwd_normal(obj._dx, obj._dy)\n ret._dx += dx1\n ret._dy += dy1\n if jac is None:\n ret._jac = obj._jac\n else:\n ret._jac = ret._jac if obj._jac is None else ret._jac.dot(obj.jac)\n ret._flux_ratio = flux_ratio * obj._flux_ratio\n ret._original = obj._original\n else:\n ret._flux_ratio = flux_ratio\n ret._original = obj\n ret._has_offset = (ret._dx != 0. or ret._dy != 0.)\n return ret", "def alchemyencoder(obj):\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, decimal.Decimal):\n return float(obj)", "def alchemyencoder(obj):\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, decimal.Decimal):\n return float(obj)", "def alchemyencoder(obj):\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, decimal.Decimal):\n return float(obj)", "def _transform(self, dataset):\n raise NotImplementedError()", "def _get_data(self, record, encoder):\n try:\n return encoder.transform(record)\n except AttributeError:\n return encoder(record)", "def _before_stockpyle_deserialize(self, obj):\n \n # only merge SA objects\n if _is_sqlalchemy_object(obj):\n self.__session.merge(obj, load=False)", "def transform(self, X: pd.DataFrame):\n return self.feature_transformer.transform(X)", "def migration (self, * filters) :\n result = dict \\\n ( (k, {})\n for k in (\"Account\", \"Group\", \"Person\", \"links\")\n )\n for obj in self.query (* filters).order_by (Q.pid) :\n result [\"Account\"].update ((obj.as_migration (), ))\n if getattr (obj, \"person\", None) :\n result [\"Person\"].update ((obj.person.as_migration (), ))\n result [\"links\"].update ((obj.person_link.as_migration (), ))\n for gl in obj.group_links :\n result [\"Group\"].update ((gl.group.as_migration (), ))\n result [\"links\"].update ((gl.as_migration (), ))\n return result", "def _from_dict_transform(cls: Type[TElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n if 'application' in data:\n data['created_by'] = data.pop('application')\n\n if 'added_timestamp' in data:\n data['created_ts'] = data.pop('added_timestamp')\n\n if 'created_ts' not in data:\n # some really old nin entries in the database have neither created_ts nor modified_ts\n data['_no_created_ts_in_db'] = True\n data['created_ts'] = datetime.fromisoformat('1900-01-01')\n\n if 
'modified_ts' not in data:\n data['_no_modified_ts_in_db'] = True\n # Use created_ts as modified_ts if no explicit modified_ts was found\n data['modified_ts'] = data['created_ts']\n\n return data", "def data_convert(items):\n for item in items:\n converted_item = item.copy()\n if 'quantity_available' in item: # convert columns\n converted_item['quantity_available'] =\\\n int(item['quantity_available'])\n\n yield converted_item", "def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" }\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n d[\"analytical_type\"] = obj[2]\r\n except:\r\n pass\r\n except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field", "def to_python(self, value):\n if isinstance(value, str):\n return value\n\n if hasattr(value, \"to_python\"):\n return value.to_python()\n\n BaseDocument = _import_class(\"BaseDocument\")\n if isinstance(value, BaseDocument):\n # Something is wrong, return the value as it is\n return value\n\n is_list = False\n if not hasattr(value, \"items\"):\n try:\n is_list = True\n value = {idx: v for idx, v in enumerate(value)}\n except TypeError: # Not iterable return the value\n return value\n\n if self.field:\n self.field._auto_dereference = self._auto_dereference\n value_dict = {\n key: self.field.to_python(item) for key, item in value.items()\n }\n else:\n Document = _import_class(\"Document\")\n value_dict = {}\n for k, v in value.items():\n if isinstance(v, Document):\n # We need the id from the saved object to create the DBRef\n if v.pk is None:\n self.error(\n \"You can only reference documents once they\"\n \" have been saved to the database\"\n )\n collection = v._get_collection_name()\n value_dict[k] = DBRef(collection, v.pk)\n elif hasattr(v, \"to_python\"):\n value_dict[k] = v.to_python()\n else:\n value_dict[k] = self.to_python(v)\n\n if is_list: # Convert back to a list\n return [\n v for _, v in sorted(value_dict.items(), key=operator.itemgetter(0))\n ]\n return value_dict", "def transform_python(self, value: Entity) -> Dict:\n if self._schema_type:\n _schema_type: type = self._schema_type\n # noinspection PyTypeChecker\n _schema: Schema = _schema_type()\n _dict = _schema.dump(value)\n return _dict\n\n return dict(value)", "def serialize_model(result):\n relation_prefix = '/' # this prefix is added to fields that are a relation\n\n # Note: unloaded property is used to discard fields that are not loaded, ie. 
lazily loaded,\n # such as relationships (by default), and fields not specified in query select clause.\n selected = (lambda field: not field in sqlalchemy.orm.attributes.instance_state(result).unloaded)\n fields = list(filter(selected, result._sa_instance_state.attrs.keys()))\n\n object = {}\n for field in fields:\n\n try:\n value = getattr(result, field)\n except AttributeError:\n continue # we are permissive\n\n if not is_relationship(getattr(result.__class__, field)):\n object[field] = value\n\n else:\n if isinstance(value, sqlalchemy.orm.collections.InstrumentedList):\n # ..n relationship: value is a list of sqla models\n object[relation_prefix + field] = list(map(lambda result: result.id, value))\n\n elif isinstance(type(value), sqlalchemy.ext.declarative.api.DeclarativeMeta):\n # ..1 relationship: value is a sqla model\n object[relation_prefix + field] = value.id\n\n else:\n # ..1 relationship: value shall be empty\n object[relation_prefix + field] = value\n\n return object", "def hydrate(self):\n # Preserve unhydrated fields.\n unhydrated_fields = copy.copy(self.__dict__)\n\n # Hydrate\n self.pull()\n hydrated_fields = vars(self)\n\n # Merge fields\n for key, value in unhydrated_fields.items():\n if (\n hydrated_fields.get(key) is None\n and unhydrated_fields.get(key) is not None\n ):\n hydrated_fields[key] = value\n\n # Re-initialize the object.\n self.__init__(**hydrated_fields)", "def transform(self, data, attr):\n data['point'] = torch.from_numpy(data['point'])\n data['feat'] = torch.from_numpy(data['feat'])\n data['label'] = torch.from_numpy(data['label'])\n\n return data", "def convertData(data):\n\n return data", "def _to_obj_tuple(self, obj, numeric=False):\n if isinstance(obj, ObjectRow):\n object_type, object_id = obj['type'], obj['id']\n else:\n try:\n object_type, object_id = obj\n if not isinstance(object_type, (int, str)) or not isinstance(object_id, (int, QExpr)):\n raise TypeError\n except TypeError:\n raise ValueError('Object reference must be either ObjectRow, or (type, id), got %s' % obj)\n\n if numeric:\n object_type = self._get_type_id(object_type)\n\n return object_type, object_id", "def compile_as_obj(self, data):\r\n data = self.compile_as_list(data)\r\n print(data)\r\n fut = []\r\n for i in data:\r\n obj = Record(**i)\r\n obj._primary_key = self.primary_key\r\n fut.append(obj)\r\n return fut" ]
[ "0.65609854", "0.64150685", "0.6350461", "0.6347081", "0.6238227", "0.5997735", "0.598158", "0.59786004", "0.59712017", "0.58984053", "0.58658886", "0.5845334", "0.58016354", "0.58016354", "0.58016354", "0.58016354", "0.58016354", "0.58016354", "0.58016354", "0.5780222", "0.5767579", "0.57582664", "0.5752189", "0.5706217", "0.57021976", "0.56721956", "0.56665605", "0.5659756", "0.5647658", "0.5629202", "0.5626405", "0.5626405", "0.5626405", "0.5621586", "0.5621167", "0.5594319", "0.55840415", "0.5570752", "0.55346096", "0.5511705", "0.550301", "0.54877913", "0.54819894", "0.54805857", "0.54754925", "0.54713166", "0.5460606", "0.54586804", "0.54493403", "0.54457694", "0.5440966", "0.54355687", "0.542659", "0.54161674", "0.54117185", "0.54100263", "0.54098564", "0.5399547", "0.5399528", "0.5396085", "0.53925073", "0.53861105", "0.5383649", "0.53827965", "0.53760123", "0.5371753", "0.5365784", "0.53611237", "0.53540736", "0.5345779", "0.53427637", "0.5334427", "0.5333155", "0.5330213", "0.53299904", "0.53140324", "0.5309111", "0.5294834", "0.5293037", "0.52925193", "0.52804285", "0.52796406", "0.5276646", "0.5276646", "0.5276646", "0.5274362", "0.52693033", "0.5263898", "0.5263609", "0.5254885", "0.52510864", "0.52508867", "0.5246285", "0.52360743", "0.5230034", "0.5219933", "0.5216786", "0.5206634", "0.52039456", "0.5195996", "0.51911426" ]
0.0
-1
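Likewise for the record above: a minimal self-contained sketch of its type-dispatch pattern. The Event class, the classify helper, and the collection_to_class mapping are toy stand-ins; the source's common.classify and its real mapping are not shown in this record.

# Toy stand-ins (assumed, not from the source project).
class Event:
    @classmethod
    def demongofy(cls, son):
        obj = cls()
        obj.__dict__.update(son)
        return obj

def classify(type_name):
    # In the source this is common.classify; here a fixed lookup.
    return {'event': Event}[type_name]

collection_to_class = {'events': Event}

def transform_outgoing(domain, son):
    # Prefer an explicit 'type' tag, else fall back to the per-collection mapping.
    if 'type' in son:
        return classify(son['type']).demongofy(son)
    try:
        return collection_to_class[domain].demongofy(son)
    except KeyError:
        return son

rec = transform_outgoing('events', {'name': 'demo'})
print(type(rec).__name__)  # Event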
test if the stations are sorted correctly by distance
def test_stations_by_distance():
    station_list = build_station_list()
    # test for stations closest to cambridge city coordinates
    station_list_sort = stations_by_distance(station_list, (52.2053, 0.1218))
    output = [(station.name, distance) for (station, distance) in station_list_sort]
    for n in range(1, len(station_list)):
        # make sure that the distance of the previous station to the point is less than the next one in the list
        assert output[n-1][1] <= output[n][1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_nearest_filter(self):\n for airport, reports, count in (\n (True, True, 6),\n (True, False, 16),\n (False, True, 6),\n (False, False, 30),\n ):\n stations = station.nearest(30, -80, 30, airport, reports, 1.5)\n self.assertEqual(len(stations), count)", "def test_nearest(self):\n dist = station.nearest(28.43, -81.31)\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"KMCO\")\n for val in dist.values():\n self.assertIsInstance(val, float)\n for *params, count in (\n (30, -82, 10, True, True, 0.2, 1),\n (30, -82, 10, True, False, 0.2, 5),\n (30, -82, 10, False, False, 0.2, 6),\n (30, -82, 1000, True, True, 0.5, 6),\n (30, -82, 1000, False, False, 0.5, 37),\n ):\n stations = station.nearest(*params)\n self.assertEqual(len(stations), count)\n for dist in stations:\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n for val in dist.values():\n self.assertIsInstance(val, float)", "def test_get_closest_stations(self):\n\t\tpoint = \"POINT(40.71911552 -74.00666661)\"\n\t\tstations = set(server.get_closest_stations(point))\n\t\t# find the closest stations, make them a set of objects see if sets intersect completely", "def test_default_ordering(self):\n request = self.factory.get('/api/v1/cars', {'latitude': self.latitude,\n 'longitude': self.longitude,\n 'location': self.location,\n 'distance': 10000}) # inf distance to show all ads\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n # find two nearest cars and distance from our location to them\n cars = response.data['results'][0:2]\n d = [get_distance_between_coords(self.latitude,\n self.longitude,\n cars[i]['latitude'],\n cars[i]['longitude']\n ) for i in range(2)]\n # the first car must be closer than the second one\n self.assertLessEqual(d[0], d[1])\n # and they must not be similar\n self.assertNotEqual(cars[0], cars[1])", "def run():\n\n # Build list of tuples of station names and distance \n stations = build_station_list()\n p = (52.2053, 0.1218)\n by_distance = stations_by_distance(stations, p)\n for n in range(10):\n print(by_distance[n])\n for n in range(10):\n i = len(by_distance) - 10 + n\n print(by_distance[i])", "def comparable_dist(zamg_id):\n station_lat, station_lon = stations[zamg_id]\n return (lat - station_lat) ** 2 + (lon - station_lon) ** 2", "def test_different_routes_from_c_to_c_and_distance_less_than_30(self):\n railroad = trains.Railroad()\n routes = railroad.find_routes('C', 'C', 9)\n routes = railroad.filter_routes_by_distance(routes, 0, 30)\n self.assertEqual(len(routes), 7)", "def chopnod_sort(self, table):\n if not isinstance(table, Table):\n return\n elif None in [self.chopdist, self.noddist]:\n return\n elif 'xcentroid' not in table.columns or \\\n 'ycentroid' not in table.columns:\n return\n dist = np.sqrt((self.chopdist ** 2) + (self.noddist ** 2))\n x0, y0 = table['xcentroid'], table['ycentroid']\n valid = [False] * len(table)\n for idx, row in enumerate(table):\n dx = x0 - row['xcentroid']\n dy = y0 - row['ycentroid']\n dr = np.sqrt((dx ** 2) + (dy ** 2))\n dchop = abs(dr - self.chopdist)\n dnod = abs(dr - self.noddist)\n dchopnod = abs(dr - dist)\n ok = (np.array([dchop, dnod, dchopnod]) < self.epsilon)\n if ok.astype(int).sum() >= 2:\n valid[idx] = True\n table = table[valid]", "def test_nearest(self):\n for lat, lon, icao in ((28.43, -81.31, \"KMCO\"), (28.43, -81, \"KTIX\")):\n stn, dist = station.Station.nearest(lat, lon, is_airport=True)\n 
self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, icao)\n for val in dist.values():\n self.assertIsInstance(val, float)\n # Test with IATA req disabled\n stn, dist = station.Station.nearest(28.43, -81, False, False)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"FA18\")\n for val in dist.values():\n self.assertIsInstance(val, float)", "def _rank_stations_by_distance_and_quality(lat, lon):\n\n station_ranking = rank_stations(lat, lon)\n station_ranking['enumerated_quality'] = station_ranking['rough_quality'].map(QUALITY_SORT)\n station_ranking = station_ranking.sort_values(by=['distance_meters', 'enumerated_quality'])\n return station_ranking", "def test_get_distance_to_same_place() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n )\n\n assert meters == 0", "def closest_stations(latlong, df):\n names = df['name'].values\n station_dists = {}\n for (lat, lon, name) in list(df[['Lat', 'Lon', 'name']].value_counts().index):\n if not(np.isnan(lat) or np.isnan(lon)):\n station_dists[name] = haversine(latlong, (lat, lon)) \n \n return sorted(station_dists.items(), key=lambda x: x[1])", "def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01", "def test_distance_function(self):\n if connection.ops.oracle:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n elif connection.ops.spatialite:\n if connection.ops.spatial_version < (5,):\n # SpatiaLite < 5 returns non-zero distance for polygons and points\n # covered by that polygon.\n ref_dists = [326.61, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4891.20, 8071.64, 9123.95]\n htown = City.objects.get(name=\"Houston\")\n qs = Zipcode.objects.annotate(\n distance=Distance(\"poly\", htown.point),\n distance2=Distance(htown.point, \"poly\"),\n )\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance.m, ref, 2)\n\n if connection.ops.postgis:\n # PostGIS casts geography to geometry when distance2 is calculated.\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance2.m, ref, 2)\n\n if not connection.ops.spatialite:\n # Distance function combined with a lookup.\n hzip = Zipcode.objects.get(code=\"77002\")\n self.assertEqual(qs.get(distance__lte=0), hzip)", "def check_sort(self):\n if self.list == []:\n return True\n seg_iter = iter(self.list)\n last = next(seg_iter)\n for segment in seg_iter:\n if last > segment:\n raise Exception('non trié')\n last = segment\n return True", "def CheckIfStationsAreVisitedInGivenOrder(ConnectionInfo, PathInfo, RouteConditions, OrderedStationList):\r\n\t# shortcuts\r\n\tif not PathInfo or len(PathInfo) < 2:\r\n\t\treturn True \r\n\tif not OrderedStationList or len(OrderedStationList) < 2:\r\n\t\treturn True \r\n\r\n\t# return true if next station is not in OrderedStationList\r\n\tNextStation = ConnectionInfo[ConnInfoInd['station_to']]\r\n\tif not NextStation in OrderedStationList:\r\n\t\treturn True \r\n\telse:\r\n\t\t# get last (highest-order) already visited station in OrderedStationList\r\n\t\tLastListedStation = None\r\n\t\tMaxInd = -1\r\n\t\tfor i in range(1, len(PathInfo)+1):\r\n\t\t\tstation = PathInfo[-i][ConnInfoInd['station_to']]\r\n\t\t\t\r\n\t\t\tif station in 
OrderedStationList:\r\n\t\t\t\tind = OrderedStationList.index(station)\r\n\t\t\t\tif ind > MaxInd:\r\n\t\t\t\t\tLastListedStation = station \r\n\t\t\t\t\tMaxInd = ind\r\n\r\n\t\t# check station orders (an equal or lower order station can be visited again)\r\n\t\tNextStationIND = OrderedStationList.index(NextStation) + 1\r\n\r\n\t\tLastStationIND = 0\r\n\t\tif LastListedStation:\r\n\t\t\tLastStationIND = OrderedStationList.index(LastListedStation) + 1\r\n\r\n\t\tif NextStationIND <= LastStationIND + 1:\r\n\t\t\treturn True \r\n\t\telse:\r\n\t\t\treturn False", "def test_EstimateDistances(self):\n d = EstimateDistances(self.al, JC69())\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)\n \n # excercise writing to file\n d.writeToFile('junk.txt')\n try:\n os.remove('junk.txt')\n except OSError:\n pass # probably parallel", "def create_station_list(self):\n sorted_station_list = sorted(self.station_dict, key=self.station_dict.get)\n\n return sorted_station_list", "def compare_distance(self, a, b):\n a_dist = int(a['distance'])\n b_dist = int(b['distance'])\n if a_dist < b_dist:\n return -1\n elif a_dist > b_dist:\n return 1\n else:\n return 0", "def _check_normalization(self):\n lastDistance = None\n distance = None\n for idx in xrange(len(self) - 1):\n distance = self[idx+1][0] - self[idx][0]\n\n # first run\n if lastDistance is None:\n lastDistance = distance\n continue\n\n if lastDistance != distance:\n return False\n\n lastDistance = distance\n\n return True", "def test_distances(self):\n for p1, p2, distance in DISTANCES:\n calculated = p1.approximate_distance_meters(p2)\n self.assertAlmostEqual(distance, calculated, delta=5)", "def update_table(vec1, vec2, dist):\n flag = False\n\n for router_to in range(len(vec1)):\n if vec1[router_to] > vec2[router_to] + dist:\n vec1[router_to] = vec2[router_to] + dist\n flag = True\n\n return vec1, flag", "def test_distance_aed(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AED'), 'NO SUCH ROUTE')", "def is_sorted(self):\n previous = 0 # Setting to 0 shouldn't be an issue aslong as MIN_VALUE is at least 0\n for value in self.data:\n if value < previous:\n return False\n previous = value\n return True", "def miss_station(all_stations,stations):\n\tdiff = len(all_stations)-len(stations)\n k=0\n i=0\n miss_stations = ['']*diff\n a = all_stations[:]\n a.sort()\n s = stations[:]\n s.sort()\n while i < len(stations):\n while a[i] != s[i]:\n miss_stations[k]=a[i]\n del a[i]\n k+=1\n i+=1\n\treturn miss_stations", "def maybe_distal(self):\n return bool(set(self.locations) & set(StandardTerminology.DISTAL_LOCATIONS))", "def check_sorted(self):\n last_count = np.inf\n for count in self.Nx:\n if count > last_count:\n self.sorted = False\n return self.sorted\n last_count = count\n self.sorted = True\n return self.sorted", "def sort_bike_stations(bike_stations, location):\n\n stations = bike_stations.copy()\n\n for index, station in enumerate(stations):\n station_location = (station[\"lat\"], station[\"lon\"])\n dist = distance.distance(station_location, location).m\n stations[index][\"distance\"] = dist\n\n stations = sorted(stations, key=lambda station: station[\"distance\"])\n stations = list(filter(lambda station: station[\"bikesAvailable\"] > 0, stations))\n\n return stations", "def test_distance_query(self):\n locations = 
[\n Location.objects.create(name=\"The Piton Foundation\", lat=39.7438167, lng=-104.9884953),\n Location.objects.create(name=\"Hull House\", lat=41.8716782, lng=-87.6474517)\n ]\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n story.locations.add(locations[0])\n story.save()\n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='published')\n story2.locations.add(locations[1])\n story2.save()\n # If south migrations are enabled, we need to explicitly rebuild\n # the indexes because the RealTimeIndex signal handlers don't get\n # wired up. \n # See https://github.com/toastdriven/django-haystack/issues/599\n # In general, I think we can work around this by just setting\n # SOUTH_TESTS_MIGRATE = False in the settings\n #self._rebuild_index()\n req = RequestFactory().get('/explore/?near=39.7414581054089@-104.9877892025,1')\n resp = self.resource.explore_get_list(req)\n dehydrated = simplejson.loads(resp.content)\n self.assertEqual(len(dehydrated['objects']), 1)\n self.assertEqual(dehydrated['objects'][0]['story_id'], story.story_id)", "def test_exact_matches(self):\n idw = self.dset.spec.sel(\n lons=self.lons_exact, lats=self.lats_exact, method=\"idw\"\n )\n nearest = self.dset.spec.sel(\n lons=self.lons_exact, lats=self.lats_exact, method=\"nearest\"\n )\n assert abs(idw.efth - nearest.efth).max() == 0", "def check_latlon(self):\n\n for station in list(self.station_list.values()):\n station_def = self.station_definitions[station.name]\n lat = float(station.get_obs('LAT')[0])\n lon = float(station.get_obs('LON')[0])\n lat_diff = abs(lat - station_def['lat'])\n lon_diff = abs(lon - station_def['lon'])\n if lat_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lat,\n explanation=\"lats are different for: \" + station.name +\n \". Old value : \" + str(station_def['lat'])\n ))\n if lon_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lon,\n explanation=\"lons are different for: \" + station.name +\n \". 
Old value : \" + str(station_def['lon'])\n ))", "def _sort_measurements(self):\n if self._unsorted:\n sorted_ndxs = np.argsort(self._angles)\n self._distances = self._distances[sorted_ndxs]\n self._angles = self._angles[sorted_ndxs]\n self._intensities = self._intensities[sorted_ndxs]\n self._error_codes = self._error_codes[sorted_ndxs]\n self._unsorted = False", "def detour(src, dst, pitstop):\n options = on_path([src, dst],query='shell gas station', size=10,urgency=0)\n ret = []\n for place in options:\n title = place['title']\n x = place['latlon']\n addr = place['address']\n A_X = dist(src, x); X_B = dist(x, dst)\n consumer_dist = A_X['distance'] + X_B['distance']\n tour_time = A_X['trafficTime']+X_B['trafficTime']\n last_mile_dist = 2*dist(pitstop, x)['distance']\n total_trip_dist = consumer_dist + last_mile_dist\n carbon_print = total_trip_dist/(1e3 * .621 * .70548)\n ret.append({\"distance\" : consumer_dist,\n \"latlon\" : x,\n \"title\" : title,\n \"time\" : tour_time,\n \"address\" : addr,\n \"carbon\" : carbon_print})\n ret = sorted(ret, key=lambda loc: loc.get('distance'))\n #print(total_trip_dist, consumer_dist, last_mile_dist)\n\n # worst carbon\n consumer_dist = dist(src, dst)['distance']\n last_mile_dist = 2*dist(pitstop, dst)['distance']\n total_trip_dist = consumer_dist + last_mile_dist\n carbon_print = total_trip_dist/(1e3 * .621 * .70548)\n #print(total_trip_dist, consumer_dist, last_mile_dist)\n\n # worst case time A - C - B\n A_C = dist(src, pitstop)\n C_B = dist(pitstop, dst)\n total_time = A_C['trafficTime'] + C_B['trafficTime']\n return {\"meetpoints\" : ret, 'worst_time' : total_time, \"worst_carbon\" : carbon_print}", "def test_EstimateDistances_fromUnaligned(self):\n d = EstimateDistances(self.collection, JC69(), do_pair_align=True,\n rigorous_align=True)\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)\n \n d = EstimateDistances(self.collection, JC69(), do_pair_align=True,\n rigorous_align=False)\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)", "def test_get_distance(self):\n meters = location_util.distance(COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1])\n self.assertAlmostEqual(meters / 1000, DISTANCE_KM, places=2)", "def _is_sorted_by_data(graph):\n assert graph.format == \"csr\"\n out_of_order = graph.data[:-1] > graph.data[1:]\n line_change = np.unique(graph.indptr[1:-1] - 1)\n line_change = line_change[line_change < out_of_order.shape[0]]\n return out_of_order.sum() == out_of_order[line_change].sum()", "def update_dv(self):\n is_changed = False\n for name in self.distance_vector:\n smallest = float('Inf')\n smallest_neighbor = None\n for neighbor_name in self.neighbors:\n if self.neighbors[neighbor_name].is_killed:\n weight = float('Inf')\n else:\n weight = self.neighbors[neighbor_name].weight\n if name in self.neighbors[neighbor_name].distance_vector:\n candidate = self.neighbors[neighbor_name].distance_vector[name]\n candidate += weight\n if smallest > candidate:\n smallest = candidate\n smallest_neighbor = neighbor_name\n if self.distance_vector[name].cost != smallest and name != 
self.name_str:\n self.distance_vector[name].cost = smallest\n self.distance_vector[name].link = smallest_neighbor\n is_changed = True\n return is_changed", "def test_sqpp_distributed_ands_equivalent(self):\n self.assertEqual(sorted(perform_request_search(p='ellis and (kaluza-klein or r-parity)')),\n sorted(perform_request_search(p='ellis and (r-parity or kaluza-klein)')))", "def test_sort(self):\n a, b, c, d = Node('a'), Node('b'), Node('c'), Node('d')\n a | b | c\n a * 'foo' | 'bar' * c\n d | 'baz' * b\n nodes = topo_sort([a, d])\n self.assertEqual(set(nodes[:2]), set([a, d]))\n self.assertEqual(nodes[2:], [b, c])", "def is_distal(self):\n return bool(set(self.locations) and set(self.locations) <= set(StandardTerminology.DISTAL_LOCATIONS)) \\\n or bool(self.depth and 16 < self.depth < 82)", "def _check_location_order(self, locations):\n strand = None\n last_start = 0\n for location in locations:\n if strand is None:\n strand = location[2]\n elif strand != location[2]:\n return warnings[\"both_strand_coordinates\"]\n if strand == \"-\":\n locations = reversed(locations)\n for location in locations:\n if last_start > location[1]:\n return warnings[\"out_of_order\"]\n else:\n last_start = location[1]\n return None", "def validate(self):\n if len(self.independent_nodes) > 0:\n try:\n self.topological_sort()\n return True\n except ValueError:\n return False\n return False", "def distorted_distance(self):\n return self._distance", "def test_dist_itslef(self):\n X = [[0, 10], [4, 2]] # Just some points. I've no idea where on globe.\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))\n\n X = [[34.0522, 118.2437], # Lon Angeles\n [37.7749, 122.4194]] # San Francisco\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))", "def __getpredictors_distance(self, staname, distance):\n\n distfromsta = distance[staname]\n del distfromsta[staname] # remove the station to be fill from the dataframe\n distfromsta = distfromsta.sort_values()\n\n stations = self.network.getsta(distfromsta.index.values)\n # station = self.network.getsta(staname)\n\n # Only 3 closest stations\n # sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 2\n\n # Use all stations\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n # sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 3\n # sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 4\n\n # Only 3 closest stations\n # sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 1\n\n # using all stations\n sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n 
zip(stations[0:-2], stations[2:])] # selction predictors with spacing 1\n\n # sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 1\n # sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 1\n\n selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]\n selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if x]\n\n return selection, selectionnames", "def test_nearest_location_odd():\n assert nearest_location([(3, 6), (9, 13)], 7) == 0\n assert nearest_location([(3, 6), (9, 13)], 7, 1) == 1", "def test_nearest_location_even():\n assert nearest_location([(3, 6), (8, 13)], 6, 0) == 0\n assert nearest_location([(3, 6), (8, 13)], 6, 1) == 0\n assert nearest_location([(3, 6), (8, 13)], 7, 0) == 1\n assert nearest_location([(3, 6), (8, 13)], 7, 1) == 1", "def test_trips_starting_at_a_and_ending_at_c_by_4_stops(self):\n railroad = trains.Railroad()\n self.assertEqual(sorted(railroad.find_routes('A', 'C', 4, 4)), sorted(['ABCDC', 'ADCDC', 'ADEBC']))", "def test_directions_handles_more_than_max_waypoints(self):\n stops = [\n mommy.make(Stop, trips_year=self.trips_year, lat_lng=coord)\n for coord in (\n '43.705639,-72.297404',\n '43.680288,-72.527876',\n '43.779934,-72.042908',\n '43.753303,-72.124643',\n '43.703049,-72.289567',\n '43.705639,-72.297404',\n '44.831956,-71.075664',\n '44.875039,-71.05471',\n '43.736252,-72.2519',\n '43.788074,-72.099655',\n '44.227489,-71.477737',\n '43.705639,-72.297404',\n '43.680288,-72.527876',\n '43.779934,-72.042908',\n '43.753303,-72.124643',\n '43.703049,-72.289567',\n '43.705639,-72.297404',\n '44.831956,-71.075664',\n '44.875039,-71.05471',\n '43.736252,-72.2519',\n '43.788074,-72.099655',\n '44.227489,-71.477737',\n '43.705639,-72.297404',\n '44.831956,-71.075664',\n '43.753303,-72.124643',\n '43.703049,-72.289567',\n )\n ]\n directions = maps.get_directions(stops)\n self.assertEqual(len(stops), len(directions.legs) + 1)\n for i, leg in enumerate(directions.legs):\n self.assertEqual(leg.start_stop, stops[i])\n self.assertEqual(leg.end_stop, stops[i + 1])", "def test_trips_starting_and_ending_with_c_by_3_stops(self):\n railroad = trains.Railroad()\n self.assertEqual(sorted(railroad.find_routes('C', 'C', 3)), sorted(['CDC', 'CEBC']))", "def is_sorted(self):\n\n return all(self.models[i].glb[iage] <= self.models[i+1].glb[iage] for i in range(len(self.models)-1))", "def sortDistance(netlist):\n netlist_dictionary = {}\n for i in range(len(netlist)):\n start = chips[netlist[i][0]]\n end = chips[netlist[i][1]]\n\n delta_x = abs(start[0]-end[0])\n delta_y = abs(start[1]-end[1])\n distance = delta_x + delta_y\n\n netlist_dictionary[(netlist[i][0], netlist[i][1])] = distance\n\n sorted_dictionary = sorted(netlist_dictionary.items(), key=operator.itemgetter(1))\n sorted_netlist = []\n for j in range(len(sorted_dictionary)):\n sorted_netlist.append(sorted_dictionary[j][0])\n\n return sorted_netlist", "def closest_stations(lat: float, lon: float, limit: int = 1) -> List[Dict]:\n dist_sorted = sorted(\n STATIONS, key=lambda s: distance((lat, lon), (s[\"lat\"], s[\"lon\"]))\n )\n return dist_sorted[:limit]", "def test_nearest_location():\n locations = [(10, 20), (30, 40), (50, 60)]\n\n assert nearest_location(locations, 8) == 0\n assert nearest_location(locations, 15) == 0\n assert nearest_location(locations, 
22) == 0\n\n assert nearest_location(locations, 28) == 1\n assert nearest_location(locations, 35) == 1\n assert nearest_location(locations, 42) == 1\n\n assert nearest_location(locations, 48) == 2\n assert nearest_location(locations, 55) == 2\n assert nearest_location(locations, 62) == 2", "def test_sense_distance(self):\n\n\t\tmeasurements = [29, 29, 28]\n\t\tself.driver.us_dist.side_effect = lambda x: measurements.pop()\n\t\texpected_measurement = int(ultrasonic_sensor_error(29))\n\n\t\tself.assertEqual(self.s.sense_distance(60), expected_measurement)\n\t\tself.mount.move.assert_called_once_with(x=60)", "def test_weighting(self):\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\"\n )\n for stat in [\"hs\", \"tp\"]:\n idw = dset.spec.stats([stat])[stat].values\n site0 = self.dset.isel(site=[0]).spec.stats([stat])[stat].values\n site1 = self.dset.isel(site=[1]).spec.stats([stat])[stat].values\n lower = np.array([min(s1, s2) for s1, s2 in zip(site0, site1)])\n upper = np.array([max(s1, s2) for s1, s2 in zip(site0, site1)])\n assert (upper - idw > 0).all() and (idw - lower > 0).all()", "def get_top_station_set(city):\n s = {}\n for file in os.listdir(exp_data_path + os.sep + 'station' + os.sep + city):\n with open(exp_data_path + os.sep + 'station' + os.sep + city + os.sep + file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0] not in s:\n s[row[0]] = 1\n else:\n s[row[0]] = s[row[0]] + 1\n\n sort_s = dict(sorted(s.items(), key=lambda x : x[1], reverse=True))\n first = True\n res = []\n for k, v in sort_s.items():\n if first:\n top = v\n first = False\n if top - v <= 30:\n res.append(k)\n print('before', len(sort_s))\n print('after', len(res))\n\n # restore new map [old_index, new_index]\n list_remap = {}\n new_index = 0\n for index in range(0, data_length[city]):\n if str(index) in res:\n list_remap[index] = new_index\n new_index = new_index + 1\n\n # print(list_remap)\n check_path(exp_data_path + os.sep + 'station_list')\n file_name = exp_data_path + os.sep + 'station_list' + os.sep + 'list_remap_{}'.format(city) + '.npy'\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, list_remap)", "def isSorted(lyst):\n #Cute list comprehension way that doesn't short-circuit.\n #return len([x for x in\n # [a - b for a,b in zip(lyst[1:], lyst[0:-1])]\n # if x < 0]) == 0\n for i in range(1, len(lyst)):\n if lyst[i] < lyst[i-1]:\n return False\n return True", "def test_shortest_route_from_a_to_c(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.shortest_route_distance('A', 'C'), 9)", "def topsort_lat(lat, random_shift=False, max_state=None):\n\n V = {arc[STATE_FROM] for arc in lat} | {arc[STATE_TO] for arc in lat}\n A = {i: set() for i in V}\n for arc in lat:\n A[arc[STATE_TO]].add(arc[STATE_FROM])\n newid2oldid = [0]\n while len(newid2oldid) <= len(V):\n vs = [i for i, v in A.items() if len(v) == 0]\n if len(vs) == 0:\n print(f\"Lat: {lat}\")\n print(f\"V: {V}\")\n print(f\"A: {A}\")\n print(f\"newid2oldid: {newid2oldid}\")\n raise RuntimeError(f\"Topsort error.\")\n i = np.random.choice(vs)\n A.pop(i)\n newid2oldid.append(i)\n for a in A.values():\n a.discard(i)\n old2new = {i_old: i_new for i_new, i_old in enumerate(newid2oldid)}\n if random_shift:\n shift=0\n max_shift = max_state - len(old2new)\n max_step = max_state // len(old2new)\n for k,v in old2new.items():\n if v == 0 or v == 1:\n continue\n new_shift = random.randint(0, min(max_step, max_shift))\n shift += new_shift\n max_shift -= new_shift\n 
old2new[k] += shift\n\n sorted_lat = np.array([(arc[0], old2new[arc[1]], old2new[arc[2]]) for arc in lat])\n return sorted_lat", "def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in [\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)", "def _sort_by_satellite(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n time = []\n satellite = []\n system = []\n for sat in sorted(self.dset.unique(\"satellite\"), reverse=True):\n idx = self.dset.filter(satellite=sat)\n time.extend(self.dset.time.gps.datetime[idx])\n satellite.extend(self.dset.satellite[idx])\n system.extend(self.dset.system[idx])\n \n return np.array([time]), np.array([satellite]), np.array([system])", "def 
test_attometers_validate_list(self):\n attometers = inches_to.attometers([1.0, 2.0, 3.0, 4.0])\n comparison = np.array([2.54e16, 2*2.54e16, 3*2.54e16, 4*2.54e16])\n\n try:\n for i in range(len(comparison)):\n self.assertTrue(math.isclose(attometers[i], comparison[i], rel_tol=self.accepted_error))\n print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.passed))\n except AssertionError:\n print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.failed))", "def test_distance_ad(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AD'), '5')", "def test_distance(self):\n self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))\n self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)", "def testDegenerate(self):\n srt = asarray(self.copy())\n srt.sort(axis=1)\n return (srt[:,:-1] == srt[:,1:]).any(axis=1)", "def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)", "def testEditDist(self): # - - - - - - - - - - - - - - - - - - - - - - - - -\n\n for pair in self.string_pairs:\n\n approx_str_value = stringcmp.editdist(pair[0],pair[1])\n\n assert (isinstance(approx_str_value,float)), \\\n '\"EditDist\" does not return a floating point number for: '+ \\\n str(pair)\n\n assert (approx_str_value >= 0.0), \\\n '\"EditDist\" returns a negative number for: '+str(pair)\n\n assert (approx_str_value <= 1.0), \\\n '\"EditDist\" returns a number larger than 1.0 for: '+str(pair)\n\n approx_str_value_1 = stringcmp.editdist(pair[0],pair[1])\n approx_str_value_2 = stringcmp.editdist(pair[1],pair[0])\n\n assert (approx_str_value_1 == approx_str_value_2), \\\n '\"EditDist\" returns different values for pair and swapped ' + \\\n 'pair: '+str(pair)+': '+str(approx_str_value_1)+', '+ \\\n str(approx_str_value_2)\n\n # Check for value 1.0 if the strings are the same\n #\n if (pair[0] == pair[1]):\n\n assert (approx_str_value == 1.0), \\\n '\"EditDist\" does not return 1.0 if strings are equal: '+ \\\n str(pair)", "def get_stops_sorted( latitude, longitude ):\n\treturnvalue = []\n\tstops_file = open( 'google_transit/stops.txt' )\n\tstops_iter = DictReader( stops_file )\n\tfor stop in stops_iter:\n\t\tdistance = angular_distance( latitude, longitude, \n\t\t\t\t\t\t\t float( stop[ 'stop_lat' ] ), float( stop[ 'stop_lon' ]))\n\t\tstop[ 'distance' ] = distance * MI\n\t\treturnvalue.append(( distance, stop ))\n\tstops_file.close( )\n\treturnvalue.sort( )\n\treturn [ y for x,y in returnvalue ]", "def compare_instances(self, inst1, inst2):\n for skey, sdir in zip(self._sort_keys, self._sort_dirs):\n resultflag = 1 if sdir == 'desc' else -1\n if inst1[skey] < inst2[skey]:\n return resultflag\n elif inst1[skey] > inst2[skey]:\n return resultflag * -1\n return 0", "def test_dset_sel_nearest_unique(self):\n dset = self.dset.spec.sel(\n lons=self.lons, lats=self.lats, method=\"nearest\", unique=True\n )\n assert dset[attrs.SITENAME].size == len(self.lons_exact)", "def test_distance():\n t0 = time.time()\n c1 = coord.CelestialCoord(0.234 * coord.radians, 0.342 * coord.radians)\n c2 = coord.CelestialCoord(0.234 * coord.radians, -1.093 * coord.radians)\n c3 = coord.CelestialCoord((pi + 0.234) * coord.radians, -0.342 * coord.radians)\n c4 = coord.CelestialCoord((pi + 0.234) * coord.radians, 0.832 * coord.radians)\n c5 = 
coord.CelestialCoord(1.832 * coord.radians, -0.723 * coord.radians)\n c6 = coord.CelestialCoord((0.234 + 2.3e-9) * coord.radians, (0.342 + 1.2e-9) * coord.radians)\n t1 = time.time()\n\n a1 = astropy.coordinates.SkyCoord(0.234 * units.radian, 0.342 * units.radian)\n a2 = astropy.coordinates.SkyCoord(0.234 * units.radian, -1.093 * units.radian)\n a3 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, -0.342 * units.radian)\n a4 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, 0.832 * units.radian)\n a5 = astropy.coordinates.SkyCoord(1.832 * units.radian, -0.723 * units.radian)\n a6 = astropy.coordinates.SkyCoord(0.234 + 2.3e-9, 0.342 + 1.2e-9, unit=units.radian)\n t2 = time.time()\n\n coord_dist = [c1.distanceTo(c).rad for c in [c2,c3,c4,c5,c6]]\n t3 = time.time()\n astropy_dist = [a1.separation(a).rad for a in [a2,a3,a4,a5,a6]]\n t4 = time.time()\n\n np.testing.assert_almost_equal(coord_dist, astropy_dist, decimal=12)\n # For the last one, the distance is rather small in radians, so test in arcsec\n np.testing.assert_almost_equal(coord_dist[-1] * (coord.radians/coord.arcsec),\n astropy_dist[-1] * (coord.radians/coord.arcsec), decimal=10)\n\n print('Compare times for distance calculations:')\n print(' Make CelestialCoords: t = ',t1-t0)\n print(' Make SkyCoords: t = ',t2-t1)\n print(' Calculate distances with Coord: t = ',t3-t2)\n print(' Calculate distances with Astropy: t = ',t4-t3)", "def test_distance_between_points_near_0_longitude(self) -> None:\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=-0.1, elevation_2=0, haversine=True)\n print(distance)\n self.assertTrue(distance < 230000)\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=-0.1, elevation_2=0, haversine=False)\n print(distance)\n self.assertTrue(distance < 230000)\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=360-0.1, elevation_2=0, haversine=True)\n print(distance)\n self.assertTrue(distance < 230000)\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=360-0.1, elevation_2=0, haversine=False)\n print(distance)\n self.assertTrue(distance < 230000)", "def is_sorted(items):\n # TODO: Check that all adjacent items are in order, return early if so\n for x in range(len(items)):\n if x < len(items)-1:\n if items[x+1] < items[x]:\n return False\n return True", "def testCheckPairsHaveCorrectOrder(self):\n add_geo = pd.DataFrame({\n 'date':\n pd.to_datetime(\n ['2019-01-01', '2019-10-01', '2019-01-01', '2019-10-01']),\n 'geo': [5, 5, 6, 6],\n 'response': [4.45, 20, 4.55, 20],\n 'spend': [10, 10, 10, 10]\n })\n new_data = pd.concat([self.test_data, add_geo], sort=False)\n test_class = TrimmedMatchGeoXDesign(\n GeoXType.HEAVY_UP,\n pretest_data=new_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n matching_metrics={'response': 1.0})\n test_class.create_geo_pairs(use_cross_validation=True)\n test_class.create_geo_level_eval_data()\n test_class.geo_level_eval_data[0].sort_values(by='geo', inplace=True)\n test_class.geo_level_eval_data[0].reset_index(drop=True, inplace=True)\n pairs = test_class.pairs[0].round({'distance': 5})\n self.assertTrue(\n test_class.geo_level_eval_data[0].sort_index(axis=1).equals(\n pd.DataFrame({\n 'geo': [1, 2, 3, 4, 5, 6],\n 'pair': [3, 1, 3, 1, 2, 2],\n 'response': [2.0, 5.0, 2.0, 4.0, 20.0, 20.0],\n 'spend': [1.5, 2.5, 1.5, 6, 10, 
10]\n })))\n self.assertTrue(\n pairs.equals(\n pd.DataFrame({\n 'geo1': [4, 6, 1],\n 'geo2': [2, 5, 3],\n 'distance': [1/16, 0.1/16, 0.0],\n 'pair': [1, 2, 3]\n })))", "def test_12(self):\n num_elements = np.random.randint(1, 11)\n\n input_array = np.random.normal(size=num_elements)\n\n # We first check the sorting implementation.\n py = sorted(input_array)\n f90 = fort_debug.wrapper_sorted(input_array, num_elements)\n assert_equal(py, f90)\n\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n\n edu_spec, optim_paras, num_types = dist_class_attributes(\n respy_obj, \"edu_spec\", \"optim_paras\", \"num_types\"\n )\n\n args = (edu_spec[\"start\"], edu_spec[\"share\"], edu_spec[\"max\"])\n f90 = fort_debug.wrapper_sort_edu_spec(*args)\n py = sort_edu_spec(edu_spec)\n for i, label in enumerate([\"start\", \"share\", \"max\"]):\n assert_equal(py[label], f90[i])\n\n py = sort_type_info(optim_paras, num_types)\n f90 = fort_debug.wrapper_sort_type_info(optim_paras[\"type_shares\"], num_types)\n for i, label in enumerate([\"order\", \"shares\"]):\n assert_equal(py[label], f90[i])", "def test_get_all_grouped_distances_between(self):\r\n groupings = get_all_grouped_distances(self.dist_matrix_header,\r\n self.dist_matrix, self.mapping_header, self.mapping,\r\n self.field, within=False)\r\n expected = [0.72899999999999998, 0.80000000000000004,\r\n 0.72099999999999997, 0.76500000000000001,\r\n 0.77600000000000002, 0.74399999999999999, 0.749,\r\n 0.67700000000000005, 0.73399999999999999,\r\n 0.77700000000000002, 0.73299999999999998,\r\n 0.72399999999999998, 0.69599999999999995,\r\n 0.67500000000000004, 0.65400000000000003,\r\n 0.69599999999999995, 0.73099999999999998,\r\n 0.75800000000000001, 0.73799999999999999,\r\n 0.73699999999999999]\r\n self.assertEqual(groupings, expected)", "def in_distance(a, b, d):\n return distance(a, b) <= d", "def test_distance_aba(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('ABC'), '9')", "def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)", "def distances(self):", "def test_sorting():\n circles = [Circle(i) for i in range(10, 1, -1)] \n sorted_circles = sorted(circles, key=Circle.sort_key)\n assert circles != sorted_circles", "def test_distance_aebcd(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AEBCD'), '22')", "def closest_station(lat, lon, cache_dir):\n if lat is None or lon is None or not os.path.isdir(cache_dir):\n return\n stations = zamg_stations(cache_dir)\n\n def comparable_dist(zamg_id):\n \"\"\"Calculate the pseudo-distance from lat/lon.\"\"\"\n station_lat, station_lon = stations[zamg_id]\n return (lat - station_lat) ** 2 + (lon - station_lon) ** 2\n\n return min(stations, key=comparable_dist)", "def check_order(current, hit, overlap = 200):\n prev_model = current[-1][2:4]\n prev_strand = current[-1][-2]\n hit_model = hit[2:4]\n hit_strand = hit[-2]\n # make sure they are on the same strand\n if prev_strand != hit_strand:\n return False\n # check for sequential hits on + strand\n if prev_strand == '+' and (prev_model[1] - hit_model[0] >= overlap):\n return False\n # check for sequential hits on - strand\n if prev_strand 
== '-' and (hit_model[1] - prev_model[0] >= overlap):\n return False\n else:\n return True", "def buildOrd(abscissaTab, nbWalks):\n random = []\n nonReversing = []\n selfAvoiding = []\n for ab in abscissaTab:\n print(\"# Computing for ab = \", ab)\n print(\" -> Random...\")\n random.append(averageDistance(ab, nbWalks, randomWalk))\n print(\" -> Non reversing...\")\n nonReversing.append(averageDistance(ab, nbWalks, nonReversingWalk))\n print(\" -> Self-avoiding...\")\n selfAvoiding.append(averageDistance(ab, nbWalks, selfAvoidingWalk))\n return random, nonReversing, selfAvoiding", "def test_k_nearest(self):\n L = range(100)\n L = [(i, i, i, i) for i in L]\n tree = KdTree(L)\n # remove distance, only keep points from the result\n items = lambda items: [x for (d, x) in items] \n assert items(tree.k_nearest((-1, -1), 1)) == [(0, 0, 0, 0)]\n assert items(tree.k_nearest((100, 100), 1)) == [(99, 99, 99, 99)]\n assert items(tree.k_nearest((50, 50), 1)) == [(50, 50, 50, 50)]\n assert items(tree.k_nearest((-1, -1), 2)) == [(0, 0, 0, 0),\n (1, 1, 1, 1)]", "def test_EstimateDistances_fromThreeway(self):\n d = EstimateDistances(self.al, JC69(), threeway=True)\n d.run()\n canned_result = {('b', 'e'): 0.495312,\n ('c', 'e'): 0.479380,\n ('a', 'c'): 0.089934,\n ('a', 'b'): 0.190021,\n ('a', 'e'): 0.495305,\n ('b', 'c'): 0.0899339}\n result = d.getPairwiseDistances(summary_function=\"mean\")\n self.assertDistsAlmostEqual(canned_result, result)", "def test_version_sorting(self):\n assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']", "def test_cmp(self, tmpdir, treantclass):\n with tmpdir.as_cwd():\n c1 = treantclass('a')\n c2 = treantclass('b')\n c3 = treantclass('c')\n\n assert sorted([c3, c2, c1]) == [c1, c2, c3]\n assert c1 <= c2 < c3\n assert c3 >= c2 > c1", "def test_06_valid_distance(self):\n distance_record = SwimRecord(distance=20)\n try:\n distance_record.full_clean()\n except ValidationError as e:\n self.assertTrue(\"Ensure this value is greater than or equal to 50.\" in e.message_dict['distance'])", "def test_09_laps(self):\n for activity in self.manager_stravagpx:\n self.assertIsNone(activity.laps)\n for activity in self.manager_fit:\n laps = activity.laps\n self.assertIsInstance(laps, pd.DataFrame)\n self.assertAlmostEqual(laps['distance'].sum(), activity.metadata.distance_2d_km * 1000, places=3)\n self.assertAlmostEqual(laps['duration'].sum(), activity.metadata.duration)", "def test_close_goes_first_on_loading(self):\n st, frontend_setup = self.get_st_and_fill_frontends()\n closest = np.argmin(frontend_setup['remoteness'])\n len_from_main_st = len(st.get_array(self.run_id, self.target))\n\n for sf_i, sf in enumerate(st.storage):\n st_compare = st.new_context()\n st_compare.storage = [sf]\n len_from_compare = len(st_compare.get_array(self.run_id,\n self.target))\n if sf_i == closest:\n self.assertEqual(len_from_compare, len_from_main_st)\n # else:\n # self.assertNotEqual(len_from_compare, len_from_main_st)", "def test_shortest_route_from_b_to_b(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.shortest_route_distance('B', 'B'), 9)", "def test_input_order_irrelevant(self):\n sorted_strings = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']\n mutable_copy = list(sorted_strings)\n for i in range(10000):\n random.shuffle(mutable_copy)\n assert natsort(mutable_copy) == sorted_strings", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # 
Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum", "def is_spanning(vs, es):\n [e.clear() for e in es]\n d = dijkstra(vs, es, vs[0])\n [e.restore() for e in es]\n return max(d) < 1e-5", "def test_equals_distance_buildings():\n for i in range(building_count):\n for j in range(building_count):\n if i == j:\n continue\n rust_result = rust_force.calculate_distance_between_two_buildings(\n rust_buildings[i], rust_buildings[j])\n python_result = calculate_distance_between_two_buildings(\n python_figures[i], python_figures[j], python_positions[i], python_positions[j])\n assert rust_result == python_result", "def test_get_nearest(self):\n switzerland = Country.objects.get(name=u\"Switzerland\")\n uk = Country.objects.get(name=u\"United Kingdom\")\n \n user1, person1 = self._create_person(\"user1\", \"user1@example.com\",\n country=switzerland.name,\n latitude=46.519582,\n longitude=6.632121,\n location_description=u\"Geneva\")\n # Geneva -> Saint-Genis: 10.9km\n user2, person2 = self._create_person(\"user2\", \"user2@example.com\",\n country=switzerland.name,\n latitude=46.205973,\n longitude=6.5995789,\n location_description=u\"Saint-Genis\")\n \n # Geneva -> Islington: 986km\n user3, person3 = self._create_person(\"user3\", \"user3@example.com\",\n country=uk.name,\n latitude=51.532601866,\n longitude=-0.108382701874,\n location_description=u\"Islington\")\n \n # Geneva -> Lausanne: 63.2km\n user4, person4 = self._create_person(\"user4\", \"user4@example.com\",\n country=switzerland.name,\n latitude=46.243572,\n longitude=6.02107,\n location_description=u\"Lausanne\")\n \n \n near = person1.get_nearest(within_range=9999)\n \n self.assertEqual(near, [person2, person4, person3])\n \n # the within range feature 
doesn't work in mysql\n if settings.DATABASE_ENGINE == 'mysql':\n return\n \n # person2: 21.7 miles\n # person4: 34.7 miles\n # person3: 471.9 miles\n near = person1.get_nearest(within_range=100)\n \n self.assertEqual(near, [person2, person4])\n \n near = person1.get_nearest(num=1, within_range=100)\n \n self.assertEqual(near, [person2])", "def test_get_grouped_distances_between(self):\r\n groupings = get_grouped_distances(self.dist_matrix_header,\r\n self.dist_matrix, self.mapping_header, self.mapping,\r\n self.field, within=False)\r\n expected = [\r\n ('Control', 'Fast', [0.72899999999999998, 0.80000000000000004,\r\n 0.72099999999999997, 0.76500000000000001,\r\n 0.77600000000000002, 0.74399999999999999,\r\n 0.749, 0.67700000000000005,\r\n 0.73399999999999999, 0.77700000000000002,\r\n 0.73299999999999998, 0.72399999999999998,\r\n 0.69599999999999995, 0.67500000000000004,\r\n 0.65400000000000003, 0.69599999999999995,\r\n 0.73099999999999998, 0.75800000000000001,\r\n 0.73799999999999999, 0.73699999999999999])]\r\n self.assertEqual(groupings, expected)" ]
[ "0.66526514", "0.64656", "0.6275362", "0.60979766", "0.6097272", "0.6059003", "0.6043562", "0.59792054", "0.5845956", "0.5807009", "0.5800586", "0.57856506", "0.5729412", "0.5716074", "0.5695116", "0.5680469", "0.5653806", "0.5639628", "0.5623877", "0.5608037", "0.55967736", "0.5570804", "0.5568012", "0.5551753", "0.55459106", "0.5513904", "0.5503926", "0.54964423", "0.54808754", "0.54724413", "0.5447443", "0.54440725", "0.5443948", "0.5436348", "0.54296905", "0.5417451", "0.5384947", "0.5380363", "0.53745246", "0.5364053", "0.5363109", "0.53445274", "0.5335895", "0.5334369", "0.5324121", "0.5323559", "0.5288807", "0.5273968", "0.52688116", "0.5265349", "0.5261421", "0.52569634", "0.52529514", "0.5244922", "0.52404267", "0.52368224", "0.52312535", "0.5226351", "0.5212104", "0.5210906", "0.52049625", "0.51947844", "0.5194106", "0.5177946", "0.5177064", "0.5170795", "0.5160539", "0.51535153", "0.51504153", "0.5150394", "0.51486117", "0.5145919", "0.5134365", "0.5133976", "0.5130935", "0.5123625", "0.5117512", "0.50969994", "0.50966287", "0.5094842", "0.50913787", "0.50905037", "0.50849783", "0.5083271", "0.50820434", "0.5079921", "0.5074454", "0.50730616", "0.50589025", "0.5057785", "0.50572383", "0.5050692", "0.5036267", "0.5032054", "0.50254387", "0.5023279", "0.50218636", "0.5015137", "0.5012655", "0.5011163" ]
0.8008606
0
Finds the minimum value of SequenceMatcher.ratio() for two strings such that Differ considers them as 'changed'.
def check_difflib_ratio():
    import difflib
    import random
    import string

    def random_modify_string(input_string, change_word=0.5, change_char=0.3):
        # Mutate characters in roughly half of the words so the result stays
        # similar enough for Differ to pair it with the original line.
        word_list = input_string.split()
        for i, word in enumerate(word_list):
            if random.random() < change_word:
                for j in range(len(word)):
                    if random.random() < change_char:
                        word = word[:j] + random.choice(string.printable) + word[j + 1:]
                word_list[i] = word
        return ' '.join(word_list)

    differ = difflib.Differ()
    min_ratio = 1.0
    for _ in range(1000):
        length = random.randint(5, 100)
        s1 = ''.join(random.SystemRandom().choice(string.printable) for _ in range(length))
        s2 = random_modify_string(s1)
        ratio = difflib.SequenceMatcher(None, s1, s2).ratio()
        # A '?' guide line means Differ rendered this pair as a 'change';
        # keep the smallest ratio for which that still happens.
        for line in differ.compare([s1], [s2]):
            if line.startswith('?'):
                min_ratio = min(min_ratio, ratio)
                break

    print('Minimum ratio which difflib considers as "change" is: {}'.format(min_ratio))
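A minimal, deterministic illustration of the behaviour probed above (a sketch independent of the harvested function; the example strings are illustrative): Differ only emits '?' guide lines when it pairs two lines as a 'change', which in CPython's difflib requires the pair's SequenceMatcher ratio to clear an internal similarity cutoff (0.75 in _fancy_replace), so the empirical minimum printed above should land just above that value.

import difflib

# 'hello world' vs 'hallo world': one substituted character, so the pair is
# similar enough for Differ to align the lines and emit '?' guide lines.
a, b = 'hello world', 'hallo world'
print(difflib.SequenceMatcher(None, a, b).ratio())  # 0.909... (2 * 10 / 22)
for line in difflib.Differ().compare([a], [b]):
    print(repr(line))
# '- hello world'
# '?  ^\n'   <- guide line marking the changed character
# '+ hallo world'
# '?  ^\n'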
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_equal_rate(str1, str2):\r\n\treturn difflib.SequenceMatcher(None, str1, str2).quick_ratio()", "def string_match_ratio(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.ratio()", "def compare_strings(string1: str, string2: str) -> float:\n return SequenceMatcher(None, string1, string2).ratio()", "def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()", "def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()", "def string_similarity(a, b):\n return SequenceMatcher(a=a, b=b).ratio()", "def compare_str(seq1, seq2):\n if seq1 == seq2:\n return 1\n ld = Levenshtein.distance(seq1, seq2)\n longest = len(seq1 if len(seq1) > len(seq2) else seq2)\n return (longest - ld) / longest", "def _match(a, b):\n return SequenceMatcher(None, a, b).ratio()", "def get_diff(text_1, text_2):\n\n return str(round(SequenceMatcher(None, text_1, text_2).ratio()*100, 2)) + '%'", "def get_fuzz_ratio(first_word, second_word):\n return fuzz.ratio(first_word, second_word), first_word, second_word", "def string_similarity(item_1, item_2):\n return SequenceMatcher(None, item_1.lower(), item_2.lower()).ratio()", "def fuzzy_ratio(thing_1, thing_2):\n return fuzz.ratio(thing_1, thing_2)", "def string_similarity_score(left: str, right: str):\n return SequenceMatcher(None, left, right).ratio()", "def transition_transversion_ratio(dna1: str, dna2: str):\n transition, transversion = transition_transversion(dna1, dna2)\n return transition / transversion", "def get_similarity(s1, s2):\n t0 = sorted(list(set(s1.split(' ')).intersection(set(s2.split(' ')))))\n t1 = sorted(list(set(t0 + s1.split(' '))))\n t2 = sorted(list(set(t0 + s2.split(' '))))\n\n r01 = SequenceMatcher(None, t0, t1).ratio()\n r02 = SequenceMatcher(None, t0, t2).ratio()\n r12 = SequenceMatcher(None, t1, t2).ratio()\n return max(r01, r02, r12)", "def __getSimilarityScore(expected, actual):\n return SequenceMatcher(None, expected, actual).ratio()", "def compare(self) -> float:\n if not self._hadith_text1 or not self._hadith_text2:\n raise Exception('Hadith texts to compare not set. 
Use setHadithTexts() to set the texts...')\n\n text1 = self._hadith_text1_cleaned\n text2 = self._hadith_text2_cleaned\n\n if self._ignore_diacritics:\n text1 = self._remove_diacritics(self._hadith_text1_cleaned)\n text2 = self._remove_diacritics(self._hadith_text2_cleaned)\n\n sm = difflib.SequenceMatcher(None, text1, text2)\n return sm.ratio()", "def fuzzy_partial_ratio(thing_1, thing_2):\n return fuzz.partial_ratio(thing_1, thing_2)", "def scientific_match_ratio(str1, str2, keywords):\n\n # Get rid of the numbers\n str1_numberless = remove_numbers(str1)\n str2_numberless = remove_numbers(str2)\n\n # Get the keywords and whatever remains after removing the keywords\n str1_keywords, str1_remainder = get_common_words_in_description(str1_numberless, keywords)\n str2_keywords, str2_remainder = get_common_words_in_description(str2_numberless, keywords)\n\n remainder_dist = string_num_matches(str1_remainder, str2_remainder)\n common_keywords = str1_keywords.intersection(str2_keywords)\n\n common_keyword_total_len = 0\n for common_kword in common_keywords:\n common_keyword_total_len += len(common_kword)\n\n return (remainder_dist + common_keyword_total_len) * 1.0 / max(len(str1_numberless), len(str2_numberless))", "def similarity(a, b):\n distance = Levenshtein.distance(a, b)\n return 1 - (distance / max((len(a), len(b))))", "def levenshtein_distance(str1, str2):\n m = len(str1)\n n = len(str2)\n lensum = float(m + n)\n d = [] \n for i in range(m+1):\n d.append([i]) \n del d[0][0] \n for j in range(n+1):\n d[0].append(j) \n for j in range(1,n+1):\n for i in range(1,m+1):\n if str1[i-1] == str2[j-1]:\n d[i].insert(j,d[i-1][j-1]) \n else:\n minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2) \n d[i].insert(j, minimum)\n ldist = d[-1][-1]\n ratio = (lensum - ldist)/lensum\n return {'distance':ldist, 'ratio':ratio}", "def levenshtein(str1, str2, normalise=False):\n\ttmp = Levenshtein.distance(str1, str2)\n\tif(normalise) and (len(str1) + len(str2)): tmp /= max(len(str1), len(str2))\n\treturn tmp", "def compare(seq1, seq2):\n if seq1 == seq2:\n return 1\n len_diff = len(seq1) / len(seq2)\n if len_diff > 1:\n len_diff = 1 / len_diff\n\n ngrams1 = {tuple(ng) for ng in get_all_ngrams(seq1)}\n ngrams2 = {tuple(ng) for ng in get_all_ngrams(seq2)}\n\n overall = len(ngrams1 & ngrams2) / len(ngrams1 | ngrams2)\n if overall == 1 or overall == 0:\n return overall\n\n try:\n max_match = len(max(ngrams1 & ngrams2, key=len)) / len(seq1)\n except ValueError:\n return 0\n\n return (len_diff + max_match + overall) / 3", "def diff(self, content):\n\n self.differ.set_seq2(self.make_hash_sequence(content))\n percent_diff = (1.0 - self.differ.ratio()) * 100.0\n percent_diff = 1 if 0 < percent_diff < 1 else int(round(percent_diff, 0))\n\n if percent_diff != 0 and len(content) < self.expected_length:\n percent_diff *= -1\n\n return percent_diff", "def levenshtein_normalised(str1, str2):\n\treturn levenshtein(str1, str2, normalise=True)", "def similarL(a, b, ratio):\n for x in b:\n if SequenceMatcher(None, a, x).ratio() > ratio:\n return x\n return False", "def distance(str1, str2):\n return levenshtein.normalized_distance(str1, str2)", "def fuzzy_score_string(first_string, second_string):\n score = 0\n\n if len(first_string) < len(second_string):\n shorter, longer = (first_string, second_string)\n window_length = len(shorter)\n\n num_iterations = len(longer) - len(shorter) + 1\n\n for position in range(0, num_iterations):\n window = longer[position:position + window_length]\n l_ratio = Levenshtein.ratio(window, 
shorter) * 100\n\n if l_ratio > 60:\n result = statistics.mean(\n [100 - Levenshtein.distance(window, shorter) * 15, l_ratio, l_ratio])\n\n else:\n result = l_ratio\n\n if result > score:\n score = result\n\n else:\n l_ratio = Levenshtein.ratio(first_string, second_string) * 100\n score = statistics.mean(\n [100 - Levenshtein.distance(first_string, second_string) * 15, l_ratio, l_ratio])\n\n simple = fuzz.ratio(first_string, second_string)\n partial = fuzz.partial_ratio(first_string, second_string)\n sort = fuzz.token_sort_ratio(first_string, second_string)\n set_ratio = fuzz.token_set_ratio(first_string, second_string)\n\n score = max([score, simple, partial, sort, set_ratio])\n\n if score < 75:\n score = 0\n\n return score * 0.85", "def fuzzy_token_sort_ratio(thing_1, thing_2):\n return fuzz.token_sort_ratio(thing_1, thing_2)", "def fuzzy_ratio_check(full_name_check_value, name_one, name_two):\n if full_name_check_value == 0:\n return fuzz.ratio(name_one, name_two)\n \n return 0", "def error_ratio(original, corrected):\n\n original = TextBlob(original)\n corrected = TextBlob(corrected)\n error_ratio = sum(not word in corrected.tokenize() for word in original.tokenize()) / len(original)\n error_ratio_dict = {'error_ratio': error_ratio}\n return error_ratio_dict", "def diff(s0, s1):\n from difflib import ndiff\n lst0 = s0.split(\"\\n\")\n lst1 = s1.split(\"\\n\")\n report = '\\n'.join(ndiff(lst0, lst1))\n return report", "def diff(s0, s1):\n from difflib import ndiff\n lst0 = s0.split(\"\\n\")\n lst1 = s1.split(\"\\n\")\n report = '\\n'.join(ndiff(lst0, lst1))\n return report", "def diff(left_struc, right_struc, minimal=True, verbose=True, key=None):\n if key is None:\n key = []\n\n if structure_worth_investigating(left_struc, right_struc):\n common = commonality(left_struc, right_struc)\n if minimal:\n my_diff = needle_diff(left_struc, right_struc, key, minimal)\n elif common < 0.5:\n my_diff = this_level_diff(left_struc, right_struc, key, common)\n else:\n my_diff = keyset_diff(left_struc, right_struc, key, minimal)\n else:\n my_diff = this_level_diff(left_struc, right_struc, key, 0.0)\n\n if minimal:\n my_diff = min(my_diff, [[key[:], copy.copy(right_struc)]],\n key=lambda x: len(compact_json_dumps(x)))\n\n if not key:\n if len(my_diff) > 1:\n my_diff = sort_stanzas(my_diff)\n if verbose:\n size = len(compact_json_dumps(right_struc))\n csize = float(len(compact_json_dumps(my_diff)))\n msg = ('Size of delta %.3f%% size of original '\n '(original: %d chars, delta: %d chars)')\n print(msg % (((csize / size) * 100),\n size,\n int(csize)),\n file=sys.stderr)\n return my_diff", "def pct_match(self, s1, s2, comp_length):\n\n matches = self.max_freq[s1:s1+comp_length] \\\n == self.max_freq[s2:s2+comp_length]\n return np.ma.sum(matches) / np.ma.count(matches)", "def fuzzy_match_strings(ref, val):\n if not ref or not val:\n return 0\n ref_q = to_q(ref)\n val_q = to_q(val)\n if ref_q or val_q:\n return 100 if ref_q == val_q else 0\n simplified_val = unidecode(val).lower()\n simplified_ref = unidecode(ref).lower()\n\n # Return symmetric score\n r1 = fuzz.token_sort_ratio(simplified_val, simplified_ref)\n r2 = fuzz.token_sort_ratio(simplified_ref, simplified_val)\n r2 = r1\n return int(0.5*(r1+r2))", "def string_edit_dist(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.distance()", "def optimal_string_alignment_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(optimal_string_alignment_distance(s1, s2)) / 
max_cost", "def mm_similarity(s1, s2):\n if filter(str.isalpha, s1) == filter(str.isalpha, s2):\n if len(s1) < len(s2):\n return float(len(s1)) / len(s2)\n else:\n return float(len(s2)) / len(s1)\n else:\n return 0.", "def ratio(self, string='') -> float:\n try:\n return(self.find(string)/self.total)\n except Exception as error:\n print(f\"Error: self.ratio({string}) -> {error}\")", "def similar_string_fast(first_string, second_string):\n partial_score = fuzz.ratio(first_string, second_string)\n token_score = fuzz.token_set_ratio(first_string, second_string)\n\n if max(partial_score, token_score) >= SCORE_THRESHOLD_FAST:\n return True\n\n return False", "def wordSimilarityRatio(sent_1,sent_2):", "def get_diff(s1, s2):\n s1 = s1.splitlines(keepends=True)\n s2 = s2.splitlines(keepends=True)\n\n d = Differ()\n\n result = list(d.compare(s1, s2))\n\n lines = \"\"\n for l in result:\n if l.startswith(' '):\n continue\n elif len(l) < 3:\n continue\n # elif l.startswith('?'):\n # continue\n elif l.startswith(\"?\"):\n lines += '?' + ' ' + l[1:]\n else:\n lines += '\\n' + l\n\n return lines", "def fuzzy_partial_ratio_check(full_name_check_value, name_one, name_two):\n if full_name_check_value == 0:\n return fuzz.partial_ratio(name_one, name_two)\n \n return 0", "def bad_start_rate(labelled,str):\n#\tlabelled = RawClaim.objects.exclude(correcttrim=\"\")\n\tfiltered = set([l for l in labelled if fixstring(l.sentence).startswith(str)])\n\twrong = set([l for l in filtered if l.correcttrim!=\"X\"])\n\tright = filtered - wrong\n\treturn (float(len(right))/len(filtered),wrong,right)", "def compare_versions(first, second):\n first = list(map(int, first.split(\".\")))\n second = list(map(int, second.split(\".\")))\n\n for i in range(3):\n diff = first[i] - second[i]\n if diff != 0:\n return diff\n\n return 0", "def rel_change(y):\n return np.min([np.abs(y[1] - y[0]), np.abs(y[1] - y[2])]) / float(y[1])", "def dependency_similarity(s1, s2):\n # pass\n parsed_sentence_1 = parser.raw_parse(s1)\n parsed_sentence_2 = parser.raw_parse(s2)\n \n tree1 = next(parsed_sentence_1)\n tree2 = next(parsed_sentence_2)\n \n triples1 = [t for t in tree1.triples()]\n triples2 = [t for t in tree2.triples()] \n\n # Compute similarity\n if len(triples1) != 0 and len(triples2) != 0:\n similarity = 1 - jaccard_distance(set(triples1), set(triples2))\n return similarity\n else:\n return 0", "def compare(string1: str, string2: str, /) -> int:\n ...", "def test_frac_diff(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff(e), 0)\n self.assertEqual(s1.frac_diff(s2), 0.75)\n self.assertEqual(s1.frac_diff(s3), 1)\n self.assertEqual(s1.frac_diff(s4), 0) # note truncation", "def matchGenres(toPredictGenresString, toCompareGenresString):\n\n #Get the sets of genres\n toPredictGenres = str(toPredictGenresString).split(\"|\")\n toCompareGenres = str(toCompareGenresString).split(\"|\")\n\n toCompareGenresSet = set(toCompareGenres)\n\n commonCount = 0\n\n #Count how many are common to the two sets\n for genre in toPredictGenres:\n if genre in toCompareGenresSet:\n commonCount += 1\n\n #Return 100 times the proportion in both\n return 100 * commonCount/len(toPredictGenres)", "def levenshtein(seq1: str, seq2: str) -> int:\n if seq1 == \"\":\n return len(seq2)\n if seq2 == \"\":\n return len(seq1)\n if seq1[-1] == seq2[-1]:\n cost = 0\n else:\n cost = 1\n \n result = min([levenshtein(seq1[:-1], seq2) + 1,\n levenshtein(seq1, seq2[:-1]) + 1,\n 
levenshtein(seq1[:-1], seq2[:-1]) + cost ])\n return result", "def testSeqMatch(self): # - - - - - - - - - - - - - - - - - - - - - - - - -\n\n for pair in self.string_pairs:\n\n approx_str_value = stringcmp.seqmatch(pair[0],pair[1])\n\n assert (isinstance(approx_str_value,float)), \\\n '\"SeqMatch\" does not return a floating point number for: '+ \\\n str(pair)\n\n assert (approx_str_value >= 0.0), \\\n '\"SeqMatch\" returns a negative number for: '+str(pair)\n\n assert (approx_str_value <= 1.0), \\\n '\"SeqMatch\" returns a number larger than 1.0 for: '+str(pair)\n\n approx_str_value_1 = stringcmp.seqmatch(pair[0],pair[1])\n approx_str_value_2 = stringcmp.seqmatch(pair[1],pair[0])\n\n assert (approx_str_value_1 == approx_str_value_2), \\\n '\"SeqMatch\" returns different values for pair and swapped ' + \\\n 'pair: '+str(pair)+': '+str(approx_str_value_1)+', '+ \\\n str(approx_str_value_2)\n\n # Check for value 1.0 if the strings are the same\n #\n if (pair[0] == pair[1]):\n\n assert (approx_str_value == 1.0), \\\n '\"SeqMatch\" does not return 1.0 if strings are equal: '+ \\\n str(pair)", "def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost", "def simple_baseline_similarity(s1, s2):\n # Tokenize by sentences into words in lower case \n tokenized_sentence_1 = nltk.word_tokenize(s1.lower())\n tokenized_sentence_2 = nltk.word_tokenize(s2.lower())\n\n tagged_sentence_1 = pos_tag(tokenized_sentence_1) # [ (word, POS_TAG), ...]\n tagged_sentence_2 = pos_tag(tokenized_sentence_2) # [ (word, POS_TAG), ...]\n \n lemmas_sentence_1 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_1 if not tagged_word in stop_words] \n lemmas_sentence_2 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_2 if not tagged_word in stop_words] # [LEMMA_1, ...]\n \n word_seq_match = difflib.SequenceMatcher(None, tokenized_sentence_1, tokenized_sentence_2)\n word_match = word_seq_match.find_longest_match(0, len(tokenized_sentence_1), 0, len(tokenized_sentence_2))\n\n lemm_seq_match = difflib.SequenceMatcher(None, lemmas_sentence_1, lemmas_sentence_2)\n lemm_match = lemm_seq_match.find_longest_match(0, len(lemmas_sentence_1), 0, len(lemmas_sentence_2))\n\n word_sim = word_match.size/(max(len(tokenized_sentence_1), len(tokenized_sentence_2)) + 0.001)\n lemm_sim = lemm_match.size/(max(len(lemmas_sentence_1), len(lemmas_sentence_2)) + 0.001)\n\n return word_sim, lemm_sim", "def test_compare_difference_string_slower(self):\n test_algorithm = 'bubble'\n test_algorithm_time = 5\n test_sorted_time = 1\n result = calculate_compare_time_difference(test_algorithm_time, test_sorted_time, test_algorithm)\n self.assertEqual('bubble was 4 seconds slower.', result)", "def ratio_calc(first_strandI, second_strandI):\n if first_strandI + second_strandI != 0:\n Ratio = first_strandI / float(first_strandI + second_strandI)\n return Ratio\n else:\n return np.nan", "def fuzzy_token_sort_ratio_check(full_name_check_value, name_one, name_two):\n if full_name_check_value == 0:\n return fuzz.token_sort_ratio(name_one, name_two)\n \n return 0", "def text_similarity(self, text_1: str, text_2: str):\n txt1 = self._pre_process(text_1)\n txt2 = self._pre_process(text_2)\n\n sim = self.model.wmdistance(txt1, txt2)\n\n if sim == inf:\n sim = INF_SIMILIARITY\n return sim", "def exchange_ratio(delta_x, salience, power, dominator):\n\treturn (delta_x * salience * power) / dominator", "def 
same(fragment_one: str, fragment_two: str):\n return max(weight(fragment_one, fragment_two), weight(fragment_two, fragment_one))", "def editing_distance(str1: str, str2: str) -> int:\r\n if not str1 and not str2:\r\n return 0\r\n if not str1:\r\n return len(str2)\r\n if not str2:\r\n return len(str1)\r\n if str1[0] == str2[0]:\r\n return min(editing_distance(str1[1::], str2[1::]), 1 + editing_distance(str1, str2[1::]),\r\n 1 + editing_distance(str1[1::], str2))", "def similarity(self, other):\n part = self.__part_converter(self.part)\n if part != self.__part_converter(other.part):\n return 0\n tresh = 0.2\n sss = wn.synsets(self.string, part)\n sso = wn.synsets(other.string, part)\n best_sim = 0\n for ss in sss:\n # if not match('^' + self.string + '\\..+', ss.name()):\n # continue\n for so in sso:\n # if not match('^' + other.string + '\\..+', so.name()):\n # continue\n sim = ss.wup_similarity(so)\n if (tresh < sim) and (best_sim < sim):\n best_sim = sim\n return best_sim", "def _ratio(a1, a2):\n abs_residues = np.abs(a1 - a2).sum()\n avg_abs_sum = 0.5 * np.abs(a1).sum() + 0.5 * np.abs(a2).sum()\n return abs_residues / avg_abs_sum", "def edit_distance(str1, str2):\n\n if not str1:\n return len(str2)\n if not str2:\n return len(str1)\n\n DP = [[-1 for __ in str2] for ___ in str1]\n DP[0][0] = 0 if str1[0] == str2[0] else 1\n\n\n for x, let1 in enumerate(str1):\n startat = 0\n if x == 0:\n startat = 1\n for y, let2 in enumerate(str2[startat:], startat):\n minimum = float('inf')\n if x != 0:\n minimum = min(DP[x-1][y] + 1, minimum)\n if y != 0:\n minimum = min(DP[x-1][y-1] + (0 if let1 == let2 else 1), minimum)\n if y != 0:\n minimum = min(DP[x][y-1] + 1, minimum)\n\n DP[x][y] = minimum\n\n return DP[len(str1) - 1][len(str2) - 1]", "def sim(a, b):\n ratio = SequenceMatcher(None, a, b).ratio()\n return ratio > 0.5", "def makediff(s1, s2):\n import difflib\n differ = difflib.SequenceMatcher()\n differ.set_seqs(s1, s2)\n #debug = False\n s1new = [ ]\n s2new = [ ]\n previousOp = None\n for op, i1, i2, j1, j2 in differ.get_opcodes():\n #if debug: print \"top\"\n #if debug: print op, i1, i2, j1, j2, '->'\n #if debug: print s1, s2\n if op == 'equal':\n #if i2-i1 < 4 and len(s1new) > 1 and previousOp == \"replace\":\n # s1new[-2] += escape(s1[i1:i2])\n # s2new[-2] += escape(s2[j1:j2])\n #else:\n s1new.append(escape(s1[i1:i2]))\n s2new.append(escape(s2[j1:j2]))\n elif op == 'insert':\n s2new.extend(('<b>', escape(s2[j1:j2]), '</b>'))\n elif op == \"delete\":\n s1new.extend(('<b><strike>', escape(s1[i1:i2]), '</strike></b>'))\n elif op == 'replace':\n s1new.extend(('<b><strike>', escape(s1[i1:i2]), '</strike></b>'))\n s2new.extend(('<b>', escape(s2[j1:j2]), '</b>'))\n previousOp = op\n #if debug: print s1, s2\n #if debug: print \"bottom\"\n #if debug: print \"done\"\n return ''.join(s1new), ''.join(s2new)", "def compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, second_soup.original_soup)\n\n common_paths_size: int = first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity", "def _print_first_difference(\n arec, brec, ignore_case=False, ignore_N=False, report_match=True\n):\n aseq, bseq = arec.seq, brec.seq\n asize, bsize = len(aseq), len(bseq)\n\n matched = True\n for i, (a, b) in 
enumerate(zip_longest(aseq, bseq)):\n if ignore_case and None not in (a, b):\n a, b = a.upper(), b.upper()\n\n if ignore_N and (\"N\" in (a, b) or \"X\" in (a, b)):\n continue\n\n if a != b:\n matched = False\n break\n\n if i + 1 == asize and matched:\n if report_match:\n printf(\"[green]Two sequences match\")\n match = True\n else:\n printf(\"[red]Two sequences do not match\")\n\n snippet_size = 20 # show the context of the difference\n\n printf(\"[red]Sequence start to differ at position {}:\".format(i + 1))\n\n begin = max(i - snippet_size, 0)\n aend = min(i + snippet_size, asize)\n bend = min(i + snippet_size, bsize)\n\n printf(\"[red]{}|{}\".format(aseq[begin:i], aseq[i:aend]))\n printf(\"[red]{}|{}\".format(bseq[begin:i], bseq[i:bend]))\n match = False\n\n return match", "def test_frac_same(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same(e), 0)\n self.assertEqual(s1.frac_same(s2), 0.25)\n self.assertEqual(s1.frac_same(s3), 0)\n self.assertEqual(s1.frac_same(s4), 1.0) # note truncation", "def calc_name_match(\n song: Song, result: Result, search_query: Optional[str] = None\n) -> float:\n\n # Create match strings that will be used\n # to calculate name match value\n match_str1, match_str2 = create_match_strings(song, result, search_query)\n result_name, song_name = slugify(result.name), slugify(song.name)\n\n res_list, song_list = based_sort(result_name.split(\"-\"), song_name.split(\"-\"))\n result_name, song_name = \"-\".join(res_list), \"-\".join(song_list)\n\n # Calculate initial name match\n name_match = ratio(result_name, song_name)\n\n debug(song.song_id, result.result_id, f\"MATCH STRINGS: {match_str1} - {match_str2}\")\n debug(\n song.song_id,\n result.result_id,\n f\"SLUG MATCH STRINGS: {song_name} - {result_name}\",\n )\n debug(song.song_id, result.result_id, f\"First name match: {name_match}\")\n\n # If name match is lower than 60%,\n # we try to match using the test strings\n if name_match <= 75:\n second_name_match = ratio(\n match_str1,\n match_str2,\n )\n\n debug(\n song.song_id,\n result.result_id,\n f\"Second name match: {second_name_match}\",\n )\n\n if second_name_match > name_match:\n name_match = second_name_match\n\n return name_match", "def calc_similarity(lhs, rhs):\n lhs_decomp = decompose(lhs)\n rhs_decomp = decompose(rhs)\n dist = editdistance.eval(lhs_decomp, rhs_decomp)\n max_len = max(len(lhs_decomp), len(rhs_decomp))\n sim = float(max_len - dist) / float(max_len)\n logging.debug('SIM: [%s] vs [%s] ==> %d / %d = %f', lhs.encode('UTF-8'), rhs.encode('UTF-8'),\n max_len - dist, max_len, sim)\n return sim", "def find_edit_distance(string1,string2):\n M=zeros((len(string1)+1,len(string2)+1), dtype=int)\n for i in xrange(1,len(string1)+1):\n M[i][0]=i\n for j in xrange(1,len(string2)+1):\n M[0][j]=j\n for i in xrange(1,len(string1)+1):\n for j in xrange(1,len(string2)+1):\n if(string1[i-1]!=string2[j-1]):\n M[i][j] = min(M[i - 1][j] + 1, M[i][j - 1] + 1, M[i - 1][j - 1] + 1)\n else:\n M[i][j] = M[i - 1][j - 1]\n return M[len(string1)][len(string2)]", "def levenshteinDistance(s, t, asRatioOfMax = False):\n\n if s == None:\n s = \"\"\n if t == None:\n t = \"\"\n\n if t == s:\n return 0\n if len(s) == 0:\n return len(t)\n if len(t) == 0:\n return len(s)\n\n v0 = [x for x in range(len(t)+1)]\n v1 = [0 for x in range(len(t)+1)]\n\n for i, si in enumerate(s):\n\n v1[0] = i + 1\n\n for j, tj in enumerate(t):\n\n if si == tj:\n cost = 0\n else:\n cost = 1\n\n 
j1 = v1[j] + 1\n j2 = v0[j + 1] + 1\n j3 = v0[j] + cost\n \n if j1 < j2 and j1 < j3:\n v1[j + 1] = j1\n continue\n \n if j2 < j3:\n v1[j+1] = j2\n else:\n v1[j+1] = j3\n \n v0 = v1.copy()\n\n if not asRatioOfMax:\n return v1[len(t)]\n\n return 1 - v1[len(t)]/max([len(t), len(s)])", "def compare(self, p_str, p_str_1): # real signature unknown; restored from __doc__\n return 0", "def test_frac_similar(self):\n transitions = dict.fromkeys(\n [\n (\"A\", \"A\"),\n (\"A\", \"G\"),\n (\"G\", \"A\"),\n (\"G\", \"G\"),\n (\"U\", \"U\"),\n (\"U\", \"C\"),\n (\"C\", \"U\"),\n (\"C\", \"C\"),\n ]\n )\n\n s1 = self.RNA(\"UCAGGCAA\")\n s2 = self.RNA(\"CCAAAUGC\")\n s3 = self.RNA(\"GGGGGGGG\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_similar(y, transitions), z)\n\n test(e, e, 0)\n test(s1, e, 0)\n test(s1, s1, 1)\n test(s1, s2, 7.0 / 8)\n test(s1, s3, 5.0 / 8)\n test(s2, s3, 4.0 / 8)", "def ratio(fullname1, fullname2, strictness='default', options=None):\n\n if options is not None:\n settings = deepcopy(SETTINGS[strictness])\n deep_update_dict(settings, options)\n else:\n settings = SETTINGS[strictness]\n\n namelists1 = namelist_possibilities(normalize(fullname1))\n namelists2 = namelist_possibilities(normalize(fullname2))\n \n #If either is empty, or looks like a list of multiple names, return ratio of 0\n if len(namelists1) == 0 or len(namelists2) == 0 or len(namelists1) > 2 or len(namelists2) > 2:\n return 0.0\n\n ratios = []\n for ns1 in namelists1:\n if len(ns1) != 1: continue\n for ns2 in [n for n in namelists2 if len(n) == 1]:\n if len(ns2) != 1: continue\n n1 = Name(ns1[0])\n n2 = Name(ns2[0])\n ratios.append(n1.ratio_deep_compare(n2, settings))\n\n return max(ratios, default=0.0)", "def lev_sim(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Levenshtein()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity measure\n return measure.get_sim_score(s1, s2)", "def calculate_score(config: str, diff: str) -> float:\n config_lines = config.split(\"\\n\")\n diff_lines = diff.split(\"\\n\")\n changed_lines = 0\n total_line_score = 0.0\n for line in diff_lines:\n if line.startswith(\"+\") or line.startswith(\"-\"):\n changed_lines += 1\n total_line_score += calculate_line_score(line)\n\n changed_ratio = changed_lines / float(len(config_lines))\n unique_ratio = len(set(diff_lines)) / len(diff_lines)\n\n # Calculate score, 20% based on number of lines changed, 80% on individual\n # line score with applied modifiers\n # Apply uniqueness ratio to lower score if many lines are the same\n\n return ((changed_ratio * 100 * 0.2) + (total_line_score * 0.8)) * unique_ratio", "def compratio(self) :\n\t\ttry :\n\t\t\treturn self._compratio\n\t\texcept Exception as e:\n\t\t\traise e", "def test_string_similarity_constraint():\n f = SimilarityConstraint(func=LevenshteinDistance(), pred=GreaterThan(0.5))\n assert f('BROOKLYN', 'BROKLYN')\n assert not f('BROOKLYN', 'QUEENS')", "def test_compare_difference_string_faster(self):\n test_algorithm = 'bubble'\n test_algorithm_time = 2\n test_sorted_time = 4\n result = calculate_compare_time_difference(test_algorithm_time, test_sorted_time, test_algorithm)\n self.assertEqual('bubble was 2 seconds faster.', result)", "def text_proximity(str_1: str, str_2: str) -> float:\n tokens_1 = Counter(str_1.split(' '))\n tokens_2 = Counter(str_2.split(' '))\n 
return _normalized_scalar_product(tokens_1, tokens_2)", "def _best_song_match(songs, title, duration):\n # pylint: disable=R0914\n seqmatch = difflib.SequenceMatcher\n\n def variance(a, b):\n \"\"\" Return difference ratio. \"\"\"\n return float(abs(a - b)) / max(a, b)\n\n candidates = []\n\n ignore = \"music video lyrics new lyrics video audio\".split()\n extra = \"official original vevo\".split()\n\n for song in songs:\n dur, tit = int(song.length), song.title\n dbg(\"Title: %s, Duration: %s\", tit, dur)\n\n for word in extra:\n if word in tit.lower() and word not in title.lower():\n pattern = re.compile(word, re.I)\n tit = pattern.sub(\"\", tit)\n\n for word in ignore:\n if word in tit.lower() and word not in title.lower():\n pattern = re.compile(word, re.I)\n tit = pattern.sub(\"\", tit)\n\n replacechars = re.compile(r\"[\\]\\[\\)\\(\\-]\")\n tit = replacechars.sub(\" \", tit)\n multiple_spaces = re.compile(r\"(\\s)(\\s*)\")\n tit = multiple_spaces.sub(r\"\\1\", tit)\n\n title_score = seqmatch(None, title.lower(), tit.lower()).ratio()\n duration_score = 1 - variance(duration, dur)\n dbg(\"Title score: %s, Duration score: %s\", title_score,\n duration_score)\n\n # apply weightings\n score = duration_score * .5 + title_score * .5\n candidates.append((score, song))\n\n best_score, best_song = max(candidates, key=lambda x: x[0])\n percent_score = int(100 * best_score)\n return best_song, percent_score", "def vratio(self):\n return self.run_command('vratio')[0]", "def compare(text1, text2):\n diff = difflib.ndiff(text1.splitlines(True), text2.splitlines(True))\n return '\\n' + '\\n'.join(diff)", "def lcs_similarity(s1, s2):\n max_len = 0\n i = 0\n\n while s1[i] == s2[i]:\n max_len += 1\n i += 1\n if len(s1) == i or len(s2) == i:\n break\n\n if len(s1) < len(s2):\n return float(max_len) / len(s2)\n else:\n return float(max_len) / len(s1)", "def minimum_edit_distance(seq1,seq2):\n if len(seq1) > len(seq2):\n seq1,seq2 = seq2,seq1\n distances = range(len(seq1) + 1)\n for index2,char2 in enumerate(seq2):\n newDistances = [index2+1]\n for index1,char1 in enumerate(seq1):\n if char1 == char2:\n newDistances.append(distances[index1])\n else:\n newDistances.append(1 + min((distances[index1],\n distances[index1+1],\n newDistances[-1])))\n distances = newDistances\n return distances[-1]", "def compare_words(word1, word2):\n word1 = word1.lower()\n word2 = word2.lower()\n seg_scores = []\n if len(word1) >= len(word2):\n for i in range(0, len(word1) - len(word2) + 1):\n seg_scores.append(find_difference(word1[i:i+len(word2)], word2))\n else:\n for i in range(0, len(word2) - len(word1) + 1):\n seg_scores.append(find_difference(word2[i:i+len(word1)], word1))\n return round(min(seg_scores) + abs(len(word1) - len(word2))/float(len(max([word1, word2]))),2)", "def _edit_dist(s1, s2):\r\n dist = 0\r\n for i in range(len(s1)):\r\n if s1[i] != s2[i]:\r\n dist += 1\r\n return dist", "def levenshtein_distance(s1,s2):\n\n\t\tif len(s1) < len(s2):\n\t\t\treturn Searcher.levenshtein_distance(s2, s1)\n\n\t\t# len(s1) >= len(s2)\n\t\tif len(s2) == 0:\n\t\t\treturn len(s1)\n\n\t\tprevious_row = range(len(s2) + 1)\n\t\tfor i, c1 in enumerate(s1):\n\t\t\tcurrent_row = [i + 1]\n\t\t\tfor j, c2 in enumerate(s2):\n\t\t\t\tinsertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n\t\t\t\tdeletions = current_row[j] + 1 # than s2\n\t\t\t\tsubstitutions = previous_row[j] + (c1 != c2)\n\t\t\t\tcurrent_row.append(min(insertions, deletions, 
substitutions))\n\t\t\tprevious_row = current_row\n\t\t\n\t\treturn previous_row[-1]", "def compSeq(s1, s2, lineL=50):\n lineN = int(np.ceil(min(len(s1), len(s2))/lineL))\n count = 0\n samecount = 0\n outStr = ''\n for linei in range(lineN):\n if (linei+1) * lineL < min(len(s1), len(s2)):\n end = (linei+1) * lineL\n else:\n end = min(len(s1), len(s2))\n outStr += 'Pos %d - %d\\n' % (linei*lineL+1, end-1+1)\n for sitei in range(linei*lineL, end):\n outStr += s1[sitei]\n outStr += '\\n'\n for sitei in range(linei*lineL, end):\n out = ' ' if s1[sitei] == s2[sitei] else '|'\n outStr += out\n count += 1\n samecount += 1 if s1[sitei]==s2[sitei] else 0\n outStr += '\\n'\n for sitei in range(linei*lineL, end):\n out = '.' if s1[sitei] == s2[sitei] else s2[sitei]\n outStr += s2[sitei]\n outStr += '\\n\\n'\n outStr += 'Seq1 (%d) and Seq2 (%d) are %1.1f%% similar\\n\\n' % (len(s1), len(s2), 1e2*samecount/count)\n print(outStr)", "def similar_string(first_string, second_string):\n score = score_match(first_string, second_string)\n\n if score >= SCORE_THRESHOLD_NORMAL:\n return True\n\n return False", "def levenshtein(s1, s2):\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n \n return previous_row[-1]", "def cmpRatio(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n work1 = subInfo1[WORK]\n work2 = subInfo2[WORK]\n return float(val1) / work1 > float(val2) / work2", "def cmpRatio(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n work1 = subInfo1[WORK]\n work2 = subInfo2[WORK]\n return float(val1) / work1 > float(val2) / work2", "def edit_distance(str1, str2):\n # Base Case:\n if len(str1) == 0:\n return len(str2)\n if len(str2) == 0:\n return len(str1)\n \n # Other base case\n if str1[-1] == str2[-1]:\n return edit_distance(str1[:-1], str2[:-1])\n \n option1 = edit_distance(str1[:-1], str2)\n option2 = edit_distance(str1, str2[:-1])\n option3 = edit_distance(str1[:-1], str2[:-1])\n \n return min(option1, option2, option3) + 1", "def freq_change(content1,content2):\n return content1[0].split(\" \")[1] != content2[0].split(\" \")[1]", "def calculate_distance(seq1,seq2):\r\n mmcounter = 0 #mismatchcount\r\n seqlen = 0 #sequence length\r\n \r\n #cout the sequence length and mismatches\r\n for i in range(len(seq1)):\r\n if seq1[i]!='-' and seq2[i]!='-':\r\n seqlen += 1\r\n if seq1[i] != seq2[i]:\r\n mmcounter += 1\r\n #compute p\r\n p = (mmcounter/seqlen)\r\n #adjust p \r\n if p >= 0.75:\r\n pcorr = float(30)\r\n else:\r\n pcorr = (-3/4)*np.log(1-((4/3)*p))\r\n \r\n return(pcorr)", "def score_match(phrase, song):\n return SequenceMatcher(None, phrase, song.title).ratio()\n ## Examples of other score metrics and modifiers:\n ## Penalize based on difference in phrase length (word count)\n # return -abs(len(song.split()) - len(phrase.split()))\n ## Penalize based on missing words\n # return -len([w for w in phrase.split() if w not in song.split()])" ]
[ "0.76466227", "0.75796056", "0.7486614", "0.7058355", "0.7058355", "0.70151675", "0.6979458", "0.6847581", "0.67930263", "0.6693175", "0.66115886", "0.6577811", "0.6511473", "0.6479723", "0.64503026", "0.64290327", "0.63911766", "0.63698953", "0.6221948", "0.60449994", "0.60261536", "0.5990699", "0.5984577", "0.5982377", "0.59794265", "0.59621227", "0.5951098", "0.59478307", "0.5909935", "0.5894673", "0.5870111", "0.58181846", "0.58181846", "0.5805709", "0.58008265", "0.5781128", "0.5756408", "0.5747702", "0.57395613", "0.5728555", "0.5722689", "0.5706021", "0.5696515", "0.56773007", "0.5670774", "0.5666229", "0.56649935", "0.5652537", "0.56509924", "0.5648203", "0.56418383", "0.563552", "0.5629415", "0.56157863", "0.56044966", "0.5589044", "0.5586248", "0.5577041", "0.5575611", "0.5571091", "0.55650246", "0.5564274", "0.5542566", "0.5540239", "0.55380476", "0.55363476", "0.5523241", "0.5521461", "0.5512919", "0.55107385", "0.55098754", "0.5507181", "0.54920137", "0.54738295", "0.5473731", "0.5472489", "0.5471289", "0.5445839", "0.5442391", "0.54277617", "0.5420617", "0.5419599", "0.54195255", "0.5418035", "0.540092", "0.5391298", "0.53815186", "0.53774697", "0.5373878", "0.5372283", "0.53704697", "0.53665155", "0.5357336", "0.53498346", "0.5344308", "0.5344308", "0.5344275", "0.5318994", "0.53097975", "0.5306555" ]
0.7495494
2
Function to instantiate the instrument.
def connect_instrument(self):
    for instrument in self.rm.list_resources():
        try:
            k2400 = self.init_inst(instrument)
            k2400.timeout = 5000
            if k2400.query('*IDN?')[:8] == 'KEITHLEY':
                return k2400
        except AttributeError as f:
            logger.warning(f'Unknown error - {f}')
        except errors.VisaIOError as e:
            logger.warning(f'Not possible to connect the port - {k2400}.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_instrument(self, entry=\"entry\", instrument_name=\"id00\",):\n if not isinstance(entry, h5py.Group):\n entry = self.new_entry(entry)\n return self.new_class(entry, instrument_name, \"NXinstrument\")", "def new_instrument(self, instrument_type):\r\n return self.instrument_list[instrument_type](instrument_type,\r\n self.midi_output)", "def buildInstrument(cfg):\n\n #Build instrument\n ################################################################\n try:\n instrument = Instrument(cfg)\n except Exception as e:\n wx.MessageBox('An unknown error was raised during the'\n ' initialization of the instrument. See log'\n ' for details. NESSI will shut down.', \n 'UNKNOWN INITIALIZATION ERROR!', \n wx.OK | wx.ICON_ERROR)\n logging.critical(traceback.format_exc())\n shutdown()\n\n #Check for components that did not initialize\n ################################################################\n failedComponents = [compName for compName, comp in \n instrument.components.items() if comp == None]\n \n if failedComponents:\n dlg = wx.MessageDialog(None, 'The following instrument components'\n ' did not initialize: ' + str(failedComponents) + '\\n'\n 'Would you like to continue anyway?',\n 'Instrument Partially Initialized!',\n wx.YES_NO | wx.ICON_QUESTION)\n moveon = dlg.ShowModal() == wx.ID_YES\n dlg.Destroy()\n \n if not moveon:\n shutdown()\n\n #Connect to telescope\n ################################################################\n try:\n #instrument.connectTelescope()\n pass\n except:\n wx.MessageBox('Unable to connect to telescope! NESSI must shut'\n ' down!', 'TELESCOPE CONNECTION ERROR!',\n wx.OK | wx.ICON_ERROR)\n logging.critical(traceback.format_exec())\n shutdown()\n\n return instrument", "def __init__(self, instrument, params=None):\n super(InstrumentsCandles, self).__init__(instrument)\n self.params = params", "def __init__(self, instrument):\n endpoint = self.ENDPOINT.format(instrument=instrument)\n super(Instruments, self).__init__(endpoint, method=self.METHOD)", "def __init__(self, instrument, sampling, scene, d):\n block = d['block']\n effective_duration = d['effective_duration']\n photon_noise = d['photon_noise']\n max_nbytes = d['max_nbytes']\n nprocs_instrument = d['nprocs_instrument']\n nprocs_sampling = d['nprocs_sampling']\n comm = d['comm']\n psd = d['psd']\n bandwidth = d['bandwidth']\n twosided = d['twosided']\n sigma = d['sigma']\n\n Acquisition.__init__(\n self, instrument, sampling, scene, block=block,\n max_nbytes=max_nbytes, nprocs_instrument=nprocs_instrument,\n nprocs_sampling=nprocs_sampling, comm=comm)\n self.photon_noise = bool(photon_noise)\n self.effective_duration = effective_duration\n self.bandwidth = bandwidth\n self.psd = psd\n self.twosided = twosided\n self.sigma = sigma\n self.forced_sigma = None", "def __init__(self, instrument):\n self._unsub_dispatcher = None\n self._instrument = instrument\n self._state = instrument.state", "def __init__(self, attributes=None):\n self.coil_combine_method = 'Siemens'\n self.fids_to_average = 1\n self.fid_left_shift = 0\n self.gaussian_apodization = 2.0\n self.apply_peak_shift = True\n self.reference_peak_center = 2.01\n self.peak_search_width = 0.2\n self.apply_phase0 = True\n self.phase0_range_start = 3.5\n self.phase0_range_end = 0.5\n self.global_phase0 = 0.0\n self.global_phase1 = 0.0\n \n if attributes is not None:\n self.inflate(attributes)", "def __init__(self, date_time, diastolic):\n Encounter.__init__(self, date_time)\n self.__diastolic = diastolic", "def __init__(self, instrument, params=None):\n 
super(InstrumentsOrderBook, self).__init__(instrument)\n self.params = params", "def _generate_trading_instances(self, start_date, end_date, instruments, params):\n configuration = self.configuration\n configuration.start_date = start_date\n configuration.end_date = end_date\n configuration.instruments = instruments\n\n logger.info(\"Creating DataHandler, Strategy, Portfolio and ExecutionHandler\")\n logger.info(\"Start date: %s\" % start_date)\n logger.info(\"End date: %s\" % end_date)\n logger.info(\"Instrument(s): %s...\" % instruments)\n logger.info(\"Params: %s...\" % params)\n\n self.data_handler = self.data_handler_cls(self.events, configuration)\n self.strategy = self.strategy_cls(self.data_handler, self.events, configuration, **params)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, configuration)\n self.execution_handler = self.execution_handler_cls(self.data_handler, self.events, configuration)", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, name, address, number=1, **kwargs):\n logging.debug(__name__ + ' : Initializing instrument')\n super().__init__(name, address, **kwargs)\n\n self.visa_handle.set_visa_attribute(visa.constants.VI_ATTR_ASRL_STOP_BITS,\n visa.constants.VI_ASRL_STOP_TWO)\n self._address = address\n self._number = number\n self._values = {}\n\n self.add_parameter('level',\n label='level',\n get_cmd=self._do_get_level,\n unit='%')\n self.add_parameter('status',\n get_cmd=self._do_get_status)\n self.add_parameter('rate',\n get_cmd=self._do_get_rate,\n set_cmd=self._do_set_rate)\n\n # a dummy command to avoid the initial error\n try:\n self.get_idn()\n sleep(70e-3) # wait for the device to be able to respond\n self._read() # to flush the buffer\n except:\n pass", "def __init__(self):\n\n #call super class's __init__ method\n super(TRiseSampler, self).__init__(name=\"trise\", observed=False)", "def __init__(self, *args):\n _snap.TMIn_swiginit(self, _snap.new_TMIn(*args))", "def setup(self):\n\n warnings.simplefilter(\"always\", DeprecationWarning)\n\n orbit_info = {'index': 'slt', 'kind': 'lt'}\n self.tinst = pysat.Instrument('pysat', 'testing', orbit_info=orbit_info)\n self.tinst.bounds = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 2))\n\n self.warn_msgs = []\n self.war = \"\"\n return", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorIF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n self.long_bins = [0., 360., 24]\n self.mlt_bins = [0., 24., 24]\n self.auto_bin = True\n\n return", "def __init__(self, patient_number):\n logging.info(\"Creating patient {}...\".format(patient_number))\n self.patient_number = patient_number\n self.signals, self.additional_fields = self.get_raw_signals()\n self.mit_bih_labels_str, self.labels_locations, self.labels_descriptions = self.get_annotations()\n self.heartbeats = self.slice_heartbeats()\n logging.info(\"Completed patient {}.\\n\\n\".format(patient_number))", "def New(*args, **kargs):\n obj = 
itkPeakSignalToNoiseRatioCalculatorIUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def setup(self):\n\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 3))\n\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n self.long_bins = np.linspace(0., 360., 25)\n self.mlt_bins = np.linspace(0., 24., 25)\n\n self.auto_bin = False\n\n return", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorIF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, instrument, params=None):\n super(InstrumentsPositionBook, self).__init__(instrument)\n self.params = params", "def __init__(self, **stn_dict):\n self._last_rain = None\n\n global DEBUG_READ\n DEBUG_READ = int(stn_dict.get('debug_read', 0))\n global DEBUG_DECODE\n DEBUG_DECODE = int(stn_dict.get('debug_decode', 0))\n global DEBUG_PRESSURE\n DEBUG_PRESSURE = int(stn_dict.get('debug_pressure', 0))\n\n self.model = stn_dict.get('model', 'TE923')\n self.max_tries = int(stn_dict.get('max_tries', 5))\n self.retry_wait = int(stn_dict.get('retry_wait', 30))\n self.polling_interval = int(stn_dict.get('polling_interval', 10))\n self.sensor_map = stn_dict.get('sensor_map', DEFAULT_SENSOR_MAP)\n self.battery_map = stn_dict.get('battery_map', DEFAULT_BATTERY_MAP)\n self.memory_size = stn_dict.get('memory_size', 'small')\n\n vendor_id = int(stn_dict.get('vendor_id', '0x1130'), 0)\n product_id = int(stn_dict.get('product_id', '0x6801'), 0)\n device_id = stn_dict.get('device_id', None)\n\n loginf('driver version is %s' % DRIVER_VERSION)\n loginf('polling interval is %s' % str(self.polling_interval))\n loginf('sensor map is %s' % self.sensor_map)\n loginf('battery map is %s' % self.battery_map)\n\n self.station = TE923(vendor_id, product_id, device_id,\n memory_size=self.memory_size)\n self.station.open()", "def setup(self):\n insts = []\n for i in range(2):\n r_date = dt.datetime(2009, 1, i + 1)\n insts.append(pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n root_date=r_date))\n self.testC = pysat.Constellation(instruments=insts)\n self.testI = pysat.Instrument('pysat', 'testing', clean_level='clean')\n self.bounds = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n\n # Apply bounds to all Instruments in Constellation, and solo Instrument.\n self.testC.bounds = self.bounds\n self.testI.bounds = self.bounds\n\n # Define variables for 1D testing. A more limited set that only\n # depends upon 'mlt'. 
Other variables also include longitude, which\n # can differ between instruments when only binning by 'mlt'.\n self.one_d_vars = ['dummy1']\n self.unequal_one_d_vars = ['dummy2', 'dummy3']\n\n return", "def simulator_from_instrument(instrument):\r\n\r\n grid = grid_from_instrument(instrument=instrument)\r\n psf = psf_from_instrument(instrument=instrument)\r\n\r\n if instrument in \"vro\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=100.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"euclid\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2260.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst_up\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"ao\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=1000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n else:\r\n raise ValueError(\"An invalid instrument was entered - \", instrument)", "def __init__(self):\n self.risepower = 4.\n self.min_event_amplitude = 5.0e-12 # pA default\n self.template = None\n pass", "def __init__(self):\n # Hardware initialization\n gpio.init()\n # Logging\n self._logger = logging.getLogger(' '.join([__name__, __version__]))\n self._logger.debug(\n 'Instance of %s created: %s',\n self.__class__.__name__,\n str(self)\n )", "def setup(self):\n\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n update_files=True)\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 31))\n self.test_bins = [0, 24, 24]\n self.test_label = 'slt'\n self.test_data = ['dummy1', 'dummy2']\n self.out_keys = ['count', 'avg_abs_dev', 'median', 'bin_x']\n self.out_data = {'dummy1':\n {'count': [111780., 111320., 111780., 111320.,\n 111780., 111320., 111780., 111320.,\n 111780., 111320., 111780., 111320.,\n 111780., 111320., 111918., 111562.,\n 112023., 111562., 112023., 111412.,\n 111780., 111320., 111780., 111320.],\n 'avg_abs_dev': np.zeros(shape=24),\n 'median': np.linspace(0.0, 23.0, 24)},\n 'dummy2':\n {'count': [111780., 111320., 111780., 111320.,\n 111780., 111320., 111780., 111320.,\n 111780., 111320., 111780., 111320.,\n 111780., 111320., 111918., 111562.,\n 112023., 111562., 112023., 111412.,\n 111780., 111320., 111780., 111320.],\n 'avg_abs_dev': np.zeros(shape=24) + 6.0,\n 'median': [11., 12., 11., 11., 12., 11., 12., 11.,\n 12., 12., 11., 12., 11., 12., 11., 11.,\n 12., 11., 12., 11., 11., 11., 11., 12.]}}\n return", "def __init__(self, simulator):\r\n 
self.initialize(simulator)", "def add_instrument(self, mount, instrument):\n pass", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorIUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorIUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def setUp(self):\n self.m = m = random.randint(1, 100)\n self.n = n = random.randint(1, 100)\n self.sig = sig = Signature(\"name\", Dim(\"m\"), Dim(\"n\"),\n sData(\"A\", \"ldA * n\"), Ld(\"ldA\", \"m\"),\n dData(\"B\", \"ldB * m\"), Ld(\"ldB\", \"m\"),\n cData(\"C\", \"ldC * n\"), Ld(\"ldC\", \"n\"))\n self.ex = ex = Experiment()\n ex.calls = [sig(m, n, \"X\", None, \"Y\", None, \"Z\", None)]\n ex.infer_lds()\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")", "def __init__(self, name, params):\n # create generic technology object\n DER.__init__(self, params['name'], 'ICE', params)\n # input params UNITS ARE COMMENTED TO THE RIGHT\n self.rated_power = params['rated_power'] # kW/generator\n self.p_min = params['min_power'] # kW/generator\n self.startup_time = params['startup_time'] # default value of 0, in units of minutes\n self.efficiency = params['efficiency'] # gal/kWh\n self.fuel_cost = params['fuel_cost'] # $/gal\n self.vari_om = params['variable_om_cost'] # $/kwh\n self.fixed_om = params['fixed_om_cost'] # $/yr\n self.capital_cost = params['ccost'] # $/generator\n self.ccost_kw = params['ccost_kW']\n\n self.variable_names = {'ice_gen', 'on_ice'}\n try:\n self.n = params['n'] # generators\n self.capex = self.capital_cost * self.n + self.ccost_kw * self.rated_power * self.n\n except KeyError:\n pass", "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def __init__(self):\r\n super().__init__()\r\n self._name = \"PICOSCOPE2408b\"\r\n self._lib = None\r\n self._handle = None\r\n self._run_lock = Lock()\r\n self._driver_lock = Lock()\r\n\r\n self._sampling_time = 4E-9\r\n self._sampling_duration = 50E-6\r\n self._pulse_time = 100E-9\r\n self._samples = int(self._sampling_duration / self._sampling_time)\r\n self._idx = 0\r\n\r\n w_len = self._samples\r\n location = 0.1\r\n idx1 = int(w_len*(location - self._pulse_time/(2*self._sampling_duration)))\r\n idx2 = int(w_len*(location + self._pulse_time/(2*self._sampling_duration))) - 1\r\n self._waveform = np.array([-1*MAX_EXT if (i < idx1 or i >= idx2) else MAX_EXT for i in range(w_len)],dtype=c_int16)\r\n\r\n self._A_data = np.ones(self._samples)*2\r\n self._B_data = np.ones(self._samples)*-2\r\n self._C_data = np.ones(self._samples)*0\r\n self._window_est = np.ones(self._samples)*0\r\n self._t = np.linspace(0,self._sampling_duration,self._samples)\r\n self._range_A = None\r\n self._range_B = None\r\n self._depol_ratio = None\r\n\r\n self._process_queue = Queue()\r\n self._save_queue = Queue()", "def new(\n cls,\n exp: int = 0,\n include_iat: bool = False,\n leeway: int = 0,\n ):\n return cls(exp, include_iat, leeway)", "def __init__(self):\n self._read_calibration_data()\n self.set_oversamplings_and_mode(\n HumidityOversampling.x08,\n TemperatureOversampling.x08,\n PressureOversampling.x16,\n SensorMode.Normal)\n self.set_config(\n InactiveDuration.ms1000,\n FilterCoefficient.fc04)", "def __init__ ( self ) :\n\n self.m_src = self.configSrc ('source', ':Cspad.')\n 
self.m_key_in = self.configStr ('key_in', 'peaks_nda')\n self.m_print_bits = self.configInt ('print_bits', 1)\n\n self.counter = 0\n self.count_msg = 0\n\n if self.m_print_bits & 1 : self.print_input_pars()\n\n self.list_of_dtypes = [\n psana.ndarray_float32_2,\n psana.ndarray_float64_2\n ]", "def init_recording(self):\n self.statusBar().showMessage('Initialising...')\n self.streams = resolve_stream('type', 'EEG')\n self.inlet = StreamInlet(self.streams[0])\n self.timeObj = []\n self.sampleObj = []", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDUS.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorIUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self,\n instruments: Optional[Union[Instrument, Iterable[Instrument], dict]] = (),\n name: Optional[str] = None):\n super().__init__()\n if isinstance(instruments, dict):\n inst_list = []\n for k, v in instruments.items():\n v.name = k\n inst_list.append(v)\n self.instruments = inst_list\n else:\n self.instruments = instruments\n\n self.name = name", "def __init__(self, samples, analysis):\r\n self.samples = samples\r\n self.analysis = analysis", "def new(self):\n self._init()", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n self.long_bins = [0., 360., 24]\n self.mlt_bins = [0., 24., 24]\n self.auto_bin = True\n\n return", "def __init__(self, *args):\n _snap.TChAV_swiginit(self, _snap.new_TChAV(*args))", "def instr_dict():\n out = base_dict()\n out['mro']['current'] = ['Instrument']\n ao(out, 'nSamples', 'Integer', 1, 'Number of samples', readLevel=3)\n ao(out, 'devices', 'List', attr=['Hidden'])\n ao(out, 'initTest', 'Progress', attr=['Hidden'])\n ao(out, 'closingTest', 'Progress', attr=['Hidden'])\n return out", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing2D',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 2, 1))\n self.dname = 'series_profiles'\n self.test_vals = np.arange(50) * 1.2\n\n return", "def __init__(self, *args):\n _snap.TMOut_swiginit(self, _snap.new_TMOut(*args))", "def __init__(self, underlying: str):\n self._instruments = {}\n self._underlying = underlying\n self._start_date = None\n self._end_date = None\n\n self._total_credit = float(0)\n self._total_debit = float(0)", "def setUp(self):\n self.sampler = {\n \"name\": \"samplername\",\n \"backend_name\": \"\",\n \"backend_header\": \"\",\n \"backend_prefix\": \"\",\n \"backend_suffix\": \"\",\n \"backend_footer\": \"\",\n \"ncores\": 2,\n \"threads_per_core\": 1,\n \"omp_enabled\": True,\n \"papi_enabled\": True,\n \"papi_counters_max\": 2,\n \"papi_counters_avail\": (\"C1\", \"C2\", \"C3\"),\n \"kernels\": {\"dgemm\": (\n 'dgemm', 'char*', 'char*', 'int*', 'int*', 'int*', 'double*',\n 'double*', 'int*', 'double*', 'int*', 'double*', 'float*',\n 'int*'\n )},\n \"nt_max\": random.randint(1, 10),\n \"exe\": \"x\"\n }\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")\n self.k = Symbol(\"k\")\n self.ns = [random.randint(1, 100) for _ in range(5)]", "def create_instance(c_instance):\n return MonoPedal(c_instance)", "def __init__(self, name, input, transmit_handler):\n local_dir = os.path.dirname(os.path.abspath(__file__))\n 
MetaIRInstance.__init__(self, os.path.join(local_dir, 'air_meta.yml'))\n\n self.transmit_handler = transmit_handler\n self.name = name\n\n self.tm_started = False\n self.disabled = True\n\n # Add the content to the MetaIR instance\n self.add_content(input)\n self.port_count = self.meta_ir_object_map[\"layout\"][\"port_count\"]\n\n # Create the AIR objects: parsers, actinos, tables, pipelines and TMs\n self.air_value_set = {}\n self.air_value_map = {}\n self.air_parser = {}\n self.air_action = {}\n self.air_table = {}\n self.air_pipeline = {}\n self.air_traffic_manager = {}\n self.processors = {}\n self.transmit_processor = TransmitProcessor(transmit_handler)\n\n for name, val in self.value_set.items():\n self.air_value_set[name] = [] # Just use a list\n\n for name, val in self.value_map.items():\n self.air_value_map[name] = {} # Just use a dict\n\n for name, val in self.parser.items():\n self.air_parser[name] = Parser(name, val, self.parse_state,\n self.header, self.value_set)\n self.processors[name] = self.air_parser[name]\n for name, val in self.action.items():\n self.air_action[name] = Action(name, val)\n for name, val in self.table.items():\n self.air_table[name] = Table(name, val, self.air_action)\n for name, val in self.control_flow.items():\n self.air_pipeline[name] = Pipeline(name, val, self.air_table,\n self.air_action)\n self.processors[name] = self.air_pipeline[name]\n for name, val in self.traffic_manager.items():\n self.air_traffic_manager[name] = SimpleQueueManager(name, val,\n self.port_count)\n self.processors[name] = self.air_traffic_manager[name]\n\n # Plumb the layout\n layout = self.meta_ir_object_map[\"layout\"]\n meta_ir_assert(layout[\"format\"] == \"list\", \"Unsupported layout: not a list\")\n layout_name_list = layout[\"implementation\"]\n meta_ir_assert(isinstance(layout_name_list, list), \n \"Layout implementation is not a list\")\n\n proc_count = len(layout_name_list)\n for idx, processor_name in enumerate(layout_name_list):\n cur_proc = self.processors[processor_name]\n if idx == 0:\n logging.debug(\"Layout: First processor %s\" % cur_proc.name)\n self.first_processor = cur_proc\n\n if idx < proc_count - 1:\n next_proc = self.processors[layout_name_list[idx + 1]]\n cur_proc.next_processor = next_proc\n else: # Last one connects to transmit processor\n cur_proc.next_processor = self.transmit_processor\n\n logging.debug(\"Layout %s to %s\" % (cur_proc.name,\n cur_proc.next_processor.name))\n\n # Grab table initialization object if present\n self.table_initialization = {}\n ext_objs = self.external_object_map\n if \"table_initialization\" in ext_objs.keys():\n self.table_initialization = ext_objs[\"table_initialization\"]", "def __init__(self):\n Sampler.__init__(self)\n self._registeredIdentifiers = set() # tracks job identifiers used for this adaptive sampler and its inheritors\n self._prefixToIdentifiers = {} # tracks the mapping of run prefixes to particular identifiers\n self._inputIdentifiers = {} # identifiers for a single realization\n self._targetEvaluation = None # data object with feedback from sample realizations\n self._solutionExport = None # data object for solution printing\n self._requireSolnExport = False # if this object requires a solution export\n # NOTE TargetEvaluations consider all the Step <Output> DataObjects as candidates, so requiring\n # exactly one TargetEvaluation forces only having one <Output> DataObject in AdaptiveSampling\n # MultiRun Steps. 
For now, we leave it as \"n\".\n self.addAssemblerObject('TargetEvaluation', InputData.Quantity.one_to_infinity) # Place where realization evaluations go", "def __init__(\n self,\n manufacturer: int,\n product: int,\n sample_period: int,\n midi_unity_note: int,\n midi_pitch_fraction: int,\n smpte_format: int,\n smpte_offset: int,\n number_of_sample_loops: int,\n sampler_data: int,\n first_cue_point_id: int,\n first_loop_type: int,\n first_loop_start: int,\n first_loop_end: int,\n first_loop_fraction: int,\n first_loop_play_count: int,\n ):\n\n self.__manufacturer = manufacturer\n self.__product = product\n self.__sample_period = sample_period\n self.__midi_unity_note = midi_unity_note\n self.__midi_pitch_fraction = midi_pitch_fraction\n self.__smpte_format = smpte_format\n self.__smpte_offset = smpte_offset\n self.__number_of_sample_loops = number_of_sample_loops\n self.__sampler_data = sampler_data\n self.__first_cue_point_id = first_cue_point_id\n self.__first_loop_type = first_loop_type\n self.__first_loop_start = first_loop_start\n self.__first_loop_end = first_loop_end\n self.__first_loop_fraction = first_loop_fraction\n self.__first_loop_play_count = first_loop_play_count", "def __init__(self, evt_callback):\n #Construct superclass.\n SingleConnectionInstrumentDriver.__init__(self, evt_callback)", "def __init__(self, **params):\n # Dimension of the true signal x\n self.N = params.get('N', 1024)\n\n # Dimension of the measurement vector y\n self.M = params.get('M', 256)\n\n # Number of timesteps\n self.T = params.get('T', 4)\n\n # Type of the random measurement matrix to generate\n # (1) : normalized Gaussian matrix\n self.A_type = params.get('A_type', 1)\n\n # Active support probability\n self.lambda_ = params.get('lambda_', 0.08) # high sparsity default\n\n # Amplitude mean\n self.zeta = params.get('zeta', 0)\n\n # Amplitude variance\n self.sigma2 = params.get('sigma2', 1)\n\n # Amplitude innovation rate\n self.alpha = params.get('alpha', 0.10)\n\n # Active-to-inactive transition probability\n self.p01 = params.get('p01', 0.10)\n\n # Desired signal-to-noise ratio, in dB\n self.desired_SNR = params.get('desired_SNR', 25)", "def _create_petition_(self):\n self.__weather = create(self.__latitude, self.__longitude)", "def make_instrumentation(debug=False):\n # Possible argument values\n batch = inst.var.OrderedDiscrete([2**i for i in range(7,13)]) # 128 to 4096 by powers of 2\n lr = inst.var.OrderedDiscrete([10.0**(-i) for i in range(3,6)]) # 0.001 to 0.00001 by powers of 10\n balance = inst.var.OrderedDiscrete([True, False]) # boolean\n units = inst.var.OrderedDiscrete([i*10 for i in range(1,21)]) # 10 to 200 by 10's\n layers = inst.var.OrderedDiscrete(list(range(1,13))) # 1 to 12\n dropout = inst.var.OrderedDiscrete([5*i/100 for i in range(0,11)]) # 0.0 to 0.5 by 0.05's\n\n # Our \"function\" (neural net training with output of max validation accuracy)\n # is a function of the above hyperparameters\n instrum = inst.Instrumentation(batch, lr, balance, units, layers, dropout)\n\n if debug:\n # Make sure defaults are reasonable and in the middle\n print(\"Default values\")\n print(get_summary(instrum))\n\n return instrum", "def __init__(self, fno, wavelength, extent=None, samples=None):\n if samples is not None:\n x = np.linspace(-extent, extent, samples)\n y = np.linspace(-extent, extent, samples)\n xx, yy = np.meshgrid(x, y)\n rho, phi = cart_to_polar(xx, yy)\n data = airydisk(rho, fno, wavelength)\n else:\n x, y, data = None, None, None\n\n super().__init__(data=data, x=x, y=y)\n 
self.fno = fno\n self.wavelength = wavelength\n self.has_analytic_ft = True", "def __init__(__self__, *,\n catalog: str,\n guardian: str,\n scan: str):\n pulumi.set(__self__, \"catalog\", catalog)\n pulumi.set(__self__, \"guardian\", guardian)\n pulumi.set(__self__, \"scan\", scan)", "def __init__(self):\n _snap.TStdIn_swiginit(self, _snap.new_TStdIn())", "def __init__(__self__, *,\n perf_metric_type: str,\n perf_unit: str,\n sample_series_label: str):\n pulumi.set(__self__, \"perf_metric_type\", perf_metric_type)\n pulumi.set(__self__, \"perf_unit\", perf_unit)\n pulumi.set(__self__, \"sample_series_label\", sample_series_label)", "def __init__(self, logger, interface, numIntervals):\n\n self._log = logger\n self.interface = interface \n\n # counters\n self.countersData = CountersOperData() \n self._rxPacketsCounter = self.PeriodicCounter(numIntervals)\n self._txPacketsCounter = self.PeriodicCounter(numIntervals)\n self._rxBytesCounter = self.PeriodicCounter(numIntervals)\n self._txBytesCounter = self.PeriodicCounter(numIntervals)\n\n # rates\n self.rxPacketsPerSec = 0\n self.txPacketsPerSec = 0\n self.rxBitsPerSec = 0\n self.txBitsPerSec = 0", "def __init__(self, *args, **kwargs):\n self.__is_connected__ = False\n self.logger = kwargs.get('logger',None)\n if ( self.logger is None ):\n # Get an instance of a logger\n console = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s: %(levelname)-8s %(message)s',\"%Y-%m-%d %H:%M:%S\")\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n self.logger = logging.getLogger('')\n self.logger.setLevel(logging.INFO)\n # initial log entry\n self.logger.debug(\"%s: %s version [%s]\" % (self.__class__.__name__, inspect.getfile(inspect.currentframe()),__version__))\n # initialize variables - so all are listed here for convenience\n self.dict_config = {} # dictionary, see cdh_manager.cfg example\n self.__cm_cdh__ = None\n self.__boto_ec2__ = None\n self.data = DataObjectSample(logger=self.logger)", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n return", "def create(cls,configuration,data_handler):\n ID = configuration[config.ID] \n d = configuration.get(config.DESCRIPTION,cls.description)\n n = configuration.get(config.NAME,cls.name)\n path = configuration['path'] \n \n #hardware._file.debug = True\n return NIPCI6602(path,ID,n,d)", "def __init__(self):\n self.libpath = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-1])\n sys.path.append(self.libpath)\n libpath2 = os.sep.join(self.libpath.split(os.sep)[:-1])\n sys.path.append(libpath2)\n # Initialize TCMetaSchema with correct libpath\n TCMetaSchema(self.libpath)\n self.args, self.unknown = IceteaManager._parse_arguments()\n # If called with --clean, clean up logs.\n if self.args.clean:\n _cleanlogs(silent=self.args.silent, log_location=self.args.log)\n\n LogManager.init_base_logging(self.args.log, verbose=self.args.verbose,\n silent=self.args.silent, color=self.args.color,\n no_file=(self.args.list or self.args.listsuites),\n truncate=not self.args.disable_log_truncate)\n\n self.logger = LogManager.get_logger(\"icetea\")\n self.pluginmanager = None\n self.resourceprovider = ResourceProvider(self.args)\n self._init_pluginmanager()\n self.resourceprovider.set_pluginmanager(self.pluginmanager)", "def __init__(self, **kw_args):\n self._isoFmt = \"%Y%m%dT%H%M%S%z\"\n\n 
self._init_client_id(kw_args)\n self._init_shared_secret(kw_args)\n self._init_counter_from_time(kw_args)\n self._init_last_count(kw_args)\n self._init_last_count_update_time(kw_args)\n self._init_period(kw_args)\n self._init_password_length(kw_args)\n self._init_tags(kw_args)\n self._init_note(kw_args)", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorIUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def initialize(self, formGenerator, AWGChannels, modulationMode=None, MWSource=None, mixer=None, formGeneratorType='AWG'):\n instrumentManager = InstrumentManager()\n # optional microwave source attached to this pulse generator\n self._MWSource = instrumentManager.getInstrument(\n MWSource) if MWSource is not None else None\n # optional mixer attached to this pulse generator\n self._mixer = instrumentManager.getInstrument(\n mixer) if mixer is not None else None\n # hardware generator attached to this pulse generator\n self._AWG = instrumentManager.getInstrument(formGenerator)\n # type of generators in AWG, AFG, NOne, ...\n self._formGeneratorType = formGeneratorType\n # dictionary of parameters\n self._params = dict()\n self._params[\"MWSource\"] = MWSource\n # confusion here: should be change for formGenerator\n self._params[\"formGenerator\"] = formGenerator\n self._params[\"modulationMode\"] = modulationMode\n self._params[\"AWGChannels\"] = AWGChannels\n self._params[\"mixer\"] = mixer\n # Obsolete. Replaced by pulseList. Mainted for compatinbbility reasons\n self.pulses = dict()\n self._params[\"pulses\"] = self.pulses\n # Obsolete. Replaced by markersList. Mainted for compatinbbility\n # reasons\n self.markersDict = dict()\n self._params[\"markersDict\"] = self.markersDict\n # Obsolete.\n self.totalPulse = numpy.zeros(\n self.numberOfPoints(), dtype=numpy.complex128)\n self.index = 0 # Obsolete.\n self.indexMarker = 0 # Obsolete.\n # List of pulses (object of class pulse)\n self.pulseList = []\n # List of markers (object of class marker)\n self.markersList1 = ()\n # An array of zeros into which the markers will be concatented\n self.markerArray1 = zeros(self.numberOfPoints(), dtype=numpy.int8)\n # total number of markers attached to this pulse generator\n # #self._AWG.markersPerChannel()*\n self.markersChannels = 2 if self._params[\n \"modulationMode\"] == 'IQMixer' else 1\n if self.markersChannels == 2:\n # List of markers (object of class marker)\n self.markersList2 = ()\n # An array of zeros into which the markers will be concatented\n self.markerArray2 = zeros(self.numberOfPoints(), dtype=numpy.int8)\n self.preparePulseSequence()\n self.sendPulseSequence()\n return", "def __init__(self, exp_params, stamp_unique=True):\n self._main_thread = True\n self.params = copy.deepcopy(exp_params)\n self.params['class'] = self.__class__.__name__\n self._check_required_params()\n self.__check_exist_path()\n self.__create_folder(stamp_unique)\n set_experiment_logger(self.params['path_exp'], FILE_LOGS)\n # set stream logging to info level\n for lh in logging.getLogger().handlers:\n if isinstance(lh, logging.StreamHandler) and \\\n not isinstance(lh, logging.FileHandler):\n lh.setLevel(logging.INFO)\n logging.info('initialise experiment...')\n logging.info(string_dict(self.params, 'PARAMETERS:'))\n logging.info('COMPUTER: %r', computer_info())", "def from_serial_instrument(self, instrument):\n return FunctionGenerator(ser=instrument._ser)", "def add_instrument(self,par,T,coup,price,compounding_freq=2):\r\n 
self.instruments[T]=(par,coup,price,compounding_freq)", "def setup(self):\n\n self.insts = []\n self.testInst = pysat.Instrument('pysat', 'testing2D',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 3))\n self.insts.append(self.testInst)\n self.insts.append(self.testInst)\n\n self.dname = 'series_profiles'\n self.test_vals = np.arange(50) * 1.2\n\n self.testC = pysat.Constellation(instruments=self.insts)\n\n return", "def __init__(self, attributes=None):\n super().__init__(attributes)\n \n # processing parameters\n self.set = _Settings()\n\n # results storage\n self.measure_time = None # store here in case we average FIDs, filled by chain!\n self.frequency_shift = None\n self.phase_0 = None\n self.data = None\n \n if attributes is not None:\n self.inflate(attributes)\n\n self.chain = None", "def create(self, odometryType): # real signature unknown; restored from __doc__\n pass", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDUC.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def create(self):\n o = self._create_impl()\n self.logger.debug(f\"created {o}\")\n self._notify(o)", "def __init__(self):\n\n\t\tself.Helpers = Helpers(\"TassAI\", False)\n\n\t\tself.qs = 16\n\t\tself.context = InferenceContext([self.Helpers.confs[\"iotJumpWay\"][\"MQTT\"][\"TassAI\"][\"runas\"], self.Helpers.confs[\"iotJumpWay\"][\"MQTT\"][\"TassAI\"][\"runas\"], self.Helpers.confs[\"iotJumpWay\"][\"MQTT\"][\"TassAI\"][\"runas\"]], \"\", \"\", \"\")\n\n\t\tself.Helpers.logger.info(\"TassAI Helper Class initialization complete.\")", "def __init__(__self__, *,\n app_insights_instrumentation_key: Optional[pulumi.Input[str]] = None,\n app_insights_sampling_rate: Optional[pulumi.Input[float]] = None,\n error: Optional[pulumi.Input['ErrorArgs']] = None,\n trace_enabled: Optional[pulumi.Input[bool]] = None):\n if app_insights_instrumentation_key is not None:\n pulumi.set(__self__, \"app_insights_instrumentation_key\", app_insights_instrumentation_key)\n if app_insights_sampling_rate is not None:\n pulumi.set(__self__, \"app_insights_sampling_rate\", app_insights_sampling_rate)\n if error is not None:\n pulumi.set(__self__, \"error\", error)\n if trace_enabled is not None:\n pulumi.set(__self__, \"trace_enabled\", trace_enabled)", "def New(*args, **kargs):\n obj = itkSpeckleNoiseImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n return", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing2D',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 3))\n self.dname = 'alt_profiles'\n self.test_vals = np.arange(50) * 1.2\n self.test_fracs = np.arange(50) / 50.0\n\n return", "def __init__(self, *args):\n this = _libsbml.new_UnitDefinition(*args)\n try: self.this.append(this)\n except: self.this = this", "def setInstrument(self,instrument):\n self.instrument = instrument\n self.instrument.attach(self)", "def __new__(cls, *args, **kwargs):\n\n instance = super(PGM, cls).__new__(cls)\n instance.timer = Timer(['init', 'solve', 'solve_wo_func',\n 'solve_wo_rsdl', 'solve_wo_btrack'])\n instance.timer.start('init')\n return instance", "def setup(self):\n\n self.testInst = 
pysat.Instrument('pysat', 'testing2D_xarray',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 2, 1))\n self.dname = 'profiles'\n self.test_val_length = 15\n\n return", "def setup(self):\n insts = []\n for i in range(5):\n insts.append(pysat.Instrument('pysat', 'testing',\n clean_level='clean'))\n self.testC = pysat.Constellation(instruments=insts)\n self.testI = pysat.Instrument('pysat', 'testing', clean_level='clean')\n self.bounds = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n\n # Apply bounds to all Instruments in Constellation, and solo Instrument.\n self.testC.bounds = self.bounds\n self.testI.bounds = self.bounds\n\n # Define variables for 1D testing\n self.one_d_vars = ['dummy1', 'dummy2', 'dummy3']\n self.unequal_one_d_vars = []\n\n return", "def __init__(self, layout=None):\n self.presentation_ended = False\n self.presentation = Presentation()\n self.layout = layout\n self.master_connection = None\n self.source = ''\n self.beacon = Beacon()\n self.beacon.start_beaconing()", "def __init__(self, multiinstrument, sampling, scene, d):\n\n weights = d['weights']\n\n self.warnings(d)\n\n if d['MultiBand'] and d['nf_sub']>1:\n self.subacqs = [QubicAcquisition(multiinstrument[i],\n sampling, scene, d)\n for i in range(len(multiinstrument))]\n else:\n raise ValueError('If you do not use a multiband instrument,'\n 'you should use the QubicAcquisition class'\n 'which is done for the monochromatic case.')\n for a in self[1:]:\n a.comm = self[0].comm\n self.scene = scene\n self.d = d\n if weights is None:\n self.weights = np.ones(len(self)) # / len(self)\n else:\n self.weights = weights", "def __init__(self, *args):\n _snap.TVoid_swiginit(self, _snap.new_TVoid(*args))", "def __init__(self, *args):\n _snap.TCs_swiginit(self, _snap.new_TCs(*args))", "def __init__(self, *args):\n _snap.TIntHSI_swiginit(self, _snap.new_TIntHSI(*args))", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def initialize(self,*args,**kwargs):\n self.__instrumentID = c_uint32(0) \n self.__numInstruments = c_uint32()\n self.__nbrOfChannels = c_uint32()\n self.__nbrADCBits = c_uint32()\n self.__temperature = c_int32()\n self.__time_us = c_double()\n\n self.loadDLLs(**kwargs) # Load the different DLLs or DLL based modules\n self.reinit() # init or reinit the board\n self.createDictAndGlobals() # create dictionaries and global variables\n self.nbrOfChannels=int(self.__nbrOfChannels.value) # duplicate self.nbrOfChannels in a Python type variable \n self.getInitialConfig()", "def __init__(self, *args):\n\n super(Recorder, self).__init__('RECORDER', *args)\n\n self.client = InfluxDBClient(\n host=self.configprops.influx_host,\n port=self.configprops.influx_port\n )\n\n databases_raw = self.client.get_list_database()\n\n databases = list((i['name'] for i in databases_raw))\n\n if self.configprops.influx_database not in databases:\n self.client.create_database(self.configprops.influx_database)\n\n self.client.switch_database(self.configprops.influx_database)\n\n self.current_position = {'lat': 0, 'lon': 0}", "def __init__(self):\n ProcessingUnit.__init__(self)\n print(\" [ START ] init - Metodo Simulator Reader\")\n\n self.isConfig = False\n self.basicHeaderObj = BasicHeader(LOCALTIME)\n self.systemHeaderObj = SystemHeader()\n self.radarControllerHeaderObj = RadarControllerHeader()\n self.processingHeaderObj = ProcessingHeader()\n self.profileIndex = 2**32-1\n 
self.dataOut = Voltage()\n #code0 = numpy.array([1,1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,1,1,1,0,1,1,0,1,0,0,0,1,1,1,0,1])\n code0 = numpy.array([1,1,1,-1,1,1,-1,1,1,1,1,-1,-1,-1,1,-1,1,1,1,-1,1,1,-1,1,-1,-1,-1,1,1,1,-1,1])\n #code1 = numpy.array([1,1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,0,0,0,1,0])\n code1 = numpy.array([1,1,1,-1,1,1,-1,1,1,1,1,-1,-1,-1,1,-1,-1,-1,-1,1,-1,-1,1,-1,1,1,1,-1,-1,-1,1,-1])\n #self.Dyn_snCode = numpy.array([code0,code1])\n self.Dyn_snCode = None" ]
[ "0.67939395", "0.6719433", "0.6655605", "0.645817", "0.6346724", "0.6167076", "0.6164952", "0.5983404", "0.59604347", "0.5957381", "0.58981216", "0.58949465", "0.5830187", "0.58211017", "0.5807674", "0.580686", "0.57869667", "0.5773858", "0.57293445", "0.5729042", "0.5718009", "0.5707762", "0.5689871", "0.5689626", "0.5689014", "0.56774336", "0.5675989", "0.5663942", "0.5654883", "0.5653341", "0.5649911", "0.5638135", "0.56237376", "0.562157", "0.561437", "0.5607852", "0.5606363", "0.56060904", "0.5593325", "0.5587112", "0.5571591", "0.55681103", "0.55622447", "0.5560577", "0.5538632", "0.55255604", "0.5524799", "0.55213016", "0.55186445", "0.5513304", "0.5508603", "0.5499582", "0.549857", "0.54979795", "0.5492556", "0.5490659", "0.5486735", "0.5480154", "0.5477033", "0.54761964", "0.54709446", "0.5468749", "0.5466538", "0.5464614", "0.54619545", "0.5458302", "0.5456484", "0.54487437", "0.54465544", "0.54447275", "0.544461", "0.54361695", "0.5434169", "0.54301053", "0.54296136", "0.54290324", "0.54258126", "0.5421861", "0.5421695", "0.5421499", "0.5414311", "0.5414292", "0.5407397", "0.53998256", "0.5385327", "0.5382881", "0.53790253", "0.53768784", "0.5367298", "0.53667355", "0.5357534", "0.5352201", "0.5349196", "0.534899", "0.53488076", "0.5347905", "0.53472763", "0.53459257", "0.5344535", "0.53443426", "0.5343978" ]
0.0
-1
Function to reset instrument commands.
def reset_instrument(self):
    return self.inst.write('*RST')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _doReset(self):\n self._cmdReset()", "def reset():\n pass", "def reset():\n pass", "def reset():", "def reset():", "def reset():", "def reset(*args):", "def reset(*args):", "def reset(*args):", "def reset(self):\r\n _debug('simq03b_api.reset')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def ObsReset(self):\n handler = self.get_command_object(\"ObsReset\")\n handler()", "def reset(self, *args, **kwargs):", "def reset(self):\r\r\n self.read(\"*cls\")\r\r\n self.waitForCompletion()\r\r\n self.read(\"*RST\") # Reset and query\r\r\n self.dev.write(\"*cls\")\r\r\n while self.read(\"*OPC?\") != \"1\": time.sleep(1) # Wait until completion\r\r", "def reset():\r\n pass", "def reset(self, *args, **kwargs):\n ...", "def resetDeviceStates(self):", "def reset(self):\n self.desc.put(self.desc.pvname.split(\".\")[0])\n self.scan.put(\"Passive\")\n self.calc.put(\"0\")\n self.prec.put(\"5\")\n self.dold.put(0)\n self.doln.put(\"\")\n self.dopt.put(\"Use VAL\")\n self.flnk.put(\"0\")\n self.odly.put(0)\n self.oopt.put(\"Every Time\")\n self.outn.put(\"\")\n for letter in self.channels.read_attrs:\n channel = self.channels.__getattr__(letter)\n channel.reset()", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\n return self.set_command(\"Z\")", "async def reset(self):\n await self.set_param(\"ContinuousExposures\", 0)\n await self.set_param(\"Exposures\", 0)\n cmd = await self.send_command(\"RESETTIMING\", timeout=1)\n if not cmd.succeeded():\n self.status = ControllerStatus.ERROR\n raise ArchonError(f\"Failed sending RESETTIMING ({cmd.status.name})\")\n\n # TODO: here we should do some more checks before we say it's IDLE.\n self.status = ControllerStatus.IDLE", "def soft_reset():", "def reset(self):\n self._cmd_line = 0\n self._file_line = 0", "def reset() -> None:\n ...", "def actionReset(self):\n sys.stderr.write(\"Reset device ...\\n\")\n sys.stderr.flush()\n self.bslReset(0) #only reset", "def reset(self):\n self.at_cmd('Z')", "def reset(self):\n \n pass", "def reset(self):\n ...", "def reset(self):\n ...", "def reset(self):\n self.restart()\n self.cycles = 0", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):", "def resetTool(*args, **kwargs)->None:\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def resetSim(self):\n self.powers = []", "def reset(self) -> None:\n self.memory = self.intcode.copy()\n self.ip = 0\n self.stdout.clear()", "def reset(self, *args, **kwargs):\n pass", "def setOff(self, command):\r\n self.setDriver('ST', 0)", "def reset():\n if os.name == \"posix\": #In linux\n os.system(\"clear\")\n elif os.name == (\"ce\", \"nt\", \"dos\"): #In windows\n os.system(\"cls\")", "def reset() -> None:\n Invocation.active = {}\n Invocation.current = None # type: ignore\n Invocation.top = Invocation(None, None)\n Invocation.top._become_current() # pylint: disable=protected-access\n Invocation.up_to_date = {}\n Invocation.phony = set()\n Invocation.poisoned = set()\n Invocation.actions_count = 0\n 
Invocation.skipped_count = 0", "def reset(cls):\n\n cls._set_mode_stopped()\n TimeDisplay.reset_time(erase=True)\n TimeDisplay.show_default()\n Notes.clear()\n for callback in cls.reset_callback:\n callback()", "def reset(self):\n raise NotImplementedError", "def reset(self):\n\n\t\tself._send_message(\"RESET\", \"\\x00\")", "def reset_interrupts(self):\n\n self.read_interrupt_capture(0)\n self.read_interrupt_capture(1)\n return", "def reset(self):\n self.ram = Memory(256 * 10)\n self.stdout = ''\n self.input_ptr = 0\n self.pc = 0\n self.stack = []\n logging.debug('Reset all')", "def reset(self, *args):\n raise NotImplementedError", "def reset(self):\n self.rst.value(0) # RST on\n self.sleep_us(100) # reset impulse has to be >100 ns and <100 ms\n self.rst.value(1) # RST off\n # Defaults after reset:\n self.power = self.POWER_DOWN\n self.addressing = self.ADDRESSING_HORIZ\n self.instr = self.INSTR_BASIC\n self.display_mode = self.DISPLAY_BLANK\n self.temp_coeff = self.TEMP_COEFF_0\n self.bias = self.BIAS_1_11\n self.voltage = 3060", "def reset(self):\n\t\tself.write(\"*rst\")\n\t\tpass", "def reset(self):\n self.memory.clear()\n self.relative_base = 0\n self.input_queue.clear()\n self.instr_idx = 0", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self) -> None:", "def _reset(self):", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self):\n\n # Issue the reset command\n try:\n self.crate_resetting = True\n # Reset the FRU init status to stop attempts to read the sensors\n self.frus_inited = False\n # Wait a few seconds to allow any existing ipmitool requests to complete\n print(\"reset: Short wait before resetting (2 s)\")\n time.sleep(2.0)\n # Force the records to invalid\n print(\"reset: Force sensor read to set invalid\")\n self.read_sensors()\n print(\"reset: Triggering records to scan\")\n self.scan_list.interrupt()\n self.mch_comms.connected = False\n # Stop the ipmitool session. System will reconnect on restart\n self.mch_comms.ipmitool_shell.terminate()\n time.sleep(2.0)\n #print(\"reset: Killing ipmitool shell process\")\n self.mch_comms.ipmitool_shell.kill()\n self.mch_comms.ipmitool_shell = None\n # Stop the reader thread\n #print(\"reset: Stopping thread\")\n self.mch_comms.stop = True\n # Wait for the thread to stop\n self.mch_comms.t.join()\n #print(\"reset: Thread stopped\")\n self.mch_comms.t = None\n # Allow the thread to restart\n self.mch_comms.stop = False\n #print(\"reset: Exiting \")\n # Reset the crate\n print(\"reset: Resetting crate now\")\n self.mch_comms.call_ipmitool_direct_command([\"raw\", \"0x06\", \"0x03\"])\n\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n # Be silent. 
We expect this command to timeout.\n print('reset: reset command sent')\n pass\n\n # Reconnect to the crate\n print('reset: reconnecting')\n self.mch_comms.ipmitool_shell_reconnect()", "def reset():\n for i in flags.keys(): flags[i] = 0\n for i in meta.keys(): meta[i] = \"\"\n return (None, \"CON\")", "def _clear(self):\n self._commands = []\n self._activeMacros = []\n self._index = 0\n self._emitSignals()\n self._inUndoRedo = False", "def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()", "def reset(self):\n # The camera will give no response to this command\n self._serial_io('\\x55\\x99\\x66\\x11', None)\n while True:\n try:\n self.system_state = 0x11\n if self.system_state == 0x11:\n break\n except CygnetExc:\n time.sleep(.2)\n while True:\n self.system_state = 0x12\n time.sleep(.2)\n if self.system_state == 0x16:\n break", "def reset(self):\n return self._send_command('reset')", "def interactive_reset(self):\n # Set the initial state\n\n self.dataset.reset()\n\n self.current_turn = 0\n self.current_function = None\n self.query = \"\"\n self.query_vector = np.ones(self.dataset.getVocabularySize())\n self.keywords={\"provided\":set(), \"rejected\":set()}\n self.functions_rejected=set()\n self.result_index = 0\n self.dont_know = False\n\n self.history={\n 'system_action': {\"action\": self.SYSTEM_OPENING_ACT},\n 'user_action': None\n }", "def _reset(self) -> None:", "def _reset(self) -> None:", "def reset(self):\n self.reset_dev_via_serial(self.forced_reset_timeout)", "def hard_reset() -> NoReturn:", "def command_clearterm():\n subprocess.call(\"reset\")", "def reset_terminal():\n if not mswin:\n subprocess.call([\"tset\", \"-c\"])", "def reset(self):\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def reset(self):\n self.write_to_serial('*RST')", "def reset(self):\n self.command_stack = []\n self.refresh_table_asap = False\n\n self.scripts = set()\n\n # TODO: Implement\n # make sure to reset the connection state in the event that we were\n # watching something\n # if self.watching and self.connection:\n # try:\n # # call this manually since our unwatch or\n # # immediate_execute_command methods can call reset()\n # self.connection.send_command('UNWATCH')\n # self.connection.read_response()\n # except ConnectionError:\n # # disconnect will also remove any previous WATCHes\n # self.connection.disconnect()\n\n # clean up the other instance attributes\n self.watching = False\n self.explicit_transaction = False\n\n # TODO: Implement\n # we can safely return the connection to the pool here since we're\n # sure we're no longer WATCHing anything\n # if self.connection:\n # self.connection_pool.release(self.connection)\n # self.connection = None", "def reset(self):\n GPIO.output(self.reset_pin, GPIO.LOW)\n time.sleep(0.1)\n GPIO.output(self.reset_pin, GPIO.HIGH)\n time.sleep(0.1)\n\n if self.inky_version == 2:\n self._send_command(_V2_RESET)\n\n self._busy_wait()", "def _doResetMemory(self):\n self._cmdClearMemory()\n time.sleep(1)\n self._cmdResetParameters()\n time.sleep(1)", "def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)", "def reset(self):\n\t\tpass", "def reset(targets):", "def reset(self):\n\n self.memory = self.program.copy()\n self.output = 0\n self.stop_code = 0\n self.code_position = 0\n self.input_position = 0\n self.input_parameters = []", "def reset(self):\n\n # Deactivate the card\n try:\n result = self.mch_comms.call_ipmitool_command([\"picmg\", 
\"deactivate\", (str(self.slot + PICMG_SLOT_OFFSET))])\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n print(\"reset: caught TimeoutExpired exception: {}\".format(e))\n\n # TODO: Add a resetting status here to allow other reads to wait\n # See DIAG-68.\n\n # Wait for the card to shut down\n time.sleep(2.0)\n\n # Activate the card\n try:\n result = self.mch_comms.call_ipmitool_command([\"picmg\", \"activate\", str(self.slot + PICMG_SLOT_OFFSET)])\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n print(\"reset: caught TimeoutExpired exception: {}\".format(e))", "def _reset(self):\n pass", "def reset(self, **kwargs):\n # '_reset' depends on the 'backend'\n self._reset(**kwargs)", "def reset(self):\n self.state = [\n ['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R'],\n ['P'] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n ['p'] * 8,\n ['r', 'n', 'b', 'q', 'k', 'b', 'n', 'r']\n ]", "def reset(self):\n Simulation.reset(self)", "def resetChain(self, on_execute=False, minimal_reset=False):\n print 'resetting chain'\n self.cmd_chain = ''\n self.exec_time = 0\n if (on_execute and self.sim_speed_change):\n if minimal_reset:\n self.state = {k: v for k,v in self.sim_state.iteritems()}\n else:\n self.state['slope'] = self.sim_state['slope']\n self.state['microstep'] = self.sim_state['microstep']\n self.updateSpeeds()\n self.getCurPort()\n self.getPlungerPos()\n self.sim_speed_change = False\n self.updateSimState()\n print 'chain reset'", "def reset_all(self):\n self.vna.write(reset(self.model))\n self.using_correction = False\n return 0", "def reset(self):\n # replace with your code\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass" ]
[ "0.6838344", "0.6738413", "0.6738413", "0.671417", "0.671417", "0.671417", "0.66459775", "0.66459775", "0.66459775", "0.6641422", "0.6612196", "0.6542922", "0.6525732", "0.65187824", "0.6515516", "0.6514655", "0.649084", "0.646241", "0.646241", "0.646241", "0.646241", "0.64545614", "0.6440721", "0.64352864", "0.64302737", "0.64103454", "0.6408707", "0.6407169", "0.64011025", "0.6399784", "0.6399784", "0.6386161", "0.63706577", "0.63706577", "0.63706577", "0.63706577", "0.6362519", "0.63597834", "0.63597834", "0.63538796", "0.6337123", "0.63075376", "0.63022953", "0.6300691", "0.6282251", "0.6279326", "0.62719923", "0.6251863", "0.62428313", "0.62367105", "0.6233337", "0.62297046", "0.6229041", "0.6220998", "0.62147945", "0.62147945", "0.62147945", "0.6211326", "0.61949366", "0.61949366", "0.61949366", "0.61949366", "0.61949366", "0.61949366", "0.61949366", "0.61949366", "0.6185212", "0.6175522", "0.6173768", "0.61705184", "0.616385", "0.6163046", "0.615542", "0.6142134", "0.6142134", "0.6128273", "0.61159736", "0.61096066", "0.61085826", "0.6103625", "0.6100046", "0.6094744", "0.60750115", "0.6073941", "0.60653836", "0.6058953", "0.60500836", "0.6044787", "0.60441256", "0.6040176", "0.6030446", "0.60256183", "0.60217935", "0.6018534", "0.6016344", "0.599674", "0.599304", "0.599304", "0.599304", "0.599304" ]
0.728747
0
Function to get the instrument ID.
def get_id(self):
    try:
        return self.inst.query('*IDN?')[:36]
    except errors.VisaIOError as e:
        logger.warning(e)
        return 'Device not connected.'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instrument(instrument_id=\"ncnr.refl\"):\n instrument = lookup_instrument(instrument_id)\n return instrument.get_definition()", "def instrID(self):\n return self.query('*IDN?')", "def getIdent (self) :\n return self.id", "def instrumentLookup(self):\n try:\n return self.instrument_df[\n self.instrument_df.tradingsymbol == self._ticker\n ].instrument_token.values[0]\n except:\n return -1", "def get_identity(self):\n return self.query_serial('*IDN?')", "def get_asic_id(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['tracking_id'].attrs['asic_id']\n\t\texcept:\n\t\t\treturn None\n\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True", "def getIdentification(self):\r\n self._update('getIdentification')\r\n return self.supervisord.options.identifier", "def get_identity(self):\n return GetIdentity(*self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_GET_IDENTITY, (), '', '8s 8s c 3B 3B H'))", "def getId(self):\n return self.identifier", "def getID(self):\r\n return self._interface.UID", "def get_identifier(self) -> str:\n return self.identifier", "def get_current_record_id(self):\n url = self.selenium.get_location()\n for part in url.split(\"/\"):\n oid_match = re.match(OID_REGEX, part)\n if oid_match is not None:\n return oid_match.group(2)\n raise AssertionError(\"Could not parse record id from url: {}\".format(url))", "def getID():", "def getSampleId(self):\n return self.getDbRecord().getColumnValue(SAMPLE_ID_COLUMN)", "def get_instrument(self, header):\n return self.instrument.upper()", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n 
return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")", "def id(self) -> str:\n return pulumi.get(self, \"id\")" ]
[ "0.7232644", "0.7094874", "0.6803308", "0.6676362", "0.66161686", "0.6494307", "0.64633137", "0.64486635", "0.6435335", "0.64340276", "0.6428906", "0.6420143", "0.6411098", "0.6404091", "0.63868725", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493", "0.63842493" ]
0.6533145
5
Function to turn the Keithley on.
def power_on(self): return self.inst.write(':OUTP ON')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_eht_on(self):\n raise NotImplementedError", "def friewallOn():\n pass", "def turn_on(self, **kwargs) -> None:\n self.heater.turn_on()", "def turn_on(self):\n self._interrupt_flash()\n if not self.on:\n GPIO.output(self.pin, GPIO.HIGH)\n self.on = True", "def turnOn(self):\n self.off = False\n self.turnOnAnimation()", "def lightning_turnon(self):\n self.turnOn()", "def turn_on(self, **kwargs):\n self._is_on = True", "def _turn_on(self):\n self._turn_display('ON')", "def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light", "def turn_on(self, **kwargs: Any) -> None:\n with self._wemo_call_wrapper(\"turn on\"):\n self.wemo.on()", "def turn_on(self):\n self._lms.query(self._id, 'power', '1')\n self.update_ha_state()", "def turnLightingSystemOn():\n dislin.light('ON')", "def turnOn(self):\n self.write('E;O1;E;')\n return self.output()", "def turn_on(self, **kwargs):\n self.smartplug.turn_on()", "def turn_on(self, **kwargs):\n set_sonoff_state(self._host, \"on\")\n self._state = True", "def set_light_on(self):\r\n self._light = \"ON\"", "def turn_on(self, **kwargs: Any) -> None:\n self._set_light(ON_STATE)", "def kickerOn(self):\n self.sKicker.set(.3 if not config.isPracticeBot else 0)\n if not self.lastKicker:\n self.datalogger.event(\"Fire!\")\n self.lastKicker = True", "def jumped_on(self):\r\n pass", "def set_light_on(self):\n self._light = \"ON\"", "def enable(self) -> None:", "def turn_on(\n self,\n speed: str = None,\n percentage: int = None,\n preset_mode: str = None,\n **kwargs,\n ) -> None:\n self.wink.set_state(True, speed)", "def turn_on(self, **kwargs):\n self.enabled = self.fritz_box.set_call_forwarding(self.uid, 1)", "def power_on(self):\n pass", "def turn_on(self):\n self._remote.power(1)", "def enable(self):", "def on(self):\n self._set_state(on=True)", "def turn_on(self) -> None:\n self._monoprice.set_power(self._zone_id, True)", "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)", "def turn_on(self):\n self._state = True\n self.write_state(bytes([9]))\n self.schedule_update_ha_state()", "def enable(self):\n pass", "def _force_on(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'on')", "def strike_on(self):\n self._set_print_mode(self.STRIKE_MASK)", "def enable(self):\n self.enabled = True", "def enable(self):\n self.enabled = True", "def enable():\n boutonPierre[\"state\"] = \"normal\"\n boutonFeuille[\"state\"] = \"normal\"\n boutonCiseaux[\"state\"] = \"normal\"", "def toggle(self):\n self._interrupt_flash()\n GPIO.output(self.pin, GPIO.LOW if self.on else GPIO.HIGH)\n self.on = not self.on", "def _on_someone_arrive(self, event_name: str, data: dict, kwargs: dict) -> None:\n self.log(\"Someone came home; turning on the switch\")\n\n self.toggle(state=\"on\")", "def switch_on(self,name):\n self.circles[name].switch_on()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=1 WHERE target=%s\"\"\", (name,))", "def cambiar_celeste(self):\r\n self.celeste.setDisabled(True)", "def _led_enable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.LOW)", "def 
affection_status_switch_on(self):\n self._affection_status_switch = False", "def turn_on(self, **kwargs):\n self.set_graceful_lock(True)\n self.robot.start_cleaning()", "def enable_eye_dome_lighting(self):\n self._render_passes.enable_edl_pass()", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def turn_on(self, **kwargs):\n self._state = True\n\n # Make initial update\n self.update_switch(self._initial_transition)\n\n self.schedule_update_ha_state()", "def turn_on(self, **kwargs):\n self._send_command(\"turn_on\")", "def turn_on(self) -> None:\n self._state = self._player.turn_on()", "def turn_on(self, **kwargs: Any) -> None:\n if self.type == \"on_off\":\n _LOGGING.debug(\"Starting all torrents\")\n self._tm_client.api.start_torrents()\n elif self.type == \"turtle_mode\":\n _LOGGING.debug(\"Turning Turtle Mode of Transmission on\")\n self._tm_client.api.set_alt_speed_enabled(True)\n self._tm_client.api.update()", "def on(config: dict):\n switch_device(config, config[\"inching\"], \"on\")", "def chase_laser(self):\r\n print(\"Meeeeow\\n\")", "def enable(self):\r\n self.update(enabled=True)", "def activate(widg, self):\n widg.set_sensitive(True)", "def kickerOff(self):\n self.sKicker.set(.6 if not config.isPracticeBot else .3)\n self.lastKicker = False", "def turn_on(self, **kwargs):\n self._is_on = True\n self.schedule_update_ha_state()\n self.hass.data[ZIGATE_DOMAIN].action_onoff(self._device.addr,\n self._endpoint,\n 1)", "async def _hardcore_setheist(self, ctx):\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n\r\n if config[\"Hardcore\"]:\r\n config[\"Hardcore\"] = False\r\n msg = \"Hardcore mode now OFF.\"\r\n else:\r\n config[\"Hardcore\"] = True\r\n msg = \"Hardcore mode now ON! 
**Warning** death will result in credit **and chip wipe**.\"\r\n await self.thief.config.guild(guild).Config.set(config)\r\n await ctx.send(msg)", "def _set_villain(self):\n\t\tself.villain_one = donkey.Donkey(100 , constants.THREE_Y,0,500)\n\t\tself.active_sprite_list.add(self.villain_one)", "def enable(self):\n self._enabled = True", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def autoExposureChk(self, state):\n if state == Qt.Checked and self.kinect.kinectConnected == True:\n self.kinect.toggleExposure(True)\n else:\n self.kinect.toggleExposure(False)", "def set_is_watering(valve: Valve, value: bool) -> None:\n valve.is_watering = value", "def turn_on(self):\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_HOME\n )\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "def event11512210():\n header(11512210, 0)\n knight, is_active = define_args('ii')\n if_event_flag_on(1, is_active)\n if_entity_health_less_than_or_equal(1, knight, 0.1)\n if_condition_true(0, 1)\n chr.disable_gravity(knight)\n chr.disable_collision(knight)\n chr.disable_ai(knight)\n chr.replan_ai(knight)\n wait(2.5)\n # Skipping the fade-out, they just get obliterated usually.\n anim.force_animation(knight, 1201, do_not_wait_for_transition=True, wait_for_completion=True)\n chr.enable_ai(knight)\n chr.disable(knight)\n chr.enable_gravity(knight)\n chr.enable_collision(knight)\n chr.set_special_effect(knight, 3231)\n flag.disable(is_active)\n restart()", "def crowned(self): # called when this piece has become a 'King'\r\n \r\n self.isKing = True", "def is_on(self):\n return False", "def turn_on(self, **kwargs):\n self._brightness = 100\n self._state = 'on'\n #self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)\n #self._light.turn_on()\n _LOGGER.info(\"turn_on() is called\")", "def turnLightOn(ID):\n dislin.litmod(ID, 'ON')", "def ON(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.PIN, GPIO.OUT)\n GPIO.output(self.PIN, True)\n self.STATUS = \"ON\"", "def turn_on(self):\n self.write(\"OUT1\\n\")", "def force_switch_on(self):\n self.turn_on_modem()", "def switch_on(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.HIGH)", "def _set_trace_on(trace_on):\n AceQLHttpApi.set_trace_on(trace_on)", "def is_on(self):\n pass", "def typhoon():\n pass", "async def async_turn_on(self, **kwargs: Any) -> None:\n run_time = self._manual_preset_runtime / 60\n if run_time == 0:\n _LOGGER.warning(\n \"Switch %s manual preset runtime is 0, watering has defaulted to %s minutes. 
Set the manual run time on your device or please specify number of minutes using the bhyve.start_watering service\",\n self._device_name,\n int(DEFAULT_MANUAL_RUNTIME.seconds / 60),\n )\n run_time = 5\n\n await self.start_watering(run_time)", "def enable(self):\n self.SetInteractive(1)", "def turn_off(self):\n self._interrupt_flash()\n if self.on:\n GPIO.output(self.pin, GPIO.LOW)\n self.on = False", "def light_on(self, pin='D13'):\n self.light_set(pin, '1')", "def flicker_lights(self):\n print 'Lights Set'", "async def wink(self, ctx):\n await ctx.send('wonk')", "def _set_villain(self):\n\t\tself.villain_one = donkey.Donkey(100 , constants.THREE_Y,0,500)\n\t\tself.active_sprite_list.add(self.villain_one)\n\n\t\tself.villain_two = donkey.Donkey(900, constants.TWO_Y,700,950)\n\t\tself.active_sprite_list.add(self.villain_two)", "def turn_on(self, **kwargs) -> None:\n _LOGGER.debug(\n \"SynoDSMSurveillanceHomeModeToggle.turn_on(%s)\",\n self._api.information.serial,\n )\n self._api.surveillance_station.set_home_mode(True)", "def set_light_off(self):\r\n self._light = \"OFF\"", "async def light(self) -> None:\n self.lit = True\n await self.run_command(\"miner fault_light on\")\n print(\"light \" + self.ip)", "def cool_on():\n global PAUSED\n print(\"Temp is high; toggling cooling on\")\n GPIO.output(HEATPIN, RELAYOFF)\n GPIO.output(FANPIN, RELAYOFF)\n GPIO.output(COOLPIN, RELAYON)\n while (all_temps_avg > TEMPMID or min_temp > TEMPHIGH) and (PAUSED == False):\n time.sleep(10)", "def turn_on(self, **kwargs):\n setattr(self.resource, self.variable, True)", "def on(self):\n self.state = \"ON\"\n logger.info(\"Turning on %s lamp at %s port %s plug %s\" % (self.name,\n self.host,\n self.port,\n self.plug))\n return self.send_cmd(\"pset %s 1\" % self.plug)", "def Enabled(self) -> bool:", "def toggle(self, **kwargs):\n self.on = False if self.on else True", "def swint(self) -> None:", "def turn_on(self, **kwargs) -> None:\n self._device.writeCharacteristic(self._handle, b'\\x00', True)\n self._state = True\n self.schedule_update_ha_state()", "def poweron(self):\n raise NotImplementedError()", "async def async_turn_on(self) -> None:\n self._zone.power = True", "def setOn(self, command):\r\n self.setDriver('ST', 1)", "def toggle(self) -> None:\n ...", "def turnLightingSystemOff():\n dislin.light('OFF')", "def use_tonce(self):\r\n return self.config.get_bool(\"gox\", \"use_tonce\")", "def on(self):", "def louder():\n try:\n ttsEng.louder()\n except Exception, e:\n logging.error(e)", "def toggle(self) -> None:", "def toggle(self) -> None:" ]
[ "0.7135671", "0.663822", "0.6409372", "0.6337713", "0.6281222", "0.6265482", "0.6240381", "0.6208941", "0.61929655", "0.6179498", "0.61754537", "0.6157566", "0.6145475", "0.6067093", "0.60128975", "0.5974138", "0.59354514", "0.58733046", "0.5868544", "0.5858084", "0.58495027", "0.5823968", "0.5806659", "0.5800267", "0.57927805", "0.5792337", "0.57690996", "0.5768151", "0.57659066", "0.57487017", "0.57148355", "0.5617545", "0.56109667", "0.5596655", "0.5596655", "0.55893147", "0.5566978", "0.5559515", "0.55481017", "0.5543961", "0.5543477", "0.554064", "0.5509767", "0.5506233", "0.54984456", "0.5497937", "0.549219", "0.5437722", "0.54357517", "0.54351187", "0.5405446", "0.5403108", "0.5400362", "0.5400325", "0.5392974", "0.53923166", "0.5390958", "0.53849876", "0.5382113", "0.53817034", "0.5379975", "0.5379541", "0.5374892", "0.5365341", "0.5361357", "0.5336934", "0.5330876", "0.53219485", "0.53179175", "0.53128344", "0.53061175", "0.5287416", "0.5279834", "0.5265169", "0.52639294", "0.52625394", "0.5257089", "0.52453774", "0.5238001", "0.52355355", "0.5229599", "0.52264994", "0.5220914", "0.5215548", "0.5211761", "0.5211312", "0.52064455", "0.5201201", "0.51947194", "0.51937467", "0.5190498", "0.5186769", "0.5169876", "0.5167868", "0.5167552", "0.5165297", "0.5163305", "0.51597756", "0.5156019", "0.5153335", "0.5153335" ]
0.0
-1
Function to turn the Keithley off.
def power_off(self): return self.inst.write(':OUTP OFF')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_off(self, **kwargs) -> None:\n self.wink.set_state(False)", "def turn_eht_off(self):\n raise NotImplementedError", "def turn_off(self, **kwargs: Any) -> None:\n with self._wemo_call_wrapper(\"turn off\"):\n self.wemo.off()", "def turn_off(self):\n GPIO.output(self.gpio, False) # turn off light", "def turn_off(self, **kwargs):\n self._is_on = False", "def turnOff(self):\n self.write(\"E;O0;E;\")\n return self.output()", "def _turn_off(self):\n self._turn_display('OFF')", "def turn_off(self):\n self._interrupt_flash()\n if self.on:\n GPIO.output(self.pin, GPIO.LOW)\n self.on = False", "def turn_off(self, **kwargs):\n set_sonoff_state(self._host, \"off\")\n self._state = False", "def turn_off(self, **kwargs):\n self.heater.turn_off()", "def turnOff(self):\n self.off = True\n self.turnOffAnimation()", "def lightning_turnoff(self):\n self.turnOff()", "def turn_off(self, **kwargs):\n self.smartplug.turn_off()", "def set_light_off(self):\r\n self._light = \"OFF\"", "def _disable(self):\n self.enabled = False", "def turnLightingSystemOff():\n dislin.light('OFF')", "def turn_off(self, **kwargs):\n self.enabled = self.fritz_box.set_call_forwarding(self.uid, 0)", "def deactivate(widg, self):\n widg.set_sensitive(False)", "def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)", "def _force_off(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'off')", "def disable(self):\n self.enabled = False", "def set_light_off(self):\n self._light = \"OFF\"", "def turn_test_mode_off_by_default(test_mode_off):", "def off(self):\n self._set_state(on=False)", "def turn_off(self) -> None:\n self._monoprice.set_power(self._zone_id, False)", "def disable(self) -> None:", "def disable(self):", "def disable(self):\n self._enabled = False", "def disable():\n Plotter.enable = False", "def turn_off(self):\n self._state = False\n self.write_state(bytes([1]))\n self.schedule_update_ha_state()", "def turn_off(self, **kwargs: Any) -> None:\n self._set_light(OFF_STATE)", "def disable(self):\n pass", "def turn_off(self):\n self.handleCommand(1)\n self._state = STATE_OFF", "def disable_emission(self):\n self.ask(\"LASER=OFF\")\n self.ask(\"LASER=ON\") # unlocks emission button, does NOT start emission!", "def turn_off(self, **kwargs: Any) -> None:\n self._light.turn_off()", "def strike_off(self):\n self._unset_print_mode(self.STRIKE_MASK)", "def _nixie_disable():\n # type: () -> None\n GPIO.output(NIXIE_nOE, GPIO.HIGH)", "def turn_off(self, **kwargs):\n self._state = False\n self.schedule_update_ha_state()\n self._hs_color = None\n self._attributes[\"hs_color\"] = self._hs_color\n self._attributes[\"brightness\"] = None", "def turn_off(self):\n self.robot.stop_simulation()", "def noCheck():\n dislin.nochek()", "def ToggleOff(self):\n for s in self.sensors:\n self.gSetpt[s.GetID()].Disable()\n\n self.top_sizer.Layout()\n print(\"Toggle off\")", "def off(config: dict):\n switch_device(config, config[\"inching\"], \"off\")", "def cambiar_celeste(self):\r\n self.celeste.setDisabled(True)", "def off(self):\n if self._is_on:\n self._pwms.disable(self._pin_index)\n 
self._is_on = False", "def off() -> None:\n\t\tif Logging.logLevel != LogLevel.OFF:\n\t\t\tLogging.lastLogLevel = Logging.logLevel\n\t\t\tLogging.setLogLevel(LogLevel.OFF)", "def turn_off(self):\n self.set_pin(0, -1)\n self.set_pin(1, -1)\n self.set_pin(2, -1)", "def turn_off(self):\n print(\"Turning the lights off\")\n self.led.all_off()\n self.client.publish(STATE_TOPIC, OFF) #publish", "def turn_off(self, **kwargs):\n setattr(self.resource, self.variable, False)", "def turn_off(self, **kwargs: Any) -> None:\n self._device.power_on = False\n _LOGGER.debug(\"Turn off light %s\", self._device.ip)", "def turn_off(self, **kwargs):\n #self._light.turn_off()\n self._brightness = 0\n self._state = 'off'\n _LOGGER.info(\"turn_off() is called\")", "def disable_tee(self):\n self._tee = False", "def disable_everything(self):\n zhinst.utils.disable_everything(self.daq, self.device_id)\n self.log.info(\"Disabled everything.\")", "def turn_off(self, **kwargs):\n self._send_command(\"turn_off\")", "def off(self):", "def disable(self):\n return self.enable(False)", "def turn_off(self, **kwargs):\n self._light.set_color(0, 0, 0, 0)", "def disable(self):\n self.write(\":OUTPUT OFF;\")", "def _led_disable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.HIGH)", "def off_hook(self) -> None:", "def disable_tk(self):\n self.clear_inputhook()", "def disable(self):\r\n self.update(enabled=False)", "def turn_off(self, **kwargs):\n self._client.set_brightness(self._id, 0)", "def set_deformer_off(deformer):\n\n # sets attribute to zero\n try:\n cmds.setAttr(\"{}.envelope\".format(deformer), 0)\n\n # if connections are found on the attribute then mute node is used\n except RuntimeError:\n mute_node = cmds.mute(\"{}.envelope\".format(deformer), force=True)[0]\n cmds.setAttr(\"{}.hold\".format(mute_node), 0)", "def turn_aux_heat_off(self):\n self.set_operation_mode(STATE_HEAT)", "def turn_off(self, **kwargs):\n self.vacuum.stop()\n self.vacuum.home()", "def turn_off(self):\n self.write(\"OUT0\\n\")", "def turn_off(self, **kwargs):\n self.robot.pause_cleaning()\n time.sleep(1)\n self.robot.send_to_base()", "def ga_print_off():\n GA_Printer._off()\n return", "def disable(self):\n self.direction = None # remove direction\n self.state['enabled'] = False # reset states\n self.state['return'] = False\n self.return_path = None # remove path\n if self.state['blue']:\n self.stop_blue_state(resume_audio=False)\n self.image, _ = self.norm_images.get_image() # reset image\n self.sound_manager.stop()", "def turn_off(self, **kwargs: Any) -> None:\n if self.type == \"on_off\":\n _LOGGING.debug(\"Stopping all torrents\")\n self._tm_client.api.stop_torrents()\n if self.type == \"turtle_mode\":\n _LOGGING.debug(\"Turning Turtle Mode of Transmission off\")\n self._tm_client.api.set_alt_speed_enabled(False)\n self._tm_client.api.update()", "def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)", "def turn_off(self, **kwargs):\n _LOGGER.error(\"DALI TURN OFF\")\n self._state = False\n\n url = self.urlx + '/toggle'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = json_data['state']\n\n self._dimmer = 0\n\n 
self._state = state == 'on'", "def disable(self, is_top_level=True):\n self.enabled = False", "def protection_off(self, c):\n self.protection_state = False\n self.change_shutter_state(self, False)", "def disable():\n ret = _LIB.oled_click_disable()\n if ret < 0:\n raise Exception(\"oled click disable failed\")", "async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)", "def unsetKineticLaw(self):\n return _libsbml.Reaction_unsetKineticLaw(self)", "def _reset(self):\n self._interface.set('fw_wp_en', 'off')", "def __disable__(self) -> None:\n pass", "def fast_off(self, *args, **kwargs):\n return self.set(0,0,0,fade=False)", "def disable_modulation(self):\n self.write(\":OUTPUT:MOD OFF;\")\n self.write(\":lfo:stat off;\")", "def disable(self):\n self.SetInteractive(0)", "def off(self):\n if self._state or (settings.log_state_of_switched_off_managers and self._state is None):\n if self._hidden:\n self.log_state_change('H')\n else:\n self.log_state_change('-')\n self._state = False", "def off(self):\n for light in self.all:\n GPIO.output(light, 0)", "def disable_eye_dome_lighting(self):\n self._render_passes.disable_edl_pass()", "def turn_off(self):\n if self._module_type == NA_VALVE:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id,\n self._room_id,\n STATE_NETATMO_MANUAL,\n DEFAULT_MIN_TEMP,\n )\n elif self.hvac_mode != HVAC_MODE_OFF:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_OFF\n )\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "def turn_off(self, **kwargs):\n _LOGGER.debug(\"Turning off Motion Detection \")\n self.data.set_camera_recording(self._camera_id, \"never\")", "def DisableByRunIf(self):\n self.run_if = 'False'", "def off_all(self):\n self._set_status(\"off\", \"11111111\")", "def turn_off(self) -> None:\n self._get_chromecast().quit_app()", "def switch_off(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.LOW)", "def kickerOff(self):\n self.sKicker.set(.6 if not config.isPracticeBot else .3)\n self.lastKicker = False", "def all_off():\n print(\"Climate is within set parameters; toggling systems off if any are on\")\n GPIO.output(HEATPIN, RELAYOFF)\n GPIO.output(COOLPIN, RELAYOFF)\n GPIO.output(FANPIN, RELAYOFF)\n time.sleep(30)", "def reset_energizer_flag(self): \r\n self.energizer_flag = False", "def sm_output_off(self):\n self.sm.output_off()", "def disable_detector():\n global enable_detector, enable_detection, detector\n\n detector = None\n\n if detector is None:\n print(\"Detector stopped...\")\n enable_detection = False\n enable_detector = ''\n\n return render_settings_view()", "def allOff():\n # Get/set special slice IDs\n root_xid = bwlimit.get_xid(\"root\")\n default_xid = bwlimit.get_xid(\"default\")\n kernelhtbs = gethtbs(root_xid, default_xid)\n if len(kernelhtbs):\n logger.log(\"bwmon: Disabling all running HTBs.\")\n for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)", "def _disable(self):\n self.debug_log(\"Disabling...\")\n self._unregister_handlers()", "def switch_off(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def off_switch(self):\n self._switch_callback = None", "def light_off(self, pin='D13'):\n self.light_set(pin, '0')" ]
[ "0.712389", "0.68947697", "0.67406696", "0.6689545", "0.66849595", "0.6672334", "0.66197157", "0.6614438", "0.6604366", "0.6602539", "0.65976703", "0.6577364", "0.6537614", "0.6469053", "0.6457736", "0.64300936", "0.6429341", "0.6427579", "0.64250124", "0.6411914", "0.6404472", "0.64029884", "0.6367199", "0.6361038", "0.6353726", "0.6352755", "0.62896115", "0.6281002", "0.62652487", "0.62551194", "0.62506765", "0.62445503", "0.62266296", "0.6208696", "0.6200426", "0.619271", "0.6176504", "0.6171783", "0.61629534", "0.6142468", "0.61391985", "0.612393", "0.61192477", "0.61183304", "0.6115841", "0.61142135", "0.60941863", "0.606819", "0.60663694", "0.6047353", "0.6045856", "0.60421693", "0.60421306", "0.60315204", "0.6015151", "0.6010822", "0.599431", "0.5993763", "0.5973073", "0.59706455", "0.5953447", "0.59492874", "0.59474856", "0.59364116", "0.5933343", "0.5929513", "0.5929355", "0.59272605", "0.5921342", "0.592055", "0.5914935", "0.5914456", "0.59130657", "0.59008414", "0.5893839", "0.588518", "0.58822846", "0.5876816", "0.5875347", "0.5860664", "0.5858609", "0.5854567", "0.5850794", "0.58489084", "0.584523", "0.5835427", "0.5833088", "0.583307", "0.582332", "0.5807538", "0.5790867", "0.57827944", "0.57749224", "0.5774477", "0.577238", "0.5770414", "0.57693565", "0.57532376", "0.5748311", "0.5745441", "0.5734378" ]
0.0
-1
Function to select panel.
def select_panel(self):
    radio_btn = self.sender()
    if radio_btn.isChecked():
        term = radio_btn.text()[:-9]
        return self.inst.write(f':ROUT:TERM {term.upper()}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_panel(self, panel_id):\n return self.panels.get(panel_id, None)", "def get_active_panel(cls):\n active_panel = None\n panel_list = pm.getPanel(type='modelPanel')\n for panel in panel_list:\n if pm.modelEditor(panel, q=1, av=1):\n active_panel = panel\n break\n\n return active_panel", "def SelectPresentation(self, event):\n pass", "def show(self):\n self.window.run_command(\"show_panel\", {\"panel\": self.full_name})", "def onMouseDown(self, event):\n # determine where mouse is\n (worldX, worldY) = anwp.sl.engine.screenToWorld(event.pos[0], event.pos[1])\n sim = self.world.checkPoint(worldX, worldY)\n \n # if selecting nothing, remove panel and selector\n if sim == None:\n self.onSelectNoSim()\n elif sim.type == 'SystemEntity':\n self.onSelectSystemSim(sim)\n elif sim.type == 'TradeEntity':\n pass\n elif sim.type == 'ShipyardEntity':\n self.onSelectShipyardSim(sim)\n elif sim.type == 'ArmadaEntity':\n self.onSelectArmadaSim(sim)\n elif sim.type == 'ArmyEntity':\n self.onSelectArmySim(sim)\n elif sim.type == 'MilitaryInstEntity':\n self.onSelectMilitaryInstSim(sim)", "def on_Panel_select_page(self, widget):\n try:\n funcioneshab.listadonumhab()\n except:\n print(\"error botón cliente barra herramientas\")", "def getFilePanel(self):\n filepanel = None\n\n for fp in self.Parent.Children:\n if isinstance(fp, FileSelectPanel):\n filepanel = fp\n break\n return filepanel", "def panels(self, request, panel_list, group):\n return panel_list", "def get_panel(self, event_type):\n self._auto_discover()\n if event_type in self.__class__._panels:\n return self.__class__._panels[event_type]\n raise exceptions.PanelDoesNotExist(\"Panel '%s' does not exist\" % event_type)", "def 选择项目(self, n): # real signature unknown; restored from __doc__\n return self.Select(n)", "def panel(*args, control: bool=True, copy: AnyStr=\"\", createString: bool=True, defineTemplate:\n AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", editString: bool=True, exists: bool=True,\n init: bool=True, isUnique: bool=True, label: Union[AnyStr, bool]=\"\",\n menuBarRepeatLast: bool=True, menuBarVisible: bool=True, needsInit: bool=True,\n parent: AnyStr=\"\", popupMenuProcedure: Union[Script, bool]=None, replacePanel:\n AnyStr=\"\", tearOff: bool=True, tearOffCopy: AnyStr=\"\", tearOffRestore: bool=True,\n unParent: bool=True, useTemplate: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass", "def tabSelected(self):", "def tabSelected(self):", "def selectedWidget(self, p_int): # real signature unknown; restored from __doc__\n pass", "def getPanel(*args, allConfigs: bool=True, allPanels: bool=True, allScriptedTypes: bool=True,\n allTypes: bool=True, atPosition: List[int, int]=None, configWithLabel: AnyStr=\"\",\n containing: AnyStr=\"\", invisiblePanels: bool=True, scriptType: AnyStr=\"\", type:\n AnyStr=\"\", typeOf: AnyStr=\"\", underPointer: bool=True, visiblePanels: bool=True,\n withFocus: bool=True, withLabel: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def get_panels(config):\n\n task = TaskPanels(config)\n task.execute()\n\n task = TaskPanelsMenu(config)\n task.execute()\n\n logging.info(\"Panels creation finished!\")", "def load_panelapp_panel(adapter, panel_id=None, institute=\"cust000\", confidence=\"green\"):\n panel_ids = [panel_id]\n\n if not panel_id:\n LOG.info(\"Fetching all panel app panels\")\n panel_ids = _panelapp_panel_ids()\n\n for _ in panel_ids:\n parsed_panel = _parse_panelapp_panel(adapter, _, institute, confidence)\n\n if len(parsed_panel[\"genes\"]) == 0:\n 
LOG.warning(\"Panel %s is missing genes. Skipping.\", parsed_panel[\"display_name\"])\n continue\n\n try:\n adapter.load_panel(parsed_panel=parsed_panel, replace=True)\n except Exception as err:\n raise err", "def draw_select_pane(self):\n\t\tpane = SelectScreen.draw_select_pane(self)\n\t\ttext_font = font.Font(\"./fonts/FreeSansBold.ttf\", 28)\n\t\ttext_image = text_font.render(self.title, 1, BLACK)\n\t\tpane.blit(text_image, ( 100, 20 ))\n\t\tcoords = SCREEN_DATA_MAP[CONTROLS][OPTIONS_COORDS]\n\t\tpane.blit(self.draw_select_options_pane(), (coords[0], coords[1]))\n\t\treturn pane", "def panel_callback():\n if self.minwidth is None:\n self.minwidth = self.button_stack.winfo_width()\n\n if self.current_panel:\n self.sashpos = self.paned.sashpos(0)\n self.current_panel.forget()\n if isinstance(self.current_panel, class_obj):\n self.current_panel = None\n self.paned.sashpos(0, self.minwidth)\n return\n\n if class_obj.__name__ in self.panels:\n panel = self.panels[class_name]\n else:\n panel = self.panels[class_name] = class_obj(self, self.app)\n\n panel.pack(side=tk.LEFT, expand=1, fill=tk.BOTH)\n\n if self.sashpos is None:\n self.sashpos = 300\n\n self.paned.sashpos(0, self.sashpos)\n\n self.current_panel = panel", "def select_buy_dashboard_tab(self):\n self.select_static_tab(self.buy_dashboard_tab_locator, True)", "def view_panel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_panel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_panel_details_by_id(s, id)\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n panel = get_regions_by_panelid(s, id, version)\n project_id = get_project_id_by_panel_id(s, id)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n # panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live\"\n bed = 'disabled'\n current_version = version\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = range(1, current_version + 1)\n choices = []\n for i in v_list:\n choices.append((i, i))\n form.versions.choices = choices\n form.versions.default = version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n return render_template('panel_view.html', scope='Panel', table=json.dumps(table), panel=table,\n panel_name=panel_name, edit=edit, bed=bed,\n version=version, panel_id=id, project_id=project_id, message=message,\n url=url_for('panels.view_panel'),\n form=form)\n\n else:\n return redirect(url_for('panels.view_panels'))", "def create_panel(self):\n return\n # return Panel(self)", "def select(self):\r\n pass", "def show_in_maya_panel(*args):\n from mliber_libs.maya_libs.maya_utils import show_as_panel\n show_as_panel(MainWidget())", "def __selectMS(self):\n \n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis']) \n else:\n 
self._msTool.reset()\n \n # It returns a dictionary if there was any selection otherwise None\n self.__selectionFilter = self.__getSelectionFilter()\n\n if self.__selectionFilter is not None:\n self._msTool.msselect(self.__selectionFilter)", "def widget(self, class_id, name):\n mw = self.getMainWindow()\n form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\n return form.findChild(class_id, name)", "def select(self):\n pass", "def select(self):\n pass", "def _on_plot_selection(self, event=None):\n if event is not None:\n combo = event.GetEventObject()\n event.Skip()\n else:\n combo = self.cb_plotpanel\n selection = combo.GetSelection()\n\n if combo.GetValue() != 'None':\n panel = combo.GetClientData(selection)\n self.parent.on_set_plot_focus(panel)", "def set_panel_on_focus(self, name=None):\n if self.cb_plotpanel and self.cb_plotpanel.IsBeingDeleted():\n return\n for _, value in self.parent.plot_panels.iteritems():\n name_plot_panel = str(value.window_caption)\n if name_plot_panel not in self.cb_plotpanel.GetItems():\n self.cb_plotpanel.Append(name_plot_panel, value)\n if name is not None and name == name_plot_panel:\n self.cb_plotpanel.SetStringSelection(name_plot_panel)\n break\n self.enable_append()\n self.enable_remove_plot()", "def select_browse_tab_layer(driver, main_layer_name, sub_layer_name, tools_utilities_selection_item, index, ws_index):\r\n\r\n AppCommanUtility.click_expand_button_div(driver, main_layer_name, tools_utilities_selection_item, index, ws_index)\r\n AppCommanUtility.click_toggel_btn(driver, sub_layer_name, tools_utilities_selection_item, index, ws_index)", "def select(self):\n save= self.currentSub._select()\n if save!=False and save.selectable == True:\n self.currentSub =save\n if save.explorable():\n try :\n save.list[save.count].onShowed()\n except:\n pass", "def selector(self):\n if self.selectedUnit:\n if not self.map.hasUnselectedUnitAt(self.pos):\n self.menu = Menu.Menu([], MENU_POSITION)\n #self.menuGroup.add(self.menu)\n self.selectedUnit.setNeighbors(self.map.getNeighbors(self.selectedUnit))\n if self.selectedUnit.hasUnfriendlyNeighbors():\n self.menu.add(Menu.MenuComponent(\" Attack\", self.startAttackMode))\n if self.selectedUnit.canCapture(self.pos):\n self.menu.add(Menu.MenuComponent(\" Capture\", lambda: self.capture(self.selectedUnit, self.pos)))\n self.menu.add(Menu.MenuComponent(\" Wait\", self.deselectUnit))\n self.menu.add(Menu.MenuComponent(\" Cancel\", self.cancelMove))\n self.menuMode = True\n else:\n self.selectSpace()", "def hook_frame_selected(self):", "def select_pane_active(direction, context=None):\n title = get_active_window_title()\n res = tmux_parse_window_title(title)\n select_pane(direction, session_name=res['tmux_session'])", "def setMySelector(self, x, y, z, scale):\n selectorPos = (self.selector.getX(), self.selector.getY(), self.selector.getZ())\n if selectorPos != (x,y,z):\n self.selector.setPos(x,y,z)\n self.selector.show()\n self.selector.setScale(scale)\n return 1\n else:\n self.selector.setPos(-1,-1,-1)\n return 0\n #self.enableScrollWheelZoom = 0", "def assignSelector(self, myObj, scale):\n if self.selector == None:\n self.createSelector()\n self.selector.show()\n \n self.selector.setPos(myObj.getX(), myObj.getY(), myObj.getZ())\n self.selector.setScale(scale)", "def select_me(self, mouse_pos):\r\n\t\t#self.active = self.rect.collidepoint(mouse_pos)\r\n\t\tself.active = True", "def select(*args, add: bool=True, addFirst: bool=True, all: bool=True, allDagObjects: bool=True,\n allDependencyNodes: bool=True, clear: bool=True, 
containerCentric: bool=True,\n deselect: bool=True, hierarchy: bool=True, noExpand: bool=True, replace: bool=True,\n symmetry: bool=True, symmetrySide: int=0, toggle: bool=True, visible: bool=True,\n **kwargs)->None:\n pass", "def get_score_panel(self, idattr):\n return self.get_node('//ScorePanels/ScorePanel[@id=\"%s\"]' % idattr)", "def panel_show(keyword):\n\n nodes = sorted(\n (\n n\n for n in nuke.allNodes()\n if keyword in n.name()\n and not nuke.numvalue(\n cast_str(\"%s.disable\" % n.name()),\n 0,\n )\n ),\n key=lambda n: cast_text(n.name()),\n reverse=True,\n )\n for n in nodes:\n n.showControlPanel()", "def select_disputes_tab(self):\n self.click_element(self.disputes_tab_locator)", "def select_me(self, mouse_pos):\r\n\t\tself.active = self.rect.collidepoint(mouse_pos)", "def view_panels(id=None):\n if not id:\n id = request.args.get('id')\n\n if id:\n panels = get_panels_by_project_id(s, id)\n else:\n panels = get_panels(s)\n result = []\n project_name = \"All\"\n for i in panels:\n row = dict(zip(i.keys(), i))\n status = check_panel_status(s, row[\"panelid\"])\n row[\"status\"] = status\n permission = check_user_has_permission(s, current_user.id, row[\"projectid\"])\n locked = check_if_locked(s, row[\"panelid\"])\n row['permission'] = permission\n row['locked'] = locked\n\n if id:\n project_name = row['projectname']\n # if check_user_has_permission(s, current_user.id, row[\"projectid\"]):\n # result.append(row)\n result.append(row)\n table = ItemTablePanels(result, classes=['table', 'table-striped'])\n return render_template('panels.html', panels=table, project_name=project_name)", "def createPanel(self, LibraryID, Name, **kwargs):\n if self.request(\"createPanel\", LibraryID=LibraryID, Name=Name, **kwargs) is None:\n return None\n return self.json_response[\"Result\"][\"PanelID\"]", "def parameter_selector(mcmc_table: pd.DataFrame):\n non_param_cols = [\"idx\", \"Scenario\", \"loglikelihood\", \"accept\"]\n param_options = [c for c in mcmc_table.columns if c not in non_param_cols]\n return st.sidebar.selectbox(\"Select parameter\", param_options)", "def _select_stage(self):\n logger.debug(f\"Selecting Epic Quest's stage: {self.stage_selector_ui.name}\")\n self.emulator.click_button(self.stage_selector_ui)\n return wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.START_BUTTON)", "def add_selector(self, listing):\n # We will be able to select X-frames and its boundaries\n # will be stored in the given list\n\n def onselect(xmin, xmax):\n# indmin, indmax = np.searchsorted(x, (xmin, xmax))\n# indmax = min(len(x)-1, indmax)\n indmin = xmin\n indmax = xmax\n onselect.listing.append([indmin, indmax])\n print (onselect.listing)\n \n onselect.listing = listing\n \n # set useblit True on gtkagg for enhanced performance\n ax = self.axes\n span = SpanSelector(ax, onselect, 'horizontal', useblit=True,\n rectprops=dict(alpha=0.5, facecolor='red') )\n \n self.widget_list.append(span)", "def select(self):\n return", "def select_field(field, i, j):\n return select_corners(field, i, j)", "def create_panel_navigation(self, frame_parent):\r\n panel = ttk.Frame(frame_parent)\r\n\r\n tree = ttk.Treeview(panel, selectmode=\"browse\") # \"browse\" mode limits to one selection only\r\n tree.heading(\"#0\", text=\"Category\")\r\n tree.column(\"#0\", width=130)\r\n #tree.bind(\"<ButtonRelease-1>\", self.on_category_select) # left-button release\r\n tree.bind(\"<<TreeviewSelect>>\", self.on_category_select)\r\n #\r\n tree.insert('', tk.END, text=\"Email\")\r\n tree.insert('', tk.END, 
text=\"Access Restriction\")\r\n tree.selection_set(tree.get_children()[0]) # select the first item on init\r\n tree.grid(sticky=\"NS\")\r\n\r\n # http://stackoverflow.com/questions/25940217/python-getting-started-with-tk-widget-not-resizing-on-grid\r\n # or you can just do this: tree.pack(fill=tk.BOTH, expand=1)\r\n tree.rowconfigure(0, weight=1)\r\n tree.columnconfigure(0, weight=1)\r\n return panel", "def panel(context, panel, version):\n LOG.info(\"Running scout export panel\")\n adapter = context.obj['adapter']\n \n if not panel:\n LOG.warning(\"Please provide at least one gene panel\")\n context.abort()\n\n LOG.info(\"Exporting panels: {}\".format(', '.join(panel)))\n for line in export_gene_panels(adapter, panel, version):\n click.echo(line)", "def getConfigPanel():\n\treturn None", "def select(self, target):", "def _selectInd(self, ind):\n logger.info(f'plotNumber:{self.plotNumber} ind: {ind}')\n if ind > len(self.plotDf)-1:\n return\n xVal = self.plotDf.at[ind, self.stateDict['xStat']]\n yVal = self.plotDf.at[ind, self.stateDict['yStat']]\n if self.scatterPlotSelection is not None:\n logger.info(f' setting scatterPlotSelection x:{xVal} y:{yVal}')\n self.scatterPlotSelection.set_data(xVal, yVal)\n self.fig.canvas.draw()", "def select(a, *dims):\n raise NotImplementedError(f'Selection not implemented for \"{type(a).__name__}\".')", "def select(self):\n\t\tif not (self.setting_key):\n\t\t\tSelectScreen.select(self)\n\t\t\treturn", "def hide(self):\n self.window.run_command(\"hide_panel\", {\"panel\": self.full_name})", "def on_btnClitool_clicked (self, widget):\n try:\n panelactual = variables.panel.get_current_page()\n if panelactual != 0:\n variables.panel.set_current_page(0)\n else:\n pass\n except:\n print(\"error botón cliente barra herramientas\")", "def select_collection_settings(self):\n select_collection_settings_sitem = self.locator_finder_by_id(self.select_collection_settings_id)\n select_collection_settings_sitem.click()\n time.sleep(2)", "def get_mutator(button, panel):\n button = (button - 41) // 3 + (panel - 1) * 15\n if 0 <= button < len(mutators_list):\n return mutators_list[button]\n else:\n return None", "def selectItem(*args):", "def _panelapp_panel_ids():\n json_lines = fetch_resource(PANELAPP_BASE_URL.format(\"list_panels\"), json=True)\n return [panel_info[\"Panel_Id\"] for panel_info in json_lines.get(\"result\", [])]", "def raise_panel(name):\n # type: (Text,) -> None\n\n from wulifang.vendor.Qt import QtWidgets\n\n for i in QtWidgets.QApplication.topLevelWidgets():\n panel = i.findChild(QtWidgets.QWidget, cast_str(name))\n if not panel:\n continue\n\n parent = panel.parentWidget()\n if not isinstance(parent, QtWidgets.QStackedWidget):\n continue\n parent = assert_isinstance(parent, QtWidgets.QStackedWidget)\n index = parent.indexOf(panel)\n parent = parent.parentWidget()\n if not isinstance(parent, QtWidgets.QWidget):\n continue\n tab = parent.findChild(QtWidgets.QTabBar)\n if not tab:\n continue\n tab.setCurrentIndex(index)\n panel.window().raise_()\n return\n else:\n raise RuntimeError(\"no such panel: %s\" % (name,))", "def add_panel(self, panel):\n assert panel.PANEL_ID not in self.panels\n assert not self.tools, \"tools must be added after panels\"\n self.panels[panel.PANEL_ID] = panel\n panel.register_panel(self)", "def select_site(b):\n\n try:\n drop = Select(b.find_element_by_id('States'))\n drop.select_by_value(\"CA\")\n time.sleep(2)\n drop = Select(b.find_element_by_id('UnitNums'))\n drop.select_by_value(\"506\")\n time.sleep(2)\n except 
NoSuchElementException as e:\n pass", "def test_panel(self):\n p = MailToolbarPanel(*self.panel_args)\n self.assertEqual(p.toolbar, self.toolbar)", "def getPanels(self, LibraryID):\n response = self.request(\"getPanels\", LibraryID=LibraryID)\n if not response:\n return None\n return response[\"Result\"][\"Panels\"]", "def showGUI(self,**kwargs):\n self.baxter.menu.select(self.modes[0])", "def setSelected(*args):", "def setSelected(*args):", "def selectMode(*args, component: bool=True, hierarchical: bool=True, leaf: bool=True, object:\n bool=True, preset: bool=True, root: bool=True, template: bool=True, q=True,\n query=True, **kwargs)->Union[bool, Any]:\n pass", "def curr_selection(self):\n\n self.domain = self.row[0]\n abstract = self.row[5]\n self.data_type = self.row[1]\n self.object_id = self.row[3]\n self.service = self.row[2]\n self.layer_title = self.row[4]\n crs_options = self.row[6]\n self.dlg.uCRSCombo.clear()\n if self.data_type != \"table\":\n self.dlg.uCRSCombo.addItems(crs_options)\n curr_crs = self.map_crs()\n if curr_crs in crs_options:\n idx = self.dlg.uCRSCombo.findText(curr_crs)\n self.dlg.uCRSCombo.setCurrentIndex(idx)\n self.dlg.uTextDescription.setText(abstract)", "def get_itemlist_panel(self):\n from guiqwt import panels\n return self.get_panel(panels.ID_ITEMLIST)", "def setModeSelect(self):\n self.scene().mode = fsScene.MODE_SELECT", "def onSelectMilitaryInstSim(self, sim):\n # create panel and selector if they do not exist\n self.onSelectNoSim()\n self.militaryinstInfo = anwp.gui.militaryinstinfo.MilitaryInstInfoFrame(self, self.game.app)\n self.createSelector2()\n \n # update observer\n if self.militaryinstInfo <> None and sim <> None:\n self.updateObserver(sim, 'militaryinstInfo')", "def select_search_method():\n st.sidebar.markdown('### Search method:')\n search_method = st.sidebar.selectbox('', ['Individual', 'Department'], index=0)\n return search_method", "def __selectStrategy( self ):\n chosenStrategy = self.activeStrategies[self.chosenStrategy]\n self.__incrementChosenStrategy()\n return chosenStrategy", "def _independent_panel(width, height):\n\n from maya import cmds\n\n # center panel on screen\n screen_width, screen_height = _get_screen_size()\n topLeft = [int((screen_height-height)/2.0),\n int((screen_width-width)/2.0)]\n\n window = cmds.window(width=width,\n height=height,\n topLeftCorner=topLeft,\n menuBarVisible=False,\n titleBar=False)\n cmds.paneLayout()\n panel = cmds.modelPanel(menuBarVisible=False,\n label='CapturePanel')\n\n # Hide icons under panel menus\n bar_layout = cmds.modelPanel(panel, q=True, barLayout=True)\n cmds.frameLayout(bar_layout, e=True, collapse=True)\n\n cmds.showWindow(window)\n\n # Set the modelEditor of the modelPanel as the active view so it takes\n # the playback focus. 
Does seem redundant with the `refresh` added in.\n editor = cmds.modelPanel(panel, query=True, modelEditor=True)\n cmds.modelEditor(editor, e=1, activeView=True)\n\n # Force a draw refresh of Maya so it keeps focus on the new panel\n # This focus is required to force preview playback in the independent panel\n cmds.refresh(force=True)\n\n try:\n yield panel\n finally:\n # Delete the panel to fix memory leak (about 5 mb per capture)\n cmds.deleteUI(panel, panel=True)\n cmds.deleteUI(window)", "def select(self, *dims):\n return select(self, *dims)", "def selectTabs(self):\n firstFramework = next(iter(self.data['frameworks']))\n framework = self.data.get('framework')\n if not framework:\n framework = firstFramework\n\n tabIndex = self.getTabIndex(self.ui.tab, framework)\n self.ui.tab.setCurrentIndex(tabIndex)\n\n for index in range(0, self.ui.tab.count()):\n isVisible = self.ui.tab.widget(index).objectName() in self.data['frameworks']\n self.ui.tab.setTabVisible(index, isVisible)\n return True", "def selectChannel(self,asic,chan, hsmode= 1 ):\n pass", "def slot_selectPoint(self, selectionDict):\n\t\tprint('bStackWidget.slot_selectPoint() selectionDict:', selectionDict)\n\t\tif selectionDict is None:\n\t\t\treturn\n\t\tif selectionDict['name'] == 'toggle rect roi':\n\t\t\treturn\n\t\ttype = selectionDict['type']\n\t\tidx = selectionDict['idx']\n\t\tif type == 'Nodes':\n\t\t\tnodeIdx = idx\n\t\t\tself.myStackView2.selectNode(nodeIdx, snapz=True, isShift=False, doEmit=True)\n\t\telif type == 'Edges':\n\t\t\tedgeIdx = idx\n\t\t\tself.myStackView2.selectEdge(edgeIdx, snapz=True, isShift=False, doEmit=True)", "def _select_stage(self, difficulty=6):\n difficulty_ui = ui.get_by_name(self._get_difficulty_ui(difficulty))\n if wait_until(self.emulator.is_ui_element_on_screen, ui_element=self.stage_selector_ui):\n self.emulator.click_button(self.stage_selector_ui)\n if \"_2_\" in difficulty_ui.name: # TODO: that's not good at all\n logger.debug(\"Difficulty is referring from the bottom of list. 
Trying to scroll.\")\n self.emulator.drag(ui.DIFFICULTY_DRAG_FROM, ui.DIFFICULTY_DRAG_TO)\n r_sleep(1)\n if wait_until(self.emulator.is_ui_element_on_screen, ui_element=difficulty_ui):\n self.emulator.click_button(difficulty_ui)\n return wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.START_BUTTON)", "def set_upgrade_panel(self):\n lot = self.city_graphics.get_clicked_lot(pg.mouse.get_pos())\n if lot.construct:\n self.upgrade_panel.set_lot(lot)\n self.upgrade_panel.enable()\n else:\n self.upgrade_panel.disable()", "def main():\n PanelDemo().mainloop()", "def draw_select_options_pane(self):\n\t\tpane = SelectScreen.draw_select_options_pane(self)\n\t\tself.display_controls(pane)\n\t\tif self.setting_key: self.draw_key_selection(pane)\n\t\treturn pane", "def _parse_panelapp_panel(adapter, panel_id, institute, confidence):\n hgnc_map = adapter.ensembl_to_hgnc_mapping()\n json_lines = fetch_resource(PANELAPP_BASE_URL.format(\"get_panel\") + panel_id, json=True)\n parsed_panel = parse_panel_app_panel(\n panel_info=json_lines[\"result\"],\n hgnc_map=hgnc_map,\n institute=institute,\n confidence=confidence,\n )\n if confidence != \"green\":\n parsed_panel[\"panel_id\"] = \"_\".join([panel_id, confidence])\n else: # This way the old green panels will be overwritten, instead of creating 2 sets of green panels, old and new\n parsed_panel[\"panel_id\"] = panel_id\n\n return parsed_panel", "def select_qos_tab(self):\n self.click_element(self.qos_tab_locator, True)", "def getSelector(self, node):\n self.checkModelOpen()\n calcEngine = CalcEngine.factory(self.client_session)\n return calcEngine.getSelector(node)", "def setMySelector2(self, x, y, z, scale):\n selectorPos = (self.selector2.getX(), self.selector2.getY(), self.selector2.getZ())\n if selectorPos != (x,y,z):\n self.selector2.setPos(x,y,z)\n self.selector2.show()\n self.selector2.setScale(scale)\n return 1\n else:\n self.selector2.setPos(-1,-1,-1)\n return 0\n #self.enableScrollWheelZoom = 0", "def getPanelMode(self) -> str:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.getPanelMode()\r\n return \"Not Connected\"", "def FindTab(self, page):\r\n\r\n all_panes = self._mgr.GetAllPanes()\r\n for pane in all_panes:\r\n if pane.name == \"dummy\":\r\n continue\r\n\r\n tabframe = pane.window\r\n\r\n page_idx = tabframe._tabs.GetIdxFromWindow(page)\r\n \r\n if page_idx != -1:\r\n \r\n ctrl = tabframe._tabs\r\n idx = page_idx\r\n return ctrl, idx\r\n \r\n return None, wx.NOT_FOUND", "def configure_panels(self):\n for panel_id in self.panels:\n panel = self.get_panel(panel_id)\n panel.configure_panel()", "def Show(self, sel_line = 0):\r\n return Control.Show(self, sel_line)", "def getSelectedPosition(*args):", "def getSelected(*args):", "def assignSelector2(self, myObj, scale):\n if self.selector2 == None:\n self.createSelector2()\n self.selector2.show()\n \n self.selector2.setPos(myObj.getX(), myObj.getY(), myObj.getZ())\n self.selector2.setScale(scale)", "def get_xcs_panel(self):\n from guiqwt import panels\n return self.get_panel(panels.ID_XCS)", "def selectPref(*args, affectsActive: bool=True, allowHiliteSelection: bool=True,\n autoSelectContainer: bool=True, autoUseDepth: bool=True, clickBoxSize: Union[int,\n bool]=0, clickDrag: bool=True, containerCentricSelection: bool=True,\n disableComponentPopups: bool=True, expandPopupList: bool=True,\n ignoreSelectionPriority: bool=True, manipClickBoxSize: Union[int, bool]=0,\n paintSelect: bool=True, paintSelectWithDepth: bool=True, popupMenuSelection:\n bool=True, 
preSelectBackfacing: bool=True, preSelectClosest: bool=True,\n preSelectDeadSpace: Union[int, bool]=0, preSelectHilite: bool=True,\n preSelectHiliteSize: Union[float, bool]=0.0, preSelectTweakDeadSpace: Union[int,\n bool]=0, selectTypeChangeAffectsActive: bool=True, selectionChildHighlightMode:\n Union[int, bool]=0, singleBoxSelection: bool=True, straightLineDistance:\n bool=True, trackSelectionOrder: bool=True, useDepth: bool=True, xformNoSelect:\n bool=True, q=True, query=True, **kwargs)->Union[bool, Any]:\n pass" ]
[ "0.6231665", "0.60948694", "0.58209896", "0.57440066", "0.55831444", "0.55661774", "0.5561857", "0.55333793", "0.5477269", "0.5407169", "0.53396136", "0.5338085", "0.5338085", "0.53237325", "0.5315146", "0.53150046", "0.5298998", "0.52901876", "0.5276756", "0.5271008", "0.5250677", "0.52449095", "0.5207316", "0.5206378", "0.5200966", "0.51855874", "0.51699895", "0.51699895", "0.5146355", "0.51195353", "0.5111423", "0.5099316", "0.50842506", "0.50711834", "0.50633", "0.50564784", "0.5046723", "0.5037077", "0.4970206", "0.49700356", "0.4953453", "0.4951542", "0.49474013", "0.4939824", "0.49270877", "0.49178866", "0.49169594", "0.49124786", "0.49096128", "0.49047887", "0.48989117", "0.48808214", "0.48790467", "0.48787397", "0.48776844", "0.48776472", "0.48656762", "0.4858691", "0.48569897", "0.4848647", "0.48305765", "0.48200202", "0.4819241", "0.48151243", "0.48148963", "0.48115095", "0.4809526", "0.48081678", "0.48055977", "0.4796781", "0.4796781", "0.4791746", "0.47873875", "0.4775928", "0.47718012", "0.4767183", "0.47555548", "0.4753245", "0.47530842", "0.47408307", "0.4739811", "0.47386593", "0.4734164", "0.47330806", "0.47199672", "0.47114256", "0.47097647", "0.47096297", "0.4706013", "0.47042316", "0.47034645", "0.47024176", "0.46982065", "0.46861944", "0.46860483", "0.46851838", "0.46831188", "0.4678652", "0.46763754", "0.46756688" ]
0.6054565
2
Preprocess XML files: extract the main info from each XML file and load the category info.
import json
import os

def load_data_to_json(root_path, extract=True, decode="utf-8"):
    # JSON_FILE and read_main_info are assumed to be defined elsewhere in the module.
    category_map = {}
    for file in os.listdir(root_path):
        # Each subdirectory of root_path is one industry-category folder
        path = os.path.join(root_path, file)
        if os.path.isdir(path):
            for xml_file in os.listdir(path):
                # Read each file belonging to this industry category
                xml_file_path = os.path.join(path, xml_file)
                try:
                    if xml_file_path.endswith(".xml"):
                        doc = read_main_info(xml_file_path, extract=extract, decode=decode)
                        if file in category_map.keys():
                            category_map[file].append(doc)
                        else:
                            category_map[file] = list()
                            category_map[file].append(doc)
                except UnicodeDecodeError:
                    print("UnicodeDecodeError:%s" % xml_file_path)
                    continue
    with open(JSON_FILE, "w") as f:
        json.dump(category_map, f)
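Note: the function above depends on a read_main_info helper (and a JSON_FILE constant) that this row does not include. Below is a minimal sketch of what such a helper might look like, assuming it parses a single XML file and returns its main fields; only the name and signature come from the call site, and the element names are illustrative assumptions.

import xml.etree.ElementTree as ET

def read_main_info(xml_file_path, extract=True, decode="utf-8"):
    # Hypothetical sketch -- only the name and signature come from the call
    # site above. Decoding with the given codec may raise UnicodeDecodeError,
    # which the caller catches and skips.
    with open(xml_file_path, "rb") as f:
        data = f.read()
    text = data.decode(decode)   # validate the encoding up front
    root = ET.fromstring(data)   # parse from bytes so XML declarations are honored
    # "title" and "content" are assumed element names, purely illustrative.
    doc = {
        "title": (root.findtext("title") or "").strip(),
        "content": (root.findtext("content") or "").strip(),
    }
    if not extract:
        doc["raw"] = text  # keep the full text when extraction is disabled
    return doc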
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_xml(self):\n self.process_gpx_file(str(self.filename))", "def extract_titles():\n \"\"\"\n The final data has the mapping post_title -> cat.\n This requires three relations:\n (pid, id) -> feed_url, feed_url -> blog_url, blog_url -> cat.\n Each file contains one raw feed with several titles, thus:\n (pid, id) -> list(post_title, cat)\n \"\"\"\n #(pid, id) -> feed_url\n idvals = cPickle.load(open(prefix + \"idvals.pickle\"))\n #blog_url -> cat\n cats = cPickle.load(open(prefix + \"blogcats.pickle\"))\n #feed_url -> blog_url\n urls = cPickle.load(open(prefix + \"blogurls.pickle\"))\n\n patt = re.compile('<title>(.*?)</title>')\n titles_success = 0\n titles_bad = 0\n successes = 0\n failures = 0\n #iterate through all raw feed HTML files.\n for infile in glob.glob(os.path.join(feeds_path, '*.xml')):\n info = infile.split('.')[0].split('/')[-1]\n pid, id = info.split('-')\n #(pid, id) -> blog\n blog = idvals[(int(pid), int(id))]\n cat = None\n try:\n # blog -> url -> cat\n cat = cats[urls[blog]]\n except KeyError:\n logging.info(\"Could not find category for blog %s. Skipping...\" % blog)\n continue\n try:\n root = etree.parse(infile)\n successes += 1\n except Exception:\n logging.info(\"Title extraction failed for %s.\" % infile)\n failures += 1\n continue\n\n #PARSE THE FILE\n #Get the encoding of the document (doesn't seem to work)\n enc = root.docinfo.encoding\n titles = root.xpath('/rss/channel/item/title') # titles should be here.\n OUT = open(prefix + \"meta/titles.dat\", \"a\")\n if len(titles) == 0: # didn't find titles using that xpath.\n IN = open(infile) # look for the title in HTML instead.\n content = IN.read()\n IN.close()\n titles = patt.findall(content)\n #for each found title, print it to the FINAL log used for research.\n for title in titles:\n if title is not None:\n try:\n print >> OUT, ','.join([blog, cat, str(info),\n title.strip().replace(\",\", \"\")])\n titles_success += 1\n except:\n try:\n print >> ','.join([OUT, blog, cat, str(info),\n title.strip().encode(enc).replace(\",\", \"\")])\n titles_success += 1\n except:\n titles_bad += 1\n logging.info(\"Character encoding failed in file %s.\" % infile)\n else:\n titles_bad += 1\n else:\n for title in titles:\n if title.text is None:\n titles_bad += 1\n continue\n try:\n print >> OUT, ','.join([blog, cat, str(info),\n title.text.strip().encode(enc).replace(\",\", \"\")])\n titles_success += 1\n except:\n logging.info(\"Character encoding failed in file %s.\" % infile)\n titles_bad += 1\n OUT.close()\n logging.info(\"Document Parse Successes: %d\" % successes)\n logging.info(\"Document Parse Failes Failures: %d\" % failures)\n logging.info(\"TOTAL TITLES FETCHED: %d (%d failed)\" %\n (titles_success, titles_bad))", "def parse_xmls(user, application, complete_path, init_es, tool, scan_name, user_host, to_name):\n process_files(user, application, complete_path, init_es, tool, scan_name, user_host, to_name)\n info_debug_log(event='Parse xmls',status='success')", "def preprocess_xml(xml):\n logger.info(\"Preprocessing XML %s\", xml)\n for path, replacement in content.Macros():\n replacement = etree.fromstring('<ROOT>' + replacement + '</ROOT>')\n for node in xml.xpath(path):\n parent = node.getparent()\n idx = parent.index(node)\n parent.remove(node)\n for repl in replacement:\n parent.insert(idx, repl)\n idx += 1", "def load_asterix_category_format(k):\n global filenames\n try:\n __basePath__ = os.path.abspath(os.path.join(os.getcwd(), '../../../..'))\n\n # Look for file in current executing directory\n 
path_filename1 = filenames[k]\n\n # On default directory (absolute)\n path_filename2 = __basePath__ + \"/\" +filenames[k]\n\n # On default directory (relative)\n path_filename3 = os.path.dirname(os.path.realpath(__file__)) + \"/xml/\" + filenames[k]\n\n if os.path.isfile(path_filename1):\n # print \"Loading file '%s'\" % path_filename1\n return minidom.parse(path_filename1)\n\n if os.path.isfile(path_filename2):\n # print \"Loading file '%s'\" % path_filename2\n return minidom.parse(path_filename2)\n\n if os.path.isfile(path_filename3):\n # print \"Loading file '%s'\" % path_filename3\n return minidom.parse(path_filename3)\n\n return None\n\n except:\n traceback.print_exc()\n\n return None", "def parse_external_files(self, filename):\n # Parse bins xml files\n binsxml = filename + '.xml'\n if os.path.isfile(binsxml):\n bins = BinsParser(binsxml)\n for key, value in bins.items():\n self._set(key, value)\n # FIXME: this doesn't work anymore\n comment_file = os.path.join(os.path.dirname(filename), '.comments',\n os.path.basename(filename) + '.xml')\n if not os.path.isfile(comment_file) or 1:\n return\n # FIXME: replace kaa.xml stuff with sax or minidom\n doc = xml.Document(comment_file, 'Comment')\n for child in doc.children:\n if child.name == 'Place':\n self.location = child.content\n if child.name == 'Note':\n self.description = child.content", "def process_xml(xml):\r\n\r\n def make_name_unique(xml_data):\r\n \"\"\"\r\n Make sure that the url_name of xml_data is unique. If a previously loaded\r\n unnamed descriptor stole this element's url_name, create a new one.\r\n\r\n Removes 'slug' attribute if present, and adds or overwrites the 'url_name' attribute.\r\n \"\"\"\r\n # VS[compat]. Take this out once course conversion is done (perhaps leave the uniqueness check)\r\n\r\n # tags that really need unique names--they store (or should store) state.\r\n need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter',\r\n 'videosequence', 'poll_question', 'vertical')\r\n\r\n attr = xml_data.attrib\r\n tag = xml_data.tag\r\n id = lambda x: x\r\n # Things to try to get a name, in order (key, cleaning function, remove key after reading?)\r\n lookups = [('url_name', id, False),\r\n ('slug', id, True),\r\n ('name', Location.clean, False),\r\n ('display_name', Location.clean, False)]\r\n\r\n url_name = None\r\n for key, clean, remove in lookups:\r\n if key in attr:\r\n url_name = clean(attr[key])\r\n if remove:\r\n del attr[key]\r\n break\r\n\r\n def looks_like_fallback(url_name):\r\n \"\"\"Does this look like something that came from fallback_name()?\"\"\"\r\n return (url_name is not None\r\n and url_name.startswith(tag)\r\n and re.search('[0-9a-fA-F]{12}$', url_name))\r\n\r\n def fallback_name(orig_name=None):\r\n \"\"\"Return the fallback name for this module. This is a function instead of a variable\r\n because we want it to be lazy.\"\"\"\r\n if looks_like_fallback(orig_name):\r\n # We're about to re-hash, in case something changed, so get rid of the tag_ and hash\r\n orig_name = orig_name[len(tag) + 1:-12]\r\n # append the hash of the content--the first 12 bytes should be plenty.\r\n orig_name = \"_\" + orig_name if orig_name not in (None, \"\") else \"\"\r\n xml_bytes = xml.encode('utf8')\r\n return tag + orig_name + \"_\" + hashlib.sha1(xml_bytes).hexdigest()[:12]\r\n\r\n # Fallback if there was nothing we could use:\r\n if url_name is None or url_name == \"\":\r\n url_name = fallback_name()\r\n # Don't log a warning--we don't need this in the log. 
Do\r\n # put it in the error tracker--content folks need to see it.\r\n\r\n if tag in need_uniq_names:\r\n error_tracker(\"PROBLEM: no name of any kind specified for {tag}. Student \"\r\n \"state will not be properly tracked for this module. Problem xml:\"\r\n \" '{xml}...'\".format(tag=tag, xml=xml[:100]))\r\n else:\r\n # TODO (vshnayder): We may want to enable this once course repos are cleaned up.\r\n # (or we may want to give up on the requirement for non-state-relevant issues...)\r\n # error_tracker(\"WARNING: no name specified for module. xml='{0}...'\".format(xml[:100]))\r\n pass\r\n\r\n # Make sure everything is unique\r\n if url_name in self.used_names[tag]:\r\n # Always complain about modules that store state. If it\r\n # doesn't store state, don't complain about things that are\r\n # hashed.\r\n if tag in need_uniq_names:\r\n msg = (\"Non-unique url_name in xml. This may break state tracking for content.\"\r\n \" url_name={0}. Content={1}\".format(url_name, xml[:100]))\r\n error_tracker(\"PROBLEM: \" + msg)\r\n log.warning(msg)\r\n # Just set name to fallback_name--if there are multiple things with the same fallback name,\r\n # they are actually identical, so it's fragile, but not immediately broken.\r\n\r\n # TODO (vshnayder): if the tag is a pointer tag, this will\r\n # break the content because we won't have the right link.\r\n # That's also a legitimate attempt to reuse the same content\r\n # from multiple places. Once we actually allow that, we'll\r\n # need to update this to complain about non-unique names for\r\n # definitions, but allow multiple uses.\r\n url_name = fallback_name(url_name)\r\n\r\n self.used_names[tag].add(url_name)\r\n xml_data.set('url_name', url_name)\r\n\r\n try:\r\n # VS[compat]\r\n # TODO (cpennington): Remove this once all fall 2012 courses\r\n # have been imported into the cms from xml\r\n xml = clean_out_mako_templating(xml)\r\n xml_data = etree.fromstring(xml)\r\n\r\n make_name_unique(xml_data)\r\n\r\n descriptor = create_block_from_xml(\r\n etree.tostring(xml_data, encoding='unicode'),\r\n self,\r\n id_generator,\r\n )\r\n except Exception as err: # pylint: disable=broad-except\r\n if not self.load_error_modules:\r\n raise\r\n\r\n # Didn't load properly. Fall back on loading as an error\r\n # descriptor. This should never error due to formatting.\r\n\r\n msg = \"Error loading from xml. %s\"\r\n log.warning(\r\n msg,\r\n unicode(err)[:200],\r\n # Normally, we don't want lots of exception traces in our logs from common\r\n # content problems. 
But if you're debugging the xml loading code itself,\r\n # uncomment the next line.\r\n # exc_info=True\r\n )\r\n\r\n msg = msg % (unicode(err)[:200])\r\n\r\n self.error_tracker(msg)\r\n err_msg = msg + \"\\n\" + exc_info_to_str(sys.exc_info())\r\n descriptor = ErrorDescriptor.from_xml(\r\n xml,\r\n self,\r\n id_generator,\r\n err_msg\r\n )\r\n\r\n descriptor.data_dir = course_dir\r\n\r\n xmlstore.modules[course_id][descriptor.scope_ids.usage_id] = descriptor\r\n\r\n if descriptor.has_children:\r\n for child in descriptor.get_children():\r\n parent_tracker.add_parent(child.scope_ids.usage_id, descriptor.scope_ids.usage_id)\r\n\r\n # After setting up the descriptor, save any changes that we have\r\n # made to attributes on the descriptor to the underlying KeyValueStore.\r\n descriptor.save()\r\n return descriptor", "def parse_file(self, filepath):\n\n xml_file = open(filepath, \"r\")\n xml = xml_file.read()\n content = \"\"\n\n xml_file.close()\n\n for line in xml.replace(\"&amp;\", \"&\").split(\"\\n\"):\n if content != \"\":\n content += \" \"\n content += re.sub(\"(<(P|F).*?>)|(<\\\\/P>)\", \"\", line).strip()\n # XML cleanning\n\n start_offset = \"<START_OFFSET_DUCFileRep>\"\n content = start_offset + content\n content = content.replace(\"</LP>\", \"</LP>%s\"%start_offset)\n content = content.replace(\"</TEXT>\", \"</TEXT>%s\"%start_offset)\n content = re.sub(\"%s.*?<LP>(.*?)<\\\\/LP>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*?<TEXT>(.*?)<\\\\/TEXT>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*\"%start_offset, \"\", content)\n\n self.set_content(content)", "def parse_xml(self, source):\n xml = etree.parse(source)\n for taxonomy_data in xml.findall('taxonomy'):\n self.parse_taxonomy_set(taxonomy_data)", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def extract_cuewords(cuewords, xml_file_path):\n\n try:\n file_output = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE, 'w', encoding='utf8')\n file_output_pos_tagged = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE_POS_TAGGED,\n 'w', encoding='utf8')\n\n except FileNotFoundError:\n print('Please set correct filenames')\n\n # Empty lists for collecting data per file\n cueword_ids = []\n cuewords = []\n\n # Empty list to collect data for all files\n all_cuewords = []\n all_cuewords_pos_tagged = []\n\n print('Extracting cuewords from:', xml_file_path, 'to:', CUEWORDS_DATA_PATH+CUEWORDS_FILE)\n\n # Go through all files in xml_file_path directory\n for file in os.listdir(xml_file_path):\n\n # For each file, open, parseXML\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n file_input = open(file, 'r', encoding='utf8')\n file_input = BeautifulSoup(file_input, 'xml')\n\n # Collect frames, get ids\n for frame in file_input.find_all('frame', {'name' : NEGATION_FRAME_NAME}):\n for target in frame.find_all('target'):\n for fenode in target.find_all('fenode'):\n cueword_ids.insert(0, fenode.get('idref'))\n\n # Find all splitwords\n for splitword in file_input.find_all('splitword'):\n cueword_ids.insert(0, splitword.get('idref'))\n\n # Find all terminals, check if its ID is in cueword_ids[]\n for terminal in file_input.find_all('t'):\n if terminal.get('id') in cueword_ids:\n all_cuewords.insert(0, terminal.get('word').lower())\n all_cuewords_pos_tagged.insert(0, terminal.get('word').lower()+\n '\\t'+terminal.get('pos'))\n\n # clear list for next document\n cueword_ids = []\n cuewords = []\n\n # Sort final list\n all_cuewords = 
sorted(set(all_cuewords))\n all_cuewords_pos_tagged = sorted(set(all_cuewords_pos_tagged))\n\n # Write cuewords without duplicates to file:\n for cueword in all_cuewords:\n file_output.write(cueword+'\\n')\n\n for cueword in all_cuewords_pos_tagged:\n file_output_pos_tagged.write(cueword+'\\n')\n\n file_output.close()\n file_output_pos_tagged.close()\n\n print('Cuewords extracted to:', file_output.name)\n print('Cuewords extracted and POS tagged to:', file_output_pos_tagged.name)\n print('Done!')", "def _xmlRead(self):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n \r\n logger.debug(\"{0:s}xmlFile: {1:s} parse Xml ...\".format(logStr,self.xmlFile)) \r\n tree = ET.parse(self.xmlFile) # ElementTree \r\n root = tree.getroot() # Element\r\n\r\n self.dataFrames=Xm._xmlRoot2Dfs(root)\r\n\r\n #fixes and conversions\r\n self._convertAndFix()\r\n\r\n #Views\r\n self._vXXXX()\r\n \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def parse(self):\n\t\tself.maincfg_values = self._load_static_file(self.cfg_file)\n\t\t\n\t\tself.cfg_files = self.get_cfg_files()\n\t\t\n\t\tself.resource_values = self.get_resources()\n\t\t\n\t\tself.timestamps = self.get_timestamps()\n\t\t\n\t\t## This loads everything into\n\t\tfor cfg_file in self.cfg_files:\n\t\t\tself._load_file(cfg_file)\n\n\t\tself._post_parse()", "def __parse(self):\n\t\tparser=xml.sax.make_parser()\n\t\tparser.setContentHandler(OSMXMLFileParser(self))\n\t\tparser.parse(self.filename)\n\t\n\t\t# convert them back to lists\n\t\tself.nodes = self.nodes.values()\n\t\tself.ways = self.ways.values()\n\t\tself.relations = self.relations.values()", "def read_and_process_xml(self):\n\n file_pattern_path = \"{}/*.xml\".format(self.download_path)\n\n files = glob.glob(file_pattern_path)\n ob = list(zip([self.logger] * len(files), files))\n\n pool_size = int(multiprocessing.cpu_count() / 2)\n\n try:\n # with multiprocessing.Pool(pool_size) as p:\n # result = p.starmap(process, product(files, repeat=self.logger))\n\n with poolcontext(processes=pool_size) as pool:\n result = pool.map(partial(process, logger=self.logger), files)\n\n empty = False\n\n for res in result:\n if res.empty:\n empty = True\n\n if empty:\n self.logger.logError(\"Error Processing One or More XML Files\")\n\n return pd.concat(result)\n except Exception as e:\n self.logger.logError(\"Error read and processing xml\".format(str(e)))\n raise Exception('Error processing XML Files')", "def preprocess(self):\n\n self._build_labels_dict(['one', 'two', 'three', 'four', 'five'])\n\n with open(self.data_path + self.file_name, 'rb') as csvfile:\n\n reader = csv.reader(csvfile, delimiter=\",\")\n for row in reader:\n self.texts.append(row[1])\n self.labels.append(self.labels_index[row[0]])\n\n print('Found %s texts.' 
% len(self.texts))", "def setUp(self):\n\n self.parser = XMLParser()\n self.xml = self.parser.parse(self.XMLDATA)", "def load(self, path, imagedir, prefix=None, tag_type=tagtype.Real):\n self.xml = ET.parse(path)\n root = self.xml.getroot()\n feature = None\n if tag_type == tagtype.Real:\n self.name = root.attrib['path'] + root.attrib['filename']\n self.ImagePath = os.path.join(imagedir, self.name) # the use of ImagePath may be discontinued to decouple tag and image loading\n for tag in root.iter('tag'):\n self.tags.append(tag.text)\n if not tag.text in AllTags:\n AllTags[tag.text] = 1\n else:\n AllTags[tag.text] = AllTags[tag.text] + 1\n for feat in root.findall('primary_category'):\n if feat.attrib['name'] == 'Scattering features in image':\n feature = feat\n break\n if feature is not None:\n # row_tag contains major image feature labels (eg. halo, peak, etc.)\n for row_tag in feature.iter('row_tag'):\n mainfeat = row_tag.attrib['name']\n self.MainImageFeatures.append(mainfeat)\n if not mainfeat in MainImageFeatures:\n MainImageFeatures[mainfeat] = 1\n else:\n MainImageFeatures[mainfeat] = MainImageFeatures[mainfeat] + 1\n for tag in feature.iter('tag'):\n self.ImageFeatures.append(tag.text)\n if not tag.text in ImageFeatures:\n ImageFeatures[tag.text] = 1\n else:\n ImageFeatures[tag.text] = ImageFeatures[tag.text] + 1\n elif tag_type == tagtype.Synthetic:\n for feat in root.findall('protocol'):\n if feat.attrib['name'] == 'tag_generated':\n feature = feat\n break\n if feature is not None:\n self.name = feature.attrib['outfile']\n if prefix is not None:\n self.name = self.name.replace(prefix, '') # to get the related image, /analysis/results/... still needs to be taken care of\n for tag in feature.iter('result'):\n if tag.attrib['value'] == 'True':\n simfeat = tag.attrib['name']\n self.SimulatedFeatures.append(simfeat)\n if not simfeat in SimulatedFeatures:\n SimulatedFeatures[simfeat] = 1\n else:\n SimulatedFeatures[simfeat] += 1\n else:\n raise ValueError('Unrecognized tag type')", "def xml_to_conll(self, xml_file_path):\n\n if not os.path.exists(CONLL_PATH):\n self.create_directories(CONLL_PATH)\n\n\n for file in os.listdir(xml_file_path):\n\n # Set path to file\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open Files\n chapter_input = open(file, 'r', encoding='utf8')\n\n # Create Same Filename in Output Folder\n chapter_output = open(CONLL_PATH+os.path.split(file)[-1]+'.conll', 'w', encoding='utf8')\n\n print('Converting: ' + chapter_input.name + ' to Conll09 file: ' + chapter_output.name)\n\n chapter_input = BeautifulSoup(chapter_input, 'xml')\n for sentence in chapter_input.find_all('s'):\n line_id = 0\n for terminal in sentence.find_all('t'):\n line_id, terminal_id, form, lemma, plemma = line_id+1, terminal.get('id'), terminal.get('word'), terminal.get('lemma'), terminal.get('lemma')\n pos, ppos = terminal.get('pos'), terminal.get('pos')\n feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1 = \"_\" * 9 # <3 Python!\n chapter_output.write(\"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\"\n \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\n\"\n % (str(line_id)+\"-\"+terminal_id, form, lemma, plemma, pos, ppos, feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1))\n chapter_output.write(\"\\n\")\n\n chapter_output.close()\n\n print(\"Done!\")", 
"def process_all():\n\tfiles = os.listdir('records')\n\tfiles = [file for file in files if file not in ('.DS_Store','old')]\n\tattr_list = []\n\tcorpus = []\n\tsentences = []\n\tcorp_set = set()\n\tfor file in files:\n\t\twith open('records/'+file) as f:\n\t\t\tattr_list, corpus, sentences = proc_file(f,file,corpus,attr_list,corp_set,sentences)\n\treturn attr_list,corpus,sentences", "def main():\n parser = ArgumentParser(description=\"pre-process nexus templates\")\n parser.add_argument(\n \"nexus_templates\",\n nargs=\"+\",\n help=\"Nexus template files to process\",\n )\n args = parser.parse_args()\n\n for template_file in args.nexus_templates:\n preprocess_template(template_file)", "def parse_file(self, filename):\n\n root_name_checked = False\n\n xml_text = self._make_string(filename)\n\n for token, content, loc in XML(xml_text):\n\n if token == \"START\":\n\n name = content[0]\n attr = content[1]\n\n # We are building a tree of Element objects. The issue with\n # parsing multiple files is that each XML file needs a common\n # root Element to form the top of the tree. Therefore, the\n # requirement for parsing a second file is that it has a same\n # named root Element.\n\n if self.__root is not None and not root_name_checked:\n\n if self.__root.getName() == name:\n\n # We already have a root element, and that root element\n # has the same name as the root element in this second\n # file. Continue with the parse.\n\n # Potential issue is that doing this will not call the\n # registered start visitor. Is that okay since we are\n # treating it as a common node.\n\n self.__node_stack.append(self.__root)\n\n else:\n\n # We already have a root element, but that root element\n # name does not match the root element name from the\n # previously parsed file. 
Stop since this will result\n # in an orphaned tree branch.\n\n print(\n \"XML file (%s) has invalid root name: %s (expected: %s).\"\n % (filename, name, self.__root.getName())\n )\n return\n\n else:\n self._startElement(name, attr)\n\n root_name_checked = True\n\n elif token == \"TEXT\":\n self._cData(content)\n\n elif token == \"END\":\n name = content[0]\n self._endElement(name)\n\n return self.__root", "def loadXMLAnnotations(self, dataset):\n\n\t\txmlFilesContent = {}\n\t\tallFiles = sorted(glob.glob('{}*.{}'.format(self.dataSetDir, \"xml\")))\n\t\tfor file in allFiles:\n\t\t\tfileName = file.split(\"/\")[-1].split(\".\")[0]\n\t\t\txmlFilesContent[fileName] = {}\n\t\t\tfileTree = ET.parse(file)\n\t\t\txmlFilesContent = self.extractXMLEntity(fileTree, fileName, xmlFilesContent, dataset[fileName])\n\t\t\txmlFilesContent = self.extractXMLRelation(fileTree, fileName, xmlFilesContent)\n\t\treturn xmlFilesContent", "def prepare_info(self, infoxml_file):\n\n self.info = InfoXML(infoxml_file)\n\n if self.info.name:\n self.LOGGER << f\"Mod Name from info.xml: {self.info.name}\"\n\n self._install_dirname = self.info.name.lower()", "def load(xml):\n if isinstance(xml, XmlReader):\n for n in _process(xml): yield n\n else:\n with XmlReader.Create(xml) as xr:\n for n in _process(xr): yield n", "def _parse_xml_file( self, xml_file, image_path ):\r\n try:\r\n succeeded = True\r\n coord_posx = 0\r\n coord_posy = 0\r\n # load and parse skin.xml file\r\n skindoc = xml.dom.minidom.parse( xml_file )\r\n root = skindoc.documentElement\r\n # make sure this is a valid <window> xml file\r\n if ( not root or root.tagName != \"window\" ): raise\r\n # check for a <useincludes>tag\r\n includes_exist = False\r\n useIncludes = self.FirstChildElement( root, \"useincludes\" )\r\n if ( useIncludes and useIncludes.firstChild ): \r\n overide = useIncludes.firstChild.nodeValue.lower()\r\n if ( overide == \"1\" or overide == \"true\" or overide == \"yes\" ):\r\n includes_exist = self.LoadIncludes()\r\n #resolve xml file\r\n if ( includes_exist ): self.ResolveIncludes( root )\r\n # check for <defaultcontrol> and <coordinates> based system\r\n try:\r\n default = self.FirstChildElement( root, \"defaultcontrol\" )\r\n if ( default and default.firstChild ): self.defaultControl = int( default.firstChild.nodeValue )\r\n else: self.defaultControl = None\r\n coordinates = self.FirstChildElement( root, \"coordinates\" )\r\n if ( coordinates and coordinates.firstChild ):\r\n systemBase = self.FirstChildElement( coordinates, \"system\" )\r\n if ( systemBase and systemBase.firstChild ): \r\n system = int( systemBase.firstChild.nodeValue )\r\n if ( system == 1 ):\r\n posx = self.FirstChildElement( coordinates, \"posx\" )\r\n if ( posx and posx.firstChild ): coord_posx = int( posx.firstChild.nodeValue )\r\n posy = self.FirstChildElement( coordinates, \"posy\" )\r\n if ( posy and posy.firstChild ): coord_posy = int( posy.firstChild.nodeValue )\r\n except: pass\r\n # check for a <resolution> tag and setCoordinateResolution()\r\n resolution = self.FirstChildElement( root, \"resolution\" )\r\n if ( resolution and resolution.firstChild ): self.resolution = self.resolutions.get( resolution.firstChild.nodeValue.lower(), 6 )\r\n self._set_resolution()\r\n # make sure <controls> block exists and resolve if necessary\r\n controls = self.FirstChildElement( root, \"controls\" )\r\n if ( controls and controls.firstChild ):\r\n if ( includes_exist ): self.ResolveIncludes( controls )\r\n else: raise\r\n # parse and resolve each <control>\r\n data = 
controls.getElementsByTagName( \"control\" )\r\n if ( not data ): raise\r\n for control in data:\r\n #control = self.FirstChildElement( controls, None )\r\n #while ( control ):\r\n control_type = None\r\n control_group = False\r\n if ( control.hasAttributes() ):\r\n control_type = control.getAttribute( \"type\" )\r\n control_id = control.getAttribute( \"id\" )\r\n #############################\r\n #group_posx = 0\r\n #group_posy = 0\r\n if ( control_type == \"group\" ): \r\n control_group = True\r\n continue\r\n #############################\r\n\r\n if ( includes_exist ): self.ResolveIncludes( control, control_type )\r\n \r\n current_control = {}\r\n animation_tags = []\r\n label_tags = []\r\n label2_tags = []\r\n info_tags = []\r\n image_tags = []\r\n visible_tags = []\r\n enable_tags = []\r\n\r\n if ( control_type != \"\" ): current_control[ \"type\" ] = str( control_type )\r\n if ( control_id != \"\" ): current_control[ \"id\" ] = int( control_id )\r\n else: current_control[ \"id\" ] = 1\r\n \r\n # loop thru control and find all tags\r\n node = self.FirstChildElement( control, None )\r\n while ( node ):\r\n # key node so save to the dictionary\r\n if ( node.tagName.lower() == \"label\" ):\r\n try:\r\n v = node.firstChild.nodeValue\r\n if ( self._ ):\r\n ls = self._( int( v ) )\r\n else: ls = xbmc.getLocalizedString( int( v ) )\r\n if ( ls ): label_tags.append( ls )\r\n else: raise\r\n except:\r\n if ( node.hasChildNodes() ): label_tags.append( node.firstChild.nodeValue )\r\n elif ( node.tagName.lower() == \"label2\" ):\r\n try: \r\n v = node.firstChild.nodeValue\r\n if ( self._ ):\r\n ls = self._( int( v ) )\r\n else: ls = xbmc.getLocalizedString( int( v ) )\r\n if ( ls ): label2_tags.append( ls )\r\n else: raise\r\n except:\r\n if ( node.hasChildNodes() ): label2_tags.append( node.firstChild.nodeValue )\r\n elif ( node.tagName.lower() == \"info\" ):\r\n if ( node.hasChildNodes() ): info_tags.append( node.firstChild.nodeValue )\r\n elif ( node.tagName.lower() == \"image\" ):\r\n if ( node.hasChildNodes() ): image_tags.append( node.firstChild.nodeValue )\r\n elif ( node.tagName.lower() == \"visible\" ):\r\n if ( node.hasChildNodes() ):\r\n if ( node.hasAttributes() ):\r\n ah = node.getAttribute( \"allowhiddenfocus\" )\r\n else: ah = \"false\"\r\n current_control[ \"allowhiddenfocus\" ] = ah\r\n visible_tags.append( node.firstChild.nodeValue )\r\n elif ( node.tagName.lower() == \"enable\" ):\r\n enable_tags.append( node.firstChild.nodeValue )\r\n elif ( node.tagName.lower() == \"animation\" ):\r\n if ( node.hasChildNodes() ): \r\n if ( node.hasAttributes() ):\r\n condition = \"\"\r\n if ( node.hasAttribute( \"effect\" ) ):\r\n condition += \"effect=%s \" % node.getAttribute( \"effect\" ).strip()\r\n if ( node.hasAttribute( \"time\" ) ):\r\n condition += \"time=%s \" % node.getAttribute( \"time\" ).strip()\r\n if ( node.hasAttribute( \"delay\" ) ):\r\n condition += \"delay=%s \" % node.getAttribute( \"delay\" ).strip()\r\n if ( node.hasAttribute( \"start\" ) ):\r\n condition += \"start=%s \" % node.getAttribute( \"start\" ).strip()\r\n if ( node.hasAttribute( \"end\" ) ):\r\n condition += \"end=%s \" % node.getAttribute( \"end\" ).strip()\r\n if ( node.hasAttribute( \"acceleration\" ) ):\r\n condition += \"acceleration=%s \" % node.getAttribute( \"acceleration\" ).strip()\r\n if ( node.hasAttribute( \"center\" ) ):\r\n condition += \"center=%s \" % node.getAttribute( \"center\" ).strip()\r\n if ( node.hasAttribute( \"condition\" ) ):\r\n condition += \"condition=%s \" % node.getAttribute( 
\"condition\" ).strip()\r\n if ( node.hasAttribute( \"reversible\" ) ):\r\n condition += \"reversible=%s \" % node.getAttribute( \"reversible\" ).strip()\r\n animation_tags += [ ( node.firstChild.nodeValue, condition.strip().lower(), ) ]\r\n elif (node.hasChildNodes()):\r\n if (node.tagName.lower() == \"type\"): control_type = node.firstChild.nodeValue\r\n if ( not node.tagName.lower() in current_control ):\r\n current_control[ node.tagName.lower() ] = node.firstChild.nodeValue\r\n node = self.NextSiblingElement( node, None )\r\n \r\n # setup the controls settings and defaults if necessary\r\n if ( control_type ):\r\n # the following apply to all controls\r\n if ( not \"description\" in current_control ): current_control[ \"description\" ] = control_type\r\n if ( \"posx\" in current_control ): current_control[ \"posx\" ] = int( current_control[ \"posx\" ] ) + coord_posx\r\n else: current_control[ \"posx\" ] = coord_posx\r\n if ( \"posy\" in current_control ): current_control[ \"posy\" ] = int( current_control[ \"posy\" ] ) + coord_posy\r\n else: current_control[ \"posy\" ] = coord_posy\r\n if ( \"width\" in current_control ): current_control[ \"width\" ] = int( current_control[ \"width\" ] )\r\n else: current_control[ \"width\" ] = 250\r\n if ( \"height\" in current_control ): current_control[ \"height\" ] = int( current_control[ \"height\" ] )\r\n else: current_control[ \"height\" ] = 100\r\n if ( not \"onup\" in current_control ): current_control[ \"onup\" ] = current_control[ \"id\" ]\r\n if ( not \"ondown\" in current_control ): current_control[ \"ondown\" ] = current_control[ \"id\" ]\r\n if ( not \"onleft\" in current_control ): current_control[ \"onleft\" ] = current_control[ \"id\" ]\r\n if ( not \"onright\" in current_control ): current_control[ \"onright\" ] = current_control[ \"id\" ]\r\n if ( visible_tags ): current_control[ \"visible\" ] = self.GetConditionalVisibility( visible_tags )\r\n else: current_control[ \"visible\" ] = \"true\"\r\n if ( enable_tags ): current_control[ \"enable\" ] = self.GetConditionalVisibility( enable_tags )\r\n else: current_control[ \"enable\" ] = \"true\"\r\n if ( not \"allowhiddenfocus\" in current_control ): current_control[ \"allowhiddenfocus\" ] = \"false\"\r\n current_control[ \"allowhiddenfocus\"] = current_control[ \"allowhiddenfocus\" ] in [ \"true\", \"yes\", \"1\" ]\r\n if ( not \"onclick\" in current_control ): current_control[ \"onclick\" ] = \"\"\r\n if ( not \"onfocus\" in current_control ): current_control[ \"onfocus\" ] = \"\"\r\n if ( animation_tags ): current_control[ \"animation\" ] = animation_tags\r\n else: current_control[ \"animation\" ] = \"\"\r\n\r\n if ( control_type == \"image\" or control_type == \"label\" or control_type == \"fadelabel\" or control_type == \"button\" or control_type == \"checkmark\" or control_type == \"textbox\" ):\r\n current_control[ \"info\" ] = info_tags\r\n \r\n if ( control_type == \"label\" or control_type == \"fadelabel\" or control_type == \"button\" or control_type == \"checkmark\" or control_type == \"textbox\" or control_type == \"list\" or control_type == \"listcontrol\" ):\r\n if ( label_tags ): current_control[ \"label\" ] = label_tags\r\n else: current_control[ \"label\" ] = [ \"\" ]\r\n if ( not \"shadowcolor\" in current_control ): current_control[ \"shadowcolor\" ] = \"\"\r\n if ( not \"font\" in current_control): current_control[ \"font\" ] = \"font13\"\r\n if ( not \"textcolor\" in current_control ): current_control[ \"textcolor\" ] = \"FFFFFFFF\"\r\n\r\n if ( control_type == 
\"label\" or control_type == \"fadelabel\" or control_type == \"button\" or control_type == \"checkmark\" or control_type == \"list\" or control_type == \"listcontrol\" ):\r\n if (not \"align\" in current_control ): current_control[ \"align\" ] = \"left\"\r\n try: current_control[\"align\"] = [ \"left\", \"right\", \"center\" ].index( current_control[\"align\"] )\r\n except: current_control[\"align\"] = 0\r\n if ( not \"aligny\" in current_control ): current_control[ \"aligny\" ] = 0\r\n current_control[ \"aligny\"] = ( current_control[ \"aligny\" ] in [ \"center\" ] ) * 4\r\n current_control[ \"align\" ] += current_control[ \"aligny\" ]\r\n\r\n if ( control_type == \"label\" or control_type == \"button\" or control_type == \"checkmark\" ):\r\n if ( not \"disabledcolor\" in current_control ): current_control[ \"disabledcolor\" ] = \"60FFFFFF\"\r\n\r\n if ( control_type == \"label\" or control_type == \"button\" ):\r\n if ( not \"angle\" in current_control ): current_control[ \"angle\" ] = 0\r\n else: current_control[ \"angle\" ] = int( current_control[ \"angle\" ] )\r\n\r\n if ( control_type == \"list\" or control_type == \"button\" or control_type == \"listcontrol\" ):\r\n if (not \"texturefocus\" in current_control ): current_control[ \"texturefocus\" ] = \"\"\r\n elif ( current_control[ \"texturefocus\" ][ 0 ] == \"\\\\\" ): current_control[ \"texturefocus\" ] = os.path.join( image_path, current_control[ \"texturefocus\" ][ 1 : ] )\r\n if ( not \"texturenofocus\" in current_control ): current_control[ \"texturenofocus\" ] = \"\"\r\n elif ( current_control[ \"texturenofocus\" ][ 0 ] == \"\\\\\" ): current_control[ \"texturenofocus\" ] = os.path.join( image_path, current_control[ \"texturenofocus\" ][ 1 : ] )\r\n \r\n if ( control_type == \"image\" ):\r\n try: current_control[ \"aspectratio\" ] = [ \"stretch\", \"scale\", \"keep\" ].index( current_control[ \"aspectratio\" ] )\r\n except: current_control[ \"aspectratio\" ] = 0\r\n if (not \"colorkey\" in current_control ): current_control[ \"colorkey\" ] = \"\"\r\n if (not \"colordiffuse\" in current_control ): current_control[ \"colordiffuse\" ] = \"0xFFFFFFFF\"\r\n if (not \"texture\" in current_control ): current_control[ \"texture\" ] = \"\"\r\n elif ( current_control[ \"texture\" ][ 0 ] == \"\\\\\" ): current_control[ \"texture\" ] = os.path.join( image_path, current_control[ \"texture\" ][ 1 : ] )\r\n\r\n elif ( control_type == \"label\" ):\r\n if ( not \"haspath\" in current_control ): current_control[ \"haspath\" ] = \"false\"\r\n current_control[ \"haspath\"] = current_control[ \"haspath\" ] in [ \"true\", \"yes\", \"1\" ]\r\n if ( \"number\" in current_control ): current_control[ \"label\" ][ 0 ] = [ current_control[ \"number\" ] ]\r\n\r\n elif (control_type == \"button\"):\r\n if ( not \"textoffsetx\" in current_control ): current_control[ \"textoffsetx\" ] = 0\r\n else: current_control[ \"textoffsetx\" ] = int( current_control[ \"textoffsetx\" ] )\r\n if ( not \"textoffsety\" in current_control ): current_control[ \"textoffsety\" ] = 0\r\n else: current_control[ \"textoffsety\" ] = int( current_control[ \"textoffsety\" ] )\r\n if ( not \"focusedcolor\" in current_control ): current_control[ \"focusedcolor\" ] = current_control[ \"textcolor\" ]\r\n\r\n elif ( control_type == \"checkmark\" ):\r\n if (not \"texturecheckmark\" in current_control ): current_control[ \"texturecheckmark\" ] = \"\"\r\n elif ( current_control[ \"texturecheckmark\" ][ 0 ] == \"\\\\\" ): current_control[ \"texturecheckmark\" ] = os.path.join( image_path, 
current_control[ \"texturecheckmark\" ][ 1 : ] )\r\n if (not \"texturecheckmarknofocus\" in current_control ): current_control[ \"texturecheckmarknofocus\" ] = \"\"\r\n elif ( current_control[ \"texturecheckmarknofocus\" ][ 0 ] == \"\\\\\" ): current_control[ \"texturecheckmarknofocus\" ] = os.path.join( image_path, current_control[ \"texturecheckmarknofocus\" ][ 1 : ] )\r\n if ( not \"markwidth\" in current_control ): current_control[ \"markwidth\" ] = 20\r\n else: current_control[ \"markwidth\" ] = int( current_control[ \"markwidth\" ] )\r\n if ( not \"markheight\" in current_control ): current_control[ \"markheight\" ] = 20\r\n else: current_control[ \"markheight\" ] = int( current_control[ \"markheight\" ] )\r\n\r\n elif ( control_type == \"progress\" ):\r\n if ( not \"texturebg\" in current_control ): current_control[ \"texturebg\" ] = \"\"\r\n elif ( current_control[ \"texturebg\" ][ 0 ] == \"\\\\\" ): current_control[ \"texturebg\" ] = os.path.join( image_path, current_control[ \"texturebg\" ][ 1 : ] )\r\n if ( not \"lefttexture\" in current_control ): current_control[ \"lefttexture\" ] = \"\"\r\n elif ( current_control[ \"lefttexture\" ][ 0 ] == \"\\\\\" ): current_control[ \"lefttexture\" ] = os.path.join( image_path, current_control[ \"lefttexture\" ][ 1 : ] )\r\n if ( not \"midtexture\" in current_control ): current_control[ \"midtexture\" ] = \"\"\r\n elif ( current_control[ \"midtexture\" ][ 0 ] == \"\\\\\" ): current_control[ \"midtexture\" ] = os.path.join( image_path, current_control[ \"midtexture\" ][ 1 : ] )\r\n if ( not \"righttexture\" in current_control ): current_control[ \"righttexture\" ] = \"\"\r\n elif ( current_control[ \"righttexture\" ][ 0 ] == \"\\\\\" ): current_control[ \"righttexture\" ] = os.path.join( image_path, current_control[ \"righttexture\" ][ 1 : ] )\r\n if ( not \"overlaytexture\" in current_control ): current_control[ \"overlaytexture\" ] = \"\"\r\n elif ( current_control[ \"overlaytexture\" ][ 0 ] == \"\\\\\" ): current_control[ \"overlaytexture\" ] = os.path.join( image_path, current_control[ \"overlaytexture\" ][ 1 : ] )\r\n\r\n elif ( control_type == \"list\" or control_type == \"listcontrol\" ):\r\n current_control[ \"label2\" ] = label2_tags\r\n current_control[ \"image\" ] = image_tags\r\n if (not \"selectedcolor\" in current_control ): current_control[ \"selectedcolor\" ] = \"FFFFFFFF\"\r\n if (not \"itemwidth\" in current_control ): current_control[ \"itemwidth\" ] = 20\r\n else: current_control[ \"itemwidth\" ] = int( current_control[ \"itemwidth\" ] )\r\n if (not \"itemheight\" in current_control ): current_control[ \"itemheight\" ] = 20\r\n else: current_control[ \"itemheight\" ] = int( current_control[ \"itemheight\" ] )\r\n if (not \"textureheight\" in current_control ): current_control[ \"textureheight\" ] = 20\r\n else: current_control[ \"textureheight\" ] = int( current_control[ \"textureheight\" ] )\r\n if (not \"textxoff\" in current_control ): current_control[ \"textxoff\" ] = 0\r\n else: current_control[ \"textxoff\" ] = int( current_control[ \"textxoff\" ] )\r\n if (not \"textyoff\" in current_control ): current_control[ \"textyoff\" ] = 0\r\n else: current_control[ \"textyoff\" ] = int( current_control[ \"textyoff\" ] )\r\n if (not \"spacebetweenitems\" in current_control ): current_control[ \"spacebetweenitems\" ] = 0\r\n else: current_control[ \"spacebetweenitems\" ] = int( current_control[ \"spacebetweenitems\" ] )\r\n if ( not \"hidespinner\" in current_control ): current_control[ \"hidespinner\" ] = \"false\"\r\n 
current_control[ \"hidespinner\"] = current_control[ \"hidespinner\" ] in [ \"true\", \"yes\", \"1\" ]\r\n if ( not \"image\" in current_control ): current_control[ \"image\" ] = [ \" \" ]\r\n for img in range( len( current_control[ \"image\" ] ) ):\r\n if ( current_control[ \"image\" ][ img ][ 0 ] == \"\\\\\" ): current_control[ \"image\" ][ img ] = os.path.join( image_path, current_control[ \"image\" ][ img ][ 1 : ] )\r\n\r\n ok = self._add_control(current_control)\r\n if ( not ok ): raise\r\n ##control = self.NextSiblingElement( control, None )\r\n except:\r\n succeeded = False\r\n try: skindoc.unlink()\r\n except: pass\r\n return succeeded", "def extract_cuewords(self, cuewords, xml_file_path):\n\n # Create output files\n if not os.path.exists(CUEWORDS_DATA_PATH):\n self.create_directories(CUEWORDS_DATA_PATH)\n try:\n file_output = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE, 'w', encoding='utf8')\n file_output_pos_tagged = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE_POS_TAGGED,\n 'w', encoding='utf8')\n\n except FileNotFoundError:\n print('Please set correct filenames')\n\n # Empty lists for collecting data per file\n cueword_ids = []\n cuewords = []\n\n # Empty list to collect data for all files\n all_cuewords = []\n all_cuewords_pos_tagged = []\n\n print('Extracting cuewords from:', xml_file_path, 'to:', CUEWORDS_DATA_PATH+CUEWORDS_FILE)\n\n if not os.path.exists(xml_file_path):\n self.create_directories(xml_file_path)\n\n # Go through all files in xml_file_path directory\n for file in os.listdir(xml_file_path):\n\n # For each file, open, parseXML\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n file_input = open(file, 'r', encoding='utf8')\n file_input = BeautifulSoup(file_input, 'xml')\n\n # Collect frames, get ids\n for frame in file_input.find_all('frame', {'name' : NEGATION_FRAME_NAME}):\n for target in frame.find_all('target'):\n for fenode in target.find_all('fenode'):\n cueword_ids.insert(0, fenode.get('idref'))\n\n # Find all splitwords\n for splitword in file_input.find_all('splitword'):\n cueword_ids.insert(0, splitword.get('idref'))\n\n # Find all terminals, check if its ID is in cueword_ids[]\n for terminal in file_input.find_all('t'):\n if terminal.get('id') in cueword_ids:\n all_cuewords.insert(0, terminal.get('word').lower())\n all_cuewords_pos_tagged.insert(0, terminal.get('word').lower()+\n '\\t'+terminal.get('pos'))\n\n # clear list for next document\n cueword_ids = []\n cuewords = []\n\n # Sort final list\n all_cuewords = sorted(set(all_cuewords))\n all_cuewords_pos_tagged = sorted(set(all_cuewords_pos_tagged))\n\n # Write cuewords without duplicates to file:\n for cueword in all_cuewords:\n file_output.write(cueword+'\\n')\n\n for cueword in all_cuewords_pos_tagged:\n file_output_pos_tagged.write(cueword+'\\n')\n\n file_output.close()\n file_output_pos_tagged.close()\n\n print('Cuewords extracted to:', file_output.name)\n print('Cuewords extracted and POS tagged to:', file_output_pos_tagged.name)\n print('Done!')\n\n return cuewords", "def _populate_from_xml_file(self, xml):\n '''\n example from API: http://www.ga.gov.au/www/argus.argus_api.survey?pSurveyNo=921\n\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n 
<SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n <LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n <DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n <RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n # turn the XML doc into a Python object\n root = objectify.fromstring(xml)\n\n if hasattr(root.ROW, 'SURVEYNAME'):\n self.survey_name = root.ROW.SURVEYNAME\n if hasattr(root.ROW, 'STATE'):\n self.state = root.ROW.STATE\n if hasattr(root.ROW, 'OPERATOR'):\n self.operator = root.ROW.OPERATOR\n if hasattr(root.ROW, 'CONTRACTOR'):\n self.contractor = root.ROW.CONTRACTOR\n if hasattr(root.ROW, 'PROCESSOR'):\n self.processor = root.ROW.PROCESSOR\n if hasattr(root.ROW, 'SURVEY_TYPE'):\n self.survey_type = root.ROW.SURVEY_TYPE\n if hasattr(root.ROW, 'DATATYPES'):\n self.data_types = root.ROW.DATATYPES\n if hasattr(root.ROW, 'VESSEL'):\n self.vessel = root.ROW.VESSEL\n if hasattr(root.ROW, 'VESSEL_TYPE'):\n self.vessel_type = root.ROW.VESSEL_TYPE\n if hasattr(root.ROW, 'RELEASEDATE'):\n self.release_date = datetime.strptime(root.ROW.RELEASEDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.RELEASEDATE.text is not None else None\n if hasattr(root.ROW, 'ONSHORE_OFFSHORE'):\n self.onshore_offshore = root.ROW.ONSHORE_OFFSHORE\n if hasattr(root.ROW, 'STARTDATE'):\n self.start_date = datetime.strptime(root.ROW.STARTDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.STARTDATE.text is not None else None\n if hasattr(root.ROW, 'ENDDATE'):\n self.end_date = datetime.strptime(root.ROW.ENDDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.ENDDATE.text is not None else None\n if hasattr(root.ROW, 'WLONG'):\n self.w_long = root.ROW.WLONG\n if hasattr(root.ROW, 'ELONG'):\n self.e_long = root.ROW.ELONG\n if hasattr(root.ROW, 'SLAT'):\n self.s_lat = root.ROW.SLAT\n if hasattr(root.ROW, 'NLAT'):\n self.n_lat = root.ROW.NLAT\n if hasattr(root.ROW, 'LINE_KM'):\n self.line_km = root.ROW.LINE_KM\n if hasattr(root.ROW, 'TOTAL_KM'):\n self.total_km = root.ROW.TOTAL_KM\n if hasattr(root.ROW, 'LINE_SPACING'):\n self.line_spacing = root.ROW.LINE_SPACING\n if hasattr(root.ROW, 'LINE_DIRECTION'):\n self.line_direction = root.ROW.LINE_DIRECTION\n if hasattr(root.ROW, 'TIE_SPACING'):\n self.tie_spacing = root.ROW.TIE_SPACING\n if hasattr(root.ROW, 'SQUARE_KM'):\n self.square_km = root.ROW.SQUARE_KM\n if hasattr(root.ROW, 'CRYSTAL_VOLUME'):\n self.crystal_volume = root.ROW.CRYSTAL_VOLUME\n if hasattr(root.ROW, 'UP_CRYSTAL_VOLUME'):\n self.up_crystal_volume = root.ROW.UP_CRYSTAL_VOLUME\n if hasattr(root.ROW, 'DIGITAL_DATA'):\n self.digital_data = root.ROW.DIGITAL_DATA\n if hasattr(root.ROW, 'GEODETIC_DATUM'):\n self.geodetic_datum = root.ROW.GEODETIC_DATUM\n if hasattr(root.ROW, 'ASL'):\n self.asl = root.ROW.ASL\n if hasattr(root.ROW, 'AGL'):\n self.agl = root.ROW.AGL\n if hasattr(root.ROW, 'MAG_INSTRUMENT'):\n self.mag_instrument = root.ROW.MAG_INSTRUMENT\n if hasattr(root.ROW, 'RAD_INSTRUMENT'):\n 
self.rad_instrument = root.ROW.RAD_INSTRUMENT", "def load_raw_data(self, input_files):\n\n log.debug(f\"Loading dataset {input_files}\") \n print(f\"Loading dataset\")\n\n # Load stroke information from XML files\n for file in input_files:\n new_strokeset = strokeset.StrokeSet(file)\n self.strokesets.append(new_strokeset)\n self.stroke_matrix.append(new_strokeset.as_delta_array())\n self.stroke_ascii.append(new_strokeset.get_text())\n\n done_msg = \"Finished parsing dataset. Imported {} lines\".format(len(self.get_strokesets()))\n print (done_msg)\n log.info(done_msg)", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def _parse(self):\n tree = etree.parse(self.filename)\n root_node = tree.getroot()\n\n text_node = root_node.find(\"TEXT\")\n\n # Get text\n self.text = self._extract_text(text_node)\n\n # Get and create Event objects\n self.events = self._get_and_create_event_objects(text_node, root_node)\n\n # Get and create Timex objects\n self.timex = self._get_and_create_timex_objects(text_node, root_node)\n\n # Create Relation objects and link them\n self.relations = self._get_and_create_relation_objects(root_node)\n\n # Build text structure. Must be called last.\n self.text_structure = self._get_and_build_text_structure()", "def preprocess(self):\n # Validate the root element type if the subclass wants us to.\n # This is hard to do elsewhere, since the element handlers don't\n # know where they are in the XML document.\n if self.requiredRootElement is not None:\n rootElement = None\n if self.xml.nodeType == self.xml.DOCUMENT_NODE:\n rootElement = self.xml.documentElement\n elif self.xml.nodeType == self.xml.ELEMENT_NODE:\n rootElement = self.xml\n\n if (not rootElement) or rootElement.nodeName != self.requiredRootElement:\n raise UnknownElementError(\"Missing a required %r root element\" %\n self.requiredRootElement)\n\n setattr(self, self.resultAttribute, self.parse(self.xml))", "def __init__(self, xml_file):\n try:\n # Load xml file\n fd = resource_stream(__name__, 'map/{}'.format(xml_file))\n self.spec = parse(fd)\n self.remove_whitespace_nodes(self.spec, True)\n fd.close()\n\n except OSError:\n raise EDIFileNotFoundError(\n '{} is missing in the package'.format(xml_file)\n )", "def parser(self):\n\t\tdom = ET.parse(self.input_filename)\n\t\tself.doc = dom.getroot()", "def _process_includes(self):\r\n includes = self.tree.findall('.//include')\r\n for inc in includes:\r\n filename = inc.get('file')\r\n if filename is not None:\r\n try:\r\n # open using LoncapaSystem OSFS filestore\r\n ifp = self.capa_system.filestore.open(filename)\r\n except Exception as err:\r\n log.warning(\r\n 'Error %s in problem xml include: %s',\r\n err,\r\n etree.tostring(inc, pretty_print=True)\r\n )\r\n log.warning(\r\n 'Cannot find file %s in %s', filename, self.capa_system.filestore\r\n )\r\n # if debugging, don't fail - just log error\r\n # TODO (vshnayder): need real error handling, display to users\r\n if not self.capa_system.DEBUG:\r\n raise\r\n else:\r\n continue\r\n try:\r\n # read in and convert to XML\r\n incxml = etree.XML(ifp.read())\r\n except Exception as err:\r\n log.warning(\r\n 'Error %s in problem xml include: %s',\r\n err,\r\n etree.tostring(inc, pretty_print=True)\r\n )\r\n log.warning('Cannot parse XML in %s', (filename))\r\n # if debugging, don't fail - just log error\r\n # 
TODO (vshnayder): same as above\r\n if not self.capa_system.DEBUG:\r\n raise\r\n else:\r\n continue\r\n\r\n # insert new XML into tree in place of include\r\n parent = inc.getparent()\r\n parent.insert(parent.index(inc), incxml)\r\n parent.remove(inc)\r\n log.debug('Included %s into %s' % (filename, self.problem_id))", "def complete_xml_parsing(self):\n for item in self.entities:\n item.severity = self.parsed_severity\n item.cwes.extend(self.parsed_cwes)\n item.advisory_id = self.parsed_advisory_id\n item.attack_vector = self.parsed_attack_vector\n if self.parsed_cvss_base != '' and is_correct_score(self.parsed_cvss_base):\n cvss_v3 = CvssV3(base_sc=self.parsed_cvss_base)\n if self.parsed_cvss_temporal != '' \\\n and is_correct_score(self.parsed_cvss_temporal):\n cvss_v3.temporal_sc = self.parsed_cvss_temporal\n item.cvss_v3 = cvss_v3\n item.cvss_base_sc_v3 = self.parsed_cvss_base\n item.cvss_temporal_score_v3 = self.parsed_cvss_temporal\n item.published = self.parsed_date", "def read_categories_from_hcrml_files(self, files):\r\n categories = []\r\n \r\n for file in files:\r\n # Skip the current file\r\n if os.path.normpath(file) == os.path.normpath(self.hcrml_file):\r\n continue\r\n \r\n # Read the <output> element and append its categories to the result list\r\n reader = HcrmlReader(file, self.configuration)\r\n reader.doc = self._read_xml_doc_from_resource(file, self.configuration)\r\n # Read the output element, but ignore includes, since we are\r\n # currently reading from inside an include\r\n output_obj = reader.read_hcrml_output(ignore_includes=True)\r\n categories.extend(output_obj.categories)\r\n \r\n return categories", "def __init__(self,\n xml_dir,\n cui_dir,\n category,\n use_pickled_alphabet=False,\n alphabet_pickle=None,\n min_token_freq=0,\n use_tokens=False):\n\n self.xml_dir = xml_dir\n self.cui_dir = cui_dir\n self.category = category\n self.alphabet_pickle = alphabet_pickle\n self.min_token_freq = min_token_freq\n self.use_tokens = use_tokens\n\n self.token2int = {}\n\n if use_pickled_alphabet:\n pkl = open(alphabet_pickle, 'rb')\n self.token2int = pickle.load(pkl)", "def load_xml_files_erisk(local_dir, token_position=0):\n users = {}\n prep = Preprocessor()\n c = 0\n for dir_path, dir_names, filenames in os.walk(local_dir):\n for name in filenames:\n tok = name.split(\"_\")\n if token_position > 0:\n key = tok[0] + tok[token_position]\n else:\n key = tok[token_position]\n key = key.strip(\".xml\")\n full_file = os.path.abspath(os.path.join(dir_path, name))\n dom = ET.parse(full_file, parser=ET.XMLParser(encoding=\"utf-8\"))\n writing = dom.findall('WRITING')\n for w in writing:\n title = w.find('TITLE').text\n text = w.find('TEXT').text\n post = title + \" \" + text\n # preprocess text\n new_text = prep.tokenize_reddit(post)\n\n if key in users.keys():\n users[key] += new_text + ' end_ '\n else:\n users[key] = new_text + ' end_ '\n\n c += 1\n print(\"Preprocessed chunk: \", c)\n\n return users", "def cueword_statistics(self, xml_file_path):\n\n print('Extracting cueword statistics from:', xml_file_path, 'to:', CUEWORDS_STATS_PATH)\n\n if not os.path.exists(CUEWORDS_STATS_PATH):\n self.create_directories(CUEWORDS_STATS_PATH)\n\n # Go through all files in xml_file_path directory\n for file in os.listdir(xml_file_path):\n\n # For each file, open, parseXML\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n chapter_input = open(file, 'r', encoding='utf8')\n chapter_output = 
open(CUEWORDS_STATS_PATH+os.path.split(file)[-1]+'_stats.txt',\n 'w', encoding='utf8')\n\n # Try html.parser for ignoring lower and UPPER Tag and attr names\n chapter_input = BeautifulSoup(chapter_input, 'xml')\n\n for sentence in chapter_input.find_all('s'):\n # Terminals and Semantics\n #terminals = sentence.find_all('t')\n semantics = sentence.find('sem')\n\n # If splitwords exist\n if semantics.find('splitwords'):\n splitwords = semantics.find('splitwords')\n splitword = splitwords.find_all('splitword')\n\n # For each splitword\n for s_w in splitword:\n\n # Get reference id\n # <splitword idref=\"x\">\n splitword_idref = s_w.get('idref')\n\n # Get corresponding terminal and its POS tag\n # <t id=\"x\" pos=\"ADJA\" word=\"unerschütterlichen\"/>\n terminal = sentence.find(id=splitword_idref).get('word')\n pos = sentence.find(id=splitword_idref).get('pos')\n\n #print(splitword_idref,'\\t',terminal,'\\t',pos)\n chapter_output.write('\\n' '=SPLITWORDS=' '\\n')\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\n' %\n (splitword_idref, terminal, pos))\n\n # Find parts of splitword\n parts = s_w.find_all('part')\n part1 = parts[0].get('id')\n part2 = parts[1].get('id')\n\n for part in parts:\n part_word = part.get('word')\n part_id = part.get('id')\n #print(part_id,'\\t',part_word)\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (part_id, part_word))\n\n # Find corresponding frames\n frames = semantics.find('frames')\n frame = frames.find_all('frame')\n\n for frame_tag in frame:\n\n # skip first letter in case of n|Negation\n if frame_tag['name'] == NEGATION_FRAME_NAME:\n\n # Find target\n target = frame_tag.find('target')\n fenode = target.find('fenode')\n fenode_id = fenode.get('idref')\n\n # Check part ID if == target ID\n if part1 == fenode_id or part2 == fenode_id or splitword_idref == fenode_id:\n\n part_word = sentence.find(id=fenode_id).get('word')\n #print(fenode_id,'\\t','target')\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (fenode_id, 'TARGET'))\n\n\n # try and except blocks because of parser lowerUPPER errors\n\n #Find Negated\n try:\n negated = frame_tag.find('fe', {'name' : NEGATED_TAG_NAME})\n negated_fenode_idref = negated.find('fenode').get('idref')\n except AttributeError:\n negated = ''\n negated_fenode_idref = ''\n #print(negated_fenode_idref,'\\t',negated['name'].lower())\n try:\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (negated_fenode_idref, negated['name'].upper()))\n except TypeError:\n chapter_output.write('')\n\n #Find Scope\n try:\n scope = frame_tag.find('fe', {'name' : SCOPE_TAG_NAME})\n scope_fenode_idref = scope.find('fenode').get('idref')\n except AttributeError:\n scope = ''\n scope_fenode_idref = ''\n #print(scope_fenode_idref,'\\t',scope['name'].lower())\n try:\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (scope_fenode_idref, scope['name'].upper()))\n except TypeError:\n chapter_output.write('')\n\n #Find Focus\n try:\n focus = frame_tag.find('fe', {'name' : FOCUS_TAG_NAME})\n focus_fenode_idref = focus.find('fenode').get('idref')\n except AttributeError:\n focus = ''\n focus_fenode_idref = ''\n\n #print(focus_fenode_idref,'\\t',focus['name'].lower())\n try:\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (focus_fenode_idref, focus['name'].upper()))\n except TypeError:\n chapter_output.write('')\n\n #end if splitwords\n\n else:\n\n # If Frames exist\n if semantics.find('frames'):\n\n frames = semantics.find('frames')\n frame = frames.find_all('frame')\n\n chapter_output.write('\\n' '=SCOPE/FOCUS=' '\\n')\n\n for frame_tag in frame:\n\n # 
skip first letter in case of n|Negation\n if frame_tag['name'] == NEGATION_FRAME_NAME:\n\n #scope_list = []\n\n # Find target\n target = frame_tag.find('target')\n fenode = target.find('fenode')\n fenode_id = fenode.get('idref')\n\n word = sentence.find(id=fenode_id).get('word')\n pos = sentence.find(id=fenode_id).get('pos')\n\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\n' % (fenode_id, word, pos))\n chapter_output.write('%s' '\\t' '%s' '\\n' % (fenode_id, 'TARGET'))\n\n #Find Negated\n if frame_tag.find('fe', {'name' : NEGATED_TAG_NAME}):\n try:\n negated = frame_tag.find('fe', {'name' : NEGATED_TAG_NAME})\n negated_fenode_idref = negated.find('fenode').get('idref')\n negated_word = sentence.find(id=negated_fenode_idref).get('word')\n negated_pos = sentence.find(id=negated_fenode_idref).get('pos')\n except AttributeError:\n negated = ''\n negated_fenode_idref = ''\n negated_word = ''\n negated_pos = ''\n\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\t' '%s' '\\n'\n % (negated_fenode_idref, negated['name'].upper(), negated_word, negated_pos))\n\n\n # Resolve Terminals if Scope on a complex graph\n def resolve_non_terminals(idref):\n \"\"\" This function resolves a complex graph to\n a simple flat list of tokens.\n \"\"\"\n nonterminal = sentence.find(id=idref)\n edges = nonterminal.find_all('edge')\n edge_words = []\n for edge in edges:\n e_id = edge.get('idref')\n if sentence.find(id=e_id).get('word') is not None:\n try:\n edge_word = sentence.find(id=e_id).get('word')\n edge_words.append(edge_word)\n except:\n pass\n if sentence.find(id=e_id).get('word') is None:\n edge_words.append(resolve_non_terminals(e_id))\n\n return edge_words\n\n scopelist = []\n\n if frame_tag.find('fe', {'name' : SCOPE_TAG_NAME}):\n scope = frame_tag.find('fe', {'name' : SCOPE_TAG_NAME})\n scope_fenode = scope.find_all('fenode')\n for s_f in scope_fenode:\n s_id = s_f.get('idref')\n if sentence.find(id=s_id).get('word') is not None:\n try:\n scope_word = sentence.find(id=s_id).get('word')\n #scope_pos = scope_word.get('pos')\n scopelist.append(scope_word)\n except:\n pass\n if sentence.find(id=s_id).get('word') is None:\n pass\n else:\n pass\n\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\n'\n % (s_id, scope['name'].upper(), resolve_non_terminals(s_id)))\n\n focuslist = []\n\n\n #chapter_output.write(str(scope_list))\n #Find Focus\n if frame_tag.find('fe', {'name' : FOCUS_TAG_NAME}):\n focus = frame_tag.find('fe', {'name' : FOCUS_TAG_NAME})\n focus_fenode = focus.find_all('fenode')\n for f_f in focus_fenode:\n f_id = f_f.get('idref')\n if sentence.find(id=f_id).get('word') is not None:\n try:\n focus_word = sentence.find(id=f_id).get('word')\n focus_pos = sentence.find(id=f_id).get('pos')\n focuslist.append(focus_word)\n except:\n pass\n if sentence.find(id=f_id).get('word') is None:\n pass\n else:\n pass\n\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\t' '%s' '\\t' '%s' '\\n'\n % (f_id, focus['name'].upper(), focus_pos, focus_word, resolve_non_terminals(f_id)))\n\n\n chapter_output.close()\n\n print('Cuewords statistics extracted to:', chapter_output.name)", "def __init__(self, xml_text):\n logger.verbose(\"Load Version.xml\")\n self.parse(xml_text)", "def process(self):\r\n\r\n index = cindex.Index.create()\r\n self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing `%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in 
tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)", "def main():\n glob_pattern = \"{root}/{child}/*.xml\".format(root=MANCHESTER_ROOT, child=TARGET_CHILD)\n corpus_files = glob(glob_pattern)\n for filename in corpus_files:\n print(filename)\n to_csv(filtered_parent_freq_count([filename], 2))", "def preprocess(path):\n with open(path, \"r\", encoding=\"utf-8\", errors=\"ignore\") as f:\n pat = re.compile(\"&([^;\\\\W]*([^;\\\\w]|$))\")\n log(\"Processing the file '{}'...\".format(path))\n try:\n dom = parseString(re.sub(pat, \"&amp;\", f.read()))\n except ExpatError as e:\n msg = XmlHandler.ERROR_MESSAGE.format(path, e)\n log(msg)\n raise ValueError(e)\n else:\n log(\"Done!\")\n return dom", "def __init__(self, document_path):\n self.tree = etree.parse(document_path)\n self.doc_id = get_doc_id(self.tree)\n for element_type in ('nodes', 'edges', 'layers'):\n self._extract_elements(self.tree, element_type)", "def preprocess(self):\n print(\"processing content images...\")\n for dir_item in self.selectedContent:\n join_path = Path(self.content_image_dir,dir_item.replace('/','_'))\n if join_path.exists():\n print(\"processing %s\"%dir_item,end='\\r')\n images = join_path.glob('*.%s'%(self.subffix))\n for item in images:\n self.content_dataset.append(item)\n else:\n print(\"%s dir does not exist!\"%dir_item,end='\\r')\n label_index = 0\n print(\"processing style images...\")\n for class_item in self.selectedStyle:\n images = Path(self.style_image_dir).glob('%s/*.%s'%(class_item, self.subffix))\n for item in 
images:\n self.art_dataset.append([item, label_index])\n label_index += 1\n random.seed(self.random_seed)\n random.shuffle(self.content_dataset)\n random.shuffle(self.art_dataset)\n # self.dataset = images\n print('Finished preprocessing the Art Works dataset, total image number: %d...'%len(self.art_dataset))\n print('Finished preprocessing the Content dataset, total image number: %d...'%len(self.content_dataset))", "def parse_code_classes(self):\n # Step1 : Gather XML files list\n if not self._xml_files_list:\n self.parse_code_files(store_xml_files_list=True)\n\n # Step 2: Parse all corresponding XML files.\n classes, classes_per_file = parse_xml_files_list(ClassLevelParser, self._xml_files_list)\n return classes, classes_per_file", "def readSources(self):\n for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):\n # shall we just read the UFO here?\n filename = sourceElement.attrib.get('filename')\n # filename is a path relaive to the documentpath. resolve first.\n sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))\n sourceName = sourceElement.attrib.get('name')\n if sourceName is None:\n # if the source element has no name attribute\n # (some authoring tools do not need them)\n # then we should make a temporary one. We still need it for reference.\n sourceName = \"temp_master.%d\"%(sourceCount)\n self.reportProgress(\"prep\", 'load', sourcePath)\n if not os.path.exists(sourcePath):\n raise MutatorError(\"Source not found at %s\"%sourcePath)\n sourceObject = self._instantiateFont(sourcePath)\n # read the locations\n sourceLocationObject = None\n sourceLocationObject = self.locationFromElement(sourceElement)\n\n if sourceLocationObject is None:\n raise MutatorError(\"No location defined for source %s\"%sourceName)\n\n # read lib flag\n for libElement in sourceElement.findall('.lib'):\n if libElement.attrib.get('copy') == '1':\n self.libSource = sourceName\n\n # read the groups flag\n for groupsElement in sourceElement.findall('.groups'):\n if groupsElement.attrib.get('copy') == '1':\n self.groupsSource = sourceName\n\n # read the info flag\n for infoElement in sourceElement.findall(\".info\"):\n if infoElement.attrib.get('copy') == '1':\n self.infoSource = sourceName\n if infoElement.attrib.get('mute') == '1':\n self.muted['info'].append(sourceName)\n\n # read the features flag\n for featuresElement in sourceElement.findall(\".features\"):\n if featuresElement.attrib.get('copy') == '1':\n if self.featuresSource is not None:\n self.featuresSource = None\n else:\n self.featuresSource = sourceName\n\n mutedGlyphs = []\n for glyphElement in sourceElement.findall(\".glyph\"):\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n continue\n if glyphElement.attrib.get('mute') == '1':\n if not sourceName in self.muted['glyphs']:\n self.muted['glyphs'][sourceName] = []\n self.muted['glyphs'][sourceName].append(glyphName)\n\n for kerningElement in sourceElement.findall(\".kerning\"):\n if kerningElement.attrib.get('mute') == '1':\n self.muted['kerning'].append(sourceName)\n\n # store\n self.sources[sourceName] = sourceObject, sourceLocationObject\n self.reportProgress(\"prep\", 'done')", "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()", "def 
parse_xml(xml_file):\n logging.info(str(xml_file))\n global nipper_xml\n xml_tree = ElementTree.parse(xml_file)\n\n nipper_xml = xml_tree.getroot()", "def parse(self, limit=None):\n if limit is not None:\n logger.info(\"Only parsing first %d rows of each file\", limit)\n logger.info(\"Parsing files...\")\n\n if self.testOnly:\n self.testMode = True\n\n # the following will provide us the hash-lookups\n self._process_dbxref()\n self._process_cvterm()\n self._process_genotypes(limit)\n self._process_pubs(limit)\n # do this before environments to get the external ids\n self._process_environment_cvterm()\n self._process_environments()\n self._process_organisms(limit) # must be done before features\n self._process_organism_dbxref(limit)\n self._process_features(limit)\n self._process_phenotype(limit)\n self._process_phenotype_cvterm()\n # gets external mappings for features (genes, variants, etc)\n self._process_feature_dbxref(limit)\n # do this after organisms to get the right taxonomy\n self._process_stocks(limit)\n # figures out types of some of the features\n self._get_derived_feature_types(limit)\n\n # These are the associations amongst the objects above\n self._process_stockprop(limit)\n self._process_pub_dbxref(limit)\n self._process_phendesc(limit)\n self._process_feature_genotype(limit)\n self._process_feature_pub(limit)\n self._process_stock_genotype(limit)\n self._process_phenstatement(limit) # these are G2P associations\n\n self._process_feature_relationship(limit)\n\n self._process_disease_models(limit)\n # TODO add version info from file somehow\n # (in parser rather than during fetching)\n\n logger.info(\"Finished parsing.\")\n logger.info(\"Loaded %d nodes\", len(self.graph))\n return", "def main():\n processSetOfCerFiles(sys.argv[1:])", "def parse(filename):\n\n tree = etree.parse(filename)\n root = tree.getroot()\n # according to the structure of the xml article meta nested under \n # front then article-meta\n articleMeta = root[0][1]\n # pubmed central article id\n pmcId = ''\n # the author list, the list of names excluding corresponding\n # athor\n otherAuthors = []\n # the name and email of the corresponding authors\n cAuthors = []\n # container for all the author groups\n authorGroups = []\n \n for child in articleMeta:\n # find the pmc id\n if ((child.tag == 'article-id') and not(isEmpty(child.attrib))):\n if (child.attrib['pub-id-type'] == 'pmc'):\n pmcId = child.text\n # find the author group\n elif (child.tag == 'contrib-group'):\n authorGroups.append(child)\n # this child may contain important corresponding information\n elif (child.tag == 'author-notes'):\n authorNotes = child\n # find the publication date\n elif (child.tag == 'history'):\n for theDate in child:\n if ('date-type' in theDate.attrib and theDate.attrib['date-type'] == 'accepted'):\n #publiction date YEAR MONTH DAY\n if (theDate.find('year') != None):\n theYear = theDate.find('year').text\n else:\n theYear = 0\t\n if (theDate.find('month') != None):\n theMonth = theDate.find('month').text\n else:\n theMonth = 6\n if (theDate.find('day') != None):\n theDay = theDate.find('day').text\n else:\n theDay = 1\n\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n elif (child.tag == 'pub-date'): \n if ('pub-type' in child.attrib and (child.attrib['pub-type'] == 'ppub' or child.attrib['pub-type'] == 'epub')):\n #for grandchild in child: print(grandchild.tag)\n \n if (child.find('year') != None):\n theYear = 
child.find('year').text\n else:\n theYear = 0\n \n if (child.find('month') != None):\n theMonth = child.find('month').text\n else:\n theMonth = 6\n \n if (child.find('day') != None):\n theDay = child.find('day').text\n else:\n theDay = 1\t\t\t\t\t\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n case1 = False # will be used for post-processing, corr author identified but no email\n for authorGroup in authorGroups:\n # parse author group information\n for child in authorGroup:\n if (child.tag == 'contrib' and child.attrib['contrib-type'] == 'author'):\n # the first child is the name tag\n try:\n name = child[0].find('given-names').text + ' ' + child[0].find('surname').text\n except:\n return((-1,))\n if ('corresp' in child.attrib): # and child.attrib['corresp'] == 'yes'):\n # if it a corresponding author\n # check to see if there is email field\n if (len(child) > 2 and child[1].find('email') != None):\n data = (name, child[1].find('email').text)\n cAuthors.append(data)\n #else post-process this case: case(1)\n else:\n data = (name, 'null')\n cAuthors.append(data)\n case1 = True\n else: \n # handle EMBO style xml \n xrefList = findInSubtree(child, 'xref')\n if (len(xrefList) > 0):\n for xref in xrefList:\n if ('ref-type' in xref.attrib and xref.attrib['ref-type'] == 'corresp'):\n # this is an corresponding author\n data = (name, '')\n cAuthors.append(data)\n case1 = True\n if (case1 == False):\n otherAuthors.append(name) \n else:\n # if not a corresponding author\n otherAuthors.append(name)\n\n # not done yet, some corresponding author information are embedded in author-notes\n if (case1 and 'authorNotes' in locals()):\n i = 0\n # corresponding author identified but no email found\n for child in authorNotes:\n if (child.tag == 'corresp'):\n for grandchild in child:\n if (grandchild.tag == 'email'):\n if (i == len(cAuthors)): break\t\n cAuthors[i] = (cAuthors[i][0], grandchild.text)\n i = i + 1\n elif ('authorNotes' in locals()):\n # the linking information is embedded entirely in the text\n text = etree.tostring(authorNotes).strip().decode('utf-8')\n emailElements = findInSubtree(authorNotes, 'email')\n for name in otherAuthors:\n j = 0\n if (text.find(name) != -1 and j < len(emailElements)):\n data = (name, emailElements[j].text)\n cAuthors.append(data)\n otherAuthors.remove(name)\n j = j + 1\n\n # sanity check here, reject anything that may corrupt the database\n if ('pmcId' in locals() and 'publicationDate' in locals()):\n try:\n print(pmcId, otherAuthors, cAuthors, publicationDate)\n except:\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n else:\n return((-1,))", "def load_xml_startup_script(name):\n\n for dir in (\"%sshare/gps/support/core/\" % GPS.get_system_dir(),\n \"%sshare/gps/support/ui/\" % GPS.get_system_dir(),\n \"%sshare/gps/library/\" % GPS.get_system_dir(),\n \"%sshare/gps/plug-ins/\" % GPS.get_system_dir()):\n\n try:\n f = file(\"%s%s\" % (dir, name)).read()\n break\n except:\n f = None\n\n GPS.parse_xml(f)\n process_all_events()", "def main():\n # Specify path\n training_filepath = 'data/training.csv'\n testing_filepath = 'data/public_test_features.csv'\n\n # Check whether the specified path exists or not\n isExist = os.path.exists(training_filepath)\n if(isExist):\n print('Reading from ' + training_filepath)\n else:\n print('Training file not found in the app path.')\n exit()\n preprocess_file(training_filepath, 
'data/clean_training1.csv', True)\n # Check whether the specified path exists or not\n isExist = os.path.exists(testing_filepath)\n if(isExist):\n print('Reading from ' + testing_filepath)\n else:\n print('Testing file not found in the app path.')\n exit()\n preprocess_file(testing_filepath,'data/clean_testing1.csv', False)", "def transform(self, fileids=None, categories=None):\n # Make the target directory if it doesn't already exist\n if not os.path.exists(self.target):\n os.makedirs(self.target)\n\n # First shutil.copy anything in the root directory.\n self.replicate(self.corpus.root)\n\n # Resolve the fileids to start processing\n for fileid in self.fileids(fileids, categories):\n yield self.process(fileid)", "def preprocess_corpus(train_sents):\n global lookupLexiconDict\n lookupLexiconDict = {}\n \n lexiconDir = getcwd()+'\\\\data\\\\lexicon'\n filesList = [hfile for hfile in listdir(lexiconDir) if path.isfile(lexiconDir+'\\\\'+hfile) ]\n \n decision_tags = ['facility','product','musicartist']\n fileMappingDict = \\\n {\n 'architecture.museum':'facility',\n 'automotive.make':'product',\n 'automotive.model':'product',\n 'award.award':'musicartist',\n 'base.events.festival_series':'geo-loc',\n #'bigdict':'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@',\n 'book.newspaper':'company',\n 'broadcast.tv_channel':'tvshow',\n 'business.brand':'company',\n 'business.consumer_company':'company',\n 'business.consumer_product':'product',\n 'business.sponsor':'company',\n 'cap.1000':'geo-loc',\n 'cvg.computer_videogame':'product',\n 'cvg.cvg_developer':'company',\n 'cvg.cvg_platform':'product',\n 'education.university':'facility',\n 'english.stop':'O',\n 'firstname.5k':'person',\n 'government.government_agency':'company',\n 'internet.website':'company',\n 'lastname.5000':'person',\n 'location':'geo-loc',\n 'location.country':'geo-loc',\n 'lower.5000':'O',\n 'people.family_name':'person',\n 'people.person':'person',\n 'people.person.lastnames':'person', # <-----------------------------\n 'product':'product',\n 'sports.sports_league':'sportsteam',\n 'sports.sports_team':'sportsteam',\n 'time.holiday':'O',\n 'time.recurring_event':'O',\n 'transportation.road':'geo-loc',\n 'tv.tv_network':'tvshow',\n 'tv.tv_program':'tvshow',\n 'venture_capital.venture_funded_company':'company',\n 'venues':'geo-loc'\n }\n\n for lexFile in filesList:\n if lexFile not in fileMappingDict: continue\n print 'Processing ', lexFile\n \n with open(lexiconDir+'\\\\'+lexFile) as f:\n for line in f:\n line = line.lower().split()\n if len(line) == 1: low=0\n else:low=1\n for i in range(low,len(line)):\n key = tuple(line[:i+1])\n if key not in lookupLexiconDict:\n lookupLexiconDict[key] = [fileMappingDict[lexFile]]\n else:\n lookupLexiconDict[key].append(fileMappingDict[lexFile]) \n\n \n #pass ", "def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n all_attr_names = lines[1].split()\n for i, attr_name in enumerate(all_attr_names):\n self.attr2idx[attr_name] = i\n self.idx2attr[i] = attr_name\n\n lines = lines[2:]\n random.seed(1234)\n random.shuffle(lines)\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n values = split[1:]\n\n label = []\n for attr_name in self.selected_attrs:\n idx = self.attr2idx[attr_name]\n label.append(values[idx] == '1')\n\n if (i+1) < 4:\n self.test_dataset.append([filename, label])\n else:\n self.train_dataset.append([filename, label])", "def loadData(catalog):\n loadVideos(catalog)\n loadCategories(catalog)", "def __parse(self):\n # 
raw/objects: detect name, type, use major tag for type as parent node\n # raw/graphics: as object raw, but add TILE_PAGE\n # init: usually flat file, except\n # embark_profiles.txt: [PROFILE] is parent\n # interface.txt: [BIND] is parent (legacy will be flat)\n # world_gen.txt: [WORLD_GEN] is parent\n # Non-raw files (unsupported): init/arena.txt, subdirs of raw/objects\n parse_raw(self, self.read(self.filename))", "def parse_xml_file(self, filename):\n try:\n dom = parse(filename)\n process_includes(dom)\n except ExpatError, x:\n raise EzXMLError(\"Failed to parse: %s\" % x)\n return self.walkdom(dom.documentElement)", "def __init__(self, infile, runparser=True):\n super(FluoViewMosaic, self).__init__(infile)\n self.tree = self.validate_xml()\n self.mosaictrees = self.find_mosaictrees()\n if runparser:\n self.add_mosaics()", "def parse_file(self, file_path):\n\t\troot = ET.parse(file_path).getroot()\n\t\t## parse predictors\n\t\tpredictors = root.find('DataSpecification').find('Predictors')\n\t\tfor predictor in predictors.findall('predictor'):\n\t\t\tself.predictors.append(predictor.find('name').text)\n\t\t\tself.predictors_types.append(predictor.find('VariableType').text)\n\t\t## parse method\n\t\tmethod = root.find('Method')\n\t\tvariables = {}\n\t\tfor variable in method[0]:\n\t\t\ttag = variable.tag\n\t\t\tvalues = []\n\t\t\tfor v in variable:\n\t\t\t\tvalues.append(float(v.text))\n\t\t\tvariables[tag] = values\n\t\tself.variables = variables\n\t\tself.method_name = method[0].tag\n\t\t## parse pre-processing\n\t\tpreprocessing = root.find('Preprocessing')\n\t\tpreprocessing_methods = preprocessing.findall('PreprocessMethod')\n\t\tfor preprocessing_method in preprocessing_methods:\n\t\t\tself.preprocessing_methods.append(preprocessing_method.text)\n\t\tevaluation = root.find('Evaluation')\n\t\tresampling = evaluation.find('Resampling')\n\t\tcv = resampling.find('CrossValidation')\n\t\tself.k = cv[0].text\n\t\tself.metric = evaluation.find('Metric').text\n\t\tself.data_split = evaluation.find(\"DataSplit\").find('partitionRate').text\n\t\t## parse resampling\n\t\tself.plotting_file_name = root.find('Plotting').find('Plot').find('filename').text", "def parsexml(self):\n raise NotImplementedError", "def load_xml(self,filename):\n self.initvars()\n source = iter(ET.iterparse(filename, events = ('start','end')))\n self.name = source.next()[1].tag\n for event,elem in source:\n if event == 'end' and elem.tag == 'row':\n row = [None]*self.numcols()\n for name,val in elem.attrib.items():\n try:\n idx = self.getColIndex(name)\n except ColumnNotFoundError:\n idx = len(self.cols)\n row.append(None)\n # Add new column to the table\n self.cols.append(set([name]))\n for oldrow in self.data:\n oldrow.append(None)\n row[idx] = val\n self.data.append(row)\n self.initTypes()", "def parse(self, xml):\r\n try:\r\n xmldoc = minidom.parseString(xml)\r\n except:\r\n self.parent.log.error('ISY Could not parse programs, '\r\n + 'poorly formatted XML.')\r\n else:\r\n plastup = datetime.now()\r\n\r\n # get nodes\r\n features = xmldoc.getElementsByTagName('program')\r\n for feature in features:\r\n # id, name, and status\r\n pid = feature.attributes['id'].value\r\n pname = feature.getElementsByTagName('name')[0] \\\r\n .firstChild.toxml()\r\n try:\r\n pparent = feature.attributes['parentId'].value\r\n except:\r\n pparent = None\r\n pstatus = feature.attributes['status'].value == 'true'\r\n\r\n if feature.attributes['folder'].value == 'true':\r\n # folder specific parsing\r\n ptype = 'folder'\r\n data = 
{'pstatus': pstatus}\r\n\r\n else:\r\n # program specific parsing\r\n ptype = 'program'\r\n\r\n # last run time\r\n try:\r\n tag = 'lastRunTime'\r\n plastrun = feature.getElementsByTagName(tag)\r\n plastrun = plastrun[0].firstChild\r\n if plastrun is None:\r\n plastrun = _empty_time\r\n else:\r\n plastrun = datetime.strptime(\r\n plastrun.toxml(), '%Y/%m/%d %I:%M:%S %p')\r\n except:\r\n plastrun = _empty_time\r\n\r\n # last finish time\r\n try:\r\n tag = 'lastFinishTime'\r\n plastfin = feature.getElementsByTagName(tag)\r\n plastfin = plastfin[0].firstChild\r\n if plastfin is None:\r\n plastfin = _empty_time\r\n else:\r\n plastfin = datetime.strptime(\r\n plastfin.toxml(), '%Y/%m/%d %I:%M:%S %p')\r\n except:\r\n plastfin = _empty_time\r\n\r\n # enabled, run at startup, running\r\n if feature.attributes['enabled'].value == 'true':\r\n penabled = True\r\n else:\r\n penabled = False\r\n if feature.attributes['runAtStartup'].value == 'true':\r\n pstartrun = True\r\n else:\r\n pstartrun = False\r\n if feature.attributes['running'].value == 'idle':\r\n prunning = False\r\n else:\r\n prunning = True\r\n\r\n # create data dictionary\r\n data = {'pstatus': pstatus, 'plastrun': plastrun,\r\n 'plastfin': plastfin, 'penabled': penabled,\r\n 'pstartrun': pstartrun, 'prunning': prunning,\r\n 'plastup': plastup}\r\n\r\n # add or update object if it already exists\r\n if pid not in self.pids:\r\n if ptype == 'folder':\r\n pobj = Folder(self, pid, pname, **data)\r\n else:\r\n pobj = Program(self, pid, pname, **data)\r\n self.insert(pid, pname, pparent, pobj, ptype)\r\n else:\r\n pobj = self.getByID(pid).leaf\r\n pobj.update(data=data)\r\n\r\n self.parent.log.info('ISY Loaded/Updated Programs')", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def importDB ( c, xml ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n assert str ( type ( xml ) ) == \"<type 'instance'>\"\n for e in xml :\n if e.tag == \"Crisis\" :\n importCrisis ( c, e )\n elif e.tag == \"Organization\" :\n importOrg ( c, e )\n elif e.tag == \"Person\" :\n importPerson ( c, e )\n elif e.tag == \"CrisisKind\" :\n importCrisisKind ( c, e )\n elif e.tag == \"OrganizationKind\" :\n importOrgKind ( c, e )\n elif e.tag == \"PersonKind\" :\n importPersonKind ( c, e )", "def __loadPreProcessedData(self):\n le = joblib.load(self.le_filename)\n X = np.loadtxt(self.X_filename, delimiter=',').astype(int)\n raw_y = np.loadtxt(self.y_filename, delimiter=',').astype(int)\n y = le.inverse_transform(raw_y)\n ##Initialize atrtribute for this class\n self.le, self.X, self.y = le, X, y", "def start_requests(self):\n\n with open(os.path.join(os.path.dirname(__file__), \"../resources/mapemall_categories.csv\")) as categories:\n for category in csv.DictReader(categories):\n category_text=category[\"category\"]\n url=str(MapemallCrawlerSpider.start_urls[0])+category_text\n # The meta is used to send our search text into the parser as metadata\n yield scrapy.Request(url, callback = self.parse, meta = {\"category_text\": category_text})", "def import_xml(self, rootname, dirname=''):\n\n filepath = os.path.join(dirname, rootname+'.trip.xml')\n if os.path.isfile(filepath):\n # import trips\n self.trips.import_trips_xml(filepath, is_generate_ids=False)\n\n # now try to add routes to existing trips\n filepath = os.path.join(dirname, rootname+'.rou.xml')\n if os.path.isfile(filepath):\n self.trips.import_routes_xml(filepath, 
is_generate_ids=False, is_add=True)\n\n else:\n self.get_logger().w('import_xml: files not found:'+filepath, key='message')\n\n else:\n self.get_logger().w('import_xml: files not found:'+filepath, key='message')\n\n # no trip file exists, but maybe just a route file with trips\n filepath = os.path.join(dirname, rootname+'.rou.xml')\n if os.path.isfile(filepath):\n self.trips.import_routes_xml(filepath, is_generate_ids=False, is_add=False)\n\n else:\n self.get_logger().w('import_xml: files not found:'+filepath, key='message')", "def buildFromXml(self, inputFile, concs):\n\n reactionList, species = self.parse(inputFile, self.T, self.R)\n if len(species) != len(concs):\n raise ValueError(\"Size of concentration does not match to number of species!\")\n\n self.inputFile = inputFile\n self.reactionList = reactionList\n self.species = species\n self.concs = concs\n self.buildFromList(self.reactionList, self.species, self.concs)", "def process_raw_data(data_dir='/home/data/nbc/athena/athena-data/'):\n\n # Calls the process_corpus function, defined below\n # process_corpus reads in the text, performs abbreviation, spelling,\n # translation, and overall text Processing\n # process_corpus outputs the processed text for each file and the stemmed file\n for feature_source in ['abstract', 'full']:\n process_corpus(data_dir, feature_source)\n\n # Calls the label_data function, defined below\n # label_data reads in the metadata csv files, concatenates them, then\n # reads in the processed text files\n # label_data outputs a binary pmid by label metadata matrix\n label_data(data_dir)\n generate_gazetteer(data_dir)", "def parse(self, calibration_px=1.0):\n self.isParsingNeeded = False\n self.meta_data = {}\n self.data = []\n #CZI files\n if self.extend == '.czi':\n with czifile.CziFile(self.file_path) as czi:\n data = czi.asarray()\n Header_Metadata = str(czi).split('<ImageDocument>')\n string = '<ImageDocument>'+Header_Metadata[1]\n #print(string.strip(\"'\"))\n metadata = XMLET.fromstring(string.strip(\"'\"))\n try:\n #Query XML fore the metadata for picture shape(X;Y;Z-stacks).\n #Picture Shape.\n shapes = metadata.findall('./Metadata/Information/Image')[0]\n self.meta_data[\"ShapeSizeX\"] = int(shapes.findall('SizeX')[0].text)\n self.meta_data[\"ShapeSizeY\"] = int(shapes.findall('SizeY')[0].text)\n try:\n self.meta_data[\"ShapeSizeZ\"] = int(shapes.findall('SizeZ')[0].text)\n except:\n self.meta_data[\"ShapeSizeZ\"] = 1\n #Get the hyperstack dimension if the image is a hyperstack.\n try:\n self.meta_data[\"ShapeSizeC\"] = int(shapes.findall('SizeC')[0].text)\n except:\n self.meta_data[\"ShapeSizeC\"] = 1\n print(\"No info of color channels 1 assumed\")\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n PixelSizes = metadata.findall('./Metadata/Scaling/Items/Distance')\n self.meta_data['SizeX'] = float(PixelSizes[0].findall('Value')[0].text)*10**6\n self.meta_data['SizeY'] = float(PixelSizes[1].findall('Value')[0].text)*10**6\n self.meta_data['SizeZ'] = float(PixelSizes[2].findall('Value')[0].text)*10**6\n except(ValueError):\n print (\"Metadata fail\")\n\n #Tiff files.\n #Tiff files are problematic because they most likely wont contain the necessary metadata.\n #Try to get the shape info over common dimensions.\n elif self.extend == '.tif':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray()\n for shape in data.shape:\n if shape <5:\n self.meta_data[\"ShapeSizeC\"] = shape\n elif shape <40:\n self.meta_data[\"ShapeSizeZ\"] = shape\n else:\n 
self.meta_data[\"ShapeSizeY\"] = shape\n self.meta_data[\"ShapeSizeX\"] = shape\n\n #Read Lsm Files.\n elif self.extend == '.lsm':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray(memmap=True)\n headerMetadata = str(tif.pages[0].cz_lsm_scan_info)\n metadataList = headerMetadata.split(\"\\n*\")\n #Get image shape from lsm header SizeC=0 if not given.\n for shapes in metadataList:\n if \"images_height\" in shapes:\n self.meta_data[\"ShapeSizeX\"]= int(shapes.split()[-1])\n if \"images_width\" in shapes:\n self.meta_data[\"ShapeSizeY\"]= int(shapes.split()[-1])\n if \"images_number_planes\" in shapes:\n self.meta_data[\"ShapeSizeZ\"]= int(shapes.split()[-1])\n if \"images_number_channels\" in shapes:\n self.meta_data[\"ShapeSizeC\"]= int(shapes.split()[-1])\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n data = np.swapaxes(data,1,2)\n lsm_header = str(tif.pages[0].tags.cz_lsm_info)\n LsmInfo = lsm_header.split(\", \")\n i = 0\n #Query for pixel size.\n for element in LsmInfo:\n if \"e-0\" in element:\n i += 1\n if i == 1:\n self.meta_data['SizeX'] = (float(element)*10**6)\n if i == 2:\n self.meta_data['SizeY'] = (float(element)*10**6)\n if i == 3:\n self.meta_data['SizeZ'] = (float(element)*10**6)\n\n elif self.extend == \".png\":\n data = misc.imread(self.file_path)\n data = np.expand_dims(np.expand_dims(data[...,0],0),0)\n self.meta_data[\"ShapeSizeC\"] = 1\n self.meta_data[\"ShapeSizeZ\"] = 1\n self.meta_data[\"ShapeSizeX\"] = data.shape[2]\n self.meta_data[\"ShapeSizeY\"] = data.shape[3]\n self.meta_data[\"SizeZ\"] = 1\n self.meta_data[\"SizeX\"] = 0.01\n self.meta_data[\"SizeY\"] = 0.01\n #Bring all formats in the same shape.\n self.data = np.reshape(data,(self.meta_data[\"ShapeSizeC\"],self.meta_data[\"ShapeSizeZ\"],self.meta_data[\"ShapeSizeX\"],self.meta_data[\"ShapeSizeY\"]))\n self.meta_data['ChannelNum'] = self.meta_data[\"ShapeSizeC\"]\n #Set pixel size to manuell value if there are no metadata.\n if self.meta_data == {}:\n self.set_calibration(calibration_px)\n #Set the box for manuel calibration to the actuell pixel size.", "def from_file(self, xml_filepath: str) -> None:\n\t\t# Set filename and get root element of the tree\n\t\txml_filelist = xml_filepath.split(\"/\")\n\t\tself.xml_dir = xml_filelist[0]\n\t\tself.xml_name = xml_filelist[1]\n\t\troot: ET.Element = get_xml_file(xml_filepath)\n\t\t# Set name\n\t\tself.name = root.tag\n\n\t\t# Iterate over and add child nodes\n\t\tchild: ET.Element\n\t\tfor child in root:\n\t\t\t# Determine if child is a SubNode or a Node\n\t\t\t# If child has children or attributes it is a Node\n\t\t\tif len(child) or len(child.attrib):\n\t\t\t\t# Add Node\n\t\t\t\tself.add_node(Node(child))\n\t\t\telse:\n\t\t\t\tself.add_subnode(SubNode(child))", "def _load_celex(self, celex_root):\n # Form the filenames from the language\n pre = self.lang[0]\n mw_filename = os.path.join(celex_root, self.lang, pre + 'mw', pre + 'mw.cd')\n ml_filename = os.path.join(celex_root, self.lang, pre + 'ml', pre + 'ml.cd')\n\n # First pass for inflectional morphology from morphological words\n for line in open(mw_filename, 'rU'):\n # pylint: disable=W0612\n word, frequency, lemma, features, analysis = self._parse_mw(line.strip())\n self.word_lemmas[word].add(lemma)\n self.lemma_words[lemma].add(word)\n self.word_freqs[word] += frequency\n\n # Now get lemma information and derivational info\n for line in open(ml_filename, 'rU'):\n lemma, word, roots = self._parse_ml(line.strip())\n\n # TODO: Decide what to do about compounds. 
For now they are skipped by this.\n # Skip if there are multiple roots\n if roots and len(roots) > 1:\n continue\n\n # TODO: Decide what to do about multi-word entries. For now they are skipped by this.\n # Skip if there is a space in the word itself\n if ' ' in word:\n continue\n\n # Otherwise, there's just one root or none. Extract it if there is one, otherwise\n # call it its own root\n root = roots[0] if roots else word\n\n # Store this lemma\n self.lemma_heads[lemma] = word\n self.root_lemmas[root].add(lemma)", "def predict_all():\n\n # need train dir to list category names\n cfg = configparser.ConfigParser()\n cfg.read(sys.argv[1])\n base = os.environ['DATA_ROOT']\n eval_type = cfg.get('args', 'eval_type')\n train_xml_dir = os.path.join(base, cfg.get('data', 'train_xml_dir'))\n\n if eval_type == 'sparse':\n predict_sparse(train_xml_dir)\n else:\n predict_dense(train_xml_dir)", "def load(self,filename):\n basename = os.path.basename(filename)\n self.name, ext = os.path.splitext(basename)\n if ext == '.xml':\n self.load_xml(filename)\n elif ext == '.tsv':\n self.load_tsv_fast(filename)\n elif ext == '.tsvs':\n self.load_tsv(filename)\n else:\n print 'Error: only .xml and .tsv files are supported'", "def loadData(self):\r\n filePath, _ = QFileDialog.getOpenFileName(self, caption='Open XML file ...', filter=\"XML files (*.xml)\")\r\n\r\n if not filePath:\r\n return\r\n\r\n self.loadDataFromFile(filePath)", "def loadData(self):\r\n filePath, _ = QFileDialog.getOpenFileName(self, caption='Open XML file ...', filter=\"XML files (*.xml)\")\r\n\r\n if not filePath:\r\n return\r\n\r\n self.loadDataFromFile(filePath)", "def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)", "def process(self, path):\n\n # Extract filtered content and build source databases to process\n for source in Execute.SOURCES:\n spath = os.path.join(path, source)\n\n # Extract Posts.xml from 7za file\n decompress = Decompress()\n decompress(spath)\n\n posts = os.path.join(spath, \"Posts.xml\")\n filtered = os.path.join(spath, \"Filtered.xml\")\n\n # Filter Posts.xml file for matching questions\n sift = Sift()\n sift(posts, filtered)\n\n dbfile = os.path.join(spath, f\"{source}.db\")\n\n # Convert filtered Posts.xml file to SQLite db file\n xml2db = XML2DB()\n xml2db(filtered, dbfile)\n\n # Get list of all databases to consolidate\n return [\n os.path.join(path, source, f\"{source}.db\") for source in Execute.SOURCES\n ]", "def __init__(self, xmlstore, course_id, course_dir,\r\n error_tracker, parent_tracker,\r\n load_error_modules=True, **kwargs):\r\n self.unnamed = defaultdict(int) # category -> num of new url_names for that category\r\n self.used_names = defaultdict(set) # category -> set of used url_names\r\n id_generator = CourseLocationGenerator(course_id)\r\n\r\n # cdodge: adding the course_id as passed in for later reference rather than having to recomine the org/course/url_name\r\n self.course_id = course_id\r\n self.load_error_modules = load_error_modules\r\n\r\n def process_xml(xml):\r\n \"\"\"Takes an xml string, and returns a XBlock created from\r\n that xml.\r\n \"\"\"\r\n\r\n def make_name_unique(xml_data):\r\n \"\"\"\r\n Make sure that the url_name of xml_data is unique. If a previously loaded\r\n unnamed descriptor stole this element's url_name, create a new one.\r\n\r\n Removes 'slug' attribute if present, and adds or overwrites the 'url_name' attribute.\r\n \"\"\"\r\n # VS[compat]. 
Take this out once course conversion is done (perhaps leave the uniqueness check)\r\n\r\n # tags that really need unique names--they store (or should store) state.\r\n need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter',\r\n 'videosequence', 'poll_question', 'vertical')\r\n\r\n attr = xml_data.attrib\r\n tag = xml_data.tag\r\n id = lambda x: x\r\n # Things to try to get a name, in order (key, cleaning function, remove key after reading?)\r\n lookups = [('url_name', id, False),\r\n ('slug', id, True),\r\n ('name', Location.clean, False),\r\n ('display_name', Location.clean, False)]\r\n\r\n url_name = None\r\n for key, clean, remove in lookups:\r\n if key in attr:\r\n url_name = clean(attr[key])\r\n if remove:\r\n del attr[key]\r\n break\r\n\r\n def looks_like_fallback(url_name):\r\n \"\"\"Does this look like something that came from fallback_name()?\"\"\"\r\n return (url_name is not None\r\n and url_name.startswith(tag)\r\n and re.search('[0-9a-fA-F]{12}$', url_name))\r\n\r\n def fallback_name(orig_name=None):\r\n \"\"\"Return the fallback name for this module. This is a function instead of a variable\r\n because we want it to be lazy.\"\"\"\r\n if looks_like_fallback(orig_name):\r\n # We're about to re-hash, in case something changed, so get rid of the tag_ and hash\r\n orig_name = orig_name[len(tag) + 1:-12]\r\n # append the hash of the content--the first 12 bytes should be plenty.\r\n orig_name = \"_\" + orig_name if orig_name not in (None, \"\") else \"\"\r\n xml_bytes = xml.encode('utf8')\r\n return tag + orig_name + \"_\" + hashlib.sha1(xml_bytes).hexdigest()[:12]\r\n\r\n # Fallback if there was nothing we could use:\r\n if url_name is None or url_name == \"\":\r\n url_name = fallback_name()\r\n # Don't log a warning--we don't need this in the log. Do\r\n # put it in the error tracker--content folks need to see it.\r\n\r\n if tag in need_uniq_names:\r\n error_tracker(\"PROBLEM: no name of any kind specified for {tag}. Student \"\r\n \"state will not be properly tracked for this module. Problem xml:\"\r\n \" '{xml}...'\".format(tag=tag, xml=xml[:100]))\r\n else:\r\n # TODO (vshnayder): We may want to enable this once course repos are cleaned up.\r\n # (or we may want to give up on the requirement for non-state-relevant issues...)\r\n # error_tracker(\"WARNING: no name specified for module. xml='{0}...'\".format(xml[:100]))\r\n pass\r\n\r\n # Make sure everything is unique\r\n if url_name in self.used_names[tag]:\r\n # Always complain about modules that store state. If it\r\n # doesn't store state, don't complain about things that are\r\n # hashed.\r\n if tag in need_uniq_names:\r\n msg = (\"Non-unique url_name in xml. This may break state tracking for content.\"\r\n \" url_name={0}. Content={1}\".format(url_name, xml[:100]))\r\n error_tracker(\"PROBLEM: \" + msg)\r\n log.warning(msg)\r\n # Just set name to fallback_name--if there are multiple things with the same fallback name,\r\n # they are actually identical, so it's fragile, but not immediately broken.\r\n\r\n # TODO (vshnayder): if the tag is a pointer tag, this will\r\n # break the content because we won't have the right link.\r\n # That's also a legitimate attempt to reuse the same content\r\n # from multiple places. 
Once we actually allow that, we'll\r\n # need to update this to complain about non-unique names for\r\n # definitions, but allow multiple uses.\r\n url_name = fallback_name(url_name)\r\n\r\n self.used_names[tag].add(url_name)\r\n xml_data.set('url_name', url_name)\r\n\r\n try:\r\n # VS[compat]\r\n # TODO (cpennington): Remove this once all fall 2012 courses\r\n # have been imported into the cms from xml\r\n xml = clean_out_mako_templating(xml)\r\n xml_data = etree.fromstring(xml)\r\n\r\n make_name_unique(xml_data)\r\n\r\n descriptor = create_block_from_xml(\r\n etree.tostring(xml_data, encoding='unicode'),\r\n self,\r\n id_generator,\r\n )\r\n except Exception as err: # pylint: disable=broad-except\r\n if not self.load_error_modules:\r\n raise\r\n\r\n # Didn't load properly. Fall back on loading as an error\r\n # descriptor. This should never error due to formatting.\r\n\r\n msg = \"Error loading from xml. %s\"\r\n log.warning(\r\n msg,\r\n unicode(err)[:200],\r\n # Normally, we don't want lots of exception traces in our logs from common\r\n # content problems. But if you're debugging the xml loading code itself,\r\n # uncomment the next line.\r\n # exc_info=True\r\n )\r\n\r\n msg = msg % (unicode(err)[:200])\r\n\r\n self.error_tracker(msg)\r\n err_msg = msg + \"\\n\" + exc_info_to_str(sys.exc_info())\r\n descriptor = ErrorDescriptor.from_xml(\r\n xml,\r\n self,\r\n id_generator,\r\n err_msg\r\n )\r\n\r\n descriptor.data_dir = course_dir\r\n\r\n xmlstore.modules[course_id][descriptor.scope_ids.usage_id] = descriptor\r\n\r\n if descriptor.has_children:\r\n for child in descriptor.get_children():\r\n parent_tracker.add_parent(child.scope_ids.usage_id, descriptor.scope_ids.usage_id)\r\n\r\n # After setting up the descriptor, save any changes that we have\r\n # made to attributes on the descriptor to the underlying KeyValueStore.\r\n descriptor.save()\r\n return descriptor\r\n\r\n render_template = lambda template, context: u''\r\n\r\n # TODO (vshnayder): we are somewhat architecturally confused in the loading code:\r\n # load_item should actually be get_instance, because it expects the course-specific\r\n # policy to be loaded. 
For now, just add the course_id here...\r\n def load_item(usage_key):\r\n \"\"\"Return the XBlock for the specified location\"\"\"\r\n return xmlstore.get_item(usage_key)\r\n\r\n resources_fs = OSFS(xmlstore.data_dir / course_dir)\r\n\r\n super(ImportSystem, self).__init__(\r\n load_item=load_item,\r\n resources_fs=resources_fs,\r\n render_template=render_template,\r\n error_tracker=error_tracker,\r\n process_xml=process_xml,\r\n **kwargs\r\n )", "def setUp(self):\n input_files = glob.glob(PATH_TO_INPUT_FILES) # Read input (csv) files from current (sw/test) directory.\n if not self.sessionizing:\n self.sessionizing = Sessionizing()\n self.sessionizing.initialize(*input_files)\n if not self.sites_session_counter:\n self.merge_and_sort_input_files(*input_files)\n self.process_input_files()", "def __init__(self, article_xml):\n self.article_xml = article_xml\n self.links = self.grab_links()\n self.first_link = self.parse_first_link()", "def recipe12_8():\n from xml.parsers.xmlproc import utils, xmlval, xmldtd\n def validate_xml_file(xml_filename, app=None, dtd_filename=None):\n # build validating parser object with appropriate error handler\n parser=xmlval.Validator()\n parser.set_error_handler(utils.ErrorPrinter(parser))\n if dtd_filename is None:\n # DTD fiel specified, laod and set it as the DTD to use\n dtd=xmldtd.load_dtd(dtd_filename)\n parser.val.dtd = parser.dtd = parser.ent = dtd\n if app is not None:\n # Application processing requested, set application object\n parser.set_application(app)\n # everything being set correctly, finally perform the parsing\n parser.parse_resource(xml_filename) \n # if XML data is in a string s, use instead\n # parser.feed(s)\n # parser.close(s)", "def parseXML(xmlFile):\n\n tree = etree.parse(xmlFile)\n root = tree.getroot() \n transitionTable = dict()\n transitionTable = getTransitions(tree, root, transitionTable)\n return tree, root, transitionTable", "def load_xml(self, filepath=None, escapeNewline=True, maxSize=0, createMap=True):\n\n if filepath != None:\n self.mFilePath = filepath\n self.mReplaceNewline = escapeNewline\n\n if not os.path.exists(str(self.mFilePath)):\n print \"Warning: The filepath '%s' does not exist. 
Please make sure to pass the right path as load_xml('foo/bar')\" %filepath\n return False\n\n if not escapeNewline:\n try:\n input = StringIO(gzip.open(self.mFilePath, \"r\").read())\n except IOError:\n input = StringIO(open(self.mFilePath, \"r\").read())\n\n\n else:\n # replace Live's newline string with a dummy ###newline_escape###\n # we will revert this back on writing.\n # using the escapeNewline is slow on large documents\n\n try:\n file = gzip.open(self.mFilePath, \"r\").read()\n except IOError:\n file = open(self.mFilePath, \"r\").read()\n\n input = StringIO(re.sub(r\"&#x0[DA];\", \"###newline_escape###\", file))\n\n del(file) # save memory\n\n if maxSize:\n maxSize = maxSize*1048576 # in MB\n if len(input.getvalue()) > maxSize:\n print \"Warning: Large Document - skipping %s\" %filepath\n return False\n\n self.tree = ET.ElementTree(file=input)\n\n input.close()\n\n if createMap:\n self.child_to_parent_dict()\n\n return True", "def run_load(rootpath):\n global CSV_PATH\n CSV_PATH = rootpath+'/csv_files/'\n load_movies_details()\n load_movies_cast()\n load_movies_reviews()", "def loadData(self,filepath):\r\n self.removeCheckbuttons()\r\n self.tree = ET.parse(filepath)# Parse xml Tree\r\n self.data = self.tree.getroot().find(\"data\")# Find Data\r\n self.sensors = [i.text for i in self.tree.getroot().find('columns')]# Get Sensor Names\r\n for s in self.sensors:# Add Each Sensor as Option\r\n self.addOption(s)", "def parse_xml(file_name):\n events = (\"start\", \"end\")\n has_start = False\n json_dict = dict()\n # Traverse the XML\n for event, element in ET.iterparse(file_name, events=events, encoding=\"utf-8\", load_dtd=True, recover=True):\n print(event, element.tag, element.text)\n # Article node: initialize variables\n if event == 'start' and element.tag in INCLUDE_ARTICLES:\n has_start = True\n # Each article node has an unique attribute key\n publication_key = element.attrib['key']\n authors = list()\n publication_year = ''\n publication_type = str(element.tag)\n publication_title = ''\n # Author node\n elif event == 'start' and element.tag == 'author' and has_start:\n no_accent = lambda x: unidecode.unidecode(x) if x is not None else x\n authors.append(no_accent(element.text))\n # Title node\n elif event == 'start' and element.tag == 'title' and has_start:\n publication_title = element.text\n # Year node\n elif event == 'start' and element.tag == 'year' and has_start:\n publication_year = element.text\n # End article node: save information. 
This will never execute before initializing all of the variables\n elif has_start and event == 'end' and element.tag in INCLUDE_ARTICLES:\n json_dict[publication_key] = {\n '_id': publication_key,\n 'authors': authors,\n 'title': publication_title,\n 'year': publication_year,\n 'type': publication_type}\n has_start = False\n element.clear()\n else:\n # Remove element (otherwise there will be memory issues due to file size)\n element.clear()\n continue\n\n return json_dict", "def run(self, xml, **kwargs):\n kwargs['output'] = self.__graph__()\n if isinstance(xml, str):\n try:\n self.source = etree.XML(xml)\n except ValueError:\n try:\n self.source = etree.XML(xml.encode())\n except:\n raise ValueError(\"Cannot run error {}\".format(sys.exc_info()[0]))\n else:\n self.source = xml\n super(XMLProcessor, self).run(**kwargs)\n self.output = kwargs['output']\n return kwargs['output']", "def process_cvat_xml(xml_file, image_dir, output_dir):\n KNOWN_TAGS = {'box', 'image', 'attribute'}\n #output_dir = os.path.join(output_dir, \"Annotations\")\n os.makedirs(output_dir, exist_ok=True)\n cvat_xml = etree.parse(xml_file)\n\n basename = os.path.splitext( os.path.basename( xml_file ) )[0]\n\n tracks= cvat_xml.findall( './/track' )\n\n if (tracks is not None) and (len(tracks) > 0):\n frames = {}\n\n for track in tracks:\n trackid = int(track.get(\"id\"))\n label = track.get(\"label\")\n boxes = track.findall( './box' )\n for box in boxes:\n frameid = int(box.get('frame'))\n outside = int(box.get('outside'))\n ## occluded and pose are not tested within tracks\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified'\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n #keyframe = int(box.get('keyframe')) #currently unused\n xtl = float(box.get('xtl'))\n ytl = float(box.get('ytl'))\n xbr = float(box.get('xbr'))\n ybr = float(box.get('ybr'))\n \n frame = frames.get( frameid, {} )\n \n if outside == 0:\n frame[ trackid ] = { 'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr, 'label': label,\n 'pose': pose, 'truncated': occluded }\n\n frames[ frameid ] = frame\n\n width = int(cvat_xml.find('.//original_size/width').text)\n height = int(cvat_xml.find('.//original_size/height').text)\n\n # Spit out a list of each object for each frame\n for frameid in sorted(frames.keys()):\n print( frameid )\n\n image_name = \"%s_%08d.jpg\" % (basename, frameid) ## KM: Revisit this for tracks. Hardcoded?\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. 
Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n frame = frames[frameid]\n\n objids = sorted(frame.keys())\n\n for objid in objids:\n\n box = frame[objid]\n\n label = box.get('label')\n occluded = box.get('occluded')\n pose = box.get('pose')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n writer.save(os.path.join(anno_dir, anno_name))\n\n else:\n for img_tag in cvat_xml.findall('image'):\n ## Discard path component; we expect user to provide path to images directory.\n ## It is probably easier for users to provide full path to images directory\n ## rather than having to figure out how much of the path is embedded in the XML\n ## as a relative or absolute path by CVAT.\n image_name = os.path.basename(img_tag.get('name'))\n width = img_tag.get('width')\n height = img_tag.get('height')\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n unknown_tags = {x.tag for x in img_tag.iter()}.difference(KNOWN_TAGS)\n if unknown_tags:\n log.warn('Ignoring tags for image {}: {}'.format(image_path, unknown_tags))\n\n for box in img_tag.findall('box'):\n label = box.get('label')\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified' ## Default if not found\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = output_dir #os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n #print(\"Writing {} (image: {})\".format(anno_name, image_name))\n writer.save(os.path.join(anno_dir, anno_name))", "def parse(self) -> None:\n self._parse_zone_files()\n self._process_rules()\n self._process_zones()\n self._process_links()", "def prepare(self):\n self.parse_template()\n self.build_argparser()\n self.parse_arguments()\n self.render_template()\n self.update_relation()", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def _process(self):\n f = osp.join(self.processed_dir, 'pre_filter.pt')\n if osp.exists(f) and torch.load(f) != _repr(self.pre_filter):\n warnings.warn(\n \"The `pre_filter` argument differs from the one used in \"\n \"the pre-processed version of this dataset. 
If you want to \"\n \"make use of another pre-fitering technique, make sure to \"\n \"delete '{self.processed_dir}' first\")\n\n if files_exist(self.processed_paths): # pragma: no cover\n return\n\n if self.log and 'pytest' not in sys.modules:\n print('Processing...', file=sys.stderr)\n\n makedirs(self.processed_dir)\n self.process()\n\n path = osp.join(self.processed_dir, 'pre_filter.pt')\n torch.save(_repr(self.pre_filter), path)\n\n if self.log and 'pytest' not in sys.modules:\n print('Done!', file=sys.stderr)", "def _process_data(self, wiki_xml):\r\n MEDIAWIKI_NS = 'http://www.mediawiki.org/xml/export-0.3/'\r\n sequences = []\r\n lw_url_re = re.compile(r'\\[(http://lesswrong\\.com/lw/[^ ]+) [^\\]]+\\]')\r\n\r\n for page in wiki_xml.getroot().iterfind('.//{%s}page' % MEDIAWIKI_NS): # TODO: Change to use iterparse\r\n # Get the titles\r\n title = page.findtext('{%s}title' % MEDIAWIKI_NS)\r\n\r\n # See if this page is a sequence page\r\n sequence_elem = page.xpath(\"mw:revision[1]/mw:text[contains(., '[[Category:Sequences]]')]\", namespaces={'mw': MEDIAWIKI_NS})\r\n\r\n if sequence_elem:\r\n sequence_elem = sequence_elem[0]\r\n articles = []\r\n\r\n # Find all the lesswrong urls\r\n for match in lw_url_re.finditer(sequence_elem.text):\r\n article_url = UrlParser(match.group(1))\r\n\r\n # Only store the path to the article\r\n article_path = article_url.path\r\n\r\n # Ensure path ends in slash\r\n if article_path[-1] != '/':\r\n article_path += '/'\r\n\r\n articles.append(article_path)\r\n\r\n sequences.append({\r\n 'title': title,\r\n 'articles': articles\r\n })\r\n return {'sequences': sequences}", "def setUp(self):\r\n super(TestImport, self).setUp()\r\n self.content_dir = path(tempfile.mkdtemp())\r\n self.addCleanup(shutil.rmtree, self.content_dir)\r\n\r\n # Create good course xml\r\n self.good_dir = self.create_course_xml(self.content_dir, self.BASE_COURSE_KEY)\r\n\r\n # Create run changed course xml\r\n self.dupe_dir = self.create_course_xml(self.content_dir, self.DIFF_KEY)\r\n\r\n # Create course XML where TRUNCATED_COURSE.org == BASE_COURSE_ID.org\r\n # and BASE_COURSE_ID.startswith(TRUNCATED_COURSE.course)\r\n self.course_dir = self.create_course_xml(self.content_dir, self.TRUNCATED_KEY)", "def from_xml(cls, xml_data, system, id_generator):\r\n\r\n xml_object = etree.fromstring(xml_data)\r\n # VS[compat] -- just have the url_name lookup, once translation is done\r\n url_name = xml_object.get('url_name', xml_object.get('slug'))\r\n def_id = id_generator.create_definition(xml_object.tag, url_name)\r\n usage_id = id_generator.create_usage(def_id)\r\n\r\n # VS[compat] -- detect new-style each-in-a-file mode\r\n if is_pointer_tag(xml_object):\r\n # new style:\r\n # read the actual definition file--named using url_name.replace(':','/')\r\n filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))\r\n definition_xml = cls.load_file(filepath, system.resources_fs, def_id)\r\n else:\r\n definition_xml = xml_object\r\n filepath = None\r\n\r\n definition, children = cls.load_definition(definition_xml, system, def_id) # note this removes metadata\r\n\r\n # VS[compat] -- make Ike's github preview links work in both old and\r\n # new file layouts\r\n if is_pointer_tag(xml_object):\r\n # new style -- contents actually at filepath\r\n definition['filename'] = [filepath, filepath]\r\n\r\n metadata = cls.load_metadata(definition_xml)\r\n\r\n # move definition metadata into dict\r\n dmdata = definition.get('definition_metadata', '')\r\n if dmdata:\r\n 
metadata['definition_metadata_raw'] = dmdata\r\n try:\r\n metadata.update(json.loads(dmdata))\r\n except Exception as err:\r\n log.debug('Error in loading metadata %r', dmdata, exc_info=True)\r\n metadata['definition_metadata_err'] = str(err)\r\n\r\n # Set/override any metadata specified by policy\r\n cls.apply_policy(metadata, system.get_policy(usage_id))\r\n\r\n field_data = {}\r\n field_data.update(metadata)\r\n field_data.update(definition)\r\n field_data['children'] = children\r\n\r\n field_data['xml_attributes']['filename'] = definition.get('filename', ['', None]) # for git link\r\n kvs = InheritanceKeyValueStore(initial_values=field_data)\r\n field_data = KvsFieldData(kvs)\r\n\r\n return system.construct_xblock_from_class(\r\n cls,\r\n # We're loading a descriptor, so student_id is meaningless\r\n ScopeIds(None, xml_object.tag, def_id, usage_id),\r\n field_data,\r\n )", "def parse_file_sax(infile):\n \n from xml.sax import make_parser\n\n ## instantiate the XML handler\n handler = ModXMLHandler()\n parser = make_parser()\n ## associate the handler with the parser\n parser.setContentHandler(handler)\n\n #infile = open(file,'r')\n \n ## actually parse the file\n parser.parse(infile)\n infile.close()\n\n local = []\n fetch = []\n \n for data in [handler.getAlignment(), handler.getReference()] + handler.getDerived():\n ## data will be a 2-tuple with containing two strings. The first one is the name of a file\n ## and the second is the URL of that file\n \n ## sometimes, there won't be a URL (and data[1].strip() will be None) if the file can be fetched\n ## from the PDB\n if data[1].strip():\n loc = DBPuppet.getURL(data[1], data[0])\n ## append the name of the file you will write to the 'local' list\n local.append(loc)\n else:\n ## needs to be fetched from the web\n fetch.append(\"%s\" % str(data[0]) )\n \n ## open the files..\n openInChimera(local, fetch)" ]
[ "0.6510495", "0.58586115", "0.5815168", "0.5765223", "0.5643835", "0.5622581", "0.5620029", "0.55302376", "0.5526815", "0.5474749", "0.54636383", "0.54632366", "0.54615885", "0.5434923", "0.53978896", "0.53765374", "0.53668106", "0.53492767", "0.5326623", "0.52779377", "0.5275292", "0.5248684", "0.52458936", "0.5239102", "0.52337885", "0.52121323", "0.5184705", "0.51791686", "0.5173766", "0.51728463", "0.51347536", "0.5131144", "0.5128662", "0.5127663", "0.5121145", "0.50923276", "0.5075032", "0.5074195", "0.50620085", "0.50592846", "0.5052961", "0.5050923", "0.50399214", "0.5015899", "0.500841", "0.5002603", "0.5002583", "0.49922752", "0.49922478", "0.49891338", "0.49773693", "0.49616116", "0.49581474", "0.49432194", "0.4940416", "0.4939871", "0.49330902", "0.49262244", "0.4908682", "0.48964033", "0.48933986", "0.48864052", "0.48855916", "0.48830777", "0.4881975", "0.48743328", "0.4874307", "0.48682538", "0.48669565", "0.4866701", "0.48639232", "0.48602274", "0.48591635", "0.4858959", "0.48582596", "0.4856886", "0.48561847", "0.48533607", "0.48479143", "0.48479143", "0.48456064", "0.48422045", "0.48211116", "0.481997", "0.4815715", "0.4814798", "0.48147228", "0.4809308", "0.48091415", "0.48082888", "0.48036435", "0.48035496", "0.48025307", "0.47923055", "0.4791328", "0.47862273", "0.4785338", "0.47826573", "0.4771689", "0.47665736", "0.4765774" ]
0.0
-1
load training set and testing set from json
def load_data_set_from_json(json_path, ratio=0.7):
    train_doc_list = []
    train_category_list = []
    test_doc_list = []
    test_category_list = []
    if os.path.exists(json_path):
        with open(json_path, "r") as f:
            category_map = json.load(f)
        categories = category_map.keys()
        for category in categories:
            all_doc_list = category_map.get(category)
            length = len(all_doc_list)
            train_set_length = int(length * ratio)
            for i in range(length):
                if i < train_set_length:
                    train_doc_list.append(all_doc_list[i])
                    train_category_list.append(category)
                else:
                    test_doc_list.append(all_doc_list[i])
                    test_category_list.append(category)
    else:
        print("File doesn't exist, please run load_file_to_json first")
    return train_doc_list, train_category_list, test_doc_list, test_category_list
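A minimal usage sketch for the record's document above (the sketch is not part of the dump): it assumes the JSON file maps each category name to a list of documents, which is what category_map.get(category) implies, and that "os" and "json" are imported alongside the function. The file name, category names, and sample documents are illustrative assumptions.

import json
import os

# Hypothetical input written only for this sketch: {category: [doc, ...]}.
sample = {"sports": ["doc a", "doc b", "doc c"], "tech": ["doc d", "doc e"]}
with open("categories.json", "w") as f:  # assumed path; the code refers to a load_file_to_json step
    json.dump(sample, f)

train_docs, train_cats, test_docs, test_cats = load_data_set_from_json("categories.json", ratio=0.7)
# With ratio=0.7, the first int(0.7 * n) items of each category's list land in
# train and the remainder in test: sports contributes 2 train / 1 test,
# tech contributes 1 train / 1 test.
assert len(train_docs) == 3 and len(test_docs) == 2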
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_training_data(\n self,\n train_data_file=\"datasets/train_data.json\",\n test_data_file=\"datasets/test_data.json\",\n ):\n train_data = pd.read_json(train_data_file)\n test_data = pd.read_json(test_data_file)\n return train_data, test_data", "def load_training(self):\n path = \"./training/\" + self.training + \".json\"\n\n data = {}\n\n with open(path, \"r\") as infile:\n data = json.load(infile)\n\n self.states = data[\"states\"]\n self.transitions = data[\"transitions\"]\n self.matrix = data[\"matrix\"]", "def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)", "def load_data_from_json(json_path):\r\n print(\"\\nLoading data from json file\")\r\n with open(json_path, \"r\") as fp:\r\n data = json.load(fp)\r\n \r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def _load_training_data(self):\n self._save_training_data()", "def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels", "def make_train_validation_test_sets(path_to_json, out_dir, path_to_images,\n train_fraction=0.6,\n validation_fraction=0.2,\n test_fraction=0.2,\n do_print=False):\n assert train_fraction + validation_fraction + test_fraction == 1, 'Sum of subsets fractions must be 1'\n df = pd.read_json(path_to_json)\n # one-hot encode labels\n df['Class'] = df['Class'].replace(to_replace=[3, 4, 5, 7, 8, 10],\n value=['Unilamellar', 'Multilamellar', 'Uncertain', 'Empty', 'Full', 'Uncertain'])\n\n\n # present class captions as one hot encoding\n df = pd.concat([df, pd.get_dummies(df['Class'], prefix='Label')], axis=1)\n\n # Check that all images in dataframe have corresponding file on the disk\n for index, row in df.iterrows():\n if not os.path.isfile(path_to_images + row['Image']):\n print '{} image was not found. 
This example will be deleted'.format(row['Image'])\n df.drop(index, inplace=True)\n\n # prepare new dataframes\n df_train = pd.DataFrame()\n df_validation = pd.DataFrame()\n df_test = pd.DataFrame()\n\n if do_print:\n print '----------\\nEntire set:\\n', df['Class'].value_counts()\n\n class_counts = df['Class'].value_counts().to_dict()\n for label, count in class_counts.iteritems():\n df_test = pd.concat([df_test, df[df['Class'] == label].sample(frac=test_fraction)])\n df = df[~df.index.isin(df_test.index)]\n\n validation_fraction_adjusted = validation_fraction / (1 - test_fraction)\n df_validation = pd.concat([df_validation, df[df['Class'] == label].sample(frac=validation_fraction_adjusted)])\n df = df[~df.index.isin(df_validation.index)]\n\n df_train = pd.concat([df_train, df[df['Class'] == label]])\n df = df[~df.index.isin(df_train.index)]\n\n if do_print:\n print '----------\\nTrain set:\\n', df_train['Class'].value_counts()\n print '----------\\nValidation set:\\n', df_validation['Class'].value_counts()\n print '----------\\nTest set:\\n', df_test['Class'].value_counts()\n\n # remove out_file if it exists\n filenames = ['train_set.json', 'test_set.json', 'validation_set']\n for f in filenames:\n try:\n os.remove(out_dir + f)\n except OSError:\n pass\n except IOError:\n pass\n\n df_train.to_json(out_dir + 'train_set.json')\n df_validation.to_json(out_dir + 'validation_set.json')\n df_test.to_json(out_dir + 'test_set.json')", "def test__load_training_set():\n classifier = classifier_module.Classifier(None)\n set = classifier._load_training_set('test')\n for i in range(0, 5):\n signal_list = set[i]\n assert signal_list[0].get_x() == 1.0 + i * 0.028\n assert signal_list[0].get_y() == 1.00 - i * i * 0.20 * 0.30\n\n assert signal_list[1].get_x() == 2.0 - i * 0.011\n assert signal_list[1].get_y() == 2.00 - i * 0.020", "def loadSets(self, indir=\"\"):\n\n if indir==\"\":\n print(\"specify folder\")\n return -1\n\n self.train = pd.read_pickle(\"{}/train.pkl\".format(indir))\n self.valid = pd.read_pickle(\"{}/valid.pkl\".format(indir))\n self.test = pd.read_pickle(\"{}/test.pkl\".format(indir))\n\n print(\"sets loaded\")", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)", "def load_data(path, 
source='assistant', splits=(70, 10, 20)):\n if not len(splits) == 3:\n raise ValueError('splits expected to have three components: found {}'\n .format(len(splits)))\n train_size, test_size, val_size = np.array(splits) / np.sum(splits)\n\n data = json.load(open(path))\n if source == 'assistant':\n # Split into training set & (test + val) set.\n data_train, data_test = train_test_split(\n data, train_size=train_size\n )\n # Now, the (test + val) set into the test, and val sets\n data_test, data_val = train_test_split(\n data_test,\n test_size=(val_size / (val_size + test_size))\n )\n data = {\n 'train': data_train,\n 'test': data_test,\n 'val': data_val\n }\n elif source == 'sentiment':\n text, label = data['text'], data['label']\n\n # Split into training set & (test + val) set.\n text_train, text_test, label_train, label_test = train_test_split(\n text, label,\n train_size=train_size\n )\n # Now, the (test + val) set into the test, and val sets\n text_test, text_val, label_test, label_val = train_test_split(\n text_test, label_test,\n test_size=(val_size / (val_size + test_size))\n )\n data = {\n 'train': (text_train, label_train),\n 'test': (text_test, label_test),\n 'val': (text_val, label_val)\n }\n else:\n raise ValueError('Invalid source: {}'.format(source))\n return data", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... 
finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test", "def train(self, trainfile):", "def load_dataset(path, test_or_train):\n senta_batch, sentb_batch, scores_batch = [], [], []\n with open(path, encoding='utf-8') as f:\n for i, line in enumerate(f):\n items = line.strip().split('\\t')\n if test_or_train == 'train':\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n elif test_or_train in ['dev', 'test']:\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n else:\n raise Exception(\"{} error\".format(test_or_train))\n senta_batch.append(senta)\n sentb_batch.append(sentb)\n scores_batch.append(score)\n return senta_batch, sentb_batch, scores_batch", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def test_training(self):\n\t\tpass", "def get_train_test_split(path=\"./data/train_test_split.json\"):\n with open(path) as f:\n train_test_split = json.load(f)\n return train_test_split", "def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)", "def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def download_train_test_set(save_folder):\n df = extract_notes(os.environ[\"DB_CONFIG\"])\n train_df, test_df = split_df(df)\n\n # if save folder does not exist, create it\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n # save train_df\n save_data(train_df, save_folder, \"training_mimic.jsonl\")\n\n # save test_df\n save_data(test_df, 
save_folder, \"testing_mimic.jsonl\")", "def test_data_set_load(data_set):\n data = data_set()\n train_data, train_labels, test_data, test_labels = data.load_data()\n\n assert len(train_data) > 0\n assert len(test_data) > 0\n assert len(train_data) == len(train_labels)\n assert len(test_data) == len(test_labels)", "def ignore_test_load_local_data(self):\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert \"text\" not in train_data.training_examples[0].data\n assert \"label\" in train_data.training_examples[0].data", "def load_datasets(self):\n if self.processed_extension == '.npz':\n logger.info(f'Loading sets from npz:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = sparse.load_npz(self.train_path)\n\n logger.info(f'val: {self.val_path}')\n self.val_data = sparse.load_npz(self.val_path)\n\n logger.info(f'test: {self.test_path}')\n self.test_data = sparse.load_npz(self.test_path)\n \n # Split x and y\n self.train_data = [sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,-1])]\n \n self.val_data = [sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,-1])]\n \n self.test_data = [sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,-1])]\n \n elif self.processed_extension == '.csv':\n logger.info(f'Loading sets from csv:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = pd.read_csv(self.train_path)\n train_cols = self.train_data.columns\n self.train_data = [self.train_data[train_cols.difference(['TARGET'])],\n self.train_data['TARGET']]\n \n logger.info(f'val: {self.val_path}')\n self.val_data = pd.read_csv(self.val_path)\n self.val_data = [self.val_data[train_cols.difference(['TARGET'])],\n self.val_data['TARGET']]\n \n logger.info(f'test: {self.test_path}')\n self.test_data = pd.read_csv(self.test_path)\n self.test_data = [self.test_data[train_cols.difference(['TARGET'])],\n self.test_data['TARGET']]\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n self.n_train = self.train_data[0].shape[0]\n self.n_val = self.val_data[0].shape[0]\n self.n_test = self.test_data[0].shape[0]\n self.input_size = self.train_data[0].shape[1]\n self.n_examples = self.n_train + self.n_val + self.n_test\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def read_and_split_sets():\n 
gen_train_test_sets(\"Data_Sent_Embds/en_sent.pkl\", \"Data_Sent_Embd_Splitted/en_train.pkl\",\n \"Data_Sent_Embd_Splitted/en_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/es_sent.pkl\", \"Data_Sent_Embd_Splitted/es_train.pkl\",\n \"Data_Sent_Embd_Splitted/es_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/pr_sent.pkl\", \"Data_Sent_Embd_Splitted/pr_train.pkl\",\n \"Data_Sent_Embd_Splitted/pr_test.pkl\")", "def load_data(dataset, fraction=1.0, base_folder='data'):\n if dataset == 'blobs':\n path = os.path.join(base_folder, 'blobs.json')\n train_features, test_features, train_targets, test_targets = \\\n load_json_data(path)\n elif dataset == 'mnist-binary':\n train_features, test_features, train_targets, test_targets = \\\n load_mnist_data(2, fraction=fraction, mnist_folder=base_folder)\n train_targets = train_targets * 2 - 1\n test_targets = test_targets * 2 - 1\n elif dataset == 'mnist-multiclass':\n train_features, test_features, train_targets, test_targets = \\\n load_mnist_data(5, fraction=fraction, examples_per_class=100,\n mnist_folder=base_folder)\n elif dataset == 'synthetic':\n path = os.path.join(base_folder, 'synthetic.json')\n train_features, test_features, train_targets, test_targets = \\\n load_json_data(path)\n else:\n raise ValueError('Dataset {} not found!'.format(dataset))\n\n # Normalize the data using feature-independent whitening. Note that the\n # statistics are computed with respect to the training set and applied to\n # both the training and testing sets.\n if dataset != 'synthetic':\n mean = train_features.mean(axis=0, keepdims=True)\n std = train_features.std(axis=0, keepdims=True) + 1e-5\n train_features = (train_features - mean) / std\n if fraction < 1.0:\n test_features = (test_features - mean) / std\n\n return train_features, test_features, train_targets, test_targets", "def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data", "def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def read_data(train_data_dir, test_data_dir):\n clients = []\n train_num_samples = []\n test_num_samples = []\n train_data = {}\n test_data = {}\n\n train_files = os.listdir(train_data_dir)\n train_files = [f for f in train_files if f.endswith(\".json\")]\n # print(train_files)\n for f in train_files:\n file_path = os.path.join(train_data_dir, f)\n with open(file_path, \"r\") as inf:\n cdata = json.load(inf)\n clients.extend(cdata[\"users\"])\n train_num_samples.extend(cdata[\"num_samples\"])\n train_data.update(cdata[\"user_data\"])\n # print(cdata['user_data'])\n test_files = os.listdir(test_data_dir)\n test_files = [f for f in test_files if 
f.endswith(\".json\")]\n for f in test_files:\n file_path = os.path.join(test_data_dir, f)\n with open(file_path, \"r\") as inf:\n cdata = json.load(inf)\n test_num_samples.extend(cdata[\"num_samples\"])\n test_data.update(cdata[\"user_data\"])\n\n # parse python script input parameters\n parser = argparse.ArgumentParser()\n main_args = add_args(parser)\n\n class Args:\n def __init__(self, client_id, client_num_per_round, comm_round):\n self.client_num_per_round = client_num_per_round\n self.comm_round = comm_round\n self.client_id = client_id\n self.client_sample_list = []\n\n client_list = []\n for client_number in range(main_args.client_num_per_round):\n client_list.append(\n Args(client_number, main_args.client_num_per_round, main_args.comm_round)\n )\n return (\n clients,\n train_num_samples,\n test_num_samples,\n train_data,\n test_data,\n client_list,\n )", "def load_dataset(self, testPrefix = 'cv9', root = 'datasets', classes = [ 'pos', 'neg' ]):\n\n\t\tfor senti_class in classes:\n\n\t\t\tdirname = os.path.join(root, senti_class)\n\n\t\t\tfor filename in os.listdir(dirname):\n\n\t\t\t\twith open(os.path.join(dirname, filename)) as file:\n\n\t\t\t\t\tcontent = file.read()\n\n\t\t\t\t\tif filename.startswith(testPrefix):\n\t\t\t\t\t\t# Testing data\n\t\t\t\t\t\tself.testing_set.append(content)\n\t\t\t\t\t\tself.testing_labels.append(senti_class)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Training data\n\t\t\t\t\t\tself.training_set.append(content)\n\t\t\t\t\t\tself.training_labels.append(senti_class)\n\n\t\tself._vectorize(self.vectorizer)", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. 
Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def do_training(self):\n json_data = request.data\n global g_list_of_classifier\n\n datas = json.loads(json_data.decode('UTF-8')) #datas = liste\n\n for ite_clf in g_list_of_classifier:\n for data in datas:\n ite_clf.add_data(data['score'], data['answer'])\n print(ite_clf.get_info())\n return ''", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def train(self, trainset, testset, niter=1000, ntest=300, epochs=int(1e6)):\n\n print('\\n>> Training begins!\\n')\n\n def fetch_dict(datagen, keep_prob=0.5):\n \"\"\"Format model input data.\"\"\"\n bx, by, br = next(datagen)\n while not (bx.shape[0] > 0 and bx.shape[1] > 0):\n bx, by, br = next(datagen)\n\n dec_lengths = np.full((bx.shape[0], ), bx.shape[1], dtype=np.int32)\n\n feed_dict = {\n self.xs_: bx,\n self.ys_: by,\n self.dec_inputs_length_: dec_lengths,\n self.ext_context_: br,\n self.keep_prob_: keep_prob\n }\n return feed_dict\n\n # setup session\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n load_classification_parameters(\n sess, \"models/model.ckpt-1500000\", \"class\")\n load_classification_parameters(\n sess, \"models/model.ckpt-1500000\", \"feat\")\n\n # get last checkpoint\n saver = tf.train.Saver(get_collections([\"encoder\", \"decoder\", \"proj\"]))\n ckpt = tf.train.get_checkpoint_state(self.ckpt_path)\n # verify it\n if ckpt and ckpt.model_checkpoint_path:\n try:\n saver.restore(sess, ckpt.model_checkpoint_path)\n except tf.OpError:\n # graph structure changed, cannot load, restart training\n pass\n\n try:\n # start training\n for j in range(epochs):\n mean_loss = 0\n for n_it in range(niter):\n _, loss = sess.run(\n [self.train_op, self.loss],\n feed_dict=fetch_dict(trainset))\n mean_loss += loss\n print(' [{}/{}]\\r'.format(n_it, niter))\n\n print('[{}] train loss : {}'.format(j, mean_loss / niter))\n saver.save(sess, self.ckpt_path + self.model_name + '.ckpt',\n global_step=j)\n\n # evaluate\n testloss = 0\n for _ in range(ntest):\n testloss += sess.run(\n [self.loss],\n feed_dict=fetch_dict(testset, keep_prob=1.))[0]\n print('test loss : {}'.format(testloss / ntest))\n\n except KeyboardInterrupt:\n print('\\n>> Interrupted by user at iteration {}'.format(j))", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set 
labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def run_train_test(training_file, testing_file):\n\n training = parse_file(training_file)\n training = np.array(training)\n\n X_train = training[:,:4]\n Y_train = training[:,4]\n\n testing = parse_file(testing_file)\n testing = np.array(testing)\n\n X_test = testing[:,:4]\n Y_test = testing[:,4]\n\n gini_clf = DecisionTreeClassifier(random_state=0)\n gini_clf.fit(X_train, Y_train)\n gini_Y_hat = gini_clf.predict(X_test)\n gini_tp, gini_tn, gini_fp, gini_fn, gini_err = eval_results(Y_test, gini_Y_hat)\n\n entropy_clf = DecisionTreeClassifier(criterion=\"entropy\", random_state=0)\n entropy_clf.fit(X_train, Y_train)\n entropy_Y_hat = entropy_clf.predict(X_test)\n entropy_tp, entropy_tn, entropy_fp, entropy_fn, entropy_err = eval_results(Y_test, entropy_Y_hat)\n\n return {\n \"gini\":{\n 'True positives': gini_tp,\n 'True negatives': gini_tn,\n 'False positives': gini_fp,\n 'False negatives': gini_fn,\n 'Error rate': gini_err\n },\n \"entropy\":{\n 'True positives': entropy_tp,\n 'True negatives': entropy_tn,\n 'False positives': entropy_fp,\n 'False negatives': entropy_fn,\n 'Error rate': entropy_err\n }\n }", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def pickle_dataset(train_set, test_set, validation_set, path):\n\n train_set_filename = open(os.path.join(path, TRAIN_DATA_SET), 'wb')\n # Pickle classes_count\n cPickle.dump(train_set, train_set_filename, protocol=cPickle.HIGHEST_PROTOCOL)\n # Close the file\n train_set_filename.close()\n # Save hierarchy_mapping file\n test_set_filename = open(os.path.join(path, TEST_DATA_SET), 'wb')\n # Pickle hierarchy_mapping\n cPickle.dump(test_set, test_set_filename, protocol=cPickle.HIGHEST_PROTOCOL)\n # Close the file\n test_set_filename.close()\n # Save entities list\n validation_set_filename = open(os.path.join(path, VALIDATION_DATA_SET), 'wb')\n # Pickle entities\n cPickle.dump(validation_set, validation_set_filename, protocol=cPickle.HIGHEST_PROTOCOL)\n # Close the file\n validation_set_filename.close()\n\n print(\"Debug printing- the number of train samples: {0}, the number of test samples: {1}, \"\n \"the number of validation samples: {2}\".format(len(train_set), len(test_set), len(validation_set)))", "def load_data():\n train = pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}", "def read_data_sets(data_path, fake_data=False, one_hot=False,\n validation_size=5000, 
source_url={},\n augment=False,\n percentage_train=100.,\n unbalance=False, unbalance_dict={\"percentage\": 20, \"label1\": 0, \"label2\": 8},\n ):\n\n class DataSets(object):\n pass\n\n data_sets = DataSets()\n\n if fake_data:\n data_sets.train = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.validation = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.test = DataSet([], [], fake_data=True, one_hot=True)\n return data_sets\n\n if not source_url: # empty string check\n if 'fashion' in data_path:\n source_url = DEFAULT_SOURCE_URL_FASHION\n else:\n source_url = DEFAULT_SOURCE_URL_MNIST\n\n if 'fashion' in data_path or 'mnist' in data_path: # mnist or fashion\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_mnist(data_path, validation_size, source_url, one_hot)\n reshape = True\n else:\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_medical_data(data_path)\n reshape = False\n\n # add random permutation to train & validation\n np.random.seed(42)\n\n n_train = train_images.shape[0]\n perm = np.random.permutation(n_train)\n train_images = train_images[perm]\n train_labels = train_labels[perm]\n\n n_val = val_images.shape[0]\n perm = np.random.permutation(n_val)\n val_images = val_images[perm]\n val_labels = val_labels[perm]\n\n # For experiments with data-augmentation\n if augment:\n if 'fashion' in data_path: # rotations +-10 and horizontal flips\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=True)\n elif 'mnist' in data_path: # rotations +-10\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=False)\n train_images = np.concatenate([train_images, np.expand_dims(augmented_images, 3)])\n train_labels = np.concatenate([train_labels, augmented_labels])\n # for the medical datasets, you can use the \"augment\" argument while doing patch extraction\n\n # For experiments with limited amount of data\n if percentage_train != 100.:\n train_size = int(0.01*percentage_train*train_images.shape[0])\n Xtrain_images, Xval_images, ytrain, yval = train_test_split(train_images, train_labels, train_size=train_size)\n train_images = Xtrain_images\n train_labels = ytrain\n\n # For experiments with class-imbalance distribution\n if unbalance:\n n_classes = len(np.unique(np.argmax(train_labels, 1)))\n reduceto = 0.01*unbalance_dict['percentage']\n label1 = unbalance_dict['label1']\n label2 = unbalance_dict['label2']\n\n pick_ids = []\n newsize = 0\n all_classes = np.arange(0, n_classes)\n all_classes = np.delete(all_classes, np.where(all_classes == label1)[0])\n all_classes = np.delete(all_classes, np.where(all_classes == label2)[0])\n\n for lab in [label1, label2]:\n allids = np.where(np.argmax(train_labels, 1) == lab)[0]\n selectedids = np.random.choice(allids, int(reduceto * allids.shape[0]), replace=False)\n pick_ids.append(selectedids)\n newsize += len(selectedids)\n\n new_ids = convert_list_to_array(pick_ids, newsize)\n\n other_ids = []\n othersize = 0\n for lab in all_classes.tolist():\n selectedids = np.where(np.argmax(train_labels, 1) == lab)[0]\n other_ids.append(selectedids)\n othersize += len(selectedids)\n\n keep_ids = convert_list_to_array(other_ids, othersize)\n\n # new_ids: contains the indices of the reduced (imbalance) classes\n # keep_ids: contains the indices of the rest (keep the same class distribution)\n resulting_ids = np.concatenate((new_ids, keep_ids))\n np.random.shuffle(resulting_ids)\n\n train_images = 
train_images[resulting_ids, ...]\n train_labels = train_labels[resulting_ids, ...]\n\n data_sets.train = DataSet(train_images, train_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.validation = DataSet(val_images, val_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.test = DataSet(test_images, test_labels, fake_data=True, one_hot=True, reshape=reshape)\n\n return data_sets", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def load_all(test_num=100):\n\ttrain_data = pd.read_csv(\n\t\tconfig.train_rating, \n\t\tsep='\\t', header=None, names=['user', 'item'], \n\t\tusecols=[0, 1], dtype={0: np.int32, 1: np.int32})\n\n\tuser_num = train_data['user'].max() + 1\n\titem_num = train_data['item'].max() + 1\n\n\ttrain_data = train_data.values.tolist()\n\n\t# load ratings as a dok matrix\n\ttrain_mat = sp.dok_matrix((user_num, item_num), dtype=np.float32)\n\tfor x in train_data:\n\t\ttrain_mat[x[0], x[1]] = 1.0\n\n\ttest_data = []\n\twith open(config.test_negative, 'r') as fd:\n\t\tline = fd.readline()\n\t\twhile line != None and line != '':\n\t\t\tarr = line.split('\\t')\n\t\t\tu = eval(arr[0])[0]\n\t\t\ttest_data.append([u, eval(arr[0])[1]])\n\t\t\tfor i in arr[1:]:\n\t\t\t\ttest_data.append([u, int(i)])\n\t\t\tline = fd.readline()\n\treturn train_data, test_data, user_num, item_num, train_mat", "def train(self, training_data):\n pass", "def load(config_file: typing.TextIO) -> \"TrainingConfig\":\n return TrainingConfig.from_json(config_file.read())", "def load_mnist(path='./', test_size=0.3, random_state = 123):\n \n np.random.seed(random_state)\n if 'X_train.npy' not in os.listdir(path=path) or 'y_train.npy' not in os.listdir(path=path):\n print (\"Train dataset not found. Downloading...\")\n os.system(\"curl -L -o train.zip {}\".format(TRAIN_DATA_LINK))\n os.system(\"unzip train.zip\")\n os.system(\"tar -xf trainingSet.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSet'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSet/{}'.format(class_name)):\n image = imread('./trainingSet/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_train = np.array(images)\n y_train = np.array(labels)\n\n permutation = np.random.permutation(X_train.shape[0])\n X_train = X_train[permutation]\n y_train = y_train[permutation]\n\n with open('X_train.npy', 'wb') as f:\n np.save(f, X_train)\n with open('y_train.npy', 'wb') as f:\n np.save(f, y_train)\n os.system(\"rm -rf trainingSet\")\n os.system(\"rm -rf train.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n else:\n X_train = np.load('X_train.npy')\n y_train = np.load('y_train.npy')\n\n if 'X_test.npy' not in os.listdir(path=path) or 'y_test.npy' not in os.listdir(path=path):\n print (\"Test dataset not found. 
Downloading...\")\n os.system(\"curl -L -o test.zip {}\".format(TEST_DATA_LINK))\n os.system(\"unzip test.zip\")\n os.system(\"tar -xf trainingSample.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSample'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSample/{}'.format(class_name)):\n image = imread('./trainingSample/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_test = np.array(images)\n y_test = np.array(labels)\n with open('X_test.npy', 'wb') as f:\n np.save(f, X_test)\n with open('y_test.npy', 'wb') as f:\n np.save(f, y_test)\n\n os.system(\"rm -rf trainingSample\")\n os.system(\"rm -rf test.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n\n else:\n X_test = np.load('X_test.npy')\n y_test = np.load('y_test.npy')\n\n return X_train, X_test, y_train, y_test", "def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label", "def import_datasets(snli_path):\n print('extract data from snli directory..')\n train = dict(); dev = dict(); test = dict()\n gold_labels = {'entailment': 0, 'neutral': 1, 'contradiction': 2}\n\n for file_type in ['train', 'dev', 'test']:\n path = os.path.join(snli_path, 'snli_1.0_{}.jsonl'.format(file_type))\n with open(path) as file:\n data = [json.loads(line) for line in file]\n eval(file_type)['premise'] = [entry['sentence1'] for entry in data if entry['gold_label'] != '-']\n eval(file_type)['hypothesis'] = [entry['sentence2'] for entry in data if entry['gold_label'] != '-']\n g_labels = np.array([gold_labels[entry['gold_label']] for entry in data if entry['gold_label'] != '-'])\n eval(file_type)['label'] = g_labels\n print('extraction process was finished successfully!')\n return train, dev, test", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def extract_json_to_files(input_dir,output_dir):\n files={}\n files['train']='train-v1.1.json'\n files['dev']='dev-v1.1.json'\n\n for file in files:\n filename=os.path.join(input_dir,files[file])\n with open(filename,'r',encoding='utf-8') as data_file:\n examples = []\n dataset=json.load(data_file)\n count_total=total_exs(dataset)\n count_mapping_problem=0\n count_token_problem=0\n count_ansspan_problem=0\n count_examples=0\n for article_id in tqdm(range(len(dataset['data'])), desc=\"Preprocessing {}\".format(file)):\n article_paragraph=dataset['data'][article_id]['paragraphs']\n for paragraph_id in range(len(article_paragraph)):\n context=article_paragraph[paragraph_id]['context']\n context=context.replace(\"''\",'\"').replace(\"``\",'\"')\n context = context.replace('\\u3000', ' ').replace('\\u202f',' ').replace('\\u2009', ' ')#.replace(\"'\",\"'\")\n context=context.replace('\\-',' ')\n context_tokens=tokenize_sequence(context)\n context=context.lower()\n qas=article_paragraph[paragraph_id]['qas']\n charloc2wordloc=get_char_word_loc_mapping(context, context_tokens)\n if charloc2wordloc is None:\n count_mapping_problem+=len(qas)\n continue\n for qa in qas:\n question=qa['question'].lower()\n 
question_tokens=tokenize_sequence(question)\n\n ans_text=qa['answers'][0]['text'].lower()\n ans_text=ans_text.replace('\\u3000', ' ').replace('\\u202f', ' ').replace('\\u2009', ' ')\n ans_start_loc=qa['answers'][0]['answer_start']\n if qa['id'] in ['5706baed2eaba6190074aca5','57269c73708984140094cbb5','57269c73708984140094cbb7','572a11661d04691400779721','572a11661d04691400779722','572a11661d04691400779723','572a11661d04691400779724','572a11661d04691400779725','572a2cfc1d0469140077981b','572a3a453f37b319004787e9','572a84d3f75d5e190021fb3c']:\n ans_start_loc+=1\n if qa['id'] in ['572a5df77a1753140016aedf','572a5df77a1753140016aee0','572a84d3f75d5e190021fb38','572a84d3f75d5e190021fb39','572a84d3f75d5e190021fb3a','572a84d3f75d5e190021fb3b','572a85df111d821400f38bad','572a85df111d821400f38bae','572a85df111d821400f38baf','572a85df111d821400f38bb0']:\n ans_start_loc+=2\n if qa['id'] in ['572a5df77a1753140016aee1','572a5df77a1753140016aee2']:\n ans_start_loc+=3\n if qa['id'] in ['57286bf84b864d19001649d6','57286bf84b864d19001649d5']:\n ans_start_loc-=1\n if qa['id'] in ['5726bee5f1498d1400e8e9f3','5726bee5f1498d1400e8e9f4']:\n ans_start_loc-=2\n ans_end_loc=ans_start_loc+len(ans_text)\n\n if context[ans_start_loc:ans_end_loc]!=ans_text:\n count_ansspan_problem+=1\n continue\n ans_start_wordloc = charloc2wordloc[ans_start_loc][1] # answer start word loc\n ans_end_wordloc = charloc2wordloc[ans_end_loc-1][1] # answer end word loc\n assert ans_start_wordloc <= ans_end_wordloc\n\n ans_tokens = context_tokens[ans_start_wordloc:ans_end_wordloc + 1]\n if \"\".join(ans_tokens) != \"\".join(ans_text.split()):\n count_token_problem += 1\n #print(ans_text)\n #print(ans_tokens)\n continue # skip this question/answer pair\n examples.append((' '.join(context_tokens),' '.join(question_tokens),' '.join(ans_tokens),' '.join([str(ans_start_wordloc),str(ans_end_wordloc)])))\n print(\"Number of (context, question, answer) triples discarded due to char -> token mapping problems: \", count_mapping_problem)\n print(\"Number of (context, question, answer) triples discarded because character-based answer span is unaligned with tokenization: \",count_token_problem)\n print(\"Number of (context, question, answer) triples discarded due character span alignment problems (usually Unicode problems): \",count_ansspan_problem)\n print(\"Processed %i examples of total %i\\n\" % (len(examples), len(examples)+count_mapping_problem+count_token_problem+count_ansspan_problem))\n indices = list(range(len(examples)))\n np.random.shuffle(indices)\n with open(os.path.join(output_dir,file+'.context'),'w',encoding='utf-8') as context_file, \\\n open(os.path.join(output_dir,file+'.question'),'w',encoding='utf-8') as question_file, \\\n open(os.path.join(output_dir,file+'.answer'),'w',encoding='utf-8') as answer_file, \\\n open(os.path.join(output_dir,file+'.span'),'w',encoding='utf-8') as span_file:\n for i in indices:\n (context,question,answer,span)=examples[i]\n context_file.write(context+'\\n')\n question_file.write(question+'\\n')\n answer_file.write(answer+'\\n')\n span_file.write(span+'\\n')", "def load(self, model_dir, use_text=True, use_history=True, use_network=True, delimiter=\",\"):\n self._clear_cache()\n # TODO: load parameters from filename!!!\n train_parts = []\n test_parts = []\n #load text feature matrix\n if use_text:\n tr_text = load_npz(os.path.join(model_dir, \"train_text.npz\"))\n te_text = load_npz(os.path.join(model_dir, \"test_text.npz\"))\n train_parts.append(tr_text.toarray())\n 
test_parts.append(te_text.toarray())\n print(\"text\", tr_text.shape, te_text.shape)\n #load history feature matrix\n if use_history:\n tr_history = np.loadtxt(os.path.join(model_dir, \"train_history.csv\"), delimiter=delimiter)\n te_history = np.loadtxt(os.path.join(model_dir, \"test_history.csv\"), delimiter=delimiter)\n train_parts.append(tr_history)\n test_parts.append(te_history)\n print(\"history\", tr_history.shape, te_history.shape)\n #load node embeddings\n if use_network:\n tr_network = np.loadtxt(os.path.join(model_dir, \"train_network.csv\"), delimiter=delimiter)\n te_network = np.loadtxt(os.path.join(model_dir, \"test_network.csv\"), delimiter=delimiter)\n train_parts.append(tr_network)\n test_parts.append(te_network)\n print(\"network\", tr_network.shape, te_network.shape)\n #concatenation\n X_tr = np.concatenate(train_parts, axis=1)\n X_te = np.concatenate(test_parts, axis=1)\n print(\"After concatenation:\", X_tr.shape, X_te.shape)\n #load labels\n self.tr_label = np.loadtxt(os.path.join(model_dir, \"train_label.csv\"), delimiter=delimiter)\n self.te_label = np.loadtxt(os.path.join(model_dir, \"test_label.csv\"), delimiter=delimiter)\n assert len(self.tr_label) == len(X_tr)\n assert len(self.te_label) == len(X_te)\n #load meta\n self.tr_meta = pd.read_csv(os.path.join(model_dir, \"train_meta.csv\"), delimiter=delimiter)\n self.te_meta = pd.read_csv(os.path.join(model_dir, \"test_meta.csv\"), delimiter=delimiter)\n assert len(self.tr_meta) == len(X_tr)\n assert len(self.tr_meta) == len(X_tr)\n return X_tr, X_te", "def load_data(opt=\"mnist\"):\n if opt == \"mnist\":\n train, test = tf.keras.datasets.mnist.load_data()\n \n x_train, y_train = train\n x_test, y_test = test\n \n x_train = x_train.reshape(x_train.shape[0], 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n \n y_train = y_train.astype(np.int)\n y_test = y_test.astype(np.int)\n for i in range(len(y_train)):\n y_train[i] = 1 if y_train[i] % 2 == 0 else -1\n for i in range(len(y_test)):\n y_test[i] = 1 if y_test[i] % 2 == 0 else -1\n\n elif opt == \"covertype\":\n df = pd.read_csv(\"covtype.data\", header=None)\n x = df.iloc[:, 0:54].values\n y = df[54].values\n for i in range(len(y)):\n y[i] = 1 if y[i] % 2 == 0 else -1\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n \n else:\n logging.error(\"Unknown dataset!!\")\n\n logging.info(\"train data shape: {}\".format(x_train.shape))\n logging.info(\"test data shape: {}\".format(x_test.shape))\n return (x_train, y_train), (x_test, y_test)", "def load_training():\n for can in candidates:\n trainings[can] = []\n for subdir, dirs, files in os.walk(os.path.join(corpus_dir, can)):\n for doc in files:\n trainings[can].append(doc)", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # 
self.train_index, self.test_index = kfold.split(features,labels)", "def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = current_alphabet_dictionary", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab", "def load_testing_data(self) -> List[np.ndarray]:\n input_data = self._load_set(config.TEST_DIR, False)\n return input_data", "def load_train_small():\n with open('mnist_train_small.npz', 'rb') as f:\n train_set_small = np.load(f)\n train_inputs_small = train_set_small['train_inputs_small']\n train_targets_small = train_set_small['train_targets_small']\n return train_inputs_small, train_targets_small", "def _load_test_data(self):\n self._save_test_data()", "def _load_test_data(self):\n\n self.test_loader = data.Test_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n\n self.test_loader.load_data()\n\n # load mean and std from train\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n 
with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)", "def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')", "def load_data(path):\n train = pd.read_csv(os.path.join(path,'train.csv'))\n test = pd.read_csv(os.path.join(path,'test.csv'))\n \n return train, test", "def parse_train_data(training_set, language):\n print \"Reading training set: \" + training_set\n xmldoc = minidom.parse(training_set)\n lex_list = xmldoc.getElementsByTagName('lexelt')\n training_output = {}\n\n print \"Processing training set and training models...\"\n for node in lex_list:\n lexelt = node.getAttribute('item')\n training_output[lexelt] = {}\n inst_list = node.getElementsByTagName(\"instance\")\n # setup the neighbor_word_list within k distance of the word\n neighbor_word_list = []\n senseid_set = set()\n for inst in inst_list:\n sentence = inst.getElementsByTagName('context')[0]\n senseid_set.add(inst.getElementsByTagName('answer')[0].getAttribute('senseid'))\n neighbor_word_list = list(set(neighbor_word_list + get_neighbor_words_list(sentence, language)))\n senseid_list = list(senseid_set)\n training_output[lexelt][\"neighbor_word_list\"] = neighbor_word_list\n _4c_4d_feature = extract_4c_4d_feature(neighbor_word_list, senseid_list, inst_list, language)\n training_output[lexelt][\"4c_4d_feature\"] = _4c_4d_feature\n x_list = []\n y_list = []\n for inst in inst_list:\n y = inst.getElementsByTagName('answer')[0].getAttribute('senseid')\n if ignore_U_activated and y.__eq__('U'):\n continue\n y_list.append(str(replace_accented(y)))\n x = extract_vector(inst, neighbor_word_list, _4c_4d_feature, language)\n x_list.append(x)\n # for each node, build a classifier\n if language.__eq__(\"English\"):\n #clf = RandomForestClassifier(n_estimators=10) 58.9\n #clf = SGDClassifier() 61.1\n #clf = MultinomialNB() 62.9\n #clf = BernoulliNB() 55.8\n #clf = Perceptron() 60.4\n #clf = PassiveAggressiveClassifier() 62.1\n #clf = RidgeClassifier() 62.7\n #clf = svm.LinearSVC() 62.5\n #clf = KNeighborsClassifier()\n #clf = GaussianNB()\n clf = MultinomialNB(alpha=0.95) #+ alpha=0.95 + k=13 + left_right_order + vector_0_1 off = 64.7\n elif language.__eq__(\"Spanish\"):\n #clf = svm.LinearSVC() 82.0\n #clf = MultinomialNB() 82.2\n #clf = RidgeClassifier() 81.5\n #clf = PassiveAggressiveClassifier() 81.9\n #clf = BernoulliNB() 72.4\n clf = MultinomialNB(alpha=0.50) #0.25:82.6 0.4:83.1 0.45:83.2 0.5: 83.2 0.55:83.2 0.6:82.8 0.75:82.7\n elif language.__eq__(\"Catalan\"):\n #clf = svm.LinearSVC() # 82.8\n #clf = MultinomialNB() # 80.8\n #clf = RidgeClassifier() 82.6\n #clf = svm.LinearSVC(C=1.5) 82.9\n clf = MultinomialNB(alpha=0.25) # 0.5:84.3 0.35:84.6 0.3:84.8 0.25:85.4 0.2:85.3\n else:\n clf = svm.LinearSVC()\n clf.fit(x_list, y_list)\n training_output[lexelt][\"Classifier\"] = clf\n print \"Models trained.\"\n return training_output", "def load_data_from_fold(data_path):\r\n print(\"\\nLoading data from json folder {}\".format(data_path))\r\n\r\n SAMPLES_TO_CONSIDER = 22050\r\n\r\n data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)\r\n\r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for 
memories\")\r\n del data\r\n\r\n return X, y", "def load_data():\n f = gzip.open('../data/mnist.pkl.gz', mode='rb')\n\n # NOTE: I get errors when I don't use encoding='latin1' because of Python 2 vs Python 3 compatibility issues\n # training_data, validation_data, test_data = pickle.load(f, encoding='latin1')\n training_data, validation_data, test_data = pickle.load(f)\n\n f.close()\n\n return training_data, validation_data, test_data", "def load_testset(self, fn):\n w = codecs.open(fn, 'r', 'utf-8')\n data = w.read().split('\\n')[:-1]\n w.close()\n\n # split labels and sentences\n data = [i.split(':') for i in data]\n # reverse elements and connect subsentences in case of additional colons\n self.test_set = [(':'.join(z[1:]), z[0]) for z in data]\n return self.test_set", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def load_data(directory: str, dataset_str: str):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"{}/ind.{}.{}\".format(directory, dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\"{}/ind.{}.test.index\".format(directory, dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return graph, features, y_train, y_val, y_test, train_mask, val_mask, test_mask", "def test_text_classifier_get_training_samples(self):\n pass", "def load_testset_predictions(all, weighted=False):\n if weighted:\n return load_data_from_CSV('results/testset_results_weighted_{}.csv'.format(all))\n else:\n return 
load_data_from_CSV('results/testset_results_{}.csv'.format(all))", "def load_data(self):\n params = self.params\n catg = params.data_category\n langs = ['en', params.target_lang]\n data = {lang: {splt: {} for splt in (['train', 'valid'] if lang == 'en' else ['test'])} for lang in langs}\n clf_dataset_path = {\n lang: {\n splt: {\n 'x': os.path.join(params.data_path, '%s_%s_%s_x.bpe.pth' % (splt, lang, catg)),\n 'y': os.path.join(params.data_path, '%s_%s_%s_y.txt' % (splt, lang, catg)),\n } for splt in (['train', 'valid'] if lang == 'en' else ['test'])\n } for lang in langs\n }\n for splt in ['train', 'valid', 'test']:\n for lang in langs:\n if lang == 'en' and splt in ['train', 'valid'] or lang != 'en' and splt == 'test':\n # load data and dictionary\n data1 = load_binarized(clf_dataset_path[lang][splt]['x'], params)\n data['dico'] = data.get('dico', data1['dico'])\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n # create dataset\n data[lang][splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n # load labels\n with open(clf_dataset_path[lang][splt]['y'], 'r') as f:\n labels = [int(l) for l in f]\n data[lang][splt]['y'] = torch.LongTensor(labels)\n assert len(data[lang][splt]['x']) == len(data[lang][splt]['y'])\n\n return data", "def load_data(loc='../data/SICK/'):\n trainA, trainB, testA, testB = [],[],[],[]\n trainS, testS = [],[]\n\n with open(loc + 'SICK_train.txt', 'rb') as f:\n for line in f:\n text = line.strip().split('\\t')\n trainA.append(text[1])\n trainB.append(text[2])\n trainS.append(text[3])\n with open(loc + 'SICK_test_annotated.txt', 'rb') as f:\n for line in f:\n text = line.strip().split('\\t')\n testA.append(text[1])\n testB.append(text[2])\n testS.append(text[3])\n\n trainS = [float(s) for s in trainS[1:]]\n testS = [float(s) for s in testS[1:]]\n\n return [trainA[1:], trainB[1:]], [testA[1:], testB[1:]], [trainS, testS]", "def load_mnsit_training_set():\n try:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n train_data = np.asarray(x_train, dtype=np.float32)\n eval_data = np.asarray(x_test, dtype=np.float32)\n train_labels = np.asarray(y_train, dtype=np.int32)\n eval_labels = np.asarray(y_test, dtype=np.int32)\n return train_data, eval_data, train_labels, eval_labels\n\n except Exception as error:\n raise EnvironmentError(\"load_mnsit_training_set: Exception loading MNSIT data: {0}\".format(error))", "def tokenize_all(train_json, test1_json):\n \n #print(\"\\nINFO: Tokenising captions.\\n\")\n tokenised_data = []\n # Train data\n for user_id, posts in tqdm(train_json.items(),\n ncols=100, desc='Tokenising train data'):\n for post_id, post in posts.items():\n img_id = '{}_@_{}'.format(user_id, post_id)\n temp_dict = dict(split='train',\n filepath=pjoin('images', img_id),\n image_id=img_id,\n raw=[post['caption']],\n tokens=[tokenize(post['caption'])])\n tokenised_data.append(temp_dict)\n \n # Validation data\n random.seed(4896)\n random.shuffle(tokenised_data)\n for i in range(2000):\n tokenised_data[i]['split'] = 'val'\n \n # Test1 data\n for user_id, posts in tqdm(test1_json.items(),\n ncols=100, desc='Tokenising test1 data'):\n for post_id, post in posts.items():\n img_id = '{}_@_{}'.format(user_id, post_id)\n temp_dict = dict(split='test',\n filepath=pjoin('images', img_id),\n image_id=img_id,\n raw=[post['caption']],\n tokens=[tokenize(post['caption'])])\n tokenised_data.append(temp_dict)\n return tokenised_data", "def load_training_data(file_path):\n return load_data(file_path)", "def 
readFiles(trainFile, testFile):\r\n\r\n\t# Open both files and split into lines\r\n\twith open(trainFile) as f:\r\n\t\ttrainLines = f.read().splitlines()\r\n\r\n\twith open(testFile) as f:\r\n\t\ttestLines = f.read().splitlines()\r\n\r\n\t\t\r\n\t# Extract training data\r\n\tfor line in trainLines:\r\n\t\tline = line.split()\r\n\t\t\r\n\t\tid = line[0]\r\n\t\tclass_id = line[1]\r\n\t\twords = line[2:]\r\n\t\t\r\n\t\trow = [id, class_id, words]\r\n\t\t\r\n\t\ttrainingData.append(row)\r\n\t\t\r\n\t\t\r\n\t# Extract testing data\r\n\tfor line in testLines:\r\n\t\tline = line.split()\r\n\t\t\r\n\t\tid = line[0]\r\n\t\tclass_id = line[1]\r\n\t\twords = line[2:]\r\n\t\t\r\n\t\trow = [id, class_id, words]\r\n\t\t\r\n\t\ttestData.append(row)", "def test_01_train(self):\n \n request_json = {'mode':'test'}\n r = requests.post('http://localhost:{}/train'.format(port),json=request_json)\n train_complete = re.sub(\"\\W+\",\"\",r.text)\n self.assertEqual(train_complete,'true')", "def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)", "def load_data():\r\n f = gzip.open('mnist.pkl.gz', 'rb')\r\n training_data, validation_data, test_data = pickle.load(f,encoding='bytes')\r\n f.close()\r\n return (training_data, validation_data, test_data)", "def load_test_dataset(self):\n test_data_path = \"testdata\"\n root = Path(test_data_path)\n classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])\n print(classes)\n\n transform = transforms.Compose([\n transforms.Resize(300),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(250),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.6071, 0.4828, 0.3934], std=[0.2845, 0.3187, 0.3240])\n ])\n\n dataset = datasets.ImageFolder(test_data_path, transform=transform)\n testloader = DataLoader(dataset, batch_size=4, shuffle=True)\n print(\"Loaded data\")\n return testloader", "def model_train(data_dir,test=False):\r\n \r\n if not os.path.isdir(MODEL_DIR):\r\n os.mkdir(MODEL_DIR)\r\n\r\n if test:\r\n print(\"... test flag on\")\r\n print(\"...... subsetting data\")\r\n print(\"...... 
subsetting countries\")\r\n \r\n ## fetch time-series formatted data\r\n ts_data = fetch_ts(data_dir)\r\n\r\n ## train a different model for each data sets\r\n for country,df in ts_data.items():\r\n if test and country not in ['all','united_kingdom']:\r\n continue\r\n model_name = re.sub(\"\\.\",\"_\",str(MODEL_VERSION))\r\n saved_model = os.path.join(MODEL_DIR,\r\n \"sl-{}-{}.joblib\".format(country,model_name))\r\n saved_test_model = os.path.join(MODEL_DIR,\r\n \"test-{}-{}.joblib\".format(country,model_name))\r\n saved_baseline = os.path.join(BASELINE_DIR,\r\n \"b-sl-{}-{}.joblib\".format(country,model_name))\r\n saved_test_baseline = os.path.join(BASELINE_DIR,\r\n \"b-test-{}-{}.joblib\".format(country,model_name))\r\n if (test and (not os.path.isfile(saved_test_model))) or ((not test) and (not os.path.isfile(saved_model))):\r\n _model_train(df,country,test=test)\r\n if (test and (not os.path.isfile(saved_test_baseline))) or ((not test) and (not os.path.isfile(saved_baseline))):\r\n _baseline_train(df,country,test=test)", "def loadData ( self ) :\n df = pd.read_json ( self.dataset )\n df = df[pd.notnull ( df[2] )]\n df[1] = df[1].apply ( self.clean_text )\n\n self.X = df[1]\n self.y = df[2]", "def setUp(self):\n cwd = Path(__file__).parent.absolute()\n with open(f'{cwd}/test.json', 'r') as f:\n default = json.load(f)\n\n for data in default['results']:\n set_id = data.pop('set_id')\n products_data = data.pop('products')\n\n set_obj = Set.objects.create(id=set_id)\n\n spl_obj = set_obj.spls.create(**data)\n\n for product_data in products_data:\n product_data.pop('name')\n packages_data = product_data.pop('packages')\n if 'inactive_ingredients' in product_data:\n inactive_ingredients_data = product_data\\\n .pop('inactive_ingredients')\n\n inactive_ingredients_list = []\n for inactive_ingredient_data in inactive_ingredients_data:\n try:\n ingredient = InactiveIngredient.objects.get(\n **inactive_ingredient_data\n )\n inactive_ingredients_list.append(ingredient)\n except Exception:\n ingredient = InactiveIngredient.objects.create(\n **inactive_ingredient_data\n )\n inactive_ingredients_list.append(ingredient)\n\n product_obj = spl_obj.products.create(**product_data)\n product_obj.inactive_ingredients\\\n .add(*inactive_ingredients_list)\n\n for package_data in packages_data:\n product_obj.packages.create(**package_data)", "def mutual_data_loader(data_dir, tokenizer, max_doc_len, max_query_len, max_option_len, is_training):\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n if is_training:\n subset_list = ['train']\n else:\n subset_list = ['dev', 'test']\n\n examples, features = {}, {}\n for subset in subset_list:\n level_example_dict = {\"high\": None, \"middle\": None}\n level_features_dict = {\"high\": None, \"middle\": None}\n\n for level in ['high', 'middle']:\n subset_dir = os.path.join(data_dir, subset)\n file_list = os.listdir(subset_dir)\n file_list = [file for file in file_list if file.endswith('txt')]\n file_list = sorted(file_list)\n\n alldata = []\n for file in file_list:\n data = json.load(open(os.path.join(subset_dir, file)))\n alldata.append(data)\n \n examples_list = []\n for data in alldata:\n doc_id = data['id']\n doc = data[\"article\"].replace('\\\\n', '\\n')\n doc_token = []\n prev_is_whitespace = True\n for c in doc:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_token.append(c)\n else:\n doc_token[-1] += c\n prev_is_whitespace = 
False\n\n for i, answer in enumerate(data[\"answers\"]):\n if answer == \" \":\n answer = \"C\"\n example = InputExample(\n guid=doc_id + '-%d' % i,\n doc_token=doc_token,\n question_text=\"\",\n options=data[\"options\"],\n answer=answer)\n examples_list.append(example)\n\n level_example_dict[level] = examples_list\n level_features_dict[level] = convert_examples_to_features(\n level_example_dict[level], tokenizer, max_doc_len, max_query_len, max_option_len)\n \n print(\"MuTuaL Data Loader....\")\n print(level_example_dict[level][-1])\n print(level_features_dict[level][-1])\n examples[subset] = level_example_dict\n features[subset] = level_features_dict\n\n return examples, features", "def load_training_data(list_files):\n training_data = []\n for tr_file in list_files:\n with open(os.path.join(\"data\", tr_file)) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def train(self, training_data, cfg, **kwargs):\n pass", "def load_all_script(train_rating_path,test_negative_path, test_num=100):\n\ttrain_data = pd.read_csv(\n\t\ttrain_rating_path, \n\t\tsep='\\t', header=None, names=['user', 'item'], \n\t\tusecols=[0, 1], dtype={0: np.int32, 1: np.int32})\n\n\tuser_num = train_data['user'].max() + 1\n\titem_num = train_data['item'].max() + 1\n\n\ttrain_data = train_data.values.tolist()\n\n\t# load ratings as a dok matrix\n\ttrain_mat = sp.dok_matrix((user_num, item_num), dtype=np.float32)\n\tfor x in train_data:\n\t\ttrain_mat[x[0], x[1]] = 1.0\n\n\ttest_data = []\n\twith open(test_negative_path, 'r') as fd:\n\t\tline = fd.readline()\n\t\twhile line != None and line != '':\n\t\t\tarr = line.split('\\t')\n\t\t\tu = eval(arr[0])[0]\n\t\t\ttest_data.append([u, eval(arr[0])[1]])\n\t\t\tfor i in arr[1:]:\n\t\t\t\ttest_data.append([u, int(i)])\n\t\t\tline = fd.readline()\n\treturn train_data, test_data, user_num, item_num, train_mat", "def load_eval_datasets(cfg):\n # Temporarily change dataset type to be frame_by_frame\n cur_dataset_type = cfg.dataset_type\n if cfg.dataset_type == 'graph_net':\n cfg.dataset_type = 'single_frame_graph_net'\n else:\n cfg.dataset_type = 'frame_by_frame'\n\n # Get the evaluation (frame by frame) datasets\n train_set, val_set, test_set = get_split_datasets(cfg.dataset)\n\n # Restore dataset type\n cfg.dataset_type = cur_dataset_type\n return train_set, val_set, test_set", "def prepare_train(self) -> Tuple[ZLIMGS, ZLIMGS, ZLIMGS, ZLIMGS]:\n\n if self.setting == 'setting1':\n warnings.warn(\"Please note that Setting 1 should not use train eval dataset! 
\"\n \"Because its training set only contain normal samples!\")\n\n with open(self.json_path) as fp:\n ids_json = json.load(fp)\n ids_train_normal = ids_json['normal']['train']\n ids_train_defect = ids_json['defect']['train']\n\n # train\n zlimgs_train_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n zlimgs_train_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n\n # train eval\n zlimgs_train_eval_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n zlimgs_train_eval_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n\n return zlimgs_train_normal, zlimgs_train_defect, zlimgs_train_eval_normal, zlimgs_train_eval_defect", "def __init__(self, directory, trainset_size = 3000):\n self.directory = directory\n self.trainset_size = trainset_size\n self.trainset = []\n self.testset = []\n self.parse()", "def train(self, train_loader):\n pass", "def load_data():\n f = gzip.open('../data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f)\n f.close()\n return (training_data, validation_data, test_data)", "def load_model(cls, src_path, update_dict=None, steps=None):\n\n if steps is not None:\n json_file, _ = cls.get_file_via_steps(src_path, steps, 'json', STEPS_REGEX)\n hdf5_file, samples_seen = cls.get_file_via_steps(src_path, steps, 'hdf5',\n STEPS_REGEX)\n\n\n else:\n json_file = max(glob.iglob(os.path.join(src_path, '*.json')),\n key=os.path.getctime)\n hdf5_file = max(glob.iglob(os.path.join(src_path, '*.hdf5')),\n key=os.path.getctime)\n\n samples_seen = cls.get_pattern(hdf5_file, STEPS_REGEX)\n samples_seen = samples_seen if samples_seen is not None else 0\n\n session_number = cls.get_pattern(hdf5_file, SESS_REGEX)\n session_number = session_number if session_number is not None else 1\n\n params_dict = data_functions.load_json(json_file)\n\n params_dict['pretrained_weights'] = hdf5_file\n\n #TODO: try to rearange loading weights\n # if 'weights' in os.path.basename(hdf5_file):\n # params_dict['pretrained_weights'] = hdf5_file\n # else:\n # params_dict['checkpoint'] = hdf5_file\n\n params_dict['train_time'] = os.path.basename(src_path)\n if update_dict is not None:\n if 'pretrained_weights' or 'checkpoint' in update_dict:\n params_dict['pretrained_weights'] = None\n params_dict['checkpoint'] = None\n params_dict.update(update_dict)\n\n model = ClarifruitUnet(**params_dict)\n logger.info(f\"continuing training from {os.path.basename(hdf5_file)}\")\n\n setattr(model, 'samples_seen', samples_seen)\n setattr(model, 'params_filepath', json_file)\n setattr(model, 'session_number', session_number)\n\n return model", "def setup():\n for dir_path in [train_dir, output_dir]:\n Path(dir_path).mkdir(exist_ok=True)\n\n # create the training and test data files that we will use\n create_jsonlines_feature_files(train_dir)", "def setUpClass(cls):\n cls.test_file_1 = \"/tmp/test_data_loader_dummy_1.pkl\"\n cls.test_file_2 = \"/tmp/test_data_loader_dummy_2.pkl\"\n cls.in_cols = [\"file\", \"id\", \"len\", \"seq\", \"phyche\", \"pssm\", \"logits\",\n \"ss\", \"h_0\", \"h_1\", \"h_2\", \"lm_logits\"]\n cls.out_cols = [\"dataset\", \"id\", \"len\", \"position\", 
\"amino\",\n \"phyche\", \"pssm\", \"logits\", \"ss\", \"h_0\", \"h_1\", \"h_2\",\n \"lm_logits\"]\n\n seq = np.array([[0., 0., 1.],\n [1., 0., 0.]])\n phyche = np.array([[0., 0.], # phyche\n [1., 0.]])\n pssm = np.array([[0., 0., .8], # pssm\n [.8, 0., 0.]])\n logits = np.array([[0.1, 0., 0.9], # logits\n [0.9, 0., 0.1]])\n ss = np.array([[0., 0., 1.], # ss\n [1., 0., 0.]])\n h_0 = np.array([[0., 0., 1., 0.],\n [1., 0., 0., 0.]])\n h_1 = np.array([[0., 0., 1., 0.],\n [1., 0., 0., 0.]])\n h_2 = np.array([[0., 0., 1., 0.], # h_2\n [1., 0., 0., 0.]])\n lm_logits = np.array([[0., 0., 1.], # lm_logits\n [1., 0., 0.]])\n\n ex_1_in = (\"dummy_train.tfrecords\", # file\n \"id1\", # id\n 2, # len\n seq,\n phyche,\n pssm,\n logits,\n ss,\n h_0,\n h_1,\n h_2,\n lm_logits,\n )\n ex_1_out = [tuple([\"train\", ex_1_in[1], ex_1_in[2], j] + [ex_1_in[i][j, :] for i in range(3, len(ex_1_in))]) for j in range(2)]\n\n in_df = pd.DataFrame.from_records(data=[ex_1_in], columns=cls.in_cols)\n # write to file\n in_df.to_pickle(cls.test_file_1)\n\n cls.out_df = pd.DataFrame.from_records(data=ex_1_out, columns=cls.out_cols)" ]
[ "0.74476916", "0.7354245", "0.7169759", "0.6705753", "0.6644966", "0.66320914", "0.6515724", "0.64992905", "0.6398464", "0.63905", "0.62948185", "0.6278425", "0.6241316", "0.62365764", "0.62238926", "0.6203974", "0.6202843", "0.6183581", "0.6180504", "0.6172601", "0.6147451", "0.61436445", "0.6129822", "0.6123778", "0.6120625", "0.612054", "0.61153877", "0.60899556", "0.6065108", "0.605758", "0.60557586", "0.60523736", "0.6029742", "0.6027786", "0.6021974", "0.6008683", "0.599363", "0.59832215", "0.5970852", "0.59689456", "0.5965797", "0.59641606", "0.5948683", "0.5935881", "0.5929288", "0.5920906", "0.59167683", "0.5915398", "0.5904575", "0.58993423", "0.58979684", "0.5896147", "0.58774686", "0.587631", "0.5875967", "0.5867287", "0.5862506", "0.5858521", "0.58556336", "0.5851725", "0.5850531", "0.5848444", "0.5828492", "0.58058375", "0.5803724", "0.57960564", "0.5795704", "0.57922155", "0.57911915", "0.57863355", "0.5784954", "0.5778761", "0.57772493", "0.57653546", "0.57642055", "0.5760668", "0.5757329", "0.5755607", "0.5752071", "0.57483643", "0.5748078", "0.57474905", "0.5744625", "0.5733318", "0.5731488", "0.57307774", "0.57271075", "0.572455", "0.5723551", "0.5723451", "0.5723442", "0.57199025", "0.5719876", "0.57173455", "0.5716125", "0.571483", "0.57124406", "0.5705926", "0.5700542", "0.5700146" ]
0.6676436
4
Sends a message to all listeners of the topic
def _send(self, topic, message):
    body = {'message': encode(message)}
    result = requests.post('{0}/topics/{1}'.format(self.apiUrl, topic), json=body)
    return result.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listen(self, topics):\n logging.debug(f'Listen to {list(map(lambda x: x.name, topics))}')\n\n for topic in map(lambda x: x.name, topics):\n try:\n self.subscribe(topic)\n logging.debug(f'Subscribed the {topic} topic')\n except Exception:\n logging.debug(f\"Can't subscribe the {topic} topic\")", "def send_to_all(self, msg: str) -> None:\n print(f'{self._name} sends a public message: {msg}')\n self._group.publish_message(sender=self, msg=msg)", "def notify(self) -> None:\n for s in self.subscribers:\n s()", "def send_all(self, msg):\n self.update_chats()\n for c in self.chats:\n self.send_message(msg, c)", "def publish(self, topic: Hashable, *args, **kwargs):\n for sub in self.subscribers[topic]:\n sub(*args, **kwargs)", "def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)", "def notify_message_listeners(self, name, msg):\n\n # handle the message specific listeners\n for fn in self._message_listeners.get(name, []):\n try:\n fn(self, name, msg)\n except Exception as e:\n i = 1\n #print(\"[CONNECTION ERROR] unable to handle message listener for \" + name)\n #print(e)\n\n # handle the listeners that are registered for all messages\n for fn in self._message_listeners.get('*', []):\n try:\n fn(self, name, msg)\n except Exception as e:\n i = 1\n #print(\"[CONNECTION ERROR] unable to handle * message listener for \" + name)\n #print(e)", "def listener(messages):\n for m in messages:\n chatid = m.chat.id\n print(str(chatid))\n if m.content_type == 'text':\n text = m.text\n tb.send_message(chatid, text)", "def notifyObservers(self, topic, value):\n for observer in self.observers:\n observer.notify(topic, value)", "def sendToAllSubscribers(self, message, subject):\n\n for destination in self.subscribers:\n if(self.log):\n logging.info(\"Sending \" + message + \" to \" + destination)\n\n self.sendEmail(destination, message, subject)", "def inform_listeners(self):\n d = self.get_all_sorted()\n for listener in self.listeners:\n listener.stream_updated(d)", "def run(self, topic: str):\n while self.events:\n wait = self.get_wait_time()\n self.logger.debug('sleeping for %s seconds', wait)\n time.sleep(wait)\n\n event = self.events.pop(0)\n self.send(topic, event)\\\n .add_callback(self.on_send_success, event=event)\\\n .add_errback(self.on_send_failure, event=event)\n\n self.flush()", "def event_publish(self, cmd):\n for sub in self.subscribers:\n sub.event_receive(cmd)", "async def publish(self):\n for sock in self.subscribers:\n sock.send_json(self.main_server.state)\n await asyncio.sleep(0)", "def send_to_leaders(self, msg: str) -> None:\n print(f'{self._name} sends a message to all the leaders: {msg}')\n self._group.publish_message_to_leaders(sender=self, msg=msg)", "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)", "def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)", "def send_message(self, topic_name, message):\n self.topics[topic_name].append(message)", "def _notify_all(self, event_data):\n for subs in self._subscribers:\n subs.notify(event_data)", "def broadcast_message(msg: str):\r\n\tfor ip in _clients.keys():\r\n\t\tsend_message(ip, msg)", "def publish(self, message, topic=''):\n if type(message) != types.ListType:\n message = [message]\n if topic:\n message = [topic] + message\n self.send(message)", "def send_to_all(self, message: Message):\n\t\tto_send = self.registry.get_user(message.sender) + \": \" + message.body\n\n\t\tfor ip in 
self.registry.ip():\n\t\t\tself.send(to_send, ip)", "def broadcast(self, msg):\n for client in self.clients.values():\n send_data(client.socket, msg)", "def __sendToAll(self, message, author = None):\n\n self.messageCallback(message)\n\n for user in self.__connections:\n\n if not user is author:\n try: \n user[\"connection\"].send((message + self.separator).encode())\n except: \n user[\"online\"] = False", "async def notify_users():\n if not app['websockets']:\n return\n\n message = build_user_message()\n await wait([user.send(message) for user in app['websockets']])", "def on_next(self, msg):\n # publish the message to the topics\n retain = msg.retain if hasattr(msg, 'retain') else False\n for (topic, qos) in self.topics:\n self.client.publish(topic, msg, qos, retain)", "def _send_to_all_rooms(self, message):\r\n for room in self._rooms.values():\r\n room.send_message(message)", "def notify_all(self, event: GameEvent):\n for listener in self._listeners:\n listener.notify(event)", "def message_listener(self, topic, timeout):\n \"\"\"\n demo_message = [\n {'user_id': 'Lazy Man', 'timestamp': '2019-10-06T22:59:59.989Z', 'risk_level': 3}\n ]\n\n for message in demo_message:\n yield ERROR_CODE_ZERO, \"\", message\n \"\"\"\n\n while True:\n for error_code, error_message, message in self._consumer.subscribe(topic, timeout):\n yield error_code, error_message, message\n if error_code == 1:\n break", "def publish_list(self, messages: list) -> None:", "def publish_list(self, messages: list) -> None:", "def listen(agent, config):\n base_topic = config['base_topic']\n short_topics = ['cpu_percent', 'memory_percent', 'disk_percent']\n topics = [base_topic + '/' + x for x in short_topics]\n seen_topics = set()\n\n def add_topic(peer, sender, bus, topic, headers, messages):\n seen_topics.add(topic)\n\n agent.vip.pubsub.subscribe('pubsub', base_topic,\n callback=add_topic)\n\n max_wait = 1 + max([value for key, value in _test_config.items()\n if key.endswith('_interval')])\n\n all_topics_seen = lambda: set(topics) <= seen_topics\n\n assert poll_gevent_sleep(max_wait, all_topics_seen)", "def requestTopics(self):\n self.port.flushInput()\n # request topic sync\n self.port.write(\"\\xff\\xff\\x00\\x00\\x00\\x00\\xff\")", "def send_messages(self, bot, update, messages):\n\n for msg in messages:\n self.send_message(bot, update, msg)", "async def _async_send_to_all_devices(self, message):\n for device in self.devices:\n await device.set(message)", "def _listen_to_queues(cls):\n queues = cls.get_service_queues()\n for queue in queues:\n queue.consume(cls.process_messages)", "def notify_all_consumers(subscription, data):\n for consumer in get_consumers(subscription):\n notify_consumer(\n subscription=subscription,\n subscription_id=consumer.subscription_id,\n data=data)", "def on_message(\n self, client: mqtt.Client, userdata: typing.Any, msg: mqtt.MQTTMessage\n ) -> None:\n self.msgs.append(msg)", "def broadcast(self, message):\r\n for c in self.characters:\r\n c.notify(message)", "def subscribe(self, topic):\n self.topic = topic\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n\n self.client.loop_start()", "def broadcast(msg):\n\n for sock in clients:\n sock.send(bytes(msg, \"utf-8\"))", "def _subscribe_to_peers(self):\n if not self.config['PEERS']:\n return\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.setsockopt(zmq.SUBSCRIBE, '')\n\n for ip, pub_port, api_port in self.config['PEERS']:\n if not self._is_self(ip, pub_port):\n address = '%s:%s' % 
(ip, pub_port)\n self.logger.debug('Subscribing to peer at: %s' % address)\n socket.connect('tcp://%s' % address)\n\n def new_msg_handler(sender, msg=None):\n topic, delimiter, packed = msg.partition(' ')\n topic = int(topic)\n message_dict = msgpack.unpackb(packed)\n #self.logger.debug('News for topic %s:%s arrived' %\n # (topic, constants.topics.get(topic)))\n self._handle_topic(topic, message_dict)\n\n sig = signal(constants.NEW_MESSAGE_TOPIC)\n sig.connect(new_msg_handler, weak=False)\n\n while True:\n msg = socket.recv()\n sig.send(self, msg=msg)\n gevent.sleep(.1)", "def subscribe(self, channels: typing.Iterable, listener: types.MethodType):\n raise TypeError(\"{} - subscribe not implemented!\")", "def send( value, data, listeners ):\n if len( listeners ):\n # get the first item on the list\n listener = listeners.pop( 0 )\n # test to see if the first item in the list is a list\n if ( ( type( listener ) == type( \"\" ) ) and\n ( len( listener ) > 0 ) ):\n pub.sendMessage( listener, value=value, data=data, listeners=listeners )\n elif ( type( listener ) == type( [] ) ):\n Common.send( value, data, listener )\n Common.send( value, data, listeners )", "def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)", "def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)", "def on_connect():\n # There is now a connection\n subscribe_to_topic(\"pir\",\"Trigger\")", "def notify_consumers(subscription, subscription_ids, data):\n\n for id in subscription_ids:\n notify_consumer(\n subscription=subscription,\n subscription_id=id,\n data=data)", "async def admin_msg(self, message):\n for channel in self.admin_channels.values():\n if channel:\n await channel.send(message)", "def sendSensors(self,sensors):\n self.broadcaster.sendSensors(sensors)", "def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)", "def send_all(self,\n message: bytes\n ) -> None:\n\n self.log_to_debug(\n line=f\"Send_All: {message}\"\n )\n for user_key in self.connected_users:\n\n if self.connected_users[user_key]['authorized']:\n protocol = self.connected_users[user_key]['main']['base'].transport.protocol\n protocol.sendLine(\n line=message\n )", "def notify_all(self, private_key, message):\n return self._samp_hub.notifyAll(private_key, message)", "def run(self):\n\t\tfor item in self.pubSub.listen():\n\t\t\tself.processItem(item)", "def listen(self, handler):\n try:\n logger.info('Listening on topic: {}'.format(self.topic))\n consumer = KafkaConsumer(self.topic)\n\n for msg in consumer:\n object_dict = self._extract_updated_object(msg)\n if object_dict:\n handler(object_dict)\n\n except Exception as ex:\n if isinstance(ex, KafkaError):\n logger.error('Error with Kafka: {}'.format(ex))", "def _update_listeners(self, trajectory, tasks):\n for listener in self.listeners:\n listener.log(trajectory, tasks)", "async def messages(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"messages\")", "def on_connect(client, interface, flags, rc):\n logger.info(\"Connected with result code \" + str(rc))\n for i in Const.sub_topics:\n client.subscribe(i)\n logger.info(\"Successfully subscribed to \" + i)", "async def subscribe(topics, on_close=None, on_error=None):\n loop = asyncio.get_event_loop()\n session = 
aiohttp.ClientSession()\n async with session.ws_connect('wss://api.huobi.pro/ws') as ws:\n keys = {\n topic: uuid.uuid4().hex\n for topic in topics\n }\n keyed_channels = {\n v: topics[k]\n for k, v in keys.items()\n }\n subscribed_chanels = {}\n for topic, config in topics.items():\n payload = {\n 'sub': topic,\n 'id': keys[topic]\n }\n await ws.send_str(encode_ws_payload(payload))\n async for msg in ws:\n if msg.type == aiohttp.WSMsgType.BINARY:\n data = decode_ws_payload(msg.data)\n\n ping = data.get('ping')\n if ping:\n reply = encode_ws_payload({'pong': ping})\n await ws.send_str(\n reply\n )\n\n subbed = data.get('subbed')\n if subbed:\n if data.get('status') == 'ok':\n subscribed_chanels[subbed] = keyed_channels[data['id']]\n\n ch = data.get('ch')\n if ch:\n cb = subscribed_chanels[ch].get('callback', lambda _: None)\n if asyncio.iscoroutinefunction(cb):\n await cb(data)\n else:\n loop.run_in_executor(None, partial(cb, data))\n elif msg.type == aiohttp.WSMsgType.CLOSED:\n if on_close:\n return on_close()\n return\n elif msg.type == aiohttp.WSMsgType.ERROR:\n if on_error:\n return on_error()\n return", "def broadcast(self, txt):\n for chan in self.state['joinedchannels']:\n self.say(chan, txt)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\")+msg)", "def _publish(self, messages):\n num_of_msg = len(messages)\n\n LOG.debug('Publishing %d messages', num_of_msg)\n\n first = True\n while True:\n try:\n for topic in self._topics:\n self._kafka_publisher.publish(\n topic,\n messages\n )\n LOG.debug('Sent %d messages to topic %s',\n num_of_msg, topic)\n break\n except FailedPayloadsError as ex:\n # FailedPayloadsError exception can be cause by connection\n # problem, to make sure that is not connection issue\n # message is sent again.\n LOG.error('Failed to send messages %s', ex)\n if first:\n LOG.error('Retrying')\n first = False\n continue\n else:\n raise falcon.HTTPServiceUnavailable('Service unavailable',\n str(ex), 60)\n except Exception as ex:\n LOG.error('Failed to send messages %s', ex)\n raise falcon.HTTPServiceUnavailable('Service unavailable',\n str(ex), 60)", "def notify_all(self, message):\n return self.hub.notify_all(self.get_private_key(), message)", "def send(self, topic, msg):\n out = \"%s %s\" % (topic, msg)\n self.topics[topic].send(bytes(out, 'utf-8'))", "def send_all(self, data, sender=None):\n for client in self.clients:\n if client == sender:\n continue\n client.send(data)", "def on_message(client, userdata, message): \n print(\"Topic: \" + message.topic + \" Message: \" + message.payload.decode('utf-8'))", "def on_message(self, name):\n\n def decorator(fn):\n if isinstance(name, list):\n for n in name:\n self.add_message_listener(n, fn)\n else:\n self.add_message_listener(name, fn)\n\n return decorator", "async def notify(event):\n for subscriber in syncsubscribers:\n subscriber(event)\n for subscriber in asyncsubscribers:\n await subscriber(event)", "def message_subscribers(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_message_subscribers(self, *args, **kwargs)", "def collector_process_data(self, data):\n for c in clients:\n c.on_message(json.dumps(data))", "def add_topic_handlers(self):\n self.client.message_callback_add(deployment_topic, self.on_deployment_topic)\n self.client.message_callback_add(illumination_topic, self.on_illumination_topic)", "def call_subscribers(self, *args, **kwargs) -> None:\n for subscriber in self.get_subscribers():\n subscriber(*args, 
**kwargs)", "async def _send_messages(self, message: str) -> None:\n for chat_id in self.chat_ids_list:\n await self._send_request_to_api(message, chat_id)", "async def broadcast(self):\n with await self.redis as connection:\n await connection.execute_pubsub(\"subscribe\", self.channel)\n try:\n while True:\n room = await self.channel.get(encoding=\"utf-8\")\n await self.ws.send(message)\n except websockets.ConnectionClosed as e:\n print(f\"<ChatManager:broadcast>[error] {e}\")\n await self.connection_closed()", "def send_all(self, data):\n\n for client in self.clients:\n try:\n client.send(data)\n except Exception:\n self.clients.remove(client)", "def publish_message_to_leaders(self, sender, msg: str) -> None:\n for leader in self._leaders:\n if leader is not sender: # The sender himself should not receive the message.\n leader.receive_message(sender_name=sender.name, msg=msg)", "def Talk(self, topic, message):\n Send(self.channel, topic, message)", "def test_subscribe_many_listeners(self):\n def listener():\n pass\n\n def listener1():\n pass\n\n def listener2():\n pass\n\n EVENT_MANAGER.subscribe('test_listeners', listener, listener1, listener2)\n\n self.assertIn(listener, EVENT_MANAGER._listeners['test_listeners'])\n self.assertIn(listener1, EVENT_MANAGER._listeners['test_listeners'])\n self.assertIn(listener2, EVENT_MANAGER._listeners['test_listeners'])", "def received(self, msg, msgID):\r\n for protocol in self._protocols:\r\n protocol.sendMessage(self, msg, msgID)", "def _on_mqtt_message(\n self, client: mqtt.Client, userdata: str, message: mqtt.MQTTMessage\n ) -> None:\n self.log.debug(f\"Received message on topic: {message.topic}\")\n self.inbound_message_listener(Message(message.topic, message.payload))", "def on_message(client, userdata, msg):\n print(msg.topic + \" \" + str(msg.payload))\n send_command(str(msg.payload.decode(\"utf-8\")))", "def send(cls,event):\n for rcv in list(cls.getReceivers(event.sender)):\n if event.consumed:\n break\n rcv(event)", "def send_notification_to_all_guides(sender, thesis, message):\n\n for thesisGuide in ThesisGuide.objects.filter(thesis = thesis):\n receiver = User.objects.get(username = thesisGuide.guide.user.username)\n send_notification(sender, receiver, message, '')", "def publish_message(self, sender, msg: str) -> None:\n for member in self._all_members.values():\n if member is not sender: # The sender himself should not receive the message.\n member.receive_message(sender_name=sender.name, msg=msg)", "def on_messages(self, msg_list):\n self.stats.on_pack_recv(len(msg_list))\n\n for msg in msg_list:\n if self.state == OPEN:\n self.conn.on_message(msg)", "def listen(self):\n self._client.listen(self._default_subscribe_to_dest)", "def broadcast(mensagem, prefixo = \"\"):\n for sock in clients:\n sock.send(bytes(prefixo, \"utf8\") + mensagem)", "def send_events(sock):\n i=0\n while i<10:\n log.info('Sending message from publisher..')\n sock.send(\"even - hai i am publisher\")\n time.sleep(0.2)\n i += 1", "def sendToAll(a):\n for x in participantes:\n con = x['socket']\n con.send(str.encode(a))", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix, \"utf8\")+msg)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\") + msg)", "def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)", "async def send_message(self, msg: 'Message', **kwargs):\n\n if 
self._next_targets:\n for pod_address in self._next_targets:\n self._send_message(msg, pod_address)\n else:\n routing_table = RoutingTable(msg.envelope.routing_table)\n next_targets = routing_table.get_next_targets()\n for target, _ in next_targets:\n self._send_message(\n self._add_envelope(msg, target),\n target.active_target_pod.full_address,\n )", "def send_heartbeat_messages(self):\n while True:\n for node in self.directory.get_current_list():\n heartbeat_message = MessageFactory.generate_heartbeat_message(\n origin_node=self.socket_manager.node,\n destination_node=node\n )\n self.socket_manager.send_message(heartbeat_message)\n time.sleep(3)", "def _onMessage(self, client:mqtt.Client, userdata:Any, message:mqtt.MQTTMessage) -> None:\n\t\tself.lowLevelLogging and self.messageHandler and self.messageHandler.logging(self, logging.DEBUG, f'MQTT: received topic:{message.topic}, payload:{message.payload}')\n\t\tfor t in self.subscribedTopics.keys():\n\t\t\tif simpleMatch(message.topic, t, star='#'):\n\t\t\t\tif (topic := self.subscribedTopics[t]).callback:\n\t\t\t\t\t# Run actual request handling in a thread\n\t\t\t\t\t# For some reasons mid is not initialized in the on on_message callback, so we use the timestamp for the actor name\n\t\t\t\t\tBackgroundWorkerPool.newActor(topic.callback, name=f'mid_{message.timestamp}').start(\tconnection=self,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttopic=message.topic,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata=message.payload, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t**topic.callbackArgs)\n\t\t\t\t\tbreak\t# break at first occurence", "def send_public_message(self, userMessage):\n for client in self.clients:\n self.send_message(userMessage, client.get_socket())", "def broadcast(self, message):\n for s in self.connections:\n s.send(bytes(message, encoding='utf-8'))", "def send_messages(messages):\n while messages:\n msg = messages.pop()\n sent_messages.append(msg)", "def on_message(client, userdata, message):\n logger.info(\"Message received: topic [{}]\\nbody [{}]\".format(message.topic, str(\n message.payload.decode(\"utf-8\"))))", "def broadcast(data):\n for client in CLIENTS:\n client.write_message(data)", "def dispatch(self, event: str, message: str) -> None:\n\t\tfor subscriber, callback in self.get_subscribers(event).items():\n\t\t\tcallback(event, message)" ]
[ "0.6755926", "0.647603", "0.6460375", "0.6401803", "0.6373115", "0.6307153", "0.6262211", "0.6215271", "0.613345", "0.6130038", "0.6114487", "0.60955477", "0.6017219", "0.600299", "0.5941551", "0.588395", "0.58836484", "0.5870817", "0.58618623", "0.5843349", "0.5838989", "0.5817864", "0.58113", "0.5787331", "0.5772329", "0.5751711", "0.57459825", "0.5706031", "0.5698133", "0.5662031", "0.5662031", "0.56606567", "0.5648821", "0.56435245", "0.5630794", "0.56119174", "0.5610013", "0.55989826", "0.55669093", "0.5565172", "0.5561955", "0.5560153", "0.5535353", "0.5529557", "0.5508898", "0.5508898", "0.5508526", "0.5494756", "0.5491418", "0.54897803", "0.5472541", "0.54678136", "0.5467231", "0.5451853", "0.5437374", "0.5436112", "0.54177576", "0.5392238", "0.53918743", "0.5390816", "0.53870916", "0.5375169", "0.53635406", "0.5346318", "0.5335866", "0.5331589", "0.5325713", "0.5320441", "0.5317303", "0.5311926", "0.53102005", "0.5310042", "0.53025174", "0.5295838", "0.528896", "0.52859396", "0.5280252", "0.5277763", "0.5269302", "0.5267123", "0.52608466", "0.5251385", "0.5249093", "0.5240641", "0.52362376", "0.52310103", "0.52267104", "0.520689", "0.52064466", "0.5195746", "0.51912755", "0.5190238", "0.5178848", "0.51780504", "0.5174306", "0.5167929", "0.5161406", "0.5153495", "0.5149821", "0.5145707", "0.51439565" ]
0.0
-1
Starts listening for new messages on this topic
def _listen_on(self, topic, transform=None):
    task = self.topic_task_map.get(topic)
    if task is None:
        task = StreamingRequestTask(self.apiUrl, topic, transform)
        self.topic_task_map[topic] = task
        task.start()
    listener = task.create_listener(transform)
    self.listener_task_map[listener] = task
    return listener
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listen(self):\n self.channel.start_consuming()", "def listen(self):\n self._client.listen(self._default_subscribe_to_dest)", "def listen(self):\n\n # It's ideal to start listening before the game starts, but the\n # down-side\n # is that object construction may not be done yet. Here we pause\n # shortly\n # to let initialization finish, so all functionality (e.g. self.log)\n # is\n # available.\n time.sleep(0.1)\n\n for st in self.sentences():\n if st:\n self.onMessage(source=None, message=st)", "def startReceiving(self):\n self.listening = True\n self.start()", "def run(self):\n self.listen(self.input_topics.filter_by(transmission='tcp'))\n\n logging.info('Getting into the listening loop')\n self.running = True\n while self.running:\n self.loop()", "def start(self):\n if self._chan is not None:\n try:\n self._chan.start_consume()\n except ChannelError:\n log.info('Subscriber is already started')\n\n else:\n self.gl = spawn(self.listen)", "def subscribe(self, topic):\n self.topic = topic\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n\n self.client.loop_start()", "def listen(self, topics):\n logging.debug(f'Listen to {list(map(lambda x: x.name, topics))}')\n\n for topic in map(lambda x: x.name, topics):\n try:\n self.subscribe(topic)\n logging.debug(f'Subscribed the {topic} topic')\n except Exception:\n logging.debug(f\"Can't subscribe the {topic} topic\")", "def start_listening(self):\n assert not self.listening\n assert not self.connected\n ctx = zmq.Context.instance()\n self._recv_socket = ctx.socket(zmq.SUB)\n self._recv_poller = zmq.Poller()\n self._recv_socket.setsockopt(zmq.SUBSCRIBE, b\"\")\n self._recv_poller.register(self._recv_socket, zmq.POLLIN)\n for i in range(N):\n if i != self.ID:\n address = NODE_INFOS[i].address\n port = NODE_INFOS[i].port\n self._recv_socket.connect(f\"tcp://{address}:{port}\")\n self.listening = True", "def listen(self):\n pass", "def start(self):\n self.listener.start()\n # No need to start broadcaster, it just sends when necessary", "def listen(self):\n\n\t\tprint(\"Connected to the room\")\n\n\t\t#: Watch for messages coming from the server.\n\t\twhile self.joined:\n\n\t\t\t#: Wait for a message to be recieved from the server.\n\t\t\ttry:\n\t\t\t\t#: Store a most recent message for testing purposes.\n\t\t\t\tself.most_recent_message = self.client.recv(1024).decode()\n\t\t\t\tself.messages.append(self.most_recent_message)\n\t\t\texcept OSError:\n\t\t\t\tprint(\"Connection to the server has been lost.\")\n\n\t\t\t\t#: Quit from the server to do cleanup.\n\t\t\t\tself.quit(False)", "def listen(self):\n self.can_listen = True\n threading.Thread(target=self._listen).start()", "def start(self):\n self._listener.start()", "def start(self):\n\n def pubsub_thread():\n \"\"\" Call get_message in loop to fire _handler. 
\"\"\"\n\n while not self._stop.is_set():\n self._pubsub.get_message()\n sleep(0.01)\n\n # subscribe to personal channel and fire up the message handler\n self._pubsub.subscribe(**{'actor:%s' % self.uuid: self._handler})\n self._proc = Thread(target=pubsub_thread)\n self._proc.daemon = True\n self._proc.start()", "def listen(self, handler):\n try:\n logger.info('Listening on topic: {}'.format(self.topic))\n consumer = KafkaConsumer(self.topic)\n\n for msg in consumer:\n object_dict = self._extract_updated_object(msg)\n if object_dict:\n handler(object_dict)\n\n except Exception as ex:\n if isinstance(ex, KafkaError):\n logger.error('Error with Kafka: {}'.format(ex))", "def listen(self) -> None:\n raise NotImplementedError", "def listen(self):\n raise NotImplementedError()", "def listen(agent, config):\n base_topic = config['base_topic']\n short_topics = ['cpu_percent', 'memory_percent', 'disk_percent']\n topics = [base_topic + '/' + x for x in short_topics]\n seen_topics = set()\n\n def add_topic(peer, sender, bus, topic, headers, messages):\n seen_topics.add(topic)\n\n agent.vip.pubsub.subscribe('pubsub', base_topic,\n callback=add_topic)\n\n max_wait = 1 + max([value for key, value in _test_config.items()\n if key.endswith('_interval')])\n\n all_topics_seen = lambda: set(topics) <= seen_topics\n\n assert poll_gevent_sleep(max_wait, all_topics_seen)", "def start(self):\n self.kb_client.subscribe(self.kb_ID, {\"_data\": {\"tag\": TAG_ANSWER, \"text\": \"$input\", \"timestamp\": \"$time\", \"language\": \"$lang\"}}, self.add_emotion) # from the 'gnlp' module", "def start(self):\n\n # Start listening for records\n self._run_loop(True)\n # There might still be records in the queue.\n self._run_loop(False)", "def topic(cls, **kwargs) -> str:\n return \"hermes/asr/startListening\"", "def listen():\n msg = MSG()\n ctypes.windll.user32.GetMessageA(ctypes.byref(msg), 0, 0, 0)", "def listen():\n msg = MSG()\n ctypes.windll.user32.GetMessageA(ctypes.byref(msg), 0, 0, 0)", "def _start_listeners(self):\n if self.listeners:\n self.state = \"listening\"\n for event_listener in self.listeners:\n event_listener.start()\n\n for listener in self.listeners:\n listener.join()", "def redis_chat_messages_listener(redis_server, redis_new_chat_messages):\n logging.info(\"Spun up a redis chat message listener.\")\n while True:\n raw = redis_new_chat_messages.next()\n msg = (ChatMessage(**json.loads(raw['data'])))\n ## just hook into our existing way for now\n ## a bit redundant but allows server to be run without redis\n logging.info(\"new chat message subscribed to: %s\" % raw['data'])\n ## add o our local buffer to push to clients\n chat_channel = get_chat_channel(redis_server, msg.channel_name)\n chat_channel.list_add_chat_message(msg)", "def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)", "def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)", "def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)", "def _subscribe_to_peers(self):\n if not self.config['PEERS']:\n return\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.setsockopt(zmq.SUBSCRIBE, '')\n\n for ip, pub_port, api_port in self.config['PEERS']:\n if not self._is_self(ip, pub_port):\n address = '%s:%s' % (ip, pub_port)\n 
self.logger.debug('Subscribing to peer at: %s' % address)\n socket.connect('tcp://%s' % address)\n\n def new_msg_handler(sender, msg=None):\n topic, delimiter, packed = msg.partition(' ')\n topic = int(topic)\n message_dict = msgpack.unpackb(packed)\n #self.logger.debug('News for topic %s:%s arrived' %\n # (topic, constants.topics.get(topic)))\n self._handle_topic(topic, message_dict)\n\n sig = signal(constants.NEW_MESSAGE_TOPIC)\n sig.connect(new_msg_handler, weak=False)\n\n while True:\n msg = socket.recv()\n sig.send(self, msg=msg)\n gevent.sleep(.1)", "def listen(self):\n while self.active:\n self.handle_input()", "def run():\n listen_active_email_channel()", "def listen(self):\n logging.info(\"Start listening\")\n try:\n for entry in self.socket.poll_socket():\n self.socket.send(True)\n self.send_evaluation(entry)\n except KeyboardInterrupt:\n logging.info(\"Stop listening\")", "def listen_new(self, f):\n self._coms.register_starting_callback(f)", "def listen(self):\n self.init_delete_batch_processing()\n self.init_file_batch_processing()\n self.init_symlink_batch_processing()\n\n self.loop.create_task(self.start_watching_roots())\n\n self.revisit_cond = asyncio.Condition()\n self.loop.create_task(self.start_polling_revisits())\n\n self.start_polling_changes()\n self.loop.run_forever()\n self.stop_polling_changes()", "def startListening(self):\n \n self.listener_thread = threading.Thread(target=self.listening, daemon=True)\n self.listener_thread.start()\n\n # stateupdate = threading.Thread(target=self.showStatus, daemon=True)\n # stateupdate.start()\n\n # Main App Loop (Keeps the Client opened)\n while self.listener_thread.is_alive():\n time.sleep(1)\n else:\n print('Shutting Main Thread-1')\n sys.exit()", "def start_listening(self, handler) -> None:\n assert not self.client_recv\n self.client_recv = handler\n self.scheduler.start()", "def on_connect():\n # There is now a connection\n subscribe_to_topic(\"pir\",\"Trigger\")", "def _listen(self):\n if not self.is_connected:\n self.connect()\n\n while True:\n data = self.recv()\n ping = PING_RE.match(data)\n if ping:\n self.handle_ping(ping.group(1))\n else:\n result = self.handle_message(data)\n\n if result:\n print(result)\n\n time.sleep(1)", "def run(self):\n t = Thread(target=self._listen)\n t.start()", "def start_listener(self):\n if not self.listener:\n #self.listener = threading.Thread(target=self.tn.listener)\n self.listener = threading.Thread(target=self.listener_handler)\n self.listener.start()", "def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)", "def Listen(self, callback):\n if self.service_account_info_json_file:\n with open(self.service_account_info_json_file, encoding='utf-8') as f:\n service_account_info = json.load(f)\n audience = 'https://pubsub.googleapis.com/google.pubsub.v1.Subscriber'\n credentials = auth.jwt.Credentials.from_service_account_info(\n service_account_info, audience=audience)\n else:\n print('[INFO]\\tNo service account. Using application default credentials')\n # pylint: disable=unused-variable\n credentials, project_id = auth.default()\n\n sub_client = pubsub_v1.SubscriberClient(credentials=credentials)\n future = sub_client.subscribe(self.subscription_name, callback)\n print('[INFO]\\tListening to pub/sub topic. 
Please wait.')\n # KeyboardInterrupt does not always cause `result` to exit early, so we\n # give the thread a chance to handle that within a reasonable amount of\n # time by repeatedly calling `result` with a short timeout.\n while True:\n try:\n future.result(timeout=5)\n except futures.TimeoutError:\n continue\n except (futures.CancelledError, KeyboardInterrupt):\n future.cancel()\n except Exception as ex: # pylint: disable=broad-except\n print(f'[ERROR]\\tPub/sub subscription failed with error: {ex}')\n future.cancel()\n break", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)", "def start(self):\n # Start listening for messages\n self.connect_to_presentation()\n\n # Start the heartbeat\n self.heartbeat_thread.start()", "def run(self):\n print('starting up on {} port {}'.format(*self.listener_address))\n self.selector.register(self.listener, selectors.EVENT_READ)\n\n # Serialize our listener's host and port\n serializedAdd = fxp_bytes_subscriber.serialize_address(\n self.listener_address[0], self.listener_address[1])\n\n # Contact with Publisher\n self.listener.sendto(serializedAdd, self.gcd_address)\n\n while True:\n events = self.selector.select(CHECK_INTERVAL)\n for key, mask in events:\n data = self.receive_message()\n self.removeOldQuote()\n self.createGraph(data)\n self.arbitrage()\n self.checkTimeout()", "def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)", "def __check_for_messages(self):\n\n # Wait for at least poll_interval sec\n polling_interval = self.conf.messaging_server.polling_interval\n time.sleep(polling_interval)\n if self.conf.messaging_server.debug:\n LOG.debug(\"Topic {}: Checking for new messages\".format(\n self.target.topic))\n self._do()\n return True", "def listen(self):\n self.processor_thread = Thread(target = self.event_loop, name=\"InputThread-\"+str(self.thread_index), args=(self.thread_index, ))\n self.thread_index += 1\n self.processor_thread.daemon = True\n self.processor_thread.start()", "def listen(self):\n print \"starting server thread with address \" + str(self.address)\n server_thread = ServerThread(self.address, self.response_queue, self.queue_lock, self.on_message_received)\n server_thread.start()\n self.connected_as_server = True # TODO only if successful", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def listen(client, main):\n\n @client.event\n async def on_message_edit(old, message):\n main.message_handler(message, True)", "def listen(self):\n result = self.channel.queue_declare(queue=self.config['queue'], \n exclusive=True)\n if self.endpoints is not None:\n for key in self.endpoints:\n self.channel.queue_bind(exchange=self.config['exchange'], \n queue=self.config['queue'],\n routing_key=f\"sensor_value.{key}\")\n else:\n self.channel.queue_bind(exchange=self.config['exchange'],\n queue=self.config['queue'],\n routing_key=\"sensor_value.#\")\n \n self.channel.basic_consume(queue=self.config['queue'], \n on_message_callback=self.decode_values, \n auto_ack=True)\n\n # starts a while-type loop\n 
print(\"wabbit eatin hay\")\n self.channel.start_consuming()", "def listen(self):\n self.socket.listen(6)", "def start(self) -> None:\n conn_manager = ConnectionManager(broker_host=self.broker_host, queue=self.queue)\n channel = conn_manager.start_channel()\n channel.basic_consume(queue=self.queue, on_message_callback=self.callback)\n\n try:\n print(\"PV Simulator...\")\n channel.start_consuming()\n except KeyboardInterrupt:\n pass", "def subscribe( self, topic ):\n logging.info( \"Subscribing to topic %s\" %topic )\n try:\n self.client.subscribe( topic )\n except Exception as error:\n print( error )", "def _on_mqtt_message(\n self, client: mqtt.Client, userdata: str, message: mqtt.MQTTMessage\n ) -> None:\n self.log.debug(f\"Received message on topic: {message.topic}\")\n self.inbound_message_listener(Message(message.topic, message.payload))", "def listen(self):\n rospy.init_node('opendr_SyntheticDataGeneration', anonymous=True)\n rospy.loginfo(\"SyntheticDataGeneration node started!\")\n rospy.spin()", "def on_next(self, msg):\n # publish the message to the topics\n retain = msg.retain if hasattr(msg, 'retain') else False\n for (topic, qos) in self.topics:\n self.client.publish(topic, msg, qos, retain)", "def start_listener():\n listener = keyboard.Listener(\n on_press=on_press\n )\n listener.start()", "def do_start(self):\n threading.Thread(group = None, \n target = self._subscribe_message, name = \"RabbitMQSubscribeThread\") .start()\n threading.Thread(group = None, \n target = self._publish_message, name = \"RabbitMQPublishThread\").start()", "def listen(self, prefix):\n self.app.route(prefix)(self._on_interest)\n logging.info(f'Read handle: listening to {Name.to_str(prefix)}')", "def activate(self):\n self.socket.listen(self.request_queue_size)", "async def start(self) -> None:\n while self.producer is None:\n try:\n self.producer = self._producer_factory(\n bootstrap_servers=self.bootstrap_servers,\n ssl_cafile=self.ssl_cafile,\n ssl_certfile=self.ssl_certfile,\n ssl_keyfile=self.ssl_keyfile,\n security_protocol='SSL',\n value_serializer=lambda v: json.dumps(v).encode('utf-8'),\n )\n except kafka.errors.NoBrokersAvailable:\n await trio.sleep(self.connect_interval_secs)\n else:\n logger.info('kafka-ready: %s', self.producer)\n async with self.has_producer:\n self.has_producer.notify_all()", "def setup_stream_listener(self):\n listener = Listener()\n listener.set_callback(self.mq.producer.publish)\n self.stream = tweepy.Stream(\n self.config.get('twitter', 'userid'),\n self.config.get('twitter', 'password'),\n listener,\n timeout=3600\n )", "def Listen(self):\n while True:\n time.sleep(1)", "def startListener(self):\n self.send_conn = None\n self.recv_conn = None\n listener = threading.Thread(target=self.listen, args=(self.recv_conn,))\n sender = threading.Thread(target=self.send, args=(self.send_conn,))\n listener.daemon = True # setting daemon to true means threads wont stop program from closing\n sender.daemon = True\n listener.start()\n sender.start()", "def start_consuming(self):\n logger.info('Issuing consumer related RPC commands')\n self.add_on_cancel_callback()\n logger.info(\"[{}] Waiting for messages on exchange {}\".format(self.bot_id, self.exchange))\n self._consumer_tag = self._channel.basic_consume(self.on_message,\n self.queue_name)", "def watch_for_discovery_messages(self):\n while True:\n message = self.socket_manager.get_discovery_message()\n if message.disconnect == \"1\":\n self.handle_disconnect(message)\n elif message.direction == \"0\":\n 
self.respond_to_discovery_message(message)\n elif message.direction == \"1\":\n serialized_directory = message.get_payload()\n self.directory.merge_serialized_directory(serialized_directory)\n self.initiate_rtt_calculation()", "def on_message(\n self, client: mqtt.Client, userdata: typing.Any, msg: mqtt.MQTTMessage\n ) -> None:\n self.msgs.append(msg)", "def start(self):\n l.debug(\"Initializing the MQTT connection...\")\n self._mqtt_client.connect(self.domain, self.port, keepalive=30)\n\n # Starts a new thread that handles mqtt protocol and calls us back via callbacks\n l.debug(\"(Re)Starting the MQTT loop.\")\n self._mqtt_client.loop_stop(True)\n self._mqtt_client.loop_start()\n self.connect_event.wait()\n\n # Subscribe to the corresponding topics ...\n self.device_topic = build_device_request_topic(self.target_device_uuid)\n self.client_response_topic = build_client_response_topic(self.user_id, self._app_id)\n self.user_topic = build_client_user_topic(self.user_id)\n\n l.info(f\"Subscribing to topic: {self.device_topic}\")\n self._mqtt_client.subscribe(self.device_topic)\n self.subscribe_event.wait()\n self.subscribe_event.clear()\n\n l.info(f\"Subscribing to topic: {self.client_response_topic}\")\n self._mqtt_client.subscribe(self.client_response_topic)\n self.subscribe_event.wait()\n self.subscribe_event.clear()\n\n l.info(f\"Subscribing to topic: {self.user_topic}\")\n self._mqtt_client.subscribe(self.user_topic)\n self.subscribe_event.wait()\n self.subscribe_event.clear()", "def on_connect(self):\n log.info(\"Stream connected\")", "def subscribe(receiver, updateInterval=10):", "def start_streaming(self, hashtag):\n\n\t\tstream = TwitterStreamListener(hashtag, self._kafka_host)\n\t\ttwitter_stream = tweepy.Stream(auth = self._api.auth, listener=stream)\n\t\ttwitter_stream.filter(track=[hashtag], async=True)\n\t\tself._streams.append(twitter_stream)\n\t\tlog.debug(\"stream connected %s\" % (hashtag))", "def run(self):\n self.logger.info(\"Starting messenger.\")\n self.recv()", "def create_listen_thread(self):\n self.listen_thread = threading.Thread(target=self.listen, daemon=True)\n self.listen_thread.start()\n print('Started listener thread')", "def receive_messages(project, subscription_name):\n subscriber = pubsub_v1.SubscriberClient()\n subscription_path = subscriber.subscription_path(\n project, subscription_name)\n\n def callback(message):\n loaded_data = json.loads(message.data.decode('utf-8'))\n\n insert_to_table(loaded_data)\n message.ack()\n\n subscriber.subscribe(subscription_path, callback=callback)\n\n # The subscriber is non-blocking, so we must keep the main thread from\n # exiting to allow it to process messages in the background.\n print('Listening for messages on {}'.format(subscription_path))\n while True:\n time.sleep(60)", "def run(self, topic: str):\n while self.events:\n wait = self.get_wait_time()\n self.logger.debug('sleeping for %s seconds', wait)\n time.sleep(wait)\n\n event = self.events.pop(0)\n self.send(topic, event)\\\n .add_callback(self.on_send_success, event=event)\\\n .add_errback(self.on_send_failure, event=event)\n\n self.flush()", "def start_speaking(self):\n self.allowed_to_chat = True\n self.chat_message_queue.clear()\n self.chat_thread = threading.Thread(target=self._process_chat_queue,\n kwargs={'chat_queue': self.chat_message_queue})\n self.chat_thread.daemon = True\n self.chat_thread.start()", "def start(self):\n if not self._connected:\n self._client.connect(self._addr, port=self._port, keepalive=60, bind_address=\"\")\n 
self._client.loop_start()\n self._connected = True\n logger.info(\"Connection with MQTT Broker at %s:%d estabilished.\", self._addr, self._port)", "def listen(self, *a, **kw):\n raise NotImplementedError()", "def listen(self):\n while self.active:\n Quartz.CFRunLoopRunInMode(\n Quartz.kCFRunLoopDefaultMode, 5, False)", "def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()", "def start(self, event):\n self.send_presence()\n self.get_roster()\n self.send_message(mto=self.recipient, mbody=self.msg, mtype='chat')\n self.disconnect(wait=True)", "def _listen_to_queues(cls):\n queues = cls.get_service_queues()\n for queue in queues:\n queue.consume(cls.process_messages)", "def listen(self):\n try:\n self._listen()\n except KeyboardInterrupt:\n self.sock.close()\n self.is_connected = False", "def message_listener(self, topic, timeout):\n \"\"\"\n demo_message = [\n {'user_id': 'Lazy Man', 'timestamp': '2019-10-06T22:59:59.989Z', 'risk_level': 3}\n ]\n\n for message in demo_message:\n yield ERROR_CODE_ZERO, \"\", message\n \"\"\"\n\n while True:\n for error_code, error_message, message in self._consumer.subscribe(topic, timeout):\n yield error_code, error_message, message\n if error_code == 1:\n break", "def listen(self):\n\n\t\twhile self.running:\n\t\t\t#Wait for server to inform you there is data\n\t\t\tself.rxEvt.wait()\n\t\t\t\n\t\t\ttry:\n\t\t\t\t#See if recieved packet is actually latest from client\n\t\t\t\tif self.rxData[len(self.rxData)-1][0] >= self.rxLatest:\n\n\t\t\t\t\t#Update latest and pass data to data handler\n\t\t\t\t\tself.rxLatest = self.rxData[len(self.rxData)-1][0]\n\t\t\t\t\tself.handleRecvData(self.rxData[len(self.rxData)-1][1])\n\t\t\n\t\t\t\t\t#Clear event object so other clientHandlers begin waiting again\n\t\t\t\t\tself.rxEvt.clear()\n\n\t\t\texcept IndexError, e:\n\t\t\t\tprint(\"Index error on ServerClient listen\\nCarrying on Regardless\")", "def listen_connections(self):\n self.MAIN_CONNECTION.listen(server.MAX_CONNECTIONS)", "def run(self):\n\t\tfor item in self.pubSub.listen():\n\t\t\tself.processItem(item)", "def start(self):\n while True:\n ident = self.reply_socket.recv()\n assert self.reply_socket.rcvmore(), \"Missing message part.\"\n msg = self.reply_socket.recv_json()\n omsg = Message(msg)\n print>>sys.__stdout__\n print>>sys.__stdout__, omsg\n handler = self.handlers.get(omsg.msg_type, None)\n if handler is None:\n print >> sys.__stderr__, \"UNKNOWN MESSAGE TYPE:\", omsg\n else:\n handler(ident, omsg)", "def start_listener(self, input_topic, output_topic, publish_topic):\n try:\n # Create publisher to publish the speed measure\n self._publisher = self.node.create_publisher(\n Int64,\n publish_topic,\n qos_profile=QoSProfile(depth=1)\n )\n\n # Get type of the input topic and subscribe to it\n input_topic_type = get_msg_class(self.node, input_topic,\n blocking=True)\n self._sub_input_topic = self.node.create_subscription(\n input_topic_type,\n input_topic,\n self.input_topic_callback,\n qos_profile=QoSProfile(depth=1)\n )\n\n # Get type of the output topic and subscribe to it\n output_topic_type = get_msg_class(self.node, output_topic,\n blocking=True)\n self._sub_output_topic = self.node.create_subscription(\n output_topic_type,\n output_topic,\n self.output_topic_callback,\n qos_profile=QoSProfile(depth=1)\n )\n except Exception as e:\n error(self.node, \"%s\" % str(e))\n return False\n\n return True", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def 
start(self):\n super(EngineService, self).start()\n\n self.target = oslo_messaging.Target(server=self.service_id,\n topic=self.topic,\n version=self.version)\n\n self.server = messaging.get_rpc_server(self.target, self)\n self.server.start()", "def start(self):\n\n self.socket.bind((self.ip, self.port))\n self.socket.listen(self.listenNumber)\n self.printLine()\n print(\"start for listening \")", "def subscribe(receiver, updateInterval=None):", "def start_consuming(self):\n\n for queue in self._handlers.keys():\n self._consumer_tags += self._channel.basic_consume(self.on_message,\n queue=queue)" ]
[ "0.7356364", "0.70570236", "0.7030769", "0.7006789", "0.68794745", "0.6866184", "0.685871", "0.6839579", "0.6831507", "0.67023295", "0.6645298", "0.6638821", "0.6618311", "0.6550533", "0.65046924", "0.64672434", "0.6445343", "0.6389064", "0.63380456", "0.63122493", "0.6299043", "0.6284375", "0.626755", "0.626755", "0.61987686", "0.6197557", "0.618711", "0.616824", "0.616824", "0.6166112", "0.6163818", "0.614461", "0.6133508", "0.6121505", "0.60978955", "0.60921586", "0.6088251", "0.6069001", "0.6050036", "0.6047387", "0.60290486", "0.6026172", "0.6022405", "0.6011154", "0.60066086", "0.5986273", "0.5975829", "0.5968845", "0.5965077", "0.59594125", "0.5959336", "0.59470296", "0.5897169", "0.5884013", "0.5879599", "0.5879035", "0.58638245", "0.5860623", "0.5839528", "0.5835602", "0.57996374", "0.5797826", "0.57862794", "0.57846946", "0.57527566", "0.5749406", "0.5733656", "0.5727042", "0.5717482", "0.5713986", "0.5703115", "0.5701979", "0.57009494", "0.5697543", "0.5685832", "0.5675225", "0.5659958", "0.5649887", "0.5649819", "0.5642871", "0.56396437", "0.5633676", "0.56324047", "0.5620034", "0.5619212", "0.5617684", "0.56167686", "0.56069875", "0.5605938", "0.55992234", "0.55982596", "0.5595752", "0.55944186", "0.55781585", "0.55781585", "0.55781585", "0.5566304", "0.5564996", "0.55593187", "0.55592436" ]
0.644124
17
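The snippets scored above all share one subscribe-and-dispatch shape: connect to a socket or broker, register a callback per topic, then block in a receive loop. As a minimal, dependency-free sketch of that shared pattern (every name here, Listener, subscribe, publish, listen, is hypothetical and stands in for the zmq, Pub/Sub, and MQTT clients quoted in the record, not for any one of them):

import queue
import threading

class Listener:
    """Minimal subscribe-and-dispatch loop mirroring the snippets above."""

    def __init__(self):
        self._handlers = {}          # topic -> list of callbacks
        self._inbox = queue.Queue()  # stands in for the broker socket

    def subscribe(self, topic, callback):
        self._handlers.setdefault(topic, []).append(callback)

    def publish(self, topic, payload):
        # In the quoted snippets the broker delivers this; here it is local.
        self._inbox.put((topic, payload))

    def listen(self):
        while True:
            topic, payload = self._inbox.get()  # blocks like socket.recv()
            if topic is None:                   # sentinel shuts the loop down
                break
            for callback in self._handlers.get(topic, []):
                callback(payload)

listener = Listener()
listener.subscribe("sensor_value", lambda value: print("got", value))
worker = threading.Thread(target=listener.listen, daemon=True)
worker.start()
listener.publish("sensor_value", 42)  # prints: got 42
listener.publish(None, None)          # stop sentinel
worker.join()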
Sends a message to all listeners of the special topic broadcast
def broadcast(self, message): self._send('broadcast', message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def broadcast(self, msg):\n for client in self.clients.values():\n send_data(client.socket, msg)", "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)", "def broadcast(msg):\n\n for sock in clients:\n sock.send(bytes(msg, \"utf-8\"))", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\")+msg)", "def broadcast_message(msg: str):\r\n\tfor ip in _clients.keys():\r\n\t\tsend_message(ip, msg)", "def send_to_all(self, msg: str) -> None:\n print(f'{self._name} sends a public message: {msg}')\n self._group.publish_message(sender=self, msg=msg)", "def broadcast(self, message):\r\n for c in self.characters:\r\n c.notify(message)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix, \"utf8\")+msg)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\") + msg)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\tfor sock in clients:\n\t\tsock.send(bytes(prefix, \"utf8\")+msg)", "def broadcast(mensagem, prefixo = \"\"):\n for sock in clients:\n sock.send(bytes(prefixo, \"utf8\") + mensagem)", "def broadcast(self, clients, msg):\n self.server.broadcast(clients, msg)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix) + msg)", "def broadcast(data):\n for client in CLIENTS:\n client.write_message(data)", "def broadcast(self, txt):\n for chan in self.state['joinedchannels']:\n self.say(chan, txt)", "def broadcast_event(self, name, sender, *args, **kwargs):\n for addon in self.connection_bridges[sender]:\n addon.receive_event(sender=sender, name=name, *args, **kwargs)", "def broadcast(self, message):\n for s in self.connections:\n s.send(bytes(message, encoding='utf-8'))", "def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)", "def send_all(self, msg):\n self.update_chats()\n for c in self.chats:\n self.send_message(msg, c)", "def broadcast_event(self, event, data):\n for ctx in self.manager.all():\n self.pool.notify((ctx.on_event, (event, data), EMPTY_DICT))", "def broadcast(self, txt):\n\n for i in self.bots:\n i.broadcast(txt)", "def broadcast_to_all(self,header,msg, exclude = []):\n for key in self.connections.keys():\n if key not in exclude:\n self.send(header,key,msg)", "def broadcast(msg, prefix=\"\",ChatRoom=None): # prefix is for name identification. 
\n if not ChatRoom == None :\n for sock,name in ChatRooms[ChatRoom]:\n sock.send(bytes(prefix, \"utf8\")+msg)", "def broadcast(message):\n for client in CLIENTS:\n client.send(message)", "def sendSensors(self,sensors):\n self.broadcaster.sendSensors(sensors)", "def test_broadcast_message(self):\n\n typhoonae.websocket.broadcast_message('My broadcast message.')", "def broadcast(self,msg, UDP=False):\n if DEBUG: print \"class GlabPythonManager, function: broadcast\"\n if DEBUG and len(msg) < 10000: print \"class GlabPythonManager, function: broadcast\"\n \n if UDP: \n self.multicast.protocol.send(msg)\n return\n \n for key, connection in self.connection_manager.default_websocket_connections.iteritems():\n try:\n pass\n self.connection_manager.send(msg,connection)\n except AttributeError:\n if DEBUG: print \"Error: Failed to send broadcast\"\n pass \n \n '''\n for key, peer_server in self.connection_manager.peer_servers.iteritems():\n if not peer_server.ip == '10.1.1.112':\n continue\n try:\n self.connection_manager.send(msg,peer_server)\n except AttributeError:\n if DEBUG: print \"Error: Failed to send broadcast\"\n pass\n '''\n \n \n for key, connection in self.listener.openConnections.iteritems():\n continue\n try:\n if DEBUG: print \"broadcasting to the protocol:\", connection.ConnectionUID\n connection.transport.write(msg)\n except AttributeError:\n if DEBUG: print \"Error: Failed to send broadcast\"\n pass\n \n \n #for client in self.wsfactory.openConnections.keys():\n #self.wsfactory.openConnections[client].sendMessage(messagestring)", "def event_publish(self, cmd):\n for sub in self.subscribers:\n sub.event_receive(cmd)", "def discoveryBroadcast(self):\n interfaces = netifaces.interfaces()\n for interface in interfaces:\n addrlist = netifaces.ifaddresses(interface)[netifaces.AF_INET]\n for addr in addrlist:\n if \"addr\" in addr and \"broadcast\" in addr:\n self.discoverysocket.sendto(str.encode(json.dumps({\"ip\": addr[\"addr\"], \"port\": self.tcpPort, \"host\": socket.gethostname()})), (addr[\"broadcast\"], 31338))", "def listen(self, topics):\n logging.debug(f'Listen to {list(map(lambda x: x.name, topics))}')\n\n for topic in map(lambda x: x.name, topics):\n try:\n self.subscribe(topic)\n logging.debug(f'Subscribed the {topic} topic')\n except Exception:\n logging.debug(f\"Can't subscribe the {topic} topic\")", "def broadcast(self, msg, mtype = 'message', back = True):\n for p in DixitConnection.participants:\n if back or (DixitConnection.participants[p] != self):\n DixitConnection.participants[p].emit(mtype, msg)", "def broadcast(self, txt):\n\n for i in self.state['joinedchannels']:\n self.say(i, txt, speed=-1)", "def notify(self) -> None:\n for s in self.subscribers:\n s()", "def _notify_all(self, event_data):\n for subs in self._subscribers:\n subs.notify(event_data)", "def broadcast():\n # global receiving_message\n # if not receiving_message:\n router.broadcast(clients.copy(), json.dumps(current_state))", "async def broadcast(self):\n with await self.redis as connection:\n await connection.execute_pubsub(\"subscribe\", self.channel)\n try:\n while True:\n room = await self.channel.get(encoding=\"utf-8\")\n await self.ws.send(message)\n except websockets.ConnectionClosed as e:\n print(f\"<ChatManager:broadcast>[error] {e}\")\n await self.connection_closed()", "def bcast(self, msg):\n for k, v in self.peers.iteritems():\n proto = v[2]\n proto.send_obj(msg)", "def listener(messages):\n for m in messages:\n chatid = m.chat.id\n print(str(chatid))\n if m.content_type == 'text':\n 
text = m.text\n tb.send_message(chatid, text)", "def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)", "def send_to_all(self, message: Message):\n\t\tto_send = self.registry.get_user(message.sender) + \": \" + message.body\n\n\t\tfor ip in self.registry.ip():\n\t\t\tself.send(to_send, ip)", "def sendToAll(a):\n for x in participantes:\n con = x['socket']\n con.send(str.encode(a))", "def broadcast(self, msg):\n asyncio.run_coroutine_threadsafe(self.coro.broadcast(msg), self._robot._event_loop)", "def notify_message_listeners(self, name, msg):\n\n # handle the message specific listeners\n for fn in self._message_listeners.get(name, []):\n try:\n fn(self, name, msg)\n except Exception as e:\n i = 1\n #print(\"[CONNECTION ERROR] unable to handle message listener for \" + name)\n #print(e)\n\n # handle the listeners that are registered for all messages\n for fn in self._message_listeners.get('*', []):\n try:\n fn(self, name, msg)\n except Exception as e:\n i = 1\n #print(\"[CONNECTION ERROR] unable to handle * message listener for \" + name)\n #print(e)", "def publish(self, topic: Hashable, *args, **kwargs):\n for sub in self.subscribers[topic]:\n sub(*args, **kwargs)", "def flash_broadcast(self,params):\n text = params['text']\n if self.participant:\n self.service.sendParticipants(self.name,'msg',{\"text\":text,\"sender\":self.name})\n else:\n self.notLoggedIn()", "def sendToAllSubscribers(self, message, subject):\n\n for destination in self.subscribers:\n if(self.log):\n logging.info(\"Sending \" + message + \" to \" + destination)\n\n self.sendEmail(destination, message, subject)", "def broadcast(client, msg):\n for client_target in CLIENT_LIST:\n if client_target != client:\n client_target.send(msg)", "def broadcast(self, message: str) -> int:\n\t\tsubs_notified_so_far = []\n\t\treceiver_count = 0\n\t\tfor event in self.events:\n\t\t\tfor subscriber, callback in self.get_subscribers(event).items():\n\t\t\t\tif subscriber not in subs_notified_so_far:\n\t\t\t\t\tcallback(\"broadcast\", message)\n\t\t\t\t\tsubs_notified_so_far.append(subscriber)\n\t\t\t\t\treceiver_count += 1\n\t\treturn receiver_count", "def send_notification (event):\n Publisher.sendMessage (event)", "def broadcast(self,message_type,message):\n for socket in self.connections:\n if socket != self.server_socket:\n self.sendToSocket(socket,message_type,message)", "async def notify_users():\n if not app['websockets']:\n return\n\n message = build_user_message()\n await wait([user.send(message) for user in app['websockets']])", "def on_broadcast_tasks(self, evt, proto) -> None:\n self.log.debug(\"accepted an event from the local console:\\n\\tfunction: {}\\n\\tquery: {}\\n\\targs: {}\",\n evt.fun, evt.tgt, evt.arg)\n clientlist = self.peer_registry.get_targets(query=evt.tgt)\n offline_clientlist = self.peer_registry.get_offline_targets() if evt.offline else []\n\n msg = sugar.transport.ServerMsgFactory.create_console_msg()\n if clientlist or offline_clientlist:\n evt.jid = self.jobstore.new(query=evt.tgt, clientslist=clientlist + offline_clientlist,\n uri=evt.fun, args=json.dumps(evt.arg),\n job_type=\"runner\")\n for target in clientlist:\n threads.deferToThread(self.fire_event, event=evt, target=target)\n self.log.debug(\"Created a new job: '{}' for {} online and {} offline machines\",\n evt.jid, len(clientlist), 
len(offline_clientlist))\n msg.ret.msg_template = \"Targeted {} machines. JID: {}\"\n msg.ret.msg_args = [len(clientlist + offline_clientlist), evt.jid]\n else:\n self.log.error(\"No targets found for function '{}' on query '{}'.\", evt.fun, evt.tgt)\n msg.ret.message = \"No targets found\"\n proto.sendMessage(ServerMsgFactory.pack(msg), isBinary=True)", "def process_broadcast(data):\n logger.info(f\"Broadcast: {data}\")", "def notify_all(self, event: GameEvent):\n for listener in self._listeners:\n listener.notify(event)", "def _send_to_all_rooms(self, message):\r\n for room in self._rooms.values():\r\n room.send_message(message)", "def broadcast(self, message, *args):\n\t\tComponent.broadcast(self, message, *args)\n\t\tfor comp in self._contents:\n\t\t\tcomp.broadcast(message, *args)", "def inform_listeners(self):\n d = self.get_all_sorted()\n for listener in self.listeners:\n listener.stream_updated(d)", "def _broadcast_message_to_users(self, message):\n self.logger.info(f\"Broadcasting message `{message}`\")\n for id, name in self.users.items():\n time.sleep(.1) # Telegram servers does not let you send more than 30 messages per second\n try:\n self.updater.bot.sendMessage(int(id), message)\n\n except BaseException as e:\n traceback.print_exc()\n self.logger.info(f'Failed to broadcast message to {name} due to {e}')", "async def publish(self):\n for sock in self.subscribers:\n sock.send_json(self.main_server.state)\n await asyncio.sleep(0)", "async def admin_msg(self, message):\n for channel in self.admin_channels.values():\n if channel:\n await channel.send(message)", "def broadcast(self, writer, message):\r\n for user in self.connection_pool:\r\n if user != writer:\r\n # We don't need to also broadcast to the user sending the message\r\n user.write(f\"{message}\\n\".encode())", "def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)", "def _subscribe_to_peers(self):\n if not self.config['PEERS']:\n return\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.setsockopt(zmq.SUBSCRIBE, '')\n\n for ip, pub_port, api_port in self.config['PEERS']:\n if not self._is_self(ip, pub_port):\n address = '%s:%s' % (ip, pub_port)\n self.logger.debug('Subscribing to peer at: %s' % address)\n socket.connect('tcp://%s' % address)\n\n def new_msg_handler(sender, msg=None):\n topic, delimiter, packed = msg.partition(' ')\n topic = int(topic)\n message_dict = msgpack.unpackb(packed)\n #self.logger.debug('News for topic %s:%s arrived' %\n # (topic, constants.topics.get(topic)))\n self._handle_topic(topic, message_dict)\n\n sig = signal(constants.NEW_MESSAGE_TOPIC)\n sig.connect(new_msg_handler, weak=False)\n\n while True:\n msg = socket.recv()\n sig.send(self, msg=msg)\n gevent.sleep(.1)", "def send( value, data, listeners ):\n if len( listeners ):\n # get the first item on the list\n listener = listeners.pop( 0 )\n # test to see if the first item in the list is a list\n if ( ( type( listener ) == type( \"\" ) ) and\n ( len( listener ) > 0 ) ):\n pub.sendMessage( listener, value=value, data=data, listeners=listeners )\n elif ( type( listener ) == type( [] ) ):\n Common.send( value, data, listener )\n Common.send( value, data, listeners )", "def broadcast_string(self, data):\n app_message = MessageFactory.generate_app_message(\n origin_node=self.socket_manager.node,\n destination_node=self.directory.get(self.central_node),\n forward='1',\n is_file='0',\n sender=self.socket_manager.node.get_16_byte_name(),\n data=data,\n )\n\n if self._is_central_node():\n 
self.broadcast_as_central_node(app_message)\n else:\n self.socket_manager.send_message(app_message)\n self._log.write_to_log(\"Message\", f'Message sent to all nodes.')", "def send(cls,event):\n for rcv in list(cls.getReceivers(event.sender)):\n if event.consumed:\n break\n rcv(event)", "def broadcast(self, message, binary=False):\n with self.lock:\n websockets = self.websockets.copy()\n if py3k:\n ws_iter = iter(websockets.values())\n else:\n ws_iter = websockets.itervalues()\n\n for ws in ws_iter:\n if not ws.terminated:\n try:\n ws.send(message, binary)\n except:\n pass", "def run(self, topic: str):\n while self.events:\n wait = self.get_wait_time()\n self.logger.debug('sleeping for %s seconds', wait)\n time.sleep(wait)\n\n event = self.events.pop(0)\n self.send(topic, event)\\\n .add_callback(self.on_send_success, event=event)\\\n .add_errback(self.on_send_failure, event=event)\n\n self.flush()", "def sendWalls(self,walls):\n self.broadcaster.sendWalls(walls)", "def broadcast_to_users(self, text: str, sending_group):\n if sending_group == \"global\":\n for user in self.__users.values():\n user.send_message(f\"broadcast from the server: {text}\")\n print(\"in broadcast to users global\")\n elif sending_group.isdigit():\n sending_group = int(sending_group)\n for user in self.__users.values():\n for station in user.stations:\n if station.line_number == sending_group:\n user.send_message(f\"broadcast from the server: {text}\")\n print(f\"in broadcast to users line{sending_group}\")", "def _broadcast(self, msg: str) -> None:\n from jesse.routes import router\n\n for r in router.routes:\n # skip self\n if r.strategy.id == self.id:\n continue\n\n if msg == 'route-open-position':\n r.strategy.on_route_open_position(self)\n elif msg == 'route-close-position':\n r.strategy.on_route_close_position(self)\n elif msg == 'route-increased-position':\n r.strategy.on_route_increased_position(self)\n elif msg == 'route-reduced-position':\n r.strategy.on_route_reduced_position(self)\n elif msg == 'route-canceled':\n r.strategy.on_route_canceled(self)\n\n r.strategy._detect_and_handle_entry_and_exit_modifications()", "def broadcast(self, tx):\n\n for neighbor_id in self.adjacencies:\n self.sendMsg(neighbor_id, Message(self.id, Type.BLOCK, tx))", "def publish(self, message, topic=''):\n if type(message) != types.ListType:\n message = [message]\n if topic:\n message = [topic] + message\n self.send(message)", "def broadcast(self, msg_type, msg, t=5):\n return None", "def broadcast(bot, event, *args):\n if args:\n subcmd = args[0]\n parameters = args[1:]\n if subcmd == \"info\":\n \"\"\"display broadcast data such as message and target rooms\"\"\"\n conv_info = [\"<b>{}</b> ... 
{}\".format(get_conv_name(_), _.id_) for _ in _internal[\"broadcast\"][\"conversations\"]]\n if not _internal[\"broadcast\"][\"message\"]:\n bot.send_message_parsed(event.conv, _(\"broadcast: no message set\"))\n return\n if not conv_info:\n bot.send_message_parsed(event.conv, _(\"broadcast: no conversations available\"))\n return\n bot.send_message_parsed(event.conv, _(\n \"<b>message:</b><br />\"\n \"{}<br />\"\n \"<b>to:</b><br />\"\n \"{}\".format(_internal[\"broadcast\"][\"message\"],\n \"<br />\".join(conv_info))))\n elif subcmd == \"message\":\n \"\"\"set broadcast message\"\"\"\n message = ' '.join(parameters)\n if message:\n if message.lower().strip().startswith(tuple([_.lower() for _ in bot._handlers.bot_command])):\n bot.send_message_parsed(event.conv, _(\"broadcast: message not allowed\"))\n return\n _internal[\"broadcast\"][\"message\"] = message\n else:\n bot.send_message_parsed(event.conv, _(\"broadcast: message must be supplied after subcommand\"))\n elif subcmd == \"add\":\n \"\"\"add conversations to a broadcast\"\"\"\n if parameters[0] == \"groups\":\n \"\"\"add all groups (chats with users > 2)\"\"\"\n for conv in bot.list_conversations():\n if len(conv.users) > 2:\n _internal[\"broadcast\"][\"conversations\"].append(conv)\n elif parameters[0] == \"ALL\":\n \"\"\"add EVERYTHING - try not to use this, will message 1-to-1s as well\"\"\"\n for conv in bot.list_conversations():\n _internal[\"broadcast\"][\"conversations\"].append(conv)\n else:\n \"\"\"add by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n for conv in bot.list_conversations():\n if search.lower() in get_conv_name(conv).lower() or search in conv.id_:\n _internal[\"broadcast\"][\"conversations\"].append(conv)\n _internal[\"broadcast\"][\"conversations\"] = list(set(_internal[\"broadcast\"][\"conversations\"]))\n bot.send_message_parsed(event.conv, _(\"broadcast: {} conversation(s)\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n elif subcmd == \"remove\":\n if parameters[0].lower() == \"all\":\n \"\"\"remove all conversations from broadcast\"\"\"\n _internal[\"broadcast\"][\"conversations\"] = []\n else:\n \"\"\"remove by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n removed = []\n for conv in _internal[\"broadcast\"][\"conversations\"]:\n if search.lower() in get_conv_name(conv).lower() or search in conv.id_:\n _internal[\"broadcast\"][\"conversations\"].remove(conv)\n removed.append(\"<b>{}</b> ({})\".format(get_conv_name(conv), conv.id_))\n if removed:\n bot.send_message_parsed(event.conv, _(\"broadcast: removed {}\".format(\", \".join(removed))))\n elif subcmd == \"NOW\":\n \"\"\"send the broadcast - no turning back!\"\"\"\n context = { \"explicit_relay\": True } # prevent echos across syncrooms\n for conv in _internal[\"broadcast\"][\"conversations\"]:\n bot.send_message_parsed(conv, _internal[\"broadcast\"][\"message\"], context=context)\n bot.send_message_parsed(event.conv, _(\"broadcast: message sent to {} chats\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n else:\n bot.send_message_parsed(event.conv, _(\"broadcast: /bot broadcast [info|message|add|remove|NOW] ...\"))\n else:\n bot.send_message_parsed(event.conv, _(\"broadcast: /bot broadcast [info|message|add|remove|NOW]\"))", "def send_all(self, data, sender=None):\n for client in self.clients:\n if client == sender:\n continue\n client.send(data)", "def broadcast(self, message, exclude=()):\r\n\t\tfor player in self.players:\r\n\t\t\tif player not in 
exclude:\r\n\t\t\t\tplayer.send(message)", "def notifyObservers(self, topic, value):\n for observer in self.observers:\n observer.notify(topic, value)", "def onRegisterNetworkBroadcast(self):\n pass", "def __sendToAll(self, message, author = None):\n\n self.messageCallback(message)\n\n for user in self.__connections:\n\n if not user is author:\n try: \n user[\"connection\"].send((message + self.separator).encode())\n except: \n user[\"online\"] = False", "def emit (self, signal):\n for room in self.transmissionarea :\n for listener in room.listeners:\n listener.signalReceived(signal)", "def broadcast(self, message, *args):\n\t\tmethod = getattr(self, message, None)\n\t\tif method:\n\t\t\tmethod(*args)", "def notify_all(self, private_key, message):\n return self._samp_hub.notifyAll(private_key, message)", "def broadcast(self, name, msg, color='yellow'):\n name = str(name)\n msg = str(msg)\n self.output.broadcasts.append({\n 'name': name,\n 'msg': msg,\n 'color': str(color),\n 'botname': self._botname,\n 'botowner': self._botowner,\n })\n self._set_lastsaid('[BROADCAST] {0}: {1}'.format(name, msg))", "def on_connect():\n # There is now a connection\n subscribe_to_topic(\"pir\",\"Trigger\")", "def broadcast_to_session(self,session_id,header,msg, exclude = []):\n host = self.sessions[session_id][\"HOST\"][\"ID\"]\n self.send(header,host,msg)\n for key in self.sessions[session_id][\"USERS\"].keys():\n if key not in exclude:\n self.send(header,key,msg)", "def sendMessage(self):\n print(\"sendMessage\")\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "def listen(agent, config):\n base_topic = config['base_topic']\n short_topics = ['cpu_percent', 'memory_percent', 'disk_percent']\n topics = [base_topic + '/' + x for x in short_topics]\n seen_topics = set()\n\n def add_topic(peer, sender, bus, topic, headers, messages):\n seen_topics.add(topic)\n\n agent.vip.pubsub.subscribe('pubsub', base_topic,\n callback=add_topic)\n\n max_wait = 1 + max([value for key, value in _test_config.items()\n if key.endswith('_interval')])\n\n all_topics_seen = lambda: set(topics) <= seen_topics\n\n assert poll_gevent_sleep(max_wait, all_topics_seen)", "def broadcast(self, addr, message):\n for addr in set(six.iterkeys(self.addr_to_conn_struct_map)) - {addr}:\n try:\n self.addr_to_conn_struct_map[addr].conn.send(message)\n except:\n # if we have any error sending, close the client connection, then remove it from our list\n self.clean(addr)", "def sendMessage(self):\n print('sendMessage')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "async def _async_send_to_all_devices(self, message):\n for device in self.devices:\n await device.set(message)", "def on_message(client, userdata, msg):\n print(msg.topic + \" \" + str(msg.payload))\n send_command(str(msg.payload.decode(\"utf-8\")))", "def handle_send_messages():\n items = {k: v for k, v in subscribers.items() if v}\n for key in items:\n subscriber_obj = items[key]\n sim_id = get_sim_id(subscriber_obj)\n if sim_id and type(sim_id) is int:\n frame_messenger(subscriber_obj)\n elif sim_id and sim_id == \"live\":\n live_messenger(subscriber_obj)", "def broadcastChange(self,user_ids):\n userChanged=user_main.getUserPool().userChanged\n map(userChanged,user_ids)", "def apns_send_bulk_message(registration_ids, data, **kwargs):\n\tsocket = _apns_create_socket(APNS_SOCKET)\n\tfor registration_id in registration_ids:\n\t\t_apns_send(registration_id, data, socket=socket, **kwargs)\n\n\tsocket.close()", "def 
send_to_all_ml(obj):\n base.send_to_all_ml(obj)", "def emit(self, event, *args, **kwargs):\n ns_name = kwargs.pop('namespace', '')\n room = kwargs.pop('room', None)\n if room is not None:\n for client in self.rooms.get(ns_name, {}).get(room, set()):\n client.base_emit(event, *args, **kwargs)\n elif self.server:\n for sessid, socket in self.server.sockets.items():\n if socket.active_ns.get(ns_name):\n socket[ns_name].base_emit(event, *args, **kwargs)", "def requestTopics(self):\n self.port.flushInput()\n # request topic sync\n self.port.write(\"\\xff\\xff\\x00\\x00\\x00\\x00\\xff\")", "def send_events(sock):\n i=0\n while i<10:\n log.info('Sending message from publisher..')\n sock.send(\"even - hai i am publisher\")\n time.sleep(0.2)\n i += 1", "def subscribe_to_mc_groups(addrs=None):\n\n listen_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP)\n listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n listen_sock.bind(('', DEFAULT_TDM_PORT))\n\n for mc in addrs:\n print(\"subscribing to {}\".format(mc))\n mc_req = socket.inet_aton(mc) + socket.inet_aton('0.0.0.0')\n listen_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mc_req)\n\n return listen_sock" ]
[ "0.68770134", "0.6842449", "0.6777803", "0.6613753", "0.6587547", "0.6495968", "0.6483176", "0.6476947", "0.64376646", "0.64327085", "0.63729095", "0.6357374", "0.63375884", "0.6321455", "0.63169193", "0.6260771", "0.6246585", "0.62057936", "0.6156581", "0.6138631", "0.61364627", "0.60856366", "0.60805935", "0.6066465", "0.6019825", "0.60120785", "0.6007172", "0.594007", "0.58851206", "0.5881567", "0.5871067", "0.5856854", "0.58278036", "0.579965", "0.57949674", "0.57743645", "0.5758306", "0.5753949", "0.57473797", "0.5742097", "0.57176787", "0.5714031", "0.5712505", "0.56983423", "0.56954443", "0.5685895", "0.5682961", "0.5670309", "0.5668111", "0.5648265", "0.56117666", "0.56093293", "0.5600712", "0.55499846", "0.5549712", "0.55488646", "0.5536089", "0.55321705", "0.5520728", "0.551393", "0.5511375", "0.55113035", "0.5477896", "0.5473496", "0.54627085", "0.5460115", "0.5451224", "0.54475045", "0.5440559", "0.5419212", "0.54156935", "0.54154974", "0.5399249", "0.53923225", "0.53900045", "0.5388771", "0.5376988", "0.5370987", "0.5365336", "0.5361125", "0.5358845", "0.535356", "0.53472185", "0.5333566", "0.5328192", "0.53049725", "0.5304615", "0.5302895", "0.5302435", "0.5300682", "0.52950686", "0.5293065", "0.5290556", "0.5287717", "0.528391", "0.52808195", "0.5273814", "0.5272105", "0.5269404", "0.52686036" ]
0.54191
70
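The positive document in this record is a one-liner, so everything interesting happens in the class around it. The sketch below supplies one plausible host: only broadcast() is taken from the record itself, while _send(), add_listener(), and the topic registry are assumptions added so the method has something runnable to route through.

class Messenger:
    """Hypothetical host class for the one-line document above."""

    def __init__(self):
        self._listeners = {}  # topic -> list of callables

    def add_listener(self, topic, callback):
        self._listeners.setdefault(topic, []).append(callback)

    def _send(self, topic, message):
        # Deliver the message to every listener registered for this topic.
        for callback in self._listeners.get(topic, []):
            callback(message)

    def broadcast(self, message):
        # The retrieved document: route through the special 'broadcast' topic.
        self._send('broadcast', message)

messenger = Messenger()
messenger.add_listener('broadcast', print)
messenger.broadcast('hello, everyone')  # prints: hello, everyone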
Identity, Account/Astakos. Test ~okeanos authentication credentials
def check_user_credentials(token, auth_url='https://accounts.okeanos.grnet.gr' '/identity/v2.0'): logging.info(' Test the credentials') try: auth = AstakosClient(auth_url, token) auth.authenticate() logging.info(' Authentication verified') return AUTHENTICATED except ClientError: logging.error('Authentication failed with url %s and token %s' % ( auth_url, token)) return NOT_AUTHENTICATED
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aio_can_login_to_web_portal(aio):", "def test_oms_credentials(*args, **kwargs):\n\treturn {'status':'success'}", "def test_basic_login(self):\n c = Client()\n c.login(username='a', password='123456')", "def test_01_authenticated(self):\r\n res = self.signin(email=self.email_addr, password=self.password)\r\n res = self.app.get(self.url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"The CKAN exporter should be available for the owner of the app\"\r\n assert dom.find(id=\"ckan\") is not None, err_msg\r\n\r\n self.signout()\r\n\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.get(self.url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"The CKAN exporter should be ONLY available for the owner of the app\"\r\n assert dom.find(id=\"ckan\") is None, err_msg", "def test001_authenticate_user(self):\n self.lg('%s STARTED' % self._testID)\n\n self.lg('- Create user1 with admin access ')\n old_password = str(uuid.uuid4()).replace('-', '')[0:10]\n user1 = self.cloudbroker_user_create(group='admin', password=old_password)\n\n self.lg(\"- Authenticate U1 ,should return session key[user1_key] .\")\n user1_key = self.get_authenticated_user_api(username=user1, password=old_password)\n self.assertTrue(user1_key)\n\n self.lg(\"- Use U1's key to list the accounts for U1, should succeed.\")\n accounts_list = user1_key.cloudapi.accounts.list()\n self.assertEqual(accounts_list, [])", "def test_authentication():\n\n c = GoodreadsClient()\n assert_equals(c.user['name'], 'Jan Skus')", "def authenticate(credentials):", "def test_02_account_index(self):\r\n # As Anonymou user\r\n url = \"/account\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_02_account_index(self):\r\n # As Anonymou user\r\n url = \"/account\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_myaccount(self):\n create_user_object()\n 
self.client.login(username='testuser', password='testabc123')\n response = self.client.get(reverse('infinite:myaccount'))\n self.assertEqual(response.status_code, 200)", "def test_init(self):\n self.assertEqual(self.new_credentials.account,\"Instagram\")\n self.assertEqual(self.new_credentials.username,\"bensongathu\")\n self.assertEqual(self.new_credentials.password,\"vcxz4321\")", "def test_can_login(self):\n user = authenticate(username='jack', password='secret')\n self.assertTrue(user is not None)\n self.assertTrue(user.is_authenticated)", "def test_identity(self):\n me = self.d.identity()\n self.assertEqual(me.data['consumer_name'], 'Test Client')\n self.assertEqual(me, self.d.user('example'))", "def setUp(self):\n self.credentials = {\n \"username\": \"BobRobert\",\n \"first_name\": \"Bob\",\n \"last_name\": \"Robert\",\n \"email\": \"test_bob@test.com\",\n \"password\": \"fglZfYmr%?,\",\n }", "def test_login(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'__action': 'login', 'id': people[0].id, 'password': \"testing\"}\n self.post('user', 200, params=p)", "def test_accounts_logged_in(self):\n self.client.login(username='fred', password='fred')\n \n r = self.client.get(reverse('sync-accounts'))\n self.assertEqual(r.status_code, 200)\n self.accounts_test_helper(r, 3, (2, 3, 5))\n \n self.client.logout()", "def login(self):\n return self.client.login(username='Georgie', password='12345678')", "def test_valid_authentication(self):\n\n for author in self.authors + [self.super_author]:\n response: Response = self.client.post(BASE_URL + '/authenticate/', data={\n 'username': author.username,\n 'password': 'abcd1432' # Might be a better way to store random passwords.\n })\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data, {\n 'token': author.get_key()\n })", "def test_create_o_auth_client(self):\n pass", "def skyserv_authenticator(self):\n \n header = {\n 'Content-Type': accept, \n 'X-Auth-Token': self.casjobtoken,\n 'Accept': accept\n }\n # this format is disgusting but required....\n authdata = {\n 'auth' :{\n 'identity': {\n 'password': {\n 'user': {\n 'name': username,\n 'password': password\n }\n }\n }\n }\n }\n payload = json.dumps(authdata).encode(encoding='utf-8')\n try:\n post = requests.post(self.loginurl, data=payload, headers=header)\n\n if post.status_code == 200:\n response = json.loads(post.text)\n token = response[self.tokenkey]\n return token\n else:\n print('Username and/or password are invalid.')\n post.raise_for_status()\n except Exception as e:\n raise(str(e))", "def set_credentials():", "def test_auth(self):\n options = Options()\n options.parseOptions([\"--auth\", \"memory:admin:admin:bob:password\"])\n self.assertEqual(len(options[\"credCheckers\"]), 1)\n checker = options[\"credCheckers\"][0]\n interfaces = checker.credentialInterfaces\n registered_checkers = options.service.smtpPortal.checkers\n for iface in interfaces:\n self.assertEqual(checker, registered_checkers[iface])", "def test_read_o_auth_client_authorization(self):\n pass", "def test_create_o_auth_client_authorization(self):\n pass", "def test_duo_account_get(self):\n pass", "def test_read_o_auth_client(self):\n pass", "def login():", "def login():", "def test_auth_test(self):\n backend = LdapBackend()\n backend.authenticate(None, username=\"apple\", password=\"ffffff\")", "def test_success(self):\n \n result = self.authenticator.authenticate(\n username=u'thruflo', \n 
password=u'secret'\n )\n self.assertTrue(result.username == self.user.username)", "def test_getcredentials_from_netrc(netrc):\n netrc.return_value.authenticators.return_value = (USERNAME, \"\", PASSWORD)\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def test_agentpi_login(self):\n param = {\n 'username' : \"seth\", \n 'password' : \"testing\"\n }\n response = param\n\n self.assertEqual(response.text, \"User Exists\")", "def test_signin_functionality(self):\n res = self.client.post('/signin',\n follow_redirects=True, data=dict(\n username='orange',\n password='123Orange',\n ))\n data = res.data.decode('utf-8')\n assert 'Profile' in data\n assert 'Username: orange' in data\n assert res.status == '200 OK'", "def test_auth_user(self):\n\n self.assertEqual(User.authenticate(\"allison\", \"allison\"), self.user1)\n self.assertEqual(User.authenticate(self.user2.username, \"jackson\"), self.user2)", "def test_authenticate_credentials(generate_token, django_user_model):\n access_token, user_data = generate_token\n jwt = JWTAuthentication()\n user, payload = jwt.authenticate_credentials(access_token)\n user_instance = django_user_model.objects.get(\n username=user_data['username']\n )\n assert user == user_instance\n assert payload['userdata'] == user_data", "def test_init(self):\n self.assertEqual(self.new_credential.app_name, \"MySpace\")\n self.assertEqual(self.new_credential.account_name, \"Ghostke99\")\n self.assertEqual(self.new_credential.account_password, \"daimaMkenya001\")", "def test_validate_credentials(self):\n pass", "def login(self):", "def test_auth_success(self):\n self.assertEqual(Freenas(hostname)._user, 'root')", "def hitobito_login():\n if not hitobito.authorized:\n flash('Access denied to hitobito', 'danger')\n return redirect(url_for(\"auth.login\", local=1))\n # Get remote user data\n resp = hitobito.get(\"/en/oauth/profile\", headers={'X-Scope': 'name'})\n if not resp.ok:\n flash('Unable to access hitobito data', 'danger')\n return redirect(url_for(\"auth.login\", local=1))\n resp_data = resp.json()\n # print(resp_data)\n username = None\n if 'nickname' in resp_data and resp_data['nickname'] is not None:\n username = resp_data['nickname']\n elif 'first_name' in resp_data and 'last_name' in resp_data:\n fn = resp_data['first_name'].lower().strip()\n ln = resp_data['last_name'].lower().strip()\n username = \"%s_%s\" % (fn, ln)\n if username is None or not 'email' in resp_data or not 'id' in resp_data:\n flash('Invalid hitobito data format', 'danger')\n return redirect(url_for(\"auth.login\", local=1))\n return get_or_create_sso_user(\n resp_data['id'],\n username,\n resp_data['email'],\n )", "def test_lookup_account(self):\n pass", "def auth(self):\n return self.creds(\"admin@example.com\", cookie=\"USERTOKEN: authcookie\")", "def test_admin_usuario(self):\n c = Client()\n # log in\n c.login(username='super', password='super')\n # make a GET request to the view\n response = c.get(reverse('admin_usuario'))\n # the view works if the response code is 200\n self.assertEqual(response.status_code, 200)", "def test_user_can_log_in(self):\n # create a user\n User.objects.create_user(username='john',\n email='juangathure@gmail.com',\n password='test_password')\n\n credentials = {\n \"username\": \"john\",\n \"password\": \"test_password\"\n }\n\n # use credentials of the user created to test the log in\n response = self.client.post('/login/', credentials,\n format='json')\n\n # assert the login was successful\n 
self.assertEqual(response.status_code,\n status.HTTP_200_OK)\n\n data = response.json()\n\n # assert a token is returned for every successfull login\n assert \"token\" in data", "def __init__(self,account,username, password):\n self.account = account\n self.username = username\n self.password = password", "def test_login_view_with_iam(self):\n form = {\n 'aws_access_key': 'AWS access key',\n 'aws_secret_access_key': 'AWS secret key',\n }\n response = self.client.post('/login/', form, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.request['PATH_INFO'], '/profile/')", "def test_create_o_auth_access_token(self):\n pass", "def test_admin_can_login_to_web_portal(admin):", "def test_admin_user_login(self):\n self.login(\"admin\", \"admin\")\n self.should_see(\"This is your profile, admin.\")", "def setUp(self):\n self.new_credentials = Credentials(\"Facebook\",\"Josphato\",\"jose!!otieno@45\")", "def test_account_information(self):\r\n res = self.testapp.get(u'/api/v1/admin/account?api_key=' + API_KEY,\r\n status=200)\r\n\r\n # make sure we can decode the body\r\n user = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n user['username'], 'admin',\r\n \"Should have a username of admin {0}\".format(user))\r\n\r\n self.assertTrue(\r\n 'password' not in user,\r\n 'Should not have a field password {0}'.format(user))\r\n self.assertTrue(\r\n '_password' not in user,\r\n 'Should not have a field password {0}'.format(user))\r\n self.assertTrue(\r\n 'api_key' not in user,\r\n 'Should not have a field password {0}'.format(user))\r\n self._check_cors_headers(res)", "def setUp(self):\n account_models.User.objects.create_user(email='mrtest@mypapaya.io', password='WhoAmI', username='aov1')", "def test_access_account_info_with_token(self):\n\n print(\" --------------------------- Test 6 - Access Account Information ----------------------------\")\n\n user_id = uuid.uuid4()\n password = \"my-precious\"\n currency = \"EUR\"\n\n register_user(user_id, password, currency)\n response = login_user(user_id, password)\n\n self.assertTrue(response.json()['message']['auth_token'])\n\n auth_token = response.json()['message']['auth_token']\n headers = {'Content-Type': \"application/json\", 'Authorization': auth_token}\n\n data = \"{\\\"amount\\\" : 20.0}\"\n requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n\n # Get the buyer account information to check if the money comes in\n response = requests.get('http://0.0.0.0:5000/account', headers=headers)\n print(json.dumps(response.json()['message'], indent=4))", "def auth(self, user):", "def test_user_authenticate(self):\n\n user = User.authenticate(\"test1\", \"password\")\n\n self.assertEqual(user.username, \"test1\")\n self.assertIn(\"$2b$\", user.password)", "def test_init(self):\n self.assertEqual(self.new_account.account_name, \"Instagram\")\n self.assertEqual(self.new_account.username, \"jLuseno161\")\n self.assertEqual(self.new_account.password, \"joy161\")", "def test_display_credentials(self):\n\n self.assertEqual(Credentials.display_credentials(),Credentials.credential_list)", "def testGaiaLogin(self):\n if self._is_guest:\n return\n try:\n username, password = next(self._GetCredentialsIter())\n except StopIteration:\n username = 'autotest.catapult'\n password = 'autotest'\n with self._CreateBrowser(gaia_login=True,\n 
username=oobe.Oobe.Canonicalize(username),\n password=password):\n self.assertTrue(py_utils.WaitFor(self._IsCryptohomeMounted, 10))", "def test_create_identity(self):\n pass", "def test_user_login(app):\n login_res = app.post('/auth', data={\n 'email': 'tracy.tim@cognizant.com',\n 'password': 'password'\n },content_type='application/json') \n assert login_res.status_code == 200", "def test_search_by_account(self):\n self.new_credentials.save_creds()\n account_found = Credentials.search_by_account(\"Instagram\")\n \n self.assertEqual(account_found.username,self.new_credentials.username)", "def test_authentication_success():\n d = Dexcom(USERNAME, PASSWORD)\n d._validate_account()\n d._validate_session_id()", "def test_login_with_correct_authentication_success(self):\n user = {\n \"Email\": \"fyi@g.com\",\n \"Type\": \"passenger\",\n \"Password\": \"pass234\",\n \"Confirm Password\": \"pass234\"\n }\n response = self.client().post('/api/v1/auth/register',\n data=user)\n self.assertEqual(response.status_code, 201)\n logins = {\"Email\": \"fyi@g.com\", \"Password\": \"pass234\"}\n res = self.client().post('/api/v1/auth/login', data=logins)\n self.assertTrue(res.status_code, 200)", "def test_find_credentials(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\")\n test_credential.save_attributes()\n\n found_credential = Credentials.find_credentials(\"Instagram\")\n\n self.assertEqual(found_credential.account, test_credential.account)", "def test_32_oauth_password(self):\r\n user = User(email_addr=\"johndoe@johndoe.com\",\r\n name=self.user.username,\r\n passwd_hash=None,\r\n fullname=self.user.fullname,\r\n api_key=\"api-key\")\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.signin()\r\n assert \"Ooops, we didn't find you in the system\" in res.data, res.data", "def test_login(self):\n\n client = Client('username', 'password')\n self.setSessionResponse(200)\n try:\n client.authenticate()\n except Exception as e:\n self.fail(\"Exception raised : \" + str(e))", "def auth():\n pass", "def auth():\n pass", "def login(self):\n self.client.login(username=self.user.username, password='test')", "def test_login(self):\n\n print('\\n\\nEnter a valid LendingClub account information...')\n email = input('Email:')\n password = getpass.getpass()\n\n self.assertTrue(self.session.authenticate(email, password))\n print('Authentication successful')", "def test_init(self):\n self.assertEqual(self.new_cred.account_name, 'github')\n self.assertEqual(self.new_cred.username, 'Lugaga')\n self.assertEqual(self.new_cred.password, 'tangodown!')", "def _auth(self):\n url = 'https://forsight.crimsonhexagon.com/api/authenticate?'\n\n payload = {\n 'username': self.username,\n 'password': self.password\n }\n\n r = self.session.get(url, params=payload)\n j_result = r.json()\n self.auth_token = j_result[\"auth\"]\n #print('-- Crimson Hexagon Authenticated --')\n return", "def authenticate(self, password):\n url = self.host_address + \"/assist/authentication\"\n payload = {\n 'action' : \"validate\",\n 'client' : self.client_info,\n #'KEY' : (self.user_id + \";\" + password)\n 'GUUID' : self.user_id,\n 'PWD' : password\n }\n headers = {\n 'Content-Type': \"application/json\"\n }\n response = requests.request(\"POST\", url, json=payload, headers=headers)\n try:\n res = json.loads(response.text)\n except NameError:\n res = None\n\n if res and res[\"result\"] and res[\"result\"] == \"success\":\n # store result - overwrite any previous entries 
with same user ID\n self.storage.write_user_data(self.user_id, {\n \"language\" : res[\"user_lang_code\"],\n \"token\" : res[\"keyToken\"]\n })\n name = res[\"user_name\"][\"nick\"] or res[\"user_name\"][\"first\"]\n print(\"SEPIA account: Success - \" + name + \", your login token has been stored. Hf :-)\")\n # store default host\n self.storage.write_default_host(self.host_address)\n print(\"SEPIA account: Set (new) default host: \" + self.host_address)\n else:\n print(\"SEPIA account: Failed - I think the password is wrong or we got connection problems.\")", "def test_login_request_with_authentication(self):\n # create a user and assert the response\n user_res = self.ph.create_user(self.test_user_name, self.test_user_password)\n self.assertEqual(user_res.status_code, status.HTTP_201_CREATED) \n user_res_data = json.loads(user_res.get_data(as_text=True))\n self.assertEqual(user_res_data['name'], self.test_user_name)\n\n # Get the url for the login resource and query it\n url = url_for('api.userloginresource', _external=True, name=self.test_user_name)\n res = self.test_client.get(url, headers=self.ph.get_authentication_headers())\n \n # assert that the resonse is the same as the one that we created before\n self.assertTrue(res.status_code == status.HTTP_200_OK)\n res_data = json.loads(res.get_data(as_text=True))\n self.assertTrue(res_data['id'], user_res_data['id'])\n self.assertTrue(res_data['name'], user_res_data['name'])", "def test_auth_code_positive(self, api):\n self.builder.add_user(api.get_user())\n resp = api.login_user(api.get_user().username, api.get_user().password)\n self.builder.del_user(api.get_user())\n assert resp.status_code == 200", "def test_get_with_auth(self):\n u = UserFactory(role=User.MODERATOR)\n u.set_password('123')\n u.save()\n\n auth_url = prepare_url('login')\n data = {\n 'username': u.username,\n 'password': '123'\n }\n response = self.client.post(auth_url, data=data, format='json')\n token = response.data['token']\n\n url = prepare_url('admin-cities-list')\n self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_list_o_auth_client_authorization(self):\n pass", "def test_login_twice(self):\n self.client.login(username=self.USERNAME, password=self.PASSWORD)\n username2 = \"two\"\n password2 = \"two\"\n user2 = User.objects.create_user(username=username2, password=password2)\n UserInfo.objects.create(user=user2)\n\n resp = self.client.post(\n reverse('login'),\n json.dumps({\n \"username\": username2,\n \"password\": username2,\n }),\n content_type=\"application/json\"\n )\n assert resp.status_code == 200, resp.content.decode('utf-8')\n assert self.is_authenticated(user2)", "def test_display_all_credentials(self):\n\n\n self.assertEqual(Credential.display_credentials(),Credential.credential_list)", "def __init__(self,account,username, password):\n self.user_name = username\n self.password = password\n self.account = account", "def test_user_login_with_correct_credentials_true(self):\n res = self.client().post('/api/v1/auth/signup', data=self.user)\n self.assertEqual(res.status_code, 201)\n logins = {\n \"Email\": \"user@example.com\",\n \"Password\": \"pass1234\"\n }\n resp = self.client().post('/api/v1/auth/login', data=logins)\n self.assertEqual(resp.status_code, 200)\n resp = resp.get_json()\n self.assertEqual(resp['data']['message'][0]['Email'], logins['Email'])", "def 
test_Profile(self):\n self.assertEquals(self.user_1.username, 'testuser')\n # self.assertEquals(self.user_1.password, '12345')\n self.assertEquals(self.user_1.email,\n 'boggusmail@boggusmail.net')", "def test_sucessful_login(self):\n self.user.list_of_accounts = [{'username': 'dalton',\n 'pwd': 'chromelegend',\n 'email': 'legionless@yahoo.com'}]\n msg = self.user.login(\"legionless@yahoo.com\", \"chromelegend\")\n self.assertEqual(msg, \"Success!\")", "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n self.login(email=common.admin_email, username=common.admin_username)\n self.galaxy_login(email=common.admin_email, username=common.admin_username)", "async def sing_in(\n email: str = Body(\n embed=True,\n title=\"Email\",\n description=\"Email user\",\n example=\"admin@email.com\",\n min_length=10,\n max_length=100,\n ),\n password: str = Body(\n embed=True,\n title=\"Password\",\n description=\"Password user\",\n example=\"admin\",\n min_length=5,\n max_length=100,\n ),\n):\n if email == \"admin@email.com\" and password == \"admin\":\n token = token_manager.create_token(\n {\"email\": \"admin@email.com\", \"password\": \"admin\"}\n )\n return JSONResponse(status_code=status.HTTP_200_OK, content=token)\n else:\n return JSONResponse(\n status_code=status.HTTP_401_UNAUTHORIZED,\n content={\"message\": \"Credenciales no validas t(-_-t)\"},\n )", "def test_successful_login(self):\n pass", "def test_regular_user_login(self):\n self.login(\"user\", \"user\")\n self.should_see(\"This is your profile, user.\")", "def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])", "def __init__(self, account, user_username, user_password):\n self. account = account\n self. 
user_username = user_username\n self.user_password = user_password", "def test_sign_in(self):\n user = User.objects.create(email='david.smith@mom.com', password='******')\n response = self.client.post(reverse('backend:sign_in'), {'email':user.email, 'password':user.password})\n self.assertEqual(response.status_code, 200)\n self.assertDictContainsSubset({'status': 'success'}, response.json())\n self.assertDictContainsSubset({'pk_user': user.pk}, response.json())", "def test_aiven_creds_exist(self):\n assert os.environ[\"AIVEN_API_URL\"] is not None\n assert os.environ[\"AIVEN_TOKEN\"] is not None", "def test_read_o_auth_access_token(self):\n pass", "def test_authtoken_get(self):\n specialdachs = self.fixtures.specialdachs\n oakley = self.fixtures.oakley\n scope = ['id']\n dachsadv = models.AuthClient(\n title=\"Dachshund Adventures\",\n organization=specialdachs,\n confidential=True,\n website=\"http://dachsadv.com\",\n )\n auth_token = models.AuthToken(auth_client=dachsadv, user=oakley, scope=scope)\n token = auth_token.token\n db.session.add(dachsadv, auth_token)\n result = models.AuthToken.get(token)\n self.assertIsInstance(result, models.AuthToken)\n self.assertEqual(result.auth_client, dachsadv)", "def test_003_check_default_keystone_credential_usage(self):\n\n usr = self.config.master.keystone_user\n pwd = self.config.master.keystone_password\n url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host)\n\n try:\n keystone = keystoneclient(username=usr,\n password=pwd,\n auth_url=url)\n keystone.authenticate()\n except k_exceptions.Unauthorized:\n pass\n else:\n self.fail('Step 1 failed: Default credentials '\n 'for keystone on master node were not changed')", "def test_get_login(self):\n login = Login(self.client, 123)\n\n self.assertEqual(login.id, 123)\n self.assertEqual(login.ip, \"192.0.2.0\")\n self.assertEqual(login.restricted, True)\n self.assertEqual(login.status, \"successful\")\n self.assertEqual(login.username, \"test-user\")", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_activate_login(self):\r\n pass", "def test_login_and_logout(self):\n create_user(\"User1\", \"User1@gmail.com\", \"isp123456\")\n response = self.client.post(reverse('login'), {'username': 'User1', 'password': 'isp123456'}, follow=True)\n self.assertTrue(response.context['user'].is_authenticated)\n response = self.client.get(reverse('logout'))\n self.assertFalse(response.context['user'].is_authenticated)", "def __init__(self,account_name, username, password):\n self.account_name = account_name\n self.username = username\n self.password = password" ]
[ "0.6723792", "0.671743", "0.6590624", "0.657539", "0.65334004", "0.643523", "0.64110607", "0.6394348", "0.6331037", "0.63070375", "0.6304327", "0.6270155", "0.62404686", "0.62375134", "0.6210401", "0.6204399", "0.61970514", "0.618015", "0.6161113", "0.61508554", "0.61445206", "0.61396724", "0.6139503", "0.61388946", "0.6128106", "0.61200243", "0.611879", "0.611879", "0.6115832", "0.61148816", "0.60950196", "0.6094605", "0.6063453", "0.60629225", "0.60604495", "0.60559684", "0.6055626", "0.6052297", "0.6037713", "0.60135067", "0.6002199", "0.5996182", "0.59927154", "0.59852505", "0.59819204", "0.59794474", "0.59776795", "0.59730583", "0.59497976", "0.5943494", "0.5937871", "0.59260195", "0.592428", "0.592343", "0.59161156", "0.5909937", "0.5909626", "0.5897783", "0.589736", "0.5896679", "0.58930993", "0.5892428", "0.5891157", "0.589055", "0.5888695", "0.58885074", "0.58752775", "0.58752775", "0.5862583", "0.58548385", "0.5850623", "0.58371127", "0.5836649", "0.5833313", "0.5830628", "0.5829232", "0.5827442", "0.58242965", "0.5822499", "0.5818418", "0.5815191", "0.5812007", "0.5811161", "0.58104414", "0.5810154", "0.5809187", "0.5807232", "0.5807192", "0.58066976", "0.58061993", "0.5804473", "0.5801885", "0.5800525", "0.57929754", "0.5788266", "0.57880783", "0.5786488", "0.5785792", "0.57822853", "0.57785416" ]
0.5886558
66
queries the database for a specific character; takes a name; returns a json with the lines
def lines_from_char(character): query = f""" SELECT script_l FROM script JOIN characters ON characters.char_id = script.characters_char_id WHERE name = '{character}' """ data = pd.read_sql_query(query,engine) return data.to_json(orient="records")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lines_from_char_ep(character,ep):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\nWHERE name = '{character}' and episode = '{ep}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")", "def get_character_info(self, name):\n url = \"%s?%s\" % (self._base_url, urlencode({'name': name}))\n q = Request(url)\n q.add_header('User-Agent', 'curl/7.51.0')\n q.add_header('Accept', 'application/json')\n\n result = urlopen(q).read().decode('utf-8')\n data = json.loads(result)\n\n return data", "def get_character(arg):\n character = requests.get(BASE_URL+'characters/'+arg)\n print character.json()\n return character.status_code", "def lines_():\n query = f\"\"\"\nSELECT script_l, `name`, episode\nFROM script\nINNER JOIN characters\nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\n\"\"\"\n data = pd.read_sql_query(query, engine)\n return data.to_json(orient=\"records\")", "def search_from_sqlite(self, key):\n key = ('.*' +key+ '.*',)\n conn = get_sqlite()\n c = conn.cursor()\n conn.create_function(\"REGEXP\", 2, regexp)\n c.execute('SELECT * FROM vertices WHERE name REGEXP ? ', key)\n results = c.fetchall()\n\n return json.dumps([{\n 'name': r[1],\n 'size': r[3],\n 'parent': r[2],\n 'last_accessed': r[4],\n 'last_modified': r[5]} for r in results])", "def get_characters(self, sid):\n\n\t\twith open(self.get_fpath(sid)) as f:\n\t\t\treturn json.load(f)", "def get_character_detail(chara_name: str) -> dict:\n\n chara_misc_json = load_characters_config()\n chara_details = list(filter(lambda x: (x['name'] == chara_name), chara_misc_json))\n\n if chara_details:\n return chara_details[0]\n else:\n return None", "def on_get(req, resp):\n connection = db.connect()\n cursor = connection.cursor()\n cursor.execute('SELECT `name` FROM `contact_mode`')\n data = [row[0] for row in cursor]\n cursor.close()\n connection.close()\n resp.body = json_dumps(data)", "def names():\n\n df = pd.read_sql_query(f\"SELECT * FROM olympics_raw\", con = engine)\n print(df.head())\n \n\n # return jsonify(all_olympians)\n return jsonify(df.to_dict(orient='records'))", "def put(cls, char, name=None):\n if name is None:\n name = \"\".join(char[\"name\"].split())\n with open(os.path.join(app.config[\"CHARACTER_DIR\"], name + \".json\"), \"w\") as fp:\n json.dump(char, fp, indent=2)\n return name", "def get(cls, character):\n return WorldData.get_table_data(cls.table_name, character=character)", "def cursor_data(c):\r\n\r\n # pull column description\r\n d = []\r\n for i in range(len(c.description)):\r\n d.append(c.description[i][0])\r\n\r\n # fetch column entries\r\n c = c.fetchall()\r\n\r\n # compile list\r\n info = []\r\n for i in range(len(c)):\r\n # compile dictionary entry\r\n entry = {}\r\n for j in range(len(d)):\r\n entry[d[j]] = c[i][j]\r\n info.append(entry)\t\r\n\r\n # success\r\n return info", "def get_by_character(self, character_id):\n sql = \"SELECT {0} FROM people_{0} WHERE people=?\".format(self.conveyance_type)\n try:\n query_result = self.cursor.execute(sql, (str(character_id),))\n except Exception as e:\n raise Exception(\n \"An error occurred while getting a character %s in the database: query: %s - message: %s\"\n % (self.conveyance_type, sql, e)\n )\n\n rows = query_result.fetchall()\n starships = [s_id for _, s_id in rows]\n\n return starships", "def get_record_by_name(name):\n 
with RECORD_LOCK:\n # return list of matches or []\n return jsonify([r for r in RECORDS if r.get('name') == name])", "def fetch():\n req_data= request.get_json()\n \n ## ddb uses text files, using this as to eat my own dogfoor and improve\n ## no service sql client. No daemon, low cpu.\n\n\n e=load_db()\n try:\n res=e.query(req_data['query'])\n \n serialized = jsonpickle.encode( res,\n unpicklable=False,\n make_refs=False)\n return serialized\n except Exception as ex:\n return \"{0} -> '{1}'\".format(ex,req_data['query'])", "def cNames():\n a = pd.DataFrame(df['Country Name'].unique(), columns=['cname']).to_json()\n r = Response(response=a,\n status=200,\n mimetype=\"application/json\")\n r.headers[\"Content-Type\"] = \"text/json; charset=utf-8\"\n return r", "def getAllWhereNameIs(table, name):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table + \" WHERE name like'\" + name + \"%'\")\n\t\tob = cur.fetchall()\n\t\tif not ob:\n\t\t\treturn \"\"\n\t\telse:\n\t\t\tobje = ob[0]\n\t\t\treturn obje\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function getAllWhereNameIs from DbController')", "def handle_characters(curs, collection):\n character_list = curs.execute(\"\"\"SELECT * FROM charactercreator_character;\"\"\")\n for character in character_list:\n _, sl_curs = connect_to_sldb() # need to create a different cursor because the main one still \n # running and it will close the whole thing before it loop\n # item_list = sl_curs.execute(\n # f\"\"\"SELECT ai.name FROM charactercreator_character_inventory as cii\n # LEFT JOIN armory_item as ai\n # ON cii.item_id = ai.item_id\n # WHERE character_id={character[0]};\n # \"\"\")\n inventory = sl_curs.execute(\n f\"\"\"SELECT name, item_ptr_id\n FROM\n (SELECT * FROM charactercreator_character_inventory as cii\n LEFT JOIN armory_item as ai\n ON cii.item_id = ai.item_id) as a\n LEFT JOIN armory_weapon as aw\n ON a.item_id=aw.item_ptr_id\n WHERE character_id={character[0]};\n \"\"\").fetchall()\n\n character_doc = {\n \"name\": character[1],\n \"level\": character[2],\n \"exp\": character[3],\n \"hp\": character[4],\n \"strength\": character[5],\n \"intelligence\": character[6],\n \"dexterity\": character[7],\n \"wisdom\": character[8],\n \"items\": [item[0] for item in inventory],\n \"weapons\": [item[0] for item in inventory if item[1] != None]\n }\n sl_curs.close() # close that new cursor\n collection.insert_one(character_doc)\n\n\n # # A codier way to do it\n # schema = curs.execute(\n # \"PRAGMA table_info(charactercreator_character)\").fetchall()[1:]\n # for character in characters_list:\n # character_doc = {}\n # for index, item_tuple in enumerate(schema):\n # character_doc[item_tuple[1]] = character[index + 1]\n\n # collection.insert_one(character_doc)", "def test_get(date1):\n # create mysql connection\n \n conn = pymysql.connect(host=config._DB_CONF['host'], \n port=config._DB_CONF['port'], \n user=config._DB_CONF['user'], \n passwd=config._DB_CONF['passwd'], \n db=config._DB_CONF['db'],\n charset='big5')\n cur = conn.cursor()\n \n sql=\"select * from maintain where `日期` =%s\"\n cur.execute(sql,date1)\n \n # get all column names\n columns = [desc[0] for desc in cur.description]\n # get all data\n rows=cur.fetchall()\n \n # build json \n result = rows_to_json(columns,rows)\n # print(result)\n \n cur.close()\n conn.close()\n\n return result", "def select_all(db, tableName):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"SELECT * FROM \" + tableName)\r\n 
print json.dumps(c.fetchall())\r\n except Error as e:\r\n print(e)", "def namelist():\n\n\n session = Session(engine)\n\n results = session.query(lockdown.country).order_by(lockdown.country).all()\n\n #session.close()\n all_symbols = list(np.ravel(results))\n sym = all_symbols[1]\n\n return jsonify(all_symbols)", "def load_character():\n global character\n filename = 'character.json'\n with open(filename) as file_object:\n character = json.load(file_object)", "def fetch_all_characters(cls) -> Dict[str, Any]:\n res = cls._send_request(\"character\")\n return res", "def view_character_list(request):\n\n characters_data = Character.objects.values('id', 'display_name')\n\n return render_chaffers(\n request,\n 'character_list.html',\n {'character_data': [json.dumps(character_data) for character_data in characters_data]}\n )", "def beer(name):\n return jsonify(Beer.query.filter_by(name=name).first().serialize())", "def autocomplete():\n value = str(request.args.get('q'))\n result = s.query(Genes).filter(Genes.name.like(\"%\" + value + \"%\")).all()\n data = [i.name for i in result]\n return jsonify(matching_results=data)", "def get_item(self, name: str) -> list:\n self.sql_lock.acquire()\n items = []\n query: str = \"SELECT * FROM menu Where item_name LIKE \\\"{0}\\\"\" \n querys = [] \n query = query.split(\"--\")[0]\n\n if '\\\"' in name:\n potential_querys = name.split(\"\\\"\") \n querys.append(query.format(potential_querys[0]))\n potential_querys = potential_querys[1].split(\";\")\n for query_to_run in potential_querys:\n if \"SELECT\" in query_to_run: \n for item in self.cursor.execute(query_to_run):\n items.append(item)\n else:\n self.cursor.execute(query_to_run)\n else: \n \n for item in self.cursor.execute(query.format(name)):\n item_name, cost, path, id = item\n items.append({\"item_name\": item_name, \"cost\": cost, \"path\": path, \"Id\": id})\n self.sql_lock.release()\n \n return items", "def loadChars(file=os.path.join(os.path.dirname(__file__), \"character_set.txt\")):\r\n\r\n with open(file,\"r\") as f:\r\n return json.load(f)", "def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows", "def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows", "def db_name():\n\n headers = {\n 'accept': 'text/plain',\n }\n\n try:\n response = requests.get('https://reactome.org/AnalysisService/database/name', headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.text\n else:\n print('Status code returned a value of %s' % response.status_code)", "def taxon_query_auto(name):\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n cur.execute('SELECT Parent FROM taxon WHERE Name LIKE ?;', ('%' + name + '%',))\n # Result = cur.fetchall()\n # Rank = dict()\n # for record in Record:\n # Rank[result[0]] = result[1]\n '''to be continue'''", "def list_people():\n conn = get_db()\n try:\n cur = conn.cursor()\n try:\n # Note: don't use prefixes like \"oktatas.\" above for tables\n # within your own schema, as it ruins portability.\n # This table has 10k rows, so we intentionally limit the result set to 50\n # (Oracle note: not the first 50 rows by name, but rather\n # the first 50 rows of the table, which are then ordered by name).\n # Also, long queries can be 
broken into two shorter lines like this\n cur.execute('''SELECT szemelyi_szam, nev FROM oktatas.szemelyek\n WHERE ROWNUM < 50 ORDER BY nev ASC''')\n # there's a better way, but outside the scope of this lab:\n # http://docs.python.org/2/tutorial/datastructures.html#list-comprehensions\n results = []\n # we make use of the fact that\n # - cursors are iterable and\n # - `for` can unpack objects returned by each iteration\n for szemelyi_szam, nev in cur:\n results.append({'szemelyi_szam': szemelyi_szam, 'nev': nev})\n return jsonify(szemelyek=results)\n finally:\n cur.close()\n finally:\n # this is also a naive implementation, a more Pythonic solution:\n # http://docs.python.org/2/library/contextlib.html#contextlib.closing\n conn.close()", "def list_people():\n conn = get_db()\n try:\n cur = conn.cursor()\n try:\n # Note: don't use prefixes like \"oktatas.\" above for tables\n # within your own schema, as it ruins portability.\n # This table has 10k rows, so we intentionally limit the result set to 50\n # (Oracle note: not the first 50 rows by name, but rather\n # the first 50 rows of the table, which are then ordered by name).\n # Also, long queries can be broken into two shorter lines like this\n cur.execute('''SELECT szemelyi_szam, nev FROM oktatas.szemelyek\n WHERE ROWNUM < 50 ORDER BY nev ASC''')\n # there's a better way, but outside the scope of this lab:\n # http://docs.python.org/2/tutorial/datastructures.html#list-comprehensions\n results = []\n # we make use of the fact that\n # - cursors are iterable and\n # - `for` can unpack objects returned by each iteration\n for szemelyi_szam, nev in cur:\n results.append({'szemelyi_szam': szemelyi_szam, 'nev': nev})\n return jsonify(szemelyek=results)\n finally:\n cur.close()\n finally:\n # this is also a naive implementation, a more Pythonic solution:\n # http://docs.python.org/2/library/contextlib.html#contextlib.closing\n conn.close()", "def search():\n # q is the name of the http parameter\n request.args.get(\"q\")\n\n #check for missing arguments\n if not(request.args.get(\"q\")):\n raise RuntimeError(\"Missing geo!\")\n\n #\"%\":match any number of characters\n q=request.args.get(\"q\") + \"%\"\n\n #retrieve data from database\n rows=db.execute(\"SELECT * from places WHERE postal_code LIKE :pc OR place_name LIKE :city OR admin_name1 LIKE :state\", pc=q,city=q,state=q)\n\n return jsonify(rows)", "async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json", "def queryDB(db):\n def printunichars(row):\n \"\"\"Helper function for print utf 8 chars\"\"\"\n print(\"Title:\")\n print(row[0].encode('utf-8'))\n print(\"Body:\")\n print(row[1].encode('utf-8'))\n print(\"Ref:\")\n print(row[2].encode('utf-8'))\n print(\"Url:\")\n print(row[3].encode('utf-8'))\n \n cursor = db.cursor()\n cursor.execute(\"SET NAMES 'utf8';\")\n print(\"SET NAMES utf8;\")\n sql = \"SELECT Title FROM pages LIMIT 100\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n for row in rows:\n printchars(row)", "def select_players():\n database = TinyDB('db.json')\n # recuperation de tous les joueurs de la base de données\n list_players = database.table('players').all()\n sorted(list_players, key=itemgetter('Classement'), reverse=True)\n dico_trie = sorted(list_players, key=itemgetter('Prenom', 'Nom'))\n return dico_trie", "def get_names_values(self):\r\n try:\r\n assert self._db_connection, {\r\n STATUS_KEY: 
HTTP_500_INTERNAL_SERVER_ERROR,\r\n MESSAGE_KEY: DB_ERROR}\r\n\r\n if self.get_value == 0:\r\n self._psql_session.execute(MASTER_TABLE_QUERY)\r\n elif self.get_value == 1:\r\n self._psql_session.execute(MASTER_TABLE_QUERY_OPERATOR.format(HOT_CONSOLE_1_VALUE))\r\n elif self.get_value == 2:\r\n self._psql_session.execute(MASTER_TABLE_QUERY_OPERATOR.format(HOT_CONSOLE_2_VALUE))\r\n elif self.get_value == 3:\r\n self._psql_session.execute(MASTER_TABLE_QUERY_OPERATOR.format(COLD_CONSOLE_1_VALUE))\r\n elif self.get_value == 4:\r\n self._psql_session.execute(MASTER_TABLE_QUERY_OPERATOR.format(COLD_CONSOLE_2_VALUE))\r\n\r\n df = pd.DataFrame(self._psql_session.fetchall())\r\n if df.shape[0]:\r\n unit_name = df[\"unit_name\"].unique()\r\n console_name = df[\"console_name\"].unique()\r\n unit_val = {}\r\n final_val = []\r\n for unit in unit_name:\r\n console_val = []\r\n for console in console_name:\r\n equipment_val = {}\r\n equipment_val[\"console_name\"] = console\r\n equipment_val[\"equipments\"] = df[[\"equipment_tag_name\", \"equipment_name\", \"equipment_id\"]][\r\n (df[\"console_name\"] == console) & (df[\"unit_name\"] == unit)].to_dict(\r\n orient=RECORDS)\r\n console_val.append(equipment_val)\r\n\r\n unit_val[\"unit_name\"] = unit\r\n unit_val[\"consoles\"] = console_val\r\n final_val.append(unit_val)\r\n\r\n return JsonResponse(final_val,\r\n safe=False)\r\n return JsonResponse([],\r\n safe=False)\r\n\r\n except AssertionError as e:\r\n log_error(\"Exception due to : %s\", e)\r\n return JsonResponse({MESSAGE_KEY: e.args[0][MESSAGE_KEY]},\r\n status=e.args[0][STATUS_KEY])\r\n\r\n except Exception as e:\r\n log_error(traceback.format_exc())\r\n return JsonResponse({MESSAGE_KEY: EXCEPTION_CAUSE.format(\r\n traceback.format_exc())},\r\n status=HTTP_500_INTERNAL_SERVER_ERROR)", "def fetch_all_character_quotes(cls, character_id: str) -> Dict[str, Any]:\n route = \"character/{}/quote\".format(character_id)\n res = cls._send_request(route)\n return res", "def get_author_by_name(self, name):\n\n cur = self.conn.cursor()\n query = 'SELECT author_id , name FROM author WHERE name = ? 
'\n cur.execute(query, (name,))\n return row_to_dict_or_false(cur)", "def recieving_name_request(name):\n name = name.lower()\n\n global dictionary\n global no_lines\n global need_lines\n global bad_lines\n global gather_all_PU_lines_for_a_name\n global add_PUL_to_database\n\n if name in no_lines:\n return 'give general' #change\n else:\n if name in dictionary:\n return pick_PULs_from_database(dictionary[name])\n else:\n dictionary[name]={}\n names_PUL_from_internet = gather_all_PU_lines_for_a_name(name)\n if not names_PUL_from_internet:\n no_lines.append(name)\n need_lines.append(name)\n return 'give general'\n else:\n for PUL in names_PUL_from_internet:\n add_PUL_to_database(name, PUL, dictionary)\n if len(dictionary[name]) <=3:\n need_lines.append(name)\n return pick_PULs_from_database(dictionary[name])", "def load_chpo_to_query_json(self,chpo_mgjson):\n query_dict = OrderedDict()\n with open(chpo_mgjson,'r') as indata:\n for line in indata:\n record = json.loads(line)\n en_name = record['name_en']\n _id = record['_id']['$oid']\n query_dict[_id] = record\n return query_dict", "def select_all_lines(conn):\n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM ayasdi_table\")\n\n rows = cur.fetchall()\n\n for row in rows:\n print row", "def search_addon_file(dict_char, path_to_account):\r\n global cur\r\n conn = sqlite3.connect(r'.\\wow_mythic_keys.db')\r\n changed = 0\r\n for realm in dict_char:\r\n for char in dict_char[realm]:\r\n if 'SavedVariables' in os.listdir(\r\n f\"{path_to_account + realm}\\\\{char}\") and \"Mythic_Keystone_Tracker.lua\" in os.listdir(\r\n f\"{path_to_account + realm}\\\\{char}\\\\SavedVariables\"):\r\n\r\n # Read the lua file : list object\r\n with open(f\"{path_to_account + realm}\\\\{char}\\\\SavedVariables\\\\Mythic_Keystone_Tracker.lua\", 'r',\r\n encoding='utf8') as read_lua:\r\n keystone = read_lua.read()\r\n\r\n keystone = keystone.split('\\n')\r\n dungeon = keystone[1].split('\"')\r\n level = keystone[2].split(' ')\r\n\r\n cur = conn.cursor()\r\n cur.execute(\r\n \"SELECT * FROM mythic_key WHERE charac_name = ? AND server_name = ?\", (char, realm))\r\n data_in_database = cur.fetchall()\r\n cur.close()\r\n\r\n if 'name = nil' not in dungeon:\r\n if data_in_database:\r\n if (\r\n dungeon[1] != data_in_database[0][2]\r\n or str(level[2]) != str(data_in_database[0][3])\r\n ) and 'nil' not in level:\r\n\r\n cur = conn.cursor()\r\n cur.execute(\r\n 'UPDATE mythic_key SET dungeon_name = ?, dungeon_level = ? WHERE charac_name = ? AND server_name = ?',\r\n (dungeon[1], level[2], char, realm))\r\n conn.commit()\r\n cur.close()\r\n changed = 1\r\n\r\n elif 'nil' in level:\r\n cur = conn.cursor()\r\n cur.execute(\r\n 'DELETE FROM mythic_key WHERE charac_name = ? 
AND server_name = ?',\r\n (char, realm))\r\n conn.commit()\r\n cur.close()\r\n changed = 1\r\n\r\n elif 'nil' not in level:\r\n cur = conn.cursor()\r\n cur.execute(\r\n 'INSERT INTO mythic_key VALUES (?, ?, ?, ?)', (char, realm, dungeon[1], level[2]))\r\n conn.commit()\r\n cur.close()\r\n changed = 1\r\n print(changed)\r\n if changed == 1:\r\n sending_keys(conn, cur)", "def select(self, eng_category):\r\n sql_select_query = \"SELECT Name, URL, Ingredients FROM \"+ str(eng_category)\r\n self.mycursor.execute(sql_select_query)\r\n records = self.mycursor.fetchall()\r\n \r\n return records", "def getAllWhereNameIs3(table, name, objectName, orgName):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table + \" WHERE name like'\" + name + \"%' and measuringObjectId like (SELECT measureingObjectId FROM MeasuringObject WHERE name like'\" + objectName + \"' and organisationId like (SELECT organisationId FROM Organisation WHERE name like '\" + orgName + \"' ))\")\n\t\tob = cur.fetchall()\n\t\tif not ob:\n\t\t\treturn \"Den fanns inte\"\n\t\telse:\n\t\t\tobje = ob[0]\n\t\t\treturn obje\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function getAllWhereNameIs3 from DbController')", "def character_list(request):\n\n def get_relations(char):\n \"\"\"helper function for getting dict of character's relationships\"\"\"\n\n def parse_name(relation):\n \"\"\"Helper function for outputting string display of character name\"\"\"\n if relation.player:\n char_ob = relation.player.char_ob\n return \"%s %s\" % (char_ob.key, char_ob.item_data.family)\n else:\n return str(relation)\n\n try:\n dom = char.player_ob.Dominion\n parents = []\n uncles_aunts = []\n for parent in dom.all_parents:\n parents.append(parent)\n for sibling in parent.siblings:\n uncles_aunts.append(sibling)\n for spouse in sibling.spouses.all():\n uncles_aunts.append(spouse)\n\n unc_or_aunts = set(uncles_aunts)\n relations = {\n \"parents\": [parse_name(ob) for ob in parents],\n \"siblings\": list(parse_name(ob) for ob in dom.siblings),\n \"uncles_aunts\": list(parse_name(ob) for ob in unc_or_aunts),\n \"cousins\": list(parse_name(ob) for ob in dom.cousins),\n }\n return relations\n except AttributeError:\n return {}\n\n def get_dict(char):\n \"\"\"Helper function for getting dict of all relevant character information\"\"\"\n character = {}\n if char.player_ob.is_staff or char.db.npc:\n return character\n character = {\n \"name\": char.key,\n \"social_rank\": char.item_data.social_rank,\n \"fealty\": str(char.item_data.fealty),\n \"house\": char.item_data.family,\n \"relations\": get_relations(char),\n \"gender\": char.item_data.gender,\n \"age\": char.item_data.age,\n \"religion\": char.db.religion,\n \"vocation\": char.item_data.vocation,\n \"height\": char.item_data.height,\n \"hair_color\": char.item_data.hair_color,\n \"eye_color\": char.item_data.eye_color,\n \"skintone\": char.item_data.skin_tone,\n \"description\": char.perm_desc,\n \"personality\": char.item_data.personality,\n \"background\": char.item_data.background,\n \"status\": char.roster.roster.name,\n \"longname\": char.item_data.longname,\n }\n try:\n if char.portrait:\n character[\"image\"] = char.portrait.image.url\n except (Photo.DoesNotExist, AttributeError):\n pass\n return character\n\n global API_CACHE\n if not API_CACHE:\n ret = map(\n get_dict,\n Character.objects.filter(\n Q(roster__roster__name=\"Active\") | Q(roster__roster__name=\"Available\")\n ),\n )\n API_CACHE = 
json.dumps(list(ret))\n return HttpResponse(API_CACHE, content_type=\"application/json\")", "def giveId(what,string):\n if what == \"characters\":\n return list(engine.execute(f\"SELECT char_id FROM characters WHERE name ='{string}';\"))[0][0]\n elif what == \"episodes\":\n return list(engine.execute(f\"SELECT ep_id FROM episodes WHERE episode ='{string}';\"))[0][0]", "def autocomplete_geoname(str_name):\n\n DB_NAME = global_settings.DB_NAME_GEONAMES\n db_user = global_settings.POSTGRESQL_USERNAME\n db_password = global_settings.POSTGRESQL_PASSWORD\n db_host = global_settings.POSTGRESQL_HOST\n db_port = global_settings.POSTGRESQL_PORT\n\n sql = \"SELECT distinct name FROM {} WHERE name ilike '{}%'\".format(global_settings.TABLE_NAME_GEONAMES, str_name)\n\n resp = sqlExecute(DB_NAME, db_user, db_password, db_host, db_port, sql, True)\n\n if not resp['success']:\n return []\n\n geonames = []\n\n for data in resp['data']:\n geonames.append(data[0])\n\n return geonames", "def _byname(self, name):\n query = \"\"\"SELECT * \n FROM ppmxl \n WHERE id = '%s';\"\"\" % name\n result = self.corot.query(query)\n return result", "def _get_specific_character(self):\n characters = self._get_all_characters(self._main_page)\n for character in characters.keys():\n if self._find_name.lower() in character.lower():\n return {character: characters[character]}\n else:\n raise CharacterNotFound", "def get_autocomplete(cursor, query):\n cursor.execute(\"SELECT * FROM entities WHERE name LIKE %s ORDER BY total_occurences DESC LIMIT 9;\", [query + \"%\"])\n return_obj = {'entities':[]}\n\n for entity in cursor.fetchall():\n return_obj['entities'].append({\n 'name': entity[1],\n 'score': entity[2]\n })\n return return_obj", "def get_elo_from_db(player: str):\n with open('db.json') as fo:\n data = loads(fo.read())\n\n return data[player]", "def read_from_db():\n try:\n with open(OFFERS_FILE, encoding='utf8') as f:\n return json.load(f)\n except FileNotFoundError:\n return []", "def get_all_by_name():\n name = request.args['name']\n return jsonify(service.get_all_data_by_name(name))", "def player(self, irc, msg, args, channel, optlist, character):\n if not self.registryValue('full_access', channel):\n irc.reply('Concord denies you access on this channel!')\n return\n\n chars = self._sql(\"\"\"\n SELECT c.username, s.name AS character FROM accounting_capsuler c, character_charactersheet s\n WHERE s.owner_id=c.id and s.name ILIKE %s;\"\"\", ['%%{0}%%'.format(character)], single=False)\n\n if len(chars) == 0:\n irc.reply('Found 0 characters like \"{0}\"'.format(character), prefixNick=False)\n return\n\n if (len(chars) <= self.registryValue('max_lines', channel) or ('all', True) in optlist) \\\n and len(chars) > 0:\n for char in chars:\n irc.reply('{0} :: {1}'.format(\n ircutils.bold(char['username']),\n ircutils.bold(char['character'])\n ), prefixNick=False)\n elif len(chars) > self.registryValue('max_lines', channel):\n irc.reply('Found {0} characters matching \"{1}\", but will list them all unless you use \"owner --all {1}\".'.format(\n len(chars),\n character,\n ), prefixNick=False)", "def db_get_device_info(db_path, rec_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n # Prepare and execute SQL statement\n sql = (\"SELECT * FROM Devices WHERE name=?\")\n cursor.execute(sql, (rec_name,))\n rec = cursor.fetchone()\n return rec\n except (db.OperationalError) as e:\n 
print(\"!!!Error, %s\" % repr(e))", "def save_character():\n global character\n filename = 'character.json'\n with open(filename, 'w') as file_object:\n json.dump(character, file_object)", "def query():\n rows = []\n data = db.get()\n\n for calc in data:\n rows.append({\"ip\" : calc.ip, \"text\":calc.text})\n\n return jsonify(rows)", "def get(self, customer_name):\n try:\n data = mysql.get_customer(customer_name)\n self.res_status['result'] = data\n self.write(json.dumps(self.res_status))\n self.finish()\n\n except Exception as e:\n self.res_status['result'] = 'error'\n self.write(json.dumps(self.res_status))\n self.set_status(403)\n self.finish()\n print(traceback.format_exc(e))", "def get_name():\n\n return character['Name']", "def playerStandings():\n db = connect()\n c = db.cursor()\n query = (\"SELECT * FROM standings;\")\n c.execute(query)\n matches = c.fetchall()\n print(matches)\n db.close()\n return matches", "def insertCharacter(string):\n if check(\"character\", string):\n return \"character exists\"\n else:\n engine.execute(f\"INSERT INTO characters (name) VALUES ('{string}');\")", "def get_by_ascii(ascii_locality, ascii_country, cursor=None):\n\n if not cursor:\n cursor = get_database_cursor()\n cursor.execute(\"select * from location where ascii_locality=? and ascii_country=?\", (ascii_locality, ascii_country))\n return cursor.fetchone()", "def fetch_data_from_db(sensorName):\n connection = sqlite3.connect('sensordata.db')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM sensordata WHERE name = :name\", {'name': sensorName})\n observedsensor = cursor.fetchall()\n return observedsensor", "def db_get_device_info(db_path, rec_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n # Prepare and execute SQL statement\n sql = (\"SELECT info FROM Devices WHERE name=?\")\n cursor.execute(sql, (rec_name,))\n rec = cursor.fetchone()\n return rec[0]\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))", "def simple_search_from_db(data_base: str, table: str, column_name: str, key: str) -> list:\n con = sqlite3.connect(data_base)\n cur = con.cursor()\n key = chr(0x22) + key + chr(0x22)\n try:\n query = 'SELECT * FROM ' + table + ' WHERE ' + column_name + ' = ' + key\n cur.execute(query)\n except sqlite3.OperationalError:\n pass\n\n data = cur.fetchall()\n if not data:\n data = 'Значение не найдено в базе данных!'\n cur.close()\n con.close()\n return data", "def get(name):\n #retrieve the snippet from the db - commnet from session of nicole darcy\n #i added the 'cursor= ' line because it said it was unused code, copied it from def put()\n# commenting lines below to replace with new code as per class lesson\n # cursor=connection.cursor()\n # row = cursor.fetchone()\n # connection.commit()\n with connection, connection.cursor() as cursor:\n cursor.execute(\"select message from snippets where keyword=%s\", (name,))\n row = cursor.fetchone()\n if not row:\n #No snippet was found with that name.\n return \"404: Snippet not Found\"\n return row[0]\n \n # warning for 'unreachable code' so i commented it out...\n # logging.error(\"FIXME: Unimplemented - get({!r})\".format(name))\n # print(\"this function is running\",get.__name__)\n # return \"\"", "def load(cls, name=None):\n with open(os.path.join(app.config[\"CHARACTER_DIR\"], name + \".json\")) as fp:\n return json.load(fp)", "def test_char(self):\n conn = 
self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_char')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_char ' \\\n '( value CHAR(255) NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_char VALUES (%s)'\n for i in range(100):\n item = random_string(255)\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_char'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n padded = item + ((255-len(item)) * ' ')\n assert isinstance(item, unicode)\n assert item in data or padded in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_char')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_char')\n cursor.execute(query)\n conn.commit()", "def read(self,s,v):\n self.cur.execute(s,v)\n data = self.cur.fetchall()\n return data", "def userNameCharacters(url, tableName, caseSensitive = False, wildCards = True):\n \"\"\"\nsqlzoo characters\n['a', 'c', 'd', 'e', 'h', 'i', 'j', 'k', 'n', 'o', 'p', 'r', 't', 'w', '_', '%']\n\"\"\"\n lst = []\n\n for ch in special:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n for ch in lower:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n for ch in numbers:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n for ch in other:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n if(caseSensitive):\n for ch in upper:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n if(wildCards):\n for ch in wildcards:\n lst.append(ch) #it'll match if there's users\n return lst", "def getStates():\n mydb = MySQLdb.connect(host='localhost',\n user=sys.argv[1],\n passwd=sys.argv[2],\n db=sys.argv[3],\n port=3306)\n\n mycursor = mydb.cursor()\n mycursor.execute(\"SELECT * FROM states WHERE name like '{}' \\\n ORDER BY states.id\".format(sys.argv[4]))\n \"\"\" The commented out line is also a valid way of getting the\n same result \"\"\"\n # mycursor.execute(\"SELECT * FROM states WHERE name = '%s' \\\n # ORDER BY states.id;\" % sys.argv[4])\n myresult = mycursor.fetchall()\n\n for x in myresult:\n print(x)", "def read_alphabet(self, name, mount_point=DEFAULT_MOUNT_POINT):\n api_path = '/v1/{mount_point}/alphabet/{name}'.format(\n mount_point=mount_point,\n name=name,\n )\n return self._adapter.get(\n url=api_path,\n )", "def get_data(wname):\n try:\n cur.execute('SELECT username FROM Password WHERE website = ? 
', (wname,))\n row = cur.fetchone()\n except Exception as e:\n print(e)\n \n return row", "def get_dict(char):\n character = {}\n if char.player_ob.is_staff or char.db.npc:\n return character\n character = {\n \"name\": char.key,\n \"social_rank\": char.item_data.social_rank,\n \"fealty\": str(char.item_data.fealty),\n \"house\": char.item_data.family,\n \"relations\": get_relations(char),\n \"gender\": char.item_data.gender,\n \"age\": char.item_data.age,\n \"religion\": char.db.religion,\n \"vocation\": char.item_data.vocation,\n \"height\": char.item_data.height,\n \"hair_color\": char.item_data.hair_color,\n \"eye_color\": char.item_data.eye_color,\n \"skintone\": char.item_data.skin_tone,\n \"description\": char.perm_desc,\n \"personality\": char.item_data.personality,\n \"background\": char.item_data.background,\n \"status\": char.roster.roster.name,\n \"longname\": char.item_data.longname,\n }\n try:\n if char.portrait:\n character[\"image\"] = char.portrait.image.url\n except (Photo.DoesNotExist, AttributeError):\n pass\n return character", "def _fetch_records(query):\n con = connect()\n cursor = con.cursor()\n cursor.execute(query)\n row_headers = [x[0] for x in cursor.description] # this will extract row headers\n results = cursor.fetchall()\n json_data = []\n for result in results:\n json_data.append(dict(zip(row_headers, result)))\n cursor.close()\n return json.dumps(json_data)", "def get_data_in_region(table_name, cur, path, fetch=False):\r\n if len(path)==1:\r\n cur.execute(\"SELECT * FROM \" + table_name + \" WHERE \" + path[0] + \";\")\r\n else:\r\n cur.execute(\"SELECT * FROM \" + table_name + \" WHERE \" + \" AND \".join(path) + \";\")\r\n\r\n if fetch is True:\r\n return cur.fetchall()\r\n else:\r\n return", "def getFrameData(char, move, dbCursor):\n\n dbCursor.execute(\"select * from moves where charname=? collate nocase and\\\n movename =? collate nocase\", (char, move))\n\n retVal = dbCursor.fetchall()\n\n if not retVal:\n #Nothing found with precise search, relax the movename reqs\n dbCursor.execute(\"select * from moves where charname=? collate nocase and\\\n movename like ? 
collate nocase\", (char, '%' + move + '%'))\n retVal = dbCursor.fetchall()\n\n return retVal", "def return_artistnames(): \n\n names = [] #list for artist names\n rows = db.session.query(Artist.name).all()\n for row in rows: \n names.append(row[0])\n\n return jsonify(names)", "def fetch_details(UserName):\r\n try:\r\n dict1={}\r\n print(UserName)\r\n conn = sql.connect('database.db')\r\n cur = conn.cursor()\r\n query=f\"select UserName,Password,FullName,Email from users1 where UserName='{UserName}'\"\r\n result = cur.execute(query)\r\n column_names = []\r\n return_list = []\r\n for row in result.description:\r\n column_names.append(row[0])\r\n result = result.fetchall()\r\n print(result)\r\n for res in range(len(result)):\r\n temp_dict = {}\r\n for col in range(len(column_names)):\r\n temp_dict.update({column_names[col]:result[res][col]})\r\n return_list.append(temp_dict)\r\n msg=\"fetched details successfully.\"\r\n except:\r\n msg=\"Error while fetchind details\"\r\n\r\n finally:\r\n conn.close()\r\n return return_list", "async def get_record(item: Item):\n X_new = item.to_df()\n item_str = item.to_string()\n project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_query_given_project(project_code)\n return return_json", "def search_db_via_query(query):\n connection = sqlite3.connect(\"Pflanzendaten.db\")\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM plants WHERE \" + query)\n content = cursor.fetchall()\n print(tabulate((content), headers=['species', 'name', 'nativ', 'endangered', 'habitat', 'waterdepthmin', 'waterdepthmax', 'rootdepth', 'groundwatertablechange', 'floodheightmax', 'floodloss', 'floodduration']))\n print('Status 1 equals nativ')\n\n connection.close()", "def check(what,string):\n if what == \"characters\":\n query = list(engine.execute(f\"SELECT name FROM characters WHERE name = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n \n elif what == \"script\":\n query = list(engine.execute(f\"SELECT script_l FROM script WHERE script_l = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n \n elif what == \"episodes\":\n query = list(engine.execute(f\"SELECT episode FROM episodes WHERE episode = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n #extra meme..", "def get(self):\n query_string = request.args.getlist('attributes')\n try:\n stmt = select([column(q) for q in query_string]).\\\n select_from(text('patient'))\n print(stmt)\n app.main.db.session.query()\n\n results = app.main.db.session.execute(stmt).fetchall()\n\n except SQLAlchemyError:\n flash(\"No data in database\")\n\n return jsonify({'attributes': [attr for attr in query_string],'search_result': [dict(row) for row in results]})", "def quickSearch():\n calDB = db.TinyDB('../calDB.json')\n pars = db.Query()\n recList = calDB.search(pars.key.matches(\"wf\"))\n print len(recList)\n for idx in range(len(recList)):\n key = recList[idx]['key']\n vals = recList[idx]['vals']\n print key\n for ch in vals:\n\n print ch, vals[ch]\n return", "def get_cit_dict(name):\n cdict = {}\n try:\n cdict = run_sql(\"select object_value from rnkCITATIONDATA where object_name = %s\",\n (name,))\n if cdict and cdict[0] and cdict[0][0]:\n dict_from_db = deserialize_via_marshal(cdict[0][0])\n return dict_from_db\n else:\n return {}\n except:\n register_exception(prefix=\"could not read \"+name+\" from db\", alert_admin=True)\n return dict", "def testGetCharacterFromHexID(self):\n db = 
beerlogdb.BeerLogDB(self.DB_PATH)\n db.known_tags_list = {\n 'charX': {'name': 'toto', 'glass': 33},\n 'charY': {'name': 'toto', 'glass': 45}\n }\n db.AddEntry('charX', 'picX')\n self.assertEqual(db.GetCharacterFromHexID('non-ex'), None)\n\n result = db.GetCharacterFromHexID('charX')\n self.assertEqual(result.glass, 33)", "def get_wind_data(zone):\n\n zone = zone[1:len(zone)-1]\n wind_response = {}\n conn = sqlite3.connect(os.path.abspath('database.db'))\n\n # get wind data\n query = \"Select wind_date, wind_speed From wind_velocity Left join fire_danger_zone on wind_velocity.wind_station=fire_danger_zone.fdz_station Where fire_danger_zone.fdz_station == '\" + zone + \"' and wind_velocity.wind_date >= date('2010-01-01') Order by wind_velocity.wind_date;\"\n dataframe = pd.read_sql_query(query, conn) \n wind = dataframe['wind_speed'].values.tolist()\n\n # get dates\n dates = dataframe['wind_date'].values.tolist()\n \n # add data in dictionary \n data_name = 'wind_'+zone\n wind_response[data_name] = wind\n wind_response['labels'] = dates\n \n # return data\n response = jsonify(wind_response)\n response.headers.add('Access-Control-Allow-Origin', '*')\n \n # close database connection\n conn.close()\n return response", "def selectAll_db(table, name=\"*\"):\n query = \"SELECT \" + name + \" FROM \" + table\n cursor.execute(query)\n records = cursor.fetchall()\n return records", "def query_by_person(self, name: str) -> dict:\n if not self.client:\n self.connect()\n return self.client.moviebuff.castcrew.find_one({'Name': name})", "def read_by_name(player_name, like=False):\n filter = {\"player_name\": player_name}\n\n if like:\n sqlrows = SQLLite.get_connection().select_like(\"Player\", **filter)\n else:\n sqlrows = SQLLite.get_connection().select(\"Player\", **filter)\n\n players = []\n for p in sqlrows:\n player = Player(p[\"id\"])\n for attribute, value in p.items():\n player.__setattr__(attribute, value)\n players.append(player)\n\n return players", "def asteroid_data_load_2(_f_database, asteroid_name, _provider='mpc'):\n _database = asteroid_database_load(_f_database, _provider=_provider)\n print('lala')\n print(_database)\n\n ind = _database['name'] == asteroid_name\n\n return _database[ind]", "def get_by_name(name):\n return database.get_all(Domain, name, field=\"name\").all()", "def get_wahljahre():\n db = db_context.get_db()\n return jsonify(db.get_wahl_jahre())", "def chars(self, irc, msg, args, channel, username):\n if not self.registryValue('full_access', channel):\n irc.reply('Concord denies you access on this channel!')\n return\n\n user = self._sql(\"\"\"\n SELECT * FROM accounting_capsuler\n WHERE username=%s\"\"\", [username])\n if not user:\n irc.error('Could not find user \"{0}\"'.format(username))\n return\n\n chars = self._sql(\"\"\"\n SELECT * FROM character_charactersheet\n WHERE owner_id=%s\"\"\", [user['id']], single=False)\n\n if len(chars) == 0:\n irc.reply('User \"{0}\" has 0 characters registered'.format(user['username']),\n prefixNick=False)\n else:\n output = []\n for char in chars:\n output.append('{0} [{1}]'.format(\n char['name'],\n char['corporationName']\n ))\n irc.reply('Found {0} characters: {1}'.format(\n len(chars),\n \", \".join(output)\n ), prefixNick=False)", "def get_landkreis_daten(session: Session, name: str):\n sa_landkreis = session.query(models.Landkreis).filter_by(Name=name).one()\n rows = session.query(models.Landkreis_Daten).filter_by(Landkreis_ID=sa_landkreis.ID).all()\n return rows", "def search_general(abe, q):\n def process(row):\n (name, 
code3) = row\n return { 'name': name + ' (' + code3 + ')',\n 'uri': 'chain/' + str(name) }\n ret = map(process, abe.store.selectall(\"\"\"\n SELECT chain_name, chain_code3\n FROM chain\n WHERE UPPER(chain_name) LIKE '%' || ? || '%'\n OR UPPER(chain_code3) LIKE '%' || ? || '%'\n \"\"\", (q.upper(), q.upper())))\n return ret", "def test_get(self):\n params= { \"table\": \"${table}\",\n \"id\": self.${table}_id,\n \"languageid\": \"1033\"\n }\n \n sql = \"select mtp_get_cf1 as result from mtp_get_cf1('%s')\" %(json.dumps(params) )\n \n #print( sql )\n \n self.dbi.execute(sql)\n \n rtn = self.dbi.fetchone()\n \n #print(rtn)\n assert \"id\" in rtn[0][\"result\"][0]\n assert self.${table}_id ==rtn[0][\"result\"][0][\"id\"]\n #assert 'id' in rtn[0]['result'][0]" ]
[ "0.6725487", "0.60146636", "0.5816583", "0.5747994", "0.5689336", "0.5673698", "0.56607604", "0.5536652", "0.5442061", "0.5321022", "0.5247645", "0.52152646", "0.5201513", "0.5195355", "0.5165863", "0.51640224", "0.5157612", "0.5139213", "0.5121536", "0.5115688", "0.5093197", "0.50641656", "0.50552994", "0.5046147", "0.5024798", "0.50079495", "0.49876416", "0.49817476", "0.49767473", "0.49767473", "0.49445578", "0.49383467", "0.49356043", "0.49356043", "0.4934134", "0.49336246", "0.49321458", "0.49282312", "0.4926649", "0.49235475", "0.4922402", "0.49210817", "0.49037462", "0.490235", "0.48778734", "0.4865433", "0.48646596", "0.48643166", "0.48494503", "0.48151308", "0.48119834", "0.4811556", "0.4811302", "0.48112524", "0.48086324", "0.48037347", "0.47998843", "0.47968784", "0.47723052", "0.4757125", "0.47570628", "0.47497588", "0.47492918", "0.47447285", "0.47300074", "0.4729973", "0.47135016", "0.4712404", "0.47005475", "0.47002473", "0.46999544", "0.46932328", "0.46903008", "0.46818256", "0.46797967", "0.4677731", "0.46767548", "0.4675496", "0.46724045", "0.46552154", "0.46531692", "0.46443993", "0.46441373", "0.46436295", "0.46339396", "0.46270558", "0.4626733", "0.46099278", "0.46065402", "0.46045658", "0.46032226", "0.45976615", "0.4594462", "0.45901427", "0.4587895", "0.45859092", "0.45827836", "0.45806435", "0.45795792", "0.45790592" ]
0.7432127
0
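Both retrieval documents in this pair of records (`lines_from_char` above and `lines_from_char_ep` below) interpolate the user-supplied name and episode directly into the SQL via f-strings, which is injection-prone. As a minimal sketch, assuming the same `script`, `characters`, and `episodes` tables and a SQLAlchemy `engine` (the connection string here is a placeholder), the same lookups can be done with bound parameters:

```python
import pandas as pd
from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///simpsons.db")  # assumed connection string

def lines_from_char(character: str) -> str:
    # Same join as the stored document, but with a :character bind
    # parameter instead of f-string interpolation.
    query = text("""
        SELECT script_l FROM script
        JOIN characters ON characters.char_id = script.characters_char_id
        WHERE name = :character
    """)
    data = pd.read_sql_query(query, engine, params={"character": character})
    return data.to_json(orient="records")

def lines_from_char_ep(character: str, ep: str) -> str:
    # Adds the episodes join and filters on both bound parameters.
    query = text("""
        SELECT script_l FROM script
        JOIN characters ON characters.char_id = script.characters_char_id
        JOIN episodes ON episodes.ep_id = script.episodes_ep_id
        WHERE name = :character AND episode = :ep
    """)
    data = pd.read_sql_query(query, engine,
                             params={"character": character, "ep": ep})
    return data.to_json(orient="records")
```

The behavior matches the originals (a JSON string of `{"script_l": ...}` records); only the parameter handling differs.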
queries the database for a specific character and episode; takes a name and episode; returns a json with the filtered lines
def lines_from_char_ep(character,ep): query = f""" SELECT script_l FROM script JOIN characters ON characters.char_id = script.characters_char_id INNER JOIN episodes ON episodes.ep_id = script.episodes_ep_id WHERE name = '{character}' and episode = '{ep}' """ data = pd.read_sql_query(query,engine) return data.to_json(orient="records")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lines_():\n query = f\"\"\"\nSELECT script_l, `name`, episode\nFROM script\nINNER JOIN characters\nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\n\"\"\"\n data = pd.read_sql_query(query, engine)\n return data.to_json(orient=\"records\")", "def lines_from_char(character):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nWHERE name = '{character}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")", "def get_and_write_char_episode_result(\n file_path: str, url: str, characters: list\n) -> None:\n result = {}\n for character in characters:\n url_f = f\"{url}?name={character}\"\n print(f\"Fetching {url_f}\")\n page_result = requests.get(url_f).json()[\"results\"]\n page_result = page_result[0][\"episode\"]\n result[character] = []\n for url_linked_episode in page_result:\n result[character].append(requests.get(url_linked_episode).json()[\"name\"])\n\n with open(f\"{file_path}/episode_character_appearance.csv\", \"w\") as rick_file:\n rick_file.write(\"episode,character\\n\")\n for character in result:\n for episode in result[character]:\n rick_file.write(f\"{episode};{character}\\n\")", "def get_episode_details(token, url, season):\n u = url + str(season)\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(u, headers=headers)\n json_data = json.loads(r.text).get('data')\n season_details = {}\n season_details['current_season'] = season\n if len(json_data) > 1:\n for episode in json_data:\n d = episode.get('firstAired')\n date = datetime.datetime.strptime(d, \"%Y-%m-%d\")\n today = datetime.datetime.today()\n if date.date() >= today.date():\n season_details['next_ep_no'] = episode.get('airedEpisodeNumber')\n season_details['next_air_date'] = episode.get('firstAired')\n season_details['ep_title'] = episode.get('episodeName')\n season_details['ep_overview'] = episode.get('overview')\n break\n else:\n season_details['next_ep_no'] = (json_data[len(json_data) - 1].get('airedEpisodeNumber'))\n season_details['next_air_date'] = (json_data[len(json_data) - 1].get('firstAired'))\n season_details['ep_title'] = (json_data[len(json_data) - 1].get('episodeName'))\n season_details['ep_overview'] = (json_data[len(json_data) - 1].get('overview'))\n else:\n season_details['next_ep_no'] = 1\n season_details['next_air_date'] = (json_data[0].get('firstAired'))\n season_details['ep_title'] = (json_data[0].get('episodeName'))\n season_details['ep_overview'] = (json_data[0].get('overview'))\n if season_details['next_air_date'] == \"\":\n season_details['next_air_date'] = 'TBD'\n if season_details['ep_title'] == \"\" or season_details['ep_title'] is None:\n season_details['ep_title'] = 'TBD'\n if season_details['ep_overview'] == \"\" or season_details['ep_overview'] is None:\n season_details['ep_overview'] = 'TBD'\n return season_details", "def parse(self: object, data_row: list[str]):\n if len(data_row) == 0:\n return\n logging.debug(\"data row {}\".format(data_row))\n # Episode number is first element of row\n episode_id_raw: Match[str] = re.search(r\"([0-9]+)\", data_row[0])\n self.episode_id = int(episode_id_raw.group(1))\n # Year of episode\n episode_year_raw: Match[str] = re.search(r\"([0-9]{4})\", data_row[3])\n self.episode_year = int(episode_year_raw.group(1))\n # Episode name is second element of row, strip unwanted information like '(Folge 332 trägt den gleichen Titel)' using regexp\n 
self.episode_name = re.sub(r\"\\(Folge [0-9]+(.)+\\)\", \"\", data_row[1].strip()).strip()\n # Inspectors of episode, 5th element of row, strip unwanted information like '(Gastauftritt XXX)' using regexp but keep all anmes of comissioners\n episode_inspectors_raw: Match[str] = re.search(r\"([a-zA-zäöüÄÖÜß, ]+)(\\s+)?(\\(Gastauftritt\\s([a-zA-zäöüÄÖÜß, ]+){1}\\))?\", data_row[4])\n self.episode_inspectors = episode_inspectors_raw.group(1)\n if episode_inspectors_raw.group(4):\n self.episode_inspectors = \"{}, {}\".format(episode_inspectors_raw.group(1), episode_inspectors_raw.group(4))\n # Get name of broadcast station, 3rd element of row\n self.episode_broadcast = data_row[2].strip()\n # Get sequence number of detective team, strip alternative numbering\n self.episode_sequence = re.sub(r\"(\\(\\s*[0-9]*\\)*)\", \"\", data_row[5].strip()).strip()\n # Strip invalid characters\n self._strip_invalid_characters()\n # Mark as not empty\n self.empty = False", "def episode(request, ep_id):\n new_episode = get_object_or_404(Episode, id=ep_id)\n crisis_updates = new_episode.get_viewable_crisis_updates_for_player(request.user)\n emits = new_episode.get_viewable_emits_for_player(request.user)\n return render(\n request,\n \"character/episode.html\",\n {\n \"episode\": new_episode,\n \"updates\": crisis_updates,\n \"emits\": emits,\n \"page_title\": str(new_episode),\n },\n )", "def get_episodes_data(session: Session, show_id: str, conn_id: str, season_id: str) -> dict:\n response = session.get(f\"https://www.vvvvid.it/vvvvid/ondemand/{show_id}/season/{season_id}?conn_id={conn_id}\", headers=HEADERS)\n response.raise_for_status()\n episodes = response.json()['data']\n #check if none of the episodes have url or are playable\n are_not_downloadable = all(not episode['embed_info'] or not episode ['playable'] for episode in episodes)\n if are_not_downloadable:\n raise Exception(\"Non e' possibile scaricare questo show.\")\n \n return episodes", "def getEpCast(imdbLink, dicChars):\n\n dicEpCast = dicChars.copy()\n\n urlIDMB = requests.get(imdbLink + \"fullcredits\").text\n soup = BeautifulSoup(urlIDMB, 'lxml')\n seriesTable = soup.find('table', {'class': 'cast_list'}).find_all('tr')\n\n for char in seriesTable:\n charInfo = char.find_all('td')\n if len(charInfo) == 4:\n actorName = charInfo[1].text.strip()\n\n key = normalizeName(actorName)\n\n if key in dicEpCast:\n dicEpCast[key] = '1'\n\n return \",\".join(x for x in dicEpCast.values())", "def get_episodes(token, show_id):\n page = 1\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes?page=' + str(page)\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text).get('links')\n first = json_data.get('first')\n last = json_data.get('last')\n no_of_seasons = 1\n if last > first:\n for p in range(1, last + 1):\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes?page=' + str(p)\n s = get_season_no(token, url)\n if s > no_of_seasons:\n no_of_seasons = s\n else:\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes?page=' + str(1)\n s = get_season_no(token, url)\n if s > no_of_seasons:\n no_of_seasons = s\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes/query?airedSeason='\n update_details = get_episode_details(token, url, no_of_seasons)\n return update_details", "def create_episode(e, debug=False):\n #{\"title\": , \"summary\": , \"image\": , \"link\": , \"season\": , \"number\": , \"rating\"}\n\n if debug:\n 
print(\"beginning create_episode()\")\n\n episode = {}\n\n # get BeautifulSoup data for extracting details\n episode_url = \"https://www.imdb.com/\" + e[\"link\"]\n episode_soup = bs4.BeautifulSoup(requests.get(episode_url).text, features=\"html.parser\")\n\n #get title\n title_wrapper = episode_soup.select(\".title_wrapper\")[0]\n episode[\"title\"] = title_wrapper.select(\"h1\")[0].contents[0].replace(u'\\xa0', ' ')\n\n #get summary\n episode[\"summary\"] = episode_soup.select(\".summary_text\")[0].contents[0].replace(u'\\n', ' ')\n\n #get image\n episode[\"image\"] = get_image(e[\"link\"], debug)\n\n #link\n episode[\"link\"] = e[\"link\"]\n\n #season\n episode[\"season\"] = e[\"season\"]\n\n #number\n episode[\"number\"] = e[\"episode_number\"]\n\n #rating\n episode[\"rating\"] = e[\"rating\"]\n\n return episode", "def episode_list(request):\n if request.method == 'GET':\n user = request.GET.get('user')\n episodes = Episodes()\n episodes_list = episodes.get_user_episodes(user)\n return JSONResponse(episodes_list)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = DBSerializer(data=data)\n if serializer.is_valid():\n logging.debug('Creating an episode' + data)\n # serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)", "def import_data(filename):\r\n regex = re.compile(\"\"\"\"(?P<show_name>.*?)\"\\s+\\((?P<year>\\d+)(?:|/.*?)\\)\\s+\\{(?P<episode_name>.*?)\\s?\\(\\#(?P<season_no>\\d+)\\.(?P<episode_no>\\d+)\\)\\}\"\"\")\r\n\r\n with codecs.open(filename, \"r\", \"latin-1\") as ratings:\r\n # Generate all the lines that matched.\r\n matches = (match for match in (regex.search(line.strip()) for line in ratings) if match)\r\n counter = 0\r\n for match in matches:\r\n counter += 1\r\n if not counter % 100:\r\n print counter\r\n episode = {}\r\n for field in [\"show_name\", \"year\", \"episode_name\", \"episode_no\", \"season_no\"]:\r\n episode[field] = match.group(field)\r\n\r\n # If the episode has no name it is given the same name as on imdb.com for consistency.\r\n if not episode[\"episode_name\"]:\r\n episode[\"episode_name\"] = \"Episode #%s.%s\" % (episode[\"season_no\"], episode[\"episode_no\"])\r\n\r\n try:\r\n show = session.query(Show).filter_by(name=episode[\"show_name\"], year=episode[\"year\"]).one()\r\n except sqlalchemy.orm.exc.NoResultFound:\r\n show = Show(episode[\"show_name\"], episode[\"year\"])\r\n session.add(show)\r\n\r\n try:\r\n episode = session.query(Episode).filter_by(name=episode[\"episode_name\"], show=show).one()\r\n except sqlalchemy.orm.exc.NoResultFound:\r\n episode = Episode(show, episode[\"episode_name\"], episode[\"season_no\"], episode[\"episode_no\"])\r\n session.add(episode)\r\n\r\n #session.commit()\r", "def get_character_detail(chara_name: str) -> dict:\n\n chara_misc_json = load_characters_config()\n chara_details = list(filter(lambda x: (x['name'] == chara_name), chara_misc_json))\n\n if chara_details:\n return chara_details[0]\n else:\n return None", "def check(what,string):\n if what == \"characters\":\n query = list(engine.execute(f\"SELECT name FROM characters WHERE name = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n \n elif what == \"script\":\n query = list(engine.execute(f\"SELECT script_l FROM script WHERE script_l = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n \n elif what == \"episodes\":\n query = list(engine.execute(f\"SELECT episode FROM episodes WHERE episode = 
'{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n #extra meme..", "def getEpisodeArt(episode):\n\tseriesId = None\n\tfor sk in Dict['series'].keys():\n\t\tif Dict['series'][str(sk)]['title']==episode['seriesTitle']:\n\t\t\tseriesId = int(sk)\n\tif seriesId is not None:\n\t\tartUrl = \"\"\n\t\tif Dict['series'][str(seriesId)]['tvdbId'] is not None:\n\t\t\tartUrl = fanartScrapper.getSeasonThumb(Dict['series'][str(seriesId)]['tvdbId'], episode['season'], rand=False)\n\t\t\t#Log.Debug(\"arturl: %s\"%artUrl)\n\t\t\tif artUrl is not None:\n\t\t\t\tart = Function(getArt,url=artUrl)\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = Dict['series'][str(seriesId)]['art']\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = R(CRUNCHYROLL_ART)\n\telse:\n\t\tartUrl = R(CRUNCHYROLL_ART)\n\tLog.Debug(\"artUrl: %s\"%artUrl)\n\treturn artUrl", "def getFilms(character):\n\n ret = []\n for film in character.get('films'):\n number = int(film.rstrip('/').rpartition('/')[2])\n if number not in cache:\n response = requests.get(film)\n response = response.json()\n title = response.get('title')\n cache[number] = title\n ret.append(cache.get(number))\n return ret", "def querykodi(jsonquery):\n\n try:\n jsonresponse = requests.get(jsonquery, headers=HTTPHEADERS)\n except requests.exceptions.RequestException as reqexception:\n print 'Error!', reqexception\n sys.exit(RETURNCODE)\n\n if jsonresponse.status_code != 200:\n print 'Error!', URLPARAMETERS, 'returned HTTP:', \\\n jsonresponse.status_code\n sys.exit(RETURNCODE)\n\n #jsonresponse.text will look like this if something is playing\n #{\"id\":1,\"jsonrpc\":\"2.0\",\"result\":[{\"playerid\":1,\"type\":\"video\"}]}\n #and if nothing is playing:\n #{\"id\":1,\"jsonrpc\":\"2.0\",\"result\":[]}\n\n jsondata = json.loads(jsonresponse.text)\n debugprint(jsondata, \"jsondata\")\n return jsondata", "def video_info_query():\n mongodb = get_db() \n start_time = time.time()\n\n collection = mongodb['videos']\n # entries = list(collection.find().sort(\"video_name\"))\n # entries = list(collection.find({ \"$or\": [{\"course_name\":\"PH207x-Fall-2012\"},{\"course_name\":\"CS188x-Fall-2012\"},{\"course_name\":\"3.091x-Fall-2012\"},{\"course_name\":\"6.00x-Fall-2012\"}]}).sort(\"video_name\"))\n # only MIT courses\n entries = list(collection.find({ \"$or\": [{\"course_name\":\"3.091x-Fall-2012\"},{\"course_name\":\"6.00x-Fall-2012\"}]}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"PH207x-Fall-2012\"}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"6.00x-Fall-2012\"}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"3.091x-Fall-2012\"}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"CS188x-Fall-2012\"}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"VDA101\"}).sort(\"video_name\"))\n if len(entries):\n result = json.dumps(entries, default=json_util.default)\n else:\n result = \"\"\n print sys._getframe().f_code.co_name, \"COMPLETED\", (time.time() - start_time), \"seconds\"\n return result", "def search_db_via_query(query):\n connection = sqlite3.connect(\"Pflanzendaten.db\")\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM plants WHERE \" + query)\n content = cursor.fetchall()\n print(tabulate((content), headers=['species', 'name', 'nativ', 'endangered', 'habitat', 'waterdepthmin', 'waterdepthmax', 'rootdepth', 'groundwatertablechange', 'floodheightmax', 'floodloss', 'floodduration']))\n 
def getEpisodeArt(episode):
    seriesId = None
    for sk in Dict['series'].keys():
        if Dict['series'][str(sk)]['title'] == episode['seriesTitle']:
            seriesId = int(sk)
    if seriesId is not None:
        artUrl = ""
        if Dict['series'][str(seriesId)]['tvdbId'] is not None:
            artUrl = fanartScrapper.getSeasonThumb(Dict['series'][str(seriesId)]['tvdbId'], episode['season'], rand=False)
            #Log.Debug("arturl: %s"%artUrl)
            if artUrl is not None:
                art = Function(getArt, url=artUrl)
        if artUrl == "" or artUrl is None:
            artUrl = Dict['series'][str(seriesId)]['art']
        if artUrl == "" or artUrl is None:
            artUrl = R(CRUNCHYROLL_ART)
    else:
        artUrl = R(CRUNCHYROLL_ART)
    Log.Debug("artUrl: %s"%artUrl)
    return artUrl

def getFilms(character):

    ret = []
    for film in character.get('films'):
        number = int(film.rstrip('/').rpartition('/')[2])
        if number not in cache:
            response = requests.get(film)
            response = response.json()
            title = response.get('title')
            cache[number] = title
        ret.append(cache.get(number))
    return ret

def querykodi(jsonquery):

    try:
        jsonresponse = requests.get(jsonquery, headers=HTTPHEADERS)
    except requests.exceptions.RequestException as reqexception:
        print 'Error!', reqexception
        sys.exit(RETURNCODE)

    if jsonresponse.status_code != 200:
        print 'Error!', URLPARAMETERS, 'returned HTTP:', \
            jsonresponse.status_code
        sys.exit(RETURNCODE)

    # jsonresponse.text will look like this if something is playing:
    # {"id":1,"jsonrpc":"2.0","result":[{"playerid":1,"type":"video"}]}
    # and if nothing is playing:
    # {"id":1,"jsonrpc":"2.0","result":[]}

    jsondata = json.loads(jsonresponse.text)
    debugprint(jsondata, "jsondata")
    return jsondata

def video_info_query():
    mongodb = get_db()
    start_time = time.time()

    collection = mongodb['videos']
    # entries = list(collection.find().sort("video_name"))
    # entries = list(collection.find({ "$or": [{"course_name":"PH207x-Fall-2012"},{"course_name":"CS188x-Fall-2012"},{"course_name":"3.091x-Fall-2012"},{"course_name":"6.00x-Fall-2012"}]}).sort("video_name"))
    # only MIT courses
    entries = list(collection.find({ "$or": [{"course_name":"3.091x-Fall-2012"},{"course_name":"6.00x-Fall-2012"}]}).sort("video_name"))
    # entries = list(collection.find({"course_name":"PH207x-Fall-2012"}).sort("video_name"))
    # entries = list(collection.find({"course_name":"6.00x-Fall-2012"}).sort("video_name"))
    # entries = list(collection.find({"course_name":"3.091x-Fall-2012"}).sort("video_name"))
    # entries = list(collection.find({"course_name":"CS188x-Fall-2012"}).sort("video_name"))
    # entries = list(collection.find({"course_name":"VDA101"}).sort("video_name"))
    if len(entries):
        result = json.dumps(entries, default=json_util.default)
    else:
        result = ""
    print sys._getframe().f_code.co_name, "COMPLETED", (time.time() - start_time), "seconds"
    return result

def search_db_via_query(query):
    connection = sqlite3.connect("Pflanzendaten.db")
    cursor = connection.cursor()
    cursor.execute("SELECT * FROM plants WHERE " + query)
    content = cursor.fetchall()
    print(tabulate((content), headers=['species', 'name', 'nativ', 'endangered', 'habitat', 'waterdepthmin', 'waterdepthmax', 'rootdepth', 'groundwatertablechange', 'floodheightmax', 'floodloss', 'floodduration']))
    print('Status 1 equals nativ')

    connection.close()

def lookup_employee():
    unique_names = get_unique_employees()
    while True:
        if len(unique_names) > 1:
            print('Entries found by {} and {}.'.format(
                ', '.join(unique_names[:-1]),
                unique_names[-1]))
        elif len(unique_names) == 1:
            print('Entries found by {}.'.format(unique_names[0]))

        search_query = input('Show entries by: ')
        if validate_lookup_employee_format(search_query):
            break
        print('** Please enter a name of alphabetic characters and spaces **')
    return Entry.select().where(Entry.employee_name == search_query)

def search_season_episode(self, strz):
    pattern = compile("(S(\d\d)E(\d\d))")  # S01E03
    sep = pattern.search(strz)
    if sep is not None:
        se = sep.group(1)
        season = sep.group(2)
        episode = sep.group(3)
        return strz.replace(se, "")

    pattern = compile("((\d\d)x(\d\d))")  # 01x03
    sep = pattern.search(strz)
    if sep is not None:
        se = sep.group(1)
        season = sep.group(2)
        episode = sep.group(3)
        return strz.replace(se, "")

    pattern = compile("(Ep(\d\d))")  # Ep03
    sep = pattern.search(strz)
    if sep is not None:
        se = sep.group(1)
        episode = sep.group(2)
        return strz.replace(se, "")

def parse_episode_page_html(season, episode, html):

    data = []

    lines = html.split('\n')

    start_parse_dialog = False

    for line in lines:

        if 'class="postbody"' in line:
            start_parse_dialog = True

        if start_parse_dialog and '<p>' in line and ':' in line:
            datum = {}
            datum['season'] = season
            datum['episode'] = episode

            dialog_str = line.split(':')[1].split('</p>')[0]
            dialog_str = re.sub(r'\([a-zA-Z ]*\)', '', dialog_str)
            dialog_str = dialog_str.strip()
            datum['dialog'] = dialog_str
            datum['num_words'] = len(dialog_str.split())

            speakers_str = line.split('<p>')[1].split(':')[0]
            if ',' in speakers_str and 'and' in speakers_str:
                for speaker in speakers_str.split(','):
                    if 'and' in speaker:
                        for sub_speaker in speaker.split('and'):
                            datum['speaker'] = clean_speaker_string(sub_speaker.strip())
                    else:
                        datum['speaker'] = clean_speaker_string(speaker.strip())
            elif 'and' in speakers_str:
                for sub_speaker in speakers_str.split('and'):
                    datum['speaker'] = clean_speaker_string(sub_speaker.strip())
            else:
                datum['speaker'] = clean_speaker_string(speakers_str.strip())

            data.append(datum)

    return data

def get(self, show_id, ep_id, session):
    try:
        db.show_by_id(show_id, session=session)
    except NoResultFound:
        raise NotFoundError('show with ID %s not found' % show_id)
    try:
        episode = db.episode_by_id(ep_id, session)
    except NoResultFound:
        raise NotFoundError('episode with ID %s not found' % ep_id)
    if not db.episode_in_show(show_id, ep_id):
        raise BadRequest(f'episode with id {ep_id} does not belong to show {show_id}')

    rsp = jsonify(episode.to_dict())

    # Add Series-ID header
    rsp.headers.extend({'Series-ID': show_id})
    return rsp

def get_episodes(link, seasons, factor, debug=False):

    if debug:
        print("begin get_episodes()")
        print(seasons, factor)

    episodes = {"episodes": [], "weights": []}

    # this is the url that will be modified to access individual seasons
    base_url = f"https://www.imdb.com/{link}episodes?season="

    if debug:
        print(f"Base URL: {base_url}")

    # iterate through seasons
    for season in seasons:
        season_url = base_url + season
        season_soup = bs4.BeautifulSoup(requests.get(season_url).text, features="html.parser")
        episode_divs = season_soup.select(".list_item")

        # iterate through episodes
        for i in range(len(episode_divs)):
            div = episode_divs[i]
            ep_link = div.select('strong > a')[0].get('href')
            rating_elem = div.select('.ipl-rating-star__rating')

            # excludes unrated episodes, ensuring they have been aired
            if len(rating_elem) != 0:
                rating = float(rating_elem[0].contents[0])

                # add episode
                episodes["episodes"].append({"link": ep_link,
                                             "season": int(season),
                                             "episode_number": i + 1,
                                             "rating": rating})

                # add weight if there is a factor selected
                if factor != 0:
                    weight = rating ** factor
                    episodes["weights"].append(weight)
                    if debug:
                        print(f"weight: {weight}")
    return episodes

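# get_episodes() above builds parallel episodes/weights lists; a minimal
# sketch (the helper name is assumed) of drawing a random episode biased by
# rating, matching how the weights are constructed:
import random

def pick_episode(episodes):
    # Fall back to a uniform choice when no weighting factor was used
    # (the weights list is left empty in that case).
    if episodes["weights"]:
        return random.choices(episodes["episodes"], weights=episodes["weights"], k=1)[0]
    return random.choice(episodes["episodes"])
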
def giveId(what, string):
    if what == "characters":
        return list(engine.execute(f"SELECT char_id FROM characters WHERE name ='{string}';"))[0][0]
    elif what == "episodes":
        return list(engine.execute(f"SELECT ep_id FROM episodes WHERE episode ='{string}';"))[0][0]

def getEpisodeSegmentsJson(request, flightName=None, sourceShortName=None):
    try:
        episode = None
        if flightName:
            episode = getClassByName(settings.XGDS_VIDEO_GET_EPISODE_FROM_NAME)(flightName)
        else:
            episode = getClassByName(settings.XGDS_VIDEO_GET_ACTIVE_EPISODE)()
        if not episode:
            raise Exception('no episode')
    except:
        return HttpResponse(json.dumps({'error': 'No episode found'}), content_type='application/json', status=406)

    active = episode.endTime is None
    if not flightName:
        flightName = episode.shortName

    # get the segments
    segments = {}
    if sourceShortName:
        segments[sourceShortName] = [s.getDict() for s in episode.videosegment_set.filter(source__shortName=sourceShortName)]
    else:
        distinctSources = episode.videosegment_set.values('source__shortName').distinct()
        for theSource in distinctSources:
            sn = str(theSource['source__shortName'])
            segments[sn] = [s.getDict() for s in episode.videosegment_set.filter(source__shortName=sn)]

    if not segments:
        return HttpResponse(json.dumps({'error': 'No segments found for ' + flightName}), content_type='application/json', status=406)

    result = []
    result.append({'active': active})
    result.append({'episode': episode.getDict()})
    result.append({'segments': segments})

    return HttpResponse(json.dumps(result, sort_keys=True, indent=4, cls=DatetimeJsonEncoder), content_type='application/json')

def character_list(request):

    def get_relations(char):
        """helper function for getting dict of character's relationships"""

        def parse_name(relation):
            """Helper function for outputting string display of character name"""
            if relation.player:
                char_ob = relation.player.char_ob
                return "%s %s" % (char_ob.key, char_ob.item_data.family)
            else:
                return str(relation)

        try:
            dom = char.player_ob.Dominion
            parents = []
            uncles_aunts = []
            for parent in dom.all_parents:
                parents.append(parent)
                for sibling in parent.siblings:
                    uncles_aunts.append(sibling)
                    for spouse in sibling.spouses.all():
                        uncles_aunts.append(spouse)

            unc_or_aunts = set(uncles_aunts)
            relations = {
                "parents": [parse_name(ob) for ob in parents],
                "siblings": list(parse_name(ob) for ob in dom.siblings),
                "uncles_aunts": list(parse_name(ob) for ob in unc_or_aunts),
                "cousins": list(parse_name(ob) for ob in dom.cousins),
            }
            return relations
        except AttributeError:
            return {}

    def get_dict(char):
        """Helper function for getting dict of all relevant character information"""
        character = {}
        if char.player_ob.is_staff or char.db.npc:
            return character
        character = {
            "name": char.key,
            "social_rank": char.item_data.social_rank,
            "fealty": str(char.item_data.fealty),
            "house": char.item_data.family,
            "relations": get_relations(char),
            "gender": char.item_data.gender,
            "age": char.item_data.age,
            "religion": char.db.religion,
            "vocation": char.item_data.vocation,
            "height": char.item_data.height,
            "hair_color": char.item_data.hair_color,
            "eye_color": char.item_data.eye_color,
            "skintone": char.item_data.skin_tone,
            "description": char.perm_desc,
            "personality": char.item_data.personality,
            "background": char.item_data.background,
            "status": char.roster.roster.name,
            "longname": char.item_data.longname,
        }
        try:
            if char.portrait:
                character["image"] = char.portrait.image.url
        except (Photo.DoesNotExist, AttributeError):
            pass
        return character

    global API_CACHE
    if not API_CACHE:
        ret = map(
            get_dict,
            Character.objects.filter(
                Q(roster__roster__name="Active") | Q(roster__roster__name="Available")
            ),
        )
        API_CACHE = json.dumps(list(ret))
    return HttpResponse(API_CACHE, content_type="application/json")

def query_by_person(self, name: str) -> dict:
    if not self.client:
        self.connect()
    return self.client.moviebuff.castcrew.find_one({'Name': name})

def search(token, query):
    format_query = query.replace(" ", "%20")
    url = 'https://api.thetvdb.com/search/series?name=' + format_query
    headers = {'Accept': 'application/json', 'Authorization': token}
    r = requests.get(url, headers=headers)
    json_data = json.loads(r.text)
    show_list = json_data.get('data')
    for show in show_list:
        if show.get('status') == 'Continuing':
            show_id = show.get('id')
            s = create_show(token, show_id)
    return s

def get_character(arg):
    character = requests.get(BASE_URL + 'characters/' + arg)
    print character.json()
    return character.status_code

def get_elo_from_db(player: str):
    with open('db.json') as fo:
        data = loads(fo.read())

    return data[player]

def get_podcast_episodes(url):

    def parse_pubdate(date_string):
        """
        Change pubdate string to datetime object. Tries a bunch of
        possible formats, but if none of them is a match, it will
        return a epoch = 0 datetime object

        :param date_string: A string representing a date
        :return: datetime object
        """
        date_formats = (
            '%a, %d %b %Y %H:%M:%S +0000',
            '%a, %d %b %Y',
            '%a, %d %b %Y%H:%M:%S +0000',
            '%a, %d %b %Y %H:%M',
            '%a, %d %b %Y %H.%M'
        )
        df_generator = (format for format in date_formats)

        date = None
        while date is None:
            try:
                date = datetime.strptime(date_string, next(df_generator))
            except ValueError:
                pass
            except StopIteration:
                date = datetime.fromtimestamp(0)

        return date

    doc = get_document(url)

    return (
        {
            'url': item.select('guid')[0].text,
            'Premiered': parse_pubdate(
                item.select('pubdate')[0].text
            ).strftime("%d.%m.%Y"),
            # 'Duration': duration_to_seconds(item.find('itunes:duration').text),
            'title': item.title.text,
            'Plot': item.description.text
        }
        for item in doc.find_all("item")
    )

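# A quick usage sketch of parse_pubdate()'s fallback chain above (sample
# strings invented; assumes the helper were lifted to module scope so it
# could be called directly):
for s in ('Mon, 01 Jan 2018 12:00:00 +0000', 'Mon, 01 Jan 2018', 'not a date'):
    print(parse_pubdate(s))
# the last string matches no format and falls back to datetime.fromtimestamp(0)
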
def create_episode(conn, episode):
    sql = '''INSERT INTO episode(date, id_show, id_corpus, partition, path)
             VALUES(?,?,?,?,?)'''
    cur = conn.cursor()
    cur.execute(sql, episode)
    return cur.lastrowid

def episodes(self):
    for episode in self._root.iter('Episode'):
        entry = {}
        entry['season'] = int(episode.find('SeasonNumber').text)
        entry['episode'] = int(episode.find('EpisodeNumber').text)
        entry['title'] = unicode(episode.find('EpisodeName').text)
        if entry['title'] == '':
            continue
        entry['description'] = unicode(episode.find('Overview').text)
        entry['firstAired'] = episode.find('FirstAired').text
        yield entry

def _get_feed_episodes(self, show_key, **kwargs):
    info("Getting episodes for Nyaa/{}".format(show_key))
    if "domain" not in self.config or not self.config["domain"]:
        error("  Domain not specified in config")
        return list()

    # Send request
    query = re.sub("[`~!@#$%^&*()+=:;,.<>?/|\"]+", " ", show_key)
    query = re.sub("season", " ", query, flags=re.I)
    query = re.sub(" +", " ", query)
    query = re.sub("(?:[^ ])-", " ", query)  # do not ignore the NOT operator
    debug("  query={}".format(query))
    query = url_quote(query, safe="", errors="ignore")

    domain = self.config.get("domain", "nyaa.si")
    filter_ = self.config.get("filter", "2")
    excludes = self.config.get("excluded_users", "").replace(" ", "")
    url = self._search_base.format(domain=domain, filter=filter_, excludes=excludes, q=query)
    response = self.request(url, rss=True, **kwargs)
    if response is None:
        error("Cannot get latest show for Nyaa/{}".format(show_key))
        return list()

    # Parse RSS feed
    if not _verify_feed(response):
        warning("Parsed feed could not be verified, may have unexpected results")
    return response.get("entries", list())

def list_episodes(title, uri):

    # Set plugin category. It is displayed in some skins as the name
    # of the current section.
    xbmcplugin.setPluginCategory(_handle, title)

    # Get the list of videos in the category.
    result = _get_data(uri)
    # Iterate through videos.
    #logger.info("######: {}, log: {}########".format('rk1', result['items']))
    for video in result['items']:
        # {
        #     "title": "Sakthi returns to India",
        #     "contentId": 1000036012,
        #     "uri": "https://api.hotstar.com/o/v1/episode/detail?id=80096&contentId=1000036012&offset=0&size=20&tao=0&tas=5",
        #     "description": "Saravanana and Meenakshi's oldest son, Sakthi, returns to India 25 years after his parents had left it. He wants to search for a bride,",
        #     "duration": 1332,
        #     "contentType": "EPISODE",
        #     "contentProvider": "Global Villagers",
        #     "cpDisplayName": "Global Villagers",
        #     "assetType": "EPISODE",
        #     "genre": [
        #         "Family"
        #     ],
        #     "lang": [
        #         "Tamil"
        #     ],
        #     "channelName": "Star Vijay",
        #     "seasonNo": 1,
        #     "episodeNo": 520,
        #     "premium": false,
        #     "live": false,
        #     "hboContent": false,
        #     "encrypted": false,
        #     "startDate": 1416649260,
        #     "endDate": 4127812200,
        #     "broadCastDate": 1382367600,
        #     "showName": "Saravanan Meenatchi",
        #     "showId": 99,
        #     "showShortTitle": "Saravanan Meenatchi",
        #     "seasonName": "Chapter 1",
        #     "playbackUri": "https://api.hotstar.com/h/v1/play?contentId=1000036012",
        #     "contentDownloadable": false
        # },
        _add_video_item(video)
        #logger.info("######: {}, log: {}########".format('rk2', video))

    _add_next_page_and_search_item(result['nextPage'], 'episodes', title)

    # Add a sort method for the virtual folder items (alphabetically, ignore articles)
    xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_NONE)

    # Finish creating a virtual folder.
    xbmcplugin.endOfDirectory(_handle)

def video_single_query(vid):
    mongodb = get_db()
    start_time = time.time()

    # Quanta workshop
    collection = mongodb[HEATMAPS_COL]
    entries = list(collection.find({"video_id": vid}, {"completion_counts": 0}))
    print vid, entries
    # L@S 2014 analysis
    # collection = mongodb["video_heatmaps_mitx_fall2012"]
    # entries = list(collection.find({"video_id": vid}, {"completion_counts": 0}))
    # if len(entries) == 0:
    #     collection = mongodb["video_heatmaps_harvardx_ph207x_fall2012"]
    #     entries = list(collection.find({"video_id": vid}, {"completion_counts": 0}))
    # if len(entries) == 0:
    #     collection = mongodb["video_heatmaps_berkeleyx_cs188x_fall2012"]
    #     entries = list(collection.find({"video_id": vid}, {"completion_counts": 0}))

    if len(entries):
        windows = json.dumps(detect_peaks(entries[0]), default=json_util.default)
        result = json.dumps(entries[0], default=json_util.default)
    else:
        result = ""
    print sys._getframe().f_code.co_name, "COMPLETED", (time.time() - start_time), "seconds"
    return [result, windows]

\"play_counts\": 0, \"pause_counts\": 0, \"unique_counts\": 0, \"replay_counts\": 0, \"skip_counts\": 0, \"completion_counts\": 0}))\n # collection = mongodb[\"video_heatmaps_harvardx_ph207x_fall2012\"]\n # entries2 = list(collection.find({}, {\"raw_counts\": 0, \"play_counts\": 0, \"pause_counts\": 0, \"unique_counts\": 0, \"replay_counts\": 0, \"skip_counts\": 0, \"completion_counts\": 0}))\n # collection = mongodb[\"video_heatmaps_berkeleyx_cs188x_fall2012\"]\n # entries3 = list(collection.find({}, {\"raw_counts\": 0, \"play_counts\": 0, \"pause_counts\": 0, \"unique_counts\": 0, \"replay_counts\": 0, \"skip_counts\": 0, \"completion_counts\": 0}))\n\n # entries = entries1 + entries2 + entries3\n\n if len(entries):\n result = json.dumps(entries, default=json_util.default)\n else:\n result = \"\"\n print sys._getframe().f_code.co_name, \"COMPLETED\", (time.time() - start_time), \"seconds\"\n return result", "def parse_episode (self, episode, genres=None):\n mpaa = ''\n if episode.get('maturity', None) is not None:\n if episode['maturity'].get('board', None) is not None and episode['maturity'].get('value', None) is not None:\n mpaa = str(episode['maturity'].get('board', '').encode('utf-8')) + '-' + str(episode['maturity'].get('value', '').encode('utf-8'))\n\n return {\n episode['summary']['id']: {\n 'id': episode['summary']['id'],\n 'episode': episode['summary']['episode'],\n 'season': episode['summary']['season'],\n 'plot': episode['info']['synopsis'],\n 'duration': episode['info']['runtime'],\n 'title': episode['info']['title'],\n 'year': episode['info']['releaseYear'],\n 'genres': self.parse_genres_for_video(video=episode, genres=genres),\n 'mpaa': mpaa,\n 'maturity': episode['maturity'],\n 'playcount': (0, 1)[episode['watched']],\n 'rating': episode['userRating'].get('average', 0) if episode['userRating'].get('average', None) != None else episode['userRating'].get('predicted', 0),\n 'thumb': episode['info']['interestingMoments']['url'],\n 'fanart': episode['interestingMoment']['_1280x720']['jpg']['url'],\n 'poster': episode['boxarts']['_1280x720']['jpg']['url'],\n 'banner': episode['boxarts']['_342x192']['jpg']['url'],\n 'mediatype': {'episode': 'episode', 'movie': 'movie'}[episode['summary']['type']],\n 'my_list': episode['queue']['inQueue'],\n 'bookmark': episode['bookmarkPosition']\n }\n }", "def episode(self, title=None, episode=None):\n key = f'{self.key}/children'\n if title is not None and not isinstance(title, int):\n return self.fetchItem(key, Episode, title__iexact=title)\n elif episode is not None or isinstance(title, int):\n if isinstance(title, int):\n index = title\n else:\n index = episode\n return self.fetchItem(key, Episode, parentIndex=self.index, index=index)\n raise BadRequest('Missing argument: title or episode is required')", "def rule_episode_only(name: str, metadata: dict, **kwargs) -> dict:\n logger.debug(f\"Applying rule 'episode-only' to {name}\")\n try:\n metadata['episode'] = int(\n str(metadata['season']) + str(metadata['episode']))\n except KeyError:\n # Any episode number below 100 will raise... 
def rule_episode_only(name: str, metadata: dict, **kwargs) -> dict:
    logger.debug(f"Applying rule 'episode-only' to {name}")
    try:
        metadata['episode'] = int(
            str(metadata['season']) + str(metadata['episode']))
    except KeyError:
        # Any episode number below 100 will raise... therefore it's ignored
        pass
    metadata.pop('season', None)  # was metadata.copy().pop(...), a no-op on the copy

    logger.debug(f"Rule 'episode-only' OK for {name}")
    return {'metadata': metadata}

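# A worked example of rule_episode_only() above (values invented): season 2,
# episode 5 is folded into plain episode 25 and the season key is dropped.
meta = {'season': 2, 'episode': 5}
print(rule_episode_only('Some.Show.S02E05.mkv', meta))
# -> {'metadata': {'episode': 25}}
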
def loadDbIntoDf2(content):
    # Loading data into DF
    if content == 'trending':
        file = 'dataVideo.txt'
    elif content == 'music':
        file = 'dataVideoChallenge.txt'
    else:
        file = 'dataVideo.txt'
    with open(file, 'r') as f:
        videos_dict = json.load(f)
    df = pd.DataFrame.from_dict(videos_dict)
    # filter on challenge
    if content == 'music':
        df = df[df.musicId == "6745161928949106690"]
    return df

def get_movies(cinema_name):
    cinema_code = CINEMA_CODES[cinema_name]
    movies = {}
    uncaught = []
    genres = set(json_response(URLS['attributes'])['body']['dropdownConfig']['genres'])
    poster_json = json_response(URLS['posters'])
    # Get the movies names
    for poster in poster_json['body']['posters']:
        # Extract movie title from poster's url
        try:
            movie_name = re.sub('-', ' ', re.search(r'films/([a-z0-9\-]+)', poster['url']).group(1))
            movie_name = re.sub('(.*)(\s*(green|purple))', '\g<1>', movie_name).strip()
            if movie_name in {movie.title for movie in movies.values()}:
                continue
        except (AttributeError, IndexError):
            logger.warning("Could not find movie title of this url: {}".format(poster['url']))
            continue
        try:
            release_year = datetime.strptime(poster['dateStarted'].split('T')[0], "%Y-%m-%d").year
            movie_genres = genres.intersection(set(poster['attributes']))

            # If no Poster genres found in Planet, it's probably a 'fake' movie
            if not movie_genres:
                raise RuntimeError
            selected_movie = map_poster_to_matching_movie(movie_name, release_year)
            if selected_movie is None:
                raise RuntimeError
        except (IMDbParserError, RuntimeError):
            uncaught.append(movie_name)
            continue
        movies[poster['code']] = Movie(poster['code'],
                                       selected_movie.get('title'),
                                       poster['featureTitle'],
                                       selected_movie.get('rating'),
                                       selected_movie.get('votes'),
                                       selected_movie.get('year'),
                                       movie_genres,
                                       selected_movie.get('imdbID'),
                                       poster['url']
                                       )
    # Add screening dates
    dates = get_dates(cinema_code)
    for day in dates:
        movies_json = json_response(URLS['events'].format(cinema_code, day))
        for event in movies_json['body']['events']:
            if not MACBOOK.intersection(set(event['attributeIds'])):
                try:
                    movies[event['filmId']].add_date(event['eventDateTime'])
                except KeyError:
                    continue
    logger.warning(f"Couldn't find result(s) for movie(s): {uncaught}")
    return movies, uncaught

def search_from_sqlite(self, key):
    key = ('.*' + key + '.*',)
    conn = get_sqlite()
    c = conn.cursor()
    conn.create_function("REGEXP", 2, regexp)
    c.execute('SELECT * FROM vertices WHERE name REGEXP ? ', key)
    results = c.fetchall()

    return json.dumps([{
        'name': r[1],
        'size': r[3],
        'parent': r[2],
        'last_accessed': r[4],
        'last_modified': r[5]} for r in results])

def view_character_list(request):

    characters_data = Character.objects.values('id', 'display_name')

    return render_chaffers(
        request,
        'character_list.html',
        {'character_data': [json.dumps(character_data) for character_data in characters_data]}
    )

def insertEpisode(ep):
    if check("episodes", ep):
        return "episode exists"
    else:
        engine.execute(f"INSERT INTO episodes (episode) VALUES ('{ep}');")

def users_json(self, rows=None, sidx=None, _search=None, searchField=None,
               searchOper=None, searchString=None, page=None, sord=None, nd=None):
    t1 = time.clock()
    header = ["value", "flags", "source", "evidence_type", "creation_time", "time", "useby", "owner", "comment"]
    reslist = []
    genshi_tmpl = LoadGenshiTemplate(cherrypy.session.get('cur_session'), cherrypy.session.get('username'))
    cur_component = cherrypy.session.get('cur_component')
    cur_context = cherrypy.session.get('cur_context')
    if cur_component != 'None':
        #print "getting new"
        context = cur_context.split()
        um = cherrypy.session.get('um')
        reslist = um.get_evidence_new(context, cur_component)
        cherrypy.session['cur_component'] = 'None'
    else:
        #print "getting default"
        cherrypy.session['cur_component'] = 'firstname'
        reslist = um.get_evidence_new()

    #users_list = test_data_to_list(test_data)
    evdlist = []
    i = 0
    #{'comment': None, 'evidence_type': 'explicit', 'creation_time': 1322914468.889158, 'value': 'Bob',
    #'source': 'Jane', 'flags': [], 'time': None, 'owner': 'Jane', 'objectType': 'Evidence', 'useby': None}
    myEvd = []

    if type(reslist) is ListType:
        for res in reslist:
            print "Inside user_json "
            myEvd = [0]*10
            myEvd[0] = i
            for key, value in res.__dict__.items():
                #print "%s:%s"%(key, value)
                for item in header:
                    if item == key:
                        #print "key: %s %s--"%(item,key)
                        if key == 'creation_time' or key == 'time' or key == 'useby':
                            if value:
                                import datetime
                                value = datetime.datetime.fromtimestamp(int(value)).strftime('%d/%m/%Y %H:%M:%S')
                        elif key == 'flags':
                            if value:
                                value = ''.join(value)
                            else:
                                value = "None"
                        __index = header.index(item)
                        #print "%s in %d" %(value,__index+1)
                        myEvd[__index+1] = value
            evdlist.append(myEvd)
            i = i+1
            #print "Evidence: %d" %i
            #for val in myEvd:
            #    print val

        import my_jqGrid
        result_page = my_jqGrid.jqgrid_json(self, evdlist, header, rows=rows, sidx=sidx, _search=_search,
                                            searchField=searchField, searchOper=searchOper, searchString=searchString, page=page, sord=sord)

        t2 = time.clock()
        print 'user-json took %0.3fms' % ((t2-t1)*1000.0)
        write_log('notice', 'Show evidence list operation successful')

        return result_page

    else:
        #print reslist
        e = reslist
        write_log('error', 'Show evidence list Operation Failed; Error:' + str(e))
        modeltree = cherrypy.session.get('modeltree')
        return genshi_tmpl.greeting_template(e, "Evidencelist upload", modeltree)

def read_game_logs(file_path):

    if os.path.isfile(file_path):
        with open(file_path, "r") as read_file:
            log = json.load(read_file)
        # event_type = set([e["event"] for e in log])
        # the event types: command, text_message, set_attribute, join
        # print("event types", event_type)

        # sort all messages chronologically
        log.sort(key=lambda x: x["date_modified"])

        start = None
        end = None
        real_end = None  # when the game master says Congrats or you die, because the rest of the messages look like bugs...
        episode_list = []
        length = len(log)
        game_finished = False
        # Episodes are searched for between two start commands;
        # only the one where the done command has been issued is kept
        for i, l in enumerate(log):
            if "command" in l.keys():
                if l["command"] == "start":
                    if start == None:
                        start = i
                    elif end == None:
                        end = i
                if l["command"] == "done":
                    game_finished = True

            if l["user"]["id"] == 1 and l["event"] == "text_message" and type(l["message"]) is str and (
                    l["message"].startswith("Congrats") or l["message"].startswith(
                    "The rescue robot has not reached you")):
                real_end = i + 1  # +1 because we want to include this message in the log slice...
            if start is not None and end is not None:
                if game_finished:
                    episode_list.append(log[start:real_end])
                start = end
                end = None
                real_end = None
                game_finished = False

            if i + 1 == length:
                if start is not None and end is None and game_finished:
                    episode_list.append(log[start:real_end])

        score_list = {}
        for i, e in enumerate(episode_list):
            # the number of answers the avatar utters gives us the number of questions asked
            # num_questions = sum(
            #     [1 for m in e if m["user"]["name"] == "Avatar" and m["event"] == "text_message"])

            # Just sum every message ending with a question mark issued by the user...
            num_questions = sum([1 for m in e if m["user"]["name"] != "Avatar" and m["user"]["id"] != 1 and m[
                "event"] == "text_message" and type(m["message"]) is str and m["message"].endswith("?")])

            # user id 1 is always the game master; here we loop over the messages of the "real" player.
            # when we tell the avatar to change location we don't get an answer, which is why the
            # subtraction gives the number of orders; this does not include the order "done"
            # num_orders = sum(
            #     [1 for m in e if m["user"]["name"] != "Avatar" and m["user"]["id"] != 1 and m[
            #         "event"] == "text_message"]) - num_questions

            # Just sum every order of type "go west". Describe orders are not counted.
            num_orders = sum([1 for m in e if m["user"]["name"] != "Avatar" and m["user"]["id"] != 1 and m[
                "event"] == "text_message" and type(m["message"]) is str and (
                    "east" in m["message"].lower() or "north" in m["message"].lower() or "west" in m[
                    "message"].lower() or "south" in m["message"].lower() or "back" in m["message"].lower())])

            game_won = sum([1 for m in e if m["user"]["id"] == 1 and m[
                "event"] == "text_message" and type(m["message"]) is str and m["message"].startswith("Congrats")]) > 0

            # Work-around: the final reward giving +1.0 on success and -1.0 on loss happens after the
            # messages saying "congratulations" or "you die horribly", just repeating the message from
            # when the game starts. We had to exclude that message to segment finished games, which is
            # why we have to add these rewards here manually...

            final_reward = -1.0
            if game_won:
                final_reward = 1.0
            score_list[i] = {"score": sum([m["message"]["observation"]["reward"] for m in e if
                                           "message" in m.keys() and type(m["message"]) is dict]) + final_reward,
                             "num_questions": num_questions, "num_orders": num_orders, "game_session": e,
                             "game_won": game_won}

        return score_list

    else:
        raise Exception(f"{file_path} is not a correct file path.")

def quickSearch():
    calDB = db.TinyDB('../calDB.json')
    pars = db.Query()
    recList = calDB.search(pars.key.matches("wf"))
    print len(recList)
    for idx in range(len(recList)):
        key = recList[idx]['key']
        vals = recList[idx]['vals']
        print key
        for ch in vals:

            print ch, vals[ch]
    return

def text_json(request):
    query = str()

    if request.method == 'GET':
        query = request.GET.get('q')

    results = list()

    for c in search.tokenSearch(query):
        tmp = {'category': '課程代號', 'title': c.token}
        results.append(tmp)

    for c in search.zhNameSearch(query):
        tmp = {'category': '課程名稱', 'title': c.name_zh}
        results.append(tmp)

    for c in search.engNameSearch(query):
        tmp = {'category': 'Course Name', 'title': c.name_eng}
        results.append(tmp)

    for t in Teacher.objects.filter(name_zh__icontains=query):
        tmp = {'category': '老師', 'title': t.name_zh}
        results.append(tmp)

    for d in Department.objects.filter(name_zh__icontains=query):
        tmp = {'category': '開課單位', 'title': d.name_zh}
        results.append(tmp)

    tmp = {'results': results}

    return HttpResponse(json.dumps(tmp))

def get_exercise(name):
    # Get db object and exercises table
    db = get_db()
    exercises = db.exercises

    # Search database for exercises with matching name
    cursor = exercises.find({"name": str(name)})
    if cursor.count() == 0:
        raise APIException(status_code=404, message='exercise with specified name not found')

    context = {}
    for document in cursor:
        temp = document
        temp['exercise_id'] = str(document['_id'])
        del temp['_id']
        context = temp

    context['url'] = "/api/v1/exercises/" + name + "/"
    return flask.jsonify(**context)

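# search_from_sqlite() above registers a REGEXP function for SQLite, but the
# helper itself falls outside this excerpt; a minimal assumed implementation.
# SQLite rewrites `X REGEXP Y` as regexp(Y, X), so the pattern comes first.
import re

def regexp(pattern, value):
    return value is not None and re.search(pattern, value) is not None
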
def list_show(self, alias):
    re_m = re.match(r'^(.*\D)(\d{1,2}){0,1}$', alias)
    if not re_m:
        print('Bad format for list - "{0}"'.format(alias))

    season = -1
    if re_m.lastindex == 2:
        season = int(re_m.group(2))
    show_id = self.id_by_title(
        self.title_by_alias(re_m.group(1), no_exit=True)
    )
    epis = self.load_episodes(show_id)
    episodes = epis['episodes']
    list_map = {}
    for epi_id in episodes:
        next_episode = episodes[epi_id]
        if season in [-1, next_episode['seasonNumber']]:
            list_map[
                next_episode['seasonNumber'] * 1000 +
                next_episode['episodeNumber']
            ] = next_episode

    watched = self.load_watched(show_id)
    current_season = -1
    for epi_num in sorted(list_map.keys()):
        next_episode = list_map[epi_num]
        next_season = next_episode['seasonNumber']
        if current_season != next_season:
            current_season = next_season
            print('{0} Season {1}:'.format(
                tr_out(epis['title']), current_season
            ))
        comment = ''
        epi_id = str(next_episode['id'])
        if epi_id in watched:
            comment = 'watched ' + watched[epi_id]['watchDate']
        print('  "{0}" (s{1:02d}e{2:02d}) {3}'.format(
            tr_out(next_episode['title']),
            next_episode['seasonNumber'],
            next_episode['episodeNumber'],
            comment
        ))

def add_episode(self, ep):
    # make da season
    ses = self._add_season(ep)
    dvdses = self._add_season(ep, dvd=True)
    self._add_episode(ep, ses)
    self._add_episode(ep, dvdses, dvd=True)

def on_task_output(self, task, config):
    series = {}
    movies = {}
    for entry in task.accepted:
        if all(field in entry for field in ['tvdb_id', 'series_name', 'series_season', 'series_episode']):
            eid = str(entry['tvdb_id'])
            sno = str(entry['series_season'])
            eno = entry['series_episode']
            show = series[eid] if eid in series else {'name': entry['series_name'], 'seasons': {}}
            if not sno in show['seasons']:
                show['seasons'][sno] = []
            if not eno in show['seasons'][sno]:
                show['seasons'][sno].append(eno)
            series[eid] = show  # store back so first-time entries are kept
        elif all(field in entry for field in ['imdb_id', 'movie_name']):
            movies[entry['imdb_id']] = entry['movie_name']
    if series:
        for eid, show in series.items():
            dest = os.path.join(config, 'series.watched.%s.json' % eid)
            data = {'name': show['name'], 'rating': 5}
            if os.path.exists(dest):
                with open(dest, 'r') as f:
                    data = json.load(f)
            for season, episodes in show['seasons'].items():
                lst = data[season] if season in data else []
                data[season] = list(set(lst + episodes))
            text = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
            with open(dest, 'w') as f:
                f.write(text)
        self.log.info('Added watched episodes to Uoccin')
    if movies:
        dest = os.path.join(config, 'movies.watched.json')
        data = {}
        if os.path.exists(dest):
            with open(dest, 'r') as f:
                data = json.load(f)
        n = 0
        for eid, name in movies.items():
            if not eid in data:
                data[eid] = {'name': name, 'rating': 5}
                n += 1
        if n > 0:
            text = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
            with open(dest, 'w') as f:
                f.write(text)
            self.log.info('Added watched movies to Uoccin')

def js_exercises(self):

    with sqlite3.connect(self.db_path) as conn:
        conn.row_factory = lambda cursor, row: Exercise(row[1], row[2])

        db_cursor = conn.cursor()

        db_cursor.execute("""
            select e.id,
                   e.name,
                   e.language
            from exercises e
            where e.language = "JavaScript"
            order by e.language;
        """)

        js_exercises = db_cursor.fetchall()
        print('\n***JavaScript Exercises***')

        for exercise in js_exercises:
            print(exercise)

def scraping_episodes(self, serie_data, episodes_list):
    episodes_data = []
    for episode in episodes_list:
        # This dict is built so the fields can be located in the JSON
        # and processed more easily later on
        epi_details = episode[0]['body']['details']
        epi_dict = {
            'ParentId': serie_data.id,
            'ParentTitle': serie_data.clean_title,
            'Id': episode[0]['id'],
            'Title': epi_details['title'],
            'Type': 'episode',
            'JSON': {
                'Synopsis': epi_details['description'],
                'Metadata': epi_details['metadata'].replace('\xa0', ''),
                'Rating': epi_details['localizedRating']['value'],
                'Image': epi_details,
                'Groups': episode[1]['body']['groups'],
                'SeasonAndNumber': episode[2]['body']['metadata'],
                'isFree': episode[0]['body']['isFree']
            }
        }
        payload_epi = self.build_payload(epi_dict)
        # If the series is an original, so are its episodes
        payload_epi.is_original = serie_data.is_original
        episodes_data.append(payload_epi)
        payload_epi = payload_epi.payload_episode()
        Datamanager._checkDBandAppend(
            self, payload_epi, self.scraped_epi, self.payloads_epi,
            isEpi=True
        )
    return episodes_data

def search_by_person(name):
    ids = get_id_by_name(name)
    if not ids:
        return JSONResponse({})  # No actors with the name found
    url = tmdb_api("discover/movie") + "&with_cast=" + '|'.join(map(str, ids))
    response = json.load(urllib2.urlopen(url))
    return JSONResponse(response)

def getEpisodeFromName(flightName):
    try:
        return EPISODE_MODEL.get().objects.get(shortName=flightName)
    except:
        return None

def handle_characters(curs, collection):
    character_list = curs.execute("""SELECT * FROM charactercreator_character;""")
    for character in character_list:
        # need to create a different cursor because the main one is still running,
        # and reusing it would close the whole thing before the loop finishes
        _, sl_curs = connect_to_sldb()
        # item_list = sl_curs.execute(
        #     f"""SELECT ai.name FROM charactercreator_character_inventory as cii
        #     LEFT JOIN armory_item as ai
        #     ON cii.item_id = ai.item_id
        #     WHERE character_id={character[0]};
        #     """)
        inventory = sl_curs.execute(
            f"""SELECT name, item_ptr_id
            FROM
                (SELECT * FROM charactercreator_character_inventory as cii
                LEFT JOIN armory_item as ai
                ON cii.item_id = ai.item_id) as a
            LEFT JOIN armory_weapon as aw
            ON a.item_id=aw.item_ptr_id
            WHERE character_id={character[0]};
            """).fetchall()

        character_doc = {
            "name": character[1],
            "level": character[2],
            "exp": character[3],
            "hp": character[4],
            "strength": character[5],
            "intelligence": character[6],
            "dexterity": character[7],
            "wisdom": character[8],
            "items": [item[0] for item in inventory],
            "weapons": [item[0] for item in inventory if item[1] != None]
        }
        sl_curs.close()  # close that new cursor
        collection.insert_one(character_doc)

    # # A codier way to do it
    # schema = curs.execute(
    #     "PRAGMA table_info(charactercreator_character)").fetchall()[1:]
    # for character in characters_list:
    #     character_doc = {}
    #     for index, item_tuple in enumerate(schema):
    #         character_doc[item_tuple[1]] = character[index + 1]
    #
    #     collection.insert_one(character_doc)

def queryDB(db):
    def printunichars(row):
        """Helper function for printing utf-8 chars"""
        print("Title:")
        print(row[0].encode('utf-8'))
        print("Body:")
        print(row[1].encode('utf-8'))
        print("Ref:")
        print(row[2].encode('utf-8'))
        print("Url:")
        print(row[3].encode('utf-8'))

    cursor = db.cursor()
    cursor.execute("SET NAMES 'utf8';")
    print("SET NAMES utf8;")
    sql = "SELECT Title FROM pages LIMIT 100"
    cursor.execute(sql)
    rows = cursor.fetchall()
    for row in rows:
        printunichars(row)  # was printchars(), which is undefined here

def parse_episode(filename):
    print_info('Extracting episode from {0}'.format(filename))
    for regex in EPISODE_NUM_REGEXS:
        m = re.search(regex, filename)

        if m is None:
            continue

        extracted_ep = m.group('Episode').lower()
        print_info('Extracted episode: {0}'.format(extracted_ep))

        if '-' in extracted_ep:
            print_info('Multiple episodes found')
            tokens = extracted_ep.split('-e')
            first_token = tokens[0]
            last_token = tokens[len(tokens)-1]
            return parse_episode(first_token) + '-' + parse_episode(last_token)
        else:
            ep_num = int(extracted_ep)
            if ep_num is not None and ep_num > 0:
                print_info('Episode might be: {0}'.format(ep_num))
                return 'E' + format_num(ep_num)

    return None

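# Worked examples for parse_episode() above (filenames invented; the exact
# behaviour depends on EPISODE_NUM_REGEXS, which is defined outside this
# excerpt):
print(parse_episode('Show.S01E03.mkv'))      # expected: 'E03'
print(parse_episode('Show.S01E03-E04.mkv'))  # expected: 'E03-E04' via the multi-episode branch
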
Try again\")\n\n con, results = psycopg2.connect(**self.config), None\n cur = con.cursor(cursor_factory=RealDictCursor)\n\n try:\n cur.execute(\"select * from {} WHERE title='{}'\".format(self.table, self.title))\n results = cur.fetchone()\n except Exception as e:\n print(e)\n\n con.close()\n return results", "def load_episode_info(episode_file, team_name):\n with open(episode_file) as f:\n episode_json = json.load(f)\n\n replay = json.loads(episode_json[\"replay\"])\n\n # Determine whether team of interest is player 0 or 1.\n # Raises exception if neither team name matches.\n team_idx = replay[\"info\"][\"TeamNames\"].index(team_name)\n other_team_idx = (team_idx + 1) % 2\n\n # Reward is defined in replay[\"specification\"] as:\n # \"The amount of player owned halite (equal to players[index][0]) if the player\n # has not been eliminated, else step_eliminated - episode_steps - 1.\"\n cur_team_reward = replay[\"rewards\"][team_idx] or 0 # Rewards are occasionally None, not sure why.\n other_team_reward = replay[\"rewards\"][other_team_idx] or 0\n cur_team_won = cur_team_reward > other_team_reward\n\n cur_team_score = replay[\"steps\"][0][0][\"observation\"][\"players\"][team_idx][0]\n other_team_score = replay[\"steps\"][0][0][\"observation\"][\"players\"][other_team_idx][0]\n\n max_steps = replay[\"configuration\"][\"episodeSteps\"]\n steps = len(replay[\"steps\"])\n ended_early = steps < max_steps\n return cur_team_won, ended_early, cur_team_score, other_team_score, cur_team_reward, other_team_reward", "def get_american_life(epno, directory = '/mnt/media/thisamericanlife', extraStuff = None):\n\n try:\n title, year = get_americanlife_info(epno, extraStuff = extraStuff)\n except ValueError as e:\n print(e)\n print('Cannot find date and title for This American Life episode #%d.' % epno)\n return\n\n if not os.path.isdir(directory):\n raise ValueError(\"Error, %s is not a directory.\" % directory)\n outfile = os.path.join(directory, 'PRI.ThisAmericanLife.%03d.mp3' % epno) \n urlopn = 'http://www.podtrac.com/pts/redirect.mp3/podcast.thisamericanlife.org/podcast/%d.mp3' % epno\n\n resp = requests.get( urlopn, stream = True )\n if not resp.ok:\n urlopn = 'http://audio.thisamericanlife.org/jomamashouse/ismymamashouse/%d.mp3' % epno\n resp = requests.get( urlopn, stream = True )\n if not resp.ok:\n print(\"Error, could not download This American Life episode #%d. 
Exiting...\" % epno)\n return\n with open( outfile, 'wb') as openfile:\n for chunk in resp.iter_content(65536):\n openfile.write( chunk )\n \n mp3tags = ID3( )\n mp3tags['TDRC'] = TDRC(encoding = 0, text = [ u'%d' % year ])\n mp3tags['TALB'] = TALB(encoding = 0, text = [ u'This American Life' ])\n mp3tags['TRCK'] = TRCK(encoding = 0, text = [ u'%d' % epno ])\n mp3tags['TPE2'] = TPE2(encoding = 0, text = [u'Chicago Public Media'])\n mp3tags['TPE1'] = TPE1(encoding = 0, text = [u'Ira Glass'])\n mp3tags['TIT2'] = TIT2(encoding = 0, text = [u'#%03d: %s' % ( epno, title ) ])\n mp3tags['TCON'] = TCON(encoding = 0, text = [u'Podcast'])\n mp3tags.save( outfile )", "def get_ep_collection_with_episode(self, episode):\n sub_id = self._get_sub_id_for_ep(episode.id_)\n html = self._get_html_for_subject_eps(sub_id)\n ep_collection = BangumiEpisodeCollection.from_html_with_ep(episode,\n html)\n ep_collection.session = self\n return ep_collection", "def parse_anime_episode(filename):\n print_info('Extracting episode from {0}'.format(filename))\n for regex in ANIME_EPISODE_NUM_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_ep = m.group('Episode')\n print_info('Extracted episode: {0}'.format(extracted_ep))\n\n ep_num = int(extracted_ep)\n if ep_num is not None and ep_num > 0:\n print_info('Episode might be: {0}'.format(ep_num))\n return 'E' + format_num(ep_num)\n\n return None", "def get_playable_entries(table, column, owner, no_participants=0, uuid=None, planned_date=None):\n\n db = choose_database(\"datadb\")\n\n if table == \"games\":\n where = \"owner LIKE \\'%\" + owner + \"%\\' AND (playercount>=\" + str(no_participants) + \" OR playercount=\\'X\\')\"\n if planned_date:\n delta = datetime.timedelta(weeks=2)\n not_played_after = planned_date - delta\n not_played_after = not_played_after.date()\n add_to_where = \" AND (last_played<\\'\" + str(not_played_after) + \"\\' OR last_played IS NULL)\"\n where += add_to_where\n elif table == \"expansions\":\n where = \"owner LIKE \\'%\" + owner + \"%\\' AND basegame_uuid=\\'\" + uuid + \"\\'\"\n result = select_columns(db, table, column, condition=where)\n\n return result", "def get_by_character(self, character_id):\n sql = \"SELECT {0} FROM people_{0} WHERE people=?\".format(self.conveyance_type)\n try:\n query_result = self.cursor.execute(sql, (str(character_id),))\n except Exception as e:\n raise Exception(\n \"An error occurred while getting a character %s in the database: query: %s - message: %s\"\n % (self.conveyance_type, sql, e)\n )\n\n rows = query_result.fetchall()\n starships = [s_id for _, s_id in rows]\n\n return starships", "def on_episode_end(self, episode, logs):\n template = '(( episode {0}: steps: {1} --- target_reached: {2}, target_reached_in_steps: {3} --- reward: {4:.3f} ))'\n variables = [\n episode + 1,\n logs['nb_steps'],\n logs['target_reached'],\n logs['target_reached_in_steps'],\n logs['episode_reward'],\n ]\n print(template.format(*variables))", "def onto(disgenet, edam):\n disgenet = disgenet.replace(' ', '+').replace(\"'\", \"%27\")\n edam = edam.replace(' ', '+').replace(\"'\", \"%27\")\n disid = subprocess.Popen(\n [\"curl -s -k http://127.0.0.1:3030/ds/query -X POST --data \" +\n \"'query=PREFIX+rdf%3A+%3Chttp%3A%2F%2Fwww.w3.org%2F1999%2F02%2F22-rdf-syntax-ns%23%3E%0A\" +\n \"PREFIX+dcterms%3A+%3Chttp%3A%2F%2Fpurl.org%2Fdc%2Fterms%2F%3E%0A\" +\n \"PREFIX+ncit%3A+%3Chttp%3A%2F%2Fncicb.nci.nih.gov%2Fxml%2Fowl%2FEVS%2FThesaurus.owl%23%3E%0A\" +\n 
\"SELECT+DISTINCT+%0A%09%3Fdisease+%0AFROM+%3Chttp%3A%2F%2Frdf.disgenet.org%3E+%0AWHERE+%7B%0A++\" +\n \"SERVICE+%3Chttp%3A%2F%2Frdf.disgenet.org%2Fsparql%2F%3E+%7B%0A++++\" +\n \"%3Fdisease+rdf%3Atype+ncit%3AC7057+%3B%0A++++%09dcterms%3Atitle+%22\" + disgenet +\n \"%22%40en+.%0A%7D%0A%7D' -H 'Accept: application/sparql-results+json,*/*;q=0.9'\"],\n stdout=subprocess.PIPE, shell=True).communicate()[0]\n edam_id = subprocess.Popen([\"curl -s 'http://www.ebi.ac.uk/ols/api/search?q=\" + edam + \"&ontology=edam' 'Accept: application/json'\"],\n stdout=subprocess.PIPE, shell=True).communicate()[0]\n try:\n jdisease = json.loads(disid)\n umllist = []\n umls = jdisease['results']['bindings'][0]['disease']['value']\n except (IndexError, ValueError):\n umls = \"No disgenet record\"\n try:\n jedam = json.loads(edam_id)\n eid = jedam['response']['docs'][0]['iri']\n except (IndexError, ValueError):\n eid = \"No EDAM record\"\n return umls, eid", "def extract_episode_details(season, episode_response):\n try:\n rating = float(episode_response['imdbRating'])\n except ValueError:\n # Rating may come through as 'N/A' if episode has not aired\n rating = None\n\n return {\n 'title': episode_response['Title'],\n 'episode': int(episode_response['Episode']),\n 'season': season,\n 'ratings': {'imdb': rating},\n }", "def get_characters(self, sid):\n\n\t\twith open(self.get_fpath(sid)) as f:\n\t\t\treturn json.load(f)", "def beer(name):\n return jsonify(Beer.query.filter_by(name=name).first().serialize())", "def read_from_db():\n try:\n with open(OFFERS_FILE, encoding='utf8') as f:\n return json.load(f)\n except FileNotFoundError:\n return []", "def get_character_info(self, name):\n url = \"%s?%s\" % (self._base_url, urlencode({'name': name}))\n q = Request(url)\n q.add_header('User-Agent', 'curl/7.51.0')\n q.add_header('Accept', 'application/json')\n\n result = urlopen(q).read().decode('utf-8')\n data = json.loads(result)\n\n return data", "def episode_string(self, episode):\n cp, _ = zip(*episode.states)\n\n car_positions = dict()\n for i, p in enumerate(cp):\n car_positions[p] = i\n\n x, y = zip(*self.track_positions)\n output = \"\"\n y_rng = range(max(y) + 1)\n for i in range(max(x) + 1):\n row = \"\"\n for j in y_rng:\n pos = i, j\n if pos in car_positions:\n row += str(car_positions[pos])\n elif pos in self.start_positions:\n row += self.format_dict['start']\n elif pos in self.finish_positions:\n row += self.format_dict['finish']\n elif pos in self.track_positions:\n row += self.format_dict['track']\n else:\n row += self.format_dict['border']\n row += self.format_dict['sep']\n output = row + \"\\n\" + output\n return output", "def get_playable_entries_by_category(table, column, owner, category, no_participants=0, planned_date=None):\n\n db = choose_database(\"datadb\")\n mycursor = db.cursor()\n\n where = \"owner LIKE \\'%\" + owner + \"%\\' AND (playercount>=\" + str(no_participants) + \" OR playercount=\\'X\\')\"\n if planned_date:\n delta = datetime.timedelta(weeks=2)\n not_played_after = planned_date - delta\n not_played_after = not_played_after.date()\n add_to_where = \" AND (last_played<\\'\" + str(not_played_after) + \"\\' OR last_played IS NULL)\"\n where += add_to_where\n\n on = table + \".game_uuid=categories.`\"+category + \"` AND \" + where # use `` bc. 
categories have spaces in them\n sql = \"SELECT \" + table + \".\" + column + \" FROM \" + table + \" INNER JOIN categories ON \" + on\n\n mycursor.execute(sql)\n result = mycursor.fetchall()\n\n return result", "async def get_record(item: Item):\n X_new = item.to_df()\n item_str = item.to_string()\n project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_query_given_project(project_code)\n return return_json", "def new_line(script_l, character, episode):\n if up.check(\"characters\", character):\n char_id = up.giveId(\"characters\", character)\n else:\n up.insertCharacter(character)\n char_id = up.giveId(\"characters\", character)\n if up.check(\"episodes\", episode):\n ep_id = up.giveId(\"episodes\", episode)\n else:\n up.insertEpisode(episode)\n ep_id = up.giveId(\"episodes\", episode)\n if up.check(\"script\", script_l) and up.check(\"characters\", character) and up.check(\"episodes\", episode):\n return \"line exists\"\n else:\n engine.execute(f\"\"\"\n INSERT INTO script (script_l, characters_char_id, episodes_ep_id) VALUES\n (\"{script_l}\", \"{char_id}\", \"{ep_id}\");\n \"\"\")\n return f\"successfully loaded: {character},{script_l},{episode}\"", "def filter_aired_episodes(self, episodes):\n #Set now one day in the past or check download\n now = datetime.datetime.now() - datetime.timedelta(days=1)\n aired_episodes = [episode for episode in episodes if\n episode.get_first_aired() and\n datetime.datetime.strptime(episode.get_first_aired(),\n \"%Y-%m-%d\")\n <= now]\n return aired_episodes", "def fetch_episodes_by_season(self, params):\n raw_episode_list = self.netflix_session.fetch_episodes_by_season(\n season_id=params.get('season_id')[0])\n if 'error' in raw_episode_list:\n return raw_episode_list\n episodes = self.netflix_session.parse_episodes_by_season(\n response_data=raw_episode_list)\n return episodes", "def select(self, eng_category):\r\n sql_select_query = \"SELECT Name, URL, Ingredients FROM \"+ str(eng_category)\r\n self.mycursor.execute(sql_select_query)\r\n records = self.mycursor.fetchall()\r\n \r\n return records", "def handle_item(curs):\n # inventory_char = {'1':[]}\n weapon_list = curs.execute(\n f\"\"\"SELECT name, item_ptr_id\n FROM\n (SELECT * FROM charactercreator_character_inventory as cii\n LEFT JOIN armory_item as ai\n ON cii.item_id = ai.item_id) as a\n LEFT JOIN armory_weapon as aw\n ON a.item_id=aw.item_ptr_id\n WHERE character_id=5;\n \"\"\")\n inventory_char = [weapon[0] for weapon in weapon_list if weapon[1] != None]\n # for weapon in weapon_list:\n # if weapon[1] != None:\n # inventory_char['1'].append(weapon[0])\n return inventory_char", "def show_item_json(category, item):\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item.replace('-', ' '))\n .one())\n return jsonify(item=[item.serialize])", "def generate_episode(self, episode_name: typing.Optional[typing.List[str]] = None) -> typing.List[np.ndarray]:\n x = []\n\n if episode_name is None:\n\n alphabet = random.sample(population=self.data.keys(), k=1)[0]\n\n max_n_way = min(len(self.data[alphabet]), self.max_num_cls)\n\n assert self.min_num_cls <= max_n_way\n\n n_way = random.randint(a=self.min_num_cls, b=max_n_way)\n n_way = min(n_way, self.max_num_cls)\n\n characters = random.sample(population=self.data[alphabet].keys(), k=n_way)\n else:\n alphabet = episode_name[0]\n characters = episode_name[1:]\n\n for character in characters:\n x_temp = 
random.sample(population=self.data[alphabet][character], k=self.k_shot)\n if self.load_images:\n x.append(x_temp)\n else:\n x_ = [_load_image(\n img_url=os.path.join(self.root, alphabet, character, img_name),\n expand_dim=self.expand_dim\n ) for img_name in x_temp]\n x.append(x_)\n\n return x", "def get_dict(char):\n character = {}\n if char.player_ob.is_staff or char.db.npc:\n return character\n character = {\n \"name\": char.key,\n \"social_rank\": char.item_data.social_rank,\n \"fealty\": str(char.item_data.fealty),\n \"house\": char.item_data.family,\n \"relations\": get_relations(char),\n \"gender\": char.item_data.gender,\n \"age\": char.item_data.age,\n \"religion\": char.db.religion,\n \"vocation\": char.item_data.vocation,\n \"height\": char.item_data.height,\n \"hair_color\": char.item_data.hair_color,\n \"eye_color\": char.item_data.eye_color,\n \"skintone\": char.item_data.skin_tone,\n \"description\": char.perm_desc,\n \"personality\": char.item_data.personality,\n \"background\": char.item_data.background,\n \"status\": char.roster.roster.name,\n \"longname\": char.item_data.longname,\n }\n try:\n if char.portrait:\n character[\"image\"] = char.portrait.image.url\n except (Photo.DoesNotExist, AttributeError):\n pass\n return character", "def findConnections(userToLookForConnections):\n import json\n with open(\"filter.json\", \"r\") as opened:\n filtering = json.load(opened) #Filter that contains all the excellent players from 2011 to 2017. Type: DICT\n for teamName, squadList in filtering.items():\n if userToLookForConnections in squadList:\n print(\"ciao\")\n #crea arco\n return None", "def search_dish_name(text):\n\n # timing information, can delete later.\n start = dt.datetime.now()\n\n results = {}\n if type(text) != unicode:\n text = text.decode('utf-8')\n if len(text) > 10:\n # Most dish names are 3-5 characters. 
\n # If Tesseract returned more than 10 characters, something probably went wrong.\n print \"Input text is too long.\"\n return None\n else:\n # Find a matching dish, if it exists.\n match = Dish.find_match(text)\n if match:\n # If result is found, return JSON representation of dish.\n results = match.get_json()\n start = time_elapsed(\"Dish lookup\", start)\n else:\n # If no dish is found, return translation data and similar dishes, if they exist.\n translation = translate(text)\n start = time_elapsed(\"Translation\", start)\n results['translation'] = translation\n\n # Find similar dishes and add to results.\n if len(text) > 1:\n similar_dishes = Dish.find_similar(text)\n start = time_elapsed(\"Similar dish lookup\", start)\n similar_json = [] \n for similar_dish in similar_dishes:\n dish_data = similar_dish.get_json_min()\n similar_json.append(dish_data)\n\n if similar_json != []:\n results['similar'] = similar_json\n\n return results", "def test_load_special_chars_5(query_factory):\n text = \"what christmas movies are , showing at {{8pm|sys_time}|range}\"\n\n processed_query = markup.load_query(text, query_factory)\n\n assert len(processed_query.entities) == 1\n\n entity = processed_query.entities[0]\n\n assert entity.span == Span(42, 44)\n assert entity.normalized_text == \"8pm\"", "def __init__(self, number, json):\n\n self.number = number\n self.episodes = []\n for episode in json:\n self.episodes.append(Episode(episode))", "def query(self, c, slot_number, query_str):\n dev = self.selectedDevice(c)\n #yield dev.write_line(\"TERM LF\"+LF)\n yield dev.write_line(query_str)\n query_resp = yield dev.read_line()\n returnValue(query_resp)", "def get_latest_episode(directory: Path, show_id: int=799, episodes: int=1):\n\n episodes_url = 'https://api.transistor.fm/v1/episodes/'\n params = {\"show_id\": show_id}\n r = httpx.get(episodes_url, headers=header, params=params)\n print(r.json()['data'][0])\n\n for episode in r.json()['data'][:episodes]:\n episode_attrs = episode['attributes']\n title = episode_attrs['title']\n published_date = episode_attrs['published_at']\n summary = episode_attrs['summary']\n embed_url = episode_attrs['embed_html']\n image_url = episode_attrs.get(\n 'image_url',\n 'https://imagekit.io/cxazzw3yew/pit-logo-v5.jpg',\n )\n content = f\"\"\"title: {title}\ndate: {published_date}\nimage: {image_url}\n\n{summary}\n{embed_url}\"\"\"\n output = directory.joinpath(slugify(title)).with_suffix(\".md\")\n output.write_text(content)", "def scrapPage(idSeries, pageIMDB, credit=False, dicChars=None):\n\n urlIDMB = requests.get(pageIMDB).text\n soup = BeautifulSoup(urlIDMB, 'lxml')\n seriesData = \"\"\n creditsData = \"\"\n\n nbSeasons = len(soup.find(id=\"bySeason\").find_all('option')) + 1\n\n for season in range(1, nbSeasons):\n linkSeason = pageIMDB + \"?season=\" + str(season)\n urlIDMB = requests.get(linkSeason).text\n soup = BeautifulSoup(urlIDMB, 'lxml')\n\n table = soup.find('div', {'class': 'eplist'})\n episodesTable = table.find_all('div', class_=\"list_item\")\n\n for episode in episodesTable:\n infos = episode.find('div', {'class': 'info'})\n nbEpisode = int(infos.find('meta').get('content'))\n link = infos.find('a')\n title = link.get('title')\n\n if \"Episode #\" not in title:\n link = link.get('href').split('?')[0]\n imdbLink = \"https://www.imdb.com\" + link\n seasonStr = f\"{season:02d}\"\n epStr = f\"{nbEpisode:02d}\"\n\n epNorm = idSeries + '.Season' + seasonStr + '.Episode' + epStr\n\n epString = epNorm + ',' + title + ',' + imdbLink + ','\n\n if credit:\n 
epCast = getEpCast(imdbLink, dicChars)\n creditsData += epNorm + ',' + epCast + '\\n'\n\n seriesData += epString + '\\n'\n\n return seriesData, creditsData", "def webquery(ra=None, dec=None, datetime=None, cachefname=None,\n rm=120, verbose=False):\n\n # If the user hasn't specified a full RA, DEC, and time, complain.\n if (ra is None or dec is None or datetime is None):\n print \"SkybotObject.webquery(): I need to know where to look!\"\n return [ ]\n \n # Call wwwget to query Skybot website and retrieve data as ASCII.\n if verbose:\n print \"Querying Skybot database with RA = {0:.5f}, DEC = {1:.5f}\"\\\n .format(ra, dec)\n url = \"http://vo.imcce.fr/webservices/skybot/skybotconesearch_query.php\"\n params = \\\n [\n \"-ep={0}\".format(datetime), # 2011-08-25T14:15:31\n \"-ra={0:.5f}\".format(ra), # 300.00000 [degrees]\n \"-dec={0:.5f}\".format(dec), # -35.00000 [degrees]\n \"-rm={0:f}\".format(rm), # 120 [arcminutes]\n \"-mime=text\",\n \"-output=basic\",\n \"-filter=1\",\n \"-loc=260\", # Siding Spring\n \"-from=skymapper\"\n ]\n cmd = \"wwwget '{0}?{1}'\".format(url,\"&\".join(params))\n if verbose: print cmd\n\n # Run the command and retrieve the results\n proc = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.STDOUT)\n (ascii_query, stderrstr) = proc.communicate()\n objlist = [ ]\n for line in ascii_query.split('\\n'):\n try: objlist.append(SkybotObject(line.strip()))\n except BadInit: continue\n if cachefname != None:\n if verbose:\n print \"Writing results of query to {0}\".format(cachefname)\n SkybotObject.write_ascii_file(cachefname, objlist)\n return objlist", "def get_all_players():\n players = {}\n\n for char in list(string.ascii_uppercase):\n req = requests.get(\n 'http://www.euroleague.net/competition/players?listtype=alltime&letter=' + char\n )\n\n soup = BeautifulSoup(req.text, 'html5lib')\n\n mydivs = soup.findAll('div', {'class': 'items-list'})\n\n for div in mydivs:\n itemdivs = soup.findAll('div', {'class': 'item'})\n\n\n for div in itemdivs:\n links = div.findAll('a')\n for index, link in enumerate(links):\n if index % 2 == 0:\n player = link.text.replace(',', '').strip()\n link['href'] = link['href'].replace('?', '')\n result = re.findall(\n '/competition/players/showplayerpcode=(.*)&seasoncode=', link['href']\n )\n code = result[0]\n players[code] = player\n \n return players", "def getEpisodeDict(mediaId):\n\tif str(mediaId) not in Dict['episodes']:\n\t\t# get brutal\n\t\trecoverEpisodeDict(mediaId)\n\t\t\n\treturn Dict['episodes'].get(str(mediaId))" ]
[ "0.6161546", "0.61566335", "0.58697116", "0.531941", "0.51938283", "0.5191531", "0.51225615", "0.50665915", "0.49034274", "0.48820502", "0.48670354", "0.48493454", "0.48400128", "0.48004526", "0.47973293", "0.47908667", "0.47580832", "0.47565988", "0.47521466", "0.47492182", "0.4741195", "0.4740418", "0.4724786", "0.47156763", "0.46878895", "0.4683128", "0.46818048", "0.46757242", "0.46561244", "0.4648763", "0.46486652", "0.46358716", "0.46340284", "0.4632888", "0.46262866", "0.46191126", "0.4618856", "0.4614616", "0.4580528", "0.45462847", "0.45439112", "0.45119765", "0.45048288", "0.44854227", "0.44840696", "0.44720477", "0.4444789", "0.44444594", "0.44439292", "0.44413713", "0.44339776", "0.4418939", "0.44188744", "0.44167575", "0.44132394", "0.44086364", "0.44085693", "0.44056588", "0.44050795", "0.439785", "0.43945688", "0.43894184", "0.4386093", "0.43723422", "0.43720317", "0.43631452", "0.43581483", "0.43390822", "0.4334169", "0.43232346", "0.43175185", "0.43136254", "0.43134496", "0.4313377", "0.4304001", "0.43031535", "0.4303139", "0.43022087", "0.42907083", "0.42886207", "0.42807934", "0.42760897", "0.42668524", "0.42595583", "0.425605", "0.42548415", "0.42507964", "0.42466152", "0.42412564", "0.4238951", "0.42372507", "0.42369756", "0.4231569", "0.42286474", "0.42226663", "0.42214346", "0.42213887", "0.42183214", "0.4215642", "0.42138758" ]
0.7129047
0
Queries the database for all script lines. Takes no arguments; returns a JSON with all the lines.
def lines_():
    query = f"""
    SELECT script_l, `name`, episode
    FROM script
    INNER JOIN characters ON characters.char_id = script.characters_char_id
    INNER JOIN episodes ON episodes.ep_id = script.episodes_ep_id
    """
    data = pd.read_sql_query(query, engine)
    return data.to_json(orient="records")
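A minimal wiring sketch for the function above, assuming `engine` is a SQLAlchemy engine and the JSON is served through a Flask route; the DSN and route path are illustrative assumptions, not part of the dataset:

from flask import Flask
from sqlalchemy import create_engine

app = Flask(__name__)
engine = create_engine("mysql+pymysql://user:pass@localhost/scripts")  # placeholder DSN (assumption)

# Route path and endpoint name are illustrative; lines_() is the function defined above.
app.add_url_rule("/lines", "lines", lines_)

Since to_json() already returns a string, Flask can serve it directly; wrap it in a Response with mimetype "application/json" if the content type matters.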
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json", "def select_all_lines(conn):\n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM ayasdi_table\")\n\n rows = cur.fetchall()\n\n for row in rows:\n print row", "def query():\n rows = []\n data = db.get()\n\n for calc in data:\n rows.append({\"ip\" : calc.ip, \"text\":calc.text})\n\n return jsonify(rows)", "def select_all(db, tableName):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"SELECT * FROM \" + tableName)\r\n print json.dumps(c.fetchall())\r\n except Error as e:\r\n print(e)", "def _fetch_records(query):\n con = connect()\n cursor = con.cursor()\n cursor.execute(query)\n row_headers = [x[0] for x in cursor.description] # this will extract row headers\n results = cursor.fetchall()\n json_data = []\n for result in results:\n json_data.append(dict(zip(row_headers, result)))\n cursor.close()\n return json.dumps(json_data)", "def get_all_records(self, data: dict, execution_context: dict):", "def read_all(self):\n def is_data(i):\n \"\"\"\n It checks if given key is different than added by system\n \"\"\"\n keys = ['_id', '_time']\n return all(i != k for k in keys)\n\n self.logger.log_reading()\n return simplejson.dumps([{i: x[i] for i in x if is_data(i)} for x in self.json_collection.find()])", "def get_all_todos():\n with sql.connect(\"todos.db\") as con:\n cursor = con.cursor()\n cursor.execute(\"SELECT * from todos\")\n todos = cursor.fetchall()\n cursor.close()\n\n return jsonify(todos), 200", "def lines_from_char(character):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nWHERE name = '{character}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")", "def get_all_objects():\n \n # Database connection \n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n\n # Select all object query\n c.execute(\"SELECT * FROM objects\")\n rows = c.fetchall()\n\n # Closing connection\n conn.close()\n\n # Found objects to dict {id : {obj}}\n objects = {k[0]:{} for k in rows}\n for row in rows:\n obj = {col:\"\" for col in COLUMNS[1:]}\n for i in range(1, len(row)):\n obj[COLUMNS[i]] = row[i]\n objects[row[0]] = obj\n return json.dumps(objects)", "def lines_from_char_ep(character,ep):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\nWHERE name = '{character}' and episode = '{ep}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")", "def get_all(self):\n url = self._dbname + '/_all'\n return self._connection.get(url).json()", "def rpc_database_get_rows(self, *args):\n\t\targs = list(args)\n\t\toffset = 0\n\t\tfields = self.path.split('/')[1:-2]\n\t\tif len(args) == (len(fields) + 1):\n\t\t\toffset = (args.pop() * VIEW_ROW_COUNT)\n\t\tassert len(fields) == len(args)\n\t\ttable_name = self.path.split('/')[-2]\n\t\ttable = DATABASE_TABLE_OBJECTS.get(table_name)\n\t\tassert table\n\n\t\t# it's critical that the columns are in the order that the client is expecting\n\t\tcolumns = DATABASE_TABLES[table_name]\n\t\trows = []\n\t\tsession = db_manager.Session()\n\t\tquery = session.query(table)\n\t\tquery = query.filter_by(**dict(zip((f + '_id' for f in fields), args)))\n\t\tfor row in query[offset:offset + 
VIEW_ROW_COUNT]:\n\t\t\trows.append([getattr(row, c) for c in columns])\n\t\tsession.close()\n\t\tif not len(rows):\n\t\t\treturn None\n\t\treturn {'columns': columns, 'rows': rows}", "def get_all_rows(self):\n cur = self.cursor()\n sql = (\"SELECT * FROM snapshot_log;\")\n cur.execute(sql)\n r = cur.fetchall()\n #cur.close()\n self.close()\n return r", "def get_all(self):\n # read log\n d = {}\n log = self.get_logfile()\n if not os.path.isfile(log):\n return d\n f = open(log, \"r\")\n if f.mode == 'r':\n lines = f.readlines()\n for line in lines:\n dline = json.loads(line)\n d.update(dline)\n f.close()\n return d", "def api_all():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tall_products = cur.execute('SELECT * FROM products WHERE inventory_count>0;').fetchall()\r\n\treturn jsonify(all_products)", "def all_rows(self, table, prt=False):\n conn = psycopg2.connect(self.name, sslmode='require')\n # conn.row_factory = sqlite3.Row\n c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n c.execute(\"SELECT * FROM {}\".format(table))\n all_rows=c.fetchall()\n if(prt):\n for row in all_rows:\n print(row) \n conn.close()\n return all_rows", "def get_all(self):\n return {\"parcels\": self.db}, 200", "def read_all(table_id = None, \n language = 'en',\n base_url = 'http://data.ssb.no/api/v0', \n full_url = None):\n \n \n if full_url is None: \n full_url = '{base_url}/{language}/table/{table_id}'.format(\n base_url = base_url,\n language = language, \n table_id = table_id)\n \n query = full_json(full_url = full_url)\n data = requests.post(full_url, json = query)\n results = pyjstat.from_json_stat(data.json(object_pairs_hook=OrderedDict))\n \n # maybe this need not be its own function, \n # but an option in read_json? 
json = 'all'\n \n # other functions(options include: read_recent to get only the \n # most recent values (defined as x), json = 'recent')\n \n return results[0]", "def get_records():\n with RECORD_LOCK: # since flask 1.0 multi-threaded is enabled by default\n return jsonify(RECORDS)", "def see_all():\n database = get_connection()\n patients_in_db = []\n patient: dict = database.patients.find()\n for p in patient:\n pat = p[\"patient_data\"]\n patients_in_db.append(pat)\n print(patients_in_db)\n return patients_in_db", "async def db_query(self, *args, **kwargs):\n rows = []\n async with self.db_pool.acquire() as conn:\n async with conn.cursor(cursor_factory=DictCursor) as cur:\n await cur.execute(*args, **kwargs)\n try:\n async for row in cur:\n rows.append(row)\n except psycopg2.ProgrammingError:\n # No results\n pass\n return rows", "def get_all_data():\n \n # open the data stored in a file called \"data.json\"\n try:\n fp = open(\"data/data.json\")\n response = simplejson.load(fp)\n # but if that file does not exist, download the data from fusiontables\n except IOError:\n logging.info(\"failed to load file\")\n service = build('fusiontables', 'v1', developerKey=API_KEY)\n query = \"SELECT * FROM \" + TABLE_ID + \" WHERE Animal_Type = 'DOG'\"\n response = service.query().sql(sql=query).execute()\n \n return response", "def all():\n session = session_maker(\n app.config['MYSQL_USER'], app.config['MYSQL_PASS'], app.config['MYSQL_SERVER_PORT_3306_TCP_ADDR'],\n app.config['MYSQL_SERVER_PORT_3306_TCP_PORT'], app.config['DB'])\n\n print(\n tabulate(\n selection_list_all(session),\n headers=['number', 'sqlid', 'name', 'city', 'state']))", "def get_all_data():\n return jsonify(service.get_all_data())", "def select_all():\n sql = 'SELECT * FROM dostawy.przesylki'\n rows = DBconnector.fetch_query(sql)\n return _wrap_in_parcel_list(rows)", "def fetch_all(q, *params):\n db = Database()\n db.cur.execute(q, params)\n ret = db.cur.fetchall()\n db.con.close()\n return ret", "def lines(self, request, pk=None):\n shp = self.get_object()\n lines = shp.multilinestringfeatures_set.all()\n '''\n pagination of the geojson to reduce loading time\n '''\n paginator = GeoJsonPagination()\n paginator.page_size = 100\n page = paginator.paginate_queryset(lines, request)\n if page is not None:\n serializer = lineSerializer(page, many=True)\n return paginator.get_paginated_response(serializer.data)\n serializer = lineSerializer(data=lines, many=True)\n serializer.is_valid()\n return Response(serializer.data)", "def load_stock(self):\n lines = []\n with Transaction().start(DBNAME, 1):\n stock_lines = self.Inventory.search([('state', '=', 'done'), ('location', '=', self.location.id)])\n if stock_lines:\n for i in stock_lines:\n batch = i.batch_number\n for j in i.lines:\n if j.quantity <= 0:\n continue\n dictionary = {}\n dictionary['code'] = j.product.code\n dictionary['item'] = j.product.template.name\n dictionary[\n 'category'] = j.product.template.category.name if j.product.template.category else None\n dictionary['quantity'] = Decimal(j.quantity).quantize(Decimal('0.11')).to_eng()\n dictionary['batch_number'] = batch\n dictionary['supplier'] = j.supplier.name if j.supplier else None\n dictionary['expiry_date'] = j.expiry_date.strftime('%d-%m-%Y') if j.expiry_date else None\n lines.append(dictionary)\n return lines", "def test_home_by_all_lines(self):\r\n result = self.app.get('/All_lines')\r\n self.assertTrue(b'' in result.data)", "def get_rest():\n\n size = int(request.form.get('size'))\n start = 
int(request.form.get('start'))\n\n try:\n data = database.mongodb[db_name].find({'parsed': True}, {'_id': 0})\\\n .limit(size)\\\n .skip(start)\n except Exception:\n return \"EOF\"\n resp = \"\"\n\n for idx, entry in enumerate(data):\n try:\n resp += '<tr>'\n\n resp += '<td class=\"col-md-1\">'+str(101+idx+start)+'</td>'\n\n resp += '<td class=\"col-md-2\"><a href=' + entry.get('url') + '>' \\\n + entry.get('url') + '</td>'\n resp += '<td class=\"col-md-3\">'\n resp += entry.get('title') \\\n if entry.get('title') is not None else 'AA'\n resp += '</td>'\n\n resp += '<td class=\"col-md-3\">'\n resp += entry.get('desc') \\\n if entry.get('desc') is not None else 'AA'\n resp += '</td>'\n\n resp += '<td class=\"col-md-3\">'\n resp += entry.get('keywords') \\\n if entry.get('keywords') is not None else 'AA'\n resp += '</td>'\n\n resp += '</tr>'\n except Exception as e:\n print(e)\n print(entry)\n continue\n print(\"DONE\")\n return resp", "def get_all_by_id():\n id = request.args['id']\n return jsonify(service.get_all_data_by_id(id))", "def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows", "def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows", "def sensors():\n sensor_data = query_db('SELECT * FROM sensors')\n return jsonify(results=sensor_data)", "def read_all():\n # Create the list of CIs from our data\n ci = db.session.query(CI).order_by(CI.id).all()\n app.logger.debug(pformat(ci))\n # Serialize the data for the response\n ci_schema = CISchema(many=True)\n data = ci_schema.dump(ci)\n return data", "def new_entries():\n where_clause = (db.lioli_main.accepted == 0)\n fields = [db.lioli_main.unique_id, db.lioli_main.body, db.lioli_main.age, db.lioli_main.gender]\n rows = db(where_clause).select(limitby=(0, 10), orderby=db.lioli_main.id, *fields)\n \n row_count = db(where_clause).count()\n return dict(rows=rows, row_count=row_count)", "def selectAll(conn, params):\n cur = conn.cursor()\n cur.execute(f\"SELECT {params} FROM criptomonedas\")\n\n # rows = cur.fetchall()\n rows = [r[0] for r in cur]\n # for row in rows:\n # print(row[0])\n return rows", "def show_all():\n\n QUERY = \"\"\"\n SELECT first_name, last_name, github\n FROM students\n \"\"\"\n\n db_cursor = db.session.execute(QUERY)\n\n rows = db_cursor.fetchall()\n\n return rows", "def read_all():\n # Create the list of example_objects from our data\n example_objects = ExampleObject.query.order_by(ExampleObject.field2).all()\n\n # Serialize the data for the response\n example_object_schema = ExampleObjectSchema(many=True)\n data = example_object_schema.dump(example_objects)\n return data", "def all_entries(cls):\n info = Diary.entries\n response = jsonify({\"data\": info})\n response.status_code = 200\n return response", "def get_all_rows(table, db_file):\n \n try:\n conn, c = connect_to_db(db_file) \n c.execute('SELECT * FROM {t}'.format(t=safe(table)))\n allrows = c.fetchall()\n conn.close()\n return allrows\n except Exception as e:\n print(\"Error when trying to fetch all rows in table\", table, \"in\", db_file)\n print(e)\n return []", "def get_all_project_records():\r\n records = flask.request.db_api.get_all_project_record()\r\n return flask.jsonify(records=records)", "def get_records(self):\n logging.debug('Return all records in table')\n if not 
self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n self._cursor.execute(\"\"\"SELECT * FROM {}\"\"\".format(self._name))\n rows = self._cursor.fetchall()\n\n records = []\n for r in rows:\n record = {'date': r['date'],\n 'time': r['time'],\n 'location': r['location'],\n 'nodeID': r['nodeID']}\n logging.info('{}|{}|{}|{}'.format(r['date'],r['time'],r['location'],r['nodeID']))\n records.append(record)\n\n return records", "def loadbatch():\n s=\"select * from tblbatch where status='1'\"\n c.execute(s)\n data=c.fetchall()\n return data", "def fetch_all(self, sql):\n result = []\n\n curs = self.q(sql, True)\n cols = curs.column_names\n for row in curs:\n row_result = {}\n for field in cols:\n k = cols.index(field)\n row_result[cols[k]] = row[k]\n #print cols[k], row[k]\n result.append(row_result)\n curs.close()\n return result", "async def get(self) -> List[RowProxy]:", "def dev_get_all_lineups(self, contest_id):\n\n settings_module_name = os.environ['DJANGO_SETTINGS_MODULE']\n # 'mysite.settings.local' should let this method work\n if 'local' not in settings_module_name:\n raise Exception(\n 'json from dev_get_all_lineups not allowed unless local settings being used')\n\n lineups = []\n\n for e in self.entries:\n\n lineup_id = e.lineup.pk\n player_ids = []\n\n # pack in each player in the lineup, in order of course\n lm = LineupManager(e.user)\n for pid in lm.get_player_ids(e.lineup):\n # player_ids.append( self.starter_map[ pid ] ) # masks out no-yet-started players\n player_ids.append(pid)\n\n lineups.append({\n 'lineup_id': lineup_id,\n 'player_ids': player_ids,\n })\n\n data = {\n 'endpoint': '/contest/all-lineups/%s?json' % int(contest_id),\n 'bytes_for_condensed_response': self.get_size_in_bytes(),\n 'total_lineups': self.contest.entries,\n 'players_per_lineup': self.players_per_lineup,\n 'lineups': lineups,\n }\n return data", "def get_all():\n return SavedQuery.get_all()", "def fetch_all_keys():\n response = TIME_TABLE.scan()\n items = response['Items']\n items.sort(key=lambda x: x['timeStamp'])\n response = ''\n for item in items:\n response = '{0}\\n{1}'.format(response, item)\n return response", "def all_query() -> list:\n data = []\n posts = Posts.query.all()\n for post in posts:\n x = {\n \"title\": post.title,\n \"body\": post.body,\n \"timestamp\": post.timestamp,\n \"id\": post.id,\n \"url\": make_url_from_title(post.title),\n }\n data.append(x)\n return data", "def test_retrieve_all(database_connection: mysql.connector.connect,\n print_response: bool = False):\n scorekeepers = info.retrieve_all(database_connection)\n assert scorekeepers is not None\n if print_response:\n print(json.dumps(scorekeepers, indent=2))", "def feed_records(self):\n if not self.stats_file:\n return\n\n with open(self.stats_file) as fh:\n reader = reverse_file(fh)\n for line in reader:\n if line is None:\n return\n if not line:\n continue\n\n try:\n js = json.loads(line)\n except Exception as e:\n continue\n\n yield js", "def return_orders():\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_order, id_customer, id_product, quantity, total_price,\n payment_status, send_status, order_date, location\n FROM Orders\n \"\"\")\n records = cursor.fetchall()\n return records", "def select_all_data(conn, select_sql):\n cur = conn.cursor()\n cur.execute(select_sql)\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)", "def run_query(db, query, multi=False):\n result = []\n connection = 
db.engine.connect()\n rows = connection.execute(text(query))\n for c in rows:\n if multi:\n result.append(dict(c.items()))\n else:\n result.append(dict(c))\n connection.close()\n\n return result", "def get_all_entries():\n conn = sqlite3.connect(CONF.database, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)\n curs = conn.cursor()\n try:\n return curs.execute(\"SELECT date_time, price FROM rates ORDER BY date_time DESC\").fetchall()\n finally:\n curs.close()\n conn.close()", "def fetch():\n req_data= request.get_json()\n \n ## ddb uses text files, using this as to eat my own dogfoor and improve\n ## no service sql client. No daemon, low cpu.\n\n\n e=load_db()\n try:\n res=e.query(req_data['query'])\n \n serialized = jsonpickle.encode( res,\n unpicklable=False,\n make_refs=False)\n return serialized\n except Exception as ex:\n return \"{0} -> '{1}'\".format(ex,req_data['query'])", "async def get_entries(self, *args,convert = True, listed=False, as_dict=False):\r\n consts = args\r\n condition = condition = \" AND \".join(consts)\r\n if not consts:\r\n query = \"SELECT * FROM {table_name}\"\r\n else:\r\n query = \"SELECT * FROM {table_name} WHERE {condition}\"\r\n query = query.format(condition = condition, table_name=self.name)\r\n cur = await self.data.db.execute(query)\r\n data = await cur.fetchall()\r\n await cur.close()\r\n if not data:\r\n return []\r\n if (convert and listed) or (convert and as_dict):\r\n raise ArgumentError(\"Incorrect arguments passed. only one can be True between arguments (convert, listed, as_dict)\")\r\n #Data contains all the info retrieved. Compile into dicts and also get the primary key data\r\n if listed:\r\n data = self.compile_as_list(data)\r\n return data\r\n if as_dict:\r\n data = self.compile_as_dict(data)\r\n return data\r\n data = self.compile_as_obj(data)\r\n return Records(data)", "def get_db_entries(location: str='') -> list:\n db = CarsDb() # pylint: disable=invalid-name\n results = db.get_cars(location)\n db.commit()\n db.close()\n return results", "def select_all_report(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM report\")\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)", "def select_all(table):\n # Establish connection\n conn = psycopg2.connect(dbname='db', user='grok')\n # Establish cursor\n cursor = conn.cursor()\n try:\n # Execute query\n cursor.execute('SELECT * from '+table+';')\n records = cursor.fetchall()\n except:\n return []\n return records", "def read_all_rows(condition, database, table):\n connection = sqlite3.connect(database)\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n cursor.execute('SELECT * FROM ' + table + ' WHERE ' + condition)\n rows = cursor.fetchall()\n cursor.close()\n connection.close()\n return rows", "def QueryAllUsers():\n conn = engine.connect()\n outmsg = \"\"\n if CheckTblNameExist(\"lineuser\"):\n result_db = conn.execute(\"select * from lineuser\")\n for row in result_db:\n outstring = f\" [(userid: {row.userid}),\"\\\n f\" (username: {row.username}),\"\\\n f\" (usertoken: {row.usertoken})]\"\n if outmsg == \"\":\n outmsg = outstring\n else:\n outmsg = outmsg + \",\\n\" + outstring\n conn.close()\n return outmsg\n else:\n conn.close()\n return \"Table Not Exist\"", "def get(self, problem_id):\n photos = self.sess.query(Photo).filter(Photo.problem_id == problem_id)\n self.write(\n json.dumps([get_row_data(photo) for photo in photos]))", "def get_all_tasks(rows):\n lista = list()\n for row in rows:\n arr = dict()\n for k,v in row.items():\n arr[k] 
= str(v)\n lista.append(arr)\n return dict(task_list=gluon.contrib.simplejson.dumps(lista))", "async def read_all(\n self,\n *,\n projection: Optional[Dict[str, Any]] = DEFAULT_PROJECTION,\n limit: int = DEFAULT_LIMIT,\n offset: int = DEFAULT_OFFSET,\n session: Optional[Any] = DEFAULT_SESSION,\n **kwargs: Any,\n ) -> List[Dict[str, Any]]:\n return await self._database.read_all(\n self.name,\n projection=projection,\n limit=limit,\n offset=offset,\n session=session,\n **kwargs,\n )", "def test_get(date1):\n # create mysql connection\n \n conn = pymysql.connect(host=config._DB_CONF['host'], \n port=config._DB_CONF['port'], \n user=config._DB_CONF['user'], \n passwd=config._DB_CONF['passwd'], \n db=config._DB_CONF['db'],\n charset='big5')\n cur = conn.cursor()\n \n sql=\"select * from maintain where `日期` =%s\"\n cur.execute(sql,date1)\n \n # get all column names\n columns = [desc[0] for desc in cur.description]\n # get all data\n rows=cur.fetchall()\n \n # build json \n result = rows_to_json(columns,rows)\n # print(result)\n \n cur.close()\n conn.close()\n\n return result", "def get(self, table):\n \"\"\"Get all data entries from a specified table \"\"\"\n data = Select('*', table).all()\n if data == []:\n return {\"Error\": f\"No entries found in {table}\"}, 404\n if \"Error\" in str(data):\n return data, 500\n return data", "def listings_data():\n\n stmt = db.session.query(nyc).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n df[\"latitude\"] = pd.to_numeric(df[\"latitude\"])\n df[\"longitude\"] = pd.to_numeric(df[\"longitude\"])\n df[\"accommodates\"] = pd.to_numeric(df[\"accommodates\"])\n\n data = df.to_dict(orient='index')\n # Create a dictionary entry for each row of metadata information\n # data = {}\n # for result in results:\n #\n # data[\"ID\"] = result[0]\n # data[\"LISTING_URL\"] = result[1]\n # data[\"NAME\"] = result[2]\n # data[\"HOST_ID\"] = result[3]\n # data[\"NEIGHBORHOOD\"] = result[4]\n # data[\"NEIGHBORHOOD_GROUP\"] = result[5]\n # data[\"CITY\"] = result[6]\n # data[\"ZIPCODE\"] = result[7]\n # data[\"LAT\"] = float(result[8])\n # data[\"LON\"] = float(result[9])\n #\n # print(data)\n\n return jsonify(data)", "def selectAll_db(table, name=\"*\"):\n query = \"SELECT \" + name + \" FROM \" + table\n cursor.execute(query)\n records = cursor.fetchall()\n return records", "def fetchSqlRecords(self, sql):\r\n cursor = self.c.cursor()\r\n cursor.execute(sql)\r\n r = cursor.fetchall()\r\n cursor.close()\r\n return r", "def read_db():\n with open(\"config.json\") as f:\n config = json.load(f)\n \n conn = psycopg2.connect(dbname='cage_sc_db', user='cage_db_user', \n password='legend', host='10.66.193.71')\n cursor = conn.cursor()\n\n # cmd = \"SELECT value_raw, timestamp FROM numeric_data WHERE endpoint_name='krstc_baseline' AND timestamp>'2019-09-27T00:00';\"\n \n # cmd = \"SELECT * FROM endpoint_id_map;\"\n \n # cmd = \"SELECT value_cal, timestamp FROM numeric_data WHERE endpoint_name='cage_coldPlate_temp' AND timestamp>'2019-09-03T00:02';\"\n \n # cmd = \"SELECT value_cal, timestamp FROM numeric_data WHERE endpoint_name='cage_pressure' AND timestamp>'2019-09-27T00:00';\"\n \n cmd = \"SELECT value_cal, timestamp FROM numeric_data WHERE endpoint_name='cage_ln_level' AND timestamp>'2019-09-27T00:00';\"\n \n # cmd = \"SELECT value_raw, timestamp FROM string_data WHERE endpoint_name='krstc_hv_status' AND timestamp>'2019-08-01';\"\n \n cursor.execute(cmd)\n\n # retrieve data. 
returns a list of tuples.\n record = cursor.fetchall()\n \n # print(type(record[0]))\n \n # dt = record[0][1]\n \n # print(dt)\n \n for rec in record:\n print(rec)", "def get_records(self):\n url = f\"{self.baseurl}\" + \"?limit=\" + RECORDS_PER_PAGE\n\n while True:\n log.debug(\"Retrieving from OpenCity URL %s\", url)\n response = urlopen(url).read()\n json_content = json.loads(response)\n\n url = json_content[\"next\"]\n\n objects = json_content[\"items\"]\n for res in objects:\n lid = res[\"id\"]\n ltitle = res[\"title\"]\n log.info(f'Found id:{lid} \"{ltitle}\"')\n yield res\n\n if url is None:\n break", "def fetch_all_url_records():\r\n db_cursor = db_collection.find({'deleted': False})\r\n result = list(db_cursor)\r\n db_cursor.close()\r\n\r\n return result", "async def fetchall(self):\n\n self._check_executed()\n\n if self._query_id == -1:\n msg = \"query didn't result in a resultset\"\n self._exception_handler(ProgrammingError, msg)\n\n result = self._rows[self.rownumber - self._offset :]\n self.rownumber = len(self._rows) + self._offset\n\n # slide the window over the resultset\n while await self.nextset():\n result += self._rows\n self.rownumber = len(self._rows) + self._offset\n print(\n \"\\nresult: \",\n self._rows,\n \"\\n\\n\",\n )\n return result", "def _fetch_data_lines(self) -> list:\n with open(self.full_path, \"rb\") as file:\n all_lines = file.readlines()\n return self._screen_invalid_lines(all_lines)", "def getdata_from_db(number):\n print(\"Connecting to database\")\n query = sqlite3.connect(DB_PATH).cursor()\n query.execute(\n \"SELECT text FROM data ORDER BY id DESC LIMIT \" + str(number) + \"; \")\n tweets = [line[0] for line in query.fetchall()]\n print(\"Tweets from databases: %d tweets\" % (len(tweets)))\n return tweets", "async def get_all(self, collection):\n db_records = self.database[collection].find()\n output_records = []\n async for record in db_records:\n output_records.append(QuestionInDB(**record, id=record[\"_id\"]))\n return output_records", "def get_data(self, tablename):\n conn = self.get_conn()\n c = conn.cursor()\n status_sql = self.get_status_sql(tablename)\n c.execute(status_sql)\n results = c.fetchall()\n data = []\n for row in results:\n data.append(dict_from_row(row))\n conn.commit()\n conn.close()\n return data", "def list_people():\n conn = get_db()\n try:\n cur = conn.cursor()\n try:\n # Note: don't use prefixes like \"oktatas.\" above for tables\n # within your own schema, as it ruins portability.\n # This table has 10k rows, so we intentionally limit the result set to 50\n # (Oracle note: not the first 50 rows by name, but rather\n # the first 50 rows of the table, which are then ordered by name).\n # Also, long queries can be broken into two shorter lines like this\n cur.execute('''SELECT szemelyi_szam, nev FROM oktatas.szemelyek\n WHERE ROWNUM < 50 ORDER BY nev ASC''')\n # there's a better way, but outside the scope of this lab:\n # http://docs.python.org/2/tutorial/datastructures.html#list-comprehensions\n results = []\n # we make use of the fact that\n # - cursors are iterable and\n # - `for` can unpack objects returned by each iteration\n for szemelyi_szam, nev in cur:\n results.append({'szemelyi_szam': szemelyi_szam, 'nev': nev})\n return jsonify(szemelyek=results)\n finally:\n cur.close()\n finally:\n # this is also a naive implementation, a more Pythonic solution:\n # http://docs.python.org/2/library/contextlib.html#contextlib.closing\n conn.close()", "def list_people():\n conn = get_db()\n try:\n cur = conn.cursor()\n try:\n # 
Note: don't use prefixes like \"oktatas.\" above for tables\n # within your own schema, as it ruins portability.\n # This table has 10k rows, so we intentionally limit the result set to 50\n # (Oracle note: not the first 50 rows by name, but rather\n # the first 50 rows of the table, which are then ordered by name).\n # Also, long queries can be broken into two shorter lines like this\n cur.execute('''SELECT szemelyi_szam, nev FROM oktatas.szemelyek\n WHERE ROWNUM < 50 ORDER BY nev ASC''')\n # there's a better way, but outside the scope of this lab:\n # http://docs.python.org/2/tutorial/datastructures.html#list-comprehensions\n results = []\n # we make use of the fact that\n # - cursors are iterable and\n # - `for` can unpack objects returned by each iteration\n for szemelyi_szam, nev in cur:\n results.append({'szemelyi_szam': szemelyi_szam, 'nev': nev})\n return jsonify(szemelyek=results)\n finally:\n cur.close()\n finally:\n # this is also a naive implementation, a more Pythonic solution:\n # http://docs.python.org/2/library/contextlib.html#contextlib.closing\n conn.close()", "def load_data(client):\n codes = [\"DUB\", \"LHR\", \"ETC\", \"XXX\"]\n q = generateMultiInsertQuery(codes, \"Airport\")\n #print(json.dumps(q.json(), indent=4))\n q.execute(client)", "def read_from_db():\n\t# prepare the query for reading from DB\n\tquery = \"SELECT * FROM tasks\"\n\n\t# connection to database\n\tconnection = pymysql.connect(user=\"root\", password=\"sysadmin\", host=\"localhost\", database=\"todolist\")\n\n\t# get a cursor\n\tcursor = connection.cursor()\n\n\t# execute query\n\tcursor.execute(query)\n\n\t# fetch result from query\n\tresults = cursor.fetchall()\n\n\t# close cursor and connection\n\tcursor.close()\n\tconnection.close()\n\n\ttask_list = list()\n\tfor result in results:\n\t\ttmp = {'id': result[0], 'description': result[1], 'urgent': result[2]}\n\t\ttask_list.append(tmp)\n\n\treturn task_list", "def executeAll(lines):", "async def list(request):\n dict_answer = {'models': [item[1]+' '+item[0]+str(item[2:]) for item in models_db],\n 'datasets': [conv_time(d.stat().st_atime)+' '+str(d.name) for d in Path('data/datasets/').glob('*')],\n }\n return web.json_response(dict_answer)", "def read_all():\n # Create the list of photos from our data\n photos = Photo.query.order_by(Photo.sample_id).all()\n\n # Serialize the data for the response\n photo_schema = PhotoSchema(many=True)\n data = photo_schema.dump(photos)\n return data", "def to_dict_query(self) -> list:\n return [row.to_dict() for row in self.all()]", "def get_articles():\n _, articles = base_query(db_session)\n return jsonify([p.serialize for p in articles])", "def fetch_all_from_db(collec):\n db = client.get_database(\"tweetstorm\")\n collection = db.get_collection(collec)\n ret = list(collection.find())\n logger.info(str(len(ret)) + ' documents read from the db')\n return ret", "def get_json(self):\r\n self.get_recordrange()\r\n [\r\n self.json_data.setter(n, self.get_data(\"json\", x))\r\n for n, x in enumerate(tqdm(self.iterlist))\r\n ]", "async def get_objects(conn: Database, query):\n return await conn.fetch_all(query=query)", "def get_objects(columns, filter):\n\n # Database connection\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n \n # Creating queries\n column_query = \",\".join(col for col in columns)\n filter_query = ' and '.join(k + \"='\" + v +\"'\" for k,v in filter.items())\n \n # Querying database\n c.execute(\"SELECT \" + column_query + \" FROM objects where \" + filter_query)\n rows = c.fetchall()\n\n # 
Close connection\n conn.close()\n\n # Found objects to dict {cpt : {obj}}\n objects = {i:{} for i in range(len(rows))}\n i = 0\n if columns == [\"*\"]:\n columns = COLUMNS\n for row in rows:\n obj = {col:\"\" for col in columns}\n for j in range(len(row)):\n obj[columns[j]] = row[j]\n objects[i] = obj\n i += 1\n return json.dumps(objects)", "def read_all():\n # Create the list of users from our data\n users = User.query.order_by(User.first_name).all()\n\n # Serialize the data for the response\n user_schema = UserSchema(many=True)\n data = user_schema.dump(users)\n return data", "def read_all():\r\n categories = Category.query.all()\r\n # Serialize the data for the response\r\n category_schema = CategorySchema(many=True)\r\n print('***********************************************************')\r\n return category_schema.dump(categories)", "def read_database(self):\n # open the database\n f = open('KISS_LINES','r')\n # make a list which will contain lines\n tlc = []\n for row in f:\n tlc.append(f.readline())\n f.close()\n\n return tlc", "def run():\n pgconn = util.get_dbconn()\n cursor = pgconn.cursor()\n cursor.execute(\n \"\"\"\n SELECT feedtype from ldm_feedtypes ORDER by feedtype\n \"\"\"\n )\n res = dict(feedtypes=[])\n for row in cursor:\n res[\"feedtypes\"].append(row[0])\n\n return json.dumps(res)", "def get_all_by_name():\n name = request.args['name']\n return jsonify(service.get_all_data_by_name(name))", "def get_all_2(conn) -> str:\n with conn.cursor() as cursor:\n cursor.execute(\"\"\"select products.id, \n products.name, \n products.price, \n products.image, \n products.category_id, \n product_categories.name\n from products\n inner join product_categories\n on products.category_id=product_categories.id\n where products.deleted=false order by id\"\"\")\n try:\n return cursor.fetchall()\n except TypeError:\n raise errors.StoreError", "def get_all(self):\n list = []\n line = self.get()\n while line:\n list.append(line)\n line = self.get()\n return list" ]
[ "0.70531267", "0.6787454", "0.6718839", "0.66217417", "0.62935805", "0.62533355", "0.6238542", "0.6206922", "0.611652", "0.608391", "0.6056544", "0.59862447", "0.5970311", "0.59272057", "0.5872251", "0.58481425", "0.58452594", "0.5820792", "0.58157414", "0.5807491", "0.57849103", "0.57791793", "0.5766813", "0.57588965", "0.5752929", "0.57316625", "0.57191473", "0.57053214", "0.56994545", "0.5690506", "0.56892693", "0.5685616", "0.56476265", "0.56476265", "0.56322294", "0.56253713", "0.56248546", "0.562305", "0.56079865", "0.5599622", "0.5588472", "0.55783", "0.5549964", "0.5546569", "0.55464464", "0.5544894", "0.55329144", "0.5522229", "0.5517468", "0.548601", "0.5471227", "0.54596287", "0.54501605", "0.543316", "0.5432801", "0.5424721", "0.5416708", "0.5413311", "0.54132307", "0.5410298", "0.54086167", "0.53953123", "0.5389443", "0.5388174", "0.53872347", "0.53804696", "0.5376172", "0.5376079", "0.53751403", "0.5370704", "0.5370086", "0.53608215", "0.53545415", "0.5354013", "0.5349014", "0.5347779", "0.53461844", "0.5345491", "0.5342513", "0.5340518", "0.5338188", "0.5338188", "0.53296304", "0.53289264", "0.53264093", "0.5323004", "0.5319935", "0.5303456", "0.52990806", "0.52962875", "0.52906805", "0.52894247", "0.5288272", "0.52864605", "0.52855104", "0.5283056", "0.52823263", "0.5275912", "0.52636737", "0.52621907" ]
0.6937443
1
Queries the database to insert a line from a character. Takes a script line, a character, and an episode; returns a confirmation message.
def new_line(script_l, character, episode):
    if up.check("characters", character):
        char_id = up.giveId("characters", character)
    else:
        up.insertCharacter(character)
        char_id = up.giveId("characters", character)
    if up.check("episodes", episode):
        ep_id = up.giveId("episodes", episode)
    else:
        up.insertEpisode(episode)
        ep_id = up.giveId("episodes", episode)
    if up.check("script", script_l) and up.check("characters", character) and up.check("episodes", episode):
        return "line exists"
    else:
        engine.execute(f"""
        INSERT INTO script (script_l, characters_char_id, episodes_ep_id) VALUES
        ("{script_l}", "{char_id}", "{ep_id}");
        """)
        return f"successfully loaded: {character},{script_l},{episode}"
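The interpolated INSERT above will break on dialogue containing double quotes and is open to SQL injection. A hedged alternative, sketched under the assumption that `engine` is a SQLAlchemy engine and `up` is the same helper module used above (the name `new_line_safe` is hypothetical), binds the values as parameters instead:

from sqlalchemy import text

def new_line_safe(script_l, character, episode):
    # Ensure the character and episode rows exist, then fetch their ids
    # (up is the dataset's helper module; assumed available here).
    if not up.check("characters", character):
        up.insertCharacter(character)
    char_id = up.giveId("characters", character)
    if not up.check("episodes", episode):
        up.insertEpisode(episode)
    ep_id = up.giveId("episodes", episode)
    if up.check("script", script_l):
        return "line exists"
    # Bound parameters let the driver handle quoting and escaping.
    with engine.begin() as conn:
        conn.execute(
            text("INSERT INTO script (script_l, characters_char_id, episodes_ep_id) "
                 "VALUES (:line, :char_id, :ep_id)"),
            {"line": script_l, "char_id": char_id, "ep_id": ep_id},
        )
    return f"successfully loaded: {character},{script_l},{episode}"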
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insertCharacter(string):\n if check(\"character\", string):\n return \"character exists\"\n else:\n engine.execute(f\"INSERT INTO characters (name) VALUES ('{string}');\")", "def insertLine(row):\n if check(\"script\", row[\"dialogue\"]) and check(\"characters\", row[\"character\"]) and check(\"episodes\", row[\"episode\"]):\n return \"line exists\"\n else:\n if check(\"characters\", row[\"character\"]):\n char_id = giveId(\"characters\", row[\"character\"])\n else:\n insertCharacter(row[\"character\"])\n char_id = giveId(\"characters\", row[\"character\"])\n \n if check(\"episodes\", row[\"episode\"]):\n ep_id = giveId(\"episodes\", row[\"episode\"])\n else:\n insertEpisode(row[\"episode\"])\n ep_id = giveId(\"episodes\", row[\"episode\"])\n #meme optional insert somehow\n #meme_id = 0\n engine.execute(f\"\"\"\n INSERT INTO script (line_n, script_l, characters_char_id, episodes_ep_id) VALUES\n (\"{row['line']}\", \"{row['dialogue']}\", \"{char_id}\", \"{ep_id}\");\n \"\"\")", "def insertEpisode(ep):\n if check(\"episodes\", ep):\n return \"episode exists\"\n else:\n engine.execute(f\"INSERT INTO episodes (episode) VALUES ('{ep}');\")", "def insert_row(conn, episode_info):\n\tp_key = get_p_key(episode_info)\n\t\n\tinsert_statement = f'INSERT INTO shows (p_key, show_stub, show_name, season, episode_number, episode_title watched_status, hidden_status) VALUES (\\\"{p_key}\\\", \\\"{episode_info[\"show_stub\"]}\\\", \\\"{episode_info[\"show_name\"]}\\\", {episode_info[\"season\"]}, {episode_info[\"episode_number\"]}, {episode_info[\"episode_title\"]}, {episode_info[\"watched_status\"]}, {episode_info[\"hidden_status\"]});'\n\t\n\texecute_sql(conn, insert_statement)", "def do_insert(self, pokemon):\n while input(f\"Insert a new record for \\\"{pokemon}\\\" (y?): \") == \"y\":\n self._table.put_item(Item={\n \"Pokemon\": pokemon, # TODO Validate against a list\n \"Index\": _index(),\n \"nickname\": input(\"nickname: \"),\n \"ability\": input(\"ability: \"), # TODO Validate against a list\n \"nature\": input(\"nature: \"), # TODO Validate against a list\n \"ivs\": _statline(\"ivs\", 31),\n \"evs\": _statline(\"evs\", 255),\n # TODO does order of moves matter?\n \"moves\": _moves(),\n \"egg_moves\": input(\"egg moves (,-delim): \").split(\",\"),\n })", "def create_episode(conn, episode):\n sql = '''INSERT INTO episode(date, id_show, id_corpus, partition, path)\n VALUES(?,?,?,?,?)'''\n cur = conn.cursor()\n cur.execute(sql, episode)\n return cur.lastrowid", "def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):\n try:\n cursor = dbConnection.cursor()\n title = title.replace(\"'\", \"''\")\n cursor.execute(\"INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('\" + audiourl + \"', NULL, '\" + podcastName + \"', NULL, '\" + description + \"', '\" + parsedDate + \"', '\" + title + \"', FALSE, NULL);\")\n dbConnection.commit()\n cursor.close()\n return True\n except:\n return False\n return False", "def registerPlayer(name):\n # cn=name\n # title='playerName'\n # data=[title,cn]\n DB = connect()\n c = DB.cursor()\n #cur.execute(\"INSERT INTO test (num, data) VALUES (%s, %s)\",*/\n #c.execute(\"INSERT INTO tournament (playerName) values ('al pachino2') \")\n #c.execute(\"INSERT INTO tournament name values (%s)\", name)\n #cur.execute('INSERT INTO %s (day, elapsed_time, net_time, length, average_speed, geometry) VALUES (%s, %s, %s, %s, %s, %s)', (escaped_name, day, ))\n 
c.execute(\"INSERT INTO tournament VALUES (%s)\", (name,))\n DB.commit()\n DB.close()", "def registerPlayer(name):\n DB = connect()\n c = DB.cursor()\n #inserts a new player into the players table, bleach cleans the input to avoid attack \n c.execute(\"INSERT INTO players (player) VALUES (%s)\", (bleach.clean(name), ))\n DB.commit()\n DB.close()", "def insert_statement() -> str:\n pass", "def registerPlayer(name):\n if \"'\" in name:\n ap_index = name.index(\"'\")\n name = name[0:ap_index] + \"''\" + name[ap_index+1:]\n \n cursor.execute(\"\"\"insert into players (name) values ('%s')\"\"\" % name)\n gc.commit()", "def insert(self, name, email, phone, address, state, zip, country, amount, message):\n params = {'name':name, 'email':email, 'phone':phone,'address':address,'state':state,\\\n 'zip':zip,'country':country,'amount':amount,'message':message}\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"insert into foodbank (name, email, phone, address, state, zip, country, amount, message)\\\n VALUES (:name, :email, :phone, :address, :state, :zip, :country, :amount, :message)\", params)\n\n connection.commit()\n cursor.close()\n return True", "def insert(sql, clue):\n\t# clue is [game, airdate, round, category, value, clue, answer]\n\t# note that at this point, clue[4] is False if round is 3\n\tif \"\\\\\\'\" in clue[6]:\n\t\tclue[6] = clue[6].replace(\"\\\\\\'\", \"'\")\n\tif \"\\\\\\\"\" in clue[6]:\n\t\tclue[6] = clue[6].replace(\"\\\\\\\"\", \"\\\"\")\n\tif not sql:\n\t\tprint clue\n\t\treturn\n\tsql.execute(\"INSERT OR IGNORE INTO airdates VALUES(?, ?);\", (clue[0], clue[1], ))\n\tsql.execute(\"INSERT OR IGNORE INTO categories(category) VALUES(?);\", (clue[3], ))\n\tcategory_id = sql.execute(\"SELECT id FROM categories WHERE category = ?;\", (clue[3], )).fetchone()[0]\n\tclue_id = sql.execute(\"INSERT INTO documents(clue, answer) VALUES(?, ?);\", (clue[5], clue[6], )).lastrowid\n\tsql.execute(\"INSERT INTO clues(game, round, value) VALUES(?, ?, ?);\", (clue[0], clue[2], clue[4], ))\n\tsql.execute(\"INSERT INTO classifications VALUES(?, ?)\", (clue_id, category_id, ))", "def insert_to_database(self, db):\n \n self.remove_bad_characters()\n print(\"Inserting \"+self.categorie_name+\" to database.\")\n db.query(\"INSERT INTO categorie (categorie_name) VALUES (:categorie_name)\", \\\n categorie_name=self.categorie_name)", "def register_player(name):\n\n \"\"\" use bleach to clean the name of the registered user \"\"\"\n clean_name = bleach.clean(name, strip=True)\n DB = connect()\n c = DB.cursor()\n c.execute(\"INSERT INTO players (player_name) VALUES (%s)\", (clean_name,))\n DB.commit()\n DB.close()", "def accept(self):\n if not self.shortCheck.isChecked():\n self.parent.shortadj = False\n self.parent.contpage = False\n self.close()\n return\n basecommand = self.parent.sqlcommand\n self.parent.sqldict[\"name\"] = \"\\'\" + self.enstr + \"\\'\"\n self.parent.sqldict[\"runame\"] = \"\\'\" + self.rustr + \"\\'\"\n self.parent.sqldict[\"gender\"] = \"\\'masculine\\'\"\n self.parent.sqldict[\"declension\"] = \"\\'\" + self.rumascEdit.text() + \"\\'\"\n self.parent.sqldict[\"wordcase\"] = \"\\'nominative\\'\"\n self.parent.sqldict[\"animate\"] = \"\\'inanimate\\'\"\n cols = \"(\"\n data = \"(\"\n for y in self.parent.sqldict:\n cols += y + \", \"\n data += self.parent.sqldict[y] + \", \"\n cols = cols[:-2] + \") VALUES\"\n data = data[:-2] + \");\\n\"\n self.sqlcommand += basecommand + cols + data\n self.parent.sqldict[\"gender\"] = \"\\'feminine\\'\"\n 
self.parent.sqldict[\"declension\"] = \"\\'\" + self.rufemEdit.text() + \"\\'\"\n cols = \"(\"\n data = \"(\"\n for y in self.parent.sqldict:\n cols += y + \", \"\n data += self.parent.sqldict[y] + \", \"\n cols = cols[:-2] + \") VALUES\"\n data = data[:-2] + \");\\n\"\n self.sqlcommand += basecommand + cols + data\n self.parent.sqldict[\"gender\"] = \"\\'nueter\\'\"\n self.parent.sqldict[\"declension\"] = \"\\'\" + self.runuetEdit.text() + \"\\'\"\n cols = \"(\"\n data = \"(\"\n for y in self.parent.sqldict:\n cols += y + \", \"\n data += self.parent.sqldict[y] + \", \"\n cols = cols[:-2] + \") VALUES\"\n data = data[:-2] + \");\\n\"\n self.sqlcommand += basecommand + cols + data\n self.parent.sqldict[\"gender\"] = \"\\'plural\\'\"\n self.parent.sqldict[\"declension\"] = \"\\'\" + self.ruplurEdit.text() + \"\\'\"\n cols = \"(\"\n data = \"(\"\n for y in self.parent.sqldict:\n cols += y + \", \"\n data += self.parent.sqldict[y] + \", \"\n cols = cols[:-2] + \") VALUES\"\n data = data[:-2] + \");\\n\"\n self.sqlcommand += basecommand + cols + data\n self.parent.shortadjcommand = self.sqlcommand\n self.parent.contpage = False\n self.close()", "def insert_customer(db, values):\r\n command = \"INSERT INTO waiting (name, username, ru_id, os_platform, description)\"\r\n command = command + \" VALUES (\"\r\n for i in range(len(values)):\r\n command = command + \"?\"\r\n if i == (len(values) - 1):\r\n command = command + \");\"\r\n else:\r\n command = command + \", \"\r\n try:\r\n c = db.cursor()\r\n c.execute(command, values)\r\n c.execute(\"SELECT * FROM waiting ORDER BY cus_num DESC LIMIT 1\")\r\n db.commit()\r\n customer = c.fetchall()\r\n print \"Your number is \" + str(customer[0][0])\r\n except Error as e:\r\n print(e)", "async def character(self, ctx, character=None):\n\n if character.lower() in [c.lower() for c in self.characters]:\n return await ctx.send(f\"`ERROR: Duplicate Character` {character} is already added.\")\n\n created_char = eqdkp.create_character(character.capitalize())\n if created_char:\n self.characters.append(created_char)\n await ctx.send(f\"{created_char.name} was created!\")\n else:\n await ctx.send(f\"Failed to create {character}. 
Please try again later, or create them manually.\")", "def registerPlayer(name):\n regP = c.execute(\"INSERT INTO players (name) VALUES(?)\", (name,)); # remember to make it a tuple\n print \"Successfully added player %s\" % name\n return regP", "def insert(self, name, email, message):\n params = {'name':name, 'email':email, 'date':date.today(), 'message':message}\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"insert into guestbook (name, email, signed_on, message) VALUES (:name, :email, :date, :message)\", params)\n\n connection.commit()\n cursor.close()\n return True", "def insertByHand(self):\n\n fieldValues = []\n for field in self.fieldNames:\n fieldValues.append(raw_input(\"Give \" + field + \": \"))\n\n print(self.tableName + \".insert(\" + str(fieldValues) + \")\")\n\n self.insert(fieldValues)", "def newEquipment(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n equipmentInsertQuery = \"\"\"INSERT into equipment (equipment_id, equipment_name) \r\n VALUES (%s, %s) ON Duplicate KEY UPDATE equipment_id = equipment_id;\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(equipmentInsertQuery, (equip.equipment_id, equip.equipment_name))\r\n db.commit()\r\n except Exception:\r\n print(\"Error: OOPs something went wrong while adding new equipment to the database\")\r\n finally:\r\n cursor.close()\r\n db.close()", "def registerPlayer(name):\n db, cursor = connect()\n cursor.execute(\"INSERT INTO players (name, wins, matches) VALUES (%s, 0, 0)\" , (name, ) ) \n db.commit() \n db.close()", "def process(self, row):\n #print (row)\n \n key = conf[\"equipment_id\"]\n \n result = \"test start\" \n result = \"test end\" \n \n data = 1 ## 1\n \n self.db.save(key, result, data)", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def db_insert(name, task, time, note):\n Entry.create(name=name,\n task=task,\n time=time,\n note=note)\n return main()", "def create_speaker(conn, speaker):\n\n sql = ''' INSERT INTO speaker(name,gender,native)\n VALUES(?,?,?) 
'''\n cur = conn.cursor()\n cur.execute(sql, speaker)\n return cur.lastrowid", "def test_adds_seeming_notes(campaign):\n\n npc.commands.create_character.changeling('changeling mann', 'Beast', 'Hunterheart')\n character = campaign.get_character('changeling mann.nwod')\n assert ' Seeming Beast (8-again animal ken and free specialty; glamour adds to presence and composure; -4 untrained mental; no 10-again on Int)' in character.read()", "def insert_item(self, text_path, word_first, word_second, word_third, word_fourth, word_fifth):\n conn, cursor = SQLDatabase.connect()\n self.change_database(cursor)\n success = False\n try:\n cursor.execute(\"INSERT INTO {table} (Text_Path, Word_First, Word_Second, Word_Third,\\\n Word_Fourth, Word_Fifth) VALUES ('{path}','{first}', '{second}',\\\n '{third}', '{fourth}', '{fifth}')\\\n \".format(table=self.table_name, path=text_path, first=word_first,\n second=word_second,\n third=word_third, fourth=word_fourth, fifth=word_fifth))\n conn.commit()\n success = True\n except mysql.connector.errors.ProgrammingError as err:\n print(\"{} can't insert item\".format(err))\n finally:\n SQLDatabase.close(cursor, conn)\n\n return success", "def insert_question(self, id):\n cursor = self.conn.cursor()\n cursor.execute(f\"insert into {self.site} values (?)\", (id, ))\n self.conn.commit()\n cursor.close()", "def registerPlayer(name):\n db_conn = connect()\n db_cursor = db_conn.cursor()\n player_insert_stmt = \"insert into players(player_name) values (%s)\"\n db_cursor.execute(player_insert_stmt, (name,))\n db_conn.commit()\n db_conn.close()", "def save_in_db(self):\n self.sql_database.table_name = self.table_db\n self.sql_database.db_name = self.db\n if self.sql_database.insert_item(text_path=self.path, word_first=self.word_1.get(),\n word_second=self.word_2.get(),\n word_third=self.word_3.get(), word_fourth=self.word_4.get(),\n word_fifth=self.word_5.get()):\n msg.showinfo(message=\"Done\")", "def InsertUser(inputuser, inputtoken):\n lastuserid = findLastUserID()\n insertstm = f\"insert into lineuser\"\\\n f\"(userid, username, usertoken) \"\\\n f\"values('{lastuserid+1}',\"\\\n f\"'{inputuser}',\"\\\n f\"'{inputtoken}')\"\n conn = engine.connect()\n if CheckTblNameExist(\"lineuser\"):\n try:\n conn.execute(insertstm)\n conn.close()\n return f\"Execute Success: Insert - {inputuser}\" \n except:\n conn.close()\n return \"Execute Errors\"\n else:\n conn.close()\n return \"Table Not Exist\"", "def post_med(self):\n\treturn \"INSERT INTO medic(lib, description) VALUES(%s, %s) RETURNING id\"", "def add_cheer(ask_id):\n\tconn = get_db()\n\tmessage = request.form.get('message')\n\n\twith conn.cursor() as cursor :\n\t\tsql = \"INSERT INTO `cheer` (`ask_id`, `message`, `ip_address`) VALUES (%s, %s, %s)\"\n\t\tr = cursor.execute(sql, (ask_id, message, request.remote_addr))\n\n\tconn.commit()\n\n\tredirect_url = request.form.get('back', '/#c' + str(ask_id))\n\treturn redirect(redirect_url)", "def insertarhab(fila):\n try:\n conexion.cur.execute('insert into habitacion(numero,tipo,prezo,libre) values(?,?,?,?)', fila)\n conexion.conex.commit()\n except sqlite3.OperationalError as e:\n print(e)\n conexion.conex.rollback()", "def insertarhab(fila):\n try:\n conexion.cur.execute('insert into habitacion(numero,tipo,prezo,libre) values(?,?,?,?)', fila)\n conexion.conex.commit()\n except sqlite3.OperationalError as e:\n print(e)\n conexion.conex.rollback()", "def registerPlayer(name):\n sql = \"INSERT INTO players (name) VALUES (%s)\"\n data = [name]\n executeNonQuery(sql, data)", "def 
insert_to_reports(connection,name_,category,report):\r\n with connection:\r\n connection.execute(\"INSERT INTO reports_table (Name , category ,report) VALUES (?,?,?)\",\r\n (name_,category,report))", "def addRow(conn, title, url, text):\n with conn.cursor() as cursor:\n sql = \"INSERT INTO comments(course_name, course_url, course_comment) VALUES (%s, %s, %s)\"\n cursor.execute(sql,(title, url, text))\n conn.commit()", "def test_insert(self):\n self.assertEqual(['INSERT', 'INTO', 'test', '(a,b) VALUES (1,2)'],\n grammar._INSERT_EXPR.parseString(\"INSERT INTO test (a,b) VALUES (1,2);\").asList())", "def insertSQL(data):\n \"\"\"Has abbrv1, full name, abbrv2, full name, date \"\"\"\n# print(data)\n gameInfo = data[0] \n date = gameInfo[4][0] + '_' + gameInfo[4][1] + '_' + gameInfo[4][2]\n tableName = gameInfo[0] + '_' + gameInfo[2] + '_' + date\n \n gameTemp = executeReturn(\"SELECT * FROM allgames2017\")\n tableNames = []\n for i in gameTemp:\n tableNames.append(i[1])\n if(len(gameTemp) != 0):\n lastRow = int(gameTemp[len(gameTemp) - 1][0])\n lastRow += 1\n else:\n lastRow = 0\n \n if(tableName in tableNames):\n return\n \n execute(\"INSERT INTO allgames2017 VALUES (? , ?)\", [lastRow, tableName])\n \n for i in data:\n if(gameInfo[0] in i):\n i[4] = date\n \n# for i in data:\n# print(i)\n \n longestRow = 0 \n for i in data:\n if(len(i) > longestRow):\n longestRow = len(i) \n \n ''' Make text CC1 TEXT, CC2 TEXT, .... '''\n columnText = \"\"\n questionText = \"\"\n \n for i in range(0,longestRow):\n columnText += 'CC' + str(i) + ' TEXT,'\n questionText += '?,'\n columnText = columnText[0:len(columnText)-1]\n questionText = questionText[0:len(questionText)-1]\n \n# print(columnText)\n# for i in data:\n# print(i)\n \n execute('DROP TABLE IF EXISTS ' + tableName, None)\n execute('CREATE TABLE ' + tableName + '(' + columnText + ')', None)\n \n '''Feed data into sqlite'''\n for i in range(0, len(data)):\n tempArray = []\n \n for j in range(0,longestRow):\n if(j < len(data[i])):\n tempArray.append(data[i][j])\n else:\n tempArray.append(\"\")\n# print(tempArray)\n execute('INSERT INTO ' + tableName + ' VALUES (' + questionText + ')', tempArray)", "def registerPlayer(name):\n print \"\\n\"\n print \"\\t\\t\\tRegistering....\\t\", name\n cur2 = conn.cursor()\n\n # Since ID column in players is auto-increment. Only 'Name' is specified.\n SQL = \"INSERT INTO players(NAME) VALUES ( %s );\"\n data = (name, )\n cur2.execute(SQL, data) # Note: no % operator\n cur2.execute(\"commit;\")\n cur2.execute(\"\\t\\t\\tSELECT * from players;\")\n\n print \"\\t\\t\\tRegistered!!\\n\"", "def registerPlayer(name):\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"INSERT INTO player (name) VALUES (%s)\", (name,))\n dbConn.commit()\n dbConn.close()", "async def funnypts_transaction(message, client, extra_args, operation):\n\n awarder = message.author.id\n\n # input screening\n if len(extra_args) < 2:\n await message.channel.send(f\"PLEASE USE THIS: `funnypts {operation} user_mention reason`\")\n return False\n\n if not (awardee := utils.from_mention(extra_args[0])):\n await message.channel.send(\"PLEASE MENTION SOMEONE. WHAT ARE THEY GONNA DO, CRY?\")\n return False\n\n reason_length = funny_controls[\"reason_length\"]\n if len(reason := \" \".join(extra_args[1:])) > reason_length:\n await message.channel.send(F\"APOLOGIES, I ONLY STORE DESCRIPTIONS OF UP TO {reason_length} CHARACTERS. 
WELCOME TO TWITTER\")\n return False\n\n if client.get_user(awarder).bot or client.get_user(awardee).bot:\n return False\n\n if awarder == awardee:\n await message.channel.send(\"WHAT ARE YOU, AN EGOMANIAC?\")\n return False\n\n # writing\n if operation == \"add\":\n transaction = 1\n elif operation == \"remove\":\n transaction = -1\n\n @database.query\n def write_entry(conn):\n conn.execute(\"INSERT INTO funnypts VALUES(?, ?, ?, ?, ?)\",\n (awarder, awardee, reason, transaction, datetime.now()))\n conn.commit()\n conn.close()\n\n write_entry()\n return True", "def add(self, character_id, conveyances):\n sql = \"INSERT INTO people_{0} ('people', '{0}') VALUES (?, ?)\".format(\n self.conveyance_type\n )\n\n for conveyance in set(conveyances):\n try:\n if not self._is_conveyance_id_valid(conveyance):\n raise Exception(\n \"The specified %s does not exist: ID: %s\"\n % (self.conveyance_type, conveyance)\n )\n self.cursor.execute(sql, (str(character_id), conveyance))\n except Exception as e:\n raise Exception(\n \"An error occurred while adding a character %s in the database: query: %s - message: %s\"\n % (self.conveyance_type, sql, e)\n )\n\n database_commit(self.connector)", "def registerPlayer(name):\n conn, c = connect()\n c.execute(\"INSERT INTO players (name) VALUES (%s);\", (name,))\n conn.commit()\n conn.close()", "def registerPlayer(name):\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"INSERT INTO players (p_name) VALUES (%s)\", (name,))\n conn.commit()\n conn.close()", "def registerPlayer(name):\n db = connect()\n db_cursor = db.cursor()\n query = \"INSERT INTO players(name) VALUES(%s)\"\n db_cursor.execute(\"INSERT INTO players(name) VALUES(%s)\", (name,))\n db.commit()\n db.close()", "def insert_champion_info(champion_id, key, name, title):\n conn = get_connect()\n cursor = conn.execute(\"SELECT * FROM championInfo where championId = ?\", [champion_id])\n result_list = cursor.fetchall()\n if len(result_list) == 0:\n conn.execute(\"INSERT INTO championInfo \\\n VALUES (?, ?, ?, ?)\", [champion_id, key, name, title])\n print(\"championInfo of \" + str(champion_id) + \" is inserted\")\n else:\n print(\"championInfo of \" + str(champion_id) + \" already exists!\")\n conn.commit()\n conn.close()\n return", "def registerPlayer(name):\n conn, cur = connect()\n query = \"INSERT INTO players (player_name) VALUES (%s);\"\n param = (name,)\n try:\n cur.execute(query, param)\n except:\n print(\"Error encountered when inserting player \" + name + \" into the database\")\n conn.commit()\n conn.close()", "def registerPlayer(name):\n # gets connection to tournament database in conn object\n conn = connect()\n # gets the cursor to execute queries\n c = conn.cursor()\n # executes insert query which takes the name variable passed in arguments\n # of this method and adds a new player record to PLAYER table where the\n # ID is generated automatically for new created record\n c.execute(\"INSERT INTO PLAYER VALUES (DEFAULT, %s)\", (name,))\n # commits the changes performed on PLAYER table\n # after insert statement executes\n conn.commit()\n # closes the connection to tournament database\n conn.close()", "def registerPlayer(name):\n\n if len(name) < 1:\n print \"Player not registered. 
Invalid name or no name given.\"\n else:\n query = \"INSERT INTO players (name) VALUES (%s)\"\n values = (name,)\n results = executeQuery({\n 'dbname': 'tournament', \n 'query' : query, \n 'type' : 'insert', \n 'values' : values\n })", "def add_entry():\n username = util.remove_commas_from_string(request.form[\"name\"])\n link = util.remove_commas_from_string(request.form[\"ytLink\"])\n song = util.remove_commas_from_string(request.form[\"songName\"])\n\n festive = CHRISTMAS_MODE and \"christmasSong\" in request.form\n\n with database.connect_to_database() as db:\n user_id = database.get_userid(db, username)\n database.add_song(db, link, song, user_id, month=12 if festive else None)\n\n return redirect(url_for('main'))", "def add_entry():\n clear()\n name = input(\"Enter Full Name: \")\n task = input(\"Enter a task name: \")\n while True:\n try:\n time = input(\"Enter the minutes (ints only) to complete task: \")\n int(time)\n except ValueError:\n input(\"Be sure you are entering an integer. \")\n else:\n break\n quest = input(\"Would you like to add a note [N/y]: \").upper()\n note = \"\"\n if quest == \"Y\":\n print(\"Enter your note below.\")\n note = input(\":\")\n return db_insert(name, task, time, note)", "async def monsave(self, ctx, *, entry):\r\n\r\n self.connect()\r\n discord_id = str(ctx.message.author.id)\r\n\r\n self.database.entries.insert_one({\r\n \"discord_id\": discord_id,\r\n \"entry\": entry\r\n })\r\n\r\n await ctx.send('You have successfully saved this entry in the Viking database.')", "def insert_champion_data(champion_data):\n conn = get_connect()\n conn.execute(\"DELETE FROM championData WHERE championId = \" + str(champion_data[0]))\n conn.execute(\"INSERT INTO championData \\\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", champion_data)\n conn.commit()\n conn.close()\n print(\"champion_data \" + str(champion_data[0]) + \" is inserted\")\n return", "def add():\n name = request.form['name']\n message = request.form['message']\n\n try:\n newcurs = g.conn.execute(\"\"\"INSERT INTO record\n VALUES (%s, %s );\"\"\", name, message)\n newcurs.close()\n except Exception:\n print \"can not write record to database\"\n return redirect('/error')\n\n return render_template(\"index.html\", **locals())", "def add(self, entry):\n \"An entry is a tuple of (id, datatime, text).\"\n id = entry[0]\n datee = entry[1]\n text = re.sub('[^A-Za-z0-9]+', ' ', entry[2].lower())\n self.recordsDict[id].create(id, datee, entry[2])\n for word in text.split():\n self.wordDict[word].add(id)", "def addbutton_click(self):\n pais = self.ui.qlinepais.text()\n nome = self.ui.qlinenome.text()\n presidente = self.ui.qlinepres.text()\n email_contato = self.ui.qlineemail.text()\n endereco = self.ui.qlineend.text()\n if pais and nome and presidente:\n kwargs = {'pais': \"'\" + pais + \"'\",\n 'nome': \"'\" + nome + \"'\",\n 'presidente': \"'\" + presidente + \"'\"}\n if email_contato:\n kwargs['email_contato'] = \"'\" + email_contato + \"'\"\n if endereco:\n kwargs['endereco'] = \"'\" + endereco + \"'\"\n if(insert('comite', kwargs)):\n self.parent().hide()\n self.parent().parent().setWindowTitle(self.parent().parent().title)\n else:\n showdialog ('Erro', \"Erro na inserção, verifique se o comite já não está cadastrado\");\n \n else:\n showdialog('Erro', 'Os campos país, nome e presidente são obrigatórios')", "def check(what,string):\n if what == \"characters\":\n query = list(engine.execute(f\"SELECT name FROM characters WHERE name = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n 
\n elif what == \"script\":\n query = list(engine.execute(f\"SELECT script_l FROM script WHERE script_l = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n \n elif what == \"episodes\":\n query = list(engine.execute(f\"SELECT episode FROM episodes WHERE episode = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n #extra meme..", "def giveId(what,string):\n if what == \"characters\":\n return list(engine.execute(f\"SELECT char_id FROM characters WHERE name ='{string}';\"))[0][0]\n elif what == \"episodes\":\n return list(engine.execute(f\"SELECT ep_id FROM episodes WHERE episode ='{string}';\"))[0][0]", "def insert_data(wname,uname,pword):\n try:\n cur.execute('INSERT INTO Password(website,username,pass) VALUES (?,?,?)',(wname,uname,pword))\n conn.commit()\n except Exception as e:\n print(e)", "def submitAdoption2db(cursor, dog_id, adoption_fee, adoption_date, application_num):\n query = f\"INSERT INTO Adoption VALUES ({application_num}, {dog_id}, {adoption_fee}, '{adoption_date}');\"\n cursor.execute(query)", "def insert_sentence(group_no, sentence, category):\n\n try:\n #insert the sentence\n database.Sentences.insert_one({'group_no': group_no, 'sentence': sentence, 'category': category})\n return {'status': 1, 'data': 1}\n except Exception as e:\n return {'status': -1, 'data': 'insert_sentence ' + str(e)}", "def registerPlayer(name):\n query = (\"INSERT INTO players(id, name) VALUES (default, %s);\")\n db = connect()\n c = db.cursor()\n c.execute(query, (name,))\n\n db.commit()\n db.close()", "def passenger_insert_query(\n pass_fname, pass_lname, pass_mi, pass_bday, pass_gender\n ):\n q = \"\"\"\n INSERT INTO\n passenger(pass_fname, pass_lname, pass_mi, pass_bday, pass_gender)\n VALUES (%s, %s, %s, %s, %s);\n\n SELECT currval(pg_get_serial_sequence('passenger','pass_id'));\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(\n q,\n (pass_fname, pass_lname, pass_mi, pass_bday, pass_gender)\n )\n pass_id = cursor.fetchone()[0]\n cursor.close()\n return pass_id", "def insert(self, unhealthy_product, name, description, stores, url):\n self.database.query('''INSERT INTO History\n VALUES (NULL,\n NOW(),\n :unhealthy_product,\n :healthy_product,\n :description,\n :stores,\n :url)''',\n unhealthy_product=unhealthy_product,\n healthy_product=name,\n description=description,\n stores=stores,\n url=url)\n print(f'La substitution du produit \"{name}\" a été ajoutée à la table \\\nHistory !', file=open('print_log.txt', 'a'))", "def insert_post(text, sub, sub_id, num_com, up, down, flair, vid, num_awards):\n conn, curs = conn_curs()\n insert = f\"\"\"\n INSERT INTO posts (\n title_selftext, subreddit, subreddit_id,\n num_comments, upvotes, downvotes,\n flair, has_vid, num_awards)\n VALUES ('{text}', '{sub}', '{sub_id}', {num_com}, {up}, {down}, '{flair}', {vid}, {num_awards})\n \"\"\"\n curs.execute(insert)\n conn.commit()\n return", "def add_teacher_data(connection,name,tsc_no,subjects,type_of_teacher):\r\n with connection:\r\n connection.execute(INSERT_TEACHER,(name,tsc_no,subjects,type_of_teacher))", "def insert(title, author, year, isbn,shelf,raw):\n\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n sql=\"INSERT INTO book (title, author, year, isbn,shelf,raw) VALUES(%s, %s, %s, %s, %s, %s)\"\n cur_obj.execute(sql,(title, author, year, isbn,shelf,raw))\n conn_obj.commit()\n conn_obj.close()", "def insertarInforme(self, texto, sesion):\n try:\n cursor = 
self.__conexion.cursor()\n cursor.execute(\"INSERT INTO Informes VALUES(?, ?, ?)\", [\n int(time.time()), texto, sesion])\n self.__conexion.commit()\n cursor.close()\n except sqlite3.Error as error:\n print(\"Error al insertar: \", error)", "def d114():\n tmp = sp.call('clear', shell=True)\n print ()\n print (\"Insert Material (Query)\")\n print ()\n\n try:\n row = {}\n print(\"Enter Material details: \")\n\n while True:\n row[\"Name\"] = input(\"Material Name: \")\n if row[\"Name\"] != '':\n row[\"Name\"] = \"'\" + row[\"Name\"] + \"'\"\n break\n else:\n print(\"Enter a valid input\")\n\n while True:\n row[\"Quantity\"] = input(\"Quantity (integer): \")\n if row[\"Quantity\"] != '':\n try:\n row[\"Quantity\"] = int(row[\"Quantity\"])\n break\n except ValueError:\n print(\"Enter a valid input\")\n else:\n print(\"Enter a valid input\")\n\n while True:\n row[\"Cost\"] = input(\"Cost: \")\n if row[\"Cost\"] != '':\n try:\n row[\"Cost\"] = float(row[\"Cost\"])\n break\n except ValueError:\n print(\"Enter a valid input - float value\")\n\n print()\n query = \"INSERT INTO MATERIAL(Name, Quantity, Cost) VALUES ({}, {}, {})\".format(row[\"Name\"], row[\"Quantity\"], row[\"Cost\"])\n print(query)\n cur.execute(query)\n con.commit()\n print(\"Inserted Into Database\")\n tmp = input(\"Enter any key to CONTINUE>\")\n\n # insert to BELONGS_TO\n tmp = sp.call('clear', shell=True)\n print ()\n print (\"Connect this material to a Wing, Personnel and Vehicle\")\n print ()\n\n isCorrect = False\n while not isCorrect:\n pid = input(\"Enter personnel ID: \")\n cno = input(\"Enter chassis number: \")\n mdl = input(\"Enter model: \")\n mat = row[\"Name\"]\n wng = input(\"Enter wing name: \")\n isCorrect = h1(pid, cno, mdl, mat[1:-1], wng)\n\n print(\"Inserted Into Database\")\n\n except Exception as e:\n con.rollback()\n print(\"Failed to insert material into database\")\n print(\">>\", e)\n\n tmp = input(\"Enter any key to CONTINUE>\")", "def new_event(szene, time, bedingung=\"\", permanent=0):\n con = mdb.connect(constants.sql_.IP, constants.sql_.USER, constants.sql_.PASS,\n constants.sql_.DB)\n with con:\n cur = con.cursor()\n value_string = '\"%s\", \"%s\", \"%s\", \"%s\"' % (szene, str(time), str(bedingung),\n str(permanent))\n insertstatement = ('INSERT INTO %s (Szene, Time, Bedingung, permanent) VALUES(%s)' %\n (constants.sql_tables.cron.name, value_string))\n cur.execute(insertstatement)\n con.close", "def insertData(table, column, input):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"INSERT INTO '\" + table + \"' (\" + column + \") VALUES ('\" + input + \"')\")\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function insertData from DbController')", "def insertData(self):\n Walzenlage = self.ranRotorOrder() #Obtains a random rotor order\n Ringstellung = self.ranRingSetting() #Obtains s series or random ring settings\n Steckerverbindungen = self.ranPlugboard() #Obtains random plugboard pairs\n Kenngruppen = self.ranCharGroup() #Obtains 4 groups of characters for the Kenngruppen\n \n Datum = 31 #Begins the data entry at day 31\n while Datum != 0: \n self.c.execute(\"INSERT INTO Enigma(Datum, Walzenlage, Ringstellung, Steckerverbindungen, Kenngruppen) VALUES (?, ?, ?, ?, ?)\",\n (int(Datum), str(Walzenlage), str(Ringstellung), str(Steckerverbindungen), str(Kenngruppen))) #Inserts the entries generated above into the database\n Walzenlage = self.ranRotorOrder() #Generates new random rotor order\n Ringstellung = self.ranRingSetting() 
#Generates new random ring settings\n Steckerverbindungen = self.ranPlugboard() #Generates new random plugboard pairs\n Kenngruppen = self.ranCharGroup() #Generates 4 new groups of characters for the Kenngruppen\n Datum -= 1 #Decreases the day by one for the next record entry\n \n self.conn.commit() #Commits all the changes to the database", "def registerPlayer(name):\n DB = dbc()\n DB.cursor().execute(('INSERT INTO players (name)\\\n VALUES (%s)'), (name,))\n DB.commit()\n DB.close()", "def test_char(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_char')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_char ' \\\n '( value CHAR(255) NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_char VALUES (%s)'\n for i in range(100):\n item = random_string(255)\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_char'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n padded = item + ((255-len(item)) * ' ')\n assert isinstance(item, unicode)\n assert item in data or padded in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_char')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_char')\n cursor.execute(query)\n conn.commit()", "def test_insert(self):\n db=Database(\"test.db\")\n db.query(\"insert into game (user_a, user_b, winner, board) values('a', 'b', 'sinner', 'asdf');\");\n self.assertEqual(len(db.query(\"select * from game\"))>=1, True)", "def add_to_database():\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Adress, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\",(Naam, Achternaam, Adress, FietsNr, PIN))\n\n db_conn.commit()", "def insert_row(table_str, attribute_value_dict): #works\n sql = make_insert_row(table_str, attribute_value_dict)\n #print sql\n execute_edit_queries(sql)", "def insert_into_reading():\n insert_stmt = \"('%s', '%s', '{}')\"\n k = 0 \n for i in range(100): \n stmt = \"INSERT INTO readings(asset_code, read_key, reading) VALUES\" \n for j in range(10): \n if j == 9: \n stmt = stmt + \" \" + insert_stmt % (str(k), uuid.uuid4()) + \";\" \n else: \n stmt = stmt + \" \" + insert_stmt % (str(k), uuid.uuid4()) + \", \"\n k =+ 1\n execute_command(stmt)", "def add():\r\n ch = input('You are about to ADD an entry. If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter info for the following fields...\\n')\r\n xln = re.sub(r'\\s', '', str(input('Last name?\\n')).lower().capitalize()) # lower, cap first, remove whitespace\r\n xfn = re.sub(r'\\s', '', str(input('First name?\\n')).lower().capitalize())\r\n\r\n if search2(xln, xfn): # search if an entry already exists for user's input\r\n print('An entry already exists for', xfn, xln, end='. Please enter another entry.\\n')\r\n return add() # if an entry already exists make user enter another\r\n\r\n xgr = None\r\n try: # try except user's inputted grade\r\n xgr = int(input('Grade?\\n'))\r\n xgrs = [8, 9, 10, 11, 12, 13]\r\n\r\n xgr = check_int(xgr, xgrs)\r\n except ValueError:\r\n print('You did not enter an applicable grade. Please enter another value.')\r\n add()\r\n\r\n xsr = str(input('Stream? (eg. Academic, IB, etc...)\\n')).lower().capitalize()\r\n xrl = str(input('Role? (eg. 
Design Member)\\n')).lower().capitalize()\r\n xcm = str(input('Any comments?\\n')).lower().capitalize()\r\n\r\n ch2 = input('Are you sure you wish to add this individual to the database? YES or NO?\\n')\r\n if y_n(ch2):\r\n print(xfn, xln, 'has been added to the database.')\r\n with conn: # input corresponding info to table with context manager\r\n c.execute(\"\"\"INSERT INTO personnel VALUES (\r\n :last, :first, :grade, :stream, :role, :comments)\"\"\",\r\n {'last': xln, 'first': xfn, 'grade': xgr, 'stream': xsr, 'role': xrl, 'comments': xcm})\r\n\r\n start() # after user's action has been completed, ask for another\r\n else:\r\n print('Your add action has been cancelled.')\r\n start()\r\n else: # ask for another if user wishes to perform another action\r\n start()", "def insert():\n new_text = request.json\n text = TextModel(new_text)\n text.validate()\n unique_fields = [{\"key\"}]\n repository.insert_one_unique_fields(COLLECTION_NAME, text.to_dict(), unique_fields)\n return {\"message\": \"success!\"}, 201", "def help_insert(self):\n print(INSERT)", "def insert_in_favourite(self, food_id, substitute_id):\n\n ref = (food_id, substitute_id)\n print(\"\"\"\\n Souhaitez-vous ajouter cette recherche dans vos favoris ?\n 1. Oui\n 0. Non \"\"\")\n\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice == 1:\n self.cursor.execute(\"\"\"INSERT INTO favourite\n (food_id, substitute_id)\n VALUES (%s, %s)\"\"\", ref)\n else:\n return", "def insert_aux(n, actual, e, f, prio):\n\tpoyo_datos = \"\"\"\n\t\t\t\tSELECT pokedex, type1, type2, hptotal, legendary \n\t\t\t\tFROM poyo\n\t\t\t\tWHERE nombre = :1\"\"\"\n\tcur.execute(poyo_datos, [n])\n\t# Lista con tupla [(pokedex, tipo1, tipo2, hptotal, legendary)]\n\tdata_poyo = cur.fetchall()\n\tdata_poyo = data_poyo[0]\n\tpokedex, t1, t2, total, l = data_poyo\n\tins_query = \"\"\"\n\t\t\t\tINSERT INTO sansanito (pokedex, nombre, type1, type2,\\\n\t\t\t\thpactual, hpmax, legendary, estado, ingreso, prioridad)\n\t\t\t\tVALUES (:1, :2, :3, :4, :5, :6, :7, :8, to_date(:9, 'DD/MM/YY HH24:MI'), :10)\"\"\" \n\t\t\t\t\n\tcur.execute(ins_query, [pokedex, n, t1, t2, actual, total, l, e, f, prio])", "def insertQuery1(self,nome,descr,censimento):\r\n\t\tq=Query(nome,descr,censimento)\r\n\t\tself.session.add(q)\r\n\t\tself.session.commit()", "def registerPlayer(name):\n conn = connect()\n c = conn.cursor()\n # Inserts a players name into the \"players\" table.\n c.execute(\"INSERT INTO players (name) VALUES (%s);\", (str(name), ))\n conn.commit()", "def execute_insert(self,insert):\n try:\n self.cursor.execute(insert)\n self.connection.commit()\n except Exception as error:\n self.connection.rollback()\n raise error", "def registerPlayer(name):\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"INSERT INTO players (name) VALUES (%s);\"\"\", (name,))\n conn.commit()", "def insert_habit():\n analytics.insert_habit('Play Piano', 'daily', 'Learn more songs', 'Minimum one hour')", "def registerPlayer(name):\n conn, c = connect()\n q = \"INSERT INTO PLAYERS VALUES (default, %s);\"\n data = (name,)\n c.execute(q, data)\n conn.commit()\n conn.close()", "def insert_talk(request):\n try:\n is_loggedin, username = get_session_variables(request)\n # User is not logged in\n if not logged_in(request):\n return HttpResponseRedirect('/register/login')\n\n # User is logged in\n else:\n if request.method == 'POST':\n form = AddSpeakerForm(request.POST)\n\n # Invalid form imput\n if not form.is_valid():\n error = \"Invalid inputs\"\n return 
render_to_response('achievement/new_speaker.html', \\\n {'form':form, \\\n 'error':error, \\\n 'is_loggedin':is_loggedin, \\\n 'username':username}, \\\n RequestContext(request))\n\n # Form is valid\n else:\n # Get the new achievement_id\n achievement_id = get_achievement_id(request)\t\n achievement_type = \"Speaker\"\n\n # Saving inputs\n achievement_obj = Achievement(achievement_id, \\\n achievement_type, \\\n username)\n achievement_obj.save()\n contribution_obj = form.save(commit = False)\n contribution_obj.achievement_id = achievement_obj\n contribution_obj.achieve_typ = achievement_type\n user_obj = get_object_or_404(User_info, username = username)\n contribution_obj.username = user_obj\n contribution_obj.save()\n return render_to_response('achievement/success.html', \\\n {'achievement_type':achievement_type, \\\n 'is_loggedin':is_loggedin, \\\n 'username':username}, \\\n RequestContext(request))\n # Method is not POST\n else:\n return render_to_response('achievement/new_speaker.html', \\\n {'form': AddSpeakerForm, \\\n 'is_loggedin':is_loggedin, \\\n 'username':username}, \\\n RequestContext(request))\n except KeyError:\n return error_key(request)", "def ask_question():\n title_question = request.form.get(\"title\")\n question = request.form.get(\"question\")\n\n date_string = datetime.today().strftime('%Y-%m-%d')\n \n ask = Question(user_id = session[\"user_id\"],question_created=date_string, title_question = title_question, question = question)\n\n db.session.add(ask)\n db.session.commit()\n\n return \"question added\"", "def handle_characters(curs, collection):\n character_list = curs.execute(\"\"\"SELECT * FROM charactercreator_character;\"\"\")\n for character in character_list:\n _, sl_curs = connect_to_sldb() # need to create a different cursor because the main one still \n # running and it will close the whole thing before it loop\n # item_list = sl_curs.execute(\n # f\"\"\"SELECT ai.name FROM charactercreator_character_inventory as cii\n # LEFT JOIN armory_item as ai\n # ON cii.item_id = ai.item_id\n # WHERE character_id={character[0]};\n # \"\"\")\n inventory = sl_curs.execute(\n f\"\"\"SELECT name, item_ptr_id\n FROM\n (SELECT * FROM charactercreator_character_inventory as cii\n LEFT JOIN armory_item as ai\n ON cii.item_id = ai.item_id) as a\n LEFT JOIN armory_weapon as aw\n ON a.item_id=aw.item_ptr_id\n WHERE character_id={character[0]};\n \"\"\").fetchall()\n\n character_doc = {\n \"name\": character[1],\n \"level\": character[2],\n \"exp\": character[3],\n \"hp\": character[4],\n \"strength\": character[5],\n \"intelligence\": character[6],\n \"dexterity\": character[7],\n \"wisdom\": character[8],\n \"items\": [item[0] for item in inventory],\n \"weapons\": [item[0] for item in inventory if item[1] != None]\n }\n sl_curs.close() # close that new cursor\n collection.insert_one(character_doc)\n\n\n # # A codier way to do it\n # schema = curs.execute(\n # \"PRAGMA table_info(charactercreator_character)\").fetchall()[1:]\n # for character in characters_list:\n # character_doc = {}\n # for index, item_tuple in enumerate(schema):\n # character_doc[item_tuple[1]] = character[index + 1]\n\n # collection.insert_one(character_doc)", "def post_game_message(character):\n\n if character[4] == 3:\n\n print(\"\\nCongratulations! You have passed the trial, honouring your village's tradition!\")\n\n elif character[4] == 10:\n\n print(\"\\nThank you for playing, we hope to see you again!\")\n\n else:\n\n print(\"\\nYou were slain! 
Game Over!\")", "def add_account(self, log, pword):\r\n #Placeholder : insert variables in sqlite3\r\n self.curs.execute(f\"\"\"INSERT INTO main_table VALUES (?, ?)\"\"\", (log, pword))\r\n self.conn.commit()", "def try_ask_save() -> None:\n\n if acc.savek == '':\n # Do not inherently redefine save phrase\n val = random.random()\n if val < 0.95:\n # 95% chance to ask for save phrase\n val = input(\"What should I do with that?\")\n\n if line_valid(val):\n # Do not use empty or single space lines as a save phrase\n save_phrase = val\n post_query(\"I will \\'\" + save_phrase + \"\\' to keep new information\")\n else:\n # Notify the user of their error\n post_query(\"That does not make sense\")", "def insert_champion_match_data(champion_match_data):\n conn = get_connect()\n cursor = conn.execute(\"SELECT * FROM championMatchData where matchId = ? AND championId = ?\",\n [champion_match_data[0], champion_match_data[1]])\n result_list = cursor.fetchall()\n if len(result_list) == 0:\n conn.execute(\"INSERT INTO championMatchData \\\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", champion_match_data)\n print(\"champion_match_data (\" + str(champion_match_data[0]) + \",\" + str(champion_match_data[1]) + \") is inserted\")\n else:\n print(\"champion_match_data \" + str(champion_match_data[0]) + \",\" + str(champion_match_data[1]) + \" already exists!\")\n conn.commit()\n conn.close()\n return" ]
[ "0.68971574", "0.6643312", "0.6609034", "0.63286656", "0.62008613", "0.596312", "0.5952589", "0.5870909", "0.58442897", "0.5829052", "0.58106846", "0.58059555", "0.57868826", "0.57820517", "0.5769277", "0.56395286", "0.5630925", "0.56198794", "0.5592354", "0.55757797", "0.5517927", "0.550607", "0.55005515", "0.5493217", "0.54873043", "0.5483246", "0.5479291", "0.54688966", "0.5468399", "0.545576", "0.5445361", "0.54393893", "0.5437158", "0.5430659", "0.54293525", "0.54216397", "0.54216397", "0.5408965", "0.54088575", "0.5408409", "0.54050964", "0.54010594", "0.539875", "0.5397532", "0.5395165", "0.5393252", "0.53880244", "0.53853256", "0.5380401", "0.5379501", "0.53716207", "0.53516287", "0.53475374", "0.5343219", "0.53344816", "0.53251815", "0.53199303", "0.5319879", "0.52915055", "0.5289459", "0.52872956", "0.52812755", "0.5262803", "0.526181", "0.52503765", "0.52488256", "0.5241431", "0.52395636", "0.52357304", "0.52269566", "0.522417", "0.5220359", "0.52195233", "0.5215884", "0.5210712", "0.5210356", "0.52095497", "0.5209094", "0.5207945", "0.51868397", "0.518131", "0.5177196", "0.51727426", "0.5168186", "0.5163897", "0.5162268", "0.5157696", "0.51511353", "0.514965", "0.5144123", "0.5124555", "0.51227146", "0.51164526", "0.5115954", "0.5111999", "0.510368", "0.50935876", "0.50821936", "0.50781727", "0.5076076" ]
0.6655293
1
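The two arrays above run in parallel: negative_scores[i] is the retrieval score for negatives[i], serialized as a string, while the two bare values just above (0.6655293 and 1) are the paired document's own score and rank. A minimal sketch for inspecting the hardest (highest-scoring) negatives per record — the records.jsonl file name and one-JSON-object-per-line layout are assumptions, not something the dump specifies:

import json

def hardest_negatives(record, k=3):
    # Pair each negative snippet with its score; scores are stored as strings.
    pairs = zip(record["negatives"], (float(s) for s in record["negative_scores"]))
    # The highest-scoring negatives sit closest to the query in embedding space.
    return sorted(pairs, key=lambda p: p[1], reverse=True)[:k]

with open("records.jsonl") as fh:  # assumed file name and format
    for line in fh:
        record = json.loads(line)
        for snippet, score in hardest_negatives(record):
            print(f"{score:.4f}  {snippet.splitlines()[0][:60]}")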
Creates a new SnakemakeRule instance from a dict representation
def __init__(
    self, rule_id, parent_id, input, output, local=False, template=None, **kwargs
):
    self.rule_id = rule_id
    self.parent_id = parent_id
    self.input = input
    self.output = output
    self.local = local
    self.template = template
    self.params = kwargs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def from_dict(self, d):\r\n options = dict(d)\r\n task_id = options['task_id']\r\n del options['task_id']\r\n return SubtaskStatus.create(task_id, **options)", "def from_dict(cls, data):\n return cls(**data)", "def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n alias = dictionary.get(\"alias\")\r\n cnam_lookups_enabled = dictionary.get(\"cnam_lookups_enabled\")\r\n number_type = dictionary.get(\"number_type\")\r\n rate_center = dictionary.get(\"rate_center\")\r\n state = dictionary.get(\"state\")\r\n value = dictionary.get(\"value\")\r\n\r\n # Return an object of this model\r\n return cls(alias,\r\n cnam_lookups_enabled,\r\n number_type,\r\n rate_center,\r\n state,\r\n value)", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return 
cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def from_dict(cls, d: dict):\n scen = None\n try:\n scen = DTScenario(d['name'])\n for t in d['tasks']:\n scen.addTask(dtTaskTypeDict['cls'][t['class']], t['parameters'])\n except KeyError:\n scen = None\n raise DTInternalError('DTScenario.fromDict()', 'Wrong dict format')\n return scen", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def from_dict(eventScheduleDict):\n pass", "def from_dict(cls, d):\n return cls(**d)", "def from_dict(cls, dikt) -> 'ShardingDescriptor':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n hostname = d.get('hostname')\n project = d.get('project')\n treeish = d.get('treeish')\n path = d.get('path')\n _validate_args(\n hostname,\n project,\n treeish,\n path,\n path_required=True)\n return cls(hostname, project, treeish, path)", "def from_dict(cls, dct):\n if dct.pop('type') != cls.__name__:\n fmt = 'Can not construct Note from dict %s'\n raise ValueError(fmt % dct)\n\n return cls(**dct)", "def from_dict(cls, dikt) -> 'ProductionFlowItem':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n links = dictionary.get('links')\r\n email_config = dictionary.get('emailConfig')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(links,\r\n email_config,\r\n dictionary)", "def from_dict(cls, inp):\n return cls(**{k: v for k, v in inp.items() if k != '__class__'})", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n continue_on_error = dictionary.get('continueOnError')\n is_active = dictionary.get('isActive')\n script_params = dictionary.get('scriptParams')\n script_path = dictionary.get('scriptPath')\n timeout_secs = dictionary.get('timeoutSecs')\n\n # Return an object of this model\n return cls(\n continue_on_error,\n is_active,\n script_params,\n script_path,\n timeout_secs\n)", "def _from_normalised_dict(cls, dictionary):\n if 'cvarsort' in dictionary and dictionary['cvarsort'] != cls.cvarsort:\n raise PydmrsValueError('{} must have cvarsort {}, not {}'.format(cls.__name__,\n cls.cvarsort,\n dictionary['cvarsort']))\n return cls(**{key:value for key, value in dictionary.items() if key != 'cvarsort'})", "def _from_string(cls, serialized):\r\n parse = cls.URL_RE.match(serialized)\r\n if not parse:\r\n raise InvalidKeyError(cls, serialized)\r\n\r\n parse = parse.groupdict()\r\n if parse['definition_id']:\r\n parse['definition_id'] = cls.as_object_id(parse['definition_id'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})", "def from_dict(cls, dikt) -> 'AssetPropertyValueHistoryRequest':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n assert \"status\" in d\n assert \"metadata\" in d\n return cls(**d)", "def from_dict(cls, dikt) -> 'Story':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Problem':\n return deserialize_model(dikt, 
cls)", "def from_dict(cls, dikt) -> 'Failure':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'StartConfiguration':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n lan_ip = dictionary.get('lanIp')\r\n uplink = dictionary.get('uplink')\r\n public_port = dictionary.get('publicPort')\r\n local_port = dictionary.get('localPort')\r\n allowed_ips = dictionary.get('allowedIps')\r\n protocol = dictionary.get('protocol')\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n lan_ip,\r\n uplink,\r\n public_port,\r\n local_port,\r\n allowed_ips,\r\n protocol)", "def from_dict(cls, dikt) -> 'Task':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, data):\n requirements = {k: v for k, v in data['params']['requirements'].items()}\n return cls(requirements)", "def from_dict(cls, dct):\n return cls(**dct)", "def from_dict(cls, dikt) -> 'SourceAudit':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'POSTExecution':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n address = dictionary.get('address')\n port = dictionary.get('port')\n protocol = dictionary.get('protocol')\n is_cluster_auditing_enabled = dictionary.get('isClusterAuditingEnabled')\n is_data_protection_enabled = dictionary.get('isDataProtectionEnabled')\n is_filer_auditing_enabled = dictionary.get('isFilerAuditingEnabled')\n is_ssh_log_enabled = dictionary.get('isSshLogEnabled')\n name = dictionary.get('name')\n\n # Return an object of this model\n return cls(address,\n port,\n protocol,\n is_cluster_auditing_enabled,\n is_data_protection_enabled,\n is_filer_auditing_enabled,\n is_ssh_log_enabled,\n name)", "def create_snat_rule(self, **attrs):\n return self._create(_snat.Rule, **attrs)", "def from_dict(cls, d):\n return cls(d[\"sequence\"], immutable_bounds=d[\"immutable_bounds\"])", "def from_dict(cls, dct):\n dct['address'] = Address(**dct['address'])\n return cls(**dct)", "def from_dict(cls, dikt) -> 'OneOffSchedule':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, obj: dict) -> FormatTest:\n if obj is None:\n return None\n\n if not isinstance(obj, dict):\n return FormatTest.parse_obj(obj)\n\n _obj = FormatTest.parse_obj({\n \"integer\": obj.get(\"integer\"),\n \"int32\": obj.get(\"int32\"),\n \"int64\": obj.get(\"int64\"),\n \"number\": obj.get(\"number\"),\n \"float\": obj.get(\"float\"),\n \"double\": obj.get(\"double\"),\n \"decimal\": obj.get(\"decimal\"),\n \"string\": obj.get(\"string\"),\n \"string_with_double_quote_pattern\": obj.get(\"string_with_double_quote_pattern\"),\n \"byte\": obj.get(\"byte\"),\n \"binary\": obj.get(\"binary\"),\n \"var_date\": obj.get(\"date\"),\n \"date_time\": obj.get(\"dateTime\"),\n \"uuid\": obj.get(\"uuid\"),\n \"password\": obj.get(\"password\"),\n \"pattern_with_digits\": obj.get(\"pattern_with_digits\"),\n \"pattern_with_digits_and_delimiter\": obj.get(\"pattern_with_digits_and_delimiter\")\n })\n return _obj", "def from_dict(self, data: dict):\n if 'title' in data:\n self.title = data['title']\n if 'description' in data:\n self.description = data['description']\n if 'deadline' in data:\n self.deadline = parser.parse(data['deadline'])\n return", "def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n 
constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )", "def from_dict(cls, name, data):\n item = cls(name)\n\n item.description = data.get(\"description\", \"\")\n item.difficulty = data.get(\"difficulty\", 0)\n\n item.prerequisites = data.get(\"prerequisites\", {})\n item.prerequisites[\"items\"] = to_list(item.prerequisites.get(\"items\"))\n item.prerequisites[\"research\"] = to_list(item.prerequisites.get(\"research\"))\n item.prerequisites[\"triggers\"] = to_list(item.prerequisites.get(\"triggers\"))\n item.cost = data.get(\"cost\", {})\n item.strings = data.get(\"strings\", {})\n item.effects = data.get(\"effects\", {})\n for effect in (\n \"enable_commands\",\n \"enable_items\",\n \"enable_resources\",\n \"events\",\n \"triggers\",\n ):\n item.effects[effect] = to_list(item.effects.get(effect))\n return item", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n alias_name = dictionary.get('aliasName')\n client_subnet_whitelist = None\n if dictionary.get('clientSubnetWhitelist') != None:\n client_subnet_whitelist = list()\n for structure in dictionary.get('clientSubnetWhitelist'):\n client_subnet_whitelist.append(cohesity_management_sdk.models.cluster_config_proto_subnet.ClusterConfigProtoSubnet.from_dictionary(structure))\n smb_config = cohesity_management_sdk.models.alias_smb_config.AliasSmbConfig.from_dictionary(dictionary.get('smbConfig')) if dictionary.get('smbConfig') else None\n view_path = dictionary.get('viewPath')\n\n # Return an object of this model\n return cls(alias_name,\n client_subnet_whitelist,\n smb_config,\n view_path)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n scheduling = meraki.models.scheduling_model.SchedulingModel.from_dictionary(dictionary.get('scheduling')) if dictionary.get('scheduling') else None\r\n bandwidth = meraki.models.bandwidth_model.BandwidthModel.from_dictionary(dictionary.get('bandwidth')) if dictionary.get('bandwidth') else None\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n scheduling,\r\n bandwidth,\r\n dictionary)", "def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def from_dict(cls, dict_object):\n\n return cls(**dict_object)", "def from_dict(cls, data):\n return cls(\n filter_id=data[\"Filter\"],\n name=data[\"Name\"],\n admin=data[\"Admin\"],\n action=data[\"Action\"],\n input_port=data[\"Input\"],\n output_port=data[\"Output\"],\n classifiers=data[\"Classifiers\"],\n packet_processing=data[\"Packet Processing\"],\n )", "def __init__(self, rulespath=None):\n if rulespath is None:\n rulespath = path.join(path.dirname(path.realpath(__file__)), 'pattern_sentence_filter.yaml')\n\n self.rulespath = rulespath\n self.rules = Rules(yaml.safe_load(open(rulespath)))", "def from_dict(cls, dikt) -> 'Expression':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Sitemap':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary 
is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n to = dictionary.get('to')\r\n application_id = dictionary.get('applicationId')\r\n expiration_time_in_minutes = dictionary.get('expirationTimeInMinutes')\r\n code = dictionary.get('code')\r\n scope = dictionary.get('scope')\r\n\r\n # Return an object of this model\r\n return cls(to,\r\n application_id,\r\n expiration_time_in_minutes,\r\n code,\r\n scope)", "def from_dict(cls, dikt) -> \"Scheduler\":\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, obj):\n cls._check_keys(obj)\n return cls(**obj)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n alternate_restore_base_directory = dictionary.get('alternateRestoreBaseDirectory')\n continue_on_error = dictionary.get('continueOnError')\n encryption_enabled = dictionary.get('encryptionEnabled')\n generate_ssh_keys = dictionary.get('generateSshKeys')\n override_originals = dictionary.get('overrideOriginals')\n preserve_acls = dictionary.get('preserveAcls')\n preserve_attributes = dictionary.get('preserveAttributes')\n preserve_timestamps = dictionary.get('preserveTimestamps')\n restore_entities = dictionary.get('restoreEntities')\n restore_to_original_paths = dictionary.get('restoreToOriginalPaths')\n save_success_files = dictionary.get('saveSuccessFiles')\n skip_estimation = dictionary.get('skipEstimation')\n\n # Return an object of this model\n return cls(\n alternate_restore_base_directory,\n continue_on_error,\n encryption_enabled,\n generate_ssh_keys,\n override_originals,\n preserve_acls,\n preserve_attributes,\n preserve_timestamps,\n restore_entities,\n restore_to_original_paths,\n save_success_files,\n skip_estimation\n)", "def from_dict(cls, d):\n d = d.copy()\n if \"length\" in d:\n # length argument removed in version 1.1.0\n del d[\"length\"]\n return cls(**d)" ]
[ "0.61375326", "0.60902405", "0.60286725", "0.60125345", "0.597105", "0.5911886", "0.5895749", "0.5895749", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5874035", "0.58442324", "0.581522", "0.5809438", "0.5804045", "0.5784471", "0.57791626", "0.5776851", "0.5766595", "0.5756868", "0.57540387", "0.574283", "0.5734516", "0.57139975", "0.5710597", "0.56893325", "0.56814414", "0.5676567", "0.5675981", "0.56747025", "0.56696045", "0.5664665", "0.5663855", "0.56278855", "0.56240016", "0.5611198", "0.56104", "0.56060696", "0.5602196", "0.5601705", "0.55800784", "0.55754584", "0.5568526", "0.55666053", "0.5557283", "0.55520517", "0.5527106", "0.5527106", "0.5526382", "0.550478", "0.5492428", "0.5487735", "0.5487285", "0.5486449", "0.54820335", "0.54794806", "0.5473134", "0.5471022" ]
0.0
-1
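This record pairs the from_dict query with the class constructor rather than an actual from_dict method, which is presumably why the document scores 0.0 with rank -1 (the paired document was not retrieved as a match). For reference, a minimal sketch of the helper the query describes, built on the __init__ signature shown in the document field above; the from_dict classmethod itself is hypothetical, not part of the dataset:

class SnakemakeRule:
    def __init__(
        self, rule_id, parent_id, input, output, local=False, template=None, **kwargs
    ):
        self.rule_id = rule_id
        self.parent_id = parent_id
        self.input = input
        self.output = output
        self.local = local
        self.template = template
        self.params = kwargs

    @classmethod
    def from_dict(cls, rule_id, config):
        # Hypothetical: build a rule from a plain dict representation.
        cfg = dict(config)  # avoid mutating the caller's dict
        return cls(
            rule_id,
            cfg.pop("parent_id", None),
            cfg.pop("input", None),
            cfg.pop("output", None),
            local=cfg.pop("local", False),
            template=cfg.pop("template", None),
            **cfg,  # any remaining keys land in **kwargs -> self.params
        )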
Prints a string representation of a SnakemakeRule instance
def __repr__(self):
    template = """
    SnakemakeRule ({})

    - parent_id : {}
    - input : {}
    - output : {}
    - local : {}
    - template : {}
    - params : {}
    """

    return template.format(
        self.rule_id,
        self.parent_id,
        self.input,
        self.output,
        self.local,
        self.template,
        self.params,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return \"[ %s ]\" % str(self.__rule)", "def __str__(self):\n return \"{ %s }\" % str(self.__rule)", "def __str__(self):\n return \"{ %s }1\" % str(self.__rule)", "def print_rules(self):\n for idx, r in enumerate(self.rules):\n print(idx, \"=>\", r.__repr__())", "def __str__ (self) :\n\t\ttext_rule = \"\"\n\t\t\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\ttext_rule += \"\\nRULE \" + key + \" = [\\n\\t\"\n\t\t\trule_in_a_line = []\n\t\t\tfor rule in rules :\n\t\t\t\t#rule_in_a_line.append(\" + \".join([r.val+\"(\"+r.type+\")\" for r in rule]))\n\t\t\t\trule_in_a_line.append(\" + \".join([r.__str__() for r in rule]))\n\t\t\ttext_rule += \"\\n\\t\".join(rule_in_a_line) + \"\\n]\"\n\t\ttext_rule += \"\\n\\n\"\n\t\t\n\t\ttext_rule += \"LABELS = \" + json.dumps (self.labels, indent=2) + '\\n\\n'\n\n\t\ttext_rule += \"STRUCT = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join([\n\t\t\t\t\"\\t{} : {{\\n\\t\\t{}\\n\\t}}\\n\".format (\n\t\t\t\t\tkey, \", \\n\\t\\t\".join(val)\n\t\t\t\t) for key, val in self.keeper.items()\n\t\t\t])\n\t\t)\n\t\ttext_rule += \"STRNODE = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join(self.strnodes)\n\t\t)\n\t\tfor regex, label in self.tokens :\n\t\t\ttext_rule += \"TOKEN \" + label + \" = regex('\" + regex + \"')\\n\"\n\n\t\treturn text_rule", "def view_rule(self, rule_name):\n\n assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'\n print(self.rule_source[rule_name])", "def __str__(self):\n\n ret = ''\n for rule in self.rules:\n ret += str(rule) + '\\n'\n ret += 'IF TRUE THEN {0}'.format(self.default)\n\n return ret", "def get_formatted_rule(rule=None):\r\n rule = rule or {}\r\n return ('action: %s\\n'\r\n 'protocol: %s\\n'\r\n 'source_ip_address: %s\\n'\r\n 'source_ip_subnet_mask: %s\\n'\r\n 'destination_ip_address: %s\\n'\r\n 'destination_ip_subnet_mask: %s\\n'\r\n 'destination_port_range_start: %s\\n'\r\n 'destination_port_range_end: %s\\n'\r\n 'version: %s\\n'\r\n % (rule.get('action', 'permit'),\r\n rule.get('protocol', 'tcp'),\r\n rule.get('sourceIpAddress', 'any'),\r\n rule.get('sourceIpSubnetMask', '255.255.255.255'),\r\n rule.get('destinationIpAddress', 'any'),\r\n rule.get('destinationIpSubnetMask', '255.255.255.255'),\r\n rule.get('destinationPortRangeStart', 1),\r\n rule.get('destinationPortRangeEnd', 1),\r\n rule.get('version', 4)))", "def __str__(self):\n return \"(%s)\" % ' '.join(map(str, self.__subrules))", "def fmt_rule(rule: Callable, *, gets: Optional[List[Tuple[str, str]]] = None) -> str:\n type_hints = get_type_hints(rule)\n product = type_hints.pop(\"return\").__name__\n params = \", \".join(t.__name__ for t in type_hints.values())\n gets_str = \"\"\n if gets:\n get_members = \", \".join(\n f\"Get[{product_subject_pair[0]}]({product_subject_pair[1]})\"\n for product_subject_pair in gets\n )\n gets_str = f\", gets=[{get_members}]\"\n return f\"@rule({fmt_rust_function(rule)}({params}) -> {product}{gets_str})\"", "def rule_to_str(self, t):\r\n\r\n if(t[0] == TERMINAL):\r\n return self.terminal_to_str(t[1])\r\n else:\r\n return toRuleString[t[1]]", "def pretty_str(rule,print_option=PrintOption()):\n if rule.is_terminal() or rule.is_empty():\n content = str(rule)\n if print_option.bikeshed:\n return \"`{}`\".format(content)\n return content\n if rule.is_symbol_name():\n name = rule.content\n def with_meta(phrase,metachar,print_option):\n content = \" \".join([x.pretty_str(print_option) for x in phrase])\n if len(phrase) > 1:\n return \"( {} ){}\".format(content, metachar)\n return \"{} 
{}\".format(content, metachar)\n if name in print_option.replace_with_starred:\n phrase = print_option.replace_with_starred[name]\n return with_meta(phrase,'*',print_option)\n if name in print_option.replace_with_optional:\n phrase = print_option.replace_with_optional[name]\n return with_meta(phrase,'?',print_option)\n if name in print_option.replace_with_nested:\n po = print_option.clone()\n po.multi_line_choice = False\n content = po.replace_with_nested[name].pretty_str(po)\n return \"( {} )\".format(content)\n if print_option.inline_synthetic and name.find(\"/\") >=0:\n po = print_option.clone()\n po.multi_line_choice = False\n content = po.grammar.rules[name].pretty_str(po)\n return \"( {} )\".format(content)\n\n # Print ourselves\n if print_option.bikeshed:\n context = 'recursive descent syntax'\n g = print_option.grammar\n if g.rules[name].is_token():\n context = 'syntax'\n if name in g.extra_externals:\n context = 'syntax_sym'\n if name == '_disambiguate_template':\n # This is an implementation detail, so make it invisible.\n return ''\n else:\n without_underscore = ['_less_than',\n '_less_than_equal',\n '_greater_than',\n '_greater_than_equal',\n '_shift_left',\n '_shift_left_assign',\n '_shift_right',\n '_shift_right_assign']\n if name in without_underscore:\n name = name[1:]\n return \"[={}/{}=]\".format(context,name)\n return name\n if isinstance(rule,Choice):\n parts = [i.pretty_str(print_option) for i in rule]\n if print_option.multi_line_choice:\n parts.sort()\n\n if print_option.multi_line_choice:\n if print_option.bikeshed:\n nl = \"\\n\\n\"\n prefixer = \"\\n | \"\n else:\n nl = \"\\n\"\n prefixer = \"\\n \"\n else:\n nl = \"\"\n prefixer = \"\"\n joiner = nl + \" | \"\n inside = prefixer + joiner.join([p for p in parts])\n if print_option.is_canonical:\n return inside\n else:\n # If it's not canonical, then it can have nesting.\n return \"(\" + inside + nl + \")\"\n if isinstance(rule,Seq):\n return \" \".join(filter(lambda i: len(i)>0, [i.pretty_str(print_option) for i in rule]))\n if isinstance(rule,Repeat1):\n return \"( \" + \"\".join([i.pretty_str(print_option) for i in rule]) + \" )+\"\n raise RuntimeError(\"unexpected node: {}\".format(str(rule)))", "def __str__(self):\n return \"(%s)\" % ' | '.join(map(str, self.__subrules))", "def rule_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"rule_name\")", "def test_rule_representation():\n rule = MethodRule(method=\"POST\")\n assert repr(rule) == \"MethodRule(method='POST')\", \"Wrong representation\"", "def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)", "def __str__(self):\n s = \"\"\n s += self.synset.name + \"\\t\"\n s += \"PosScore: %s\\t\" % self.pos_score\n s += \"NegScore: %s\" % self.neg_score\n return s", "def __str__(self):\n s = 'Processor ' + __name__\n # if self._rule_files:\n # s += ' running with rules ' + ' '.join(self._rule_files.values())\n\n return s", "def get_text(self):\n return self.rule_id + '\\t' + self.rule_text", "def print_rules(self, input_file='rules.txt'):\n\n with open(input_file, 'r') as f_in:\n rules = 
f_in.readlines()\n for r in rules:\n print(r)", "def __print_rules(self, left=0):\n\n for line in self.__rules:\n print((\" \" * left) + line, end=\"\")", "def __str__(self):\n return \"MatchWhite(%s)\" % str(self.__rule)", "def rule(self) -> str:\n if self._rule:\n return self._rule\n return self._make_rule(member_param=self._member_param,\n unique_member_param=self._unique_member_param)", "def dumpSMRule(ruleInfos, outputFile, inputFile):\n if 'py' in ruleInfos:\n code = ruleInfos['py']\n if type(code) is str:\n outputFile.write(insertPlaceholders(code, inputFile))\n elif type(code) is list:\n [outputFile.write(insertPlaceholders(line, inputFile) + '\\n') for line in code]\n\n outputFile.write('rule ' + ruleInfos['rule'] + ':\\n')\n for field in SNAKEMAKE_FIELDS:\n if field in ruleInfos:\n outputFile.write(' ' + field + ': ' + str(ruleInfos[field]) + '\\n')", "def summary_string(self) -> str:\n return f\"dixonoid: {self.plain_rules}\"", "def __str__(self):\n\n return \"[\" + str(self.quick) + \"] \" + \\\n self.regexp.pattern + \" --> \" + \\\n str(self.handler)", "def __repr__(self):\n template = \"\"\"\n DataIntegrationRule ({})\n \n - inputs : {}\n - output : {}\n - local : {}\n - template : {}\n - params : {}\n \"\"\"\n\n return template.format(\n self.rule_id,\n self.inputs,\n self.output,\n self.local,\n self.template,\n self.params\n )", "def __str__( self ):\n assert isinstance( self.level, int )\n assert isinstance( self.prop, WFF )\n assert isinstance( self.justification, Inference )\n\n return \"Step( %d, %s, %s )\" % ( self.num, repr( self.prop ), repr( self.justification ) )", "def __str__(self):\n return \"Combine(%s)\" % str(self.__rule)", "def __str__(self):\n name_str = \"node name is %s\\n\" % self.__name\n label_str = \"labels are %s\\n\" % str(self.__labels)\n propety_str = \"properties are %s\\n\" % str(self.__props)\n return name_str + label_str + propety_str", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def format_rule_results(rule_result_set, print_=False):\n\tresult_string = \"\"\n\tfor rule in rule_result_set['rule_results']:\n\t\tif rule['passed']:\n\t\t\tresult_string = \",\".join((result_string, 'Passed: ', rule['id']))\n\t\telse:\n\t\t\tresult_string = \",\".join((result_string, 'Failed: '))\n\t\t\tfor msg in rule['fail_reasons']:\n\t\t\t\tresult_string = \",\".join((result_string, msg))\n\tif print_:\n\t\tprint(result_string)\n\treturn result_string", "def __str__(self):\n runner = self.__head\n if runner is None:\n return \"\"\n while runner.next_node:\n if runner is not None:\n print(\"{}\".format(runner.data))\n runner = runner.next_node\n return \"{}\".format(runner.data)", "def render_v1(rule):\n return \" || \".join([str(rule.sid), rule.msg] + rule.references)", "def __repr__(self):\r\n s = 'Player ' + str(self.checker)\r\n v = ' ('+ self.tiebreak+', '+str(self.lookahead)+')'\r\n s += v\r\n return s", "def __str__(self):\n return \"{}\\n\\n{}\".format(self.puzzle,\n \"\\n\".join([str(x) for x in self.children]))", "def __repr__(self):\r\n s = 'Player ' + self.checker + ' (' + self.tiebreak + ', ' + str(self.lookahead) + ')'\r\n return s", "def __str__(self):\n s = 'word chain: ' + '\\n'\n for word in self._used_words[:-1]:\n s += word + ' -> '\n s += self._used_words[-1] + '\\ntarget word: ' + self._target\n return s", "def test_rule(self):\n\n x = t.Rule(\"foo\", 
t.Exactly(\"x\"))\n self.assertEqual(writePython(x),\n dd(\"\"\"\n def rule_foo(self):\n _locals = {'self': self}\n self.locals['foo'] = _locals\n _G_exactly_1, lastError = self.exactly('x')\n self.considerError(lastError, 'foo')\n return (_G_exactly_1, self.currentError)\n \"\"\"))", "def rule_str(C: List, fmt: str = \"%.3f\") -> str:\n s = \" \" + \"\\n∨ \".join([\"(%s)\" % (\" ∧ \".join([fatom(a[0], a[1], a[2], fmt=fmt) for a in c])) for c in C])\n return s", "def __str__(self):\n s = self.prev_error.failures + '\\n' if self.prev_error else ''\n\n s += '%s' % self.message\n if self.args[1:]:\n s += ' %s' % str(self.args[1:])\n\n for task in self.tasktrace:\n s += '\\n in %s %s' % (task.task.__name__, task.name)\n return s", "def __str__(self):\r\n\t\toutStr = \"\"\r\n\t\toutStr += \"Heuristic Level: \" + str(self.heuristic)\r\n\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\t\tfor row in self.board:\r\n\t\t\ttempStr = (\"\\n|\" + \" %2d |\" * self.n)\r\n\t\t\toutStr += tempStr % tuple(row)\r\n\t\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\r\n\t\treturn outStr", "def __str__(self):\n _str = \"Variables:\\n\"\n for variable in self.variables:\n _str += \" {}\\n\".format(str(variable))\n _str += \"\\nConstraints:\\n\"\n for constraint in self.constraints:\n _str += \" {}\\n\".format(str(constraint))\n return _str", "def test_get_rule_details(self):\n pass", "def __str__(self):\n tapeline = self.tape.format(\n self.index - 10, self.index + 11) + ' : state {}'.format(self.state)\n pointline = ' ' * 10 + '^' + ' ' * 11 + \\\n ' : index {}'.format(self.index)\n\n return tapeline + '\\n' + pointline", "def test_rule(cls, rule, args, kwargs, expected, caplog):\n qalgebra.core.abstract_algebra.LOG = True\n qalgebra.core.algebraic_properties.LOG = True\n log_marker = \"Rule %s.%s\" % (cls.__name__, rule)\n print(\"\\n\", log_marker)\n with caplog.at_level(logging.DEBUG):\n with no_instance_caching():\n expr = cls.create(*args, **kwargs)\n assert expr == expected\n assert log_marker in caplog.text", "def __repr__(self):\r\n c = \"Player \" + self.checker + \" (\" + self.tiebreak + \", \" + str(self.lookahead) + \")\"\r\n return c", "def build_rule(rule, attributes):\n\t\n\tlines = [rule, \"{\"]\n\tfor attr in attributes:\n\t\tlines.append(\"\t%s\" % attr)\n\tlines.append(\"}\\n\")\n\n\treturn \"\\n\".join(lines)", "def dump(self):\n dump_grammar(self.rules)\n print(self.registry)", "def rule_name(self) -> str:\n return pulumi.get(self, \"rule_name\")", "def rule_name(self) -> str:\n return pulumi.get(self, \"rule_name\")", "def render_v2(rule):\n return \" || \".join([\n str(rule.gid),\n str(rule.sid),\n str(rule.rev),\n \"NOCLASS\" if rule.classtype is None else rule.classtype,\n str(rule.priority),\n rule.msg] + rule.references)", "def as_rule(self):\n return ((u'%s = %s' % (self.name, self._as_rhs())) if self.name else\n self._as_rhs())", "def createFisEntry(self, rule):\n\n\t\tline = \"\"\n\t\tfor i, ant in enumerate(rule.antecedent):\n\t\t\tfor j, mf in enumerate(self.inputs[i].mfs):\n\t\t\t\tif (mf.name == ant):\n\t\t\t\t\tline = line + str(j+1) + \" \"\n\t\tline = line[:-1] + \", \"\n\t\tfor i, con in enumerate([rule.consequent]):\n\t\t\tfor j, mf in enumerate(self.outputs[i].mfs):\n\t\t\t\tif (mf.name == con):\n\t\t\t\t\tline = line + str(j+1)\n\n\t\tline = line + \" (1) : 1\\n\"\n\t\treturn line", "def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n 
edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s", "def __str__(self):\n # newline-delimited values of all the attributes\n return \">%s\\n%s\" % (self.Label, self.Sequence)", "def __str__(self):\n return \"\\n\\n\".join(self.failures)", "def show_rule(bot, trigger):\n if not trigger.group(2):\n bot.say('.rule <id> - The Internet has no rules. No Exceptions.')\n return\n\n rule = trigger.group(2).strip()\n\n if not trigger.sender.is_nick() and bot.privileges[trigger.sender][bot.nick] >= HALFOP:\n if rule == \"WAMM\" or rule == \"asie\":\n bot.write(['KICK', trigger.sender, trigger.nick], text='The kick is the rule')\n return\n\n if rule.strip() in rules:\n bot.say('Rule {0}: \"{1}\"'.format(rule, rules[rule]))\n else:\n bot.say('Rule {0} is a lie.'.format(rule))", "def __str__(self):\n base_message = self.base_message.format(filename=self.yaml_file_path)\n error_message = ERROR_MESSAGE.format(key=self.key, expected=self.expected)\n return base_message + error_message", "def generate_workflow(self) -> str:\n analysisTasks = self._parse_parameters()\n terminalTasks = self._identify_terminal_tasks(analysisTasks)\n\n ruleList = {k: SnakemakeRule(v, self._pythonPath)\n for k, v in analysisTasks.items()}\n\n workflowString = 'rule all: \\n\\tinput: ' + \\\n ','.join([ruleList[x].full_output()\n for x in terminalTasks]) + '\\n\\n'\n workflowString += '\\n'.join([x.as_string() for x in ruleList.values()])\n\n return self._dataSet.save_workflow(workflowString)", "def __str__(self):\n debug_str = \"%s ::=\" % str(self.head)\n for symbol in self.body:\n debug_str += \" %s\" % str(symbol)\n return debug_str", "def __str__(self) -> str:\n st = \"<Output> \"\n if self.inst_out:\n st += f'instance:{self.inst_out};'\n st += f'''{self.output} -> {self.target or '\"\"'} -> '''\n if self.inst_in:\n st += f\"instance:{self.inst_in};\"\n st += self.input\n\n if self.params and not self.inst_in:\n st += f\" ({self.params})\"\n if self.delay != 0:\n st += f\" after {self.delay} seconds\"\n if self.times != -1:\n st += \" (once only)\" if self.times == 1 else f\" ({self.times!s} times only)\"\n return st", "def format_with_lineno(self) -> str:\n s = f\"<Rule '{self!s}'\"\n if self.csv_line_number is not None:\n s += f\" from line {self.csv_line_number}\"\n s += \">\"\n return s", "def pretty_str(self,print_option=PrintOption()):\n\n po = print_option.clone()\n po.is_canonical = self.is_canonical\n po.grammar = self\n\n token_rules = set()\n\n # Look for defined rules that look better as absorbed into their uses.\n for name, rule in self.rules.items():\n # Star-able is also optional-able, so starrable must come first.\n starred_phrase = rule.as_starred(name)\n if starred_phrase is not None:\n po.replace_with_starred[name] = starred_phrase\n continue\n optional_phrase = rule.as_optional()\n if optional_phrase is not None:\n po.replace_with_optional[name] = optional_phrase\n continue\n options = rule.as_container()\n if len(options)==1:\n phrase = options[0].as_container()\n if len(phrase)==1 and phrase[0].is_token():\n token_rules.add(name)\n\n # A rule that was generated to satisfy canonicalization is better\n # presented as absorbed in its original parent.\n for name, rule in self.rules.items():\n # We only care about rules generated during canonicalization\n if name.find('.') > 0 or name.find('/') > 0:\n options = 
rule.as_container()\n if len(options) != 2:\n continue\n if any([len(x.as_container())!=1 for x in options]):\n continue\n if any([(not x.as_container()[0].is_symbol_name()) for x in options]):\n continue\n # Rule looks like A -> X | Y\n po.replace_with_nested[name] = rule\n\n parts = []\n for key in sorted(self.rules):\n if key == LANGUAGE:\n # This is synthetic, for analysis\n continue\n rule_content = self.rules[key].pretty_str(po)\n if key in po.replace_with_optional:\n continue\n if key in po.replace_with_starred:\n continue\n if key in po.replace_with_nested:\n continue\n if (not po.print_terminals) and (key in token_rules):\n continue\n space = \"\" if po.multi_line_choice else \" \"\n if po.bikeshed:\n key_content = \" <dfn for='recursive descent syntax'>{}</dfn>\".format(key)\n content = \"<div class='syntax' noexport='true'>\\n{}:\\n{}\\n</div>\".format(key_content,rule_content)\n else:\n content = \"{}:{}{}\".format(key,space,rule_content)\n parts.append(content)\n content = (\"\\n\\n\" if po.more_newlines else \"\\n\").join(parts)\n return content", "def dump(self) :\n st = \"%s=%s, valid=%d, found=%d, type=%s stringValue=%s\" \\\n %(self.name_, str(self.value_), self.valid_, self.found_, \\\n self.type_, self.stringValue_)\n print st", "def __str__(self):\n return \"{}\\n{}\\n{}\\n{}\".format(self.header,self.sequence,self.line3,self.quality)", "def __str__(self):\n out = self.san\n if self.comment != \"\":\n out += \" {\" + self.comment.replace('\\n', ' ') + \"}\"\n if len(self.nags) > 0:\n for n in self.nags:\n out += \" \" + n\n for v in self.variations:\n out += \" (\" + str(v).strip(' ') + \")\"\n return out", "def printResults(items, rules):\n\t\n for item, support in items: \n print \"item: %s , %.3f\" % (str(item), support)\n print '-----------------------------------------'\n for r, confi in rules:\n \tpre, after = r\n \tprint \"Rule: %s ==> %s , %.3f\" % (str(pre), str(after), confi)", "def __str__(self) -> str:\n return 'Node({})'.format(self.yaml_node)", "def __str__(self):\n astr = '[\\n name: [ ' + self.name + ' ]\\n'\n astr += ' variables: [ '\n for var, init in self.variables:\n astr += '(' + var + ' := ' + init + '), '\n astr = astr[:-2] + ' ]\\n assumptions: [ '\n for assumption in self.assumptions:\n astr += assumption + ', '\n astr = astr[:-2] + ' ]\\n guarantees: [ '\n for guarantee in self.guarantees:\n astr += guarantee + ', '\n return astr[:-2] + ' ]\\n]'", "def toString(self):\n\t\ts = \"A %s titled '%s':\\n\\n\" % (self.getSpecString(), self.getName())\n\t\ts += \"It's summary reads: %s\\n\\n\" % (self.getDescription())\n\t\ts += \"~~\\n%s\\n~~\" % (self.getAllItemsStr())\n\t\treturn s", "def print_production(self, transition_index, value):\r\n\r\n transition = self.rules[transition_index]\r\n str_list = [self.rule_to_str(t) for t in transition]\r\n print(\" (%04d, %10s) {%s}\" % (transition_index, toRuleString[value], \" \".join(str_list)))", "def __str__(self, output=[]):\n\n class_str = 'Analytical Phonon simulation properties:\\n\\n'\n class_str += super().__str__()\n\n return class_str", "def __repr__(self) -> str:\n return f\"<TestResult {self.test_id},{self.regression_test_id}: {self.exit_code} \" \\\n f\"(expected {self.expected_rc} in {self.runtime} ms>\"", "def __str__(self):\n s = 'hit '+str(self.hit)+'\\n'\n s+= 'states '+str(self.states)+'\\n'\n s+= 'chi2 '+str(self.chi2)\n return s", "def rule_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule_name\")", "def __str__(self):\r\n print '%s' % self.name,' %12d' % self.estart,' 
%12d' % self.eend,' %s' % self.sta, ' %s' % self.chan, ' %s' % self.filepattern", "def __str__(self):\n output = \"Solution for \" + self.vrpdata.InstanceName + \":\\n\"\n output += \"Total distance: \" + str(round(self.objective, 2)) + \"\\n\"\n output += \"Solution valid: \" + str(self.solutionValid) + \"\\n\\n\"\n count = 1 # count routes\n for r in self.routes:\n output += \"Route #\" + str(count) + \"\\n\" + str(r) + \"\\n\" + str(round(r.distance, 2)) + \"\\n\" + str(r.quantity) + \"\\n\"\n count += 1\n return output", "def rule_action(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"rule_action\")", "def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'", "def __str__(self):\n\n print(\"\")\n s = \"NAME : \"+self._name+\"\\n\\n\"\n s += \"PARAMS :\"\n print(s)\n\n for key, val in self.params.items():\n l = (21-len(key))//7\n print(\"{0}\".format(key)+\"\\t\"*l+\":\\t{0}\".format(val))\n\n s = \"\\nRuns stored in DEFAULT_RUNS = \"+str(len(self.default_runs))\n print(s)\n\n s = \"\\nRuns stored in MOD_RUNS = \"+str(len(self.mod_runs))\n print(s)\n\n return \"\"", "def __str__(self):\n # Power/toughness, seen only if it's a creature\n pt = \"\"\n if \"power\" in self:\n pt = \"{0}/{1}\".format(self.power,\n self.toughness).replace(\"*\", \"\\*\")\n # Append loyalty to the end of oracle text if the creature is a\n # planeswalker\n if \"loyalty\" in self:\n self.oracle_text = \"{0}\\nStarting Loyalty: {1}\".format(\n self.oracle_text, self.loyalty)\n\n flavor = \"*{0}*\".format(\n self.flavor_text) if \"flavor_text\" in self else \"\"\n\n return \"**{0}** {1}\\n{2} {3}\\n{4}\\n{5}\\n\\n\".format(self.name,\n self.mana_cost,\n self.type_line,\n pt,\n self.oracle_text,\n flavor)", "def __str__(self):\n return ' '.join([self.source, self.name, str(self.outputs)])", "def __str__(self):\n buf = io.StringIO()\n args.output.write(buf, self.root, self.headings)\n return buf.getvalue()", "def __str__(self):\n items = ['({!r})'.format(item) for item in self.items()]\n return '[{}]'.format(' -> '.join(items))", "def __str__(self):\n outstr = [\"\\n<%s: %s>\" % (self.__class__, self.name)]\n outstr.append(\"%d graphs\" % len(self._graphs))\n outstr = \"\\n\".join(outstr)\n return outstr", "def print_problem(self):\n print('\\n*****************')\n print('PROBLEM: ' + self.problem)\n print('OBJECTS: ' + str(self.objects))\n print('INIT: ' + str(self.init))\n print('GOAL: ' + str(self.goal))\n print('AGENTS: ' + str(self.agents))\n print('****************')", "def printResults(items, rules):\n for item, support in sorted(items, key=lambda (item, support): support):\n print \"item: %s , %.3f\" % (str(item), support)\n\n print \"\\n------------------------ RULES:\"\n for rule, confidence, support in sorted(rules, key=lambda (rule, confidence, support): confidence):\n pre, post = rule\n print \"Rule: %s ==> %s , %.3f, %.3f\" % (str(pre), str(post), confidence, support)", "def __str__(self):\n return 'Observables Test Case: {0}'.format(self.title)", "def __str__(self):\n analysis = []\n for analyze in self.analysis:\n if self.analysis[analyze] 
is not None:\n analysis.append(self.analysis[analyze])\n return \"Analises: {} \\n\".format(analysis)", "def summary(self):\n return \"{0:}: {1:} -> {2:}\".format(self.name, self.var, self.out)", "def print_solution(manager, routing, assignment):\n print('Objective: {}'.format(assignment.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(index)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Objective: {}m\\n'.format(route_distance)", "def __str__(self) -> str:\n st = \"\\tmat = \" + self.mat\n st += \"\\n\\trotation = \" + str(self.ham_rot) + '\\n'\n pl_str = ['(' + p.join(' ') + ')' for p in self.planes]\n st += '\\tplane: ' + \", \".join(pl_str) + '\\n'\n return st", "def __str__(self):\n return self.piece_behavior.summary", "def __str__(self):\n summary = '{} object in the {} language, consisting of {} tokens.'\n return summary.format(\n type(self).__name__, self.language,\n len(self.hand_tagged)\n )", "def __repr__(self):\n options_str = \", \".join(\n [\n f\"validate={self._validate}\",\n f\"outcome={self._outcome}\",\n f\"alpha_prior={self._alpha_prior}\",\n ]\n )\n return f\"{self.__class__.__name__}({options_str})\"", "def print_solution(manager, routing, assignment):\n print('Objective: {}'.format(assignment.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Objective: {}m\\n'.format(route_distance)" ]
[ "0.7497269", "0.74336165", "0.73365", "0.7002294", "0.6986714", "0.6474687", "0.6371976", "0.63269794", "0.62451273", "0.620827", "0.6196005", "0.6190379", "0.6182506", "0.6179493", "0.6122271", "0.60830194", "0.60090804", "0.5991629", "0.59799564", "0.5888777", "0.5873028", "0.58716637", "0.58665186", "0.5865816", "0.58422565", "0.5788415", "0.5774052", "0.5752707", "0.5749632", "0.57353145", "0.5691922", "0.5691922", "0.5691922", "0.56873363", "0.56792164", "0.56788266", "0.5677975", "0.5672413", "0.5666912", "0.56638485", "0.5637679", "0.55964655", "0.55762386", "0.55600023", "0.5555105", "0.5546736", "0.55349284", "0.5532956", "0.55322", "0.55205745", "0.5505554", "0.55020064", "0.55020064", "0.5496282", "0.54914224", "0.5489084", "0.5475136", "0.54667807", "0.54625654", "0.54490435", "0.5448817", "0.5445562", "0.5440421", "0.543527", "0.54282886", "0.54227257", "0.54223645", "0.5413062", "0.5411269", "0.5405241", "0.53995895", "0.5392017", "0.53880715", "0.53867364", "0.5386011", "0.53831756", "0.53767973", "0.5374547", "0.53669244", "0.5366146", "0.5356512", "0.53532284", "0.53518665", "0.53449404", "0.5341957", "0.53322935", "0.5324486", "0.53225446", "0.531363", "0.53115463", "0.53081834", "0.5305274", "0.5304831", "0.5297624", "0.52929485", "0.5283715", "0.5279262", "0.5276693", "0.527472", "0.52728415" ]
0.7577809
0
Creates a new SnakemakeRule instance from a dict representation
def __init__(
    self, rule_id, parent_id, input, output, inline=True, local=False, template=None, **kwargs
):
    super().__init__(rule_id, parent_id, input, output, local, template, **kwargs)
    self.inline = inline
    self.groupped = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def from_dict(self, d):\r\n options = dict(d)\r\n task_id = options['task_id']\r\n del options['task_id']\r\n return SubtaskStatus.create(task_id, **options)", "def from_dict(cls, data):\n return cls(**data)", "def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n alias = dictionary.get(\"alias\")\r\n cnam_lookups_enabled = dictionary.get(\"cnam_lookups_enabled\")\r\n number_type = dictionary.get(\"number_type\")\r\n rate_center = dictionary.get(\"rate_center\")\r\n state = dictionary.get(\"state\")\r\n value = dictionary.get(\"value\")\r\n\r\n # Return an object of this model\r\n return cls(alias,\r\n cnam_lookups_enabled,\r\n number_type,\r\n rate_center,\r\n state,\r\n value)", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return 
cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def from_dict(cls, d: dict):\n scen = None\n try:\n scen = DTScenario(d['name'])\n for t in d['tasks']:\n scen.addTask(dtTaskTypeDict['cls'][t['class']], t['parameters'])\n except KeyError:\n scen = None\n raise DTInternalError('DTScenario.fromDict()', 'Wrong dict format')\n return scen", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def from_dict(eventScheduleDict):\n pass", "def from_dict(cls, d):\n return cls(**d)", "def from_dict(cls, dikt) -> 'ShardingDescriptor':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n hostname = d.get('hostname')\n project = d.get('project')\n treeish = d.get('treeish')\n path = d.get('path')\n _validate_args(\n hostname,\n project,\n treeish,\n path,\n path_required=True)\n return cls(hostname, project, treeish, path)", "def from_dict(cls, dct):\n if dct.pop('type') != cls.__name__:\n fmt = 'Can not construct Note from dict %s'\n raise ValueError(fmt % dct)\n\n return cls(**dct)", "def from_dict(cls, dikt) -> 'ProductionFlowItem':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n links = dictionary.get('links')\r\n email_config = dictionary.get('emailConfig')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(links,\r\n email_config,\r\n dictionary)", "def from_dict(cls, inp):\n return cls(**{k: v for k, v in inp.items() if k != '__class__'})", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n continue_on_error = dictionary.get('continueOnError')\n is_active = dictionary.get('isActive')\n script_params = dictionary.get('scriptParams')\n script_path = dictionary.get('scriptPath')\n timeout_secs = dictionary.get('timeoutSecs')\n\n # Return an object of this model\n return cls(\n continue_on_error,\n is_active,\n script_params,\n script_path,\n timeout_secs\n)", "def _from_normalised_dict(cls, dictionary):\n if 'cvarsort' in dictionary and dictionary['cvarsort'] != cls.cvarsort:\n raise PydmrsValueError('{} must have cvarsort {}, not {}'.format(cls.__name__,\n cls.cvarsort,\n dictionary['cvarsort']))\n return cls(**{key:value for key, value in dictionary.items() if key != 'cvarsort'})", "def _from_string(cls, serialized):\r\n parse = cls.URL_RE.match(serialized)\r\n if not parse:\r\n raise InvalidKeyError(cls, serialized)\r\n\r\n parse = parse.groupdict()\r\n if parse['definition_id']:\r\n parse['definition_id'] = cls.as_object_id(parse['definition_id'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})", "def from_dict(cls, dikt) -> 'AssetPropertyValueHistoryRequest':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n assert \"status\" in d\n assert \"metadata\" in d\n return cls(**d)", "def from_dict(cls, dikt) -> 'Story':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Problem':\n return deserialize_model(dikt, 
cls)", "def from_dict(cls, dikt) -> 'Failure':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'StartConfiguration':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n lan_ip = dictionary.get('lanIp')\r\n uplink = dictionary.get('uplink')\r\n public_port = dictionary.get('publicPort')\r\n local_port = dictionary.get('localPort')\r\n allowed_ips = dictionary.get('allowedIps')\r\n protocol = dictionary.get('protocol')\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n lan_ip,\r\n uplink,\r\n public_port,\r\n local_port,\r\n allowed_ips,\r\n protocol)", "def from_dict(cls, dikt) -> 'Task':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, data):\n requirements = {k: v for k, v in data['params']['requirements'].items()}\n return cls(requirements)", "def from_dict(cls, dct):\n return cls(**dct)", "def from_dict(cls, dikt) -> 'SourceAudit':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'POSTExecution':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n address = dictionary.get('address')\n port = dictionary.get('port')\n protocol = dictionary.get('protocol')\n is_cluster_auditing_enabled = dictionary.get('isClusterAuditingEnabled')\n is_data_protection_enabled = dictionary.get('isDataProtectionEnabled')\n is_filer_auditing_enabled = dictionary.get('isFilerAuditingEnabled')\n is_ssh_log_enabled = dictionary.get('isSshLogEnabled')\n name = dictionary.get('name')\n\n # Return an object of this model\n return cls(address,\n port,\n protocol,\n is_cluster_auditing_enabled,\n is_data_protection_enabled,\n is_filer_auditing_enabled,\n is_ssh_log_enabled,\n name)", "def create_snat_rule(self, **attrs):\n return self._create(_snat.Rule, **attrs)", "def from_dict(cls, d):\n return cls(d[\"sequence\"], immutable_bounds=d[\"immutable_bounds\"])", "def from_dict(cls, dikt) -> 'OneOffSchedule':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dct):\n dct['address'] = Address(**dct['address'])\n return cls(**dct)", "def from_dict(cls, obj: dict) -> FormatTest:\n if obj is None:\n return None\n\n if not isinstance(obj, dict):\n return FormatTest.parse_obj(obj)\n\n _obj = FormatTest.parse_obj({\n \"integer\": obj.get(\"integer\"),\n \"int32\": obj.get(\"int32\"),\n \"int64\": obj.get(\"int64\"),\n \"number\": obj.get(\"number\"),\n \"float\": obj.get(\"float\"),\n \"double\": obj.get(\"double\"),\n \"decimal\": obj.get(\"decimal\"),\n \"string\": obj.get(\"string\"),\n \"string_with_double_quote_pattern\": obj.get(\"string_with_double_quote_pattern\"),\n \"byte\": obj.get(\"byte\"),\n \"binary\": obj.get(\"binary\"),\n \"var_date\": obj.get(\"date\"),\n \"date_time\": obj.get(\"dateTime\"),\n \"uuid\": obj.get(\"uuid\"),\n \"password\": obj.get(\"password\"),\n \"pattern_with_digits\": obj.get(\"pattern_with_digits\"),\n \"pattern_with_digits_and_delimiter\": obj.get(\"pattern_with_digits_and_delimiter\")\n })\n return _obj", "def from_dict(self, data: dict):\n if 'title' in data:\n self.title = data['title']\n if 'description' in data:\n self.description = data['description']\n if 'deadline' in data:\n self.deadline = parser.parse(data['deadline'])\n return", "def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n 
constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )", "def from_dict(cls, name, data):\n item = cls(name)\n\n item.description = data.get(\"description\", \"\")\n item.difficulty = data.get(\"difficulty\", 0)\n\n item.prerequisites = data.get(\"prerequisites\", {})\n item.prerequisites[\"items\"] = to_list(item.prerequisites.get(\"items\"))\n item.prerequisites[\"research\"] = to_list(item.prerequisites.get(\"research\"))\n item.prerequisites[\"triggers\"] = to_list(item.prerequisites.get(\"triggers\"))\n item.cost = data.get(\"cost\", {})\n item.strings = data.get(\"strings\", {})\n item.effects = data.get(\"effects\", {})\n for effect in (\n \"enable_commands\",\n \"enable_items\",\n \"enable_resources\",\n \"events\",\n \"triggers\",\n ):\n item.effects[effect] = to_list(item.effects.get(effect))\n return item", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n alias_name = dictionary.get('aliasName')\n client_subnet_whitelist = None\n if dictionary.get('clientSubnetWhitelist') != None:\n client_subnet_whitelist = list()\n for structure in dictionary.get('clientSubnetWhitelist'):\n client_subnet_whitelist.append(cohesity_management_sdk.models.cluster_config_proto_subnet.ClusterConfigProtoSubnet.from_dictionary(structure))\n smb_config = cohesity_management_sdk.models.alias_smb_config.AliasSmbConfig.from_dictionary(dictionary.get('smbConfig')) if dictionary.get('smbConfig') else None\n view_path = dictionary.get('viewPath')\n\n # Return an object of this model\n return cls(alias_name,\n client_subnet_whitelist,\n smb_config,\n view_path)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n scheduling = meraki.models.scheduling_model.SchedulingModel.from_dictionary(dictionary.get('scheduling')) if dictionary.get('scheduling') else None\r\n bandwidth = meraki.models.bandwidth_model.BandwidthModel.from_dictionary(dictionary.get('bandwidth')) if dictionary.get('bandwidth') else None\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n scheduling,\r\n bandwidth,\r\n dictionary)", "def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def from_dict(cls, dict_object):\n\n return cls(**dict_object)", "def from_dict(cls, data):\n return cls(\n filter_id=data[\"Filter\"],\n name=data[\"Name\"],\n admin=data[\"Admin\"],\n action=data[\"Action\"],\n input_port=data[\"Input\"],\n output_port=data[\"Output\"],\n classifiers=data[\"Classifiers\"],\n packet_processing=data[\"Packet Processing\"],\n )", "def __init__(self, rulespath=None):\n if rulespath is None:\n rulespath = path.join(path.dirname(path.realpath(__file__)), 'pattern_sentence_filter.yaml')\n\n self.rulespath = rulespath\n self.rules = Rules(yaml.safe_load(open(rulespath)))", "def from_dict(cls, dikt) -> 'Expression':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Sitemap':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary 
is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n to = dictionary.get('to')\r\n application_id = dictionary.get('applicationId')\r\n expiration_time_in_minutes = dictionary.get('expirationTimeInMinutes')\r\n code = dictionary.get('code')\r\n scope = dictionary.get('scope')\r\n\r\n # Return an object of this model\r\n return cls(to,\r\n application_id,\r\n expiration_time_in_minutes,\r\n code,\r\n scope)", "def from_dict(cls, dikt) -> \"Scheduler\":\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, obj):\n cls._check_keys(obj)\n return cls(**obj)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n alternate_restore_base_directory = dictionary.get('alternateRestoreBaseDirectory')\n continue_on_error = dictionary.get('continueOnError')\n encryption_enabled = dictionary.get('encryptionEnabled')\n generate_ssh_keys = dictionary.get('generateSshKeys')\n override_originals = dictionary.get('overrideOriginals')\n preserve_acls = dictionary.get('preserveAcls')\n preserve_attributes = dictionary.get('preserveAttributes')\n preserve_timestamps = dictionary.get('preserveTimestamps')\n restore_entities = dictionary.get('restoreEntities')\n restore_to_original_paths = dictionary.get('restoreToOriginalPaths')\n save_success_files = dictionary.get('saveSuccessFiles')\n skip_estimation = dictionary.get('skipEstimation')\n\n # Return an object of this model\n return cls(\n alternate_restore_base_directory,\n continue_on_error,\n encryption_enabled,\n generate_ssh_keys,\n override_originals,\n preserve_acls,\n preserve_attributes,\n preserve_timestamps,\n restore_entities,\n restore_to_original_paths,\n save_success_files,\n skip_estimation\n)", "def from_dict(cls, d):\n d = d.copy()\n if \"length\" in d:\n # length argument removed in version 1.1.0\n del d[\"length\"]\n return cls(**d)" ]
[ "0.61388683", "0.60924554", "0.603016", "0.60145026", "0.59718156", "0.5911789", "0.5897906", "0.5897906", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5892087", "0.5874733", "0.58446443", "0.581596", "0.5811093", "0.58043706", "0.57854325", "0.5779384", "0.57779425", "0.57670593", "0.57580984", "0.5754467", "0.5742527", "0.5736129", "0.571457", "0.5712241", "0.56904167", "0.5682176", "0.56777376", "0.56774867", "0.5674947", "0.56714714", "0.5665846", "0.5664586", "0.5628342", "0.5625868", "0.5611619", "0.5607494", "0.56069016", "0.56026757", "0.5602451", "0.5580156", "0.55767584", "0.5569615", "0.5568706", "0.5556874", "0.55524564", "0.5527888", "0.5527888", "0.5526569", "0.550578", "0.54912895", "0.5488643", "0.5488239", "0.5487369", "0.54828584", "0.5480193", "0.54733217", "0.54717916" ]
0.0
-1
Prints a string representation of SnakemakeRule instance
def __repr__(self):
    template = """
    - inline : {}
    """
    return super().__repr__() + template.format(self.inline)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n template = \"\"\"\n SnakemakeRule ({})\n \n - parent_id : {}\n - input : {}\n - output : {}\n - local : {}\n - template : {}\n - params : {}\n \"\"\"\n return template.format(\n self.rule_id,\n self.parent_id,\n self.input,\n self.output,\n self.local,\n self.template,\n self.params,\n )", "def __str__(self):\n return \"[ %s ]\" % str(self.__rule)", "def __str__(self):\n return \"{ %s }\" % str(self.__rule)", "def __str__(self):\n return \"{ %s }1\" % str(self.__rule)", "def print_rules(self):\n for idx, r in enumerate(self.rules):\n print(idx, \"=>\", r.__repr__())", "def __str__ (self) :\n\t\ttext_rule = \"\"\n\t\t\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\ttext_rule += \"\\nRULE \" + key + \" = [\\n\\t\"\n\t\t\trule_in_a_line = []\n\t\t\tfor rule in rules :\n\t\t\t\t#rule_in_a_line.append(\" + \".join([r.val+\"(\"+r.type+\")\" for r in rule]))\n\t\t\t\trule_in_a_line.append(\" + \".join([r.__str__() for r in rule]))\n\t\t\ttext_rule += \"\\n\\t\".join(rule_in_a_line) + \"\\n]\"\n\t\ttext_rule += \"\\n\\n\"\n\t\t\n\t\ttext_rule += \"LABELS = \" + json.dumps (self.labels, indent=2) + '\\n\\n'\n\n\t\ttext_rule += \"STRUCT = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join([\n\t\t\t\t\"\\t{} : {{\\n\\t\\t{}\\n\\t}}\\n\".format (\n\t\t\t\t\tkey, \", \\n\\t\\t\".join(val)\n\t\t\t\t) for key, val in self.keeper.items()\n\t\t\t])\n\t\t)\n\t\ttext_rule += \"STRNODE = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join(self.strnodes)\n\t\t)\n\t\tfor regex, label in self.tokens :\n\t\t\ttext_rule += \"TOKEN \" + label + \" = regex('\" + regex + \"')\\n\"\n\n\t\treturn text_rule", "def view_rule(self, rule_name):\n\n assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'\n print(self.rule_source[rule_name])", "def __str__(self):\n\n ret = ''\n for rule in self.rules:\n ret += str(rule) + '\\n'\n ret += 'IF TRUE THEN {0}'.format(self.default)\n\n return ret", "def get_formatted_rule(rule=None):\r\n rule = rule or {}\r\n return ('action: %s\\n'\r\n 'protocol: %s\\n'\r\n 'source_ip_address: %s\\n'\r\n 'source_ip_subnet_mask: %s\\n'\r\n 'destination_ip_address: %s\\n'\r\n 'destination_ip_subnet_mask: %s\\n'\r\n 'destination_port_range_start: %s\\n'\r\n 'destination_port_range_end: %s\\n'\r\n 'version: %s\\n'\r\n % (rule.get('action', 'permit'),\r\n rule.get('protocol', 'tcp'),\r\n rule.get('sourceIpAddress', 'any'),\r\n rule.get('sourceIpSubnetMask', '255.255.255.255'),\r\n rule.get('destinationIpAddress', 'any'),\r\n rule.get('destinationIpSubnetMask', '255.255.255.255'),\r\n rule.get('destinationPortRangeStart', 1),\r\n rule.get('destinationPortRangeEnd', 1),\r\n rule.get('version', 4)))", "def __str__(self):\n return \"(%s)\" % ' '.join(map(str, self.__subrules))", "def fmt_rule(rule: Callable, *, gets: Optional[List[Tuple[str, str]]] = None) -> str:\n type_hints = get_type_hints(rule)\n product = type_hints.pop(\"return\").__name__\n params = \", \".join(t.__name__ for t in type_hints.values())\n gets_str = \"\"\n if gets:\n get_members = \", \".join(\n f\"Get[{product_subject_pair[0]}]({product_subject_pair[1]})\"\n for product_subject_pair in gets\n )\n gets_str = f\", gets=[{get_members}]\"\n return f\"@rule({fmt_rust_function(rule)}({params}) -> {product}{gets_str})\"", "def rule_to_str(self, t):\r\n\r\n if(t[0] == TERMINAL):\r\n return self.terminal_to_str(t[1])\r\n else:\r\n return toRuleString[t[1]]", "def pretty_str(rule,print_option=PrintOption()):\n if rule.is_terminal() or rule.is_empty():\n content = str(rule)\n if 
print_option.bikeshed:\n return \"`{}`\".format(content)\n return content\n if rule.is_symbol_name():\n name = rule.content\n def with_meta(phrase,metachar,print_option):\n content = \" \".join([x.pretty_str(print_option) for x in phrase])\n if len(phrase) > 1:\n return \"( {} ){}\".format(content, metachar)\n return \"{} {}\".format(content, metachar)\n if name in print_option.replace_with_starred:\n phrase = print_option.replace_with_starred[name]\n return with_meta(phrase,'*',print_option)\n if name in print_option.replace_with_optional:\n phrase = print_option.replace_with_optional[name]\n return with_meta(phrase,'?',print_option)\n if name in print_option.replace_with_nested:\n po = print_option.clone()\n po.multi_line_choice = False\n content = po.replace_with_nested[name].pretty_str(po)\n return \"( {} )\".format(content)\n if print_option.inline_synthetic and name.find(\"/\") >=0:\n po = print_option.clone()\n po.multi_line_choice = False\n content = po.grammar.rules[name].pretty_str(po)\n return \"( {} )\".format(content)\n\n # Print ourselves\n if print_option.bikeshed:\n context = 'recursive descent syntax'\n g = print_option.grammar\n if g.rules[name].is_token():\n context = 'syntax'\n if name in g.extra_externals:\n context = 'syntax_sym'\n if name == '_disambiguate_template':\n # This is an implementation detail, so make it invisible.\n return ''\n else:\n without_underscore = ['_less_than',\n '_less_than_equal',\n '_greater_than',\n '_greater_than_equal',\n '_shift_left',\n '_shift_left_assign',\n '_shift_right',\n '_shift_right_assign']\n if name in without_underscore:\n name = name[1:]\n return \"[={}/{}=]\".format(context,name)\n return name\n if isinstance(rule,Choice):\n parts = [i.pretty_str(print_option) for i in rule]\n if print_option.multi_line_choice:\n parts.sort()\n\n if print_option.multi_line_choice:\n if print_option.bikeshed:\n nl = \"\\n\\n\"\n prefixer = \"\\n | \"\n else:\n nl = \"\\n\"\n prefixer = \"\\n \"\n else:\n nl = \"\"\n prefixer = \"\"\n joiner = nl + \" | \"\n inside = prefixer + joiner.join([p for p in parts])\n if print_option.is_canonical:\n return inside\n else:\n # If it's not canonical, then it can have nesting.\n return \"(\" + inside + nl + \")\"\n if isinstance(rule,Seq):\n return \" \".join(filter(lambda i: len(i)>0, [i.pretty_str(print_option) for i in rule]))\n if isinstance(rule,Repeat1):\n return \"( \" + \"\".join([i.pretty_str(print_option) for i in rule]) + \" )+\"\n raise RuntimeError(\"unexpected node: {}\".format(str(rule)))", "def __str__(self):\n return \"(%s)\" % ' | '.join(map(str, self.__subrules))", "def rule_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"rule_name\")", "def test_rule_representation():\n rule = MethodRule(method=\"POST\")\n assert repr(rule) == \"MethodRule(method='POST')\", \"Wrong representation\"", "def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)", "def __str__(self):\n s = \"\"\n s += self.synset.name + \"\\t\"\n s += \"PosScore: %s\\t\" % self.pos_score\n s += \"NegScore: %s\" % self.neg_score\n return s", "def 
__str__(self):\n s = 'Processor ' + __name__\n # if self._rule_files:\n # s += ' running with rules ' + ' '.join(self._rule_files.values())\n\n return s", "def get_text(self):\n return self.rule_id + '\\t' + self.rule_text", "def print_rules(self, input_file='rules.txt'):\n\n with open(input_file, 'r') as f_in:\n rules = f_in.readlines()\n for r in rules:\n print(r)", "def __print_rules(self, left=0):\n\n for line in self.__rules:\n print((\" \" * left) + line, end=\"\")", "def __str__(self):\n return \"MatchWhite(%s)\" % str(self.__rule)", "def rule(self) -> str:\n if self._rule:\n return self._rule\n return self._make_rule(member_param=self._member_param,\n unique_member_param=self._unique_member_param)", "def dumpSMRule(ruleInfos, outputFile, inputFile):\n if 'py' in ruleInfos:\n code = ruleInfos['py']\n if type(code) is str:\n outputFile.write(insertPlaceholders(code, inputFile))\n elif type(code) is list:\n [outputFile.write(insertPlaceholders(line, inputFile) + '\\n') for line in code]\n\n outputFile.write('rule ' + ruleInfos['rule'] + ':\\n')\n for field in SNAKEMAKE_FIELDS:\n if field in ruleInfos:\n outputFile.write(' ' + field + ': ' + str(ruleInfos[field]) + '\\n')", "def summary_string(self) -> str:\n return f\"dixonoid: {self.plain_rules}\"", "def __str__(self):\n\n return \"[\" + str(self.quick) + \"] \" + \\\n self.regexp.pattern + \" --> \" + \\\n str(self.handler)", "def __repr__(self):\n template = \"\"\"\n DataIntegrationRule ({})\n \n - inputs : {}\n - output : {}\n - local : {}\n - template : {}\n - params : {}\n \"\"\"\n\n return template.format(\n self.rule_id,\n self.inputs,\n self.output,\n self.local,\n self.template,\n self.params\n )", "def __str__( self ):\n assert isinstance( self.level, int )\n assert isinstance( self.prop, WFF )\n assert isinstance( self.justification, Inference )\n\n return \"Step( %d, %s, %s )\" % ( self.num, repr( self.prop ), repr( self.justification ) )", "def __str__(self):\n return \"Combine(%s)\" % str(self.__rule)", "def __str__(self):\n name_str = \"node name is %s\\n\" % self.__name\n label_str = \"labels are %s\\n\" % str(self.__labels)\n propety_str = \"properties are %s\\n\" % str(self.__props)\n return name_str + label_str + propety_str", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def format_rule_results(rule_result_set, print_=False):\n\tresult_string = \"\"\n\tfor rule in rule_result_set['rule_results']:\n\t\tif rule['passed']:\n\t\t\tresult_string = \",\".join((result_string, 'Passed: ', rule['id']))\n\t\telse:\n\t\t\tresult_string = \",\".join((result_string, 'Failed: '))\n\t\t\tfor msg in rule['fail_reasons']:\n\t\t\t\tresult_string = \",\".join((result_string, msg))\n\tif print_:\n\t\tprint(result_string)\n\treturn result_string", "def __str__(self):\n runner = self.__head\n if runner is None:\n return \"\"\n while runner.next_node:\n if runner is not None:\n print(\"{}\".format(runner.data))\n runner = runner.next_node\n return \"{}\".format(runner.data)", "def render_v1(rule):\n return \" || \".join([str(rule.sid), rule.msg] + rule.references)", "def __repr__(self):\r\n s = 'Player ' + str(self.checker)\r\n v = ' ('+ self.tiebreak+', '+str(self.lookahead)+')'\r\n s += v\r\n return s", "def __str__(self):\n return \"{}\\n\\n{}\".format(self.puzzle,\n \"\\n\".join([str(x) for x in self.children]))", "def __repr__(self):\r\n s = 'Player ' 
+ self.checker + ' (' + self.tiebreak + ', ' + str(self.lookahead) + ')'\r\n return s", "def __str__(self):\n s = 'word chain: ' + '\\n'\n for word in self._used_words[:-1]:\n s += word + ' -> '\n s += self._used_words[-1] + '\\ntarget word: ' + self._target\n return s", "def test_rule(self):\n\n x = t.Rule(\"foo\", t.Exactly(\"x\"))\n self.assertEqual(writePython(x),\n dd(\"\"\"\n def rule_foo(self):\n _locals = {'self': self}\n self.locals['foo'] = _locals\n _G_exactly_1, lastError = self.exactly('x')\n self.considerError(lastError, 'foo')\n return (_G_exactly_1, self.currentError)\n \"\"\"))", "def rule_str(C: List, fmt: str = \"%.3f\") -> str:\n s = \" \" + \"\\n∨ \".join([\"(%s)\" % (\" ∧ \".join([fatom(a[0], a[1], a[2], fmt=fmt) for a in c])) for c in C])\n return s", "def __str__(self):\n s = self.prev_error.failures + '\\n' if self.prev_error else ''\n\n s += '%s' % self.message\n if self.args[1:]:\n s += ' %s' % str(self.args[1:])\n\n for task in self.tasktrace:\n s += '\\n in %s %s' % (task.task.__name__, task.name)\n return s", "def __str__(self):\r\n\t\toutStr = \"\"\r\n\t\toutStr += \"Heuristic Level: \" + str(self.heuristic)\r\n\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\t\tfor row in self.board:\r\n\t\t\ttempStr = (\"\\n|\" + \" %2d |\" * self.n)\r\n\t\t\toutStr += tempStr % tuple(row)\r\n\t\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\r\n\t\treturn outStr", "def __str__(self):\n _str = \"Variables:\\n\"\n for variable in self.variables:\n _str += \" {}\\n\".format(str(variable))\n _str += \"\\nConstraints:\\n\"\n for constraint in self.constraints:\n _str += \" {}\\n\".format(str(constraint))\n return _str", "def test_get_rule_details(self):\n pass", "def __str__(self):\n tapeline = self.tape.format(\n self.index - 10, self.index + 11) + ' : state {}'.format(self.state)\n pointline = ' ' * 10 + '^' + ' ' * 11 + \\\n ' : index {}'.format(self.index)\n\n return tapeline + '\\n' + pointline", "def test_rule(cls, rule, args, kwargs, expected, caplog):\n qalgebra.core.abstract_algebra.LOG = True\n qalgebra.core.algebraic_properties.LOG = True\n log_marker = \"Rule %s.%s\" % (cls.__name__, rule)\n print(\"\\n\", log_marker)\n with caplog.at_level(logging.DEBUG):\n with no_instance_caching():\n expr = cls.create(*args, **kwargs)\n assert expr == expected\n assert log_marker in caplog.text", "def __repr__(self):\r\n c = \"Player \" + self.checker + \" (\" + self.tiebreak + \", \" + str(self.lookahead) + \")\"\r\n return c", "def build_rule(rule, attributes):\n\t\n\tlines = [rule, \"{\"]\n\tfor attr in attributes:\n\t\tlines.append(\"\t%s\" % attr)\n\tlines.append(\"}\\n\")\n\n\treturn \"\\n\".join(lines)", "def dump(self):\n dump_grammar(self.rules)\n print(self.registry)", "def rule_name(self) -> str:\n return pulumi.get(self, \"rule_name\")", "def rule_name(self) -> str:\n return pulumi.get(self, \"rule_name\")", "def render_v2(rule):\n return \" || \".join([\n str(rule.gid),\n str(rule.sid),\n str(rule.rev),\n \"NOCLASS\" if rule.classtype is None else rule.classtype,\n str(rule.priority),\n rule.msg] + rule.references)", "def as_rule(self):\n return ((u'%s = %s' % (self.name, self._as_rhs())) if self.name else\n self._as_rhs())", "def createFisEntry(self, rule):\n\n\t\tline = \"\"\n\t\tfor i, ant in enumerate(rule.antecedent):\n\t\t\tfor j, mf in enumerate(self.inputs[i].mfs):\n\t\t\t\tif (mf.name == ant):\n\t\t\t\t\tline = line + str(j+1) + \" \"\n\t\tline = line[:-1] + \", \"\n\t\tfor i, con in enumerate([rule.consequent]):\n\t\t\tfor j, mf in 
enumerate(self.outputs[i].mfs):\n\t\t\t\tif (mf.name == con):\n\t\t\t\t\tline = line + str(j+1)\n\n\t\tline = line + \" (1) : 1\\n\"\n\t\treturn line", "def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s", "def __str__(self):\n # newline-delimited values of all the attributes\n return \">%s\\n%s\" % (self.Label, self.Sequence)", "def __str__(self):\n return \"\\n\\n\".join(self.failures)", "def show_rule(bot, trigger):\n if not trigger.group(2):\n bot.say('.rule <id> - The Internet has no rules. No Exceptions.')\n return\n\n rule = trigger.group(2).strip()\n\n if not trigger.sender.is_nick() and bot.privileges[trigger.sender][bot.nick] >= HALFOP:\n if rule == \"WAMM\" or rule == \"asie\":\n bot.write(['KICK', trigger.sender, trigger.nick], text='The kick is the rule')\n return\n\n if rule.strip() in rules:\n bot.say('Rule {0}: \"{1}\"'.format(rule, rules[rule]))\n else:\n bot.say('Rule {0} is a lie.'.format(rule))", "def __str__(self):\n base_message = self.base_message.format(filename=self.yaml_file_path)\n error_message = ERROR_MESSAGE.format(key=self.key, expected=self.expected)\n return base_message + error_message", "def generate_workflow(self) -> str:\n analysisTasks = self._parse_parameters()\n terminalTasks = self._identify_terminal_tasks(analysisTasks)\n\n ruleList = {k: SnakemakeRule(v, self._pythonPath)\n for k, v in analysisTasks.items()}\n\n workflowString = 'rule all: \\n\\tinput: ' + \\\n ','.join([ruleList[x].full_output()\n for x in terminalTasks]) + '\\n\\n'\n workflowString += '\\n'.join([x.as_string() for x in ruleList.values()])\n\n return self._dataSet.save_workflow(workflowString)", "def __str__(self):\n debug_str = \"%s ::=\" % str(self.head)\n for symbol in self.body:\n debug_str += \" %s\" % str(symbol)\n return debug_str", "def __str__(self) -> str:\n st = \"<Output> \"\n if self.inst_out:\n st += f'instance:{self.inst_out};'\n st += f'''{self.output} -> {self.target or '\"\"'} -> '''\n if self.inst_in:\n st += f\"instance:{self.inst_in};\"\n st += self.input\n\n if self.params and not self.inst_in:\n st += f\" ({self.params})\"\n if self.delay != 0:\n st += f\" after {self.delay} seconds\"\n if self.times != -1:\n st += \" (once only)\" if self.times == 1 else f\" ({self.times!s} times only)\"\n return st", "def format_with_lineno(self) -> str:\n s = f\"<Rule '{self!s}'\"\n if self.csv_line_number is not None:\n s += f\" from line {self.csv_line_number}\"\n s += \">\"\n return s", "def pretty_str(self,print_option=PrintOption()):\n\n po = print_option.clone()\n po.is_canonical = self.is_canonical\n po.grammar = self\n\n token_rules = set()\n\n # Look for defined rules that look better as absorbed into their uses.\n for name, rule in self.rules.items():\n # Star-able is also optional-able, so starrable must come first.\n starred_phrase = rule.as_starred(name)\n if starred_phrase is not None:\n po.replace_with_starred[name] = starred_phrase\n continue\n optional_phrase = rule.as_optional()\n if optional_phrase is not None:\n po.replace_with_optional[name] = optional_phrase\n continue\n options = rule.as_container()\n if len(options)==1:\n phrase = options[0].as_container()\n if len(phrase)==1 
and phrase[0].is_token():\n token_rules.add(name)\n\n # A rule that was generated to satisfy canonicalization is better\n # presented as absorbed in its original parent.\n for name, rule in self.rules.items():\n # We only care about rules generated during canonicalization\n if name.find('.') > 0 or name.find('/') > 0:\n options = rule.as_container()\n if len(options) != 2:\n continue\n if any([len(x.as_container())!=1 for x in options]):\n continue\n if any([(not x.as_container()[0].is_symbol_name()) for x in options]):\n continue\n # Rule looks like A -> X | Y\n po.replace_with_nested[name] = rule\n\n parts = []\n for key in sorted(self.rules):\n if key == LANGUAGE:\n # This is synthetic, for analysis\n continue\n rule_content = self.rules[key].pretty_str(po)\n if key in po.replace_with_optional:\n continue\n if key in po.replace_with_starred:\n continue\n if key in po.replace_with_nested:\n continue\n if (not po.print_terminals) and (key in token_rules):\n continue\n space = \"\" if po.multi_line_choice else \" \"\n if po.bikeshed:\n key_content = \" <dfn for='recursive descent syntax'>{}</dfn>\".format(key)\n content = \"<div class='syntax' noexport='true'>\\n{}:\\n{}\\n</div>\".format(key_content,rule_content)\n else:\n content = \"{}:{}{}\".format(key,space,rule_content)\n parts.append(content)\n content = (\"\\n\\n\" if po.more_newlines else \"\\n\").join(parts)\n return content", "def dump(self) :\n st = \"%s=%s, valid=%d, found=%d, type=%s stringValue=%s\" \\\n %(self.name_, str(self.value_), self.valid_, self.found_, \\\n self.type_, self.stringValue_)\n print st", "def __str__(self):\n return \"{}\\n{}\\n{}\\n{}\".format(self.header,self.sequence,self.line3,self.quality)", "def __str__(self):\n out = self.san\n if self.comment != \"\":\n out += \" {\" + self.comment.replace('\\n', ' ') + \"}\"\n if len(self.nags) > 0:\n for n in self.nags:\n out += \" \" + n\n for v in self.variations:\n out += \" (\" + str(v).strip(' ') + \")\"\n return out", "def printResults(items, rules):\n\t\n for item, support in items: \n print \"item: %s , %.3f\" % (str(item), support)\n print '-----------------------------------------'\n for r, confi in rules:\n \tpre, after = r\n \tprint \"Rule: %s ==> %s , %.3f\" % (str(pre), str(after), confi)", "def __str__(self) -> str:\n return 'Node({})'.format(self.yaml_node)", "def __str__(self):\n astr = '[\\n name: [ ' + self.name + ' ]\\n'\n astr += ' variables: [ '\n for var, init in self.variables:\n astr += '(' + var + ' := ' + init + '), '\n astr = astr[:-2] + ' ]\\n assumptions: [ '\n for assumption in self.assumptions:\n astr += assumption + ', '\n astr = astr[:-2] + ' ]\\n guarantees: [ '\n for guarantee in self.guarantees:\n astr += guarantee + ', '\n return astr[:-2] + ' ]\\n]'", "def toString(self):\n\t\ts = \"A %s titled '%s':\\n\\n\" % (self.getSpecString(), self.getName())\n\t\ts += \"It's summary reads: %s\\n\\n\" % (self.getDescription())\n\t\ts += \"~~\\n%s\\n~~\" % (self.getAllItemsStr())\n\t\treturn s", "def print_production(self, transition_index, value):\r\n\r\n transition = self.rules[transition_index]\r\n str_list = [self.rule_to_str(t) for t in transition]\r\n print(\" (%04d, %10s) {%s}\" % (transition_index, toRuleString[value], \" \".join(str_list)))", "def __str__(self, output=[]):\n\n class_str = 'Analytical Phonon simulation properties:\\n\\n'\n class_str += super().__str__()\n\n return class_str", "def __repr__(self) -> str:\n return f\"<TestResult {self.test_id},{self.regression_test_id}: {self.exit_code} \" \\\n f\"(expected 
{self.expected_rc} in {self.runtime} ms>\"", "def __str__(self):\n s = 'hit '+str(self.hit)+'\\n'\n s+= 'states '+str(self.states)+'\\n'\n s+= 'chi2 '+str(self.chi2)\n return s", "def rule_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule_name\")", "def __str__(self):\r\n print '%s' % self.name,' %12d' % self.estart,' %12d' % self.eend,' %s' % self.sta, ' %s' % self.chan, ' %s' % self.filepattern", "def __str__(self):\n output = \"Solution for \" + self.vrpdata.InstanceName + \":\\n\"\n output += \"Total distance: \" + str(round(self.objective, 2)) + \"\\n\"\n output += \"Solution valid: \" + str(self.solutionValid) + \"\\n\\n\"\n count = 1 # count routes\n for r in self.routes:\n output += \"Route #\" + str(count) + \"\\n\" + str(r) + \"\\n\" + str(round(r.distance, 2)) + \"\\n\" + str(r.quantity) + \"\\n\"\n count += 1\n return output", "def rule_action(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"rule_action\")", "def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'", "def __str__(self):\n\n print(\"\")\n s = \"NAME : \"+self._name+\"\\n\\n\"\n s += \"PARAMS :\"\n print(s)\n\n for key, val in self.params.items():\n l = (21-len(key))//7\n print(\"{0}\".format(key)+\"\\t\"*l+\":\\t{0}\".format(val))\n\n s = \"\\nRuns stored in DEFAULT_RUNS = \"+str(len(self.default_runs))\n print(s)\n\n s = \"\\nRuns stored in MOD_RUNS = \"+str(len(self.mod_runs))\n print(s)\n\n return \"\"", "def __str__(self):\n # Power/toughness, seen only if it's a creature\n pt = \"\"\n if \"power\" in self:\n pt = \"{0}/{1}\".format(self.power,\n self.toughness).replace(\"*\", \"\\*\")\n # Append loyalty to the end of oracle text if the creature is a\n # planeswalker\n if \"loyalty\" in self:\n self.oracle_text = \"{0}\\nStarting Loyalty: {1}\".format(\n self.oracle_text, self.loyalty)\n\n flavor = \"*{0}*\".format(\n self.flavor_text) if \"flavor_text\" in self else \"\"\n\n return \"**{0}** {1}\\n{2} {3}\\n{4}\\n{5}\\n\\n\".format(self.name,\n self.mana_cost,\n self.type_line,\n pt,\n self.oracle_text,\n flavor)", "def __str__(self):\n return ' '.join([self.source, self.name, str(self.outputs)])", "def __str__(self):\n buf = io.StringIO()\n args.output.write(buf, self.root, self.headings)\n return buf.getvalue()", "def __str__(self):\n items = ['({!r})'.format(item) for item in self.items()]\n return '[{}]'.format(' -> '.join(items))", "def __str__(self):\n outstr = [\"\\n<%s: %s>\" % (self.__class__, self.name)]\n outstr.append(\"%d graphs\" % len(self._graphs))\n outstr = \"\\n\".join(outstr)\n return outstr", "def print_problem(self):\n print('\\n*****************')\n print('PROBLEM: ' + self.problem)\n print('OBJECTS: ' + str(self.objects))\n print('INIT: ' + str(self.init))\n print('GOAL: ' + str(self.goal))\n print('AGENTS: ' + str(self.agents))\n print('****************')", "def printResults(items, rules):\n for item, support in sorted(items, key=lambda (item, support): support):\n print \"item: %s , %.3f\" % (str(item), support)\n\n print \"\\n------------------------ RULES:\"\n for rule, confidence, support in 
sorted(rules, key=lambda (rule, confidence, support): confidence):\n pre, post = rule\n print \"Rule: %s ==> %s , %.3f, %.3f\" % (str(pre), str(post), confidence, support)", "def __str__(self):\n return 'Observables Test Case: {0}'.format(self.title)", "def __str__(self):\n analysis = []\n for analyze in self.analysis:\n if self.analysis[analyze] is not None:\n analysis.append(self.analysis[analyze])\n return \"Analises: {} \\n\".format(analysis)", "def summary(self):\n return \"{0:}: {1:} -> {2:}\".format(self.name, self.var, self.out)", "def print_solution(manager, routing, assignment):\n print('Objective: {}'.format(assignment.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(index)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Objective: {}m\\n'.format(route_distance)", "def __str__(self) -> str:\n st = \"\\tmat = \" + self.mat\n st += \"\\n\\trotation = \" + str(self.ham_rot) + '\\n'\n pl_str = ['(' + p.join(' ') + ')' for p in self.planes]\n st += '\\tplane: ' + \", \".join(pl_str) + '\\n'\n return st", "def __str__(self):\n return self.piece_behavior.summary", "def __str__(self):\n summary = '{} object in the {} language, consisting of {} tokens.'\n return summary.format(\n type(self).__name__, self.language,\n len(self.hand_tagged)\n )", "def __repr__(self):\n options_str = \", \".join(\n [\n f\"validate={self._validate}\",\n f\"outcome={self._outcome}\",\n f\"alpha_prior={self._alpha_prior}\",\n ]\n )\n return f\"{self.__class__.__name__}({options_str})\"", "def print_solution(manager, routing, assignment):\n print('Objective: {}'.format(assignment.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Objective: {}m\\n'.format(route_distance)" ]
[ "0.7577809", "0.7497269", "0.74336165", "0.73365", "0.7002294", "0.6986714", "0.6474687", "0.6371976", "0.63269794", "0.62451273", "0.620827", "0.6196005", "0.6190379", "0.6182506", "0.6179493", "0.6122271", "0.60830194", "0.60090804", "0.5991629", "0.59799564", "0.5888777", "0.5873028", "0.58716637", "0.58665186", "0.5865816", "0.58422565", "0.5788415", "0.5774052", "0.5752707", "0.5749632", "0.57353145", "0.5691922", "0.5691922", "0.5691922", "0.56873363", "0.56792164", "0.56788266", "0.5677975", "0.5672413", "0.5666912", "0.56638485", "0.5637679", "0.55964655", "0.55762386", "0.55600023", "0.5555105", "0.5546736", "0.55349284", "0.5532956", "0.55322", "0.55205745", "0.5505554", "0.55020064", "0.55020064", "0.5496282", "0.54914224", "0.5489084", "0.5475136", "0.54667807", "0.54625654", "0.54490435", "0.5448817", "0.5445562", "0.5440421", "0.543527", "0.54282886", "0.54227257", "0.54223645", "0.5413062", "0.5411269", "0.5405241", "0.53995895", "0.5392017", "0.53880715", "0.53867364", "0.5386011", "0.53831756", "0.53767973", "0.5374547", "0.53669244", "0.5366146", "0.5356512", "0.53532284", "0.53518665", "0.53449404", "0.5341957", "0.53322935", "0.5324486", "0.53225446", "0.531363", "0.53115463", "0.53081834", "0.5305274", "0.5304831", "0.5297624", "0.52929485", "0.5283715", "0.5279262", "0.5276693", "0.527472", "0.52728415" ]
0.0
-1
Creates a new SnakemakeRule instance from a dict representation
def __init__(self, rule_id, input, output, local=False, template=None, **kwargs): super().__init__(rule_id, None, input, output, local, template, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def from_dict(self, d):\r\n options = dict(d)\r\n task_id = options['task_id']\r\n del options['task_id']\r\n return SubtaskStatus.create(task_id, **options)", "def from_dict(cls, data):\n return cls(**data)", "def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n alias = dictionary.get(\"alias\")\r\n cnam_lookups_enabled = dictionary.get(\"cnam_lookups_enabled\")\r\n number_type = dictionary.get(\"number_type\")\r\n rate_center = dictionary.get(\"rate_center\")\r\n state = dictionary.get(\"state\")\r\n value = dictionary.get(\"value\")\r\n\r\n # Return an object of this model\r\n return cls(alias,\r\n cnam_lookups_enabled,\r\n number_type,\r\n rate_center,\r\n state,\r\n value)", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return 
cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def from_dict(cls, d: dict):\n scen = None\n try:\n scen = DTScenario(d['name'])\n for t in d['tasks']:\n scen.addTask(dtTaskTypeDict['cls'][t['class']], t['parameters'])\n except KeyError:\n scen = None\n raise DTInternalError('DTScenario.fromDict()', 'Wrong dict format')\n return scen", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def from_dict(eventScheduleDict):\n pass", "def from_dict(cls, d):\n return cls(**d)", "def from_dict(cls, dikt) -> 'ShardingDescriptor':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n hostname = d.get('hostname')\n project = d.get('project')\n treeish = d.get('treeish')\n path = d.get('path')\n _validate_args(\n hostname,\n project,\n treeish,\n path,\n path_required=True)\n return cls(hostname, project, treeish, path)", "def from_dict(cls, dct):\n if dct.pop('type') != cls.__name__:\n fmt = 'Can not construct Note from dict %s'\n raise ValueError(fmt % dct)\n\n return cls(**dct)", "def from_dict(cls, dikt) -> 'ProductionFlowItem':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n links = dictionary.get('links')\r\n email_config = dictionary.get('emailConfig')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(links,\r\n email_config,\r\n dictionary)", "def from_dict(cls, inp):\n return cls(**{k: v for k, v in inp.items() if k != '__class__'})", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n continue_on_error = dictionary.get('continueOnError')\n is_active = dictionary.get('isActive')\n script_params = dictionary.get('scriptParams')\n script_path = dictionary.get('scriptPath')\n timeout_secs = dictionary.get('timeoutSecs')\n\n # Return an object of this model\n return cls(\n continue_on_error,\n is_active,\n script_params,\n script_path,\n timeout_secs\n)", "def _from_normalised_dict(cls, dictionary):\n if 'cvarsort' in dictionary and dictionary['cvarsort'] != cls.cvarsort:\n raise PydmrsValueError('{} must have cvarsort {}, not {}'.format(cls.__name__,\n cls.cvarsort,\n dictionary['cvarsort']))\n return cls(**{key:value for key, value in dictionary.items() if key != 'cvarsort'})", "def _from_string(cls, serialized):\r\n parse = cls.URL_RE.match(serialized)\r\n if not parse:\r\n raise InvalidKeyError(cls, serialized)\r\n\r\n parse = parse.groupdict()\r\n if parse['definition_id']:\r\n parse['definition_id'] = cls.as_object_id(parse['definition_id'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})", "def from_dict(cls, dikt) -> 'AssetPropertyValueHistoryRequest':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n assert \"status\" in d\n assert \"metadata\" in d\n return cls(**d)", "def from_dict(cls, dikt) -> 'Story':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Problem':\n return deserialize_model(dikt, 
cls)", "def from_dict(cls, dikt) -> 'Failure':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'StartConfiguration':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n lan_ip = dictionary.get('lanIp')\r\n uplink = dictionary.get('uplink')\r\n public_port = dictionary.get('publicPort')\r\n local_port = dictionary.get('localPort')\r\n allowed_ips = dictionary.get('allowedIps')\r\n protocol = dictionary.get('protocol')\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n lan_ip,\r\n uplink,\r\n public_port,\r\n local_port,\r\n allowed_ips,\r\n protocol)", "def from_dict(cls, dikt) -> 'Task':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, data):\n requirements = {k: v for k, v in data['params']['requirements'].items()}\n return cls(requirements)", "def from_dict(cls, dct):\n return cls(**dct)", "def from_dict(cls, dikt) -> 'SourceAudit':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'POSTExecution':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n address = dictionary.get('address')\n port = dictionary.get('port')\n protocol = dictionary.get('protocol')\n is_cluster_auditing_enabled = dictionary.get('isClusterAuditingEnabled')\n is_data_protection_enabled = dictionary.get('isDataProtectionEnabled')\n is_filer_auditing_enabled = dictionary.get('isFilerAuditingEnabled')\n is_ssh_log_enabled = dictionary.get('isSshLogEnabled')\n name = dictionary.get('name')\n\n # Return an object of this model\n return cls(address,\n port,\n protocol,\n is_cluster_auditing_enabled,\n is_data_protection_enabled,\n is_filer_auditing_enabled,\n is_ssh_log_enabled,\n name)", "def create_snat_rule(self, **attrs):\n return self._create(_snat.Rule, **attrs)", "def from_dict(cls, d):\n return cls(d[\"sequence\"], immutable_bounds=d[\"immutable_bounds\"])", "def from_dict(cls, dct):\n dct['address'] = Address(**dct['address'])\n return cls(**dct)", "def from_dict(cls, dikt) -> 'OneOffSchedule':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, obj: dict) -> FormatTest:\n if obj is None:\n return None\n\n if not isinstance(obj, dict):\n return FormatTest.parse_obj(obj)\n\n _obj = FormatTest.parse_obj({\n \"integer\": obj.get(\"integer\"),\n \"int32\": obj.get(\"int32\"),\n \"int64\": obj.get(\"int64\"),\n \"number\": obj.get(\"number\"),\n \"float\": obj.get(\"float\"),\n \"double\": obj.get(\"double\"),\n \"decimal\": obj.get(\"decimal\"),\n \"string\": obj.get(\"string\"),\n \"string_with_double_quote_pattern\": obj.get(\"string_with_double_quote_pattern\"),\n \"byte\": obj.get(\"byte\"),\n \"binary\": obj.get(\"binary\"),\n \"var_date\": obj.get(\"date\"),\n \"date_time\": obj.get(\"dateTime\"),\n \"uuid\": obj.get(\"uuid\"),\n \"password\": obj.get(\"password\"),\n \"pattern_with_digits\": obj.get(\"pattern_with_digits\"),\n \"pattern_with_digits_and_delimiter\": obj.get(\"pattern_with_digits_and_delimiter\")\n })\n return _obj", "def from_dict(self, data: dict):\n if 'title' in data:\n self.title = data['title']\n if 'description' in data:\n self.description = data['description']\n if 'deadline' in data:\n self.deadline = parser.parse(data['deadline'])\n return", "def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n 
constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )", "def from_dict(cls, name, data):\n item = cls(name)\n\n item.description = data.get(\"description\", \"\")\n item.difficulty = data.get(\"difficulty\", 0)\n\n item.prerequisites = data.get(\"prerequisites\", {})\n item.prerequisites[\"items\"] = to_list(item.prerequisites.get(\"items\"))\n item.prerequisites[\"research\"] = to_list(item.prerequisites.get(\"research\"))\n item.prerequisites[\"triggers\"] = to_list(item.prerequisites.get(\"triggers\"))\n item.cost = data.get(\"cost\", {})\n item.strings = data.get(\"strings\", {})\n item.effects = data.get(\"effects\", {})\n for effect in (\n \"enable_commands\",\n \"enable_items\",\n \"enable_resources\",\n \"events\",\n \"triggers\",\n ):\n item.effects[effect] = to_list(item.effects.get(effect))\n return item", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n alias_name = dictionary.get('aliasName')\n client_subnet_whitelist = None\n if dictionary.get('clientSubnetWhitelist') != None:\n client_subnet_whitelist = list()\n for structure in dictionary.get('clientSubnetWhitelist'):\n client_subnet_whitelist.append(cohesity_management_sdk.models.cluster_config_proto_subnet.ClusterConfigProtoSubnet.from_dictionary(structure))\n smb_config = cohesity_management_sdk.models.alias_smb_config.AliasSmbConfig.from_dictionary(dictionary.get('smbConfig')) if dictionary.get('smbConfig') else None\n view_path = dictionary.get('viewPath')\n\n # Return an object of this model\n return cls(alias_name,\n client_subnet_whitelist,\n smb_config,\n view_path)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n scheduling = meraki.models.scheduling_model.SchedulingModel.from_dictionary(dictionary.get('scheduling')) if dictionary.get('scheduling') else None\r\n bandwidth = meraki.models.bandwidth_model.BandwidthModel.from_dictionary(dictionary.get('bandwidth')) if dictionary.get('bandwidth') else None\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n scheduling,\r\n bandwidth,\r\n dictionary)", "def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def from_dict(cls, dict_object):\n\n return cls(**dict_object)", "def from_dict(cls, data):\n return cls(\n filter_id=data[\"Filter\"],\n name=data[\"Name\"],\n admin=data[\"Admin\"],\n action=data[\"Action\"],\n input_port=data[\"Input\"],\n output_port=data[\"Output\"],\n classifiers=data[\"Classifiers\"],\n packet_processing=data[\"Packet Processing\"],\n )", "def __init__(self, rulespath=None):\n if rulespath is None:\n rulespath = path.join(path.dirname(path.realpath(__file__)), 'pattern_sentence_filter.yaml')\n\n self.rulespath = rulespath\n self.rules = Rules(yaml.safe_load(open(rulespath)))", "def from_dict(cls, dikt) -> 'Expression':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Sitemap':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary 
is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n to = dictionary.get('to')\r\n application_id = dictionary.get('applicationId')\r\n expiration_time_in_minutes = dictionary.get('expirationTimeInMinutes')\r\n code = dictionary.get('code')\r\n scope = dictionary.get('scope')\r\n\r\n # Return an object of this model\r\n return cls(to,\r\n application_id,\r\n expiration_time_in_minutes,\r\n code,\r\n scope)", "def from_dict(cls, dikt) -> \"Scheduler\":\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, obj):\n cls._check_keys(obj)\n return cls(**obj)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n alternate_restore_base_directory = dictionary.get('alternateRestoreBaseDirectory')\n continue_on_error = dictionary.get('continueOnError')\n encryption_enabled = dictionary.get('encryptionEnabled')\n generate_ssh_keys = dictionary.get('generateSshKeys')\n override_originals = dictionary.get('overrideOriginals')\n preserve_acls = dictionary.get('preserveAcls')\n preserve_attributes = dictionary.get('preserveAttributes')\n preserve_timestamps = dictionary.get('preserveTimestamps')\n restore_entities = dictionary.get('restoreEntities')\n restore_to_original_paths = dictionary.get('restoreToOriginalPaths')\n save_success_files = dictionary.get('saveSuccessFiles')\n skip_estimation = dictionary.get('skipEstimation')\n\n # Return an object of this model\n return cls(\n alternate_restore_base_directory,\n continue_on_error,\n encryption_enabled,\n generate_ssh_keys,\n override_originals,\n preserve_acls,\n preserve_attributes,\n preserve_timestamps,\n restore_entities,\n restore_to_original_paths,\n save_success_files,\n skip_estimation\n)", "def from_dict(cls, d):\n d = d.copy()\n if \"length\" in d:\n # length argument removed in version 1.1.0\n del d[\"length\"]\n return cls(**d)" ]
[ "0.61375326", "0.60902405", "0.60286725", "0.60125345", "0.597105", "0.5911886", "0.5895749", "0.5895749", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5891623", "0.5874035", "0.58442324", "0.581522", "0.5809438", "0.5804045", "0.5784471", "0.57791626", "0.5776851", "0.5766595", "0.5756868", "0.57540387", "0.574283", "0.5734516", "0.57139975", "0.5710597", "0.56893325", "0.56814414", "0.5676567", "0.5675981", "0.56747025", "0.56696045", "0.5664665", "0.5663855", "0.56278855", "0.56240016", "0.5611198", "0.56104", "0.56060696", "0.5602196", "0.5601705", "0.55800784", "0.55754584", "0.5568526", "0.55666053", "0.5557283", "0.55520517", "0.5527106", "0.5527106", "0.5526382", "0.550478", "0.5492428", "0.5487735", "0.5487285", "0.5486449", "0.54820335", "0.54794806", "0.5473134", "0.5471022" ]
0.0
-1
Creates a new SnakemakeRule instance from a dict representation
def __init__(self, input, output, options, local=False): super().__init__( "create_training_set", None, input, output, local, "multi_training_set.snakefile", ) self.options = options
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def from_dict(self, d):\r\n options = dict(d)\r\n task_id = options['task_id']\r\n del options['task_id']\r\n return SubtaskStatus.create(task_id, **options)", "def from_dict(cls, data):\n return cls(**data)", "def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n alias = dictionary.get(\"alias\")\r\n cnam_lookups_enabled = dictionary.get(\"cnam_lookups_enabled\")\r\n number_type = dictionary.get(\"number_type\")\r\n rate_center = dictionary.get(\"rate_center\")\r\n state = dictionary.get(\"state\")\r\n value = dictionary.get(\"value\")\r\n\r\n # Return an object of this model\r\n return cls(alias,\r\n cnam_lookups_enabled,\r\n number_type,\r\n rate_center,\r\n state,\r\n value)", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return 
cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def from_dict(cls, d: dict):\n scen = None\n try:\n scen = DTScenario(d['name'])\n for t in d['tasks']:\n scen.addTask(dtTaskTypeDict['cls'][t['class']], t['parameters'])\n except KeyError:\n scen = None\n raise DTInternalError('DTScenario.fromDict()', 'Wrong dict format')\n return scen", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def from_dict(eventScheduleDict):\n pass", "def from_dict(cls, d):\n return cls(**d)", "def from_dict(cls, dikt) -> 'ShardingDescriptor':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n hostname = d.get('hostname')\n project = d.get('project')\n treeish = d.get('treeish')\n path = d.get('path')\n _validate_args(\n hostname,\n project,\n treeish,\n path,\n path_required=True)\n return cls(hostname, project, treeish, path)", "def from_dict(cls, dct):\n if dct.pop('type') != cls.__name__:\n fmt = 'Can not construct Note from dict %s'\n raise ValueError(fmt % dct)\n\n return cls(**dct)", "def from_dict(cls, dikt) -> 'ProductionFlowItem':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n links = dictionary.get('links')\r\n email_config = dictionary.get('emailConfig')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(links,\r\n email_config,\r\n dictionary)", "def from_dict(cls, inp):\n return cls(**{k: v for k, v in inp.items() if k != '__class__'})", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n continue_on_error = dictionary.get('continueOnError')\n is_active = dictionary.get('isActive')\n script_params = dictionary.get('scriptParams')\n script_path = dictionary.get('scriptPath')\n timeout_secs = dictionary.get('timeoutSecs')\n\n # Return an object of this model\n return cls(\n continue_on_error,\n is_active,\n script_params,\n script_path,\n timeout_secs\n)", "def _from_normalised_dict(cls, dictionary):\n if 'cvarsort' in dictionary and dictionary['cvarsort'] != cls.cvarsort:\n raise PydmrsValueError('{} must have cvarsort {}, not {}'.format(cls.__name__,\n cls.cvarsort,\n dictionary['cvarsort']))\n return cls(**{key:value for key, value in dictionary.items() if key != 'cvarsort'})", "def _from_string(cls, serialized):\r\n parse = cls.URL_RE.match(serialized)\r\n if not parse:\r\n raise InvalidKeyError(cls, serialized)\r\n\r\n parse = parse.groupdict()\r\n if parse['definition_id']:\r\n parse['definition_id'] = cls.as_object_id(parse['definition_id'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})", "def from_dict(cls, dikt) -> 'AssetPropertyValueHistoryRequest':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n assert \"status\" in d\n assert \"metadata\" in d\n return cls(**d)", "def from_dict(cls, dikt) -> 'Story':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Problem':\n return deserialize_model(dikt, 
cls)", "def from_dict(cls, dikt) -> 'StartConfiguration':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Failure':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n lan_ip = dictionary.get('lanIp')\r\n uplink = dictionary.get('uplink')\r\n public_port = dictionary.get('publicPort')\r\n local_port = dictionary.get('localPort')\r\n allowed_ips = dictionary.get('allowedIps')\r\n protocol = dictionary.get('protocol')\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n lan_ip,\r\n uplink,\r\n public_port,\r\n local_port,\r\n allowed_ips,\r\n protocol)", "def from_dict(cls, dikt) -> 'Task':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, data):\n requirements = {k: v for k, v in data['params']['requirements'].items()}\n return cls(requirements)", "def from_dict(cls, dct):\n return cls(**dct)", "def from_dict(cls, dikt) -> 'SourceAudit':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'POSTExecution':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n address = dictionary.get('address')\n port = dictionary.get('port')\n protocol = dictionary.get('protocol')\n is_cluster_auditing_enabled = dictionary.get('isClusterAuditingEnabled')\n is_data_protection_enabled = dictionary.get('isDataProtectionEnabled')\n is_filer_auditing_enabled = dictionary.get('isFilerAuditingEnabled')\n is_ssh_log_enabled = dictionary.get('isSshLogEnabled')\n name = dictionary.get('name')\n\n # Return an object of this model\n return cls(address,\n port,\n protocol,\n is_cluster_auditing_enabled,\n is_data_protection_enabled,\n is_filer_auditing_enabled,\n is_ssh_log_enabled,\n name)", "def create_snat_rule(self, **attrs):\n return self._create(_snat.Rule, **attrs)", "def from_dict(cls, d):\n return cls(d[\"sequence\"], immutable_bounds=d[\"immutable_bounds\"])", "def from_dict(cls, dikt) -> 'OneOffSchedule':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dct):\n dct['address'] = Address(**dct['address'])\n return cls(**dct)", "def from_dict(cls, obj: dict) -> FormatTest:\n if obj is None:\n return None\n\n if not isinstance(obj, dict):\n return FormatTest.parse_obj(obj)\n\n _obj = FormatTest.parse_obj({\n \"integer\": obj.get(\"integer\"),\n \"int32\": obj.get(\"int32\"),\n \"int64\": obj.get(\"int64\"),\n \"number\": obj.get(\"number\"),\n \"float\": obj.get(\"float\"),\n \"double\": obj.get(\"double\"),\n \"decimal\": obj.get(\"decimal\"),\n \"string\": obj.get(\"string\"),\n \"string_with_double_quote_pattern\": obj.get(\"string_with_double_quote_pattern\"),\n \"byte\": obj.get(\"byte\"),\n \"binary\": obj.get(\"binary\"),\n \"var_date\": obj.get(\"date\"),\n \"date_time\": obj.get(\"dateTime\"),\n \"uuid\": obj.get(\"uuid\"),\n \"password\": obj.get(\"password\"),\n \"pattern_with_digits\": obj.get(\"pattern_with_digits\"),\n \"pattern_with_digits_and_delimiter\": obj.get(\"pattern_with_digits_and_delimiter\")\n })\n return _obj", "def from_dict(self, data: dict):\n if 'title' in data:\n self.title = data['title']\n if 'description' in data:\n self.description = data['description']\n if 'deadline' in data:\n self.deadline = parser.parse(data['deadline'])\n return", "def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n 
constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )", "def from_dict(cls, name, data):\n item = cls(name)\n\n item.description = data.get(\"description\", \"\")\n item.difficulty = data.get(\"difficulty\", 0)\n\n item.prerequisites = data.get(\"prerequisites\", {})\n item.prerequisites[\"items\"] = to_list(item.prerequisites.get(\"items\"))\n item.prerequisites[\"research\"] = to_list(item.prerequisites.get(\"research\"))\n item.prerequisites[\"triggers\"] = to_list(item.prerequisites.get(\"triggers\"))\n item.cost = data.get(\"cost\", {})\n item.strings = data.get(\"strings\", {})\n item.effects = data.get(\"effects\", {})\n for effect in (\n \"enable_commands\",\n \"enable_items\",\n \"enable_resources\",\n \"events\",\n \"triggers\",\n ):\n item.effects[effect] = to_list(item.effects.get(effect))\n return item", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n alias_name = dictionary.get('aliasName')\n client_subnet_whitelist = None\n if dictionary.get('clientSubnetWhitelist') != None:\n client_subnet_whitelist = list()\n for structure in dictionary.get('clientSubnetWhitelist'):\n client_subnet_whitelist.append(cohesity_management_sdk.models.cluster_config_proto_subnet.ClusterConfigProtoSubnet.from_dictionary(structure))\n smb_config = cohesity_management_sdk.models.alias_smb_config.AliasSmbConfig.from_dictionary(dictionary.get('smbConfig')) if dictionary.get('smbConfig') else None\n view_path = dictionary.get('viewPath')\n\n # Return an object of this model\n return cls(alias_name,\n client_subnet_whitelist,\n smb_config,\n view_path)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n scheduling = meraki.models.scheduling_model.SchedulingModel.from_dictionary(dictionary.get('scheduling')) if dictionary.get('scheduling') else None\r\n bandwidth = meraki.models.bandwidth_model.BandwidthModel.from_dictionary(dictionary.get('bandwidth')) if dictionary.get('bandwidth') else None\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n scheduling,\r\n bandwidth,\r\n dictionary)", "def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def from_dict(cls, dict_object):\n\n return cls(**dict_object)", "def from_dict(cls, data):\n return cls(\n filter_id=data[\"Filter\"],\n name=data[\"Name\"],\n admin=data[\"Admin\"],\n action=data[\"Action\"],\n input_port=data[\"Input\"],\n output_port=data[\"Output\"],\n classifiers=data[\"Classifiers\"],\n packet_processing=data[\"Packet Processing\"],\n )", "def __init__(self, rulespath=None):\n if rulespath is None:\n rulespath = path.join(path.dirname(path.realpath(__file__)), 'pattern_sentence_filter.yaml')\n\n self.rulespath = rulespath\n self.rules = Rules(yaml.safe_load(open(rulespath)))", "def from_dict(cls, dikt) -> 'Sitemap':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n to = 
dictionary.get('to')\r\n application_id = dictionary.get('applicationId')\r\n expiration_time_in_minutes = dictionary.get('expirationTimeInMinutes')\r\n code = dictionary.get('code')\r\n scope = dictionary.get('scope')\r\n\r\n # Return an object of this model\r\n return cls(to,\r\n application_id,\r\n expiration_time_in_minutes,\r\n code,\r\n scope)", "def from_dict(cls, dikt) -> 'Expression':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> \"Scheduler\":\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, obj):\n cls._check_keys(obj)\n return cls(**obj)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n alternate_restore_base_directory = dictionary.get('alternateRestoreBaseDirectory')\n continue_on_error = dictionary.get('continueOnError')\n encryption_enabled = dictionary.get('encryptionEnabled')\n generate_ssh_keys = dictionary.get('generateSshKeys')\n override_originals = dictionary.get('overrideOriginals')\n preserve_acls = dictionary.get('preserveAcls')\n preserve_attributes = dictionary.get('preserveAttributes')\n preserve_timestamps = dictionary.get('preserveTimestamps')\n restore_entities = dictionary.get('restoreEntities')\n restore_to_original_paths = dictionary.get('restoreToOriginalPaths')\n save_success_files = dictionary.get('saveSuccessFiles')\n skip_estimation = dictionary.get('skipEstimation')\n\n # Return an object of this model\n return cls(\n alternate_restore_base_directory,\n continue_on_error,\n encryption_enabled,\n generate_ssh_keys,\n override_originals,\n preserve_acls,\n preserve_attributes,\n preserve_timestamps,\n restore_entities,\n restore_to_original_paths,\n save_success_files,\n skip_estimation\n)", "def from_dict(cls, d):\n d = d.copy()\n if \"length\" in d:\n # length argument removed in version 1.1.0\n del d[\"length\"]\n return cls(**d)" ]
[ "0.61378634", "0.60913914", "0.6031257", "0.6013804", "0.59718573", "0.591209", "0.5898094", "0.5898094", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5891009", "0.5874604", "0.58435756", "0.5814847", "0.5809489", "0.58045727", "0.57846206", "0.5779389", "0.5778046", "0.576669", "0.5757316", "0.57552576", "0.5742031", "0.573693", "0.57146305", "0.5711709", "0.5689657", "0.56827873", "0.5678167", "0.56777304", "0.56754625", "0.56721", "0.5665552", "0.56636107", "0.5628624", "0.56263775", "0.5611974", "0.56099284", "0.56065273", "0.56021523", "0.5602097", "0.5578916", "0.5576964", "0.5568905", "0.55676836", "0.5557416", "0.55526024", "0.5528486", "0.5528486", "0.5525787", "0.5506056", "0.54931504", "0.548852", "0.54881287", "0.54863113", "0.54825616", "0.5479623", "0.54728335", "0.54711413" ]
0.0
-1
Creates a new ReportRule instance from a dict representation
def __init__(self, rule_id, input, output, rmd, title, name, metadata, styles, theme, local=False, **kwargs): super().__init__(rule_id, None, input, output, local, **kwargs) self.rmd = rmd self.title = title self.name = name self.metadata = metadata self.styles = styles self.theme = theme # include rmd in params as well (expected by snakemake) self.params["rmd"] = rmd # other parameters used for report generation self.params["title"] = title self.params["name"] = name self.params["metadata"] = metadata self.params["styles"] = styles self.params["theme"] = theme
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n id = dictionary.get('id')\r\n consumer_id = dictionary.get('consumerId')\r\n consumer_ssn = dictionary.get('consumerSsn')\r\n requester_name = dictionary.get('requesterName')\r\n request_id = dictionary.get('requestId')\r\n constraints = finicityapi.models.report_constraints.ReportConstraints.from_dictionary(dictionary.get('constraints')) if dictionary.get('constraints') else None\r\n mtype = dictionary.get('type')\r\n status = dictionary.get('status')\r\n created_date = dictionary.get('createdDate')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(id,\r\n consumer_id,\r\n consumer_ssn,\r\n requester_name,\r\n request_id,\r\n constraints,\r\n mtype,\r\n status,\r\n created_date,\r\n dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n links = dictionary.get('links')\r\n email_config = dictionary.get('emailConfig')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(links,\r\n email_config,\r\n dictionary)", "def from_dict(cls, data):\n return cls(**data)", "def from_dict(cls, obj: dict) -> FormatTest:\n if obj is None:\n return None\n\n if not isinstance(obj, dict):\n return FormatTest.parse_obj(obj)\n\n _obj = FormatTest.parse_obj({\n \"integer\": obj.get(\"integer\"),\n \"int32\": obj.get(\"int32\"),\n \"int64\": obj.get(\"int64\"),\n \"number\": obj.get(\"number\"),\n \"float\": obj.get(\"float\"),\n \"double\": obj.get(\"double\"),\n \"decimal\": obj.get(\"decimal\"),\n \"string\": obj.get(\"string\"),\n \"string_with_double_quote_pattern\": obj.get(\"string_with_double_quote_pattern\"),\n \"byte\": obj.get(\"byte\"),\n \"binary\": obj.get(\"binary\"),\n \"var_date\": obj.get(\"date\"),\n \"date_time\": obj.get(\"dateTime\"),\n \"uuid\": obj.get(\"uuid\"),\n \"password\": obj.get(\"password\"),\n \"pattern_with_digits\": obj.get(\"pattern_with_digits\"),\n \"pattern_with_digits_and_delimiter\": obj.get(\"pattern_with_digits_and_delimiter\")\n })\n return _obj", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, 
_dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dict(cls, dikt) -> 'AssetPropertyValueHistoryRequest':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n pay_period = dictionary.get('payPeriod')\r\n billable = dictionary.get('billable')\r\n asset_id = dictionary.get('assetId')\r\n pay_date = dictionary.get('payDate')\r\n start_date = dictionary.get('startDate')\r\n end_date = dictionary.get('endDate')\r\n net_pay_current = dictionary.get('netPayCurrent')\r\n net_pay_ytd = dictionary.get('netPayYTD')\r\n gross_pay_current = dictionary.get('grossPayCurrent')\r\n gross_pay_ytd = dictionary.get('grossPayYTD')\r\n payroll_provider = dictionary.get('payrollProvider')\r\n employer = finicityapi.models.employer.Employer.from_dictionary(dictionary.get('employer')) if dictionary.get('employer') else None\r\n employee = finicityapi.models.employee.Employee.from_dictionary(dictionary.get('employee')) if dictionary.get('employee') else None\r\n pay_stat = None\r\n if dictionary.get('payStat') != None:\r\n pay_stat = list()\r\n for structure in dictionary.get('payStat'):\r\n pay_stat.append(finicityapi.models.pay_stat.PayStat.from_dictionary(structure))\r\n deductions = None\r\n if dictionary.get('deductions') != None:\r\n deductions = list()\r\n for structure in dictionary.get('deductions'):\r\n deductions.append(finicityapi.models.deduction.Deduction.from_dictionary(structure))\r\n direct_deposits = None\r\n if dictionary.get('directDeposits') != None:\r\n direct_deposits = list()\r\n for structure in dictionary.get('directDeposits'):\r\n direct_deposits.append(finicityapi.models.direct_deposit.DirectDeposit.from_dictionary(structure))\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key 
in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(pay_period,\r\n billable,\r\n asset_id,\r\n pay_date,\r\n start_date,\r\n end_date,\r\n net_pay_current,\r\n net_pay_ytd,\r\n gross_pay_current,\r\n gross_pay_ytd,\r\n payroll_provider,\r\n employer,\r\n employee,\r\n pay_stat,\r\n deductions,\r\n direct_deposits,\r\n dictionary)", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def from_dict(cls, d):\n # Pass values from the dictionary to the __init__() method\n obj = cls(tables=d['tables'], model_expression=d['model_expression'], \n filters=d['filters'], out_tables=d['out_tables'], \n out_column=d['out_column'], out_transform=d['out_transform'],\n out_filters=d['out_filters'], name=d['name'], tags=d['tags'])\n\n obj.summary_table = d['summary_table']\n obj.fitted_parameters = d['fitted_parameters']\n obj.model = None\n \n # Unpack the urbansim.models.RegressionModel() sub-object and resuscitate it\n if d['model'] is not None:\n model_config = yamlio.convert_to_yaml(d['model'], None)\n obj.model = RegressionModel.from_yaml(model_config)\n \n return obj", "def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def from_dict(cls, d):\n return cls(**d)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n alias = dictionary.get(\"alias\")\r\n cnam_lookups_enabled = dictionary.get(\"cnam_lookups_enabled\")\r\n number_type = dictionary.get(\"number_type\")\r\n rate_center = dictionary.get(\"rate_center\")\r\n state = dictionary.get(\"state\")\r\n value = dictionary.get(\"value\")\r\n\r\n # Return an object of this model\r\n return cls(alias,\r\n cnam_lookups_enabled,\r\n number_type,\r\n rate_center,\r\n state,\r\n value)", "def from_dict(cls, d):\n schema = d.get(\"schema\", None)\n if schema is not None:\n schema = {\n attr_name: AttributeSchema.from_dict(asd)\n for attr_name, asd in iteritems(schema)\n }\n\n return cls(schema=schema)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n is_mail_enabled = dictionary.get('isMailEnabled')\n is_security_enabled = dictionary.get('isSecurityEnabled')\n member_count = dictionary.get('memberCount')\n visibility = dictionary.get('visibility')\n\n # Return an object of this model\n return cls(\n is_mail_enabled,\n is_security_enabled,\n member_count,\n visibility\n)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n to = dictionary.get('to')\r\n application_id = dictionary.get('applicationId')\r\n expiration_time_in_minutes = dictionary.get('expirationTimeInMinutes')\r\n code = dictionary.get('code')\r\n scope = dictionary.get('scope')\r\n\r\n # Return an object of this model\r\n return cls(to,\r\n application_id,\r\n expiration_time_in_minutes,\r\n code,\r\n scope)", "def from_dict(cls, dikt) -> 'SourceAudit':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n assert \"status\" in d\n assert 
\"metadata\" in d\n return cls(**d)", "def from_dict(eventScheduleDict):\n pass", "def from_dict(self, data: dict):\n if 'title' in data:\n self.title = data['title']\n if 'description' in data:\n self.description = data['description']\n if 'deadline' in data:\n self.deadline = parser.parse(data['deadline'])\n return", "def from_dict(cls, _dict: Dict) -> 'Resource':\n args = {}\n if 'attributes' in _dict:\n args['attributes'] = [Attribute.from_dict(x) for x in _dict.get('attributes')]\n return cls(**args)", "def from_dict(cls, dct):\n dct['address'] = Address(**dct['address'])\n return cls(**dct)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n id = dictionary.get('id')\r\n customer_id = dictionary.get('customerId')\r\n consumer_id = dictionary.get('consumerId')\r\n consumer_ssn = dictionary.get('consumerSsn')\r\n requester_name = dictionary.get('requesterName')\r\n request_id = dictionary.get('requestId')\r\n mtype = dictionary.get('type')\r\n status = dictionary.get('status')\r\n created_date = dictionary.get('createdDate')\r\n customer_type = dictionary.get('customerType')\r\n title = dictionary.get('title')\r\n start_date = dictionary.get('startDate')\r\n end_date = dictionary.get('endDate')\r\n days = dictionary.get('days')\r\n seasoned = dictionary.get('seasoned')\r\n gse_enabled = dictionary.get('gseEnabled')\r\n consolidated_available_balance = dictionary.get('consolidatedAvailableBalance')\r\n portfolio_id = dictionary.get('portfolioId')\r\n institutions = None\r\n if dictionary.get('institutions') != None:\r\n institutions = list()\r\n for structure in dictionary.get('institutions'):\r\n institutions.append(finicityapi.models.voa_with_income_report_institution.VOAWithIncomeReportInstitution.from_dictionary(structure))\r\n assets = finicityapi.models.asset_summary.AssetSummary.from_dictionary(dictionary.get('assets')) if dictionary.get('assets') else None\r\n errors = None\r\n if dictionary.get('errors') != None:\r\n errors = list()\r\n for structure in dictionary.get('errors'):\r\n errors.append(finicityapi.models.error_message.ErrorMessage.from_dictionary(structure))\r\n constraints = finicityapi.models.report_constraints.ReportConstraints.from_dictionary(dictionary.get('constraints')) if dictionary.get('constraints') else None\r\n source = dictionary.get('source')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(id,\r\n customer_id,\r\n consumer_id,\r\n consumer_ssn,\r\n requester_name,\r\n request_id,\r\n mtype,\r\n status,\r\n created_date,\r\n customer_type,\r\n title,\r\n start_date,\r\n end_date,\r\n days,\r\n seasoned,\r\n gse_enabled,\r\n consolidated_available_balance,\r\n portfolio_id,\r\n institutions,\r\n assets,\r\n errors,\r\n constraints,\r\n source,\r\n dictionary)", "def from_dict(cls, _dict: Dict) -> 'Port':\n args = {}\n if 'direct_link_count' in _dict:\n args['direct_link_count'] = _dict.get('direct_link_count')\n else:\n raise ValueError('Required property \\'direct_link_count\\' not present in Port JSON')\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n else:\n raise ValueError('Required property \\'id\\' not present in Port JSON')\n if 'label' in _dict:\n args['label'] = _dict.get('label')\n else:\n raise ValueError('Required property \\'label\\' not present in Port JSON')\n if 'location_display_name' in _dict:\n 
args['location_display_name'] = _dict.get('location_display_name')\n else:\n raise ValueError('Required property \\'location_display_name\\' not present in Port JSON')\n if 'location_name' in _dict:\n args['location_name'] = _dict.get('location_name')\n else:\n raise ValueError('Required property \\'location_name\\' not present in Port JSON')\n if 'provider_name' in _dict:\n args['provider_name'] = _dict.get('provider_name')\n else:\n raise ValueError('Required property \\'provider_name\\' not present in Port JSON')\n if 'supported_link_speeds' in _dict:\n args['supported_link_speeds'] = _dict.get('supported_link_speeds')\n else:\n raise ValueError('Required property \\'supported_link_speeds\\' not present in Port JSON')\n return cls(**args)", "def from_dict(cls, _dict: Dict) -> 'FirstHref':\n args = {}\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n return cls(**args)", "def from_dict(cls, ref, d):\n source = d.pop('dataSource', None)\n ds_type = d.pop('dataSourceType')\n\n # patch to deal with changing Background extension handling\n filetype = d.pop('filetype', None)\n if filetype is not None:\n if not source.endswith(filetype):\n source += filetype\n\n return cls(ref, source, ds_type, **d)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n address = dictionary.get('address')\n port = dictionary.get('port')\n protocol = dictionary.get('protocol')\n is_cluster_auditing_enabled = dictionary.get('isClusterAuditingEnabled')\n is_data_protection_enabled = dictionary.get('isDataProtectionEnabled')\n is_filer_auditing_enabled = dictionary.get('isFilerAuditingEnabled')\n is_ssh_log_enabled = dictionary.get('isSshLogEnabled')\n name = dictionary.get('name')\n\n # Return an object of this model\n return cls(address,\n port,\n protocol,\n is_cluster_auditing_enabled,\n is_data_protection_enabled,\n is_filer_auditing_enabled,\n is_ssh_log_enabled,\n name)", "def from_dict(cls, dikt) -> 'Expression':\n return util.deserialize_model(dikt, cls)", "def from_dict(self, d):\r\n options = dict(d)\r\n task_id = options['task_id']\r\n del options['task_id']\r\n return SubtaskStatus.create(task_id, **options)", "def from_dict(cls, d: Dict[str, Any]) -> \"Link\":\n d = copy(d)\n rel = d.pop(\"rel\")\n href = d.pop(\"href\")\n media_type = d.pop(\"type\", None)\n title = d.pop(\"title\", None)\n\n extra_fields = None\n if any(d):\n extra_fields = d\n\n return cls(\n rel=rel,\n target=href,\n media_type=media_type,\n title=title,\n extra_fields=extra_fields,\n )", "def from_dict(cls, dikt) -> 'ResultFeedback':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dict_object):\n\n return cls(**dict_object)", "def from_dict(cls, dikt) -> 'Dashboard':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, data):\n requirements = {k: v for k, v in data['params']['requirements'].items()}\n return cls(requirements)", "def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, tag_dict):\n return cls(tag_dict.get('tag_type'), tag_dict.get('value'))", "def from_dict(cls, dct):\n if dct.pop('type') != cls.__name__:\n fmt = 'Can not construct Note from dict %s'\n raise ValueError(fmt % dct)\n\n return cls(**dct)", "def from_dict(cls, dikt) -> 'ProductionFlowItem':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, record, _id=None):\n # copy dict\n record = dict(record)\n\n # get record id and remove it from record\n record_id = 
record.pop(\"_id\", None)\n if _id is None:\n _id = record_id\n if _id is None:\n _id = cls._make_uuid()\n\n # make record\n return cls(record, _id)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n scheduling = meraki.models.scheduling_model.SchedulingModel.from_dictionary(dictionary.get('scheduling')) if dictionary.get('scheduling') else None\r\n bandwidth = meraki.models.bandwidth_model.BandwidthModel.from_dictionary(dictionary.get('bandwidth')) if dictionary.get('bandwidth') else None\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n scheduling,\r\n bandwidth,\r\n dictionary)", "def from_dict(cls, d):\n index = d.get(\"index\", None)\n if index is not None:\n index = {\n int(value): Attribute.from_dict(ad)\n for value, ad in iteritems(index)\n }\n\n return cls(index=index)", "def from_dict(self, dictionary):\n self.fk_from_operation = dictionary['fk_op_id']\n self.fk_for_user = dictionary['fk_user_id']\n self.fk_in_project = dictionary['fk_project_id']\n self.session_name = dictionary['session_name']\n self.name = dictionary['name']\n self.file_path = dictionary['file_path']\n self.file_format = dictionary['file_format']\n return self", "def from_dict(self, d):\n return Grid(**d)", "def from_dict(cls, dct):\n return cls(**dct)", "def from_dict(cls, data):\n return cls(\n filter_id=data[\"Filter\"],\n name=data[\"Name\"],\n admin=data[\"Admin\"],\n action=data[\"Action\"],\n input_port=data[\"Input\"],\n output_port=data[\"Output\"],\n classifiers=data[\"Classifiers\"],\n packet_processing=data[\"Packet Processing\"],\n )", "def from_dict(cls, d):\n hostname = d.get('hostname')\n project = d.get('project')\n treeish = d.get('treeish')\n path = d.get('path')\n _validate_args(\n hostname,\n project,\n treeish,\n path,\n path_required=True)\n return cls(hostname, project, treeish, path)", "def fromdict(cls,datadict):\n return cls(fmetric=datadict.get('fmetric'),\n fhost=datadict.get('fhost'),\n fvalue=datadict.get('fvalue'),\n ftime=datadict.get('ftime'),\n funit=datadict.get('funit'),\n finfo=datadict.get('finfo'))", "def from_dict(cls, obj):\n cls._check_keys(obj)\n return cls(**obj)", "def from_dict(cls, _dict: Dict) -> 'NextHref':\n args = {}\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n return cls(**args)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n primary_language = dictionary.get('PrimaryLanguage')\r\n secondary_language = dictionary.get('SecondaryLanguage')\r\n xml_signature = dictionary.get('XmlSignature')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(primary_language,\r\n secondary_language,\r\n xml_signature,\r\n dictionary)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n continue_on_error = dictionary.get('continueOnError')\n is_active = dictionary.get('isActive')\n script_params = dictionary.get('scriptParams')\n script_path = dictionary.get('scriptPath')\n timeout_secs = dictionary.get('timeoutSecs')\n\n # Return an object of this model\n return cls(\n 
continue_on_error,\n is_active,\n script_params,\n script_path,\n timeout_secs\n)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n continuous_schedule = cohesity_management_sdk.models.continuous_schedule.ContinuousSchedule.from_dictionary(dictionary.get('continuousSchedule')) if dictionary.get('continuousSchedule') else None\n daily_schedule = cohesity_management_sdk.models.daily_schedule.DailySchedule.from_dictionary(dictionary.get('dailySchedule')) if dictionary.get('dailySchedule') else None\n monthly_schedule = cohesity_management_sdk.models.monthly_schedule.MonthlySchedule.from_dictionary(dictionary.get('monthlySchedule')) if dictionary.get('monthlySchedule') else None\n periodicity = dictionary.get('periodicity')\n rpo_schedule = cohesity_management_sdk.models.rpo_schedule.RpoSchedule.from_dictionary(dictionary.get('rpoSchedule')) if dictionary.get('rpoSchedule') else None\n\n # Return an object of this model\n return cls(\n continuous_schedule,\n daily_schedule,\n monthly_schedule,\n periodicity,\n rpo_schedule\n)", "def from_raw_dict(cls, data: Dict[str, str]):\n print(data)\n return cls(**{field: cls._PARSERS.get(field, str)(data[field])\n for field in cls.__dataclass_fields__ # type: ignore\n if field in data})", "def from_dict(cls, d, record_cls=None):\n if record_cls is None:\n record_cls_str = d.get(cls._ELE_CLS_FIELD, None)\n if record_cls_str is None:\n raise DataRecordsError(\n \"Your DataRecords does not have its '%s' attribute \"\n \"populated, so you must manually specify the `record_cls` \"\n \"to use when loading it\" % cls._ELE_CLS_FIELD\n )\n record_cls = etau.get_class(record_cls_str)\n\n return DataRecords(\n record_cls=record_cls,\n records=[record_cls.from_dict(r) for r in d[cls._ELE_ATTR]],\n )" ]
[ "0.61548984", "0.6064839", "0.5945463", "0.59221816", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.591966", "0.58697486", "0.58580166", "0.5854221", "0.5837279", "0.57997847", "0.57860833", "0.57854056", "0.5773502", "0.5773502", "0.57388705", "0.5731515", "0.57111025", "0.5708404", "0.57020164", "0.56983143", "0.5685916", "0.5683024", "0.56785554", "0.56576943", "0.56576663", "0.5630736", "0.5627903", "0.5624332", "0.5622776", "0.56225175", "0.5606891", "0.5591123", "0.55818164", "0.5579736", "0.55662525", "0.5560606", "0.55518764", "0.55344236", "0.55316937", "0.55220073", "0.55135036", "0.55133396", "0.54952854", "0.5495282", "0.54926896", "0.54918724", "0.5488818", "0.5476515", "0.5473944", "0.5472983", "0.5449643", "0.5445347", "0.54376644", "0.5424265", "0.54223096", "0.542229", "0.54211795" ]
0.0
-1
Creates a new SnakemakeRuleGroup instance from a dict representation
def __init__( self, rule_id, parent_id, group_actions, input, output, local=False, **kwargs ): self.rule_id = rule_id self.parent_id = parent_id self.input = input self.output = output self.local = local self.params = kwargs self.groupped = True # load sub-actions self.actions = OrderedDict() for action in group_actions: # get action name action_name = action["action_name"] del action["action_name"] # determine template filepath action_type = action_name.split("_")[0] template = "actions/{}/{}.snakefile".format(action_type, action_name) # create new SnakemakeRule instance self.actions[action_name] = ActionRule( rule_id=None, parent_id=None, input=None, output=None, template=template, **action )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_json(value):\r\n for key in ('id', 'name', 'version'):\r\n if key not in value:\r\n raise TypeError(\"Group dict {0} missing value key '{1}'\".format(\r\n value, key))\r\n\r\n if value[\"version\"] != Group.VERSION:\r\n raise TypeError(\"Group dict {0} has unexpected version\".format(\r\n value))\r\n\r\n return Group(value[\"id\"], value[\"name\"])", "def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, _dict: Dict) -> 'ResourceGroupReference':\n args = {}\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n else:\n raise ValueError('Required property \\'id\\' not present in ResourceGroupReference JSON')\n return cls(**args)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n create_new_group = dictionary.get('createNewGroup')\n ms_groups_vec = None\n if dictionary.get('msGroupsVec') != None:\n ms_groups_vec = list()\n for structure in dictionary.get('msGroupsVec'):\n ms_groups_vec.append(cohesity_management_sdk.models.restore_o_365_groups_params_ms_group_info.RestoreO365GroupsParams_MSGroupInfo.from_dictionary(structure))\n restore_original_owners_members = dictionary.get('restoreOriginalOwnersMembers')\n restore_to_original = dictionary.get('restoreToOriginal')\n target_group = dictionary.get('targetGroup')\n target_group_name = dictionary.get('targetGroupName')\n target_group_owner = dictionary.get('targetGroupOwner')\n\n # Return an object of this model\n return cls(\n create_new_group,\n ms_groups_vec,\n restore_original_owners_members,\n restore_to_original,\n target_group,\n target_group_name,\n target_group_owner\n)", "def from_dict(self, d):\r\n options = dict(d)\r\n task_id = options['task_id']\r\n del options['task_id']\r\n return SubtaskStatus.create(task_id, **options)", "def from_dict(cls, _dict: Dict) -> 'ResourceGroupIdentity':\n args = {}\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n else:\n raise ValueError('Required property \\'id\\' not present in ResourceGroupIdentity JSON')\n return cls(**args)", "def from_dict(cls, d: dict):\n scen = None\n try:\n scen = DTScenario(d['name'])\n for t in d['tasks']:\n scen.addTask(dtTaskTypeDict['cls'][t['class']], t['parameters'])\n except KeyError:\n scen = None\n raise DTInternalError('DTScenario.fromDict()', 'Wrong dict format')\n return scen", "def from_dict(cls, dikt) -> 'ShardingDescriptor':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n hostname = d.get('hostname')\n project = d.get('project')\n treeish = d.get('treeish')\n path = d.get('path')\n _validate_args(\n hostname,\n project,\n treeish,\n path,\n path_required=True)\n return cls(hostname, project, treeish, path)", "def from_dict(self, d):\n return Grid(**d)", "def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )", "def _from_normalised_dict(cls, dictionary):\n if 'cvarsort' in dictionary and dictionary['cvarsort'] != cls.cvarsort:\n raise PydmrsValueError('{} must have cvarsort {}, not {}'.format(cls.__name__,\n cls.cvarsort,\n dictionary['cvarsort']))\n return cls(**{key:value for key, value in dictionary.items() if key != 'cvarsort'})", "def from_dict(cls, _dict: Dict) -> 'Pool':\n args = {}\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n if 'name' in _dict:\n args['name'] = _dict.get('name')\n 
if 'description' in _dict:\n args['description'] = _dict.get('description')\n if 'enabled' in _dict:\n args['enabled'] = _dict.get('enabled')\n if 'healthy_origins_threshold' in _dict:\n args['healthy_origins_threshold'] = _dict.get('healthy_origins_threshold')\n if 'origins' in _dict:\n args['origins'] = [Origin.from_dict(x) for x in _dict.get('origins')]\n if 'monitor' in _dict:\n args['monitor'] = _dict.get('monitor')\n if 'notification_channel' in _dict:\n args['notification_channel'] = _dict.get('notification_channel')\n if 'health' in _dict:\n args['health'] = _dict.get('health')\n if 'healthcheck_region' in _dict:\n args['healthcheck_region'] = _dict.get('healthcheck_region')\n if 'healthcheck_subnets' in _dict:\n args['healthcheck_subnets'] = _dict.get('healthcheck_subnets')\n if 'created_on' in _dict:\n args['created_on'] = _dict.get('created_on')\n if 'modified_on' in _dict:\n args['modified_on'] = _dict.get('modified_on')\n return cls(**args)", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dict(cls, d):\n return cls(d[\"sequence\"], immutable_bounds=d[\"immutable_bounds\"])", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n alias = dictionary.get(\"alias\")\r\n cnam_lookups_enabled = dictionary.get(\"cnam_lookups_enabled\")\r\n number_type = dictionary.get(\"number_type\")\r\n rate_center = dictionary.get(\"rate_center\")\r\n state = dictionary.get(\"state\")\r\n value = dictionary.get(\"value\")\r\n\r\n # Return an object of this model\r\n return cls(alias,\r\n cnam_lookups_enabled,\r\n number_type,\r\n rate_center,\r\n state,\r\n value)", "def from_dict(cls, data):\n return cls(**data)", "def _from_string(cls, serialized):\r\n parse = cls.URL_RE.match(serialized)\r\n if not parse:\r\n raise InvalidKeyError(cls, serialized)\r\n\r\n parse = parse.groupdict()\r\n if parse['definition_id']:\r\n parse['definition_id'] = cls.as_object_id(parse['definition_id'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})", "def from_dict(cls, d):\n assert \"status\" in d\n assert \"metadata\" in d\n return cls(**d)", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def from_dict(cls, dikt) -> 'StartConfiguration':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n ret_obj = AssetClass(d.pop('Name'))\n for child_dict in d.pop('Children', []):\n ret_obj.add_subclass(\n child_dict.pop('Ratio'),\n AssetClass.from_dict(child_dict))\n assert len(d) == 0, f'Extra attributes found: {list(d.keys())}'\n return ret_obj.validate()", "def from_dict(cls: Type[\"GraphSet\"], data: Dict[str, Any]) -> \"GraphSet\":\n resources: List[Resource] = []\n name = data[\"name\"]\n start_time = data[\"start_time\"]\n end_time = data[\"end_time\"]\n version = data[\"version\"]\n errors = data[\"errors\"]\n stats = MultilevelCounter.from_dict(data[\"stats\"])\n for resource_id, resource_data in data[\"resources\"].items():\n resource = Resource.from_dict(resource_id, resource_data)\n resources.append(resource)\n return cls(\n name=name,\n version=version,\n start_time=start_time,\n end_time=end_time,\n resources=resources,\n errors=errors,\n stats=stats,\n )", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return 
cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def from_dict(cls, d):\n return cls(**d)", "def from_dict(cls, dikt) -> 'ProductionFlowItem':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n obj = cls()\n obj.__dict__.update(d)\n obj.words = {value: set(words) for value, words in d['words'].items()}", "def from_dict(cls, name, data):\n item = cls(name)\n\n item.description = data.get(\"description\", \"\")\n item.difficulty = data.get(\"difficulty\", 0)\n\n item.prerequisites = data.get(\"prerequisites\", {})\n item.prerequisites[\"items\"] = to_list(item.prerequisites.get(\"items\"))\n item.prerequisites[\"research\"] = to_list(item.prerequisites.get(\"research\"))\n item.prerequisites[\"triggers\"] = to_list(item.prerequisites.get(\"triggers\"))\n item.cost = data.get(\"cost\", {})\n item.strings = data.get(\"strings\", {})\n item.effects = data.get(\"effects\", {})\n for effect in (\n \"enable_commands\",\n \"enable_items\",\n \"enable_resources\",\n \"events\",\n 
\"triggers\",\n ):\n item.effects[effect] = to_list(item.effects.get(effect))\n return item", "def from_dict(cls, tag_dict):\n return cls(tag_dict.get('tag_type'), tag_dict.get('value'))", "def create_group(self, properties: dict[str, Any | None]) -> dict:\n group = self.ms_client.http_request(method='POST', url_suffix='groups', json_data=properties)\n return group", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n alias_name = dictionary.get('aliasName')\n client_subnet_whitelist = None\n if dictionary.get('clientSubnetWhitelist') != None:\n client_subnet_whitelist = list()\n for structure in dictionary.get('clientSubnetWhitelist'):\n client_subnet_whitelist.append(cohesity_management_sdk.models.cluster_config_proto_subnet.ClusterConfigProtoSubnet.from_dictionary(structure))\n smb_config = cohesity_management_sdk.models.alias_smb_config.AliasSmbConfig.from_dictionary(dictionary.get('smbConfig')) if dictionary.get('smbConfig') else None\n view_path = dictionary.get('viewPath')\n\n # Return an object of this model\n return cls(alias_name,\n client_subnet_whitelist,\n smb_config,\n view_path)", "def from_dict(cls, d):\n return loadd(d, cls)", "def from_dict(cls, dikt) -> 'Problem':\n return deserialize_model(dikt, cls)", "def __init__(self, groups=dict()):\n self.groups = groups", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def from_dict(cls, data):\n requirements = {k: v for k, v in data['params']['requirements'].items()}\n return cls(requirements)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n match_labels = None\n if dictionary.get('matchLabels') != None:\n match_labels = list()\n for structure in dictionary.get('matchLabels'):\n match_labels.append(cohesity_management_sdk.models.label_selector_match_labels_entry.LabelSelector_MatchLabelsEntry.from_dictionary(structure))\n name = dictionary.get('name')\n service_name = dictionary.get('serviceName')\n\n # Return an object of this model\n return cls(\n match_labels,\n name,\n service_name\n)", "def new(ruletype, **kwargs):\n try:\n ruleclass = TYPE_MAP[ruletype]\n except KeyError:\n raise error.InvalidRule('Unrecognized rule type: %s' % ruletype)\n\n try:\n return ruleclass(**kwargs)\n except TypeError:\n log.error('BADNESS. 
ruletype: %s, data: %s', ruletype, kwargs)\n raise\n #raise error.InvalidRule(\n # '%s does not work that way.\\nDetails: %s.\\nData: %s' % (\n # ruletype, err, kwargs))", "def from_dict(cls, dikt) -> 'Task':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n folder_id = dictionary.get('folderId')\n public_folder_item_id_list = dictionary.get(\"publicFolderItemIdList\")\n restore_entire_folder = dictionary.get('restoreEntireFolder')\n\n # Return an object of this model\n return cls(\n folder_id,\n public_folder_item_id_list,\n restore_entire_folder\n)", "def from_dict(self, data: dict):\n if 'title' in data:\n self.title = data['title']\n if 'description' in data:\n self.description = data['description']\n if 'deadline' in data:\n self.deadline = parser.parse(data['deadline'])\n return", "def from_dict(cls, dikt) -> 'Spacecraft':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n lan_ip = dictionary.get('lanIp')\r\n uplink = dictionary.get('uplink')\r\n public_port = dictionary.get('publicPort')\r\n local_port = dictionary.get('localPort')\r\n allowed_ips = dictionary.get('allowedIps')\r\n protocol = dictionary.get('protocol')\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n lan_ip,\r\n uplink,\r\n public_port,\r\n local_port,\r\n allowed_ips,\r\n protocol)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n api_group = dictionary.get('apiGroup')\n api_version = dictionary.get('apiVersion')\n kind = dictionary.get('kind')\n name = dictionary.get('name')\n namespace = dictionary.get('namespace')\n resource_version = dictionary.get('resourceVersion')\n uid = dictionary.get('uid')\n\n # Return an object of this model\n return cls(\n api_group,\n api_version,\n kind,\n name,\n namespace,\n resource_version,\n uid\n)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n is_mail_enabled = dictionary.get('isMailEnabled')\n is_security_enabled = dictionary.get('isSecurityEnabled')\n member_count = dictionary.get('memberCount')\n visibility = dictionary.get('visibility')\n\n # Return an object of this model\n return cls(\n is_mail_enabled,\n is_security_enabled,\n member_count,\n visibility\n)", "def create( self, trans, payload, **kwd ):\n group_dict = dict( message='', status='ok' )\n name = payload.get( 'name', '' )\n if name:\n description = payload.get( 'description', '' )\n if not description:\n description = ''\n else:\n # TODO add description field to the model\n group_dict = self.group_manager.create( trans, name=name ).to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) )\n else:\n raise RequestParameterMissingException( 'Missing required parameter \"name\".' )\n return group_dict", "def from_dict(cls, the_dict):\n\n if not isinstance(the_dict, dict):\n raise TypeError('This requires a dict. 
Got type {}'.format(type(the_dict)))\n if 'label_schema' not in the_dict:\n raise KeyError('this dictionary must contain a label_schema')\n\n typ = the_dict.get('type', 'NONE')\n if typ != cls._type:\n raise ValueError('FileLabelCollection cannot be constructed from the input dictionary')\n\n return cls(\n the_dict['label_schema'],\n version=the_dict.get('version', 'UNKNOWN'),\n annotations=the_dict.get('annotations', None),\n image_file_name=the_dict.get('image_file_name', None),\n image_id=the_dict.get('image_id', None),\n core_name=the_dict.get('core_name', None))", "def from_dict(cls, dikt) -> 'JobOutputRequest':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, obj: dict) -> FormatTest:\n if obj is None:\n return None\n\n if not isinstance(obj, dict):\n return FormatTest.parse_obj(obj)\n\n _obj = FormatTest.parse_obj({\n \"integer\": obj.get(\"integer\"),\n \"int32\": obj.get(\"int32\"),\n \"int64\": obj.get(\"int64\"),\n \"number\": obj.get(\"number\"),\n \"float\": obj.get(\"float\"),\n \"double\": obj.get(\"double\"),\n \"decimal\": obj.get(\"decimal\"),\n \"string\": obj.get(\"string\"),\n \"string_with_double_quote_pattern\": obj.get(\"string_with_double_quote_pattern\"),\n \"byte\": obj.get(\"byte\"),\n \"binary\": obj.get(\"binary\"),\n \"var_date\": obj.get(\"date\"),\n \"date_time\": obj.get(\"dateTime\"),\n \"uuid\": obj.get(\"uuid\"),\n \"password\": obj.get(\"password\"),\n \"pattern_with_digits\": obj.get(\"pattern_with_digits\"),\n \"pattern_with_digits_and_delimiter\": obj.get(\"pattern_with_digits_and_delimiter\")\n })\n return _obj", "def from_dict(cls, d):\n s = cls()\n s.update_from_dict(d)\n return s", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n links = dictionary.get('links')\r\n email_config = dictionary.get('emailConfig')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(links,\r\n email_config,\r\n dictionary)", "def from_dict(eventScheduleDict):\n pass", "def from_dict(cls, dikt) -> 'POSTExecution':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n address = dictionary.get('address')\n port = dictionary.get('port')\n protocol = dictionary.get('protocol')\n is_cluster_auditing_enabled = dictionary.get('isClusterAuditingEnabled')\n is_data_protection_enabled = dictionary.get('isDataProtectionEnabled')\n is_filer_auditing_enabled = dictionary.get('isFilerAuditingEnabled')\n is_ssh_log_enabled = dictionary.get('isSshLogEnabled')\n name = dictionary.get('name')\n\n # Return an object of this model\n return cls(address,\n port,\n protocol,\n is_cluster_auditing_enabled,\n is_data_protection_enabled,\n is_filer_auditing_enabled,\n is_ssh_log_enabled,\n name)" ]
[ "0.6249006", "0.60356647", "0.59878504", "0.58986735", "0.5808315", "0.57978857", "0.56254345", "0.55351543", "0.54451144", "0.5398016", "0.53898287", "0.5336041", "0.5296309", "0.5290205", "0.52836525", "0.5264334", "0.52494353", "0.52434814", "0.52236265", "0.5214074", "0.52138007", "0.52138007", "0.52070487", "0.5206535", "0.52038336", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.52016014", "0.5178124", "0.51634985", "0.51605904", "0.51356906", "0.51299584", "0.51258564", "0.5124559", "0.5085199", "0.50847995", "0.5078271", "0.50754803", "0.5054016", "0.5047883", "0.50257885", "0.50248915", "0.50154847", "0.50073963", "0.50013924", "0.49958944", "0.4994435", "0.4986841", "0.49854663", "0.49787393", "0.497627", "0.4973739", "0.49727827", "0.4969225", "0.49600077", "0.49461898", "0.4945727" ]
0.50452054
83
Creates a new DataIntegrationRule instance from a dict representation
def __init__(self, rule_id, inputs, output, local=False, template=None, **kwargs): self.rule_id = rule_id self.inputs = inputs self.output = output self.local = local self.template = template self.params = kwargs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dict(cls, data):\n return cls(**data)", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )", "def from_dict(cls, d):\n return cls(**d)", "def from_dict(cls, dikt) -> 'Expression':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, data):\n requirements = {k: v for k, v in data['params']['requirements'].items()}\n return cls(requirements)", "def from_dict(self, d):\n return Grid(**d)", "def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def from_dict(cls, data):\r\n 
instance = cls()\r\n for key, value in data.items():\r\n instance.__dict__[key] = value\r\n return instance", "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dict(cls, d):\n # Pass values from the dictionary to the __init__() method\n obj = cls(tables=d['tables'], model_expression=d['model_expression'], \n filters=d['filters'], out_tables=d['out_tables'], \n out_column=d['out_column'], out_transform=d['out_transform'],\n out_filters=d['out_filters'], name=d['name'], tags=d['tags'])\n\n obj.summary_table = d['summary_table']\n obj.fitted_parameters = d['fitted_parameters']\n obj.model = None\n \n # Unpack the urbansim.models.RegressionModel() sub-object and resuscitate it\n if d['model'] is not None:\n model_config = yamlio.convert_to_yaml(d['model'], None)\n obj.model = RegressionModel.from_yaml(model_config)\n \n return obj", "def from_dict(cls, d):\n s = cls()\n s.update_from_dict(d)\n return s", "def from_dict(cls, ref, d):\n source = d.pop('dataSource', None)\n ds_type = d.pop('dataSourceType')\n\n # patch to deal with changing Background extension handling\n filetype = d.pop('filetype', None)\n if filetype is not None:\n if not source.endswith(filetype):\n source += filetype\n\n return cls(ref, source, ds_type, **d)", "def from_dict(cls, d):\n return loadd(d, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n alias = dictionary.get(\"alias\")\r\n cnam_lookups_enabled = dictionary.get(\"cnam_lookups_enabled\")\r\n number_type = dictionary.get(\"number_type\")\r\n rate_center = dictionary.get(\"rate_center\")\r\n state = dictionary.get(\"state\")\r\n value = dictionary.get(\"value\")\r\n\r\n # Return an object of this model\r\n return cls(alias,\r\n cnam_lookups_enabled,\r\n number_type,\r\n rate_center,\r\n state,\r\n value)", "def from_dict(cls, d: dict):\n scen = None\n try:\n scen = DTScenario(d['name'])\n for t in d['tasks']:\n scen.addTask(dtTaskTypeDict['cls'][t['class']], t['parameters'])\n except KeyError:\n scen = None\n raise DTInternalError('DTScenario.fromDict()', 'Wrong dict format')\n return scen", "def from_dict(cls, dikt) -> 'DataComparisonViewSeries':\n return util.deserialize_model(dikt, cls)", "def fromdict(cls,datadict):\n return cls(fmetric=datadict.get('fmetric'),\n fhost=datadict.get('fhost'),\n fvalue=datadict.get('fvalue'),\n ftime=datadict.get('ftime'),\n funit=datadict.get('funit'),\n finfo=datadict.get('finfo'))", "def from_dict(cls, dct):\n if dct.pop('type') != cls.__name__:\n fmt = 'Can not construct Note from dict %s'\n raise ValueError(fmt % dct)\n\n return cls(**dct)", "def from_dict(cls, dikt) -> 'AssetPropertyValueHistoryRequest':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def from_dict(cls, d):\n schema = d.get(\"schema\", None)\n if schema is not None:\n schema = {\n attr_name: AttributeSchema.from_dict(asd)\n for attr_name, asd in iteritems(schema)\n }\n\n return cls(schema=schema)", "def from_dict(cls, d):\n assert \"status\" in d\n assert \"metadata\" in d\n return cls(**d)", "def from_dict(cls, d):\n d = d.copy()\n if \"length\" in d:\n # length argument removed in version 1.1.0\n del d[\"length\"]\n return cls(**d)", "def from_dict(cls, data):\n instance = cls()\n instance._set_data(data)\n return instance", "def from_dict(cls, dikt) -> 'ProductionFlowItem':\n return 
util.deserialize_model(dikt, cls)", "def from_dict(eventScheduleDict):\n pass", "def from_dict(cls, obj: dict) -> FormatTest:\n if obj is None:\n return None\n\n if not isinstance(obj, dict):\n return FormatTest.parse_obj(obj)\n\n _obj = FormatTest.parse_obj({\n \"integer\": obj.get(\"integer\"),\n \"int32\": obj.get(\"int32\"),\n \"int64\": obj.get(\"int64\"),\n \"number\": obj.get(\"number\"),\n \"float\": obj.get(\"float\"),\n \"double\": obj.get(\"double\"),\n \"decimal\": obj.get(\"decimal\"),\n \"string\": obj.get(\"string\"),\n \"string_with_double_quote_pattern\": obj.get(\"string_with_double_quote_pattern\"),\n \"byte\": obj.get(\"byte\"),\n \"binary\": obj.get(\"binary\"),\n \"var_date\": obj.get(\"date\"),\n \"date_time\": obj.get(\"dateTime\"),\n \"uuid\": obj.get(\"uuid\"),\n \"password\": obj.get(\"password\"),\n \"pattern_with_digits\": obj.get(\"pattern_with_digits\"),\n \"pattern_with_digits_and_delimiter\": obj.get(\"pattern_with_digits_and_delimiter\")\n })\n return _obj", "def from_dict(cls, dikt: dict) -> 'DutyWhere':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'DependencyValue':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d):\n return cls(d[\"sequence\"], immutable_bounds=d[\"immutable_bounds\"])", "def from_dict(cls, dikt) -> 'DayResult':\n return util.deserialize_model(dikt, cls)", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def from_dict(cls, dct):\n return cls(**dct)", "def from_dict(cls, dikt: dict) -> 'DutyWhen':\n return util.deserialize_model(dikt, cls)", "def from_dict(self, data):\n for field in ['_id', 'title', 'description', 'params']:\n if field in data:\n setattr(self, field, data[field])\n return self", "def from_dict(cls, data):\n return cls(\n filter_id=data[\"Filter\"],\n name=data[\"Name\"],\n admin=data[\"Admin\"],\n action=data[\"Action\"],\n input_port=data[\"Input\"],\n output_port=data[\"Output\"],\n classifiers=data[\"Classifiers\"],\n packet_processing=data[\"Packet Processing\"],\n )", "def from_dict(cls, dikt) -> 'WorkerPerformanceEvent':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'ResultFeedback':\n return util.deserialize_model(dikt, cls)", "def from_dict(self, d):\r\n options = dict(d)\r\n task_id = options['task_id']\r\n del options['task_id']\r\n return SubtaskStatus.create(task_id, **options)", "def from_dict(cls, d):\n index = d.get(\"index\", None)\n if index is not None:\n index = {\n int(value): Attribute.from_dict(ad)\n for value, ad in iteritems(index)\n }\n\n return cls(index=index)", "def from_dict(cls, dict_object):\n\n return cls(**dict_object)", "def from_dict(cls, connection, data):\n\t\tif data.get('type') == \"gauge\":\n\t\t\tcls = Gauge\n\t\telif data.get('type') == \"counter\":\n\t\t\tcls = Counter\n\n\t\tobj = cls(connection, data['name'])\n\t\tobj.description = data['description']\n\t\tobj.period = data['period']\n\t\tobj.attributes = data['attributes']\n\t\tobj.description = data['description']\n\n\t\treturn obj", "def from_dict(cls, d):\n kwargs = {k: d[k] for k in cls.required()} # required\n kwargs.update({k: d[k] for k in cls.optional() if k in d}) # optional\n return cls(**kwargs)", "def from_dict(cls, dikt) -> 'Problem':\n return deserialize_model(dikt, cls)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n setup = 
idfy_rest_client.models.setup.Setup.from_dictionary(dictionary.get('setup')) if dictionary.get('setup') else None\r\n merge_fields = dictionary.get('mergeFields')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(setup,\r\n merge_fields,\r\n dictionary)", "def from_dict(self, data: dict):\n for name, sub in data.items():\n if hasattr(getattr(self, name, None), \"from_dict\"):\n getattr(self, name).from_dict(sub)\n else:\n setattr(self, name, decoder(sub))", "def from_dict(cls, dikt) -> 'SourceSchema':\n return util.deserialize_model(dikt, cls)", "def from_dict(self, data: dict):\n if 'title' in data:\n self.title = data['title']\n if 'description' in data:\n self.description = data['description']\n if 'deadline' in data:\n self.deadline = parser.parse(data['deadline'])\n return", "def from_dict(cls, dikt) -> 'DataFileFormat':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'ShardingDescriptor':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'OrgApacheFelixHttpProperties':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, data:{}):\n instance = cls(\n data['address'],\n None,\n data['frequency'],\n data['resolution'],\n data['servo_frequency']\n )\n if data['logging_level'] is not None:\n logger.setLevel(data['logging_level'])\n return instance", "def from_dict(cls, dikt) -> 'Parameters':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Failure':\n return util.deserialize_model(dikt, cls)" ]
[ "0.6742197", "0.65646493", "0.6552889", "0.648437", "0.63919514", "0.6375292", "0.6365639", "0.62918556", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.62024546", "0.619918", "0.61803114", "0.6167417", "0.6135077", "0.6134521", "0.61195284", "0.6106273", "0.6054559", "0.60440415", "0.604376", "0.60305697", "0.6025698", "0.6020887", "0.60162044", "0.6012465", "0.60121566", "0.6005058", "0.60032576", "0.59904265", "0.5974323", "0.59663975", "0.5953395", "0.594727", "0.5941422", "0.5941013", "0.5941013", "0.59321415", "0.5918559", "0.5911302", "0.5897919", "0.58949196", "0.58943605", "0.5876348", "0.5860243", "0.5857489", "0.5855565", "0.58487225", "0.5848224", "0.5841192", "0.5836669", "0.5826996", "0.58201754", "0.5818796", "0.5816789", "0.5808807", "0.58003753", "0.5795948", "0.5793727" ]
0.0
-1
Prints a string representation of a DataIntegrationRule instance
def __repr__(self): template = """ DataIntegrationRule ({}) - inputs : {} - output : {} - local : {} - template : {} - params : {} """ return template.format( self.rule_id, self.inputs, self.output, self.local, self.template, self.params )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return \"[ %s ]\" % str(self.__rule)", "def __str__(self):\n return \"{ %s }\" % str(self.__rule)", "def __str__(self):\n return \"{ %s }1\" % str(self.__rule)", "def __str__ (self) :\n\t\ttext_rule = \"\"\n\t\t\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\ttext_rule += \"\\nRULE \" + key + \" = [\\n\\t\"\n\t\t\trule_in_a_line = []\n\t\t\tfor rule in rules :\n\t\t\t\t#rule_in_a_line.append(\" + \".join([r.val+\"(\"+r.type+\")\" for r in rule]))\n\t\t\t\trule_in_a_line.append(\" + \".join([r.__str__() for r in rule]))\n\t\t\ttext_rule += \"\\n\\t\".join(rule_in_a_line) + \"\\n]\"\n\t\ttext_rule += \"\\n\\n\"\n\t\t\n\t\ttext_rule += \"LABELS = \" + json.dumps (self.labels, indent=2) + '\\n\\n'\n\n\t\ttext_rule += \"STRUCT = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join([\n\t\t\t\t\"\\t{} : {{\\n\\t\\t{}\\n\\t}}\\n\".format (\n\t\t\t\t\tkey, \", \\n\\t\\t\".join(val)\n\t\t\t\t) for key, val in self.keeper.items()\n\t\t\t])\n\t\t)\n\t\ttext_rule += \"STRNODE = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join(self.strnodes)\n\t\t)\n\t\tfor regex, label in self.tokens :\n\t\t\ttext_rule += \"TOKEN \" + label + \" = regex('\" + regex + \"')\\n\"\n\n\t\treturn text_rule", "def __str__( self ):\n assert isinstance( self.level, int )\n assert isinstance( self.prop, WFF )\n assert isinstance( self.justification, Inference )\n\n return \"Step( %d, %s, %s )\" % ( self.num, repr( self.prop ), repr( self.justification ) )", "def __repr__(self):\n template = \"\"\"\n SnakemakeRule ({})\n \n - parent_id : {}\n - input : {}\n - output : {}\n - local : {}\n - template : {}\n - params : {}\n \"\"\"\n return template.format(\n self.rule_id,\n self.parent_id,\n self.input,\n self.output,\n self.local,\n self.template,\n self.params,\n )", "def print_rules(self):\n for idx, r in enumerate(self.rules):\n print(idx, \"=>\", r.__repr__())", "def __str__(self):\n outs = str(self.cluster_subspace).split(\"\\n\")[:6]\n\n if self.regression_data is not None:\n # This might need to be redefined to take \"expectation\" using measure\n feature_avg = np.average(self.feature_matrix, axis=0)\n feature_std = np.std(self.feature_matrix, axis=0)\n outs += [\n f\"Regression Data : estimator={self.regression_data.estimator_name}\",\n f\" module={self.regression_data.module}\",\n f\" parameters={self.regression_data.parameters}\",\n f\"Target Property : \"\n f\"mean={np.mean(self.regression_data.property_vector):0.4f} \"\n f\"std={np.std(self.regression_data.property_vector):0.4f}\",\n ]\n fit_var = sum(\n self._subspace.function_total_multiplicities[1:] * self.eci[1:] ** 2\n )\n outs += [\n f\"ECI-based Property : mean={self.eci[0]:0.4f}\"\n f\" std={np.sqrt(fit_var):0.4f}\",\n \"Fit Summary\",\n ]\n\n for i, term in enumerate(self._subspace.external_terms):\n outs.append(f\"{repr(term)}={self.coefs[len(self.eci) + i]:0.3f}\")\n\n if self.regression_data is not None:\n outs += [\n \" ---------------------------------------------------------------------\"\n \"-------------------------------\",\n \" | ID Orbit ID Degree Cluster Diameter ECI Feature AVG\"\n \" Feature STD ECI * STD |\",\n f\" | 0 0 0 NA \"\n f\"{self.eci[0]:^7.3f}{feature_avg[0]:^15.3f}\"\n f\"{feature_std[0]:^15.3f}{feature_std[0] * self.eci[0]:^13.3f}|\",\n ]\n else:\n outs += [\n \" ---------------------------------------------------------\",\n \" | ID Orbit ID Degree Cluster Diameter ECI |\",\n f\" | 0 0 0 NA \"\n f\"{self.eci[0]:^7.3f} |\",\n ]\n\n for degree, orbits in self.cluster_subspace.orbits_by_size.items():\n for orbit 
in orbits:\n for i, bits in enumerate(orbit.bit_combos):\n line = (\n f\" |{orbit.bit_id + i:^6}{orbit.id:^12}{degree:^10}\"\n f\"{orbit.base_cluster.diameter:^20.4f}\"\n f\"{self.eci[orbit.bit_id + i]:^7.3f}\"\n )\n if self.regression_data is not None:\n line += (\n f\"{feature_avg[orbit.bit_id + i]:^15.3f}\"\n f\"{feature_std[orbit.bit_id + i]:^15.3f}\"\n f\"{feature_std[orbit.bit_id + i] * self.eci[orbit.bit_id + i]:^13.3f}\" # noqa\n )\n line += \"|\"\n outs.append(line)\n outs.append(\" \" + (len(outs[-1]) - 1) * \"-\")\n return \"\\n\".join(outs)", "def __str__(self):\n r = []\n for item in sorted(self._data.keys()):\n correct, incorrect = self._data[item][True], self._data[item][False]\n acc = correct / (correct + incorrect)\n s = f\"{item:4} | Accuracy: {acc:.2f}% (diff {'+' if acc-item >=0 else ''}{acc-item:.2f}%) | correct: {correct:2}, incorrect: {incorrect:2}\" \n r.append(s)\n\n return \"\\n\".join(r)", "def summary_string(self) -> str:\n return f\"dixonoid: {self.plain_rules}\"", "def __str__(self):\n\n ret = ''\n for rule in self.rules:\n ret += str(rule) + '\\n'\n ret += 'IF TRUE THEN {0}'.format(self.default)\n\n return ret", "def __repr__( self ):\n assert isinstance( self.level, int )\n assert isinstance( self.prop, WFF )\n assert isinstance( self.justification, Inference )\n\n return str( self )", "def __str__(self):\n output = \"Solution for \" + self.vrpdata.InstanceName + \":\\n\"\n output += \"Total distance: \" + str(round(self.objective, 2)) + \"\\n\"\n output += \"Solution valid: \" + str(self.solutionValid) + \"\\n\\n\"\n count = 1 # count routes\n for r in self.routes:\n output += \"Route #\" + str(count) + \"\\n\" + str(r) + \"\\n\" + str(round(r.distance, 2)) + \"\\n\" + str(r.quantity) + \"\\n\"\n count += 1\n return output", "def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'", "def __str__(self):\n return 'GradientAnisotropicDiffusion:\\n' \\\n ' time_step: {self.time_step}\\n' \\\n ' conductance: {self.conductance}\\n' \\\n ' conductance_scaling_update_interval: {self.conductance_scaling_update_interval}\\n' \\\n ' no_iterations: {self.no_iterations}\\n' \\\n .format(self=self)", "def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)", "def __repr__(self):\n return f\"{self.__class__.__name__}(validate={self._validate}, axis={self._axis})\"", "def __str__(self):\n outstr = [\"\\n<%s: %s>\" % (self.__class__, self.name)]\n outstr.append(\"%d graphs\" % len(self._graphs))\n outstr = \"\\n\".join(outstr)\n return outstr", "def __repr__(self):\n options_str = \", \".join(\n [\n f\"validate={self._validate}\",\n f\"outcome={self._outcome}\",\n 
f\"alpha_prior={self._alpha_prior}\",\n ]\n )\n return f\"{self.__class__.__name__}({options_str})\"", "def _to_string(self):\n self.results.print_results()\n self.results.print_comparison()", "def __repr__(self):\n\n rep = \"\"\n rep += str(self.literal)+\"\\n\"\n rep += str(self.bindings)+\"\\n\"\n rep += str(self.facts)+\"\\n\"\n return (rep)", "def __str__(self):\n runner = self.__head\n if runner is None:\n return \"\"\n while runner.next_node:\n if runner is not None:\n print(\"{}\".format(runner.data))\n runner = runner.next_node\n return \"{}\".format(runner.data)", "def __str__(self, printODData = False):\n networkStr = \"Link\\tFlow\\tCost\\n\"\n for ij in sorted(self.link, key=lambda ij : self.link[ij].sortKey):\n networkStr += \"%s\\t%f\\t%f\\n\" % (ij, self.link[ij].flow, self.link[ij].cost)\n if printODData == True:\n networkStr += \"\\n\"\n networkStr += \"OD pair\\tDemand\\tLeastCost\\n\"\n for ODpair in self.ODpair:\n networkStr += \"%s\\t%f\\t%f\\n\" % (ODpair, self.ODpair[ODpair].demand, self.ODpair[ODpair].leastCost)\n return networkStr", "def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )", "def __str__(self):\n _str = \"Variables:\\n\"\n for variable in self.variables:\n _str += \" {}\\n\".format(str(variable))\n _str += \"\\nConstraints:\\n\"\n for constraint in self.constraints:\n _str += \" {}\\n\".format(str(constraint))\n return _str", "def __str__(self):\n\n rep = 'Generalized Syllogism:\\n'\n rep += '\\ttask: {}\\n'.format(self.task)\n rep += '\\tencoded_task: {}\\n'.format(self.encoded_task)\n rep += '\\tp1: {}\\n'.format(self.p1)\n rep += '\\tp2: {}\\n'.format(self.p2)\n rep += '\\tquantifier_p1: {}\\n'.format(self.quantifier_p1)\n rep += '\\tquantifier_p2: {}\\n'.format(self.quantifier_p2)\n rep += '\\tfigure: {}\\n'.format(self.figure)\n rep += '\\tTerms:\\n'\n rep += '\\t\\tA: {}\\n'.format(self.A)\n rep += '\\t\\tB: {}\\n'.format(self.B)\n rep += '\\t\\tC: {}\\n'.format(self.C)\n return rep", "def __str__(self, output=[]):\n\n class_str = 'Analytical Phonon simulation properties:\\n\\n'\n class_str += super().__str__()\n\n return class_str", "def __str__(self, output=[]):\n\n class_str = 'Numerical Phonon simulation properties:\\n\\n'\n class_str += super().__str__()\n\n return class_str", "def __repr__(self):\n return 'PCFGRule(%s, %s, %s)' % (self.variable, self.derivation, self.probability)", "def __str__(self):\n return \"(%s)\" % ' '.join(map(str, self.__subrules))", "def __str__(self):\n astr = '[\\n name: [ ' + self.name + ' ]\\n'\n astr += ' variables: [ '\n for var, init in self.variables:\n astr += '(' + var + ' := ' + init + '), '\n astr = astr[:-2] + ' ]\\n assumptions: [ '\n for assumption in self.assumptions:\n astr += assumption + ', '\n astr = astr[:-2] + ' ]\\n guarantees: [ '\n for guarantee in self.guarantees:\n astr += guarantee + ', '\n return astr[:-2] + ' ]\\n]'", "def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in 
self.transforms:\n format_string += f'\\n {t}'\n format_string += '\\n)'\n return format_string", "def __repr__(self):\n outs = [\"Cluster Expansion Summary\"]\n outs += repr(self.cluster_subspace).split(\"\\n\")[1:]\n\n if self.regression_data is not None:\n outs += [\n f\"Regression Data : estimator={self.regression_data.estimator_name}\"\n f\" module={self.regression_data.module}\",\n f\" parameters={self.regression_data.parameters}\",\n f\"Target Property : \"\n f\"mean={np.mean(self.regression_data.property_vector):0.4f} \"\n f\"std={np.std(self.regression_data.property_vector):0.4f}\",\n ]\n fit_var = sum(\n self._subspace.function_total_multiplicities[1:] * self.eci[1:] ** 2\n )\n outs += [\n f\"ECI-based Property : mean={self.eci[0]:0.4f} std={np.sqrt(fit_var):0.4f}\"\n ]\n return \"\\n\".join(outs)", "def __str__(self):\n out = \"phase polynomial = \\n\"\n out += str(self.poly)\n out += \"\\naffine function = \\n\"\n out += \" (\"\n for row in range(self.num_qubits):\n wrote = False\n for col in range(self.num_qubits):\n if self.linear[row][col] != 0:\n if wrote:\n out += \" + x_\" + str(col)\n else:\n out += \"x_\" + str(col)\n wrote = True\n if self.shift[row] != 0:\n out += \" + 1\"\n if row != self.num_qubits - 1:\n out += \",\"\n out += \")\\n\"\n return out", "def _repr_(self):\n if self.parent()._chart.manifold().options.textbook_output:\n return str(ExpressionNice(self._express))\n else:\n return str(self._express)", "def __str__(self):\n\n s = \"\"\n\n for i, x in enumerate(self):\n s += f\"Step {i}, {x[0].__name__}, {x[1]}\\n\"\n return s", "def test_rule_representation():\n rule = MethodRule(method=\"POST\")\n assert repr(rule) == \"MethodRule(method='POST')\", \"Wrong representation\"", "def __str__(self):\n rep=\"This system has \"+str(self.NL)+\" layers.\\n\"\n rep+=\"The parameters for the each layers are:\\n\"\n for i in range(self.NL-1):\n rep+=\"Layer no. \"+str(i)+\":\\t \"+str(self.layers[i])\n rep+=\"Coupled to the next layer with strength:\\t\"+str(self.couplings[i])+\"\\n\"\n rep+=\"Layer no. 
\"+str(self.NL-1)+\":\\t \"+str(self.layers[self.NL-1])\n \n return rep", "def __str__(self):\n s = 'Processor ' + __name__\n # if self._rule_files:\n # s += ' running with rules ' + ' '.join(self._rule_files.values())\n\n return s", "def __str__(self):\n # Set up title\n r = '{:20.19} {:>10} {:>10}\\n'\n t = r.format(self.name, 'Days', 'FRBs')\n line = '-'*len(t.split('\\n')[-2].strip()) + '\\n'\n t += line\n\n # Format rates\n rdays = round(self.days, 3)\n t += r.format('In population', rdays, round(self.tot()))\n t += r.format('Detected', rdays, round(self.det, 3))\n t += r.format('Too late', rdays, round(self.late, 3))\n t += r.format('Too faint', rdays, round(self.faint, 3))\n t += r.format('Outside survey', rdays, round(self.out, 3))\n t += r.format('/Gpc^3', 365.25, round(self.vol, 3))\n t += r.format('Expected', round(self.exp, 4), 1)\n t += line\n\n return pprint(t, output=False)", "def __str__(self):\n name_str = \"node name is %s\\n\" % self.__name\n label_str = \"labels are %s\\n\" % str(self.__labels)\n propety_str = \"properties are %s\\n\" % str(self.__props)\n return name_str + label_str + propety_str", "def __repr__(self):\n repr_parts = ['<', self.__class__.__name__]\n \n repr_parts.append(' title = ')\n repr_parts.append(repr(self.title))\n \n step_type = self.type\n repr_parts.append(', type = ')\n repr_parts.append(step_type.name)\n \n required = self.required\n if required:\n repr_parts.append(', required = ')\n repr_parts.append(repr(required))\n \n values = self.values\n if (values is not None):\n repr_parts.append(', values = [')\n \n index = 0\n limit = len(values)\n \n while True:\n value = values[index]\n \n repr_parts.append(repr(value))\n \n index += 1\n if index == limit:\n break\n \n repr_parts.append(', ')\n continue\n \n repr_parts.append(']')\n \n repr_parts.append('>')\n return ''.join(repr_parts)", "def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string", "def print_metric(self):\r\n print(f'\\n\\n{self.sort} metric of size {self.n}')\r\n print(f'algorithm: {self.algo}')\r\n print(f'number of comparisons: {self.comps}')\r\n print(f'number of exchanges: {self.exs}')\r\n print(f'regression equation for comparisons: {self.comp_eq}')\r\n print(f'regression equation for exchanges: {self.ex_eq}')\r\n print(f'presorted data: {self.predata}')\r\n print(f'postsorted data: {self.postdata}')", "def pretty_str(rule,print_option=PrintOption()):\n if rule.is_terminal() or rule.is_empty():\n content = str(rule)\n if print_option.bikeshed:\n return \"`{}`\".format(content)\n return content\n if rule.is_symbol_name():\n name = rule.content\n def with_meta(phrase,metachar,print_option):\n content = \" \".join([x.pretty_str(print_option) for x in phrase])\n if len(phrase) > 1:\n return \"( {} ){}\".format(content, metachar)\n return \"{} {}\".format(content, metachar)\n if name in print_option.replace_with_starred:\n phrase = print_option.replace_with_starred[name]\n return with_meta(phrase,'*',print_option)\n if name in print_option.replace_with_optional:\n phrase = print_option.replace_with_optional[name]\n return with_meta(phrase,'?',print_option)\n if name in print_option.replace_with_nested:\n po = print_option.clone()\n po.multi_line_choice = False\n content = po.replace_with_nested[name].pretty_str(po)\n return \"( {} )\".format(content)\n if print_option.inline_synthetic and name.find(\"/\") >=0:\n po = print_option.clone()\n po.multi_line_choice = False\n content = po.grammar.rules[name].pretty_str(po)\n return \"( {} 
)\".format(content)\n\n # Print ourselves\n if print_option.bikeshed:\n context = 'recursive descent syntax'\n g = print_option.grammar\n if g.rules[name].is_token():\n context = 'syntax'\n if name in g.extra_externals:\n context = 'syntax_sym'\n if name == '_disambiguate_template':\n # This is an implementation detail, so make it invisible.\n return ''\n else:\n without_underscore = ['_less_than',\n '_less_than_equal',\n '_greater_than',\n '_greater_than_equal',\n '_shift_left',\n '_shift_left_assign',\n '_shift_right',\n '_shift_right_assign']\n if name in without_underscore:\n name = name[1:]\n return \"[={}/{}=]\".format(context,name)\n return name\n if isinstance(rule,Choice):\n parts = [i.pretty_str(print_option) for i in rule]\n if print_option.multi_line_choice:\n parts.sort()\n\n if print_option.multi_line_choice:\n if print_option.bikeshed:\n nl = \"\\n\\n\"\n prefixer = \"\\n | \"\n else:\n nl = \"\\n\"\n prefixer = \"\\n \"\n else:\n nl = \"\"\n prefixer = \"\"\n joiner = nl + \" | \"\n inside = prefixer + joiner.join([p for p in parts])\n if print_option.is_canonical:\n return inside\n else:\n # If it's not canonical, then it can have nesting.\n return \"(\" + inside + nl + \")\"\n if isinstance(rule,Seq):\n return \" \".join(filter(lambda i: len(i)>0, [i.pretty_str(print_option) for i in rule]))\n if isinstance(rule,Repeat1):\n return \"( \" + \"\".join([i.pretty_str(print_option) for i in rule]) + \" )+\"\n raise RuntimeError(\"unexpected node: {}\".format(str(rule)))", "def __str__(self):\n shape, dtype = self._initial_shape, self._initial_dtype\n descr = [self._name_shape_dtype(self.name, shape, dtype)]\n for transform in self.transforms:\n shape, dtype = transform.new_shape(shape), transform.dtype if transform.dtype is not None else dtype\n descr += ['-> ' + self._name_shape_dtype(transform.name, shape, dtype)]\n return '\\n'.join(descr)", "def __repr__(self):\n s = f'sample:\\n{self.sample}\\n'\n s += f'cluster:\\n{self.cluster}\\n'\n s += f'largest_cluster:\\n{self.get_largest_cluster()}'\n return s", "def __str__(self):\n return \"{}\\n{}\\n{}\\n{}\".format(self.header,self.sequence,self.line3,self.quality)", "def __str__(self):\n debug_str = \"%s ::=\" % str(self.head)\n for symbol in self.body:\n debug_str += \" %s\" % str(symbol)\n return debug_str", "def __str__(self):\n out = [f'{v}: {self.adj_list[v]}' for v in self.adj_list]\n out = '\\n '.join(out)\n if len(out) < 70:\n out = out.replace('\\n ', ', ')\n return f'GRAPH: {{{out}}}'\n return f'GRAPH: {{\\n {out}}}'", "def __str__(self) -> str:\n st = \"<Output> \"\n if self.inst_out:\n st += f'instance:{self.inst_out};'\n st += f'''{self.output} -> {self.target or '\"\"'} -> '''\n if self.inst_in:\n st += f\"instance:{self.inst_in};\"\n st += self.input\n\n if self.params and not self.inst_in:\n st += f\" ({self.params})\"\n if self.delay != 0:\n st += f\" after {self.delay} seconds\"\n if self.times != -1:\n st += \" (once only)\" if self.times == 1 else f\" ({self.times!s} times only)\"\n return st", "def __str__(self):\n sgf = self.grad_fn\n return \"{}{}\".format(\n str(self.data),\n \", grad_fn={}\".format(\n self.grad_fn.__class__.__name__) if sgf is not None else \"\"\n )", "def __repr__(self):\n return self.pretty_print(self.__dict__)", "def _to_str(self):\n\t\tprint(\"predictors: {}, types: {} \\n method: {}, preprocessing: {}\\\n\t\t\t \\n partition_rate: {}, metric: {}, file name: {}\".format(\n\t\t\t self.predictors, self.predictors_types, self.method_name,\n\t\t\t self.preprocessing_methods, 
self.data_split, self.metric,\n\t\t\t self.plotting_file_name))", "def __str__(self):\n s = \"\"\n s += self.synset.name + \"\\t\"\n s += \"PosScore: %s\\t\" % self.pos_score\n s += \"NegScore: %s\" % self.neg_score\n return s", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __str__(self):\n return \"<aospy.Calc instance: \" + ', '.join(\n (self.name, self.proj.name, self.model.name, self.run.name)\n ) + \">\"", "def __str__(self):\n\t\n\t\tresult = \"\"\n\t\tresult += \"Torsional Spring Specs: \\n\"\n\t\tresult += \"Shape Eq. Slope: {0}\\n\".format(str(self.shape_slope))\n\t\tresult += \"Z Thickness: {0}\\n\".format(str(self.z_thick))\n\t\tresult += \"In-Plane Thickness: {0}\\n\".format(str(self.thick))\n\t\tresult += \"Spiral Length: {0}\\n\".format(str(self.length))\n\n\t\treturn result", "def __str__(self) -> str:\n\n assert self.data is not None\n border = \"****************************************\"\n name_border = \"=\" * len(self.name)\n lines = [border, name_border, self.name, name_border]\n for spec in self.specs.values():\n lines.append(str(spec))\n lines.append(border)\n return os.linesep.join(lines)", "def __repr__(self) -> str:\n return f\"<TestResult {self.test_id},{self.regression_test_id}: {self.exit_code} \" \\\n f\"(expected {self.expected_rc} in {self.runtime} ms>\"", "def __str__(self):\n string = 'input dim: {} \\noutput dim: {} \\n'.format(\n self.dim_inputs, self.dim_outputs\n )\n string += 'sequence length: {} \\n'.format(\n self.tensors[0].shape[1]\n )\n key = 'train' if self.train else 'test'\n string += '{}_samples: {} \\n{}_sequences: {} \\n'.format(\n key, self.experiment_length, key, self.tensors[0].shape[0]\n )\n return string", "def __str__(self):\n if self.combinedReplicates:\n printout = \"\\nCOMBINED MS REPLICATES WITH n = \" + str(self.n_cutoff) + \" and std dev = \" + str(self.std_cutoff) + \"\\nCell Lines: \" + str(self.cellLines).strip(\"[]\") + \"\\nSize: \" + str([self.combinedReplicates[i].shape[0] for i in range(len(self.combinedReplicates))]).strip(\"[]\") + \"\\nIntersection Size: \" + str(self.experimentFullIntersection.shape[0]) + \"\\n\"\n printout += \"\\n\"\n else:\n printout = \"\"\n printout += \"\\n\".join([str(each) for each in self.experimentalReplicates]).strip(\"[]\")\n if self.phenotypicMeasurements:\n printout += \"\\n\"\n printout += \"\".join([str(each) for each in self.phenotypicMeasurements.values()]).strip(\"[]\")\n\n return printout", "def __str__(self) -> str:\n if self.decorator is None:\n decorator_str = \"\"\n elif self.decorator:\n decorator_str = \"+\"\n else:\n decorator_str = \"-\"\n return \" \".join([\"The nilpotent orbit corresponding\",\n f\"to partition {self.my_diagram}{decorator_str}\",\n f\"in type {self.my_type.letter()} {self.lie_rank}\"])", "def __repr__(self):\n indent = len(self.type) + 2\n jstr = ',\\n' + ' ' * indent\n\n props = self._display_properties()\n\n params = jstr.join('{:}={:}'.format(p, summary(self[p],\n indent=indent))\n for (p, 
dp) in props)\n return '<{}({:})>'.format(self.type, params)", "def __str__(self):\r\n\r\n txt = super(GrfNodeCore, self).__str__()\r\n\r\n inID = self.inPort[0]\r\n\r\n outID = [P[0] for P in self.outPort]\r\n\r\n txt += '; input ID = {0}; output ID = {1}'.format(inID, outID) # generate formatted text\r\n return txt", "def __str__(self):\n return \"(%s)\" % ' | '.join(map(str, self.__subrules))", "def __str__(self):\n shape = self.dataarray.shape\n return 'CouplingAnalysisPurePython: %i variables, %i timesteps.' % (\n shape[0], shape[1])", "def __repr__(self):\n lstout = [ \"SinkPyFAI Processlib instance\",\"Worker:\",self._worker.__repr__(),\"Writer:\",self._writer.__repr__()]\n return os.linesep.join(lstout)", "def print_production(self, transition_index, value):\r\n\r\n transition = self.rules[transition_index]\r\n str_list = [self.rule_to_str(t) for t in transition]\r\n print(\" (%04d, %10s) {%s}\" % (transition_index, toRuleString[value], \" \".join(str_list)))", "def __repr__(self):\n s = self.name\n if self.param != \"None\":\n s += ' with parameter '+self.param\n s += '; '+self.applyTo\n if self.applyTo != \"global\":\n s += ': '+self.conditions\n return s", "def _printable(self) -> str:\n \n if self.type_of_second_operand == self.TYPE_REF_ID:\n operand_type = \"RefID\"\n else:\n operand_type = \"Value\"\n\n # parenthesis to concatenate the string over multiple lines\n return (\n \"CQC IF header. RefID=\" + str(self.first_operand)\n + \" | Operator=\" + str(self.operator)\n + \" | \" + operand_type + \"=\" + str(self.second_operand)\n + \" | Second_operand_type=\" + operand_type\n + \" | Body_length=\" + str(self.length)\n )", "def __str__(self):\n s = \"--\\n\"\n for element in self:\n s += element.__str__() + \"\\n\"\n s += \"--\"\n \"\"\"\n # Uncomment if you want to see the internal structure\n s = \"\\n--\\n\"\n for i in xrange(self.size):\n s += \"%d [%s, %s]\\n\" % ( i, self.slot[i], self.data[i] )\n s += \"--\"\n \"\"\"\n return s", "def __print_ruler():\n print(\"{:s}-+-{:s}-+-{:s}-+-{:s}-+-{:s}-+-{:s}\".format(\n \"-\"*__collen[\"id\"],\n \"-\"*__collen[\"name\"],\n \"-\"*MAX_SIZE,\n \"-\"*__collen[\"used_lim\"],\n \"-\"*MAX_SIZE,\n \"-\"*__collen[\"excl_lim\"]))", "def __str__(self):\n string = ''\n for degree, coef in enumerate(self.coefs, 1):\n degree = degree - 1\n string += str(coef)+'x^' + str(degree) + ' + '\n string = string[0:-3] # remove the last ' + '\n return string", "def __repr__(self):\n return f\"{self.__class__.__name__}(validate={self._validate}, scale={self.scale})\"", "def __repr__(self):\n return \"Flow(sip:{} dip:{} sport:{} dport{})\".format(\n self.fr_flow_sip_l, self.fr_flow_dip_l,\n self.fr_flow_sport, self.fr_flow_dport)", "def test_repr_format(self):\n t = Linearize()\n assert t.repr_format(1.0) == \"Linearize(1.0)\"", "def __repr__(self):\n ret = \"\"\n if is_relation(self.root):\n ret += self.root + '('\n for index, obj in enumerate(self.arguments):\n ret += str(obj)\n if index != len(self.arguments)-1:\n ret += ','\n ret += ')'\n elif is_equality(self.root):\n ret = str(self.first) + self.root + str(self.second)\n elif is_quantifier(self.root):\n ret = self.root + str(self.variable) + '[' + str(self.predicate) + ']'\n elif is_unary(self.root):\n ret = self.root + str(self.first)\n elif is_binary(self.root):\n ret = '(' + str(self.first) + self.root + str(self.second) + ')'\n return ret\n # Task 7.2", "def __str__(self):\n representation_string = '{:^5}\\t{:^20}\\n\\n'.format('S. 
No.', 'Schedule Policy')\n\n for index, policy in enumerate(self._policies):\n sub_str = '{:^5}\\t{:20}\\n'.format(index + 1, policy)\n representation_string += sub_str\n\n return representation_string.strip()", "def __repr__(self):\n values = ', '.join(f'{k}={v}' for k, v in self.variables.items())\n return f'D({values})'", "def printReport(self): \n \n print('Distribution: ', self._distribution_type)\n print('Distribution Type: ', str(self._measure_type).replace('MeasureType.','')) \n print('Type Detection Match: ', str(self._measure_type_match))\n print('MLE: ', str(self._mle))\n print('Goodness of Fit: ', str(self._gof)) \n print('Goodness of Fit Pass: ', str(self._pass)) \n print('Overall Score: ', str(self._score)) \n print('-------------')", "def __str__(self):\n\n print(\"\")\n s = \"NAME : \"+self._name+\"\\n\\n\"\n s += \"PARAMS :\"\n print(s)\n\n for key, val in self.params.items():\n l = (21-len(key))//7\n print(\"{0}\".format(key)+\"\\t\"*l+\":\\t{0}\".format(val))\n\n s = \"\\nRuns stored in DEFAULT_RUNS = \"+str(len(self.default_runs))\n print(s)\n\n s = \"\\nRuns stored in MOD_RUNS = \"+str(len(self.mod_runs))\n print(s)\n\n return \"\"", "def __str__(self):\n txt = ''\n if self.PrintHeader:\n txt = \" |\" + \"|\".join(sorted(self.rows[0].keys())).expandtabs() + \"|\"\n txt += \"\\n\"\n txt += \"|-\"\n for r in self.rows:\n txt += \"\\n|\"\n txt += \"|\".join([str(uround(r[key] , 2) if isinstance(r[key], (int, long, float, complex , Variable,AffineScalarFunc )) else r[key]) for key in sorted(self.rows[0].keys())]) + \"|\"\n txt += \"\\n|-\"\n if self.PrintSum:\n txt += \"\\n\"\n sumRow = self.GetSumRow()\n txt += \"| |\" + \"|\".join( [str(uround(sumRow[key] , 2) if isinstance(sumRow[key], (int, long, float, complex , Variable ,AffineScalarFunc )) else sumRow[key]) for key in sorted(self.rows[0].keys())[1:]] ) + \"|\"\n\n return txt", "def __str__(self):\n ret_str = \"\"\n for element_type in ('nodes', 'edges', 'layers'):\n elements = getattr(self, element_type)\n subtype_counts = defaultdict(int)\n ret_str += \"{0} {1}:\\n\".format(len(elements), element_type)\n for element in elements:\n subtype_counts[type(element).__name__] += 1\n for subtype in subtype_counts:\n ret_str += \"\\t{0}: {1}\\n\".format(subtype,\n subtype_counts[subtype])\n if element_type == 'layers':\n layer_names = [layer.name for layer in self.layers]\n ret_str += \"\\t\\t\" + \", \".join(layer_names)\n ret_str += \"\\n\"\n return ret_str", "def __repr__(self):\n\n repme = \"pfreq= {!r}, sampling= {!r}\".format(self.pfreq, self.sampling)\n\n return \"{}({})\".format(type(self).__name__, repme)", "def __str__(self):\n\n result = \"n: \" + str(self.n) + \"\\n\"\n result += \"m: \" + str(self.m) + \"\\n\"\n result += \"ns: \" + str(self.ns) + \"\\n\"\n result += \"s0: \" + str(self.s0) + \"\\n\"\n result += \"goals: \" + str([self.goals[i] for i in range(self.ng)]) + \"\\n\"\n result += \"horizon: \" + str(self.horizon) + \"\\n\"\n result += \"gamma: \" + str(self.gamma) + \"\\n\\n\"\n\n result += \"S(s, a, s'):\\n%s\" % (str(np.array([self.S[i] \\\n for i in range(self.n * self.m * self.ns)]).reshape((self.n, self.m, self.ns)))) + \"\\n\\n\"\n\n result += \"T(s, a, s'):\\n%s\" % (str(np.array([self.T[i] \\\n for i in range(self.n * self.m * self.ns)]).reshape((self.n, self.m, self.ns)))) + \"\\n\\n\"\n\n result += \"R(s, a):\\n%s\" % (str(np.array([self.R[i] \\\n for i in range(self.n * self.m)]).reshape((self.n, self.m)))) + \"\\n\\n\"\n\n return result", "def printObj(self):\n return 'patient_id:{}, 
medication:{}, frequency:{}, start_dt:{},'\n 'end_dt:{}, noti_type:{}'.format(\n self.patients.data,\n self.medication.data,\n self.frequency.data,\n self.start_dt,\n self.end_dt.data,\n self.noti_type.data)", "def __str__(self):\n\n string = \"values:\\n\\t\"\n string += \" x \".join(map(str, self.shape))\n\n string += \" {} ({})\\n\".format(type(self.values).__name__, self.values.dtype)\n\n if self.print_values is True:\n string += str(self.values) + \"\\n\"\n\n string += \"dims:\\n\\t\"\n\n string += \"{}\\n\".format(self.dims)\n\n string += \"coords:\\n\\t\"\n string += \"\\n\\t\".join(map(repr, self.coords))\n\n string += \"\\n\"\n\n string += \"attrs:\\n\"\n\n for ix, key in enumerate(self.attrs.keys()):\n if ix == self.max_print_attrs:\n string += \"\\t+%i attrs\" % (len(self.attrs) - self.max_print_attrs)\n break\n string += \"\\t{!r}: {!r}\\n\".format(key, self.attrs[key])\n\n return string", "def _printable(self):\n toPrint = \"Measurement Outcome header. \"\n toPrint += \"measurement outcome: \" + str(self.outcome) + \" \"\n\n return toPrint", "def print(self):\n print(self.pretty_str())", "def __str__(self):\n DataND_str = \"\"\n # Get the properties inherited from Data\n DataND_str += super(DataND, self).__str__() + linesep\n if len(self.axes) == 0:\n DataND_str += \"axes = []\"\n for ii in range(len(self.axes)):\n DataND_str += (\n \"axes[\"\n + str(ii)\n + \"] = \"\n + str(self.axes[ii].as_dict())\n + \"\\n\"\n + linesep\n + linesep\n )\n DataND_str += \"normalizations = \" + str(self.normalizations) + linesep\n DataND_str += \"FTparameters = \" + str(self.FTparameters) + linesep\n DataND_str += \"values = \" + linesep + str(self.values)\n return DataND_str", "def __str__(self):\n header = [\n ' ObjectiveFunction:']\n header += [('Function: {}').format(self.func.__name__)]\n header += [('Objective: {}').format(self.objective)]\n return ('\\n').join(header) + '\\n'", "def __str__(self):\n\n return \"[\" + str(self.quick) + \"] \" + \\\n self.regexp.pattern + \" --> \" + \\\n str(self.handler)", "def __str__(self):\n buf = io.StringIO()\n args.output.write(buf, self.root, self.headings)\n return buf.getvalue()", "def __repr__(self):\n\n output = list()\n output.append('{resonance_id:6s}'.format(**self.par))\n output.append('{h_larmor_frq:6.1f}'.format(**self.par))\n output.append('{temperature:4.1f}'.format(**self.par))\n output.append('{:10.5f}'.format(self.val))\n output.append('{:10.5f}'.format(self.err))\n\n if self.cal:\n output.append('{:10.5f}'.format(self.cal))\n\n return ' '.join(output)", "def __repr__(self) -> str:\n s = \"\\n\"\n fmt = \"{:7.3f}\"\n for i in range(len(self.w)):\n s += \" \".join(fmt.format(w) for w in self.w[i])\n s += \" | \" + fmt.format(self.b[i]) + \"\\n\"\n return s", "def __repr__(self):\n\n repme = \"x0= {!r}, kf= {!r}, n_upd= {!r}\"\\\n .format(self.x0, self.kf, self.n_upd)\n\n return \"UmbrellaSampling({!s})\".format(repme)", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(UmbrellaSampling.key, self.x0, self.kf, self.n_upd)\n\n return strme", "def dump(self) :\n st = \"%s=%s, valid=%d, found=%d, type=%s stringValue=%s\" \\\n %(self.name_, str(self.value_), self.valid_, self.found_, \\\n self.type_, self.stringValue_)\n print st", "def __repr__(self):\n return f\"Fact-Sheet: '{self.title}'\"" ]
[ "0.68674856", "0.68624336", "0.6645672", "0.6532725", "0.6322785", "0.628434", "0.62689245", "0.6190423", "0.604574", "0.6039333", "0.59951794", "0.5989062", "0.5976397", "0.59762686", "0.5963851", "0.5931985", "0.59197044", "0.59166557", "0.590007", "0.5897591", "0.5883885", "0.5875002", "0.5871039", "0.5841265", "0.58263445", "0.57791466", "0.5776673", "0.57741016", "0.5771794", "0.576991", "0.57687056", "0.57642436", "0.57613784", "0.5761286", "0.57434684", "0.57386446", "0.5727442", "0.57211435", "0.5713379", "0.5712394", "0.5681544", "0.566913", "0.56606495", "0.5660143", "0.5659392", "0.5659201", "0.5659066", "0.56570095", "0.5656695", "0.565524", "0.565142", "0.56514096", "0.56500375", "0.5644358", "0.5643493", "0.56415284", "0.56398815", "0.5635766", "0.56275874", "0.5626152", "0.5620231", "0.5616022", "0.5613762", "0.5608229", "0.5605557", "0.560424", "0.5597465", "0.55968577", "0.55914587", "0.5588962", "0.5575548", "0.55669135", "0.55638903", "0.5563734", "0.5562449", "0.55596083", "0.55587745", "0.5554324", "0.5551766", "0.55502766", "0.5540235", "0.5535567", "0.55353534", "0.5534336", "0.55321634", "0.5532008", "0.55258447", "0.55214185", "0.551398", "0.5507092", "0.5501527", "0.55007696", "0.5497796", "0.54949605", "0.5492957", "0.54916894", "0.5483563", "0.54835105", "0.54822475", "0.5472809" ]
0.7635887
0
Syncs an account by the account_name
def test_sync_account(self): runner = CliRunner() LOG.info("Testing 'calm sync account {}".format(ACCOUNT_NAME)) result = runner.invoke( cli, ["sync", "account", ACCOUNT_NAME], ) if result.exit_code: cli_res_dict = {"Output": result.output, "Exception": str(result.exception)} LOG.debug( "Cli Response: {}".format( json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) ) ) LOG.debug( "Traceback: \n{}".format( "".join(traceback.format_tb(result.exc_info[2])) ) ) pytest.fail("Account sync failed") LOG.info("Success")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sync_account(account):\n stripe_account = stripe.Account.retrieve(id=account.stripe_id)\n return sync_account_from_stripe_data(stripe_account)", "def put_account(self, account):\n \n pass", "def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_account[\"name\"]\r\n self.refresh()", "def account(self, account: str):\n self._account = account", "def account_name(self, account_name):\n\n self._account_name = account_name", "def account_name(self, account_name):\n\n self._account_name = account_name", "def update(self, account):\n model = models.load('Account', account)\n return self.client.update_account(model=model)", "def save_accounts(account):\n account.save_account()", "def save_accounts(account):\n account.save_account()", "def sync(self, options=None):\n return self._call_account_method(\n 'syncAccount', {\n 'options': options,\n }\n )", "def set_account(self, account: str):\n ret = self._call_txtrader_api('set_account', {'account': account})\n if ret:\n self.account = account\n return ret", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def MultiWaySync(self, accounts):\n\t\tcleaned_contacts = []\n\t\tcontacts = []\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tcontacts.extend(self.GetContactList())\n\t\t\n\t\tduplicates, originals = ceFindDuplicates(contacts)\n\t\tmerged, todelete = ceMergeDuplicates(duplicates)\n\t\t\n\t\tcleaned_contacts.extend(originals)\n\t\tcleaned_contacts.extend(merged)\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tself.RemoveAll()\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tfor contact in cleaned_contacts:\n\t\t\t\tself.BatchEnqueue('create', contact)\n\t\t\tself.ExecuteBatchQueue()", "def add_account(self, account):\n self.accounts[account.account_number] = account.json()\n # We should save in database the new account using self.di, but not now in order to get our tests passed", "def change_name(change_account):\n change_data(change_account, changed_data='name')", "def accounts(self, accounts):\n\n self._accounts = accounts", "def account_name(self, account_name):\n if account_name is None:\n raise ValueError(\"Invalid value for `account_name`, must not be `None`\") # noqa: E501\n\n self._account_name = account_name", "def save_account(self):\n Credential.account_list.append(self)", "def setAccount(self, account_id):\n self.data_struct['_setAccount'] = account_id", "def account_update(request):\r\n params = request.params\r\n json_body = request.json_body\r\n user_acct = request.user\r\n\r\n if 'name' in params and params['name'] is not None:\r\n name = params.get('name')\r\n user_acct.name = name\r\n\r\n if 'name' in json_body and json_body['name'] is not None:\r\n name = json_body.get('name')\r\n user_acct.name = name\r\n\r\n if 'email' in params and params['email'] is not None:\r\n email = params.get('email')\r\n user_acct.email = email.lower()\r\n\r\n if 'email' in json_body and json_body['email'] is not None:\r\n email = json_body.get('email')\r\n user_acct.email = email.lower()\r\n\r\n return _api_response(request, user_acct.safe_data())", "def setaccount(self, vergeaddress, account):\n return self.proxy.setaccount(vergeaddress, account)", "def sync(config, group, accounts=(), dryrun=False, 
region=None):\n config = validate.callback(config)\n destination = config.get('destination')\n client = boto3.Session().client('s3')\n\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n\n session = get_session(account['role'], region)\n account_id = session.client('sts').get_caller_identity()['Account']\n prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id\n prefix = \"%s/%s\" % (prefix, group)\n\n exports = get_exports(client, destination['bucket'], prefix + \"/\")\n\n role = account.pop('role')\n if isinstance(role, str):\n account['account_id'] = role.split(':')[4]\n else:\n account['account_id'] = role[-1].split(':')[4]\n account.pop('groups')\n\n if exports:\n last_export = exports.pop()\n account['export'] = last_export\n else:\n account['export'] = 'missing'\n last_export = None\n try:\n tag_set = client.get_object_tagging(\n Bucket=destination['bucket'], Key=prefix).get('TagSet', [])\n except ClientError:\n tag_set = []\n\n tags = {t['Key']: t['Value'] for t in tag_set}\n tagged_last_export = None\n\n if 'LastExport' in tags:\n le = parse(tags['LastExport'])\n tagged_last_export = (le.year, le.month, le.day)\n account['sync'] = tagged_last_export\n else:\n account['sync'] = account['export'] != 'missing' and 'sync' or 'missing'\n\n if last_export is None:\n continue\n\n if tagged_last_export == last_export or account['export'] == 'missing':\n continue\n\n if dryrun:\n continue\n\n client.put_object(\n Bucket=destination['bucket'],\n Key=prefix,\n Body=json.dumps({}),\n ACL=\"bucket-owner-full-control\",\n ServerSideEncryption=\"AES256\")\n\n export_time = datetime.now().replace(tzinfo=tzlocal()).astimezone(tzutc())\n export_time = export_time.replace(\n year=last_export[0], month=last_export[1], day=last_export[2],\n minute=0, second=0, microsecond=0, hour=0)\n client.put_object_tagging(\n Bucket=destination['bucket'], Key=prefix,\n Tagging={\n 'TagSet': [{\n 'Key': 'LastExport',\n 'Value': export_time.isoformat()}]})\n\n accounts_report = []\n for a in config.get('accounts'):\n if accounts and a['name'] not in accounts:\n continue\n if isinstance(a['sync'], tuple):\n a['sync'] = \"%s/%s/%s\" % (a['sync'])\n if isinstance(a['export'], tuple):\n a['export'] = \"%s/%s/%s\" % (a['export'])\n accounts_report.append(a)\n\n accounts_report.sort(key=operator.itemgetter('export'), reverse=True)\n print(tabulate(accounts_report, headers='keys'))", "def get_account(self, account):\n \n pass", "def onAccountUpdate(self, data):\n pass", "def put(self, account=None, user=None, account_id=None):\n return super().put()", "def save_account(self, account = None):\n\t\tif account == None:\n\t\t\taccount = self.currentAccount\n\t\tself.config.add_section(account.data['name'])\n\t\tfor field in account.data:\n\t\t\tself.config.set(account.data['name'], field, str(account.data[field]))\n\t\tself.config.write(open(self.configFile, 'w'))", "def update_cloud_account_name(cls, body: AwsCloudAccountUpdateName) -> Dict:\n\t\tpass", "def create(self, account):\n model = models.load('Account', account)\n\n return self.client.create_account(model=model)", "def sync(self):\n acctManager = self.acctManager\n acct = acctManager.account(0)\n gapPolicy = 5\n acct.generateGapAddresses(gapPolicy)\n watchAddresses = set()\n\n # send the initial balance\n self.signals.balance(acct.balance)\n addresses = acct.allAddresses()\n \n # Update the account with known UTXOs.\n chain = self.blockchain\n blockchainUTXOs = chain.UTXOs(addresses)\n 
acct.resolveUTXOs(blockchainUTXOs)\n\n # Subscribe to block and address updates.\n chain.subscribeBlocks(self.blockSignal)\n watchAddresses = acct.addressesOfInterest()\n if watchAddresses:\n chain.subscribeAddresses(watchAddresses, self.addressSignal)\n # Signal the new balance.\n b = acct.calcBalance(self.blockchain.tip[\"height\"])\n self.signals.balance(b)\n self.save()\n return True", "def funding_account_account_name(self, funding_account_account_name):\n\n self._funding_account_account_name = funding_account_account_name", "def update_account(row, account):\n if row['LAST_UPDATED_FROM_PAYGOV']:\n updated_at = datetime_from(row['LAST_UPDATED_FROM_PAYGOV'])\n account.donations.filter(time__lte=updated_at).delete()\n if account.category == Account.PROJECT:\n set_balances(row, account)\n account.save()", "def update_account_data(self):\n self.ensure_one()\n getattr(self, '%s_update_account_data' % self.provider, lambda: None)()", "def get_account_by_name(self, account_name):\n accounts = self.service_old.management().accounts().list().execute()\n\n account = None\n if accounts.get('items'):\n account = next(acnt for acnt in accounts.get('items') if acnt[\"name\"] == account_name)\n\n if account is None:\n log_msg = \"The account named \" + account_name + \" does not exist!\"\n print(log_msg)\n\n return account", "def newaccount(accountname, account, owner, active, memo, posting, create_claimed_account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n if owner is None or active is None or memo is None or posting is None:\n password = click.prompt(\"Keys were not given - Passphrase is used to create keys\\n New Account Passphrase\", confirmation_prompt=True, hide_input=True)\n if not password:\n print(\"You cannot chose an empty password\")\n return\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, password=password)\n else:\n tx = mph.create_account(accountname, creator=acc, password=password)\n else:\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting)\n else:\n tx = mph.create_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting) \n tx = json.dumps(tx, indent=4)\n print(tx)", "def accounts():", "def on_account(self, account: AccountData):\n # self.on_event(EVENT_ACCOUNT, account)\n # self.on_event(EVENT_ACCOUNT + account.vt_accountid, account)\n pass", "def updateAccount(accountNumber: int, payload: str ):\n statement = \"update account set \" + payload+ f\" where account_number = {accountNumber}\"\n cursor = connection.cursor()\n try:\n cursor.execute(statement)\n cursor.commit()\n cursor.close()\n except mysql.Error as err:\n print (err)\n raise\n else:\n cursor.close()", "def submit(self):\n name = self.nameEntry.get()\n server = self.servEntry.get()\n key = self.keyEntry.get()\n secret = self.secEntry.get()\n try:\n accounts.new(name, key, secret, server)\n # Test if account valid\n try:\n core.account_available_margin(name)\n self.quit()\n except Exception as e:\n tkinter.messagebox.showerror(\"Error\", \"Wasn't able to validate \"\n + \"the new account:\\n\" + str(e))\n accounts.delete(name)\n except BitmexAccountsException as e:\n tkinter.messagebox.showerror(\"Error\", str(e))", "def 
find_by_account_name(cls, account_name):\n for account in cls.credentials_list:\n if account.account_name == account_name:\n return account", "def save_account(self):\n Credentials.credentials_list.append(self)", "def put_account():\n\n # init vars\n user = g.user\n\n # pre-validate data\n errors = unique({}, Administrator, Administrator.username,\n request.json.get('username', None), update=user)\n\n errors = unique_email(errors, Administrator, Administrator.email,\n request.json.get('email', None), update=user)\n\n # validate data\n try:\n data = UserAccountAdminSchema().load(request.json)\n except ValidationError as err:\n errors = dict(list(errors.items()) + list(err.messages.items()))\n\n # return any errors\n if errors:\n return jsonify({\"error\": errors}), 400\n\n # save user account\n user.username = data['username'].strip()\n user.email = data['email'].strip()\n user.first_name = data['first_name'].strip()\n user.last_name = data['last_name'].strip()\n\n db.session.commit()\n\n # response\n return jsonify({'user_account': UserAccountAdminSchema().dump(user)}), 200", "def update(self, customerguid, name=\"\", login=\"\", password=\"\", email=\"\", address=\"\", vat=\"\", jobguid=\"\", executionparams=None):", "def account_id(self, account_id):\n self._account_id = account_id", "def test_account_name(self):\n account = Account('test-account')\n self.assertEqual(account.name, 'test-account')", "def test_search_by_account(self):\n self.new_credentials.save_creds()\n account_found = Credentials.search_by_account(\"Instagram\")\n \n self.assertEqual(account_found.username,self.new_credentials.username)", "def set_account_id(account_id):\n conn = get_connect()\n conn.execute(\"UPDATE account SET isSearched = 1 WHERE accountId = \" + str(account_id))\n conn.commit()\n conn.close()\n print(\"accountId \" + str(account_id) + \" has been searched\")\n return", "def save(self, **kwargs):\n owner = str(self.vhost.domain.owner())\n if not self.name.startswith(owner + '_'):\n self.name = owner + '_' + self.name\n try:\n super(Account, self).save(**kwargs)\n except IntegrityError:\n i = 1\n base_name = self.name\n while True:\n self.name = base_name + '-' + str(i)\n try:\n super(Account, self).save(**kwargs)\n return\n except IntegrityError:\n i += 1", "def set_accounts(self, accounts):\n\n\t\tif accounts is not None and not isinstance(accounts, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: accounts EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__accounts = accounts\n\t\tself.__key_modified['Accounts'] = 1", "def login(self, name, pin):\n self.account = self.bank.get(name, pin)\n if self.account:\n return \"success\"\n else:\n return \"faliure\"", "def accounts(self, accounts):\n if accounts is None:\n raise ValueError(\"Invalid value for `accounts`, must not be `None`\") # noqa: E501\n\n self._accounts = accounts", "def accounts():\n pass", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def test_save_account(self):\n self.new_account.save_account() # add account to list\n self.assertEqual(len(Credential.credential_list),\n 1) # check length of list", "def test_sync_biz_from_sugar_acct(self):\n 
LOG.debug('test_sync_biz_from_sugar_acct')\n business = Business.objects.get(id=114)\n advertiser = business.advertiser\n module = \"Accounts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n sugar_dict = sugar_list[0]\n sugar_dict['business_id_c'] = ''\n self.sugar.set_entry(module, dict_to_name_value(sugar_dict))\n billing_record = BillingRecord.objects.get(id=114)\n order = billing_record.orders.all()[0]\n order.delete()\n billing_record.delete()\n business.delete()\n sync_business_from_sugar(test_mode=True, sugar=self.sugar)\n # business is not created since zip is not valid\n try:\n business = Business.objects.get(advertiser=advertiser)\n self.assertTrue(False)\n except business.DoesNotExist:\n self.assertTrue(True)", "def find_by_account(cls, accounts):\n\n for name in cls.account_list:\n if name.acc_name == accounts:\n return name", "def for_account(self, account_id: str):\n self._add_query_param(\"account\", account_id)\n return self", "def get_account(self, name):\n return self._accounts[name]", "def test_account_update(self):\r\n params = {\r\n 'name': u'Test Admin'\r\n }\r\n res = self.testapp.post(\r\n str(u\"/api/v1/admin/account?api_key=\" + str(API_KEY)),\r\n content_type='application/json',\r\n params=json.dumps(params),\r\n status=200)\r\n\r\n # make sure we can decode the body\r\n user = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n user['username'], 'admin',\r\n \"Should have a username of admin {0}\".format(user))\r\n self.assertEqual(\r\n user['name'], 'Test Admin',\r\n \"Should have a new name of Test Admin {0}\".format(user))\r\n\r\n self.assertTrue(\r\n 'password' not in user,\r\n \"Should not have a field password {0}\".format(user))\r\n self.assertTrue(\r\n '_password' not in user,\r\n \"Should not have a field password {0}\".format(user))\r\n self.assertTrue(\r\n 'api_key' not in user,\r\n \"Should not have a field password {0}\".format(user))\r\n self._check_cors_headers(res)", "def test_update_account(self):\n id = Account.objects.first().id\n url = reverse('account:accounts-detail', kwargs={\"id\":id})\n data = {'name': 'Updated Test Account 1'}\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Account.objects.count(), 1)\n self.assertEqual(Account.objects.get().name, 'Updated Test Account 1')", "def HandleAccounts(self, result):\n self.logger.debug('Checking for changes to user accounts.')\n configured_users = self.utils.GetConfiguredUsers()\n enable_oslogin = self._GetEnableOsLoginValue(result)\n enable_two_factor = self._GetEnableTwoFactorValue(result)\n if enable_oslogin:\n desired_users = {}\n self.oslogin.UpdateOsLogin(True, two_factor_desired=enable_two_factor)\n else:\n desired_users = self._GetAccountsData(result)\n self.oslogin.UpdateOsLogin(False)\n remove_users = sorted(set(configured_users) - set(desired_users.keys()))\n self._UpdateUsers(desired_users)\n self._RemoveUsers(remove_users)\n self.utils.SetConfiguredUsers(desired_users.keys())", "def account_put(request):\n fields = [\"email\", \"token\", \"updateFields\"]\n\n # serializes the quert string to a dict (neeto)\n args = request.args\n\n query_validation = validate_query_params(args, fields)\n # check that body validation succeeded\n if query_validation[1] != 200:\n return query_validation\n\n auth = azure_refresh_token(args[\"token\"])\n if not auth[0]:\n return http400(\"Not 
Authenticated\")\n # Add tuple to response\n\n account_db = Database(\"accounts\")\n\n try:\n account_db.update(args[\"email\"], ast.literal_eval(args[\"updateFields\"]))\n response = {\n \"access_token\": auth[0],\n \"refresh_token\": auth[1],\n }\n response.update(account_db.get(args[\"email\"]).to_dict())\n\n return jsonHttp200(\"Account Updated\", response)\n except:\n return http400(\"Account update error\")", "def flush_account(self):\n if self.data_channel:\n if not self.data_channel.transfer_in_progress():\n self.data_channel.close()\n self.data_channel = None\n if self.data_server:\n self.data_server.close()\n self.data_server = None\n\n self.fs.rnfr = None\n self.authenticated = False\n self.username = \"\"\n self.attempted_logins = 0\n self.current_type = 'a'\n self.restart_position = 0\n self.quit_pending = False\n self.in_dtp_queue = None\n self.out_dtp_queue = None\n\n\n # --- connection", "def type_account(self, account):\n\n\t\twith allure.step(\"Type payee account\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.ACCOUNT_INPUT)\n\t\t\telement.write(account)\n\t\t\treturn None", "def power(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if len(account) == 0:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for name in account:\n a = Account(name, morphene_instance=stm)\n print(\"\\n@%s\" % a.name)\n a.print_info(use_table=True)", "def __update_accounts(self):\n\t\tfor acct in self.wallet:\n\t\t\tif len(get_unspent(acct[\"address\"], self.testnet))!=0:\n\t\t\t\tacct[\"status\"] = \"in use\"\n\t\t\telse:\n\t\t\t\tspent = get_spent(acct[\"address\"], self.testnet)\n\t\t\t\tconfirm = (s[\"confirmations\"] >= 6 for s in spent)\n\t\t\t\tif len(spent) > 0 and all(confirm):\n\t\t\t\t\tacct[\"status\"] = \"used\"\n\t\t\t\telif len(spent) > 0:\n\t\t\t\t\tacct[\"status\"] = \"in use\"\n\t\tself.header[\"LAST_UPDATE_TIME\"] = str(round(time.time()))\n\t\toutput = [self.header, *self.wallet]\n\t\twith open(self.filepath, 'w+') as f:\n\t\t\tjson.dump(output, f)", "def update_account(self, id=None, username=None, **kwargs):\n if not (bool(id) != bool(username)):\n raise TypeError('account id OR username needed')\n if 'full_name' in kwargs.keys():\n try:\n self.g.put('accounts/%s/name' % id or username,\n data=json.dumps({'name': kwargs['full_name']}))\n except HTTPError as e:\n return self._manage_errors(e)\n if 'email' in kwargs.keys():\n # Note that the user will have to confirm the email and set it\n # as preferred herself in the gerrit interface.\n try:\n url = 'accounts/%s/emails/%s' % (id or username,\n kwargs['email'])\n j = {'email': kwargs['email']}\n if kwargs.get('no_email_confirmation'):\n j['preferred'] = True\n j['no_confirmation'] = True\n self.g.put(url,\n data=json.dumps(j))\n except HTTPError as e:\n if e.response.status_code == 409:\n # the email already exists, set it as preferred\n url = url + '/preferred'\n try:\n self.g.put(url,\n data=json.dumps(j))\n except HTTPError as ee:\n return self._manage_errors(ee)\n else:\n return self._manage_errors(e)\n if not ('full_name' in kwargs.keys() or 'email' in kwargs.keys()):\n raise Exception('Unknown fields')\n return True", "def transfer(self, name, contact):\n response = self.request.post('domain_transfers', {\n 'domain': {'name': name, 'registrant_id': contact.id}\n })\n\n return response.was_successful()", "def sync_swag(owner, bucket_name, 
bucket_prefix, bucket_region, account_type, spinnaker):\n from security_monkey.account_manager import account_registry\n\n swag_opts = {\n 'swag.type': 's3',\n 'swag.bucket_name': bucket_name,\n 'swag.data_file': bucket_prefix,\n 'swag.region': bucket_region\n }\n\n swag = SWAGManager(**parse_swag_config_options(swag_opts))\n account_manager = account_registry[account_type]()\n\n for account in swag.get_all(\"[?provider=='{provider}']\".format(provider=account_type.lower())):\n active = False\n for s in account['services']:\n if s['name'] == 'security_monkey':\n for status in s['status']:\n if status['region'] == 'all':\n active = status['enabled']\n\n thirdparty = True\n if account['owner'] == owner:\n thirdparty = False\n\n if spinnaker:\n spinnaker_name = swag.get_service_name('spinnaker', \"[?id=='{id}']\".format(id=account['id']))\n if not spinnaker_name:\n name = account['name']\n else:\n name = spinnaker_name\n else:\n name = account['name']\n\n notes = account['description']\n identifier = account['id']\n\n custom_fields = {}\n s3_name = swag.get_service_name('s3', \"[?id=='{id}']\".format(id=account['id']))\n if s3_name:\n custom_fields['s3_name'] = s3_name\n\n for service in account['services']:\n if service['name'] == 's3':\n c_id = service['metadata'].get('canonicalId')\n if c_id:\n custom_fields['canonical_id'] = c_id\n\n account_manager.sync(account_manager.account_type, name, active, thirdparty,\n notes, identifier,\n custom_fields=custom_fields)\n db.session.close()\n app.logger.info('SWAG sync successful.')", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "def save(self, *args, **kwargs):\n self.name = unique_slugify(\n self.name,\n instance=self,\n queryset=AccountTeam.objects.filter(account=self.account),\n )\n return super().save(*args, **kwargs)", "def add(self, account):\n if isinstance(account, Account) and account not in self.account:\n self.account.append(account)", "def FromName(self, sAccountName):\n db = catocommon.new_conn()\n sSQL = \"select account_id from cloud_account where account_name = '{0}' or account_id = '{0}'\".format(sAccountName)\n\n caid = db.select_col_noexcep(sSQL)\n if db.error:\n raise Exception(\"Cloud Account Object: Unable to get Cloud Account from database. \" + db.error)\n\n if caid:\n self.FromID(caid)\n else:\n raise Exception(\"Error getting Cloud Account ID for Name [%s] - no record found. 
%s\" % (sAccountName, db.error))\n\n db.close()", "def account_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_name\")", "def account_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_name\")", "def account_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_name\")", "def add_account(self, account, add_ms_to_account=True):\r\n # check if name already exists and throw ValueError if it does\r\n # it doesn't make sense to add an account twice -- this could be\r\n # refactored to use a set instead\r\n # check for accounts by name per Q2 bonus below\r\n if account.name in [account.name for account in self._accounts]:\r\n raise ValueError(\"{} already associated to {}\".format(account.name,\r\n self.name))\r\n self._accounts.append(account)\r\n if add_ms_to_account:\r\n # add_account_to_ms is False because we've already added the\r\n # account to this segment, don't want to do it again\r\n account.add_to_market_segment(self, add_account_to_ms=False)", "def lookup_by_account_name(account_name):\n try:\n account = session.query(SnapshotBalance).filter(\n func.lower(SnapshotBalance.account_name) == account_name.lower()).first()\n except IntegrityError as pie:\n msg = str(pie)\n raise InvalidUsage(msg, status_code=400)\n return account", "def __init__(self, name, accounts=None):\r\n self.name = name\r\n if accounts:\r\n self._accounts = accounts\r\n for account in accounts:\r\n # add_account_to_ms is False because we've already added the\r\n # account to this segment, don't want to do it again\r\n account.add_to_market_segment(self, add_account_to_ms=False)\r\n else:\r\n self._accounts = []\r\n check_for_existing_market_segment(self)", "def set_account(self, account_list):\n self.multiple_items_selection_from_kendo_dropdown(self.account_kendo_dropdown_locator, account_list)\n self.wait_for_ajax_spinner_load()", "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "def register_account(self, contract_name, budget_name, ynab_account_name, account_id=None):\n ynab_contract = self.get_contract_by_name(contract_name)\n if not ynab_contract:\n self._logger.error('Could not get contract by name \"%s\"', contract_name)\n return False\n try:\n account_wrapper = getattr(importlib.import_module('ynabintegrationslib.adapters'),\n f'{ynab_contract.bank}{ynab_contract.type}')\n account = ynab_contract.contract.get_account(account_id)\n self._accounts.append(account_wrapper(account,\n self._ynab,\n budget_name,\n ynab_account_name))\n return True\n except Exception: # pylint: disable=broad-except\n self._logger.exception('Problem registering account')\n return False", "def apply(self):\n changed = False\n account_exists = False\n update_account = False\n account_detail = self.get_account()\n\n if account_detail:\n account_exists = True\n\n if self.state == 'absent':\n changed = True\n\n elif self.state == 'present':\n # Check if we need to update the account\n\n if account_detail.username is not None and self.new_element_username is not None and \\\n account_detail.username != self.new_element_username:\n update_account = True\n changed = True\n\n elif account_detail.status is not None and self.status is not None \\\n and account_detail.status != self.status:\n update_account = True\n changed = True\n\n elif account_detail.initiator_secret is not None and self.initiator_secret is not None \\\n and account_detail.initiator_secret != self.initiator_secret:\n 
update_account = True\n changed = True\n\n elif account_detail.target_secret is not None and self.target_secret is not None \\\n and account_detail.target_secret != self.target_secret:\n update_account = True\n changed = True\n\n elif account_detail.attributes is not None and self.attributes is not None \\\n and account_detail.attributes != self.attributes:\n update_account = True\n changed = True\n else:\n if self.state == 'present' and self.status is None:\n changed = True\n\n if changed:\n if self.module.check_mode:\n pass\n else:\n if self.state == 'present':\n if not account_exists:\n self.create_account()\n elif update_account:\n self.update_account()\n\n elif self.state == 'absent':\n self.delete_account()\n\n self.module.exit_json(changed=changed)", "def delete_account(self, account):\n \n pass", "def sync(self, sync_from, sync_to, **kwargs):\n return self.exec_command('sync %s %s' % (sync_from, sync_to), **kwargs)", "def rename(ctx, query, name, force, password, remember):\n\n _init_session(ctx, password, remember)\n session = ctx.obj[\"session\"]\n creds = session.list_credentials()\n hits = _search(creds, query, True)\n if len(hits) == 0:\n click.echo(\"No matches, nothing to be done.\")\n elif len(hits) == 1:\n cred = hits[0]\n if \":\" in name:\n issuer, name = name.split(\":\", 1)\n else:\n issuer = None\n\n new_id = _format_cred_id(issuer, name, cred.oath_type, cred.period)\n if any(cred.id == new_id for cred in creds):\n raise CliFail(\n f\"Another account with ID {new_id.decode()} \"\n \"already exists on this YubiKey.\"\n )\n if force or (\n click.confirm(\n f\"Rename account: {_string_id(cred)} ?\",\n default=False,\n err=True,\n )\n ):\n session.rename_credential(cred.id, name, issuer)\n click.echo(f\"Renamed {_string_id(cred)} to {new_id.decode()}.\")\n else:\n click.echo(\"Rename aborted by user.\")\n\n else:\n _error_multiple_hits(ctx, hits)", "def acctLogin(self):\n self.acctObj.email = \"default@example.com\"\n self.password = \"default\"\n self._displayName = \"defaultUser\"\n return True", "def switch_to_boughtx(window: WindowSpecification, account: str):\n PAUSE_TIME = .1\n window.type_keys(\"{TAB}\")\n window.type_keys(\"boughtx\")\n\n # Tab to transfer field.\n import time\n for i in range(7):\n time.sleep(PAUSE_TIME)\n window.type_keys(\"{TAB}\")\n\n time.sleep(PAUSE_TIME)\n window.type_keys(account)\n time.sleep(PAUSE_TIME)\n # Accept account.\n window.type_keys(\"{ENTER}\")\n time.sleep(PAUSE_TIME)\n # Save transaction.\n window.type_keys(\"{ENTER}\")", "def muting(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for a in account:\n a = Account(a, morphene_instance=stm)\n print(\"\\nMuting statistics for @%s (please wait...)\" % a.name)\n muting = a.get_mutings(False)\n muting.print_summarize_table(tag_type=\"Muting\")", "def new_account(firstname, lastname, pin):\n pass", "def run(config, start, end, accounts, region, debug):\n config = validate.callback(config)\n destination = config.get('destination')\n start = start and parse(start) or start\n end = end and parse(end) or datetime.now()\n executor = debug and MainThreadExecutor or ThreadPoolExecutor\n with executor(max_workers=32) as w:\n futures = {}\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n futures[\n w.submit(process_account, account, start,\n end, destination, region)] = account\n for f 
in as_completed(futures):\n account = futures[f]\n if f.exception():\n log.error(\"Error on account %s err: %s\",\n account['name'], f.exception())\n log.info(\"Completed %s\", account['name'])", "def account_exist(cls, account):\n for name in cls.account_list:\n if name.acc_name == account:\n return True\n\n return False" ]
[ "0.7384746", "0.7075692", "0.68083966", "0.6631079", "0.6615135", "0.6615135", "0.657953", "0.6560432", "0.6560432", "0.65222824", "0.63499165", "0.6179354", "0.6179354", "0.6179354", "0.6179354", "0.60058033", "0.6002847", "0.5868163", "0.5854182", "0.5799399", "0.57433146", "0.57306117", "0.5717644", "0.57160294", "0.56700563", "0.5660693", "0.5633001", "0.56178117", "0.5596922", "0.5581498", "0.55676377", "0.5520654", "0.55184937", "0.5464097", "0.5436658", "0.5411825", "0.5408048", "0.5402284", "0.53753245", "0.5372522", "0.5370777", "0.53582436", "0.53212994", "0.5315057", "0.5298647", "0.5289559", "0.5289362", "0.5288932", "0.5268281", "0.52598655", "0.5252385", "0.52520627", "0.52490574", "0.5234669", "0.52311265", "0.52311265", "0.52311265", "0.52311265", "0.52311265", "0.52311265", "0.5218061", "0.5215404", "0.5197757", "0.51977473", "0.519519", "0.5188022", "0.51570976", "0.5156952", "0.5149103", "0.514732", "0.514051", "0.51380515", "0.5127626", "0.5124767", "0.50926733", "0.5089326", "0.5082717", "0.5081598", "0.5075913", "0.5064863", "0.5061951", "0.5061951", "0.5061951", "0.50436693", "0.5042295", "0.50388974", "0.5035168", "0.5032485", "0.5032485", "0.5032252", "0.5013069", "0.50116587", "0.50079036", "0.49879172", "0.49703133", "0.4959215", "0.4948511", "0.49478373", "0.49455816", "0.4942032" ]
0.6851966
2
Send a password reset email to the user
def deliver_password_reset_mail(user_id, reset_password_url):
    user = User.query.get(user_id)
    if user is not None:
        try:
            url = f"{celery.conf.get('EMAIL_SERVICE_HOST')}/api/email/"
            payload = {
                "sender": celery.conf.get("MAIL_DEFAULT_SENDER"),
                "receiver": user.email,
                "subject": "Password reset from snake eyes",
                "template_id": 2,
                "request_id": uuid4().hex,
                "template_params": {"username": user.username, "reset_password_url": reset_password_url}
            }
            response = post(url, json=payload, headers={"Accept": "application/json"})
        except RequestException as e:
            print(f"[********] UNABLE TO DELIVER MAIL {e}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_password_reset_email():\n aaa.send_password_reset_email(\n username=post_get('username'),\n email_addr=post_get('email_address')\n )\n return 'Please check your mailbox.'", "def send_pw_reset_email(user):\n token = user.get_token()\n message = Message(\n 'Reset Your Password',\n sender='storcwebsite@gmail.com',\n recipients=[user.email])\n message.body = f\"To verify reset your password, click the link \" \\\n f\"below:\\n\\n\" \\\n f\"{url_for('users.reset_password', token=token, _external=True)}\"\n mail.send(message)", "def send_password_reset_email(user):\n\n token = user.get_password_token()\n reset_time=datetime.now()\n send_email('[SiteSurveyApp] Account password reset',\n recipients=[user.email],\n sender=app.config['MAIL_DEFAULT_SENDER'],\n text_body=render_template('auth/emails/reset_password.txt',\n user=user, token=token, reset_time=reset_time),\n html_body=render_template('auth/emails/reset_password.html',\n user=user, token=token, reset_time=reset_time))", "def send_reset_email(staff):\n token = staff.get_reset_token()\n msg = Message('Password Reset Request', \n sender='NoReplyBloodBank@my.unt.edu', \n recipients=[staff.email])\n msg.body = f\"\"\"To reset your password, visit the following link:\n{url_for('reset_token', token=token, _external=True)}\nIf you did not make this request, then simply record this email and no changes will be made.\"\"\"\n try:\n mail.send(msg)\n except Exception as e:\n print(e)", "def send_password_reset(user):\n _log('++ sending password reset email for: {} {}'.format(user.first_name, user.last_name))\n secret_string = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(20))\n\n # if local set the domain to localhost\n if ENV_DICT['ENVIRON'] == 'LOCAL':\n secret_link = 'http://localhost:8080/reset/{}/'.format(secret_string)\n # otherwise use the subdomain of the tenancy\n else:\n secret_link = 'http://{}.cpisearch.io/reset/{}/'.format(user.tenancy, secret_string)\n\n reset_link_object = PasswordResetLink(\n user_id=user.user_id,\n secret_link=secret_string,\n tenancy=user.tenancy,\n )\n db.session.add(reset_link_object)\n db.session.commit()\n send_email(\n to_email=user.email,\n subject='SuccessKit Password Reset',\n template_path='emails/password_reset_email.html',\n template_vars={\n 'user': user,\n 'secret_link': secret_link\n }\n )", "def post(self):\n data = request.get_json()\n user = actions.get_user_by_email(data['email'])\n html = '<p>To reset your password </p>'\n subject = 'Request for changing password, ' + user['username']\n actions.send_email(data['email'], user['username'], user['password'], subject,\n '/reset_password/', html, False)\n pass", "def send_password_reset_mail(email, token):\n print(\"reset password\")\n url = f\"{settings.SITE_URL}/reset-password?email={email}&token={token}\"\n SUBJECT = \"Reset Password Request\"\n # The HTML body of the email.\n body = \"\"\"\n <html>\n <head></head>\n <body>\n <p>Here is your password reset link:</p>\n <p><a href='{0}'>{1}</a></p>\n </body>\n </html>\n \"\"\".format(url, url)\n send_mail(SUBJECT, body, email)", "def send_reset_email(s):\n \n email = s.email\n username = s.username\n sponsor_id = s.id\n reset_key = id_generator(size=20)\n\n cache.set('reset_%s' % reset_key, sponsor_id, 86400) \n\n message = \"We have received a request to reset your password for your \"\n message += \"Goo.im sponsor account. 
Please click the link below to reset your password.\\n\\n\"\n message += \"https://goo.im/sponsor/password?token=%s\" % reset_key\n message += \"\\n\\n\"\n message += \"If you feel that you received this message in error, or you did not request a password \"\n message += \"reset, please contact our admins by replying to this email.\"\n message += \"\\n\\n\"\n message += \"-- The Goo.im team\"\n\n send_mail('Password Request', message,\n 'support@snipanet.com', [email])", "def send_reset_email(user):\n msg = emails.reset_email(user)\n try:\n mail.send(msg)\n except Exception as e:\n traceback.print_exc()", "def send_recovery_password_email(token: str, email: str) -> None:\n\n # TODO ...\n # Load html templates and get the content from it.\n # html_content = ...\n\n # You must have to send this as a anchor\n # to my-domain.com/reset-password?token=ad5a....\n link = f\"{SERVER_HOST}/reset-password?token={token}\"\n content = f\"\"\"\n <h1>Reset your password</h1>\n <p></p>\n <a href=\"{link}\" target=\"_blank\" rel=\"noopener noreferrer\">Press here</a>\n \"\"\"\n email = sender.create_email(\n to_list=[email],\n subject=f\"Recovery Password\",\n html_content=content,\n )\n sender.send_email(email_to_send=email)", "def send_email( user, password ):\n \n mail = Mailer( host = EMAIL['host'], \n port = EMAIL['port'],\n use_tls = EMAIL['use_tls'], \n usr = EMAIL['user'], \n pwd = EMAIL['password']\n )\n \n message = Message( From = 'help@rxmedaccess.com',\n To = [user.email],\n Subject = \"Password Reset\"\n )\n \n body = \"\"\"Your new password for {} is {}\n You can reset it to what you like on your settings page once you log in with\n this password\n \"\"\".format(__name__, password )\n\n message.Body = body\n try:\n mail.send(message)\n except Exception as e:\n log.error( 'Send mail error: {}'.format( str(e) ) )", "def post(self):\n try:\n url = request.host_url + 'reset/password/'\n body = request.get_json()\n base_url = request.url_root\n email = body.get('email')\n\n if not email:\n raise SchemaValidationError\n\n user = User.objects.get(email=email)\n if not user:\n raise EmailDoesNotExistsError\n\n expires = datetime.timedelta(minutes=60)\n payload = {\"user_id\": str(user.id)}\n reset_token = create_access_token(payload, expires_delta=expires)\n\n return send_email('[Unboxit] Reset Your Password',\n sender='contact@tsantos.dev',\n recipients=[user.email],\n text_body=render_template(\n 'components/reset_password.txt',\n url=url + reset_token),\n html_body=render_template(\n 'components/reset_password.html',\n url=url + reset_token,\n first_name=user.first_name,\n base_url=base_url))\n except SchemaValidationError:\n raise SchemaValidationError\n except DoesNotExist:\n raise EmailDoesNotExistsError\n except Exception as e:\n raise InternalServerError", "def reset_password():\n if current_user.is_authenticated:\n return redirect(url_for('main.home'))\n\n form = RequestResetForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n send_reset_email(user) # located in utils.py\n flash('An email has been sent with instruction to reset your password', 'info')\n return redirect(url_for('users.login'))\n\n return render_template('reset_password_request.html', form=form)", "def action_reset_password(self):\n # prepare reset password signup\n create_mode = bool(self.env.context.get('create_user'))\n\n # no time limit for initial invitation, only for reset password\n expiration = False if create_mode else now(days=+1)\n\n 
self.mapped('partner_id').signup_prepare(signup_type=\"reset\", expiration=expiration)\n\n # send email to users with their signup url\n template = False\n if create_mode:\n try:\n template = self.env.ref('loyalty.set_password_email', raise_if_not_found=False)\n except ValueError:\n pass\n if not template:\n template = self.env.ref('loyalty.reset_password_email')\n assert template._name == 'mail.template'\n\n template_values = {\n 'email_to': '${object.email|safe}',\n 'email_cc': False,\n 'auto_delete': True,\n 'partner_to': False,\n 'scheduled_date': False,\n }\n template.write(template_values)\n\n for user in self:\n if not user.email:\n raise UserError(_(\"Cannot send email: user %s has no email address.\") % user.name)\n with self.env.cr.savepoint():\n template.with_context(lang=user.lang).send_mail(user.id, force_send=True, raise_exception=True)\n _logger.info(\"Password reset email sent for user <%s> to <%s>\", user.login, user.email)", "def send_reset_password_email(self, user, base_url):\n\n parsed_base_url = urlparse(base_url)\n if parsed_base_url.hostname != settings.PUBLIC_WEB_FRONTEND_HOSTNAME:\n raise BaseURLHostnameNotAllowed(\n f'The hostname {parsed_base_url.netloc} is not allowed.'\n )\n\n signer = self.get_reset_password_signer()\n signed_user_id = signer.dumps(user.id)\n\n if not base_url.endswith('/'):\n base_url += '/'\n\n reset_url = urljoin(base_url, signed_user_id)\n\n email = ResetPasswordEmail(user, reset_url, to=[user.email])\n email.send()", "def test_sendPasswordResetEmail(self, testUser):\n with mail.record_messages() as outbox:\n testUser.send_password_reset_email()\n assert len(outbox) == 1\n msg = outbox[0]\n assert \"jjones@yahoo.com\" in msg.recipients\n assert msg.subject == 'Ask Your Peeps: Password Reset'\n assert 'To reset your password, please paste the below link into'\\\n ' your browser' in msg.body", "def send_reset_email(user, domain_override=None,\n subject_template_name='registration/password_reset_request_subject.txt',\n email_template_name=None, use_https=False,\n token_generator=default_token_generator, from_email=None, request=None,\n html_email_template_name='registration/password_reset_email.html', extra_email_context=None):\n if user.first_name != \"\":\n user_name = user.first_name.title()\n else:\n user_name = user.email\n\n context = {\n 'email': user.email,\n 'user_name': user_name,\n 'domain': settings.BASE_URL,\n 'site_name': \"Clubby\",\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'user': user,\n 'token': token_generator.make_token(user),\n 'protocol': 'https' if use_https else 'http',\n }\n send_mail(subject_template_name, email_template_name, context, from_email, user.email,\n html_email_template_name=html_email_template_name)", "def reset_password():\n body = request.get_json()\n reset_token = body.get('reset_token')\n password = body.get('password')\n\n if not reset_token or not password:\n return jsonify(msg.MISSING_PARAMETER), 400\n\n user_email = decode_token(reset_token)['identity']\n is_changed = views.UserManagement().change_password(email=user_email, password=password)\n if not is_changed:\n return jsonify(msg.NO_DATA), 404\n\n send_email('[Shodita] Password reset successful', sender='shodita@shodita.com', recipients=[user_email],\n text_body='Password reset was successful', html_body='<p>Password reset was successful</p>')\n\n return jsonify(msg.SUCCESS), 200", "def forgot_password():\n url = 'http://localhost:8080/' + 'user/reset/'\n body = request.get_json()\n email = body.get('email')\n if not email:\n 
return jsonify(msg.MISSING_PARAMETER), 400\n user_email = views.UserManagement().exists(email=email)\n\n if not user_email:\n return jsonify(msg.NO_DATA), 404\n expires = datetime.timedelta(hours=24)\n reset_token = create_access_token(identity=email, expires_delta=expires)\n\n send_email('[Shodita] Reset Your Password', sender='shodita@shodita.com', recipients=[email],\n text_body=render_template('email/reset_password.txt', url=url + reset_token),\n html_body=render_template('email/reset_password.html', url=url + reset_token))\n\n return jsonify(msg.SUCCESS), 200", "def forgot_password():\r\n form = ForgotPasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user = model.user.User.query\\\r\n .filter_by(email_addr=form.email_addr.data)\\\r\n .first()\r\n if user and user.email_addr:\r\n msg = Message(subject='Account Recovery',\r\n recipients=[user.email_addr])\r\n if user.twitter_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Twitter')\r\n elif user.facebook_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Facebook')\r\n elif user.google_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Google')\r\n else:\r\n userdict = {'user': user.name, 'password': user.passwd_hash}\r\n key = signer.signer.dumps(userdict, salt='password-reset')\r\n recovery_url = url_for('.reset_password',\r\n key=key, _external=True)\r\n msg.body = render_template(\r\n '/account/email/forgot_password.md',\r\n user=user, recovery_url=recovery_url)\r\n msg.html = markdown(msg.body)\r\n mail.send(msg)\r\n flash(gettext(\"We've send you email with account \"\r\n \"recovery instructions!\"),\r\n 'success')\r\n else:\r\n flash(gettext(\"We don't have this email in our records. 
\"\r\n \"You may have signed up with a different \"\r\n \"email or used Twitter, Facebook, or \"\r\n \"Google to sign-in\"), 'error')\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Something went wrong, please correct the errors on the '\r\n 'form'), 'error')\r\n return render_template('/account/password_forgot.html', form=form)", "def request_password_reset():", "def forgot_password():\n if request.method == 'POST':\n if 'username' in request.form:\n username = request.form['username']\n user = Users.query.get(username)\n if user:\n reset_slug = utils.encrypt(username)\n reset_url = request.host_url + 'reset_password' + '/' + reset_slug\n from_email = ('noreply@thescriptgroup.in', 'TSG Bot')\n to_email = [(user.email, user.name)]\n subject = 'Password reset for Hades account'\n content = f\"Hello {user.name}, please click <a href=\\\"{reset_url}\\\">here</a> to reset your password!\"\n utils.send_mail(from_email, to_email, subject, content)\n return redirect(url_for('login'))\n return render_template('forgot_password.html')", "def send_email():\n send_mail(\"You've got some problem.\", 'REPAIR IT', 'dimazarj2009@rambler.ru',\n ['dimazarj2009@rambler.ru'], fail_silently=False,)", "def test_password_reset_email(self, send_mail_mock):\n pw_reset_name = 'auth_password_reset'\n # ensure view exists\n pw_reset_get_response = self.client.get(reverse(pw_reset_name))\n self.assertEqual(pw_reset_get_response.status_code, 200)\n # post data to password reset; make Django send email\n data = {'email': self.email}\n self.client.post(reverse(pw_reset_name), data=data, follow=True)\n # verify that email sent with right template\n send_mail_mock.assert_called_with(\n ANY,\n 'registration/password_reset_email.txt',\n ANY, ANY, ANY,\n html_email_template_name=ANY)", "def password_reset(request):\n\n\tcontext_dict = {}\n\tif request.method == 'POST':\n\t\temail = request.POST.get('email')\n\t\tif email:\n\t\t\tuser = models.Teacher.objects.get(\n\t\t\t\tsoft_delete=False, user__email=email\n\t\t\t)\n\t\t\tif not user:\n\t\t\t\tcontext_dict[\"message\"] = \"Email ID does'nt exist, Enter Correct details\"\n\t\t\tmail = {\n\t\t\t\t'email': email,\n\t\t\t\t'domain': request.META['HTTP_HOST'],\n\t\t\t\t'site_name': 'Placement Portal',\n\t\t\t\t'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n\t\t\t\t'user': user,\n\t\t\t\t'token': ''.join([random.choice(ascii_letters+digits) for i in range (128)]),\n\t\t\t\t'protocol': 'http',\n\t\t\t}\n\t\t\ttry:\n\t\t\t\treset_token = models.PasswordReset(\n\t\t\t\t\tuser=user,\n\t\t\t\t\ttoken=mail['token'],\n\t\t\t\t\ttoken_consumed=False,\n\t\t\t\t)\n\t\t\t\treset_token.save()\n\t\t\texcept Exception as e:\n\t\t\t\tprint (e)\n\t\t\tsubject_template_name = 'password_reset_email_subject.txt'\n\t\t\temail_template_name = 'password_reset_email.html'\n\t\t\tsubject = loader.render_to_string(subject_template_name, mail)\n\t\t\tsubject = ''.join(subject.splitlines())\n\t\t\temail_data = loader.render_to_string(email_template_name, mail)\n\t\t\tsend_mail(subject, email_data, DEFAULT_FROM_EMAIL, [email], fail_silently=False)\n\t\t\tcontext_dict[\"message\"] = \"Email has been sent to your registered Email ID with instructions.\"\n\treturn render(request, \"password_reset_form.html\", context_dict)", "def forgot_password():\n\n if not current_user.is_anonymous():\n return redirect(url_for(\"forum.index\"))\n\n form = ForgotPasswordForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n\n if user:\n token = 
user.make_reset_token()\n send_reset_token(user, token=token)\n\n flash((\"E-Mail sent! Please check your inbox.\"), \"info\")\n return redirect(url_for(\"auth.forgot_password\"))\n else:\n flash((\"You have entered an username or email that is not linked \\\n with your account\"), \"danger\")\n return render_template(\"auth/forgot_password.html\", form=form)", "def reset_password(email):\n user = AuthUser.query.filter_by(email=email).first()\n if user is None:\n return False\n # Generate email with unique link\n msg = Message(\n \"Password Reset Link\",\n recipients=[user.email] \n )\n msg.body = \"Click on this link and following the instructions to reset your \"\n \"password\\n\\n%s%s?uid=%s-%s\" % (\n app.config['SITE_URI'],\n \"/reset/password/\",\n user.id,\n user.get_uid()\n )\n mail.send(msg)\n return True", "def save(self, domain_override=None,\n subject_template_name='registration/password_reset_subject.txt',\n email_template_name='registration/password_reset_email.html',\n use_https=False, token_generator=default_token_generator,\n from_email=None, request=None,\n html_email_template_name=None):\n email = self.cleaned_data[\"email\"]\n User = get_user_model()\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n for user in active_users:\n subject = _('Flisol - Restore your password')\n # send_email(\n # subject,\n # [user.email],\n # email_template_name,\n # {\n # 'email': user.email,\n # 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n # 'user': user,\n # 'token': token_generator.make_token(user),\n # 'protocol': settings.PROTOCOL,\n # },\n # )", "def forgotPassword():\n if request.method == 'POST':\n if emailform():\n email = request.form['email1']\n\n #Confirm the user exist\n if hl.confirmUser(email):\n user = hl.getUser(\"Email\",email)\n refLink = \"http://\"+request.headers['Host']+hl.genUrl(user[\"Name\"],\"Password\")\n #Send email\n msg = \"\"\"\n Dear {},\n\n You are receiving this email because you have requested your password be reset. \n Use the following link to reset your password:\n\n {}\n\n If you did not request that your password be changed, please reply to this email immediately.\n\n Regards,\n Onegroup Admin Team\n \"\"\".format(user[\"Name\"],refLink)\n\n emailMessage(\"Password Reset\", [user[\"Email\"]], msg)\n return redirect(url_for('confirm', confirmed = 'Password reset email has been sent.'))\n else:\n flash(\"User doesn't exists\")\n else:\n flash(\"Emails don't match\")\n \n return render_template('emailsend.html')", "def email_user(to_email, password=None, token=None):\n try:\n if password and token:\n raise Exception('No email has been sent. Both token and password is set.')\n mail = Mail(APP)\n if to_email and password:\n message = Message(\n 'Resela+ - Welcome!',\n sender=APP.iniconfig.get('flask', 'mail_username'),\n recipients=[to_email]\n )\n message.body = 'Greetings,\\nYour password: ' + password + \\\n '\\n\\nWhen you first log in to the system remember to change the ' \\\n 'password in settings.\\n\\n' + \\\n flask.url_for('default.index', _external=True) + \\\n '\\n\\nKind regards,\\nThe ReSeLa+ Group'\n elif to_email and token:\n message = Message(\n 'Resela+ - Reset password request, link valid for 10 minutes',\n sender=APP.iniconfig.get('flask', 'mail_username'),\n recipients=[to_email]\n )\n message.body = 'Greetings, \\nYou have requested to reset you password on ' \\\n 'ReSeLa+. Follow the link to complete the password reset ' \\\n 'process. 
\\n\\n' + \\\n flask.url_for('account.reset_password', _external=True,\n token=token) + \\\n '\\n\\nKind regards,\\nThe ReSeLa+ group'\n elif to_email:\n message = Message(\n 'Resela+ - Confirmation password reset',\n sender=APP.iniconfig.get('flask', 'mail_username'),\n recipients=[to_email]\n )\n message.body = 'Greetings,\\nYour password has now been reset. Log in to ' \\\n 'ReSeLa+:\\n\\n' + flask.url_for('default.index', _external=True) + \\\n '\\n\\nIf you did not make this request, please contact your ' \\\n 'ReSeLa+ administrator.\\n\\nKind regards,\\nThe ReSeLa+ Group'\n else:\n raise Exception('No email has been sent. Invalid parameters.')\n mail.send(message)\n except Exception as error:\n print(error)", "def reset_password():\n form = ResetPassword()\n if form.validate_on_submit():\n user_email = form.email.data\n mail_exist = db.check_email(user_email)\n if mail_exist is not None:\n new_password = generate_password()\n new_password_hash = generate_password_hash(new_password)\n username = mail_exist['username']\n db.update_password_username(username, new_password_hash)\n flash('Your new password has been sent to your mailbox')\n redirect('login')\n # send_password_reset_email(user_email, new_password)\n return redirect(url_for('login'))\n else:\n flash('This email address is not registered')\n return redirect('reset_password')\n return render_template('resetpassword.html', form=form)", "def reset_password_request():\n form = ResetPasswordRequestForm()\n if form.validate_on_submit():\n try:\n user = User.query.filter_by(email=form.email.data).first_or_404()\n except Exception:\n flash('This Email ID is Not Registered', 'error')\n return render_template('password_reset_request.html',\n form=form), 400\n\n if user:\n send_password_reset_email(user)\n flash('Please check your email for a password reset link.',\n 'success')\n return render_template('post_pass_reset_request.html',\n title=\"Reset Password\")\n else:\n flash(\n 'Your email address must be confirmed \\\n before attempting a password reset.',\n 'error')\n return redirect(url_for('auth.login'))\n\n return render_template('password_reset_request.html', form=form), 400", "def request_password_reset_token():\n j = request.get_json(force=True)\n user_requested = j['user'].lower()\n\n # Disabled user accounts can not request for a new password.\n target_user = User.query.filter_by(mail=user_requested).first()\n\n if target_user is None:\n return Errors.UNKNOWN_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n if target_user.state == StateType.DEACTIVATED:\n return Errors.DEACTIVATED_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n target_user.generate_password_request_token()\n\n send_mail(target_user.mail, render_template(\"password/reset_password_mail.txt\",\n greeting=get_opening_greeting(target_user),\n wlink=\"{}/password/reset/{}\".format(\n app.config['BUZZN_BASE_URL'],\n target_user.password_reset_token\n )), 'Passwort zurücksetzen für Buzzn-App')\n\n db.session.commit()\n return '', status.HTTP_201_CREATED", "def send_token(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Password Reset\"\n url = \"https://www.eecis.udel.edu/accounts/reset_password/token/%s\" % user.token\n message = \"A request has been made for a password reset for your ECE/CIS %s account: %s\\n\\n\" % (domain, user.username)\n message += \"To reset your password, please visit the follow the reset link below:\\n\\n%s\\n\" % url\n message += \"This token will expire 
30 minutes after the initial request was made\\n\\n\"\n message += \"If this is not your ECE/CIS username, or you did not request a password reset, please\\n\"\n message += \"submit a Help Request at https://www.eecis.udel.edu/helprequest\\n\\nECE/CIS Labstaff\"\n\n send('account@eecis.udel.edu', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)", "def send_password_mail(user_name, password):\n from databoard.db_tools import send_password_mail\n send_password_mail(user_name, password)", "def action_wx_user_reset_password(self):\n # prepare reset password signup\n create_mode = bool(self.env.context.get('create_user'))\n\n # no time limit for initial invitation, only for reset password\n expiration = False if create_mode else now(days=+1)\n\n self.mapped('partner_id').signup_prepare(signup_type=\"reset\", expiration=expiration)\n\n # send email to users with their signup url\n template = False\n if create_mode:\n try:\n template = self.env.ref('auth_signup.set_password_email', raise_if_not_found=False)\n except ValueError:\n pass\n if not template:\n template = self.env.ref('auth_signup.reset_password_email')\n assert template._name == 'mail.template'\n\n template_values = {\n 'email_to': '${object.email|safe}',\n 'email_cc': False,\n 'auto_delete': True,\n 'partner_to': False,\n 'scheduled_date': False,\n }\n template.write(template_values)\n\n for user in self:\n with self.env.cr.savepoint():\n if not user.wx_user_id:\n raise UserError(\"用户没有绑定微信,不能发送微信重置密码\")\n logging.info(\"密码重置OK.\")\n self.wx_reset_password(user)\n # template.with_context(lang=user.lang).send_mail(user.id, force_send=True, raise_exception=True)\n _logger.info(\"Password reset email sent for user <%s> to <%s>\", user.login, user.email)", "def handle_emails():\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. 
{}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201", "def reset_password():\n json_data = request.get_json()\n user_email = json_data.get('email') or None\n\n if user_email is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n user_account = db.session.query(UserAccount).filter(\n UserAccount.email == user_email).first()\n if user_account is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n # Generate password hash\n temp_password = str(random.randint(10000,99999))\n update_user = {'password_hashed': get_hashed_password(temp_password)}\n user_account.update(**update_user)\n user_account.save()\n\n email.send('reset_password', user_email, temp_password)\n\n return {'status_code': 200, 'message': 'Password reset success!'}", "def test_reset_password_email(self, send_email):\r\n\r\n good_req = self.request_factory.post('/password_reset/', {'email': self.user.email})\r\n good_resp = password_reset(good_req)\r\n self.assertEquals(good_resp.status_code, 200)\r\n obj = json.loads(good_resp.content)\r\n self.assertEquals(obj, {\r\n 'success': True,\r\n 'value': \"('registration/password_reset_done.html', [])\",\r\n })\r\n\r\n (subject, msg, from_addr, to_addrs) = send_email.call_args[0]\r\n self.assertIn(\"Password reset\", subject)\r\n self.assertIn(\"You're receiving this e-mail because you requested a password reset\", msg)\r\n self.assertEquals(from_addr, settings.DEFAULT_FROM_EMAIL)\r\n self.assertEquals(len(to_addrs), 1)\r\n self.assertIn(self.user.email, to_addrs)\r\n\r\n #test that the user is not active\r\n self.user = User.objects.get(pk=self.user.pk)\r\n self.assertFalse(self.user.is_active)\r\n re.search(r'password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/', msg).groupdict()", "def reset_request():\n if current_user.is_authenticated:\n return redirect('/home')\n form = RequestResetForm()\n if form.validate_on_submit():\n staff = Staff.query.filter_by(email=form.email.data).first()\n send_reset_email(staff)\n flash('An email has been sent with instructions to reset your password.', 'info')\n return redirect(url_for('login'))\n return render_template('reset_request.html', title='Reset Password',\n form=form)", "def forgot_passwd(request):\n dc_settings = request.dc.settings\n\n return password_reset(\n request,\n template_name='gui/accounts/forgot.html',\n email_template_name='gui/accounts/forgot_email.txt',\n subject_template_name='gui/accounts/forgot_subject.txt',\n password_reset_form=partial(ForgotForm, request),\n post_reset_redirect=reverse('forgot_done'),\n from_email=dc_settings.DEFAULT_FROM_EMAIL,\n current_app='gui',\n extra_context={\n 'e_site_name': dc_settings.SITE_NAME,\n 'e_site_link': dc_settings.SITE_LINK,\n })", "def send_restore_password_email(user_pk):\n user = User.objects.get(pk=user_pk)\n type = 'restore_password'\n token = token_generation(user, type)\n subject = 'Update your password'\n from_email = 'Facebook <Facebook.com>'\n content = render_to_string(\n 'users/restore_password.html', {'token': token, 'user': user})\n msg = EmailMultiAlternatives(subject, content, from_email, [user.email])\n msg.attach_alternative(content, 'text/html')\n msg.send()", "def password_reset_token_created(sender, reset_password_token, *args, **kwargs):\n # send an e-mail to the user\n context = {\n 'current_user': reset_password_token.user,\n 'username': reset_password_token.user.username,\n 'email': reset_password_token.user.email,\n # ToDo: The URL 
can (and should) be constructed using pythons built-in `reverse` method.\n 'reset_password_url': \"http://some_url/reset/?token={token}\".format(token=reset_password_token.key)\n }\n\n # render email text\n email_html_message = render_to_string('email/user_reset_password.html', context)\n email_plaintext_message = render_to_string('email/user_reset_password.txt', context)\n\n msg = EmailMultiAlternatives(\n # title:\n \"Password Reset for {title}\".format(title=\"Some website title\"),\n # message:\n email_plaintext_message,\n # from:\n \"noreply@somehost.local\",\n # to:\n [reset_password_token.user.email]\n )\n msg.attach_alternative(email_html_message, \"text/html\")\n msg.send()", "def _request_reset(self, email):\n response = self.client.post(reverse('users.send_password_reset'),\n {'email': email})\n return response.context['token']", "def post(self):\n try:\n body = request.get_json()\n bearer = request.headers.get('Authorization')\n base_url = request.url_root\n token = bearer.split()[1]\n password = body.get('password')\n\n if not token or not password:\n raise SchemaValidationError\n\n user_id = decode_token(token)['sub']['user_id']\n\n user = User.objects.get(id=user_id)\n\n user.modify(password=password)\n user.hash_password()\n user.save()\n\n return send_email('[Unboxit] Password reset successful',\n sender='contact@tsantos.dev',\n recipients=[user.email],\n text_body='Password Reset',\n html_body=render_template(\n 'components/reset_password_response.html',\n first_name=user.first_name,\n base_url=base_url))\n\n except SchemaValidationError:\n raise SchemaValidationError\n except ExpiredSignatureError:\n raise ExpiredTokenError\n except (DecodeError, InvalidTokenError):\n raise BadTokenError\n except Exception as e:\n raise InternalServerError", "def reset_password():\n pass", "def send_recovery_email(app: Flask, token: str, email: str) -> None:\n mail = Mail(app)\n with open(\"api/mailer/templates/invite.html\", \"r\") as f:\n template = Template(f.read())\n\n msg = Message(\"Account Recovery\", sender=\"App Admin\", recipients=[email])\n\n msg.html = template.render(\n url=f\"{FRONTEND_URL}/recovery/{email}/{token}\",\n title=\"OSUMC Cultural Awareness App Admin Recovery Email\",\n link_caption=\"Click the following link to recover your account\",\n header=\"Recover your Account\",\n action=\"Recover Account\",\n )\n\n mail.send(msg)", "def send_email_reminder(admSessionID, login, subject=\"PMA.core password reminder\"):\n reminderParams = {\"username\": login, \"subject\": subject, \"messageTemplate\": \"\"}\n url = _pma_admin_url(admSessionID) + \"EmailPassword\"\n reminderResponse = _pma_http_post(url, reminderParams)\n return reminderResponse", "def reset_password(token):\n if current_user.is_authenticated:\n return redirect(url_for('main.index'))\n user = User.verify_reset_password_token(token)\n if not user:\n return redirect(url_for('main.index'))\n form = ResetPasswordForm()\n if form.validate_on_submit():\n user.set_password(form.password.data)\n user.email_confirmed = True\n db.session.commit()\n return render_template(\n 'successful_pass_reset.html', title=\"Password Reset\")\n return render_template('reset_password.html', title=\"Password Reset\",\n form=form), 417", "def save(\n self,\n domain_override=None,\n subject_template_name=\"registration/password_reset_subject.txt\",\n email_template_name=\"registration/password_reset_email.html\",\n use_https=False,\n token_generator=default_token_generator,\n from_email=None,\n request=None,\n 
html_email_template_name=None,\n extra_email_context=None,\n ):\n from django.core.mail import send_mail\n\n email = self.cleaned_data[\"email\"]\n active_users = User._default_manager.filter(email__iexact=email, is_active=True)\n for user in active_users:\n # Make sure that no email is sent to a user that actually has\n # a password marked as unusable\n if not user.has_usable_password():\n continue\n from_email = settings.DEFAULT_FROM_EMAIL or from_email\n\n base_url = get_base_url()\n parsed = urllib.parse.urlparse(base_url)\n domain = parsed.netloc\n protocol = parsed.scheme\n\n kbsite = models.KegbotSite.get()\n site_name = kbsite.title\n c = {\n \"email\": user.email,\n \"site_name\": site_name,\n \"uid\": urlsafe_base64_encode(force_bytes(user.pk)),\n \"user\": user,\n \"token\": token_generator.make_token(user),\n \"domain\": domain,\n \"protocol\": protocol,\n }\n subject = loader.render_to_string(subject_template_name, c)\n # Email subject *must not* contain newlines\n subject = \"\".join(subject.splitlines())\n email = loader.render_to_string(email_template_name, c)\n send_mail(subject, email, from_email, [user.email])", "def send_recover_email(user):\n # generate recovery key\n recovery_key = SystemRandom().randint(0, (2<< 62)-1)\n\n # send email\n params = {'email': user.email, 'recovery_key': str(recovery_key)}\n mailer = util.mail\n message = mailer.new(to=user.email, subject=_(\"Password Recovery - Brave Collective Core Services\"))\n\n #explicitley get the text contend for the mail\n mime, content = render(\"brave.core.account.template.mail/lost.txt\", dict(params=params))\n message.plain = content\n\n #explicitley get the html contend for the mail\n mime, content = render(\"brave.core.account.template.mail/lost.html\", dict(params=params))\n message.rich = content\n\n mailer.send(message)\n\n # store key in DB\n PasswordRecovery(user, recovery_key).save()", "def password_reset(request):\n\tif not request.user.is_authenticated():\n\t\treturn django.contrib.auth.views.password_reset(request,\n template_name='usermgr/password_reset_form.html',\n email_template_name= 'usermgr/password_reset_email.html',\n post_reset_redirect='/usermgr/password_reset/done/')\n\telse:\n\t\treturn HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)", "def send_email(request):\n if \"email\" in request.DATA:\n email_addr = request.DATA[\"email\"]\n try:\n user = User.objects.get(email=email_addr)\n except User.DoesNotExist:\n return JsonResponse(\n \"Bad request - No registered user with that email\",\n status=400,\n safe=False,\n )\n\n urlsafe_chars = string.ascii_letters + string.digits + \"-_\"\n code_str = \"\".join(random.choice(urlsafe_chars) for _ in range(100))\n\n # 30 minutes from now\n expiry_time = timezone.now() + datetime.timedelta(minutes=30)\n\n # overwrite old code\n if PasswordResetCode.objects.filter(user_id=user.id).exists():\n reset_code = PasswordResetCode.objects.get(user_id=user.id)\n reset_code.delete()\n\n PasswordResetCode.objects.create(\n user_id=user.id, code=code_str, expiry=expiry_time\n )\n\n message = build_email(\n email_addr, user.id, code_str, user.first_name, user.username\n )\n send_reset_email(message)\n\n return JsonResponse(\"OK - email sent\", status=200, safe=False)\n\n return JsonResponse(\n \"Bad request - Must provide email\", status=400, safe=False\n )", "def forgot():\n form = ForgotForm()\n\n if form.validate_on_submit():\n db.session.add(form.pw_reset)\n db.session.commit()\n\n form.pw_reset.send()\n flash('A password reset link has been sent to 
your email', 'alert-success')\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n return render_template('forgot.html', form=form)", "def generateChangePasswordEmail(emailAddress, serverUrlPrefix):\n message = \"This is an automatically generated message from the Spectrum Monitoring System.\\n\"\\\n + \"Your password has been changed to value you entered into \" + str(serverUrlPrefix + \"/spectrumbrowser\") + \"\\n\"\\\n + \"If you did not originate the change password request, please contact the system administrator.\\n\"\n\n util.debugPrint(message)\n SendMail.sendMail(message, emailAddress, \"change password link\")", "def save(self, domain_override=None,\n subject_template_name='registration/password_reset_subject.txt',\n email_template_name='registration/password_reset_email.html',\n use_https=False, token_generator=default_token_generator,\n from_email=None, request=None, html_email_template_name=None,\n extra_email_context=None):\n email = self.cleaned_data[\"email\"]\n for user in self.get_users(email):\n if not domain_override:\n current_site = get_current_site(request)\n site_name = current_site.name\n domain = current_site.domain\n else:\n site_name = domain = domain_override\n context = {\n 'email': email,\n 'domain': domain,\n 'site_name': site_name,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'user': user,\n 'token': token_generator.make_token(user),\n 'protocol': 'https' if use_https else 'http',\n }\n if extra_email_context is not None:\n context.update(extra_email_context)\n self.send_mail(\n subject_template_name, email_template_name, context, from_email,\n email, html_email_template_name=html_email_template_name,\n )", "def save(self, domain_override=None,\n subject_template_name='registration/password_reset_subject.txt',\n email_template_name='registration/password_reset_email.html',\n use_https=False, token_generator=default_token_generator,\n from_email=None, request=None, html_email_template_name=None,\n extra_email_context=None):\n email = self.cleaned_data[\"email\"]\n for user in self.get_users(email):\n if not domain_override:\n current_site = get_current_site(request)\n site_name = current_site.name\n domain = current_site.domain\n else:\n site_name = domain = domain_override\n context = {\n 'email': email,\n 'domain': domain,\n 'site_name': site_name,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),\n 'user': user,\n 'token': token_generator.make_token(user),\n 'protocol': 'https' if use_https else 'http',\n **(extra_email_context or {}),\n }\n self.send_mail(\n subject_template_name, email_template_name, context, from_email,\n email, html_email_template_name=html_email_template_name,\n )", "def post(self):\n args = password_reset.parse_args()\n email = args.get('email')\n new_password = password_generator()\n\n validation_email = email_validation(email)\n if validation_email:\n return validation_email\n\n user = User.query.filter_by(email=email).first()\n if user:\n user.password = new_password\n user.save()\n response = {\n \"message\": \"Password has been reset\",\n \"status\": \"Reset password succesful!\",\n \"new_password\": new_password\n }\n return response, 200\n else:\n response = {\n 'message': 'User email does not exist, Please try again',\n 'status': 'Reset password failed!'\n }\n return response, 400", "def reset_password_email(request):\n if request.method == 'POST' :\n try:\n print(request.POST)\n user = models.UserProfile.objects.get(email=request.POST.get('email',''))\n 
current_site=get_current_site(request)\n email_subject='Password Reset'\n message=render_to_string('reset_password.html',{\n 'user':user,\n 'domain':current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(user.id)),\n 'token':account_activation_token.make_token(user),\n })\n to_email= user.email\n email= EmailMessage(email_subject,message,to=[to_email])\n email.send()\n return JsonResponse(\n {\n \"status\":\"The Reset password email has been sent.\"\n }\n )\n except(TypeError, ValueError, OverflowError, models.UserProfile.DoesNotExist):\n user = None\n return JsonResponse(\n {\n \"status\":\"No matching account found\"\n }\n )\n else :\n return JsonResponse(\n {\n \"status\":\"only post method is available\"\n }\n )", "def resetPassword(self, email):\n\t\turl = \"https://habitica.com/api/v3/user/auth/reset-password\"\n\t\tpayload ={\"email\": email}\n\t\treturn(postUrl(url, self.credentials, payload))", "def password_reset(request):\n try:\n with transaction.atomic():\n try:\n data = request.data\n data = validations_utils.email_validation(data) # Validates email id, it returns lower-cased email in data.\n user = validations_utils.user_validation_with_email(data['email'])\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n current_site = get_current_site(request)\n domain = current_site.domain\n key = utils.create_reset_password_key(user.email)\n utils.send_reset_password_mail(user, key, domain) # Sends an email for resetting the password.\n return Response(messages.PASSWORD_RESET_LINK_SENT, status=status.HTTP_200_OK)\n except IntegrityError:\n return Response(messages.CAN_NOT_RESET_PASSWORD, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def get(self, email):\n UserLoginService.send_password_reset_email(email=email.lower())\n return {}, 200", "def send_passkey(user):\n session = getsession()\n passkey = base64.encodestring(os.urandom(50))[:password_max_length]\n\n email_msg = MIMEText(msg_mailbody.format(bbsname=system_bbsname,\n session=session,\n user=user,\n passkey=passkey))\n email_msg['From'] = msg_mailfrom\n email_msg['To'] = user.email\n email_msg['Subject'] = msg_mailsubj.format(bbsname=system_bbsname)\n\n try:\n smtp = smtplib.SMTP(mail_smtphost)\n smtp.sendmail(msg_mailfrom, [user.email], email_msg.as_string())\n smtp.quit()\n except Exception as err:\n log.exception(err)\n echo(u'{0}'.format(err))\n return False\n\n log.info(u'Password reset token delivered '\n u'to address {0!r} for user {1!r}.'\n .format(user.email, user.handle))\n return passkey", "def send_password_mails(password_f_name):\n from databoard.db_tools import send_password_mails\n send_password_mails(password_f_name)", "def reset_post():\n if g.session:\n # User is already authenticated\n return jsonify({'redirect': url_for('index.index')})\n\n form = request.values.get('form', default='email')\n token = request.values.get('token', default='')\n email = request.values.get('email', default='')\n password = request.values.get('password', default='')\n\n if form == 'password':\n try:\n user: User = db.session.query(User) \\\n .filter((User.password_token == token) & User.reset_active) \\\n .one()\n if user.is_reset_expired():\n return jsonify({'success': False, 'reason': 'expired'}), 401\n\n if len(password) < 8:\n return jsonify({'success': False, 'reason': 'password'}), 401\n\n user.set_password(password)\n db.session.commit()\n next_url = url_for('auth.reset_status', success=True)\n return jsonify({'success': True, 'redirect': next_url})\n except 
NoResultFound:\n return jsonify({'success': False, 'reason': 'token not found'}), 401\n else:\n try:\n user: User = db.session.query(User) \\\n .filter(User.email == email).one()\n user.reset_password()\n db.session.commit()\n\n reset_url = urllib.parse.urljoin(\n request.host_url,\n url_for('auth.reset_get', token=user.password_token))\n kwargs = {\n 'subject': gettext('Reset Password'),\n 'body': reset_url,\n 'recipients': [user.email]\n }\n mail.send_mail(**kwargs)\n next_url = url_for('auth.reset_status', sent=True)\n return jsonify({'success': True, 'redirect': next_url})\n except NoResultFound:\n return jsonify({'success': False, 'reason': 'email'}), 401", "def _send_email_key(user):\n subject = _(\"Recover your %(site)s account\") % {'site': settings.APP_SHORT_NAME}\n data = {\n 'validation_link': settings.APP_URL + \\\n reverse(\n 'user_account_recover',\n kwargs={'key':user.email_key}\n )\n }\n template = get_template('authenticator/email_validation.txt')\n message = template.render(data)\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email])", "def send_token(email, token):\n with open(\"templates/email_password_recovery.txt\", mode=\"r\") as file_pointer:\n string = file_pointer.read()\n\n string = string % (token, email, token)\n sendemail.send_email(email, \"Skvaderhack Password Recovery\", string, \"baron@skvaderhack.xyz\")", "def save(self, domain_override=None,\r\n subject_template_name='registration/password_reset_subject.txt',\r\n email_template_name='registration/password_reset_email.html',\r\n use_https=False, token_generator=default_token_generator,\r\n from_email=None, request=None):\r\n from django.core.mail import send_mail\r\n UserModel = get_user_model()\r\n email = self.cleaned_data[\"email\"]\r\n username = self.cleaned_data[\"username\"]\r\n user = User.objects.get(username__exact=username)\r\n\r\n if user.is_active and user.has_usable_password():\r\n # Make sure that no email is sent to a user that actually has\r\n # a password marked as unusable\r\n if not domain_override:\r\n current_site = get_current_site(request)\r\n site_name = current_site.name\r\n domain = current_site.domain\r\n else:\r\n site_name = domain = domain_override\r\n c = {\r\n 'email': user.email,\r\n 'domain': domain,\r\n 'site_name': site_name,\r\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\r\n 'user': user,\r\n 'token': token_generator.make_token(user),\r\n 'protocol': 'https' if use_https else 'http',\r\n }\r\n subject = loader.render_to_string(subject_template_name, c)\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n email = loader.render_to_string(email_template_name, c)\r\n send_mail(subject, email, from_email, [user.email])", "def login_resetrequest():\n if request.method == \"GET\":\n # In browser request that user wants to reset the password\n return flask.render_template('reset-request.html', message=\"Please reset the password\")\n\n if request.method == \"POST\":\n # Create a token\n email = flask.request.form[\"email\"]\n\n # Find if an account with that name exists\n conn.register([model.User])\n admindb = conn[current_app.config[\"CONFIGDB\"]]\n\n userdoc = admindb[\"users\"].User.find_one({\"name\" : email, \"type\" : \"passwd\"})\n if userdoc == None:\n # user not found\n return flask.Response('{\"error\" : \"User not found\"}')\n\n # First reset the password\n name = userdoc[\"label\"]\n emailto = userdoc[\"name\"]\n\n # Create accout and a random tocken\n userdoc[\"token\"] = bson.ObjectId()\n 
userdoc[\"password_status\"] = \"reset-request\"\n\n # May only be useful for some\n if \"password_ready\" in userdoc:\n del userdoc[\"password_ready\"]\n\n userdoc.validate()\n userdoc.save()\n\n # Create email\n emailfrom = current_app.config[\"EMAIL_FROM\"] \n\n body = \"Hello \" + name + \",\\n\\n\"\n body = body + \"You recently requested a password reset for your account at https://slide-atlas.org.\"\n body = body + \"\\n To complete the request operation please follow the link below- \\n\"\n body = body + \"\\n \" + url_for('.login_confirm', _external=True) + \"?token=\" + str(userdoc[\"token\"]) + \" \\n\"\n body = body + \"\\nIf clicking on the link doesn't work, try copying and pasting it into your browser.\\n\"\n body = body + \"\\nThis link will work only once, and will let you create a new password. \\n\"\n body = body + \"\\nIf you did not request password reset, please disregard this message.\\n\"\n body = body + \"\\nThank you,\\nThe SlideAtlas Administration Team\\n\"\n\n # Create a text/plain message\n msg = MIMEText(body)\n\n # me == the sender's email address\n # you == the recipient's email address\n msg['Subject'] = 'Password reset confirmation for slide-atlas.org'\n msg['From'] = emailfrom\n msg['To'] = emailto\n print msg\n s = smtplib.SMTP(current_app.config[\"SMTP\"])\n try:\n out = s.sendmail(emailfrom, [emailto], msg.as_string())\n except:\n return flask.Response(\"{\\\"error\\\" : \\\"Error sending email\\\"}\")\n\n s.quit()\n return flask.Response(\"{\\\"success\\\" : \\\"\" + str(out) + \"\\\"}\")", "def test_send_email(self):\n self.register()\n response = self.client.post(self.password_reset_url,\n self.email,\n format=\"json\")\n self.assertEqual(response. status_code, status.HTTP_200_OK)\n self.assertEqual(json.loads(response.content), {'message':\n 'Successfully sent.Check your email'})", "def send_reminder():\n\n name = config[\"email\"][\"name\"]\n user = config[\"email\"][\"user\"]\n subject = \"REMINDER: %s\" % sys.argv[1]\n body = sys.argv[2] if len(sys.argv) > 2 else \"\"\n email_helper.send(user, name, user, subject, body)", "def password_reset(self, password, vtoken, welcomeEmailTemplate = ''):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret() + '&vtoken=' + vtoken\n payload = {'password': password}\n url = SECURE_API_URL + \"raas/v1/account/password/reset\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)", "def save(self, domain_override=None,\n subject_template_name='password_reset_subject.txt',\n email_template_name='password_reset_email.html',\n use_https=False, token_generator=default_token_generator,\n from_email=None, request=None):\n from django.core.mail import send_mail\n for user in self.users_cache:\n if not domain_override:\n current_site = get_current_site(request)\n site_name = current_site.name\n domain = current_site.domain\n else:\n site_name = domain = domain_override\n c = {\n 'email': user.email,\n 'domain': domain,\n 'site_name': site_name,\n 'uid': int_to_base36(user.pk),\n 'user': user,\n 'token': token_generator.make_token(user),\n 'protocol': use_https and 'https' or 'http',\n }\n subject = loader.render_to_string(subject_template_name, c)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n email = loader.render_to_string(email_template_name, c)\n send_mail(subject, email, from_email, [user.email])", "def test_45_password_reset_link(self):\r\n res = self.app.post('/account/forgot-password',\r\n data={'email_addr': 
self.user.email_addr},\r\n follow_redirects=True)\r\n assert (\"We don't have this email in our records. You may have\"\r\n \" signed up with a different email or used Twitter, \"\r\n \"Facebook, or Google to sign-in\") in res.data\r\n\r\n self.register()\r\n self.register(name='janedoe')\r\n self.register(name='google')\r\n self.register(name='facebook')\r\n jane = User.query.get(2)\r\n jane.twitter_user_id = 10\r\n google = User.query.get(3)\r\n google.google_user_id = 103\r\n facebook = User.query.get(4)\r\n facebook.facebook_user_id = 104\r\n db.session.add_all([jane, google, facebook])\r\n db.session.commit()\r\n with mail.record_messages() as outbox:\r\n self.app.post('/account/forgot-password',\r\n data={'email_addr': self.user.email_addr},\r\n follow_redirects=True)\r\n self.app.post('/account/forgot-password',\r\n data={'email_addr': 'janedoe@example.com'},\r\n follow_redirects=True)\r\n self.app.post('/account/forgot-password',\r\n data={'email_addr': 'google@example.com'},\r\n follow_redirects=True)\r\n self.app.post('/account/forgot-password',\r\n data={'email_addr': 'facebook@example.com'},\r\n follow_redirects=True)\r\n\r\n assert 'Click here to recover your account' in outbox[0].body\r\n assert 'your Twitter account to ' in outbox[1].body\r\n assert 'your Google account to ' in outbox[2].body\r\n assert 'your Facebook account to ' in outbox[3].body\r\n\r\n # Test with not valid form\r\n res = self.app.post('/account/forgot-password',\r\n data={'email_addr': ''},\r\n follow_redirects=True)\r\n msg = \"Something went wrong, please correct the errors\"\r\n assert msg in res.data, res.data", "def save(self, domain_override=None,\n subject_template_name='registration/password_reset_subject.txt',\n email_template_name='registration/password_reset_email.html',\n txt_email_template_name='registration/password_reset_email.txt',\n use_https=False, token_generator=default_token_generator,\n from_email=None, request=None):\n for user in self.users_cache:\n if not domain_override:\n current_site = get_current_site(request)\n site_name = current_site.name\n domain = current_site.domain\n else:\n site_name = domain = domain_override\n c = {\n 'email': user.email,\n 'domain': domain,\n 'site': site_name,\n 'uid': int_to_base36(user.id),\n 'user': user,\n 'token': token_generator.make_token(user),\n 'protocol': use_https and 'https' or 'http',\n }\n subject = loader.render_to_string(subject_template_name, c)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n \n text_content = render_to_string('registration/activation_email.txt', c)\n \n utils.send_mail(email_template_name, c, subject, text_content, \n settings.DEFAULT_FROM_EMAIL, [user.email,], None)", "def get(self, request, email=None):\n\n user = User.objects.filter(email=request.GET.get('email'))\n\n if user.count() == 1 and user.first() is not None:\n user = user.first()\n\n random_password = User.objects.make_random_password()\n user.set_password(random_password)\n user.save()\n\n message = \"\"\"Olá,\\nSua senha foi resetada, acesse a plataforma\n no link http://127.0.0.1/user/password e troque a\n senha\\nSua nova senha é:\\n {}\\nAtenciosamente,\n \\nEquipe Dream Rich.\"\"\".format(random_password)\n\n email = EmailMessage('Password reset',\n message, to=[user.email])\n email.send()\n\n return Response(dumps({'detail': 'email sent'}), status=200)\n\n return Response(dumps({'detail': 'user not found'}), status=404)", "def forgot_password(self, version):\n form=cgi.FieldStorage(\n 
fp=self.rfile,\n headers=self.headers,\n environ={'REQUEST_METHOD':'POST','CONTENT_TYPE':self.headers['Content-Type'],}\n )\n version=version.split('/')[0]\n host = self.headers['Host']\n\n data={'email':form['email'].value}\n user = UserServices()\n response_data = user.forgot(data,host,version)\n return response_data", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def init_reset_pw(email: str) -> FluxData:\n current_app.logger.info(f'Trying to send password reset email to {email}')\n try:\n send_password_reset_mail(email)\n except BadCode as error:\n current_app.logger.error(f'Sending password reset e-mail for {email} failed: {error}')\n return error_response(message=error.msg)\n\n return success_response(message=ResetPwMsg.send_pw_success)", "def reset_password(token):\n\n if not current_user.is_anonymous():\n return redirect(url_for(\"forum.index\"))\n\n form = ResetPasswordForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n expired, invalid, data = user.verify_reset_token(form.token.data)\n\n if invalid:\n flash((\"Your password token is invalid.\"), \"danger\")\n return redirect(url_for(\"auth.forgot_password\"))\n\n if expired:\n flash((\"Your password is expired.\"), \"danger\")\n return redirect(url_for(\"auth.forgot_password\"))\n\n if user and data:\n user.password = form.password.data\n user.save()\n flash((\"Your password has been updated.\"), \"success\")\n return redirect(url_for(\"auth.login\"))\n\n form.token.data = token\n return render_template(\"auth/reset_password.html\", form=form)", "def user_password_reset(self, request):\n reset_password_form = ResetPasswordForm(request.form)\n\n if request.method == \"POST\":\n if reset_password_form.validate_on_submit():\n if check_password_hash(current_user.password, reset_password_form.old_password.data):\n new_hashed_password = generate_password_hash(reset_password_form.password.data)\n\n temp = current_user.get_id()\n (role, email) = temp.split(\":\")\n\n # if first element is `sysadmin` instead of a scheme_id\n # call function to reset `sysadmin` pass\n if role == \"sysadmin\":\n self._scheme_handler.update_hash_password(email, new_hashed_password)\n else:\n # regular user reset\n self._student_handler.update_hash_password(current_user.scheme_id, current_user.k_number, new_hashed_password)\n\n flash(\"Password successfully updated\")\n else:\n flash(\"Old password incorrect\")\n else:\n flash(\"Please double check your new password is valid.\")\n \n return render_template(\"user/reset_password.html\", reset_password_form=reset_password_form)", "def test_request_password_reset(live_server, mailoutbox, settings):\n reset_url_template = \"http://localhost/reset-password/{key}\"\n settings.EMAIL_AUTH = {\"PASSWORD_RESET_URL\": reset_url_template}\n\n user = get_user_model().objects.create_user(username=\"Test User\")\n email = models.EmailAddress.objects.create(\n address=\"test@example.com\", is_verified=True, user=user\n )\n\n data = {\"email\": email.address}\n url = f\"{live_server}/rest/password-reset-requests/\"\n response = requests.post(url, data)\n\n assert response.status_code == 201\n assert response.json() == data\n assert len(mailoutbox) == 1\n\n msg = mailoutbox[0]\n reset = models.PasswordReset.objects.get()\n\n assert msg.to == [data[\"email\"]]\n assert reset_url_template.format(key=reset.token) in msg.body", "def resetPassword(self, customerguid, password, jobguid=\"\", 
executionparams=None):", "def forgotpassword(request):\n if request.method == 'GET':\n return render(request, 'app/other/forgot_password.html', {'title':'Forgot Password?',})\n elif request.method == 'POST':\n username = request.POST['username']\n\n if User.objects.filter(username = username).exists():\n user = User.objects.get(username = username)\n if Referee.objects.filter(user = user).exists():\n referee = Referee.objects.get(user = user)\n # generate token\n passwordResetTokenGenerator = PasswordResetTokenGenerator()\n token = PasswordResetTokenGenerator.generate_token(passwordResetTokenGenerator, str(user.id))\n token = str(token.decode('utf-8'))\n # email to referee\n subject = \"[Password Reset Link]\"\n message = 'http:////localhost:8000//reset//token=//' + token\n content = \"<br>Dear sir,</br><br></br><br></br>Link is: \"+message+'. Please click on the link to change the credentials.'+\"<br></br><br></br>Regards,<br></br>PhDPortal.\"\n email = []\n receiver = referee.user\n email.append(receiver.email)\n send_email_task.delay(email, subject, content)\n # redirect to same page with status to check your mail and click on activation link\n \n dict = {'status' : 'Done', 'message' : 'An Activation link has been sent to your mail-id'}\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else: # given username is not valid to use this feature\n dict = {'status': 'Error', 'message' : 'You are not Authorized to change password'}\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else: # given username is not valid to use this feature\n dict = {'status': 'Error', 'message' : 'Invalid Username, Try Again!'}\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def requestPwdReminder(self, email=None, acctName=None):\n assert acctName or email\n assert not (acctName and email)\n data = {}\n if email is not None:\n data['email'] = email\n else:\n data['accountName'] = acctName\n return self.talk('forgotPassword', data)", "async def password_link_generate(mail: TextData, background_tasks: BackgroundTasks):\n email = mail.data\n mail, subject, body = await AccountProcessor.send_reset_link(email.strip())\n background_tasks.add_task(Utility.validate_and_send_mail, email=mail, subject=subject, body=body)\n return {\"message\": \"Success! 
A password reset link has been sent to your mail id\"}", "def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")", "def password_reset(request):\n host = settings.TACC_USER_PORTAL_HOST\n return redirect(f\"{host}/password-reset?{urlencode(request.GET)}\")", "def send_new_email(user):\n token = user.get_token()\n message = Message(\n 'Verify Your New Email',\n sender='storcwebsite@gmail.com',\n recipients=[user.temp_email])\n message.body = f\"The email address associated with your Storc \" \\\n f\"account has changed.\\n\\nTo verify your new email address, \" \\\n f\"please click the link below:\\n\\n\" \\\n f\"{url_for('users.new_email', token=token, _external=True)}\"\n mail.send(message)", "def send_email(sender: str, password: str) -> None:\r\n\r\n # Check for a email and password\r\n if not sender or not password:\r\n print('A recipent or password was not supplied')\r\n exit(1)\r\n \r\n initialize_server()\r\n\r\n webserver.login(sender, password)\r\n webserver.send_message(message)\r\n webserver.quit()", "def test_user_can_reset_password(self):\n # first register a user\n self.register_user()\n # registered user forgets password \n # its a post request with the user email as the payload\n response = self.client().post(AuthTestCase.reset, data=self.reset_email)\n self.assertIn(\"Password successfully reset.Check email for new password\", str(response.data))", "def forgot_req(request):\n server = request.META['SERVER_NAME']\n recover_url = urljoin(full_url(request), 'recover')\n\n if request.POST and not request.user.is_authenticated():\n\ttry:\n\t username_or_email = request.POST['username']\n\texcept KeyError:\n\t pass\n\telse:\n\t if '@' in username_or_email:\n\t\tqs = User.objects.filter(email = username_or_email)\n\t else:\n\t\tqs = User.objects.filter(username = username_or_email)\n\n\t users = []\n\t user = None\n\n\t for user in qs:\n\t\tquery = 'salt=%s&user=%s' % (urlsafe_b64encode(urandom(8)),\\\n\t\t\t\t\t user.username)\n\t\turl = add_encrypted_query_string(recover_url, query,\n\t\t\t\t\t\t settings.SECRET_KEY)\n\n\t\turl = sign_query_string(settings.SECRET_KEY + user.password,\n\t\t\t\t\turl)\n\n\t\tusers.append(dict(username = user.username, url = url))\n\n\t template = get_template('registration/recover-password.txt')\n\t context = Context(dict(users = users, ApplianceName = server))\n\n\t if len(users) == 1:\n\t\tplural = ''\n\t else:\n\t\tplural = 's'\n\n\t if user:\n\t\tuser.email_user(subject = \"Your %s console account%s\" % (server, plural),\n\t\t\t\tfrom_email = FROM_EMAIL,\n\t\t\t\tmessage = template.render(context))\n\n\t return HttpResponseRedirect('sent')\n\n return render_to_response('registration/forgotten.html',\n\t\t\t dict(username=request.GET.get('username', ''),\n META=request.META, root=settings.ROOT_URL,\n media=settings.MEDIA_URL))", "def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)", "def ask_password_reset(request):\n output_data = {}\n\n # Here we do not send a JSON answer based on success or failure\n # in order to prevent attackers from knowing if email exists in db or not.\n\n if request.method == 'POST':\n\n email = request.POST.get('email')\n\n if not email:\n output_data['error_code'] = '1'\n output_data['error_details'] = errors_for_dev['1']\n return JsonResponse(\n output_data,\n status=status.HTTP_400_BAD_REQUEST\n )\n\n email = email.lower()\n\n try:\n user = User.objects.get(email=email)\n except exceptions.ObjectDoesNotExist:\n return 
JsonResponse(output_data)\n\n signer = TimestampSigner()\n timestamped_id = signer.sign(user.id)\n\n password_reset_url = \"%s%s\" % (\n settings.SITE_BASE_URL,\n reverse(set_new_password, args=(timestamped_id,))\n )\n\n send_password_reset_email(email, password_reset_url)\n\n return JsonResponse(output_data)\n\n else:\n\n output_data['error_code'] = '8'\n output_data['error_details'] = errors_for_dev['8']\n return JsonResponse(\n output_data,\n status=status.HTTP_400_BAD_REQUEST\n )", "def link(self):\n return f\"https://{DOMAIN}/password-reset/{self.code}\"", "def post(self, request, token):\n form = PasswordResetForm(request.DATA)\n if form.is_valid():\n user_data = get_user_data(\n signing.loads(\n token,\n max_age=self.token_expires,\n salt=self.salt))\n if user_data:\n user_data.set_password(request.DATA['password1'])\n user_data.save()\n return render_to_response(\n 'registration/show_message.html',\n {\n 'title': \"Change successfully\",\n 'message': \"your password has Change successfully\"})\n return render_to_response(\n 'registration/show_message.html',\n {\n 'title': \"Sorry something wrong\",\n 'message': \"sorry try again to set new password\"})\n return render_to_response(\n 'registration/show_message.html',\n {\n 'title': \"Sorry something wrong\",\n 'message': \"sorry try again to set new password\"})", "def send_feedback_email_task(subject, message, sender, reciever):\n logger.info(\"Reminder email\")\n return send_reminder_mail(subject, message, sender, reciever)", "def reset_password(self, email,new_password):\n for user in self.users_list:\n if user['email'] == email:\n user['password'] = new_password\n return 'password reset was succesfull'\n continue\n return \"email provided does not match any user\"", "def send_recovery_email(self, user: User, user_token: UserRecoveryToken) -> NoReturn:\n if not self.sendgrid_email or not self.sendgrid_api_key:\n self.logger.error(\"Failed to send recovery token to %s\" % user.get_email())\n return\n from_email = Email(self.sendgrid_email)\n to_email = To(user.get_email())\n subject = \"Chotuve password recovery token\"\n content = Content(\"text/plain\", \"Recovery token: %s\" % user_token.get_token())\n mail = Mail(from_email, to_email, subject, content)\n sg = SendGridAPIClient(self.sendgrid_api_key)\n try:\n sg.send(mail)\n self.logger.debug(\"Sent recovery token to %s\" % user.get_email())\n except Exception:\n self.logger.exception(\"Failed to send recovery token to %s\" % user.get_email())\n return", "def post(self, request, *args, **kwargs):\n data = request.data\n serializer = self.serializer_class(data=data)\n serializer.is_valid(raise_exception=True)\n try:\n user = get_object_or_404(User, email=data['email'])\n current_site = get_current_site(request)\n token = password_rest_token.make_token(user),\n uidb64 = urlsafe_base64_encode(force_bytes(data['email'])).decode()\n body = json.dumps({\n 'message': 'Please use the url below to rest your password,\\\n This expires after an hour, Thank you.',\n 'domain': current_site.domain + f'/api/reset/{uidb64}/{token[0]}',\n })\n from_email = settings.DEFAULT_FROM_EMAIL\n to_email = data['email']\n subject = 'Confirm Your Article Account Password Reset'\n send_mail(subject, body, from_email, [\n to_email], fail_silently=False)\n response = {\n 'message': 'Please check your email to confirm rest password',\n 'status_code': status.HTTP_200_OK}\n except Exception as e:\n response = {'error': e, 'status_code': status.HTTP_400_BAD_REQUEST}\n return Response(response, 
content_type='text/json')" ]
[ "0.8450898", "0.8287654", "0.8170258", "0.81437016", "0.8022583", "0.79395986", "0.79303086", "0.7930095", "0.78969234", "0.7764277", "0.77437997", "0.7633127", "0.75886834", "0.7538881", "0.7430109", "0.74129903", "0.7399443", "0.73773956", "0.7365607", "0.7308838", "0.73015416", "0.71950275", "0.7185756", "0.7149004", "0.7147057", "0.71308535", "0.71006036", "0.70760643", "0.7029606", "0.7025331", "0.70053005", "0.7002142", "0.69940627", "0.6989504", "0.69837016", "0.69451976", "0.6921786", "0.6905326", "0.68907624", "0.6863957", "0.68542516", "0.684461", "0.6839116", "0.6825394", "0.67983663", "0.67902637", "0.67627215", "0.6759419", "0.6756177", "0.67539406", "0.67420816", "0.66962", "0.6692783", "0.6689775", "0.6660951", "0.6647132", "0.66457516", "0.6640469", "0.66388553", "0.66329324", "0.6612879", "0.65903306", "0.6583156", "0.65314627", "0.65219617", "0.6519242", "0.6500306", "0.6477499", "0.6459348", "0.6443609", "0.6435881", "0.64290655", "0.6414933", "0.6364906", "0.63623554", "0.6335278", "0.6311475", "0.6307195", "0.6299992", "0.62964344", "0.6290789", "0.62767893", "0.6259274", "0.6249569", "0.62402767", "0.6231929", "0.62189925", "0.6203711", "0.61947185", "0.61937916", "0.6191939", "0.6189613", "0.6185633", "0.61786306", "0.6173553", "0.6166129", "0.6162736", "0.61577964", "0.61480343", "0.6147575" ]
0.76305157
12
Initializes the finger model on which control is to be performed.
def __init__(self):
    self.urdf_path = '/opt/blmc_ei/src/robot_properties_fingers/urdf/pro/trifingerpro.urdf'
    self.tip_link_names = [
        "finger_tip_link_0",
        "finger_tip_link_120",
        "finger_tip_link_240",
    ]
    self.robot_model = pinocchio.buildModelFromUrdf(self.urdf_path)
    self.data = self.robot_model.createData()
    self.tip_link_ids = [
        self.robot_model.getFrameId(link_name)
        for link_name in self.tip_link_names
    ]
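A minimal sketch of how the initialization above can be sanity-checked, assuming pinocchio is installed and the TriFinger URDF path from the document exists on disk:

    import pinocchio

    urdf_path = '/opt/blmc_ei/src/robot_properties_fingers/urdf/pro/trifingerpro.urdf'
    model = pinocchio.buildModelFromUrdf(urdf_path)
    for name in ("finger_tip_link_0", "finger_tip_link_120", "finger_tip_link_240"):
        # existFrame() is the explicit membership test; getFrameId() alone
        # returns an out-of-range index for unknown names instead of raising.
        assert model.existFrame(name), f"frame {name} missing from URDF"
        print(name, "->", model.getFrameId(name))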
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def controls_setup(self):\n pass", "def _initialize(self):\n \n self.view.lineEdit_3.setText(\"C,H,N,O,P,S\")\n self.view.spin_hit.setValue(20)\n self.view.lineEdit_2.setValue(10.)\n self.view.checkBox_8.setChecked(True)", "def initialize(self):\n self.Update()\n ViewportManager.updateAll()\n self.wxStep()\n ViewportManager.initializeAll()\n # Position the camera\n if base.trackball is not None:\n base.trackball.node().setPos(0, 30, 0)\n base.trackball.node().setHpr(0, 15, 0)\n\n # to make persp view as default\n self.perspViewMenuItem.Check()\n self.onViewChange(None, 3)\n\n # initializing direct\n if self.fStartDirect:\n base.startDirect(fWantTk = 0, fWantWx = 0)\n\n base.direct.disableMouseEvents()\n newMouseEvents = [\"_le_per_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.mouseEvents]\n base.direct.mouseEvents = newMouseEvents\n base.direct.enableMouseEvents()\n\n base.direct.disableKeyEvents()\n keyEvents = [\"_le_per_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.keyEvents]\n base.direct.keyEvents = keyEvents\n base.direct.enableKeyEvents()\n\n base.direct.disableModifierEvents()\n modifierEvents = [\"_le_per_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.modifierEvents]\n base.direct.modifierEvents = modifierEvents\n base.direct.enableModifierEvents()\n\n base.direct.cameraControl.lockRoll = True\n base.direct.setFScaleWidgetByCam(1)\n\n unpickables = [\n \"z-guide\",\n \"y-guide\",\n \"x-guide\",\n \"x-disc-geom\",\n \"x-ring-line\",\n \"x-post-line\",\n \"y-disc-geom\",\n \"y-ring-line\",\n \"y-post-line\",\n \"z-disc-geom\",\n \"z-ring-line\",\n \"z-post-line\",\n \"centerLines\",\n \"majorLines\",\n \"minorLines\",\n \"Sphere\",]\n\n for unpickable in unpickables:\n base.direct.addUnpickable(unpickable)\n\n base.direct.manipulationControl.optionalSkipFlags |= SKIP_UNPICKABLE\n base.direct.manipulationControl.fAllowMarquee = 1\n base.direct.manipulationControl.supportMultiView()\n base.direct.cameraControl.useMayaCamControls = 1\n base.direct.cameraControl.perspCollPlane = self.perspView.collPlane\n base.direct.cameraControl.perspCollPlane2 = self.perspView.collPlane2\n\n for widget in base.direct.manipulationControl.widgetList:\n widget.setBin('gui-popup', 0)\n widget.setDepthTest(0)\n\n # [gjeon] to intercept messages here\n base.direct.ignore('DIRECT-delete')\n base.direct.ignore('DIRECT-select')\n base.direct.ignore('DIRECT-preDeselectAll')\n base.direct.ignore('DIRECT-toggleWidgetVis')\n base.direct.fIgnoreDirectOnlyKeyMap = 1\n\n # [gjeon] do not use the old way of finding current DR\n base.direct.drList.tryToGetCurrentDr = False\n\n else:\n base.direct=None\n #base.closeWindow(base.win)\n base.win = base.winList[3]", "def initialize(self, model):\n pass", "def on_init(self):\n self.controller = gameController.Controller()", "def initialize_model(self):\n pass", "def InitView(self):\n\n self._DisablePlotterOptions()\n\n if (self.localModel.datasetCols == 2):\n self.view.Enable2DRadio()\n\n self.Radio2DClicked(True)\n\n if (self.localModel.datasetCols >= 3):\n self.view.Enable2DRadio()\n 
self.view.Enable3DRadio()\n self.view.Set3DSelected()\n\n self.Radio3DClicked(True)", "def initialize_default(self):\n self.initialize_navigation()\n self.initialize_viewport()", "def init_model(self):\n pass", "def __init__(self, setup=False):\n\n # Initilise pygame and joystick\n pygame.init()\n pygame.joystick.init()\n\n # Number of joysticks available\n js_count = pygame.joystick.get_count()\n \n try:\n # Return first joystick object if available, False if not\n self.js = pygame.joystick.Joystick(0)\n self.js.init()\n\n # Setup mode for finding control indices\n if setup == True:\n print \"In setup mode\"\n\n self.num_buttons = self.js.get_numbuttons()\n self.num_axes = self.js.get_numaxes()\n self.num_hats = self.js.get_numhats()\n\n print \"No. buttons: {}\".format(self.num_buttons)\n print \"No. axes: {}\".format(self.num_axes)\n print \"No. hats: {}\".format(self.num_hats)\n\n # Assign controls from joystick name\n if self.js.get_name() == \"PG-9037\" and setup == False:\n print \"Controller detected: PG-9037\"\n self.button_list, self.axis_list, self.hat_list = self.gamepad_default()\n elif setup == False:\n print \"Unfamiliar controller: Using defaults\"\n self.button_list, self.axis_list, self.hat_list = self.gamepad_default()\n\n except Exception, error:\n print \"No controllers detected\"", "def __init__(self, controller):\n super().__init__(controller)\n\n # The hovered input when entering this View.\n self.first_inp = \"s\"\n\n # Initialize selected variable.\n self.selected = None\n\n # Make background graphics.\n self.make_background_graphics()\n\n # Make Buttons.\n self.make_buttons()\n\n # Make the information box. This explains each Button.\n self.make_info_box()\n\n # Initializes popup.\n self.make_popup()\n\n # Map of input to functions.\n enter = self.graphics.ENTER_KEY\n self.controls = {\n # Pressing \"q\" will go back to the main menu.\n \"q\": lambda: Action(\"goto main menu view\", []),\n\n # Movement keys.\n \"w\": lambda: self.move_cursor(Direction.U),\n \"a\": lambda: self.move_cursor(Direction.L),\n \"s\": lambda: self.move_cursor(Direction.D),\n \"d\": lambda: self.move_cursor(Direction.R),\n\n # Repeat the last valid input.\n enter: self.repeat_last_valid_input,\n\n # Click the selected UIElement.\n \"m\": self.click\n }", "def init_controls(self):\n\n\n controls_keypress_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'a': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'q': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: self.drone.land(),\n 'p': lambda: self.palm_land_approach(),\n 'v': lambda: self.toggle_use_voice(),\n 't': lambda: self.toggle_tracking(),\n 'k': lambda: self.toggle_distance_mode(),\n 'm': lambda: self.toogle_manual_control(),\n 'Key.enter': 
lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n \n \n \n \n \n \n # '0': lambda: self.drone.set_video_encoder_rate(0),\n # '1': lambda: self.drone.set_video_encoder_rate(1),\n # '2': lambda: self.drone.set_video_encoder_rate(2),\n # '3': lambda: self.drone.set_video_encoder_rate(3),\n # '4': lambda: self.drone.set_video_encoder_rate(4),\n # '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'a': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'q': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n controls_keypress_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'q': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'a': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: self.drone.land(),\n 'p': lambda: self.palm_land(),\n 't': lambda: self.toggle_tracking(),\n 'Key.enter': lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n '0': lambda: self.drone.set_video_encoder_rate(0),\n '1': lambda: self.drone.set_video_encoder_rate(1),\n '2': lambda: self.drone.set_video_encoder_rate(2),\n '3': lambda: self.drone.set_video_encoder_rate(3),\n '4': lambda: self.drone.set_video_encoder_rate(4),\n '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'q': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'a': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n if self.kbd_layout == \"AZERTY\":\n self.controls_keypress = controls_keypress_AZERTY\n self.controls_keyrelease = controls_keyrelease_AZERTY\n else:\n self.controls_keypress = controls_keypress_QWERTY\n self.controls_keyrelease = controls_keyrelease_QWERTY\n self.key_listener = keyboard.Listener(on_press=self.on_press,\n on_release=self.on_release)\n self.key_listener.start()", "def 
init(self):\n self.connect_to_switches()\n self.reset_states()", "def init(self):\n\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.x=0\n self.y=0", "def doInitializeDevice(self):\n super().doInitializeDevice()", "def __init__(self):\n self.game_model = ScrollerModel(1280, 480)\n self.view = ScrollerView(self.game_model, 1280, 480)", "def __setup_ui_controls(self):\n self.scene.append_to_caption('\\n')\n\n # Button to reset camera\n btn_reset = button(\n bind=self.__reset_camera, text=\"Reset Camera\")\n self.__ui_controls.btn_reset = btn_reset\n self.scene.append_to_caption('\\t')\n\n chkbox_cam = checkbox(\n bind=self.__camera_lock_checkbox,\n text=\"Camera Lock\", checked=self.__camera_lock)\n self.__ui_controls.chkbox_cam = chkbox_cam\n self.scene.append_to_caption('\\t')\n\n chkbox_rel = checkbox(\n bind=self.__grid_relative_checkbox,\n text=\"Grid Relative\", checked=self.__grid_relative)\n self.__ui_controls.chkbox_rel = chkbox_rel\n self.scene.append_to_caption('\\n\\n')\n\n # Button to clear the screen\n btn_clr = button(bind=self.clear_scene, text=\"Clear Scene\")\n self.__ui_controls.btn_clear = btn_clr\n self.scene.append_to_caption('\\n\\n')\n\n # Checkbox for grid visibility\n chkbox_grid = checkbox(\n bind=self.__grid_visibility_checkbox, text=\"Grid Visibility\",\n checked=self.__grid_visibility)\n self.__ui_controls.chkbox_grid = chkbox_grid\n self.scene.append_to_caption('\\t')\n\n # Prevent the space bar from toggling the active checkbox/button/etc\n # (default browser behaviour)\n self.scene.append_to_caption('''\n <script type=\"text/javascript\">\n $(document).keyup(function(event) {\n if(event.which === 32) {\n event.preventDefault();\n }\n });\n </script>''')\n # https://stackoverflow.com/questions/22280139/prevent-space-button-from-triggering-any-other-button-click-in-jquery\n\n # Control manual\n controls_str = '<br><b>Controls</b><br>' \\\n '<b>PAN</b><br>' \\\n 'SHFT + LMB | <i>free pan</i><br>' \\\n 'W , S | <i>up / down</i><br>' \\\n 'A , D | <i>left / right</i><br>' \\\n '<b>ROTATE</b><br>' \\\n 'ARROWS KEYS | <i>rotate direction</i><br>' \\\n 'Q , E | <i>roll left / right</i><br>' \\\n '<b>ZOOM</b></br>' \\\n 'MOUSEWHEEL | <i>zoom in / out</i><br>' \\\n '<script type=\"text/javascript\">var arrow_keys_handler = function(e) {switch(e.keyCode){ case 37: case 39: case 38: case 40: case 32: e.preventDefault(); break; default: break;}};window.addEventListener(\"keydown\", arrow_keys_handler, false);</script>' # noqa\n # Disable the arrow keys from scrolling in the browser\n # https://stackoverflow.com/questions/8916620/disable-arrow-key-scrolling-in-users-browser\n self.scene.append_to_caption(controls_str)", "def init(self):\n self.focus_modes = []\n for focus_mode in self['focusModes']:\n self.focus_modes.append(\\\n {'modeName': focus_mode.modeName,\n 'lensCombination': eval(focus_mode.lensCombination),\n 'lensModes': eval(focus_mode.lensModes),\n 'size': eval(focus_mode.size),\n 'message': eval(focus_mode.message),\n 'diverg': eval(focus_mode.divergence)})\n self.focus_motors_dict = {}\n\n focus_motors = []\n focus_motors = eval(self.getProperty('focusMotors'))\n\n for focus_motor in focus_motors:\n self.focus_motors_dict[focus_motor] = []\n\n #TODO\n self.motors_groups = [self.getObjectByRole(\"P14ExpTbl\"),\n self.getObjectByRole(\"P14KB\"),\n self.getObjectByRole(\"P14DetTrans\"),\n self.getObjectByRole(\"P14BCU\"),\n self.getObjectByRole(\"slitsMotors\")]\n \n\n if 
len(self.motors_groups) > 0:\n for motors_group in self.motors_groups:\n self.connect(motors_group,\n 'mGroupFocModeChanged',\n self.motor_group_focus_mode_changed)\n else:\n logging.getLogger(\"HWR\").debug('BeamFocusing: No motors defined')\n self.active_focus_mode = self.focus_modes[0]['modeName']\n self.size = self.focus_modes[0]['size']\n self.update_values()\n\n self.cmd_set_calibration_name = self.getCommandObject(\\\n 'cmdSetCallibrationName')\n try:\n self.cmd_set_phase = eval(self.getProperty('setPhaseCmd'))\n except:\n pass", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)", "def __init__(self):\n self.cad = pifacecad.PiFaceCAD()\n self.listener = pifacecad.SwitchEventListener(chip=self.cad)\n for i in range(8):\n self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key)\n self.listener.activate()\n atexit.register(self.atexit)", "def initialize_screen_elements(self):\n\n pass", "def _initControls(self):\n\n print \"DEBUG: Initializing Controls\"\n Game.Controls[pygame.K_a] = Game.MoveLeft\n Game.Controls[pygame.K_d] = Game.MoveRight\n Game.Controls[pygame.K_w] = Game.Jump\n Game.Controls[pygame.K_s] = Game.Duck\n Game.Controls[pygame.K_SPACE] = Game.Fly\n Game.Controls[pygame.K_j] = Game.Fire\n Game.Controls[pygame.K_ESCAPE] = Game.Quit\n\n Game.BoundControls.append(pygame.K_a)\n Game.BoundControls.append(pygame.K_d)\n Game.BoundControls.append(pygame.K_w)\n Game.BoundControls.append(pygame.K_s)\n Game.BoundControls.append(pygame.K_j)\n Game.BoundControls.append(pygame.K_SPACE)\n Game.BoundControls.append(pygame.K_ESCAPE)", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def teleopInit(self):\n self.Drive.resetEncoder()\n\n self.Drive.disableAutoForward()\n self.Drive.disableAutoTurn()\n self.Drive.disableVision()\n\n self.DS.setWhichVariable(True)\n self.Drive.updateSetpoint(\"teleop\")\n self.DS.setFirstTimeVariable(True)\n self.timer.reset()\n\n self.matchTime.startMode(isAuto=False)", "def __init__(self, view, model):\n self.view = view\n self.view.set_controller(self)\n self.model = model", "def initialise(self):\n self.set_up()", "def _add_init(self, p_model):\r\n\r\n raise NotImplementedError", "def _initializeUi(self):\r\n if self._mode == 'imperial':\r\n self.imperial_button.setChecked(True)\r\n self.imperial_button.clicked.emit()\r\n else:\r\n self.metric_button.setChecked(True)\r\n self.metric_button.clicked.emit()", "def initialize(self):\n self.ros.enable()\n self.phone_link.enable()", "def __init__(self, ui_file, controller):\n super(InputDeviceView, self).__init__()\n self._controller = controller\n self._controller.accepted_cb = self._accepted_cb\n self._controller.finished_cb = self._finished_cb\n self._controller.rejected_cb = self._rejected_cb\n self._controller.current_gait_cb = self._current_gait_cb\n\n self._always_enabled_buttons = []\n\n # Extend the widget with all attributes and children from UI file\n loadUi(ui_file, self)\n\n 
self.refresh_button.clicked.connect(self._update_possible_gaits)\n\n self._create_buttons()\n self._update_possible_gaits()", "def controls_setup(self):\n\n raise NotImplemented(\"Override this function by adding elements\")", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def __init__(self):\n self.modes = {}\n self.modelist = []\n self.mode = 'main'\n self.defs = {}\n events.bind(Key=self.dispatch)", "def _setup_kinematics(self):\n self.kin = Kinematics(robot_name=self.robot_name,\n offset=self.offset,\n active_joint_names=self.get_actuated_joint_names(),\n base_name=\"\", \n eef_name=None,\n frames=self.root\n )\n self._init_transform()", "def setupCamera(self) :\n\t\tbase.disableMouse()\n\t\tbase.camera.setPos(self.avatarNP.getPos())\n\t\tbase.camera.setZ(self.avatarNP.getZ()+1.5)\n\t\tbase.camera.setHpr(self.avatarNP.getHpr()[0],0,0)\t\t\n\t\tself.fieldAngle = 46.8\t# similar to human eye;\n\t\t\t\t\t# change this to zoom in/out\n\t\tbase.camLens.setFov(self.fieldAngle)", "def _init_hardware(self):\n return", "def initialize(self):\n self.currState = self.startState", "def __init__(self, screen, model):\n self.screen = screen\n self.model = model", "def __init__(self):\n self.isMoving = 0#0 is stop, 1 is moving forward, -1 is moving backward\n self.isRoutating = False\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)", "def initialize(self):\n self.actions = []\n \"*** YOUR CODE HERE\"\n #raise NotImplementedError()", "def __init__(self, model, screen):\n self.model = model\n self.screen = screen", "def __init__(self):\n self.model = None", "def __init__(self):\n self.model = None", "def __init__(self):\n super().__init__()\n self._time = 0 # storage the press time, initially is 0\n self._block_around = [] # storage the block around\n self._pressed = False # pressed status", "def initialize(self):\n self._ui.img_name.setText('No files selected')\n self._ui.bt_next.setEnabled(False)\n self._ui.bt_prev.setEnabled(False)\n self._ui.bt_right.setEnabled(False)\n self._ui.bt_left.setEnabled(False)\n self._ui.gps_button.setEnabled(False)\n\n self._open_btn = QPushButton('Open File', self._ui.img_label)\n self.adjustSize()", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def __init__(self):\n\n GPIO.setup(PIN_BTN, GPIO.IN, GPIO.PUD_UP)\n GPIO.setup(PIN_RED_LED_0, GPIO.OUT, GPIO.LOW)\n GPIO.setup(PIN_BLUE_LED, GPIO.OUT, GPIO.LOW)", "def __init__(self):\n\n self.controller = None\n\n self.game_running = False\n self.menu_view_running = False\n self.end_game_running = False", "def teleopInit(self):\n self.myRobot.setSafetyEnabled(True)", "def autonomousInit(self):\n '''\n self.cumulativeTime=0\n self.totalTime=0\n self.dataSet=[[-0.5,0,1,-1.0],[0.3,0.4,1,1.0],[-0.5,0,1,-1.0]]\n for i in self.dataSet:\n self.totalTime+=i[2]\n self.intervals = 0\n self.currentTime = 0\n for i in range(0,len(self.dataSet)):\n self.dataSet[i].append([self.currentTime,self.currentTime+self.dataSet[i][2]])\n 
self.currentTime+=self.dataSet[i][2]\n for i in self.dataSet:\n if i[3]==1.0:\n i.append(\"Forward\")\n if i[3]==-1.0:\n i.append(\"Backward\")\n \n self.timer.reset()\n self.timer.start()\n '''\n self.timer.reset()\n self.timer.start()\n\n #self.auto = self.chooser.getSelected()\n self.auto = 6\n self.autoState = 0\n #self.auto = 1\n\n self.EC1.reset()\n \n\n #self.auto = self.chooser.getSelected()", "def __init__(self):\n\n super().__init__(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=UnimodalVirtualSensorModel(\n virtual_sensor_model=[\n DoorVirtualSensorModel(modalities={\"image\"}),\n DoorVirtualSensorModel(modalities={\"pos\", \"sensors\"}),\n ],\n state_dim=3,\n ),\n )", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def initialize(self): \r\n pass", "def _set_init(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n ## Auxiliar information\n self.ks = None\n self.iss = [0]\n ## Class structural information\n self._setted = False\n self._constant_rel_pos = False\n self.staticneighs = None\n self.staticneighs_set = None", "def init_kern_act(num_pitches):\n\n kern_act = []\n\n for i in range(num_pitches):\n kern_act.append(Matern32(1, lengthscales=1.0, variance=3.5))\n return kern_act", "def __init__(self):\n super(Pad, self).__init__()\n\n self.oldx, self.oldy = -1, -1\n self.width, self.height = -1, -1\n self.surface, self.cr = None, None\n\n self.add_events(gdk.BUTTON_PRESS_MASK\n | gdk.BUTTON_RELEASE_MASK\n | gdk.POINTER_MOTION_MASK\n | gdk.POINTER_MOTION_HINT_MASK)\n self.connect('button-press-event', self.button_press_cb)\n self.connect('button-release-event', self.button_release_cb)\n self.connect('configure-event', self.configure_cb)\n self.connect('expose-event', self.expose_cb)\n self.connect('motion_notify_event', self.motion_notify_cb)", "def onInit(self):\n pass", "def physical_init(self):\n \n # BCM numbering scheme for Pi pins\n GPIO.setmode(GPIO.BCM)\n \n for attr in self.parm_list:\n if attr.io_pin > 0:\n GPIO.setup(attr.io_pin, attr.io_dir)\n if attr.io_dir == GPIO.OUT:\n GPIO.output(attr.io_pin, attr.value)\n #\n # There seems to be a bug where the edge detection triggers on both\n # edges. 
Compensate in the ISR.\n #\n GPIO.add_event_detect(self.motion.io_pin, GPIO.BOTH, callback=self.motion_edge)", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def __init__(self):\n self.inches_moved = 0\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.running = True\n self.ir_sensor = ev3.InfraredSensor()\n self.color_sensor = ev3.ColorSensor()\n assert self.color_sensor\n assert self.ir_sensor\n assert self.touch_sensor\n self.arm_motor.position = 0\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.pixy\n\n self.right_motor_encoder = self.right_motor.position\n self.left_motor_encoder = self.left_motor.position", "def init_ohw(self):\n if self.parent.current_ohw.analysis_meta[\"has_MVs\"]:\n self.btn_save_TA.setEnabled(True)\n self.init_TAmotion()\n else:\n self.btn_save_TA.setEnabled(False)\n self.clear_figs()", "def re_init(self):\n self.latent.re_init()\n if 're_init' in dir(self.inference_model):\n self.inference_model.re_init()\n if 're_init' in dir(self.generative_model):\n self.generative_model.re_init()", "def setModel(self):\n if self.model != \"\":\n # Setup the visual model\n # Animated stuff should be added soon\n print MODEL_DIR\n print self.model\n model = loader.loadModel(MODEL_DIR + self.model)\n model.reparentTo(self.bulletBody)", "def do_init(self):\n\n pass", "def on_load(self):\n self.__init__()", "def initialize(self, model):\n # Retrieve all parameters on which to act\n self.set_pruning_parameters(model)\n # Create a set of masks for each layer\n mask = [None] * len(self.prune_parameters)\n for step, (name, param) in enumerate(self.prune_parameters):\n mask[step] = torch.ones_like(param.data).detach()#.cpu().numpy()\n # Save mask\n self.mask = mask\n # Save the current model weights\n self.initial_state_dict = None", "def __init__(self):\n self.handlers = {}\n self.start_state = None\n self.end_states = []\n self.btn = Button()", "def enable_setup(self):\n self.high_ver_entry.config(state=\"normal\")\n self.low_ver_entry.config(state=\"normal\")\n self.left_hor_entry.config(state=\"normal\")\n self.right_hor_entry.config(state=\"normal\")", "def initialize(self):\n super(WaveformHighlightManager, self).initialize()\n data_manager = self.data_manager\n # self.get_data_position = self.data_manager.get_data_position\n self.full_masks = self.data_manager.full_masks\n self.clusters_rel = self.data_manager.clusters_rel\n self.cluster_colors = self.data_manager.cluster_colors\n self.nchannels = data_manager.nchannels\n self.nclusters = data_manager.nclusters\n self.nsamples = data_manager.nsamples\n self.spike_ids = data_manager.spike_ids\n self.nspikes = data_manager.nspikes\n self.npoints = data_manager.npoints\n # self.get_data_position = data_manager.get_data_position\n self.highlighted_spikes = []\n self.highlight_mask = np.zeros(self.npoints, dtype=np.int32)\n self.highlighting = False", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def init_kern(num_pitches, lengthscale, energy, frequency):\n\n kern_act = init_kern_act(num_pitches)\n kern_com = init_kern_com(num_pitches, lengthscale, energy, frequency)\n kern = [kern_act, kern_com]\n return kern", "def initialize(self):\n\t\tpcd8544.LCD.initialize(self)\n\t\tRPIO.setup(self._backlight_pin, RPIO.OUT, initial=RPIO.LOW)", 
"def initialize(self):\n\t\tpass", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def __init__(self, controller):\r\n self.controller = controller\r\n \r\n pygame.init() \r\n pygame.display.set_caption(\"Desktop CNC Miller\")\r\n #create the screen\r\n self.max_x = 1200\r\n self.max_y = 600\r\n self.window = pygame.display.set_mode( (self.max_x, self.max_y) )\r\n #set background\r\n #self.window.fill( (30, 30, 255) )\r\n self.window.fill( (0,0,0) )\r\n \r\n midpnt = int(self.max_x*0.6)\r\n self.drawer_bounds = pygame.Rect(0, 0, midpnt, self.max_y)\r\n self.control_panel_bounds = pygame.Rect(midpnt, 0, self.max_x-midpnt, self.max_y)\r\n \r\n self.control_panel = ControlPanel(self.window, self.control_panel_bounds, self.controller)\r\n self.drawer = Drawer(self.window)\r\n \r\n self.control_panel.draw()", "def __init__(self):\n self.model = gameModel.Model()\n self.view = gameView.View()", "def initialize_scene(self):\n if Time.now() - self.initial_time > 0.45 and self.should_initialize:\n self.should_initialize = False\n self.background_particle_controller = BackgroundParticlesController()\n self.player_controller = PlayerController()\n self.obstacle_controller_wrapper = ObstacleControllerWrapper()\n self.items_controller = ItemsControllerWrapper()\n self.score_controller = ScoreController()", "def setUI(self):\n self.parent.title(\"Handwritten digits classification\")\n self.pack(fill=BOTH, expand=1)\n self.columnconfigure(6,weight=1)\n self.rowconfigure(2, weight=1)\n self.canv = Canvas(self, bg=\"white\")\n self.canv.grid(row=2, column=0, columnspan=7,\n padx=5, pady=5,\n sticky=E + W + S + N)\n self.canv.bind(\"<B1-Motion>\",\n self.draw)\n\t\t\t\n\t\t\t\n #size_lab = Label(self, text=\"Classificator: \")\n #size_lab.grid(row=0, column=0, padx=5)\n predict_btn = Button(self, text=\"Predict\", width=10, command=lambda: self.predict())\n predict_btn.grid(row=0, column=0)\n delete_btn = Button(self, text=\"Clear\", width=10, command=lambda: self.canv.delete(\"all\"))\n delete_btn.grid(row=1, column=0, sticky=W)", "def init_view(self):\n self.view_map = self.ctx.clientmap", "def init():\r\n\t# add grabber tools based on proxy tools\r\n\tfor proxyWrapper in vizconnect.getToolsWithMode('Proxy'):\r\n\t\tgrabberTool = tools.grabber.HandGrabber(usingPhysics=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tusingSprings=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tplacementMode=tools.placer.MODE_DROP_DOWN)\r\n\t\t\r\n\t\tname = 'grabber_tool_based_on_'+proxyWrapper.getName()\r\n\t\tgrabberWrapper = vizconnect.addTool(raw=grabberTool,\r\n\t\t\t\t\t\t\t\t\t\t\tname=name,\r\n\t\t\t\t\t\t\t\t\t\t\tmake='Virtual',\r\n\t\t\t\t\t\t\t\t\t\t\tmodel='Grabber')\r\n\t\t# parent the grabber wrapper to the proxy's parent\r\n\t\tgrabberWrapper.setParent(proxyWrapper)\r\n\t\t\r\n\t\tgrabberTool.setItems(grabbableItems)\r\n\t\r\n\tviz.callback(viz.getEventID('RESET_THE_LOFT_LAYOUT'), lambda e: resetMovedObjects())", "def _manually_initialize(self) -> None:\n # XXX: maybe refactor, this is actually part of the public interface\n pass", "def setupKeyMappings(self) :\n\t\t# first create keyMap object with default values\n\t\tself.keyMap = { \"left\":0, \"right\":0, \\\n\t\t\t\t\"forward\":0, \"backward\":0, \"dash\":0, \\\n\t\t\t\t\"slide-left\":0, \"slide-right\":0, \\\n \t\t\t\t\"cam-up\":0, \"cam-down\":0, \\\n\t\t\t\t\"cam-left\":0, \"cam-right\":0, \\\n\t\t\t\t\"zoom-in\":0, \"zoom-out\":0, \\\n\t\t\t\t\"reset-view\":0, \"view\":0}\n\t\t\n\t\t# now setup keyboard events that modify 
keyMap thru setKey\n\t\tself.accept(\"escape\", sys.exit)\n\n\t\t# turn help text on/off\n\t\tself.accept(\"h\", self.setKey, [\"help\",1])\n\t\tself.accept(\"h-up\", self.setKey, [\"help\",0])\n\n\t\t# movement controls\n\t\tself.accept(\"arrow_left\", self.setKey, [\"left\",1])\n\t\tself.accept(\"arrow_left-up\", self.setKey, [\"left\",0])\n\t\tself.accept(\"arrow_right\", self.setKey, [\"right\",1])\n\t\tself.accept(\"arrow_right-up\", self.setKey, [\"right\",0])\n\n\t\tself.accept(\"arrow_up\", self.setKey, [\"forward\",1])\n\t\tself.accept(\"arrow_up-up\", self.setKey, [\"forward\",0])\n \t\tself.accept(\"arrow_down\", self.setKey, [\"backward\",1])\n \t\tself.accept(\"arrow_down-up\", self.setKey, [\"backward\",0])\n\n \t\tself.accept(\",\", self.setKey, [\"slide-left\",1])\n \t\tself.accept(\",-up\", self.setKey, [\"slide-left\",0])\n \t\tself.accept(\".\", self.setKey, [\"slide-right\",1])\n \t\tself.accept(\".-up\", self.setKey, [\"slide-right\",0])\n\n\t\tself.accept(\"alt-arrow_up\", self.setKey, [\"dash\", 1])\n \t\tself.accept(\"alt-up\", self.setKey, [\"dash\", 0])\n\n\t\t# camera direction contols\n\t\tself.accept(\"shift-arrow_up\", self.setKey, [\"cam-up\",1])\n\t\tself.accept(\"shift-arrow_down\", self.setKey, [\"cam-down\",1])\n\t\tself.accept(\"shift-arrow_left\", self.setKey, [\"cam-left\",1])\n\t\tself.accept(\"shift-arrow_right\", self.setKey, [\"cam-right\",1])\t\n\n\t\t# zoom controls\n\t\tself.accept(\"z\", self.setKey, [\"zoom-in\",1])\n\t\tself.accept(\"z-up\", self.setKey, [\"zoom-in\",0])\n \t\tself.accept(\"shift-z\", self.setKey, [\"zoom-out\",1])\n\t\tself.accept(\"r\", self.setKey, [\"reset-view\",1]) \n\t\tself.accept(\"r-up\", self.setKey, [\"reset-view\",0]) \n\n\t\tself.accept(\"v\", self.setKey, [\"view\",1])\n\t\tself.accept(\"v-up\", self.setKey, [\"view\",0])", "def init_turn(self):\n sender = self.sender()\n cur_settings = self.setting_dropdown.currentText()\n\n if sender:\n cur_disp = self.rad_grp.checkedButton()\n self.start_frm = self.start_frm_le.text()\n self.end_frm = self.end_frm_le.text()\n file_path = self.save_loc.text()\n if self.arg_check():\n\n wireframe = self.wireframe\n # Instantiate the tool logic with the selected values.\n start_turn = tl.Turntable(cur_disp.objectName(),\n self.start_frm,\n self.end_frm,\n file_path,\n wireframe)\n\n # If discipline is surface, set render settings.\n if cur_disp.objectName() == 'surface':\n set_turn = tl.RenderTurntable()\n set_turn.set_render_settings(cur_settings,self.start_frm,self.end_frm)\n\n start_turn.launch_tool()", "def _initialize(self):\n self.send_init_command()", "def __init__(self, model=None):\n super().__init__(model)\n\n self.server_update_direction = None\n self.client_update_direction = None\n self.new_client_update_direction = None", "def setUp(self):\n self.frequency = 250\n self.firmware = 30474\n self.mask = lmdm.ListModeDataMask(self.frequency, self.firmware)", "def setup_class(cls):\n super().setup_class()\n cls.detector = cls.faceEngine.createFaceDetector(DetectorType.FACE_DET_DEFAULT)\n cls.headPoseEstimator = cls.faceEngine.createHeadPoseEstimator()\n cls.image = VLImage.load(filename=ONE_FACE)\n cls.detection = TestHeadPose.detector.detectOne(cls.image, detect5Landmarks=True, detect68Landmarks=True)", "def __init__(self):\n self.hmd = None\n self.vr_render_models = None\n self.render_width = 0\n self.render_height = 0", "def __init__(self):\n ## Global initialization\n self.default_initialization()\n ## Initial function set\n self.selfdriven = False\n 
self._format_default_functions()\n ## Check descriptormodel\n self._assert_correctness()", "def __init__(self, controller):\n self._controller = controller", "def __init__(self):\n self.__uplinker = None\n self.__downlinker = None", "def init_widget(self):", "def initialize(self):\n \n # lumopt.figures_of_merit.modematch object need initialization and\n # forward setting. h is a spacemap.utilities.simulation object\n if self._fom_type == 'ModeMatch':\n self.fom.initialize(self._ha)\n self.fom.make_forward_sim(self._ha)", "def initialize(self):\n # FIX: INITIALIZE PROCESS INPUTS??\n for mech, value in self.initial_values.items():\n mech.initialize(value)", "def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()", "def __init__(self, fig, aperture_model, region_model, help_text, mask_handlers=None,\n domain=None, handlers=None, helpintrotext=None):\n\n fig.js_on_event(MouseEnter, CustomJS(code='window.controller_keys_enabled = true;'))\n fig.js_on_event(MouseLeave, CustomJS(code='window.controller_keys_enabled = false;'))\n\n # set the class for the help_text div so we can have a common style\n help_text.css_classes.append('controller_div')\n\n self.aperture_model = aperture_model\n\n self.helpmaskingtext = ''\n\n if helpintrotext is not None:\n self.helpintrotext = f\"{helpintrotext}<br/><br/>\\n\"\n else:\n self.helpintrotext = \"While the mouse is over the plot, choose from the following commands:<br/><br/>\\n\"\n\n self.helptooltext = ''\n self.helptext = help_text\n self.enable_user_masking = True if mask_handlers else False\n\n self.handlers = dict()\n if mask_handlers:\n if len(mask_handlers) != 2:\n raise ValueError(\"Must pass tuple (mask_fn, unmask_fn) to mask_handlers argument of Controller\")\n self.register_handler(Handler('m', 'Mask selected/closest',\n lambda key, x, y:\n mask_handlers[0](x, y, ((self.fig.x_range.end - self.fig.x_range.start)\n /float(self.fig.inner_width)) /\n ((self.fig.y_range.end - self.fig.y_range.start)/\n float(self.fig.inner_height)))))\n self.register_handler(Handler('u', 'Unmask selected/closest',\n lambda key, x, y:\n mask_handlers[1](x, y, ((self.fig.x_range.end - self.fig.x_range.start)\n /float(self.fig.inner_width)) /\n ((self.fig.y_range.end - self.fig.y_range.start)\n /float(self.fig.inner_height)))))\n if handlers:\n for handler in handlers:\n self.register_handler(handler)\n self.update_helpmaskingtext()\n\n self.tasks = dict()\n if aperture_model:\n self.tasks['a'] = ApertureTask(aperture_model, help_text, fig)\n if region_model:\n self.tasks['r'] = RegionTask(region_model, help_text, domain=domain)\n self.task = None\n self.x = None\n self.y = None\n # we need to always know where the mouse is in case someone\n # starts an Aperture or Band\n if aperture_model or region_model or mask_handlers or handlers:\n fig.on_event('mousemove', self.on_mouse_move)\n fig.on_event('mouseenter', self.on_mouse_enter)\n fig.on_event('mouseleave', self.on_mouse_leave)\n self.fig = fig\n self.set_help_text(None)", "def set_init_speed(self):\n self.control_instance.set_init_speed()" ]
[ "0.63043123", "0.60653013", "0.6031488", "0.59517133", "0.5951652", "0.5938752", "0.5925825", "0.5837696", "0.583645", "0.5820525", "0.5815757", "0.57706904", "0.5763885", "0.5759453", "0.5727885", "0.57115877", "0.57033086", "0.5632936", "0.5617571", "0.55889696", "0.5574349", "0.5570229", "0.5558112", "0.5537165", "0.55358523", "0.55319965", "0.551369", "0.5486456", "0.5486126", "0.54768443", "0.5471029", "0.5467227", "0.5437158", "0.5427382", "0.5411567", "0.5408896", "0.5399578", "0.53918505", "0.53870976", "0.5384934", "0.53777766", "0.53635275", "0.53635275", "0.5348521", "0.5344292", "0.5314459", "0.5314243", "0.53084445", "0.52910423", "0.5289498", "0.5287638", "0.5284276", "0.52822727", "0.5275925", "0.5270476", "0.52560383", "0.52542657", "0.5237826", "0.52273893", "0.5224257", "0.5217303", "0.5208276", "0.51941246", "0.51921666", "0.51869404", "0.5184535", "0.5175756", "0.5157173", "0.5155366", "0.51529557", "0.51529557", "0.51435477", "0.5136568", "0.51348686", "0.51320815", "0.51320815", "0.51320815", "0.51320815", "0.51310396", "0.51278377", "0.5123359", "0.5123306", "0.5121945", "0.5118628", "0.5115284", "0.51142025", "0.5111796", "0.5110269", "0.5107474", "0.51064265", "0.51061064", "0.5100817", "0.509465", "0.5094111", "0.50880104", "0.50844896", "0.50838006", "0.5077073", "0.5073214", "0.50716263", "0.50713074" ]
0.0
-1
Compute end effector positions for the given joint configuration.
def forward_kinematics(self, joint_positions):
    pinocchio.framesForwardKinematics(
        self.robot_model,
        self.data,
        joint_positions,
    )
    return [
        np.asarray(self.data.oMf[link_id].translation).reshape(-1).tolist()
        for link_id in self.tip_link_ids
    ]
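A minimal standalone sketch of the same computation, assuming pinocchio and numpy are installed and the TriFinger URDF from the previous record exists on disk; it evaluates the frame placements on the neutral configuration:

    import numpy as np
    import pinocchio

    urdf_path = '/opt/blmc_ei/src/robot_properties_fingers/urdf/pro/trifingerpro.urdf'
    model = pinocchio.buildModelFromUrdf(urdf_path)
    data = model.createData()
    q = pinocchio.neutral(model)  # neutral joint configuration
    pinocchio.framesForwardKinematics(model, data, q)  # updates data.oMf in place
    for name in ("finger_tip_link_0", "finger_tip_link_120", "finger_tip_link_240"):
        tip = data.oMf[model.getFrameId(name)].translation
        print(name, np.asarray(tip).reshape(-1))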
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end_effectors_pos(self):\n def relative_pos_in_egocentric_frame(physics):\n end_effector = physics.bind(self._entity.end_effectors).xpos\n torso = physics.bind(self._entity.root_body).xpos\n xmat = np.reshape(physics.bind(self._entity.root_body).xmat, (3, 3))\n return np.reshape(np.dot(end_effector - torso, xmat), -1)\n return observable.Generic(relative_pos_in_egocentric_frame)", "def end_effector_contacts(self, physics):\n return self.collect_contacts(physics, self._end_effector_geom_ids)", "def end_effectors(self) -> list:\n if not hasattr(self, \"_end_effectors\"):\n S = self.structure\n self._end_effectors = [\n [x, y]\n for x in S\n if S.out_degree(x) == 0\n for y in S.predecessors(x)\n if DIST in S[y][x]\n if S[y][x][DIST] < np.inf\n ]\n\n return self._end_effectors", "def end_effectors(self) -> list:\n if not hasattr(self, \"_end_effectors\"):\n S = self.structure\n self._end_effectors = [\n [x, y]\n for x in S\n if S.out_degree(x) == 0\n for y in S.predecessors(x)\n if DIST in S[y][x]\n if S[y][x][DIST] < np.inf\n ]\n\n return self._end_effectors", "def get_end_effector_link(self):\n return self._g.get_end_effector_link()", "def get_joint_positions(self, joint_angles ): \n\n\n # current angles\n res_joint_angles = joint_angles.copy() \n\n # detect limits\n maskminus= res_joint_angles > self.joint_lims[:,0]\n maskplus = res_joint_angles < self.joint_lims[:,1]\n \n res_joint_angles = res_joint_angles*(maskplus*maskminus) \n res_joint_angles += self.joint_lims[:,0]*(np.logical_not(maskminus) )\n res_joint_angles += self.joint_lims[:,1]*(np.logical_not(maskplus) )\n \n # mirror\n if self.mirror :\n res_joint_angles = -res_joint_angles\n res_joint_angles[0] += np.pi \n \n # calculate x coords of arm edges.\n # the x-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n x = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.cos( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # trabslate to the x origin \n x = np.hstack([self.origin[0], x+self.origin[0]])\n\n # calculate y coords of arm edges.\n # the y-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n y = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.sin( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # translate to the y origin \n y = np.hstack([self.origin[1], y+self.origin[1]])\n\n pos = np.array([x, y]).T\n \n return (pos, res_joint_angles)", "def energy_function(self):\n E = 0\n for i in range(len(self.config)):\n for j in range(len(self.config)):\n s = self.config[i,j]\n #Calculate the impact of neighboring particle pairs\n neighbors = (self.config[(i+1)%L, j] +\n self.config[i, (j+1)%L] + \n self.config[(i-1)%L, j] + \n self.config[i, (j-1)%L])\n E += -J*s*neighbors\n #fix for extra neighbors\n return E/4", "def get_vehicle_end_index(self):\n return [len(self.matrix) - 1 for i in range(len(self.vehicles))]", "def _generate_end_position(self):\n end_position = []\n new_row = []\n\n for i in range(1, self.PUZZLE_NUM_ROWS * self.PUZZLE_NUM_COLUMNS + 1):\n new_row.append(i)\n if len(new_row) == self.PUZZLE_NUM_COLUMNS:\n end_position.append(new_row)\n new_row = []\n\n end_position[-1][-1] = 0\n return end_position", "def num_deriv_exterior(\n cal: Calibration, cpar: ControlPar, dpos: float, dang: float, pos: vec3d\n):\n var = 
[\n cal.ext_par.x0,\n cal.ext_par.y0,\n cal.ext_par.z0,\n cal.ext_par.omega,\n cal.ext_par.phi,\n cal.ext_par.kappa,\n ]\n x_ders = np.zeros(6)\n y_ders = np.zeros(6)\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n xs, ys = img_coord(pos, cal, cpar.mm)\n\n for pd in range(6):\n step = dang if pd > 2 else dpos\n var[pd] += step\n\n if pd > 2:\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n xpd, ypd = img_coord(pos, cal, cpar.mm)\n x_ders[pd] = (xpd - xs) / step\n y_ders[pd] = (ypd - ys) / step\n\n var[pd] -= step\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n return (x_ders, y_ders)", "def fk(arm,base=np.identity(4),joint_num=-1):\n\n pEE = base # Cumulative pose of the End Effector \n # (initially set up as the base of the robot)\n if joint_num==-1:\n for joint in arm:\n pEE=np.dot(pEE, joint.dhMatrix())\n else:\n for i in range(joint_num):\n pEE=np.dot(pEE, arm[i].dhMatrix())\n\n return pEE", "def affected_end(self):\n types = {alt.type for alt in self.ALT} # set!\n BAD_MIX = {INS, SV, BND, SYMBOLIC} # don't mix well with others\n if (BAD_MIX & types) and len(types) == 1 and list(types)[0] == INS:\n # Only insertions, return 0-based position right of first base\n return self.POS # right of first base\n else: # Return 0-based end position, behind last REF base\n return (self.POS - 1) + len(self.REF)", "def get_econs(self):\n eham = self.beads.vpath*self.nm.omegan2 + self.nm.kin + self.forces.pot\n eham += self.bias.pot # bias\n for e in self._elist:\n eham += e.get()\n\n return eham + self.eens", "def _get_end_points(self, segmented_instances, i, stats, idx):\n\n end_points=[]\n\n # find all points intersecting the bbox\n #(tl_x, th_y, width, height, area)\n label_num=i+1\n leftmost_x = stats['bbox'][i][cv2.CC_STAT_LEFT]\n topmost_y = stats['bbox'][i][cv2.CC_STAT_TOP]\n width = stats['bbox'][i][cv2.CC_STAT_WIDTH]\n height = stats['bbox'][i][cv2.CC_STAT_HEIGHT]\n bottom_most_y = topmost_y + height-1\n right_most_x = leftmost_x + width-1\n\n segmented_instances_copy=segmented_instances.copy()\n edge_points = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs[segmented_instances==label_num]=255\n cv2.rectangle(segmented_instances_copy,(leftmost_x, topmost_y), (right_most_x, bottom_most_y), 150, 2)\n\n #Get all points for the current stem segment\n label_points = np.argwhere(segmented_instances.copy()==label_num)\n\n # upper points from (tl_x,th_y) to (th_x, th_y) that intersect with the upper edge of the bounding box\n upper_points = [i for i in label_points if i[0]==topmost_y and i[1]>=leftmost_x and i[1]<=right_most_x]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(upper_points, edge_points, segs, 1)\n center_upper_pts = sorted(self._get_centeroids(x_pts))\n\n # left side points from (tl_x, tl_y) to (tl_x, th_y) that intersect with the left edge of the bounding box\n left_points = [i for i in label_points if i[1]==leftmost_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(left_points, edge_points, segs, 0)\n center_left_pts = sorted(self._get_centeroids(x_pts))\n\n #right side points from (th_x, tl_y) to (th_x, th_y) that intersect with the right edge of the bounding box\n right_points = [i for i in label_points if i[1]==right_most_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(right_points, edge_points, segs, 0)\n center_right_pts = sorted(self._get_centeroids(x_pts))\n\n #bottom points 
from (tl_x, tl_y) to (th_x,tl_y)\n bottom_points = [i for i in label_points if i[1]>=leftmost_x and i[1]<=right_most_x and i[0]==bottom_most_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(bottom_points, edge_points, segs, 1)\n center_bottom_pts = sorted(self._get_centeroids(x_pts))\n\n # If there are corner edges, get the centroid of that\n center_x_lb, center_y_lb, center_left_pts, center_bottom_pts = self._get_corner_centers(center_left_pts, \\\n center_bottom_pts, bottom_most_y, leftmost_x)\n if (center_x_lb != None) and (center_y_lb != None):\n end_points.append([center_x_lb, center_y_lb])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n # If there are corner edges, get the centroid of that\n center_x_ur, center_y_ur, center_right_pts, center_upper_pts = self._get_corner_centers(center_right_pts, \\\n center_upper_pts, topmost_y, right_most_x)\n if (center_x_ur != None) and (center_y_ur != None):\n end_points.append([center_x_ur, center_y_ur])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n # If there are corner edges, get the centroid of that\n center_x_ul, center_y_ul, center_left_pts, center_upper_pts = self._get_corner_centers(center_left_pts, \\\n center_upper_pts, topmost_y, leftmost_x)\n if (center_x_ul != None) and (center_y_ul != None):\n end_points.append([center_x_ul, center_y_ul])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n\n # If there are corner edges, get the centroid of that\n center_x_br, center_y_br, center_right_pts, center_bottom_pts = self._get_corner_centers(center_right_pts, \\\n center_bottom_pts, bottom_most_y, right_most_x)\n if (center_x_br != None) and (center_y_br != None):\n end_points.append([center_x_br, center_y_br])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n #self.showme(segmented_instances_copy, 'bbox')\n\n return end_points", "def compute_end_point(self):\n\n L = self.level\n P = L.prob\n\n # check if Mth node is equal to right point and do_coll_update is false, perform a simple copy\n if self.coll.right_is_node and not self.params.do_coll_update:\n # a copy is sufficient\n L.uend = P.dtype_u(L.u[-1])\n else:\n # start with u0 and add integral over the full interval (using coll.weights)\n L.uend = P.dtype_u(L.u[0])\n for m in range(self.coll.num_nodes):\n L.uend += L.dt * self.coll.weights[m] * L.f[m + 1]\n # add up tau correction of the full interval (last entry)\n if L.tau[-1] is not None:\n L.uend += L.tau[-1]\n\n return None", "def compute_end_point(self):\n raise NotImplementedError('ERROR: sweeper has to implement compute_end_point(self)')", "def end_point(self) -> 
Vec3:\n v = list(self.vertices([self.dxf.end_angle]))\n return v[0]", "def get_ee_points_velocities(ref_jacobian, ee_points, ref_rot, joint_velocities):\n ref_jacobians_trans = ref_jacobian[:3, :]\n ref_jacobians_rot = ref_jacobian[3:, :]\n ee_velocities_trans = np.dot(ref_jacobians_trans, joint_velocities)\n ee_velocities_rot = np.dot(ref_jacobians_rot, joint_velocities)\n ee_velocities = ee_velocities_trans + np.cross(ee_velocities_rot.reshape(1, 3),\n ref_rot.dot(ee_points.T).T)\n return ee_velocities.reshape(-1)", "def frame_end(self):\n self.prev_p3d = self.model.get_p3d()[self.joint_indices]\n self.n_evaluations = 0", "def _findExonEnd(self, exonRecs, iBlkStart):\n iBlkEnd = iBlkStart + 1\n while (iBlkEnd < len(exonRecs)) and (self._tGapSize(exonRecs, iBlkEnd) < minIntronSize):\n iBlkEnd += 1\n return iBlkEnd, exonRecs[iBlkEnd - 1].end - exonRecs[iBlkStart].start", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif 
obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def get_envelope_end(env):\n denv = np.diff(env)\n i = np.where(np.abs(denv) > 0)[0]\n true_stop_index = np.max(i)+1\n return true_stop_index", "def get_unhindered_positions(self, endposition):\n pass", "def get_current_joint_position(self) -> list:\n joint_positions = get_joint_positions(self.body, self.joints[:self.DoF])\n for i in range(self.DoF):\n if self.JOINT_TYPES[i] == 'P':\n # get the unscaled joint position\n joint_positions[i] /= self.scaling\n return joint_positions", "def get_entang(self, axes_subset):\n self.x_axes = list(axes_subset)\n num_x_axes = len(self.x_axes)\n num_row_axes = len(self.den_mat.row_shape)\n self.y_axes = [k for k in range(num_row_axes) if k not in self.x_axes]\n num_y_axes = len(self.y_axes)\n self.Dxy = self.den_mat.get_rho_xy(self.x_axes, self.y_axes)\n Dxy_a = []\n\n # initial Dxy_a\n # Dxy_a[0] = Dxy,\n # all others are max entangled\n dm_max_ent = DenMat(self.Dxy.num_rows, self.Dxy.row_shape)\n x_axes0 = list(range(num_x_axes))\n y_axes0 = list(range(num_x_axes, num_row_axes, 1))\n max_ent_st = MaxEntangState(dm_max_ent.num_rows, dm_max_ent.row_shape,\n x_axes0, y_axes0)\n EntangCase.check_max_entang_st(max_ent_st)\n st_vec = max_ent_st.get_st_vec()\n entang = max_ent_st.get_known_entang()\n dm_max_ent.set_arr_from_st_vec(st_vec)\n # print('dddddd dm max ent', dm_max_ent.arr)\n for alp in range(self.num_hidden_states):\n if alp == 0:\n Dxy_alp = self.Dxy\n else:\n 
Dxy_alp = dm_max_ent\n Dxy_a.append(Dxy_alp)\n\n for step in range(self.num_ab_steps):\n if self.verbose:\n print('------------ab step=', step)\n print('entang=', entang)\n entang, Dxy_a = self.next_step(Dxy_a)\n if self.verbose:\n print('-----------\\nfinal entang=', entang)\n return entang", "def getEePointsVelocities(refJacobian, eePoints, refRot, jointVelocities):\n refJacobiansTrans = refJacobian[:3, :]\n refJacobiansRot = refJacobian[3:, :]\n eeVelocitiesTrans = np.dot(refJacobiansTrans, jointVelocities)\n eeVelocitiesRot = np.dot(refJacobiansRot, jointVelocities)\n eeVelocities = eeVelocitiesTrans + np.cross(eeVelocitiesRot.reshape(1, 3),\n refRot.dot(eePoints.T).T)\n return eeVelocities.reshape(-1)", "def get_electrode_positions(self):\n return self.electrodes[:, 1:3]", "def get_ecc(self):\n mu_mass = G*(self._mm + self._sm)\n h_mom = self.sp_ang_mom()\n vel = self.getvel_xyz()\n pos = self.getpos_xyz()\n e_vec = 1.0/mu_mass*(np.cross(vel, h_mom) -\n mu_mass*pos/np.linalg.norm(pos))\n return e_vec", "def end_effectors(self) -> list:\n S = self.parents\n return [[x, f\"q{x[1:]}\"] for x in S if S.out_degree(x) == 0]", "def jac_pos(self):\n J = self.sim.data.get_body_jacp(self.end_effector)\n J = J.reshape(3, -1)[:, 0:7].T\n return J", "def efSolver(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n\n #x-component#\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n\n #y-component\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n #z-component\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)", "def get_end_vertices(self):\n # Note that concatenating two vertices needs to make a\n # vertices for the frame.\n extesion_fraction = self.extesion_fraction\n\n corx = extesion_fraction*2.\n cory = 1./(1. 
- corx)\n x1, y1, w, h = 0, 0, 1, 1\n x2, y2 = x1 + w, y1 + h\n dw, dh = w*extesion_fraction, h*extesion_fraction*cory\n\n if self.extend in [\"min\", \"both\"]:\n bottom = [(x1, y1),\n (x1+w/2., y1-dh),\n (x2, y1)]\n else:\n bottom = [(x1, y1),\n (x2, y1)]\n\n if self.extend in [\"max\", \"both\"]:\n top = [(x2, y2),\n (x1+w/2., y2+dh),\n (x1, y2)]\n else:\n top = [(x2, y2),\n (x1, y2)]\n\n if self.orientation == \"horizontal\":\n bottom = [(y,x) for (x,y) in bottom]\n top = [(y,x) for (x,y) in top]\n\n return bottom, top", "def solve_eom(self, tend, dt=0.0001):\n ic = np.zeros(6)\n tstart = 0.0\n self.t_span = np.linspace(tstart, tend, tend/dt)\n self.create_design_params()\n self.wx, self.wy, self.wz, self.psi, self.theta, self.phi = solver.integrate_eom(ic, self.t_span, self.design_params, self.SRM1, self.SRM2).T\n self.nutation_angle = solver.compute_nutation_angle(self.theta, self.phi)\n self.precession_angle = solver.compute_precession_angle(self.theta, self.psi)", "def get_end_cell(self):\n return (self.end_row, self.end_col)", "def event_ending_point_extractor(row) -> int:\n to_return = None\n # First, define the variables that we will need for the rest of this\n # function.\n positions_list = literal_eval(row[\"positions\"])\n assert isinstance(positions_list, list)\n assert 1 <= len(positions_list) <= 2\n\n # Next, extract the starting and ending positions.\n starting_x = positions_list[0].get(\"x\")\n starting_y = positions_list[0].get(\"y\")\n\n try:\n ending_x = positions_list[1].get(\"x\")\n raw_ending_y = positions_list[1].get(\"y\")\n except IndexError:\n # If the event is one where there is no ending point to list (i.e.,\n # a foul).\n ending_x, raw_ending_y = starting_x, starting_y\n\n ending_y = (raw_ending_y/100)*69\n\n # Finally, validate and return the result.\n to_return = [ending_x, ending_y]\n\n return to_return", "def jacobian_cost(self, joint_angles: dict, ee_goals) -> np.ndarray:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = ee_goals.keys()\n J = np.zeros(self.n)\n for (\n ee\n ) in end_effector_nodes: # iterate through end-effector nodes, assumes sorted\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. 
joint locations\n t_ee = self.get_pose(joint_angles, ee).trans\n dg_ee_x = t_ee[0] - ee_goals[ee].trans[0]\n dg_ee_y = t_ee[1] - ee_goals[ee].trans[1]\n for (pdx, joint_p) in enumerate(ee_path): # algorithm fills Jac per column\n p_idx = int(joint_p[1:]) - 1\n for jdx in range(pdx, len(ee_path)):\n node_jdx = ee_path[jdx]\n theta_jdx = sum([joint_angles[key] for key in ee_path[0 : jdx + 1]])\n J[p_idx] += (\n 2.0\n * self.a[node_jdx]\n * (-dg_ee_x * np.sin(theta_jdx) + dg_ee_y * np.cos(theta_jdx))\n )\n\n return J", "def set_to_end(self) -> None:\n final_config = self._path_points[-1]\n self._mobile.set_2d_pose(final_config[:3])", "def\tbegin_end(env, blc):\n\n\tb_e = np.empty((blc.shape[0], 2))\n\tinf = 0\n\tp = 0\n\twin = env.win_over\n\tb_e[0, 0] = inf\n\tif blc[0] + win <= blc[-1]:\n\t\tb_e[0, 1] = blc[0] + win\n\telse:\n\t\tb_e[0, 1] = blc[-1]\n\tif blc.shape[0] == 1:\n\t\tb_e[0, 1] = blc[0]\n\t\treturn (b_e)\n\tfor k in range(1, blc.shape[0] - 1):\n\t\tinf = blc[k - 1] - win\n\t\tb_e[k, 0] = inf\n\t\tif blc[k] + win <= blc[-1]:\n\t\t\tb_e[k, 1] = blc[k] + win\n\t\telse:\n\t\t\tb_e[k, 1] = blc[-1]\n\tb_e[blc.shape[0] - 1, 0] = blc[-2] - win\n\tb_e[blc.shape[0] - 1, 1] = blc[-1]\n\tneg = np.where(b_e < 0)[0]\n\tif neg.shape[0]:\n\t\tb_e = b_e[neg[-1]:]\n\t\tb_e[0, 0] = 0\n\treturn (b_e)", "def box_collision_info(self):\r\n position = np.zeros((self.Npart,3)) # number of particles, dim, iterations\r\n position[:,:] = np.random.uniform(0,1e-6, size = (self.Npart,3))\r\n velocity = np.zeros((self.Npart,3))\r\n velocity[:,:] = np.random.normal(0,self.sigma,size = (self.Npart,3))\r\n\r\n part_collided = 0\r\n part_escaped = 0\r\n momentum = 0\r\n\r\n print 'engine started'\r\n for i in xrange(1,self.n):\r\n #collision\r\n position += velocity*dt\r\n l_hole = position[:,0:2] > self.L/4\r\n h_hole = position[:,0:2] < (3*self.L)/4\r\n pos_xy = np.logical_and(l_hole, h_hole)\r\n pos_xy = np.logical_and(pos_xy[:,0], pos_xy[:,1])\r\n pos_z = position[:,2] < 0\r\n esc_part = np.logical_and(pos_z, pos_xy)\r\n\r\n #velocity[esc_part] = velocity[esc_part]\r\n part_escaped += np.sum(esc_part)\r\n\r\n for j in xrange(0,3):\r\n impact_wall_pos = np.logical_and(position[:,j] > 0,\r\n position[:,j] < self.L)\r\n velocity[np.logical_not(impact_wall_pos),j] = -velocity[\r\n np.logical_not(impact_wall_pos),j]\r\n\r\n\r\n if j == 0:\r\n part_collided += np.sum(np.logical_not(impact_wall_pos),j)\r\n momentum += np.sum(2*self.m*abs(velocity[np.logical_not(\r\n impact_wall_pos),j]))\r\n\r\n\r\n\r\n position[position < 0] = 0\r\n position[position >self.L] = self.L\r\n\r\n particle_collided = part_collided/2\r\n return position, velocity,part_escaped, impact_wall_pos, particle_collided, momentum", "def boundaries_end(*args):\n return _ida_hexrays.boundaries_end(*args)", "def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n 
\"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def end_points(self, origin, destination):\n # origin and destination are components with bounding-boxes\n # direction is a 2 char code representing starting and ending directions\n # 'h' horizontal, 'v' vertical\n o_coords = origin.bounding_coords()\n d_coords = destination.bounding_coords()\n\n start = {\n \"h\": core.Coords(o_coords.x2, o_coords.y1 + origin.height / 2),\n \"v\": core.Coords(origin.x + (o_coords.x2 - o_coords.x1) / 2, o_coords.y2),\n }\n end = {\n \"h\": core.Coords(d_coords.x1, d_coords.y1 + destination.height / 2),\n \"v\": core.Coords(\n destination.x + (d_coords.x2 - d_coords.x1) / 2, d_coords.y1\n ),\n }\n self.start = start[self.direction[0]]\n self.end = end[self.direction[-1]]\n return (self.start, self.end)", "def get_ending_direction_vector(self):\n\n total_length = len(self.pixel_list)\n\n if total_length < 2:\n return None\n elif total_length < 15:\n delta_x = self.pixel_list[-1].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-1].y - self.pixel_list[0].y\n return delta_y, delta_x\n else:\n delta_x = self.pixel_list[-15].x - self.pixel_list[-1].x\n delta_y = self.pixel_list[-15].y - 
self.pixel_list[-1].y\n return delta_y, delta_x", "def compute_fk_position(self, jpos, tgt_frame):\n if isinstance(jpos, list):\n jpos = np.array(jpos)\n jpos = jpos.flatten()\n if jpos.size != self.arm_dof:\n raise ValueError('Length of the joint angles '\n 'does not match the robot DOF')\n assert jpos.size == self.arm_dof\n kdl_jnt_angles = joints_to_kdl(jpos)\n\n kdl_end_frame = kdl.Frame()\n idx = self.arm_link_names.index(tgt_frame) + 1\n fg = self._fk_solver_pos.JntToCart(kdl_jnt_angles,\n kdl_end_frame,\n idx)\n if fg < 0:\n raise ValueError('KDL Pos JntToCart error!')\n pose = kdl_frame_to_numpy(kdl_end_frame)\n pos = pose[:3, 3].flatten()\n rot = pose[:3, :3]\n return pos, rot", "def compute_observation(self):\n robotPos, robotOrn = p.getBasePositionAndOrientation(self.botId)\n robotEuler = p.getEulerFromQuaternion(robotOrn)\n linear, angular = p.getBaseVelocity(self.botId)\n return (np.array([robotEuler[0],angular[0],self.vt], dtype='float32'))", "def compute_joint_error_position(self, current_position, target_position):\n \n # helper variables\n tmp_c = []\n tmp_t = [] \n \n for x in range(0,20):\n tmp_c.append(current_position[x].joint_target)\n tmp_t.append(math.radians (target_position[x].joint_target) )\n \n # Compute the norm of the error\n error = numpy.linalg.norm( numpy.array(tmp_c) - numpy.array(tmp_t) )\n \n #print error \n\n return error", "def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def _body_coord(self):\r\n cth = np.cos(self.theta)\r\n sth = np.sin(self.theta)\r\n M = self.P - 0.5 * np.diag(self.lengths)\r\n # stores the vector from the center of mass to the nose\r\n c2n = np.array([np.dot(M[self.nose], cth), np.dot(M[self.nose], sth)])\r\n # absolute position of nose\r\n T = -self.pos_cm - c2n - self.goal\r\n # rotating coordinate such that nose is axis-aligned (nose frame)\r\n # (no effect when \\theta_{nose} = 0)\r\n c2n_x = np.array([cth[self.nose], sth[self.nose]])\r\n c2n_y = np.array([-sth[self.nose], cth[self.nose]])\r\n Tcn = np.array([np.sum(T * c2n_x), np.sum(T * c2n_y)])\r\n\r\n # velocity at each joint relative to center of mass velocity\r\n vx = -np.dot(M, sth * self.dtheta)\r\n vy = np.dot(M, cth * self.dtheta)\r\n # velocity at nose (world frame) relative to center of mass velocity\r\n v2n = np.array([vx[self.nose], vy[self.nose]])\r\n # rotating nose velocity to be in nose frame\r\n Vcn = np.array([np.sum((self.v_cm + v2n) * c2n_x),\r\n np.sum((self.v_cm + v2n) * c2n_y)])\r\n # angles should be in [-pi, pi]\r\n ang = np.mod(\r\n self.theta[1:] - self.theta[:-1] + np.pi,\r\n 2 * np.pi) - np.pi\r\n return Tcn, ang, Vcn, self.dtheta", "def jacobian_linear_symb(\n self, joint_angles: dict, pose_term=False, ee_keys=None\n ) -> dict:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n\n if ee_keys is None:\n end_effector_nodes = []\n for ee in self.end_effectors: # get p nodes in end-effectors\n if ee[0][0] == \"p\":\n end_effector_nodes += [ee[0]]\n else:\n end_effector_nodes += [ee[1]]\n else:\n end_effector_nodes = ee_keys\n # Ts = self.get_all_poses_symb(joint_angles) # all frame poses\n Ts = self.get_all_poses(joint_angles) # all frame 
poses\n J = {} # np.zeros([0, len(node_names) - 1])\n for ee in end_effector_nodes: # iterate through end-effector nodes\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. joint locations\n\n T_0_ee = Ts[ee].as_matrix() # ee frame\n if pose_term:\n dZ = np.array([0.0, 0.0, 1.0])\n p_ee = T_0_ee[0:3, 0:3] @ dZ + T_0_ee[0:3, -1]\n else:\n p_ee = T_0_ee[0:3, -1] # ee position\n\n Jp = np.zeros([3, self.n], dtype=object) # translation jac\n for joint in ee_path: # algorithm fills Jac per column\n T_0_i = Ts[list(self.parents.predecessors(joint))[0]].as_matrix()\n z_hat_i = T_0_i[:3, 2]\n if pose_term:\n p_i = T_0_i[0:3, 0:3] @ dZ + T_0_i[0:3, -1]\n else:\n p_i = T_0_i[:3, -1]\n j_idx = int(joint[1:]) - 1 # node_names.index(joint) - 1\n Jp[:, j_idx] = cross_symb(z_hat_i, p_ee - p_i)\n J[ee] = Jp\n return J", "def test_endgameStrategy(self):\n self.result = \"\"\"\n 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 1 2 3 3 2 1 0 0\n 0 0 1 3 x x x x 1 0 0\n 0 0 2 x x 6 x 5 2 0 0\n 0 0 3 x 4 4 x x 2 0 0\n 0 0 3 x 5 5 x x 2 0 0\n 0 0 2 x x x x 3 1 0 0\n 0 0 1 2 3 3 2 1 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0\n \"\"\"", "def trigger_points(self, eouts, elens):\n bs, xmax, _ = eouts.size()\n log_probs = torch.log_softmax(self.output(eouts), dim=-1)\n best_paths = log_probs.argmax(-1)\n hyps = []\n for b in range(bs):\n indices = [best_paths[b, t].item() for t in range(elens[b])]\n collapsed_indices = [x[0] for x in groupby(indices)]\n best_hyp = [x for x in filter(lambda x: x != self.blank, collapsed_indices)]\n hyps.append(best_hyp)\n ymax = max([len(h) for h in hyps])\n trigger_points_pred = log_probs.new_zeros((bs, ymax + 1), dtype=torch.int32)\n for b in range(bs):\n n_triggers = 0\n for t in range(elens[b]):\n token_idx = best_paths[b, t]\n if token_idx == self.blank:\n continue\n if not (t == 0 or token_idx != best_paths[b, t - 1]):\n continue\n trigger_points_pred[b, n_triggers] = t\n n_triggers += 1\n return trigger_points_pred", "def hessian_cost(self, joint_angles: dict, ee_goals) -> np.ndarray:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = ee_goals.keys()\n H = np.zeros((self.n, self.n))\n for (\n ee\n ) in end_effector_nodes: # iterate through end-effector nodes, assumes sorted\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. 
joint locations\n t_ee = self.get_pose(joint_angles, ee).trans\n dg_ee_x = t_ee[0] - ee_goals[ee].trans[0]\n dg_ee_y = t_ee[1] - ee_goals[ee].trans[1]\n for (pdx, joint_p) in enumerate(ee_path): # algorithm fills Hess per column\n p_idx = int(joint_p[1:]) - 1\n sin_p_term = 0.0\n cos_p_term = 0.0\n for jdx in range(pdx, len(ee_path)):\n node_jdx = ee_path[jdx]\n theta_jdx = sum([joint_angles[key] for key in ee_path[0 : jdx + 1]])\n sin_p_term += self.a[node_jdx] * np.sin(theta_jdx)\n cos_p_term += self.a[node_jdx] * np.cos(theta_jdx)\n\n for (qdx, joint_q) in enumerate(\n ee_path[pdx:]\n ): # TODO: check if starting from pdx works\n qdx = qdx + pdx\n q_idx = int(joint_q[1:]) - 1\n sin_q_term = 0.0\n cos_q_term = 0.0\n for kdx in range(qdx, len(ee_path)):\n node_kdx = ee_path[kdx]\n theta_kdx = sum(\n [joint_angles[key] for key in ee_path[0 : kdx + 1]]\n )\n sin_q_term += self.a[node_kdx] * np.sin(theta_kdx)\n cos_q_term += self.a[node_kdx] * np.cos(theta_kdx)\n\n # assert(q_idx >= p_idx)\n H[p_idx, q_idx] += (\n 2.0 * sin_q_term * sin_p_term\n - 2.0 * dg_ee_x * cos_q_term\n + 2.0 * cos_p_term * cos_q_term\n - 2.0 * dg_ee_y * sin_q_term\n )\n\n return H + H.T - np.diag(np.diag(H))", "def get_end_pos(cls, start_pos, dimensions, left=False, up=False):\n dx = (dimensions[0] - 1) * cls.width\n if left: dx = -dx\n dy = (dimensions[1] -1 ) * cls.height\n if up: dy = -dy\n \n end_pos = start_pos[0] + dx, start_pos[1] + dy\n return end_pos", "def get_unhindered_positions(self, endposition):\n current_position = self.position\n potential_positions = { \n 'diag1' : [],\n 'diag2' : [],\n 'diag3' : [],\n 'diag4' : []\n }\n space_down = current_position[0]-1\n space_up = self.ncols-current_position[0]\n space_right = self.nrows - (ord('H')-ord(current_position[1]))\n space_left = ord(current_position[1]) - ord('A')\n\n for i in range(1, space_down+1):\n diag1 = (current_position[0]-i, chr(ord(current_position[1])+i))\n diag2 = (current_position[0]-i, chr(ord(current_position[1])-i))\n if self.pos_within_bounds(diag1):\n potential_positions['diag1'].append(diag1)\n if self.pos_within_bounds(diag2):\n potential_positions['diag2'].append(diag2)\n\n for i in range(1, space_up+1):\n diag3 = (current_position[0]+i, chr(ord(current_position[1])+i))\n diag4 = (current_position[0]+i, chr(ord(current_position[1])-i))\n if self.pos_within_bounds(diag3):\n potential_positions['diag3'].append(diag3)\n if self.pos_within_bounds(diag4):\n potential_positions['diag4'].append(diag4)\n \n for direction, square in potential_positions.items():\n if tuple(endposition) in square:\n return potential_positions[direction]", "def _get_output_steps_to_beam_indices(self, end_state: Tensor, beam_prev_indices: Tensor) ->List[int]:\n present_position = int(end_state[1])\n beam_index = int(end_state[2])\n beam_indices = torch.jit.annotate(List[int], [])\n while present_position >= 0:\n beam_indices.insert(0, beam_index)\n beam_index = int(beam_prev_indices[present_position][beam_index])\n present_position = present_position - 1\n return beam_indices", "def target_position(self, time):\n # get joint positions and use fk to get end effector position?\n # ar_tag from topic\n\n cur_pos = self.target_velocity(time)*time + self.start_pos\n\n self.points_generated.append(cur_pos)\n #print(self.start_pos)\n # print(cur_pos)\n return cur_pos", "def joint_variables(self, G: nx.Graph, T_final: dict = None) -> np.ndarray:\n # TODO: make this more readable\n tol = 1e-10\n q_zero = list_to_variable_dict(self.n * [0])\n kinematic_map = 
self.kinematic_map\n parents = self.parents\n get_pose = self.get_pose\n\n T = {}\n T[\"p0\"] = self.T_base\n theta = {}\n\n for ee in self.end_effectors:\n path = kinematic_map[\"p0\"][ee[0]][1:]\n axis_length = self.axis_length\n for node in path:\n aux_node = f\"q{node[1:]}\"\n pred = [u for u in parents.predecessors(node)]\n\n T_prev = T[pred[0]]\n\n T_prev_0 = get_pose(q_zero, pred[0])\n T_0 = get_pose(q_zero, node)\n T_rel = T_prev_0.inv().dot(T_0)\n T_0_q = get_pose(q_zero, node).dot(trans_axis(axis_length, \"z\"))\n T_rel_q = T_prev_0.inv().dot(T_0_q)\n\n p = G.nodes[node][POS] - T_prev.trans\n q = G.nodes[aux_node][POS] - T_prev.trans\n ps = T_prev.inv().as_matrix()[:3, :3].dot(p)\n qs = T_prev.inv().as_matrix()[:3, :3].dot(q)\n\n zs = skew(np.array([0, 0, 1]))\n cp = (T_rel.trans - ps) + zs.dot(zs).dot(T_rel.trans)\n cq = (T_rel_q.trans - qs) + zs.dot(zs).dot(T_rel_q.trans)\n ap = zs.dot(T_rel.trans)\n aq = zs.dot(T_rel_q.trans)\n bp = zs.dot(zs).dot(T_rel.trans)\n bq = zs.dot(zs).dot(T_rel_q.trans)\n\n c0 = cp.dot(cp) + cq.dot(cq)\n c1 = 2 * (cp.dot(ap) + cq.dot(aq))\n c2 = 2 * (cp.dot(bp) + cq.dot(bq))\n c3 = ap.dot(ap) + aq.dot(aq)\n c4 = bp.dot(bp) + bq.dot(bq)\n c5 = 2 * (ap.dot(bp) + aq.dot(bq))\n\n # poly = [c0 -c2 +c4, 2*c1 - 2*c5, 2*c0 + 4*c3 -2*c4, 2*c1 + 2*c5, c0 + c2 + c4]\n diff = np.array(\n [\n c1 - c5,\n 2 * c2 + 4 * c3 - 4 * c4,\n 3 * c1 + 3 * c5,\n 8 * c2 + 4 * c3 - 4 * c4,\n -4 * c1 + 4 * c5,\n ]\n )\n if all(diff < tol):\n theta[node] = 0\n else:\n sols = np.roots(\n diff\n ) # solutions to the Whaba problem for fixed axis\n\n def error_test(x):\n if abs(x.imag) > 0:\n return 1e6\n x = -2 * arctan2(x.real, 1)\n return (\n c0\n + c1 * sin(x)\n - c2 * cos(x)\n + c3 * sin(x) ** 2\n + c4 * cos(x) ** 2\n - c5 * sin(2 * x) / 2\n )\n\n sol = min(sols, key=error_test)\n theta[node] = -2 * arctan2(sol.real, 1)\n\n T[node] = (T_prev.dot(rot_axis(theta[node], \"z\"))).dot(T_rel)\n\n if T_final is None:\n return theta\n\n if (\n T_final[ee[0]] is not None\n and norm(cross(T_rel.trans, np.array([0, 0, 1]))) < tol\n ):\n T_th = (T[node]).inv().dot(T_final[ee[0]]).as_matrix()\n theta[ee[0]] += np.arctan2(T_th[1, 0], T_th[0, 0])\n\n return theta", "def __path_to_end(self) -> List[List[int]]:\n predecessors = self.__predecessors_list()\n path = []\n\n row_exit, col_exit = Player.find_exit_position(self.__labyrinth)\n dest = self.__convert_position(row_exit, col_exit)\n\n v = dest\n\n path.append([v // 10, v % 10])\n\n while predecessors[v] != -1:\n path.append(predecessors[v])\n v = self.__convert_position(predecessors[v][0], predecessors[v][1])\n\n return path[::-1]", "def _generate_real_coordinates_according_to_compartment(self):\n self.compartments = self._get_sorted_compartments()\n final_compartments = {}\n left_current_x, left_current_y = X_ENTITY_DISTANCE, Y_ENTITY_DISTANCE\n right_current_x, right_current_y = self.process_glyph_x + 174, Y_ENTITY_DISTANCE\n\n for compartments in self.compartments.get(\"both_side_comps\"):\n for compartment, entities in compartments.items():\n comp_x, comp_y = left_current_x, left_current_y\n comp_width, comp_height = 0, 0\n left_current_x += X_ENTITY_DISTANCE\n left_current_y += Y_ENTITY_DISTANCE\n right_current_y += Y_ENTITY_DISTANCE\n for left_entity in entities.get(\"left_side\"):\n self._recalculate_coordinates(left_entity, left_current_x, left_current_y)\n left_current_y += left_entity[\"size\"][\"height\"] + Y_ENTITY_DISTANCE\n if left_current_y - comp_y > comp_height:\n comp_height = left_current_y - comp_y\n for right_entity in 
entities.get(\"right_side\"):\n self._recalculate_coordinates(right_entity, right_current_x, right_current_y)\n right_current_y += right_entity[\"size\"][\"height\"] + Y_ENTITY_DISTANCE\n if right_current_y - comp_y > comp_height:\n comp_height = right_current_y - comp_y\n if right_current_x + right_entity[\"size\"][\"width\"] + X_ENTITY_DISTANCE - comp_x > comp_width:\n comp_width = right_current_x + right_entity[\"size\"][\"width\"] + X_ENTITY_DISTANCE - comp_x\n\n final_compartments[compartment] = {\n \"coordinates\": {\n \"x\": comp_x,\n \"y\": comp_y,\n \"width\": comp_width,\n \"height\": comp_height\n }\n }\n\n if left_current_y > right_current_y:\n left_current_y, right_current_y = left_current_y, left_current_y\n else:\n left_current_y, right_current_y = right_current_y, right_current_y\n\n left_current_x = X_ENTITY_DISTANCE\n right_current_x = self.process_glyph_x + 174\n left_current_y += Y_ENTITY_DISTANCE\n right_current_y += Y_ENTITY_DISTANCE\n\n for compartments in self.compartments.get(\"left_side_comps\"):\n for compartment, entities in compartments.items():\n comp_x, comp_y = left_current_x, left_current_y\n comp_width, comp_height = 0, 0\n left_current_x += X_ENTITY_DISTANCE\n left_current_y += Y_ENTITY_DISTANCE\n for left_entity in entities.get(\"left_side\"):\n self._recalculate_coordinates(left_entity, left_current_x, left_current_y)\n left_current_y += left_entity[\"size\"][\"height\"] + Y_ENTITY_DISTANCE\n if left_current_y - comp_y > comp_height:\n comp_height = left_current_y - comp_y\n if left_current_x + left_entity[\"size\"][\"width\"] + X_ENTITY_DISTANCE - comp_x > comp_width:\n comp_width = left_current_x + left_entity[\"size\"][\"width\"] + X_ENTITY_DISTANCE - comp_x\n\n final_compartments[compartment] = {\n \"coordinates\": {\n \"x\": comp_x,\n \"y\": comp_y,\n \"width\": comp_width,\n \"height\": comp_height\n }\n }\n\n left_current_x = X_ENTITY_DISTANCE\n left_current_y += Y_ENTITY_DISTANCE\n\n for compartments in self.compartments.get(\"right_side_comps\"):\n for compartment, entities in compartments.items():\n comp_x, comp_y = right_current_x, right_current_y\n comp_width, comp_height = 0, 0\n right_current_x += X_ENTITY_DISTANCE\n right_current_y += Y_ENTITY_DISTANCE\n for right_entity in entities.get(\"right_side\"):\n self._recalculate_coordinates(right_entity, right_current_x, right_current_y)\n right_current_y += right_entity[\"size\"][\"height\"] + Y_ENTITY_DISTANCE\n if right_current_y - comp_y > comp_height:\n comp_height = right_current_y - comp_y\n if right_current_x + right_entity[\"size\"][\"width\"] + X_ENTITY_DISTANCE - comp_x > comp_width:\n comp_width = right_current_x + right_entity[\"size\"][\"width\"] + X_ENTITY_DISTANCE - comp_x\n\n final_compartments[compartment] = {\n \"coordinates\": {\n \"x\": comp_x,\n \"y\": comp_y,\n \"width\": comp_width,\n \"height\": comp_height\n }\n }\n\n right_current_x = self.process_glyph_x + 174\n right_current_y += Y_ENTITY_DISTANCE\n\n self.compartments = final_compartments\n max_y = left_current_y if left_current_y > right_current_y else right_current_y\n return right_current_x, max_y", "def compute_fk_velocity(self, jpos, jvel, tgt_frame):\n if isinstance(jpos, list):\n jpos = np.array(jpos)\n if isinstance(jvel, list):\n jvel = np.array(jvel)\n kdl_end_frame = kdl.FrameVel()\n kdl_jnt_angles = joints_to_kdl(jpos)\n kdl_jnt_vels = joints_to_kdl(jvel)\n kdl_jnt_qvels = kdl.JntArrayVel(kdl_jnt_angles, kdl_jnt_vels)\n idx = self.arm_link_names.index(tgt_frame) + 1\n fg = 
self._fk_solver_vel.JntToCart(kdl_jnt_qvels,\n kdl_end_frame,\n idx)\n if fg < 0:\n raise ValueError('KDL Vel JntToCart error!')\n end_twist = kdl_end_frame.GetTwist()\n return np.array([end_twist[0], end_twist[1], end_twist[2],\n end_twist[3], end_twist[4], end_twist[5]])", "def calculate_E(self):\n \n E = 0\n for i in xrange(self.size):\n Ei = self.h[i]\n Ei += 0.5*sum((1 if self.spins[j] else -1)*self.J[i,j] for j in self.adjacency[i])\n if not self.spins[i]:\n Ei *= -1\n E += Ei\n \n return E", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def get_exterior(self, x, y, x1, x2, bottom, head_y):\n fx1 = x+(x-x1)*8\n fx2 = x+(x-x2)*8\n # compute bounding ellipse; and intersection with body outline\n cv2.ellipse(self.ellipse_finder, ((x/mscale,y/mscale), ((fx1-fx2)/mscale, (2*(bottom-head_y))/mscale), 0), 255,-1 )\n intersection = np.bitwise_and(255-self.ellipse_finder, self.median_finder)\n # find external blobs\n im2, out_contours, out_hierarchy = cv2.findContours(intersection,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n return out_contours, out_hierarchy, fx1-fx2", "def erosion_deposition(self):\n\n # assign variables\n erdep = 'erdep' # kg/m^2s\n sedflux = 'flux' # kg/ms\n\n # parse, advance, and stamp time\n (evolved_elevation, time, depth, sediment_flux, erosion_deposition,\n difference) = self.parse_time()\n\n # compute slope and partial derivatives\n slope, dx, dy = self.compute_slope()\n\n # hydrologic simulation\n depth = self.simwe(dx, dy, depth)\n\n # erosion-deposition simulation\n gscript.run_command(\n 'r.sim.sediment',\n elevation=self.elevation,\n water_depth=depth,\n dx=dx,\n dy=dy,\n detachment_coeff=self.detachment,\n transport_coeff=self.transport,\n shear_stress=self.shearstress,\n man=self.mannings,\n erosion_deposition=erdep,\n niterations=self.rain_interval,\n nwalkers=self.walkers,\n nprocs=self.threads,\n overwrite=True)\n\n # filter outliers\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erosion_deposition}\"\n \"=if({erdep}<{erdepmin},\"\n \"{erdepmin},\"\n \"if({erdep}>{erdepmax},{erdepmax},{erdep}))\".format(\n erosion_deposition=erosion_deposition,\n erdep=erdep,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax),\n overwrite=True)\n gscript.run_command(\n 'r.colors',\n map=erosion_deposition,\n raster=erdep)\n\n # evolve landscape\n \"\"\"\n change in elevation (m)\n = change in time (s)\n * net erosion-deposition (kg/m^2s)\n / sediment mass density (kg/m^3)\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{evolved_elevation}\"\n \"={elevation}\"\n 
\"+({rain_interval}*60\"\n \"*{erosion_deposition}\"\n \"/{density})\".format(\n evolved_elevation=evolved_elevation,\n elevation=self.elevation,\n rain_interval=self.rain_interval,\n erosion_deposition=erosion_deposition,\n density=self.density),\n overwrite=True)\n\n # fill sinks\n if self.fill_depressions:\n evolved_elevation = self.fill_sinks(evolved_elevation)\n\n # gravitational diffusion\n evolved_elevation = self.gravitational_diffusion(evolved_elevation)\n\n # compute elevation change\n difference = self.compute_difference(evolved_elevation, difference)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['erdep',\n 'dx',\n 'dy'],\n flags='f')\n\n return (evolved_elevation, time, depth, erosion_deposition, difference)", "def get_end_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc_after(\n self.raw,\n )", "def end(self) -> pdarray:\n return self._ends", "def end(self, *args):\n return _ida_hexrays.qvector_lvar_t_end(self, *args)", "def end(self, *args):\n return _ida_hexrays.qvector_ccase_t_end(self, *args)", "def calculateExteriorElementBoundaryQuadrature(self):\n #\n #get physical locations of element boundary quadrature points\n #\n #assume all components live on the same mesh\n self.u[0].femSpace.elementMaps.getValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,\n self.ebqe['x'])\n #\n #get metric tensor and unit normals\n #\n if self.movingDomain:\n if self.tLast_mesh != None:\n self.ebqe['xt'][:]=self.ebqe['x']\n self.ebqe['xt']-=self.ebqe['x_last']\n alpha = 1.0/(self.t_mesh - self.tLast_mesh)\n self.ebqe['xt']*=alpha\n else:\n self.ebqe['xt'][:]=0.0\n self.ebqe['x_last'][:]=self.ebqe['x']\n self.u[0].femSpace.elementMaps.getJacobianValuesGlobalExteriorTrace_movingDomain(self.elementBoundaryQuadraturePoints,\n self.ebqe['xt'],\n self.ebqe['inverse(J)'],\n self.ebqe['g'],\n self.ebqe['sqrt(det(g))'],\n self.ebqe['n'])\n else:\n self.u[0].femSpace.elementMaps.getJacobianValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,\n self.ebqe['inverse(J)'],\n self.ebqe['g'],\n self.ebqe['sqrt(det(g))'],\n self.ebqe['n'])\n #now map the physical points back to the reference element\n #assume all components live on same mesh\n self.u[0].femSpace.elementMaps.getInverseValuesGlobalExteriorTrace(self.ebqe['inverse(J)'],self.ebqe['x'],self.ebqe['hat(x)'])\n #\n #since the points on the reference boundary may be reordered on many right element boundaries, we\n #have to use an array of reference boundary points on all element boundaries\n #first copy the left reference element boundary quadrature points from the reference element boundary\n self.testSpace[0].getBasisValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,\n self.ebqe[('w',0)])\n cfemIntegrals.calculateWeightedShapeGlobalExteriorTrace(self.mesh.exteriorElementBoundariesArray,\n self.mesh.elementBoundaryElementsArray,\n self.mesh.elementBoundaryLocalElementBoundariesArray,\n self.elementBoundaryQuadratureWeights[('f',0)],\n self.ebqe['sqrt(det(g))'],\n self.ebqe[('w',0)],\n self.ebqe[('w*dS_f',0)])\n self.u[0].femSpace.getBasisGradientValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,\n self.ebqe['inverse(J)'],\n self.ebqe[('grad(v)',0)])\n #setup flux boundary conditions\n self.fluxBoundaryConditionsObjectsDict = dict([(cj,FluxBoundaryConditions(self.mesh,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.ebqe[('x')],\n self.advectiveFluxBoundaryConditionsSetterDict[cj],\n 
self.diffusiveFluxBoundaryConditionsSetterDictDict[cj]))\n for cj in self.advectiveFluxBoundaryConditionsSetterDict.keys()])\n self.ebqe['dS'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n cfemIntegrals.calculateIntegrationWeights(self.ebqe['sqrt(det(g))'],\n self.elementBoundaryQuadratureWeights[('u',0)],\n self.ebqe['dS'])\n for ci in range(self.nc): self.ebqe[('dS',ci)] = self.ebqe['dS']\n #\n self.ellamDiscretization.calculateExteriorElementBoundaryQuadrature(self.ebqe)\n #\n self.coefficients.initializeGlobalExteriorElementBoundaryQuadrature(self.timeIntegration.t,self.ebqe)", "def test_endgameStrategy2(self):\n\n self.result = \"\"\"\n 1 x 1 0 0 2 x 2 1 x 1 0 0 1 x x 1\n 1 1 1 0 0 2 x 3 2 1 1 0 0 1 3 4 3\n 0 0 0 0 0 1 2 x 1 0 0 0 0 0 1 x x\n 0 0 0 0 0 0 1 1 1 0 0 0 0 0 1 2 2\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1\n 1 1 1 0 0 0 0 0 0 0 0 0 0 1 2 x 1\n 1 x 1 0 0 0 0 0 0 0 0 0 0 1 x 2 1\n \"\"\"", "def get_unhindered_positions(self, endposition):\n current_position = self.position\n potential_positions = potential_positions = {\n 'left' : [], \n 'right' : [],\n 'up' : [], \n 'down' : []\n }\n space_down = current_position[0]-1\n space_up = self.ncols-current_position[0]\n space_right = self.nrows - (ord('H')-ord(current_position[1]))\n space_left = ord(current_position[1]) - ord('A')\n\n for i in range(1, space_down+1):\n pos = (current_position[0]-i, current_position[1])\n if self.pos_within_bounds(pos):\n potential_positions['down'].append(pos)\n\n for i in range(1, space_up+1):\n pos = (current_position[0]+i, current_position[1])\n if self.pos_within_bounds(pos):\n potential_positions['up'].append(pos)\n \n for i in range(1, space_left+1):\n pos = (current_position[0], chr(ord(current_position[1])-i))\n if self.pos_within_bounds(pos):\n potential_positions['left'].append(pos)\n\n for i in range(1, space_right+1):\n pos = (current_position[0], chr(ord(current_position[1])+i))\n if self.pos_within_bounds(pos):\n potential_positions['right'].append(pos)\n\n for direction, square in potential_positions.items():\n if tuple(endposition) in square:\n return potential_positions[direction]", "def joint_limits_upper_constraint(q,ee_pos):\n return self.max_angles - q", "def apply_boundary_conditions(self):\n E = self.__mesh.get_edge_list()\n\n # Top and bottom wall Dirichlet bcs (boundary_id = 21)\n \n e21_iterator = self.__mesh.edge_iterator(21)\n\n self.edge_center_value[e21_iterator[0]:e21_iterator[1]+1] = 0.0 \n \n # Left Dirichlet bc (boundary_id = 2)\n \n e2_iterator = self.__mesh.edge_iterator(2)\n\n b = np.sin(np.pi*self.y_e[e2_iterator[0]:e2_iterator[1]+1])\n\n self.edge_center_value[e2_iterator[0]:e2_iterator[1]+1] \\\n = b\n \n # Right Neumann bc (Zero flux, boundary_id = 3)\n \n e3_iterator = self.__mesh.edge_iterator(3)\n \n for i in range(e3_iterator[0], e3_iterator[1]+1):\n LC = E[i].get_straddling_cells()\n n = LC.get_global_cell_number() - 1\n self.edge_center_value[i] = self.cell_centroid_value[n]", "def get_final_pruned_indices(self):\n return self.final_pruned_indices", "def end(self, *args):\n return _ida_hexrays.qvector_carg_t_end(self, *args)", "def get_fk(self, joints):\n\n header = Header()\n header.frame_id = self.group.get_planning_frame()\n\n robot_state = self.robot.get_current_state()\n robot_state.joint_state.position = joints\n\n links = [self.group.get_end_effector_link()]\n\n return self.fk_solver(header, links, robot_state).pose_stamped[0]", "def _set_end(self, coordinates):\n self._end = 
coordinates", "def jacobian_linear(self, joint_angles: dict, query_frame: str = \"\") -> np.ndarray:\n\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = []\n for ee in self.end_effectors: # get p nodes in end-effectors\n if ee[0][0] == \"p\":\n end_effector_nodes += [ee[0]]\n if ee[1][0] == \"p\":\n end_effector_nodes += [ee[1]]\n\n node_names = [\n name for name in self.structure if name[0] == \"p\"\n ] # list of p node ids\n\n # Ts = self.get_full_pose_fast_lambdify(joint_angles) # all frame poses\n Ts = self.get_all_poses(joint_angles) # all frame poses\n Ts[\"p0\"] = np.eye(4)\n\n J = np.zeros([0, len(node_names) - 1])\n for ee in end_effector_nodes: # iterate through end-effector nodes\n ee_path = kinematic_map[ee][:-1] # no last node, only phys. joint locations\n\n T_0_ee = Ts[ee] # ee frame\n p_ee = T_0_ee[0:3, -1] # ee position\n\n Jp_t = np.zeros([3, len(node_names) - 1]) # translation jac for theta\n Jp_al = np.zeros([3, len(node_names) - 1]) # translation jac alpha\n for joint in ee_path: # algorithm fills Jac per column\n T_0_i = Ts[joint]\n z_hat_i = T_0_i[:3, 2]\n x_hat_i = T_0_i[:3, 0]\n p_i = T_0_i[:3, -1]\n j_idx = node_names.index(joint)\n Jp_t[:, j_idx] = np.cross(z_hat_i, p_ee - p_i)\n Jp_al[:, j_idx] = np.cross(x_hat_i, p_ee - p_i)\n\n J_ee = np.vstack([Jp_t, Jp_al])\n J = np.vstack([J, J_ee]) # stack big jac for multiple ee\n\n return J", "def calc_ell_list(chain) :\n ell_list = np.zeros(len(chain.bridges_dict))\n \n for b in chain.bridges_dict.keys() :\n i, j = chain.bridges_dict[b].lumen1, chain.bridges_dict[b].lumen2\n L_i, pos_i = chain.lumens_dict[i].length, chain.lumens_dict[i].pos\n L_j, pos_j = chain.lumens_dict[j].length, chain.lumens_dict[j].pos\n \n chain.bridges_dict[b].length = np.abs(pos_j - pos_i) - (L_i + L_j)", "def end_position(variant_obj):\n alt_bases = len(variant_obj['alternative'])\n num_bases = max(len(variant_obj['reference']), alt_bases)\n return variant_obj['position'] + (num_bases - 1)", "def getEnds(self) -> List[int]:\n ...", "def joint_angles_callback(self, msg):\n\t\t\t# read the current joint angles from the robot\n\t\t\tpos_curr = np.array([msg.joint1,msg.joint2,msg.joint3,msg.joint4,msg.joint5,msg.joint6,msg.joint7]).reshape((7,1))\n\n\t\t\t# convert to radians\n\t\t\tpos_curr = pos_curr*(math.pi/180.0)\n\n\t\t\t# update torque from PID based on current position\n\t\t\tself.torque = self.Impedence_control(pos_curr)", "def _get_observation_upper_bound(self):\n upper_bound = np.zeros(self._get_observation_dimension())\n num_motors = self.rex.num_motors\n upper_bound[0:num_motors] = math.pi # Joint angle.\n upper_bound[num_motors:2 * num_motors] = motor.MOTOR_SPEED_LIMIT # Joint velocity.\n upper_bound[2 * num_motors:3 * num_motors] = motor.OBSERVED_TORQUE_LIMIT # Joint torque.\n upper_bound[3 * num_motors:-7] = 1.0 # Quaternion of base orientation.\n upper_bound[-7] = 1.0 # ratio in [0,1]\n upper_bound[-6:-2] = [1.0, 1.0, 1.0, 1.0] # sin in [-1, 1]\n upper_bound[-2:] = [self.max_speed, self.max_side_speed]\n\n return upper_bound", "def _propagate_eci(self, when_utc):\n # Orbit parameters\n sma = self._sma\n ecc = self._ecc\n p = sma * (1 - ecc ** 2)\n inc = radians(self._inc)\n raan = radians(self._raan)\n argp = radians(self._argp)\n ta = radians(self._ta)\n\n delta_t_sec = (when_utc - self._epoch).total_seconds()\n\n # Propagate\n position_eci, velocity_eci = kepler(argp, delta_t_sec, ecc, inc, p, raan, sma, ta)\n\n return tuple(position_eci), tuple(velocity_eci)", "def 
_finish_paths(self, term_mask, append_vals=False, append_cvals=False):\n if not term_mask.any():\n return np.logical_not(term_mask)\n\n # We do not count env time out (mature termination) as true terminal state, append values\n if append_vals:\n if self.rollout_mode=='iv_gae':\n last_val = self.policy.get_v(self._current_observation[:,term_mask], factored=False)\n else:\n last_val = self.policy.get_v(self._current_observation[term_mask], factored=True)[self.v_inds[term_mask],np.arange(term_mask.sum())]\n else:\n # init final values\n if self.rollout_mode=='iv_gae':\n last_val = np.zeros(shape=(self.ensemble_size, term_mask.sum()))\n else:\n last_val = np.zeros(shape=(term_mask.sum()))\n\n if append_cvals:\n if self.rollout_mode=='iv_gae':\n last_cval = self.policy.get_vc(self._current_observation[:,term_mask], factored=False)\n else:\n last_cval = self.policy.get_vc(self._current_observation[term_mask], factored=True)[self.vc_inds[term_mask],np.arange(term_mask.sum())]\n else:\n # init final values\n if self.rollout_mode=='iv_gae':\n last_cval = np.zeros(shape=(self.ensemble_size, term_mask.sum()))\n else:\n last_cval = np.zeros(shape=(term_mask.sum()))\n\n self.pool.finish_path_multiple(term_mask, last_val, last_cval)\n\n remaining_path_mask = np.logical_not(term_mask)\n\n return remaining_path_mask", "def get_start_positions(img_in):\n\n def initialize_coordinates(kernel_h, kernel_w):\n \"\"\" locates positions of interest by traversing eroded image and\n saves 9 points on each area of interest to global matrix\n :param kernel_h height of kernel used for harsh erosion\n :param kernel_w width of kernel used for harsh erosion\"\"\"\n global init_coords\n\n count = 0\n y = 0\n while y < frame_height - kernel_h:\n x = 0\n while x < frame_width - kernel_w:\n locator = img[y:y+kernel_h, x:x+kernel_w, 2] > 0 + numpy.zeros((kernel_h, kernel_w))\n if numpy.any(locator):\n if count == 0:\n init_coords[count][0][0] = y - 2\n init_coords[count][0][1] = x + 2\n elif count == 1:\n init_coords[count][0][0] = y + 2\n init_coords[count][0][1] = x + 2\n elif count == 2:\n init_coords[count][0][0] = y + 2\n init_coords[count][0][1] = x + 2\n elif count == 3:\n init_coords[count][0][0] = y - 3\n init_coords[count][0][1] = x + 2\n elif count == 4:\n init_coords[count][0][0] = y + 3\n init_coords[count][0][1] = x - 5\n count += 1\n break\n x += kernel_w\n y += kernel_h\n\n # store 8 more points for each body part\n f = 1.5\n for count in range(5):\n init_coords[count][1][1] = init_coords[count][0][1] + 3*f\n init_coords[count][1][0] = init_coords[count][0][0] + 0\n init_coords[count][2][1] = init_coords[count][0][1] + 6*f\n init_coords[count][2][0] = init_coords[count][0][0] + 0\n init_coords[count][3][1] = init_coords[count][0][1] + 0\n init_coords[count][3][0] = init_coords[count][0][0] + 3*f\n init_coords[count][4][1] = init_coords[count][0][1] + 3*f\n init_coords[count][4][0] = init_coords[count][0][0] + 3*f\n init_coords[count][5][1] = init_coords[count][0][1] + 6*f\n init_coords[count][5][0] = init_coords[count][0][0] + 3*f\n init_coords[count][6][1] = init_coords[count][0][1] + 0\n init_coords[count][6][0] = init_coords[count][0][0] + 6*f\n init_coords[count][7][1] = init_coords[count][0][1] + 3*f\n init_coords[count][7][0] = init_coords[count][0][0] + 6*f\n init_coords[count][8][1] = init_coords[count][0][1] + 6*f\n init_coords[count][8][0] = init_coords[count][0][0] + 6*f\n\n limb_coords[0][0][0] = init_coords[0][5][0]\n limb_coords[0][0][1] = init_coords[0][5][1]\n limb_coords[1][0][0] = 
init_coords[1][5][0]\n limb_coords[1][0][1] = init_coords[1][5][1]\n limb_coords[2][0][0] = init_coords[2][5][0]\n limb_coords[2][0][1] = init_coords[2][5][1]\n limb_coords[3][0][0] = init_coords[3][5][0]\n limb_coords[3][0][1] = init_coords[3][5][1]\n limb_coords[4][0][0] = init_coords[4][5][0]\n limb_coords[4][0][1] = init_coords[4][5][1]\n\n img = img_in.copy()\n img = segment_red(img, 205, 135)\n erode(img, 14, 12)\n initialize_coordinates(14, 12)", "def at_b (self):\n self.argc = int((len(n.coord[0]))/2)\n self.pts_con = np.array(self.coord[:,self.argc:len(n.coord[0])])\n\n self.xd = self.xdi\n self.zd = self.zdi \n \n for i, x in enumerate(self.xdi):\n self.aux_con = self.pts_con[0] - x \n self.arg1 = np.argmin(abs(self.aux_con)) \n \n if (self.aux_con[self.arg1] < 0 and self.arg1 == 0) or (self.aux_con[self.arg1] > 0 and self.arg1 == len(self.aux_con)-1):\n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif (self.aux_con[self.arg1] > 0 and self.aux_con[self.arg1+1] > self.aux_con[self.arg1]): #(self.aux_con[self.arg1] < 0 and self.aux_con[self.arg1-1] > self.aux_con[self.arg1]) or \n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif self.aux_con[self.arg1] < 0:\n #print(self.arg1)\n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 - 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1])\n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n\n elif self.aux_con[self.arg1] > 0:\n #print(self.arg1) \n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 + 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1]) \n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n \n #print('Defensa {0}\\n{1}: {2}\\n{3}: {4}'.format(i,self.arg1,self.aux_con[self.arg1],self.arg2,self.aux_con[self.arg2])) \n \n #self.yd = self.yd\n self.b = np.array([self.xd,self.yd,self.zd])\n #self.b.loc[:,('y')] = self.b.loc[:,('y')] ", "def _calculate_h(self, end):\n return PATH_COST * (abs(self.x - end.x) + abs(self.y - end.y))", "def update_model(self, sim, joint_index, id_name='right_hand'):\n self.model_timestep = sim.model.opt.timestep\n self.interpolation_steps = np.floor(self.ramp_ratio * self.control_freq / self.model_timestep)\n self.current_position = sim.data.body_xpos[sim.model.body_name2id(id_name)]\n self.current_orientation_mat = sim.data.body_xmat[sim.model.body_name2id(id_name)].reshape([3, 3])\n self.current_lin_velocity = sim.data.body_xvelp[sim.model.body_name2id(id_name)]\n self.current_ang_velocity = sim.data.body_xvelr[sim.model.body_name2id(id_name)]\n\n self.current_joint_position = sim.data.qpos[joint_index]\n self.current_joint_velocity = sim.data.qvel[joint_index]\n\n self.Jx = sim.data.get_body_jacp(id_name).reshape((3, -1))[:, joint_index]\n self.Jr = sim.data.get_body_jacr(id_name).reshape((3, -1))[:, joint_index]\n self.J_full = np.vstack([self.Jx, self.Jr])\n # print(self.current_position)", "def get_es(self):\n\t\tif not self.data_p.has_key(\"delta\"):\n\t\t\tself.getdelta()\n\t\tself.data_p[\"endsimmer\"] = self.data_p[\"tau_nuc\"]/self.data_p[\"tau_cdyn\"]*self.data_p[\"delta\"]**0.5", "def compute_torques(self, caller):\n if caller == 'pose':\n self.pose_call = True\n if caller == 'vel':\n self.vel_call = True\n #If both vels and poses has called compute torques\n if self.pose_call and 
self.vel_call:\n #Reset checkers\n self.pose_call = False\n self.vel_call = False\n #Vels and poses\n # print \"Heard:\"\n # print \" \".join(str(n) for n in self.joints_vels)\n # print \" \".join(str(n) for n in self.joints_poses)\n #Compute B g and C matrices\n array_vels = np.asarray(self.joints_vels)[np.newaxis].T\n array_poses = np.asarray(self.joints_poses)[np.newaxis].T\n # print(\"array_vels\")\n # print(array_vels[2:4])\n # print(\"array_poses\")\n # print(array_poses[2:4])\n err_vels = array_vels[1:4] - self.target_vel\n err_poses = array_poses[1:4] - self.target_pose\n print(\"velocity error:\")\n print(err_vels)\n print(\"position error:\")\n print(err_poses)\n B = np.matrix([[0.0040055721446399998476906034738931*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.0013481452371199999142570291610355*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.011671172651879999466092491395841*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0039281369187899997198368480111341*sin(self.joints_poses[2]) + 0.042812399753418998939427354098797,\\\n 0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0058355863259399997330462456979205*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0019640684593949998599184240055671*sin(self.joints_poses[2]) + 0.01625959562072499985284632093574,\\\n 0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171],\\\n [0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0058355863259399997330462456979205*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0019640684593949998599184240055671*sin(self.joints_poses[2]) + 0.01625959562072499985284632093574,\\\n 0.0040085638208*cos(self.joints_poses[3]) + 0.01618298062072499985284632093574,\\\n 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171],\n [0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171,\\\n 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171,\\\n 0.0026403112045896820614231443819367]])\n\n C = np.matrix([[- 0.176*self.joints_vels[3]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])) - 1.0*self.joints_vels[2]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])),\\\n - 0.176*self.joints_vels[3]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])) - 
1.0*self.joints_vels[1]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])) - 1.0*self.joints_vels[2]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])),\\\n -0.176*(self.joints_vels[1] + self.joints_vels[2] + self.joints_vels[3])*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3]))],\\\n [self.joints_vels[1]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])) - 0.0020042819104*self.joints_vels[3]*sin(self.joints_poses[3]),\\\n -0.0020042819104*self.joints_vels[3]*sin(self.joints_poses[3]),\\\n -0.0020042819104*sin(self.joints_poses[3])*(self.joints_vels[1] + self.joints_vels[2] + self.joints_vels[3])],\\\n [0.0020042819104*self.joints_vels[2]*sin(self.joints_poses[3]) + 0.176*self.joints_vels[1]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])),\\\n 0.0020042819104*sin(self.joints_poses[3])*(self.joints_vels[1] + self.joints_vels[2]),0]])\n\n g = np.array([[0.69474494555999997358275432901564*cos(self.joints_poses[1]) + 0.21649055273999998623105089912144*sin(self.joints_poses[1]) + 0.40336448984999999688544018994207*cos(self.joints_poses[1])*cos(self.joints_poses[2]) - 0.40336448984999999688544018994207*sin(self.joints_poses[1])*sin(self.joints_poses[2]) + 0.1384355808*cos(self.joints_poses[1])*cos(self.joints_poses[2])*cos(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[1])*sin(self.joints_poses[2])*sin(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[2])*sin(self.joints_poses[1])*sin(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[3])*sin(self.joints_poses[1])*sin(self.joints_poses[2])],\\\n [0.1384355808*cos(self.joints_poses[1] + self.joints_poses[2] + self.joints_poses[3]) + 0.40336448984999999688544018994207*cos(self.joints_poses[1] + self.joints_poses[2])],\\\n [ 0.1384355808*cos(self.joints_poses[1] + self.joints_poses[2] + self.joints_poses[3])]])\n #Compute control torque\n control_from_errors = self.target_acc -np.dot(self.KD, err_vels) - np.dot(self.KP, err_poses)\n print(\"Derivative contribution: \")\n print(np.dot(self.KD, err_vels))\n print(\"proportional contribution: \")\n print(np.dot(self.KP, err_poses))\n control_torque = np.dot(C, self.target_vel) + g + np.dot(B, control_from_errors)\n print(\"Torques: \")\n print(control_torque)\n #Create ROS message\n self.torques.layout.dim = [self.torques_layout]\n # self.torques.layout.dim.size = 6\n # self.torques.layout.dim.stride = 1\n self.torques.layout.data_offset = 0\n self.torques.data = [0.0, control_torque[0], 
control_torque[1], control_torque[2], 0.0, 0.0]\n self.torque_pub.publish(self.torques)", "def E(x, y):\n # sum of products of neighboring paris {xi, yi}\n xxm = np.zeros_like(x)\n xxm[:-1, :] = x[1:, :] # down\n xxm[1:, :] += x[:-1, :] # up\n xxm[:, :-1] += x[:, 1:] # right\n xxm[:, 1:] += x[:, :-1] # left\n xx = np.sum(xxm * x)\n xy = np.sum(x * y)\n xsum = np.sum(x)\n return h * xsum - beta * xx - eta * xy", "def getGlobalPosition(self, s, ey):\n\n ### what is ey?? error in y coordinate of vehicle from the track inertial frame?\n\n # wrap s along the track\n while (s > self.TrackLength):\n s = s - self.TrackLength\n\n # Compute the segment in which system is evolving\n PointAndTangent = self.PointAndTangent\n\n index = np.all([[s >= PointAndTangent[:, 3]], [s < PointAndTangent[:, 3] + PointAndTangent[:, 4]]], axis=0)\n ## i = int(np.where(np.squeeze(index))[0])\n i = np.where(np.squeeze(index))[0]\n\n if PointAndTangent[i, 5] == 0.0: # If segment is a straight line\n # Extract the first final and initial point of the segment\n xf = PointAndTangent[i, 0]\n yf = PointAndTangent[i, 1]\n xs = PointAndTangent[i - 1, 0]\n ys = PointAndTangent[i - 1, 1]\n psi = PointAndTangent[i, 2]\n\n # Compute the segment length\n deltaL = PointAndTangent[i, 4]\n reltaL = s - PointAndTangent[i, 3]\n\n # Do the linear combination\n x = (1 - reltaL / deltaL) * xs + reltaL / deltaL * xf + ey * np.cos(psi + np.pi / 2)\n y = (1 - reltaL / deltaL) * ys + reltaL / deltaL * yf + ey * np.sin(psi + np.pi / 2)\n theta = psi\n else:\n r = 1 / PointAndTangent[i, 5] # Extract curvature\n ang = PointAndTangent[i - 1, 2] # Extract angle of the tangent at the initial point (i-1)\n # Compute the center of the arc\n if r >= 0:\n direction = 1\n else:\n direction = -1\n\n CenterX = PointAndTangent[i - 1, 0] \\\n + np.abs(r) * np.cos(ang + direction * np.pi / 2) # x coordinate center of circle\n CenterY = PointAndTangent[i - 1, 1] \\\n + np.abs(r) * np.sin(ang + direction * np.pi / 2) # y coordinate center of circle\n\n spanAng = (s - PointAndTangent[i, 3]) / (np.pi * np.abs(r)) * np.pi\n\n angleNormal = wrap((direction * np.pi / 2 + ang))\n angle = -(np.pi - np.abs(angleNormal)) * (sign(angleNormal))\n\n x = CenterX + (np.abs(r) - direction * ey) * np.cos(\n angle + direction * spanAng) # x coordinate of the last point of the segment\n y = CenterY + (np.abs(r) - direction * ey) * np.sin(\n angle + direction * spanAng) # y coordinate of the last point of the segment\n theta = ang + direction * spanAng\n\n return x, y, theta", "def calculate_coordinates(self):\n # get coordinates for lef side of equation\n self._calculate_for_one_side(self.left_side)\n\n # set process glyph x coordinate\n self.process_glyph_x = self.x_limit + 150\n\n self._calculate_for_one_side(self.right_side, side=\"right_side\")\n\n self.x_limit, self.y_limit = self._generate_real_coordinates_according_to_compartment()\n\n # set process glyph y coordinate\n self.process_glyph_y = self.y_limit / 2\n\n # set final image width, height\n self.x_limit += 10\n self.y_limit += 20", "def reached_final_point():\n return all(point.constraints[b.atom_indexes] == b.final_dist\n for b in self.bonds)", "def vectfit_nuclide(endf_file, njoy_error=5e-4, vf_pieces=None,\n log=False, path_out=None, mp_filename=None, **kwargs):\n\n # ======================================================================\n # PREPARE POINT-WISE XS\n\n # make 0K ACE data using njoy\n if log:\n print(\"Running NJOY to get 0K point-wise data (error={})...\".format(njoy_error))\n\n nuc_ce = 
IncidentNeutron.from_njoy(endf_file, temperatures=[0.0],\n error=njoy_error, broadr=False, heatr=False, purr=False)\n\n if log:\n print(\"Parsing cross sections within resolved resonance range...\")\n\n # Determine upper energy: the lower of RRR upper bound and first threshold\n endf_res = IncidentNeutron.from_endf(endf_file).resonances\n if hasattr(endf_res, 'resolved') and \\\n hasattr(endf_res.resolved, 'energy_max') and \\\n type(endf_res.resolved) is not ResonanceRange:\n E_max = endf_res.resolved.energy_max\n elif hasattr(endf_res, 'unresolved') and \\\n hasattr(endf_res.unresolved, 'energy_min'):\n E_max = endf_res.unresolved.energy_min\n else:\n E_max = nuc_ce.energy['0K'][-1]\n E_max_idx = np.searchsorted(nuc_ce.energy['0K'], E_max, side='right') - 1\n for mt in nuc_ce.reactions:\n if hasattr(nuc_ce.reactions[mt].xs['0K'], '_threshold_idx'):\n threshold_idx = nuc_ce.reactions[mt].xs['0K']._threshold_idx\n if 0 < threshold_idx < E_max_idx:\n E_max_idx = threshold_idx\n\n # parse energy and cross sections\n energy = nuc_ce.energy['0K'][:E_max_idx + 1]\n E_min, E_max = energy[0], energy[-1]\n n_points = energy.size\n total_xs = nuc_ce[1].xs['0K'](energy)\n elastic_xs = nuc_ce[2].xs['0K'](energy)\n\n try:\n absorption_xs = nuc_ce[27].xs['0K'](energy)\n except KeyError:\n absorption_xs = np.zeros_like(total_xs)\n\n fissionable = False\n try:\n fission_xs = nuc_ce[18].xs['0K'](energy)\n fissionable = True\n except KeyError:\n pass\n\n # make vectors\n if fissionable:\n ce_xs = np.vstack((elastic_xs, absorption_xs, fission_xs))\n mts = [2, 27, 18]\n else:\n ce_xs = np.vstack((elastic_xs, absorption_xs))\n mts = [2, 27]\n\n if log:\n print(\" MTs: {}\".format(mts))\n print(\" Energy range: {:.3e} to {:.3e} eV ({} points)\".format(\n E_min, E_max, n_points))\n\n # ======================================================================\n # PERFORM VECTOR FITTING\n\n if vf_pieces is None:\n # divide into pieces for complex nuclides\n peaks, _ = find_peaks(total_xs)\n n_peaks = peaks.size\n if n_peaks > 200 or n_points > 30000 or n_peaks * n_points > 100*10000:\n vf_pieces = max(5, n_peaks // 50, n_points // 2000)\n else:\n vf_pieces = 1\n piece_width = (sqrt(E_max) - sqrt(E_min)) / vf_pieces\n\n alpha = nuc_ce.atomic_weight_ratio/(K_BOLTZMANN*TEMPERATURE_LIMIT)\n\n poles, residues = [], []\n # VF piece by piece\n for i_piece in range(vf_pieces):\n if log:\n print(\"Vector fitting piece {}/{}...\".format(i_piece + 1, vf_pieces))\n # start E of this piece\n e_bound = (sqrt(E_min) + piece_width*(i_piece-0.5))**2\n if i_piece == 0 or sqrt(alpha*e_bound) < 4.0:\n e_start = E_min\n e_start_idx = 0\n else:\n e_start = max(E_min, (sqrt(alpha*e_bound) - 4.0)**2/alpha)\n e_start_idx = np.searchsorted(energy, e_start, side='right') - 1\n # end E of this piece\n e_bound = (sqrt(E_min) + piece_width*(i_piece + 1))**2\n e_end = min(E_max, (sqrt(alpha*e_bound) + 4.0)**2/alpha)\n e_end_idx = np.searchsorted(energy, e_end, side='left') + 1\n e_idx = range(e_start_idx, min(e_end_idx + 1, n_points))\n\n p, r = _vectfit_xs(energy[e_idx], ce_xs[:, e_idx], mts, log=log,\n path_out=path_out, **kwargs)\n\n poles.append(p)\n residues.append(r)\n\n # collect multipole data into a dictionary\n mp_data = {\"name\": nuc_ce.name,\n \"AWR\": nuc_ce.atomic_weight_ratio,\n \"E_min\": E_min,\n \"E_max\": E_max,\n \"poles\": poles,\n \"residues\": residues}\n\n # dump multipole data to file\n if path_out:\n if not os.path.exists(path_out):\n os.makedirs(path_out)\n if not mp_filename:\n mp_filename = 
\"{}_mp.pickle\".format(nuc_ce.name)\n mp_filename = os.path.join(path_out, mp_filename)\n with open(mp_filename, 'wb') as f:\n pickle.dump(mp_data, f)\n if log:\n print(\"Dumped multipole data to file: {}\".format(mp_filename))\n\n return mp_data", "def calculate_com(self):\n vr, vphi, gamma = self.emitter.get_velocities()\n u1, u3, gamma2 = self.emitter.get_rotation_velocities()\n math_v, gamma3 = self.emitter.get_momentum_velocity()\n rho = self.emitter.rho\n\n alpha = 5/2 * self.emitter.get_s() / rho**2\n\n E = self._E(self.chi, self.eta, self.iota, gamma, vphi, gamma2, u1, u3, math_v, gamma3)\n L = self._L(self.chi, self.eta, self.iota, gamma, vphi, gamma2, u1, u3, math_v, gamma3)\n Q = self._Q(self.chi, self.eta, L)\n\n return E, L, Q", "def compute_controller(self):\n \n # here we implement an example for a consensus algorithm\n neig = self.get_neighbors()\n messages = self.get_messages()\n pos, rot = self.get_pos_and_orientation()\n \n #send message of positions to all neighbors indicating our position\n for n in neig:\n self.send_message(n, pos)\n \n # check if we received the position of our neighbors and compute desired change in position\n # as a function of the neighbors (message is composed of [neighbors id, position])\n dx = 0.\n dy = 0.\n if messages:\n for m in messages:\n dx += m[1][0] - pos[0]\n dy += m[1][1] - pos[1]\n # integrate\n des_pos_x = pos[0] + self.dt * dx\n des_pos_y = pos[1] + self.dt * dy\n \n #compute velocity change for the wheels\n vel_norm = np.linalg.norm([dx, dy]) #norm of desired velocity\n if vel_norm < 0.01:\n vel_norm = 0.01\n des_theta = np.arctan2(dy/vel_norm, dx/vel_norm)\n right_wheel = np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n left_wheel = -np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n self.set_wheel_velocity([left_wheel, right_wheel])", "def end(self) -> pos.Pos:\n return self.__end", "def val(self):\n fpath = '/home/shengjie/Documents/Project_SemanticDepth/splits/eigen/test_files.txt'\n val_filenames = readlines(fpath)\n\n btspred = '/media/shengjie/c9c81c9f-511c-41c6-bfe0-2fc19666fb32/Bts_Pred/result_bts_eigen_v2_pytorch_densenet161/pred'\n kittiroot = '/home/shengjie/Documents/Data/Kitti/kitti_raw/kitti_data'\n vlsroot = '/media/shengjie/disk1/visualization/shapeintegrationpred/bts'\n os.makedirs(vlsroot, exist_ok=True)\n\n crph = 352\n crpw = 1216\n\n count = 0\n for entry in val_filenames:\n seq, index, dir = entry.split(' ')\n\n rgb = pil.open(os.path.join(kittiroot, seq, 'image_02', \"data\", \"{}.png\".format(index)))\n\n w, h = rgb.size\n top = int(h - crph)\n left = int((w - crpw) / 2)\n\n calibpath = os.path.join(kittiroot, seq.split('/')[0], 'calib_cam_to_cam.txt')\n cam2cam = read_calib_file(calibpath)\n K = np.eye(4)\n K[0:3, :] = cam2cam['P_rect_02'].reshape(3, 4)\n K[0, 2] = K[0, 2] - left\n K[1, 2] = K[1, 2] - top\n K = torch.from_numpy(K).unsqueeze(0).float()\n\n pred = pil.open(os.path.join(btspred, \"{}_{}.png\".format(seq.split('/')[1], index)))\n pred = np.array(pred).astype(np.float) / 256.0\n\n predtorch = torch.from_numpy(pred).unsqueeze(0).unsqueeze(0).float()\n prednorm = self.sfnormOptimizer.depth2norm(depthMap=predtorch, intrinsic=K)\n\n fig1 = tensor2disp(1 / predtorch, vmax=0.15, ind=0)\n fig2 = tensor2rgb((prednorm + 1) / 2, ind=0)\n\n fig = np.concatenate([np.array(fig1), np.array(fig2)], axis=1)\n pil.fromarray(fig).save(os.path.join(vlsroot, \"{}_{}.png\".format(seq.split('/')[1], str(index).zfill(10))))\n count = count + 1\n print(\"%s finished\" % entry)" ]
[ "0.66042507", "0.5623347", "0.5479054", "0.5479054", "0.53170127", "0.5305009", "0.517255", "0.515755", "0.51083076", "0.5084015", "0.5083615", "0.503888", "0.4970291", "0.49539378", "0.4946133", "0.4942832", "0.49085337", "0.49002567", "0.48989028", "0.48899674", "0.48782575", "0.48716366", "0.48357993", "0.48251703", "0.48156708", "0.48106152", "0.47985956", "0.4788797", "0.47841695", "0.47750777", "0.47716215", "0.4763336", "0.47486994", "0.4739811", "0.47194958", "0.4719348", "0.47154754", "0.4704499", "0.46914205", "0.46838602", "0.46811143", "0.46589234", "0.46560818", "0.4654651", "0.46510512", "0.46456885", "0.46297443", "0.4612739", "0.46060115", "0.4596369", "0.4591239", "0.45782524", "0.4576129", "0.45726055", "0.45619774", "0.45522287", "0.45492244", "0.45385396", "0.45173967", "0.4510981", "0.45070657", "0.44973046", "0.4494444", "0.44936803", "0.44816288", "0.44754326", "0.44734228", "0.44645604", "0.4456136", "0.44551012", "0.44385788", "0.44325915", "0.44272152", "0.44269404", "0.44175673", "0.44139832", "0.4404477", "0.4400266", "0.4394388", "0.43868223", "0.43858957", "0.4378142", "0.43684995", "0.43671328", "0.4366219", "0.43661457", "0.43652344", "0.43621707", "0.43549442", "0.4344106", "0.43371204", "0.43321142", "0.43255135", "0.43217054", "0.4319703", "0.43139926", "0.43139446", "0.43137467", "0.43109876", "0.43095025", "0.42982236" ]
0.0
-1
Compute the jacobian of a finger at configuration q0.
def compute_jacobian(self, finger_id, q0):
    frame_id = self.tip_link_ids[finger_id]
    return pinocchio.computeFrameJacobian(
        self.robot_model,
        self.data,
        q0,
        frame_id,
        pinocchio.ReferenceFrame.LOCAL_WORLD_ALIGNED,
    )
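For context, a minimal self-contained sketch of the pinocchio call this document wraps (assumptions: pinocchio >= 2.x is installed; the bundled sample manipulator model and the last-frame choice are illustrative stand-ins for the original class's robot_model and tip_link_ids):

import pinocchio

# Toy manipulator bundled with pinocchio; stands in for self.robot_model.
model = pinocchio.buildSampleModelManipulator()
data = model.createData()        # workspace object, stands in for self.data

q0 = pinocchio.neutral(model)    # neutral joint configuration
frame_id = model.nframes - 1     # illustrative stand-in for tip_link_ids[finger_id]

J = pinocchio.computeFrameJacobian(
    model, data, q0, frame_id,
    pinocchio.ReferenceFrame.LOCAL_WORLD_ALIGNED,
)
print(J.shape)  # (6, model.nv): linear velocity rows on top, angular rows below

LOCAL_WORLD_ALIGNED expresses the Jacobian at the frame origin with world-aligned axes, which is why snippets such as _compute_dq in the negatives below can slice off the first three rows as a plain positional Jacobian.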
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jacobian(self, dt):\n raise NotImplementedError", "def jacobian(self, x):\n pass", "def jacobian_ur5(q, delta=0.0001):\n # Alocacion de memoria\n J = np.zeros((3,6))\n # Transformacion homogenea inicial (usando q)\n T = fkine_ur5(q)\n # Iteracion para la derivada de cada columna\n for i in xrange(6):\n # Copiar la configuracion articular inicial\n dq = copy(q);\n # Incrementar la articulacion i-esima usando un delta\n dq[i] = dq[i] + delta \n dT = fkine_ur5(dq)\n \n J[:,i] = (dT[0:3, 3] - T[0:3, 3])/delta\n\n return J", "def jacobian(self, dt):\n return self._F_cache", "def jacobian(x, u):\n yaw = x[2, 0]\n v = u[0, 0]\n jac = np.array([\n [1.0, 0.0, -dt * v * math.sin(yaw), dt * math.cos(yaw)],\n [0.0, 1.0, dt * v * math.cos(yaw), dt * math.sin(yaw)],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n\n return jac", "def jacobian(self, v):\n from scipy.special import erf, erfcx\n def integrand(u_arr):\n \"\"\"Integrand of self-consistency equation\"\"\"\n integrand_all = erfcx(-u_arr)\n #integrand_all = np.zeros(u_arr.shape)\n #u_mask = u_arr < -4.0\n #u = u_arr[u_mask]\n #integrand_all[u_mask] = -1. / np.sqrt(np.pi) * (1.0 / u - 1.0 / (2.0 * u**3) + \n #3.0 / (4.0 * u**5) - \n #15.0 / (8.0 * u**7))\n #integrand_all[~u_mask] = np.exp(u_arr[~u_mask]**2) * (1. + erf(u_arr[~u_mask]))\n return integrand_all\n\n\n mu_v = self.mu(v)\n sd_v = self.sd(v)\n low = (self.V_r - mu_v) / sd_v # reduced resting potential\n up = (self.theta - mu_v) / sd_v # reduced threshold\n f_low = integrand(low)\n f_up = integrand(up)\n jac_mat_1 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_mu\n jac_mat_2 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_var / (2. * sd_v**2)\n\n jac_T = np.diag(1. / v**2) - \\\n jac_mat_1.T * (f_up - f_low) + \\\n jac_mat_2.T * (f_up * up - f_low * low)\n return jac_T.T", "def _compute_dq(self, finger_id, xdes, q0):\n Ji = self.compute_jacobian(finger_id, q0)[:3, :]\n frame_id = self.tip_link_ids[finger_id]\n xcurrent = self.data.oMf[frame_id].translation\n Jinv = np.linalg.pinv(Ji)\n return Jinv.dot(xdes - xcurrent)", "def jacobian(self, x1, x2, out=None):\n raise NotImplementedError", "def jacobian(self, c):\n\n raise NotImplementedError", "def jacobian_pose_ur5(q, delta=0.0001):\n J = np.zeros((7,6))\n # Transformacion homogenea inicial (usando q)\n T = fkine_ur5(q)\n Q = rot2quat(T[0:3,0:3])\n\n for i in xrange(6):\n dq = copy(q)\n dq[i] = dq[i] + delta\n dT = fkine_ur5(dq)\n dQ = rot2quat(dT[0:3,0:3])\n Jpos = (dT[0:3,3] - T[0:3,3])/delta\n Jrot = (dQ - Q)/delta\n #Jrot \t= np.squeeze(np.asarray(Jrot))\n J[:,i] = np.concatenate((Jpos, Jrot), axis=0)\n \n return J", "def jacobian_ik(robot, q_init: dict, q_goal: dict, params=None, use_limits=True):\n if params is None:\n tol = 1e-6\n maxiter = 5000\n dt = 1e-3\n method = \"dls_inverse\"\n else:\n tol = params[\"tol\"]\n maxiter = params[\"maxiter\"]\n dt = params[\"dt\"]\n method = params[\"method\"]\n\n n = robot.n\n ub = np.array(variable_dict_to_list(robot.ub))\n lb = np.array(variable_dict_to_list(robot.lb))\n q_bar = (ub + lb) / 2.0\n q = np.array(variable_dict_to_list(q_init))\n\n N_ee = len(robot.end_effectors)\n\n k = 0.01 # DLS jacobian inverse damping factor\n k0 = 20 # joint limit gain\n\n # gains\n K_p = np.eye(3) * 1000 # position gain\n K_o = np.eye(3) * 1000 # orientation gain\n\n K = np.eye(6)\n K[:3, :3] = K_p\n K[3:, 3:] = K_o\n K = np.kron(np.eye(N_ee), K)\n\n count = 0\n\n # Initialize system\n e = error(robot, q, q_goal)\n J, J_star = stacked_jacobians(robot, q)\n ll, llinv = stacked_L(robot, q, 
q_goal)\n q_dot = np.dot(J_star, np.dot(K, np.dot(llinv, e)))\n # loop unitl error is converged AND all joint angles are within bounds.\n while (\n np.linalg.norm(e) > tol or (any((q > ub) | (q < lb)) and use_limits)\n ) and count < maxiter:\n\n J, J_star = stacked_jacobians(robot, q) # get jacobians\n\n e = error(robot, q, q_goal) # Error to goal\n\n ll, llinv = stacked_L(\n robot, q, q_goal\n ) # Accounting for Euler Error (see eqn. 387 on p. 139)\n\n if use_limits:\n q_dot = (\n -k0 / n * (q - q_bar) / (ub - lb) * q_dot\n ) # Joint angle avoidance using eqn. 3.57 on p. 126\n q_dot = np.dot(J_star, np.dot(K, np.dot(llinv, e))) + np.dot(\n (np.eye(n) - np.dot(J_star, J)), q_dot\n )\n\n q = q + q_dot * dt # update joint angles\n q = (q + np.pi) % (2 * np.pi) - np.pi # wrap angles to -pi to pi\n\n if count % 100 == 0:\n print(\"count: %s\" % count)\n print(\"error: %s\" % e)\n print(\"q_dot: %s\", q_dot)\n U, S, V = np.linalg.svd(J)\n cond = np.min(S) / np.max(S)\n print(\"Jacobian condition: %s\" % cond)\n\n print(\"q: %s\" % q)\n count += 1\n\n if count >= maxiter:\n print(\"Did not find config!\")\n print(\"iterations: %s\" % count)\n print(\"error: %s\" % e)\n ja_violations = (q > ub) | (q < lb)\n print(\"Violations: %s\" % ja_violations)\n return q, count\n else:\n\n print(\"Finished\")\n print(\"iterations: %s\" % count)\n print(\"error: %s\" % e)\n print(\"Joint Angles: %s\" % q)\n ja_violations = (q > ub) | (q < lb)\n print(\"Violations: %s\" % ja_violations)\n return q, count", "def jacobian_func(f):\n jacobian = jacfwd(f)\n return jacobian", "def jacobian(self, dt):\n if dt not in self._F_cache:\n d = self._dimension\n with torch.no_grad():\n F = eye_like(self.sa2, d)\n F[: d // 2, d // 2 :] = dt * eye_like(self.sa2, d // 2)\n self._F_cache[dt] = F\n\n return self._F_cache[dt]", "def jacobian(self,x,p,fun):\n n = self.n\n y = fun(x,p)\n h = 1e-4\n nout = np.size(y)\n dfdx = np.zeros((nout,n))\n for j in range(n):\n dx1 = np.zeros(n)\n dx2 = np.zeros(n)\n dx1[j] = -h\n dx2[j] = h\n dfdx[:,j] = (fun(x+dx2,p)-fun(x+dx1,p))/(2*h)\n return dfdx", "def jacobian(f, x, dx):\n x = np.atleast_1d(x)\n dx = np.atleast_1d(dx)\n nx = len(x)\n ny = 0\n jacobi = None\n e = np.zeros(nx)\n for ix in xrange(nx):\n e *= 0\n e[ix] = 1\n deriv = np.atleast_1d((f(x + e * dx) - f(x - e * dx)) / (2 * dx[ix]))\n if ix == 0:\n ny = len(deriv)\n jacobi = np.empty((ny, nx))\n jacobi[:, ix] = deriv\n return jacobi", "def jacobian(f, x, epsilon = 1e-10):\n f_ = f(x)\n value = np.zeros((len(f_), len(x)))\n \n for i in range(len(x)):\n f_ = partial_derivative(f, x, i, epsilon)\n value[:,i] = f_\n\n return value", "def jacobian(self, t, x, u, w):\n a= u[0]\n theta = x[2]\n v = x[3]\n fx = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [-v*np.sin(theta), v*np.cos(theta), 0, 0],\n [np.cos(theta), np.sin(theta), 0, 0]])\n fu = np.array([[0, 0, 0, 1],\n [0, 0, 1, 0]])\n w = w * self.w_scale\n fw = np.array([[np.cos(theta), - np.sin(theta), 0, 0],\n [np.sin(theta), np.cos(theta), 0, 0],\n [0, 0, v, 0],\n [0, 0, 0, v]])\n return [fx, fu, fw]", "def jacobian(f, x):\n\n B, N = x.shape\n x.requires_grad = True\n in_ = torch.zeros(B, 1)\n \n y = f(in_, x)\n jacobian = list()\n \n for i in range(N):\n v = torch.zeros_like(y)\n v[:, i] = 1.\n dy_i_dx = torch.autograd.grad(y,\n x,\n grad_outputs=v,\n retain_graph=True,\n create_graph=True,\n allow_unused=True)[0] # shape [B, N]\n jacobian.append(dy_i_dx)\n\n jacobian = torch.stack(jacobian, dim=2).requires_grad_()\n\n return jacobian", "def jacobian(Lfrac, Lstar_10, qlf):\n D = 
np.tile(qlf.c_B*Lstar_10**qlf.k_B, [len(Lfrac),1])\n Lfrac_2D = np.tile(Lfrac, [len(qlf.c_B),1]).T\n return np.sum(-D*Lfrac_2D**qlf.k_B,axis=1) / np.sum(D*(qlf.k_B -1)*Lfrac_2D**qlf.k_B,axis=1)\n #return np.sum(D*(1.+qlf.k_B)*Lfrac_2D**qlf.k_B, axis=1)/np.sum(D*Lfrac_2D**qlf.k_B, axis=1)", "def jacobian_i(self, x):\n return np.matrix([-x**3, -x**2, -x, -1])", "def _compute_jacobian(self):\n q_sum = np.cumsum(self._q)\n self._sines = np.sin(q_sum)\n self._cosines = np.cos(q_sum)\n (s_1, s_12, s_123) = self._sines\n (c_1, c_12, c_123) = self._cosines\n self._jacobian = np.array([\n np.cumsum([\n self._jnt_lengths[2] * c_123,\n self._jnt_lengths[1] * c_12,\n self._jnt_lengths[0] * c_1\n ])[::-1], # compute jacobian 1st row\n np.cumsum([\n -self._jnt_lengths[2] * s_123,\n -self._jnt_lengths[1] * s_12,\n -self._jnt_lengths[0] * s_1\n ])[::-1] # jacobian 2nd row\n ])\n self._jacobian_psinv = np.matmul(\n self._jacobian.T,\n np.linalg.inv(np.matmul(self._jacobian, self._jacobian.T))\n )", "def jacobian_c(self, x, out=None, **kwargs):\n return empty_matrix(0, self.nx)", "def get_jacobian_spatial(self, qs=None) -> np.ndarray:\n if qs is None:\n qs = self.get_current_joint_position()\n return self.robot.jacob0(qs)", "def jacobian_linear(self, joint_angles: dict, query_frame: str = \"\") -> np.ndarray:\n\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = []\n for ee in self.end_effectors: # get p nodes in end-effectors\n if ee[0][0] == \"p\":\n end_effector_nodes += [ee[0]]\n if ee[1][0] == \"p\":\n end_effector_nodes += [ee[1]]\n\n node_names = [\n name for name in self.structure if name[0] == \"p\"\n ] # list of p node ids\n\n # Ts = self.get_full_pose_fast_lambdify(joint_angles) # all frame poses\n Ts = self.get_all_poses(joint_angles) # all frame poses\n Ts[\"p0\"] = np.eye(4)\n\n J = np.zeros([0, len(node_names) - 1])\n for ee in end_effector_nodes: # iterate through end-effector nodes\n ee_path = kinematic_map[ee][:-1] # no last node, only phys. 
joint locations\n\n T_0_ee = Ts[ee] # ee frame\n p_ee = T_0_ee[0:3, -1] # ee position\n\n Jp_t = np.zeros([3, len(node_names) - 1]) # translation jac for theta\n Jp_al = np.zeros([3, len(node_names) - 1]) # translation jac alpha\n for joint in ee_path: # algorithm fills Jac per column\n T_0_i = Ts[joint]\n z_hat_i = T_0_i[:3, 2]\n x_hat_i = T_0_i[:3, 0]\n p_i = T_0_i[:3, -1]\n j_idx = node_names.index(joint)\n Jp_t[:, j_idx] = np.cross(z_hat_i, p_ee - p_i)\n Jp_al[:, j_idx] = np.cross(x_hat_i, p_ee - p_i)\n\n J_ee = np.vstack([Jp_t, Jp_al])\n J = np.vstack([J, J_ee]) # stack big jac for multiple ee\n\n return J", "def __calc_jacobian_matrix(self):\n\n tf_matrix_first_to_last = self.tf_matrices_list[-1]\n self.jacobian_matrix = [diff(tf_matrix_first_to_last[:3, -1], self.q[i]).reshape(1, 3) for i in range(len(self.q))]\n self.jacobian_matrix = Matrix(self.jacobian_matrix).T # .T returns the transpose of matrix.", "def JacobianFunction(p,x,y,z):\n \n n = len(x)\n \n J = np.array([ np.ones((n)),x,x**2,y,y**2,x*y ])\n \n return J", "def jacobian(self, x):\n x_ = np.atleast_2d(x)\n if self.normalize:\n x_ = (x_ - self.sample_mean) / self.sample_std\n s_ = (self.samples - self.sample_mean) / self.sample_std\n else:\n s_ = self.samples\n\n fx, jf = self.reg_model(x_)\n rx, drdx = self.corr_model(x=x_, s=s_, params=self.corr_model_params, dx=True)\n y_grad = np.einsum('ikj,jm->ik', jf, self.beta) + np.einsum('ijk,jm->ki', drdx.T, self.gamma)\n if self.normalize:\n y_grad = y_grad * self.value_std / self.sample_std\n if x_.shape[1] == 1:\n y_grad = y_grad.flatten()\n return y_grad", "def _calc_J(self, name, x, lambdify=True):\n\n J = None\n J_func = None\n filename = name + '[0,0,0]' if np.allclose(x, 0) else name\n filename += '_J'\n\n # check to see if should try to load functions from file\n J, J_func = self._load_from_file(filename, lambdify)\n\n if J is None and J_func is None:\n # if no saved file was loaded, generate function\n print('Generating Jacobian function for %s' % filename)\n\n Tx = self._calc_Tx(name, x=x, lambdify=False)\n # NOTE: calculating the Jacobian this way doesn't incur any\n # real computational cost (maybe 30ms) and it simplifies adding\n # the orientation information below (as opposed to using\n # sympy's Tx.jacobian method)\n # TODO: rework to use the Jacobian function and automate\n # derivation of the orientation Jacobian component\n J = []\n # calculate derivative of (x,y,z) wrt to each joint\n for ii in range(self.N_JOINTS):\n J.append([])\n J[ii].append(Tx[0].diff(self.q[ii])) # dx/dq[ii]\n J[ii].append(Tx[1].diff(self.q[ii])) # dy/dq[ii]\n J[ii].append(Tx[2].diff(self.q[ii])) # dz/dq[ii]\n\n if 'EE' in name:\n end_point = self.N_JOINTS\n elif 'link' in name:\n end_point = int(name.strip('link'))\n elif 'joint' in name:\n end_point = int(name.strip('joint'))\n # can't have more joint derivatives than there are joints\n end_point = min(end_point, self.N_JOINTS)\n\n # add on the orientation information up to the last joint\n for ii in range(end_point):\n J[ii] = J[ii] + list(self.J_orientation[ii])\n # fill in the rest of the joints orientation info with 0\n for ii in range(end_point, self.N_JOINTS):\n J[ii] = J[ii] + [0, 0, 0]\n J = sp.Matrix(J).T # correct the orientation of J\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/%s' % (self.config_folder, filename))\n cloudpickle.dump(J, open(\n '%s/%s/%s' % (self.config_folder, filename, filename), 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return J\n\n if J_func is 
None:\n J_func = self._generate_and_save_function(\n filename=filename, expression=J,\n parameters=self.q+self.x)\n return J_func", "def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)", "def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)", "def calc_jacobian_numerical(model, x, dim, device, eps=1e-6):\n\n # set to eval mode but remember original state\n in_training: bool = model.training\n model.eval() # otherwise we will get 0 gradients\n\n # clone input to avoid problems\n x = x.clone().requires_grad_(True)\n\n # init jacobian\n J = torch.zeros(dim, x.shape[1])\n\n # iterate over input dims and perturb\n for j in range(dim):\n delta = torch.zeros(dim).to(device)\n delta[j] = eps\n J[:, j] = (model(x + delta) - model(x)).abs().mean(0) / (2 * eps)\n\n # reset to original state\n if in_training is True:\n model.train()\n\n return J", "def jacobian(self, xs):\n rx_list = []\n for nx,x in enumerate(xs):\n \n numpy.testing.assert_array_almost_equal(self.independentVariableShapeList[nx], numpy.shape(x), err_msg = '\\ntaped xs[%d].shape != forward xs[%d]\\n'%(nx,nx))\n rx = numpy.ravel(x)\n rx_list.append(rx)\n self.x = numpy.concatenate(rx_list)\n return wrapped_functions.jacobian(self.tape_tag, self.x)", "def compute_jacobian(self):\n \n d = len(self.theta)\n n,p = self.b.shape\n \n if not self.quiet:\n print \"Running jacobian computation.\"\n print \"D will be a {}x{}x{} array\".format(p,n,d)\n \n if self.x is None:\n raise ValueError('Can not compute Jacobian. self.x is None.')\n \n #print \"n={},n={}\".format(n,d);\n \n D = numpy.zeros((p,n,d))\n \n \n for k in range(d):\n A_k, b_k = self.get_diff_A_b(k)\n \n for i in range(p):\n D[i,:,k] = - self.solver.backsolve(A_k.dot(self.x[:,i]) - b_k[:,i])\n \n return D", "def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)", "def jacobF(x, u):\n v_x =u[0, 0] \n v_y =u[1, 0] \n jF = np.matrix([ \n [1.0, 0.0, 1, 0],\n [0.0, 1.0, 0, 1],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n return jF", "def jacobian(self,var,g=None):\n if (g==None):g=self.g\n jac=np.zeros([self.n+1,self.n])\n for i in range(self.n):\n for j in range(self.n):\n if(i==j): jac[i][j]=2.*(var[i]+1.)-g*np.sum([self.XXZ.Z(i,k) for k in range(self.n) if k!=i])\n else: jac[i][j]=g*self.XXZ.Z(i,j)\n for i in range(self.n):\n jac[self.n][i]=1.\n return jac", "def jacobian(self, p):\n delta = 1.\n props = {'density': self.density}\n xp, zp = self.x, self.z\n verts = self.verts\n x, z = p\n jac = np.transpose([\n (talwani.gz(xp, zp, [Polygon(verts + [[x + delta, z]], props)]) -\n talwani.gz(xp, zp, [Polygon(verts + [[x - delta, z]], props)])\n ) / (2. * delta),\n (talwani.gz(xp, zp, [Polygon(verts + [[x, z + delta]], props)]) -\n talwani.gz(xp, zp, [Polygon(verts + [[x, z - delta]], props)])\n ) / (2. 
* delta)])\n return jac", "def jacobian(Angle1Final,Angle2Final,Angle3Final):\n\tAngle1,Angle2,Angle3 = sp.symbols('Angle1,Angle2,Angle3',real=True)\n\tx = ShoulderToElbowLength*sp.sin(Angle1) \\\n\t\t+ ForearmLength*sp.sin(Angle1+Angle2) \\\n\t\t+ HandLength*sp.sin(Angle1+Angle2-Angle3)\n\ty = -ShoulderToElbowLength*sp.cos(Angle1) \\\n\t\t- ForearmLength*sp.cos(Angle1+Angle2) \\\n\t\t- HandLength*sp.cos(Angle1+Angle2-Angle3)\n\talpha = Angle1 + Angle2 - Angle3\n\n\tGeometricModel = sp.Matrix([x,y,alpha])\n\tSymbolicJacobianMatrix = GeometricModel.jacobian([Angle1,Angle2,Angle3])\n\tJacobianMatrix = SymbolicJacobianMatrix.subs([(Angle1,Angle1Final), (Angle2,Angle2Final), (Angle3,Angle3Final)]).evalf()\n\treturn(np.array(JacobianMatrix).astype(float))", "def jacobian_g(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out)", "def J(self, name, q, x=None):\n\n x = self.x_zeros if x is None else x\n funcname = name + '[0,0,0]' if np.allclose(x, 0) else name\n # check for function in dictionary\n if self._J.get(funcname, None) is None:\n self._J[funcname] = self._calc_J(name=name, x=x)\n parameters = tuple(q) + tuple(x)\n return np.array(self._J[funcname](*parameters), dtype='float32')", "def jacobian_c(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_c(x, out=out, **kwargs)", "def jacobian(self,x,y,l,a):\n J = np.zeros([*x.shape,2,2])\n\n J = _jacobian(x,y,l,a,J)\n\n return J", "def dJ(self, name, q, dq, x=None):\n\n x = self.x_zeros if x is None else x\n funcname = name + '[0,0,0]' if np.allclose(x, 0) else name\n # check for function in dictionary\n if self._dJ.get(funcname, None) is None:\n self._dJ[funcname] = self._calc_dJ(name=name, x=x)\n parameters = tuple(q) + tuple(dq) + tuple(x)\n return np.array(self._dJ[funcname](*parameters), dtype='float32')", "def _calculate_jacobian(self,\n x0: np.ndarray,\n step: float = 10 ** (-6)) -> np.ndarray:\n y0 = self._calculate_residual(x0)\n\n jacobian = []\n for i in enumerate(x0):\n x = x0.copy()\n x[i] += step\n y = self._calculate_residual(x)\n derivative = (y - y0) / step\n jacobian.append(derivative)\n jacobian = np.array(jacobian).T\n\n return jacobian", "def jacobianstructure(self):\n pass", "def jacobian(self, p):\n verts = self.p2vertices(p)\n delta = np.array([0, 1])\n jac = np.empty((self.ndata, self.nparams))\n for i in range(self.nparams):\n diff = Polygon([verts[i + 2], verts[i + 1] - delta,\n verts[i], verts[i + 1] + delta], self.props)\n jac[:, i] = talwani.gz(self.x, self.z, [diff])/(2*delta[1])\n return jac", "def classical_jacobian(qnode):\r\n\r\n def classical_preprocessing(*args, **kwargs):\r\n \"\"\"Returns the trainable gate parameters for\r\n a given QNode input\"\"\"\r\n qnode.construct(args, kwargs)\r\n return qml.math.stack(qnode.qtape.get_parameters())\r\n\r\n if qnode.interface == \"autograd\":\r\n return qml.jacobian(classical_preprocessing)\r\n\r\n if qnode.interface == \"torch\":\r\n import torch\r\n\r\n def _jacobian(*args, **kwargs): # pylint: disable=unused-argument\r\n return torch.autograd.functional.jacobian(classical_preprocessing, args)\r\n\r\n return _jacobian\r\n\r\n if qnode.interface == \"jax\":\r\n import jax\r\n\r\n return jax.jacobian(classical_preprocessing)\r\n\r\n if qnode.interface == \"tf\":\r\n import tensorflow as tf\r\n\r\n def _jacobian(*args, **kwargs):\r\n with tf.GradientTape() as tape:\r\n tape.watch(args)\r\n gate_params = classical_preprocessing(*args, **kwargs)\r\n\r\n return tape.jacobian(gate_params, args)\r\n\r\n return _jacobian", "def 
jacobian(self, points):\n # check if re-computation of dW/dx can be avoided\n if not np.array_equal(self._cached_points, points):\n # recompute dW/dx, i.e. the relative weight of each point wrt\n # the source landmarks\n self.dW_dX = self.transform.weight_points(points)\n # cache points\n self._cached_points = points\n\n # dX/dp is simply the Jacobian of the model\n dX_dp = self.pdm.model.jacobian\n\n # dW_dX: n_points x n_points x n_dims\n # dX_dp: n_points x n_params x n_dims\n dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)\n # dW_dp: n_points x n_params x n_dims\n\n return dW_dp", "def f(self, (k,t), (J,q,dq), **params):\n f = 0.*q\n return f", "def jacobian(kernel: Kern, variable_points: ndarray, fixed_points: ndarray) -> ndarray:\n if isinstance(kernel, RBF):\n lengthscale = kernel.lengthscale.values[0]\n k = kernel.K(variable_points, fixed_points)\n\n # The (i, j, k)-th element of this is the k-th component of X_i - D_j.\n differences = variable_points[:, newaxis, :] - fixed_points[newaxis, :, :]\n\n return -k[:, :, newaxis] * differences / (lengthscale ** 2)\n else:\n raise NotImplementedError", "def jacobian(self, theta, force=False):\n \n # Update the internal solution\n self.solution_update(theta, force)\n \n # Run the internal jacobian calculation\n return self.compute_jacobian()", "def jacobian_g(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_g(x, out=out, **kwargs)", "def transform_and_compute_jacobian(self, xj):\n x = xj[:, :self.d].detach()\n log_j = xj[:, -1]\n\n x.requires_grad = True\n y = self.flow_(x)\n\n n_batch = xj.shape[0]\n\n jx = torch.zeros(n_batch, self.d, self.d).to(log_j.device)\n directions = torch.eye(self.d).to(log_j).unsqueeze(0).repeat(n_batch, 1, 1)\n\n for i in range(self.d):\n jx[:, i, :] = torch.autograd.grad(y, x, directions[:, i, :],\n allow_unused=True, create_graph=True, retain_graph=True)[0]\n x.requires_grad = False\n x.grad = None\n\n log_det_j = torch.log(torch.abs(torch.det(jx)))\n return torch.cat([y.detach(), (log_j + log_det_j).unsqueeze(1)], 1)", "def fd_jacobian(self,y):\n res0 = self.residual(y)\n eps = 1e-6\n dofs = y.shape[0]\n jac_approx = np.zeros((dofs,dofs))\n for i in range(dofs):\n y_temp = np.copy(y)\n y_temp[i]+=eps\n\n r2 = self.residual(y_temp)\n dr = (r2-res0)/eps\n for j in range(dofs):\n jac_approx[j,i] = dr[j]\n \n return jac_approx", "def _get_jacobian(self):\n srcs, recs = self.srcs, self.recs\n if not self.sparse:\n jac = numpy.array(\n [ttime2d.straight([cell], '', srcs, recs, velocity=1.)\n for cell in self.mesh]).T\n else:\n shoot = ttime2d.straight\n nonzero = []\n extend = nonzero.extend\n for j, c in enumerate(self.mesh):\n extend((i, j, tt)\n for i, tt in enumerate(shoot([c], '', srcs, recs,\n velocity=1.))\n if tt != 0)\n row, col, val = numpy.array(nonzero).T\n shape = (self.ndata, self.nparams)\n jac = scipy.sparse.csr_matrix((val, (row, col)), shape)\n return jac", "def get_jacobian(self, joint_angles):\n q = kdl.JntArray(self._urdf_chain.getNrOfJoints())\n for i in range(q.rows()):\n q[i] = joint_angles[i]\n jac = kdl.Jacobian(self._urdf_chain.getNrOfJoints())\n fg = self._jac_solver.JntToJac(q, jac)\n if fg < 0:\n raise ValueError('KDL JntToJac error!')\n jac_np = kdl_array_to_numpy(jac)\n return jac_np", "def jacobian1(self,A):\r\n\r\n # Compute second derivatives in spectral space\r\n A_x_x_hat = self.calc_derivative(A, 'x', 'x')\r\n A_y_y_hat = self.calc_derivative(A, 'y', 'y')\r\n A_x_y_hat = self.calc_derivative(A, 'x', 'y')\r\n A_y_x_hat = self.calc_derivative(A, 'y', 
'x')\r\n\r\n # Compute realspace representations for multiplication\r\n A_x_x = self.inverse_fft(self.dealias_pad(A_x_x_hat))\r\n A_y_y = self.inverse_fft(self.dealias_pad(A_y_y_hat))\r\n A_x_y = self.inverse_fft(self.dealias_pad(A_x_y_hat))\r\n A_y_x = self.inverse_fft(self.dealias_pad(A_y_x_hat))\r\n\r\n # Multiply in realspace\r\n J_canonical = (A_x_x*A_y_y) - (A_x_y*A_y_x)\r\n\r\n # Return to Fourier space and return spectrum\r\n return self.dealias_unpad(self.forward_fft(J_canonical))", "def numerical_jacobian (fhandle, x, **args):\n \n y = fhandle (x, **args)\n numRows, numCols = (len (y), len (x))\n J = np.zeros ((numRows, numCols))\n\n for col in range (0, numCols):\n xPrime = x.copy ()\n deltaX = max (1e-4*x[col], 1e-6)\n xPrime[col] += deltaX\n yPrime = fhandle (xPrime, **args)\n J[:, col] = (yPrime - y) / deltaX\n\n return J", "def J(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n df1du = 2*u*g3**2 - 2*g3*u0 + 2*g3*coeffs[3]*(g1*u1-u0) + 2*g3*coeffs[4]*(g2*u2-u0)\n df1dv = -2*v*g3**2 + 2*g3*v0 - 2*g3*coeffs[3]*(g1*v1-v0) - 2*g3*coeffs[4]*(g2*v2-v0)\n df1dg1 = 2*g1*coeffs[0]*(u1**2-v1**2) + 2*(v1*v0-u1*u0)*(coeffs[0]+coeffs[1]+coeffs[3]) + 2*g2*coeffs[1]*(u1*u2-v1*v2) + 2*g3*coeffs[3]*(u1*u-v1*v)\n df1dg2 = 2*g2*coeffs[2]*(u2**2-v2**2) + 2*(v2*v0-u2*u0)*(coeffs[1]+coeffs[2]+coeffs[4]) + 2*g1*coeffs[1]*(u1*u2-v1*v2) + 2*g3*coeffs[4]*(u2*u-v2*v)\n df1dg3 = 2*g3*(u**2-v**2) + 2*(v*v0-u*u0)*(coeffs[3]+coeffs[4]+1) + 2*g1*coeffs[3]*(u1*u-v1*v) + 2*g2*coeffs[4]*(u2*u-v2*v)\n\n df2du = 0\n df2dv = 2*v*g3**2 + 2*g3*(-v0 + coeffs[3]*(g1*v1-v0) + coeffs[4]*(g2*v2-v0))\n df2dg1 = 2*g1*coeffs[0]*(v1**2-1) + 2*(1-v1*v0)*(coeffs[0]+coeffs[1]+coeffs[3]) + 2*g2*coeffs[1]*(v1*v2-1) + 2*g3*coeffs[3]*(v1*v-1)\n df2dg2 = 2*g2*coeffs[2]*(v2**2-1) + 2*(1-v2*v0)*(coeffs[1]+coeffs[2]+coeffs[4]) + 2*g1*coeffs[1]*(v1*v2-1) + 2*g3*coeffs[4]*(v2*v-1)\n df2dg3 = 2*g3*(v**2-1) + 2*(1-v*v0)*(coeffs[3]+coeffs[4]+1) + 2*g1*coeffs[3]*(v1*v-1) + 2*g2*coeffs[4]*(v2*v-1)\n\n df3du = g3*coeffs[3]*(g1*v1-v0) + g3*coeffs[4]*(g2*v2-v0) + g3*(g3*v-v0)\n df3dv = g3*coeffs[3]*(g1*u1-u0) + g3*coeffs[4]*(g2*u2-u0) + g3*(g3*u-u0)\n df3dg1 = 2*g1*coeffs[0]*u1*v1 - (v1*u0+u1*v0)*(coeffs[0]+coeffs[1]+coeffs[3]) + g2*coeffs[1]*(u1*v2+v1*u2) + g3*coeffs[3]*(v1*u+u1*v)\n df3dg2 = 2*g2*coeffs[2]*u2*v2 - (v2*u0+u2*v0)*(coeffs[1]+coeffs[2]+coeffs[4]) + g1*coeffs[1]*(u1*v2+v1*u2) + g3*coeffs[4]*(v2*u+u2*v)\n df3dg3 = 2*g3*u*v - (u*v0+v*u0)*(coeffs[3]+coeffs[4]+1) + g1*coeffs[3]*(v1*u+u1*v) + g2*coeffs[4]*(v2*u+u2*v)\n\n df4du = g3*coeffs[3]*(g1-1) + g3*coeffs[4]*(g2-1) + g3*(g3-1)\n df4dv = 0\n df4dg1 = 2*g1*coeffs[0]*u1 - (u0+u1)*(coeffs[0]+coeffs[1]+coeffs[3]) + g2*coeffs[1]*(u1+u2) + g3*coeffs[3]*(u+u1)\n df4dg2 = 2*g2*coeffs[2]*u2 - (u0+u2)*(coeffs[1]+coeffs[2]+coeffs[4]) + g1*coeffs[1]*(u1+u2) + g3*coeffs[4]*(u+u2)\n df4dg3 = 2*g3*u - (u+u0)*(coeffs[3]+coeffs[4]+1) + g1*coeffs[3]*(u+u1) + g2*coeffs[4]*(u+u2)\n\n df5du = 0\n df5dv = g3*coeffs[3]*(g1-1) + g3*coeffs[4]*(g2-1) + g3*(g3-1)\n df5dg1 = 2*g1*coeffs[0]*v1 - (v1+v0)*(coeffs[0]+coeffs[1]+coeffs[3]) + g2*coeffs[1]*(v2+v1) + g3*coeffs[3]*(v1+v)\n df5dg2 = 2*g2*coeffs[2]*v2 - (v2+v0)*(coeffs[1]+coeffs[2]+coeffs[4]) + g1*coeffs[1]*(v2+v1) + g3*coeffs[4]*(v2+v)\n df5dg3 = 2*g3*v - (v0+v)*(coeffs[3]+coeffs[4]+1) + g1*coeffs[3]*(v1+v) + g2*coeffs[4]*(v2+v)\n\n return np.array([\n [df1du, df1dv, df1dg1, df1dg2, df1dg3],\n [df2du, df2dv, df2dg1, df2dg2, df2dg3],\n [df3du, df3dv, df3dg1, df3dg2, df3dg3],\n [df4du, df4dv, df4dg1, df4dg2, df4dg3],\n [df5du, 
df5dv, df5dg1, df5dg2, df5dg3],\n ])", "def jacobian(self, points):\n # check if re-computation of dW/dx can be avoided\n if not np.array_equal(self._cached_points, points):\n # recompute dW/dx, i.e. the relative weight of each point wrt\n # the source landmarks\n self.dW_dX = self.transform.weight_points(points)\n # cache points\n self._cached_points = points\n\n model_jacobian = self.pdm.model.jacobian\n points = self.pdm.model.mean.points\n\n # compute dX/dp\n\n # dX/dq is the Jacobian of the global transform evaluated at the\n # mean of the model.\n dX_dq = self._global_transform_jacobian(points)\n # dX_dq: n_points x n_global_params x n_dims\n\n # by application of the chain rule dX_db is the Jacobian of the\n # model transformed by the linear component of the global transform\n dS_db = model_jacobian\n dX_dS = self.pdm.global_transform.jacobian_points(points)\n dX_db = np.einsum('ilj, idj -> idj', dX_dS, dS_db)\n # dS_db: n_points x n_weights x n_dims\n # dX_dS: n_points x n_dims x n_dims\n # dX_db: n_points x n_weights x n_dims\n\n # dX/dp is simply the concatenation of the previous two terms\n dX_dp = np.hstack((dX_dq, dX_db))\n\n # dW_dX: n_points x n_points x n_dims\n # dX_dp: n_points x n_params x n_dims\n dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)\n # dW_dp: n_points x n_params x n_dims\n\n return dW_dp", "def jacobian(theta, event, parameters_to_fit):\n for (key, val) in enumerate(parameters_to_fit):\n setattr(event.model.parameters, val, theta[key])\n return event.chi2_gradient(parameters_to_fit)", "def FIM(C1s,C0s,ks,bs,sigma=1):\n \n H=partial(joint_meas_func, C1s,C0s,ks,bs)\n \n # Taking partial derivative of H w.r.t. the zeroth argument, which is q.\n dHdq=jit(jacfwd(H,argnums=0))\n # import pdb\n # pdb.set_trace()\n return lambda q,ps:1/(jnp.power(sigma,2)) * dHdq(q.reshape(ps.shape[1],),ps).T.dot(dHdq(q.reshape(ps.shape[1],),ps))", "def jacobian(self, dmin, src=None, rec=None, u=None, U=None, vp=None, **kwargs):\n # Source term is read-only, so re-use the default\n src = src or self.geometry.src\n # Create a new receiver object to store the result\n rec = rec or self.geometry.rec\n\n # Create the forward wavefields u and U if not provided\n u = u or TimeFunction(name='u', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n U = U or TimeFunction(name='U', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n\n # Pick vp from model unless explicitly provided\n vp = vp or self.model.vp\n\n # Execute operator and return wavefield and receiver data\n summary = self.op_born().apply(dm=dmin, u=u, U=U, src=src, rec=rec,\n vp=vp, dt=kwargs.pop('dt', self.dt), **kwargs)\n return rec, u, U, summary", "def _jacobian_sketchAtom(task,Phi,theta,z_to_ignore):\n if task == \"kmeans\":\n grad_z_th = Phi.grad(theta)\n elif task == \"gmm\":\n (mu,sig) = _destackAtom(\"gmm\",theta,Phi.d)\n z_th = fourierSketchOfGaussian(mu,np.diag(sig),Phi.Omega,Phi.xi,Phi.c_norm)\n grad_z_th = np.zeros((2*Phi.d,Phi.m)) + 1j*np.zeros((2*Phi.d,Phi.m))\n grad_z_th[:Phi.d] = 1j*Phi.Omega * z_th # Jacobian w.r.t. mu\n grad_z_th[Phi.d:] = -0.5*(Phi.Omega**2) * z_th # Jacobian w.r.t. sigma\n elif task == \"gmm-nondiag\":\n (mu,Sig) = _destackAtom(\"gmm-nondiag\",theta,Phi.d)\n z_th = fourierSketchOfGaussian(mu,Sig,Phi.Omega,Phi.xi,Phi.c_norm)\n grad_z_th = (1+1j)*np.zeros(((Phi.d+1)*Phi.d,Phi.m))\n grad_z_th[:Phi.d] = 1j*Phi.Omega * z_th # Jacobian w.r.t. 
mu\n for j in range(Phi.m):\n omega2 = -(np.outer(Phi.Omega[:,j],Phi.Omega[:,j]))\n omega2 = omega2 * (np.ones((Phi.d,Phi.d)) - 0.5*np.eye(Phi.d))\n grad_z_th[Phi.d:,j] = np.reshape(omega2,-1) * z_th[j] # Jacobian w.r.t. sigma\n else:\n raise ValueError\n return grad_z_th * z_to_ignore", "def jacobian_cost(self, joint_angles: dict, ee_goals) -> np.ndarray:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = ee_goals.keys()\n J = np.zeros(self.n)\n for (\n ee\n ) in end_effector_nodes: # iterate through end-effector nodes, assumes sorted\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. joint locations\n t_ee = self.get_pose(joint_angles, ee).trans\n dg_ee_x = t_ee[0] - ee_goals[ee].trans[0]\n dg_ee_y = t_ee[1] - ee_goals[ee].trans[1]\n for (pdx, joint_p) in enumerate(ee_path): # algorithm fills Jac per column\n p_idx = int(joint_p[1:]) - 1\n for jdx in range(pdx, len(ee_path)):\n node_jdx = ee_path[jdx]\n theta_jdx = sum([joint_angles[key] for key in ee_path[0 : jdx + 1]])\n J[p_idx] += (\n 2.0\n * self.a[node_jdx]\n * (-dg_ee_x * np.sin(theta_jdx) + dg_ee_y * np.cos(theta_jdx))\n )\n\n return J", "def jacobian(self, b):\n \n # Substitute parameters in partial derivatives\n subs = [pd.subs(zip(self._b, b)) for pd in self._pderivs]\n # Evaluate substituted partial derivatives for all x-values\n vals = [sp.lambdify(self._x, sub, \"numpy\")(self.xvals) for sub in subs]\n # Arrange values in column-major order\n return np.column_stack(vals)", "def jacobian_information(self):\n has_jacobian = False\n jacobian_free_solvers = []\n return has_jacobian, jacobian_free_solvers", "def electrical_jacobian(self, state, u_in, omega, *_):\n pass", "def jacobian(self, coordinates, force_coords, dtype=\"float64\"):\n force_east, force_north = n_1d_arrays(force_coords, n=2)\n east, north = n_1d_arrays(coordinates, n=2)\n jac = np.empty((east.size * 2, force_east.size * 2), dtype=dtype)\n if parse_engine(self.engine) == \"numba\":\n jac = jacobian_2d_numba(\n east, north, force_east, force_north, self.mindist, self.poisson, jac\n )\n else:\n jac = jacobian_2d_numpy(\n east, north, force_east, force_north, self.mindist, self.poisson, jac\n )\n return jac", "def F(self, (k,t), (j,x), **params):\n d = len(x)/2\n q,dq = x[:d],x[d:]\n J = j\n M = self.M( (k,t), (J,q,dq), **params )\n f = self.f( (k,t), (J,q,dq), **params )\n c = self.c( (k,t), (J,q,dq), **params )\n Da = self.Da( (k,t), (J,q), **params )\n Db = self.Db( (k,t), (J,q), **params )\n D = np.vstack((Da,Db))\n lambda_ = self.lambda_( (k,t), (J,q,dq), **params )\n ddq = util.dot( la.inv(M), f + util.dot(c,dq) + util.dot(lambda_, D) )\n dx = np.hstack((dq,ddq))\n return dx", "def __update_jacobian(self, x, F):\n old_err = _n.seterr(divide='raise')\n\n try:\n y = F - self.F\n s = x - self.x\n\n zt = None\n if self.update_type == BroydenSolver.UPDATE_ICUM:\n maxi = abs(_n.ravel(y)).argmax()\n zt = _n.transpose(_n.zeros((1,self.n), _n.float_))\n zt[0, maxi] = 1\n elif self.update_type == BroydenSolver.UPDATE_GOOD_BROYDEN:\n # (Good) Broyden update\n zt = _n.dot(_n.transpose(s), self.H)\n elif self.update_type == BroydenSolver.UPDATE_BAD_BROYDEN:\n # (Bad) Broyden update\n zt = _n.transpose(y)\n else:\n raise ValueError(\"Unknown update type %s\" % (self.update_type))\n\n self.H = self.H \\\n + _n.dot(s - _n.dot(self.H, y), zt) / _n.dot(zt, y)\n except FloatingPointError:\n warnings.warn(\"%% Broyden reset: singular\", BroydenWarning)\n self.H = _n.identity(self.n) / 
self.initial_scale\n\n _n.seterr(**old_err)", "def jacobian_numba(coordinates, points, jac, greens_function):\n east, north, upward = coordinates[:]\n point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n jac[i, j] = greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )", "def jacobi(self, lattice):\n kernel = np.array([[[0.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,0.0]],\n [[0.0,1.0,0.0],[1.0,0.0,1.0],[0.0,1.0,0.0]],\n [[0.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,0.0]]])\n return ((signal.fftconvolve(lattice, kernel, mode='same') + self.J)/ 6.0)", "def calculate_jacobian(robot_position, landmark_pos):\n\n return None", "def jacobval(state, time, press):\n a = len(state)\n jacobian = np.zeros(a**2)\n pyjacob.py_eval_jacobian(time, press, state, jacobian)\n jacobian = np.reshape(jacobian, (a,a))\n return jacobian", "def d_j(self,q):\n dj = np.roll(q,-1,axis=-2) - q\n return dj", "def intern_J(self):\n if self.Fz is None:\n fz_none = True\n else:\n fx, fy, fu = self.Fz\n fz_none = False\n if self.A is None:\n def J(x,y):\n if self.hx is None or self.gradh is None:\n if fz_none:\n fx, _, _ = self.F(x,y)\n xp, _, _ = minus(x, fx)\n xp, _, _ = operator_P(self.proj, xp)\n xp, _, _ = minus(x, xp)\n return LA.norm(xp),None,None\n else:\n if fz_none:\n fx, fy, _ = self.F(x,y)\n xp, yp, _ = minus(x, fx, y, fy)\n xp, yp, _ = operator_P(self.proj, xp, yp)\n xp, yp, _ = minus(x, xp, y, yp)\n total = np.concatenate((xp, yp))\n return LA.norm(xp)+LA.norm(yp),None,None\n else:\n def J(x,y,u):\n if self.hx is None or self.gradh is None:\n if fz_none:\n fx, _,fu = self.F(x,y,u)\n xp, up, _ = minus(x, fx, u, fu)\n xp, _, up = operator_P(self.proj, xp, None, up)\n xp, up, _ = minus(x, xp, u, up)\n total = np.concatenate((xp, up))\n return LA.norm(xp)+LA.norm(up),None,None\n else:\n if fz_none:\n fx, fy, fu = self.F(x,y,u)\n xp, yp, up = minus(x, fx, y, fy, u, fu)\n xp, yp, up = operator_P(self.proj, xp, yp, up)\n xp, yp, up = minus(x, xp, y, yp, u, up)\n total = np.concatenate((xp, yp, up))\n return LA.norm(xp)+LA.norm(yp)+LA.norm(up),None,None\n return J", "def newton_jacobian(f, x0, Jf, eps=1e-10):\n # Initialization\n globvar.ncalls = 0\n x = np.copy(x0)\n n = len(x)\n J = np.zeros((n, n), dtype='float64')\n fx = f(x)\n\n # Begin root search\n while True:\n globvar.ncalls += 1\n\n # Calculate Jacobian\n J = Jf(x)\n\n # Decompose and solve using Given's rotations\n decomp(J)\n Dx = -fx\n solve(J, Dx)\n\n # Begin backtracking linesearch\n lamb = 2.0\n while True: \n lamb /= 2\n y = x + Dx * lamb\n fy = f(y)\n\n fynorm = np.linalg.norm(fy)\n fxnorm = np.linalg.norm(fx)\n\n if (fynorm < (1 - lamb / 2) * fxnorm) or (lamb < (1 / 128.0)):\n break\n\n # Save latest approximation\n x = y\n fx = fy\n\n fxnorm = np.linalg.norm(fx)\n if fxnorm < eps:\n break\n\n return x", "def make_cp_qe_quad_func(x_0):\n def quad_func(xi, eta, nodes):\n x = geo.quadratic_interp(xi, eta, nodes)\n return geo.stresslet(x, x_0)\n return quad_func", "def _calc_dJ(self, name, x, lambdify=True):\n\n dJ = None\n dJ_func = None\n filename = name + '[0,0,0]' if np.allclose(x, 0) else name\n filename += '_dJ'\n # check to see if should try to load functions from file\n dJ, dJ_func = self._load_from_file(filename, lambdify)\n\n if dJ is None and dJ_func is None:\n # if no saved file was loaded, generate function\n print('Generating derivative of Jacobian ',\n 'function for %s' % filename)\n\n J = self._calc_J(name, x=x, lambdify=False)\n dJ 
= sp.Matrix(np.zeros(J.shape, dtype='float32'))\n # calculate derivative of (x,y,z) wrt to time\n # which each joint is dependent on\n for ii in range(J.shape[0]):\n for jj in range(J.shape[1]):\n for kk in range(self.N_JOINTS):\n dJ[ii, jj] += J[ii, jj].diff(self.q[kk]) * self.dq[kk]\n dJ = sp.Matrix(dJ)\n\n # save expression to file\n abr_control.utils.os_utils.makedirs(\n '%s/%s' % (self.config_folder, filename))\n cloudpickle.dump(dJ, open(\n '%s/%s/%s' % (self.config_folder, filename, filename), 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return dJ\n\n if dJ_func is None:\n dJ_func = self._generate_and_save_function(\n filename=filename, expression=dJ,\n parameters=self.q+self.dq+self.x)\n return dJ_func", "def jacobian(self, A, B):\r\n\r\n # Compute the derivatives spectrally\r\n A_x_hat = self.calc_derivative(A, 'x')\r\n A_y_hat = self.calc_derivative(A, 'y')\r\n B_x_hat = self.calc_derivative(B, 'x')\r\n B_y_hat = self.calc_derivative(B, 'y')\r\n\r\n # Compute the values in realspace for multiplication\r\n A_x = self.inverse_fft(self.dealias_pad(A_x_hat))\r\n A_y = self.inverse_fft(self.dealias_pad(A_y_hat))\r\n B_y = self.inverse_fft(self.dealias_pad(B_y_hat))\r\n B_x = self.inverse_fft(self.dealias_pad(B_x_hat))\r\n\r\n # Compute the Jacobian\r\n J_canonical = (A_x*B_y) - (B_x*A_y)\r\n\r\n # Return to spectral space the return\r\n return self.dealias_unpad(self.forward_fft(J_canonical))", "def planar_jacobian(robot: RobotPlanar, q: list, ee: str):\n # assume all joints are revolute\n n = robot.n\n\n Jp = np.zeros((3, n))\n Jo = np.zeros((3, n))\n\n if type(robot) == Revolute3dChain:\n path_names = [f\"p{i}\" for i in range(0, robot.n + 1)]\n else:\n path_names = robot.kinematic_map[\"p0\"][ee]\n\n if type(robot) == RobotPlanar:\n edges = list(robot.tree_graph().edges) # for Revolute2dtree\n elif type(robot) == RobotPlanar:\n edges = list(robot.chain_graph().edges) # for Revolute2dchain\n elif type(robot) == Revolute3dChain or type(robot) == Revolute3dTree:\n edges = [\n (node, path_names[p_ind + 1]) for p_ind, node in enumerate(path_names[0:-1])\n ]\n # elif type(robot) == Revolute3dTree:\n # edges = [\n #\n # ]\n\n # Ts = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q))\n Ts = robot.get_all_poses(list_to_variable_dict(q))\n # Ts[\"p0\"] = np.eye(4)\n\n T_0_ee = Ts[ee]\n pe = T_0_ee.as_matrix()[0:3, -1]\n\n for i_path, joint in enumerate(path_names[:-1]):\n T_0_i = Ts[joint].as_matrix()\n z_hat_i = T_0_i[0:3, 2]\n p_i = T_0_i[0:3, -1]\n edge = (joint, path_names[i_path + 1])\n j_idx = edges.index(edge) # get joint column number\n Jp[:, j_idx] = np.cross(z_hat_i, pe - p_i)\n\n # Euler error jacobian as in eqn 3.88\n Jo[:, j_idx] = z_hat_i\n\n J = np.vstack([Jp, Jo])\n return J", "def jacobian(expr, symbols):\n jac = []\n for symbol in symbols:\n # Differentiate to every param\n f = sympy.diff(expr, symbol)\n jac.append(f)\n return jac", "def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx,\n extra_feed_dict):\n # Complex vectors are treated as vectors of twice as many reals.\n if x.dtype.is_complex:\n x_shape = tuple(x_shape) + (2,)\n dy_factor = 2 if dy.dtype.is_complex else 1\n\n # To compute the jacobian, we treat x and y as one-dimensional vectors.\n x_size = _product(x_shape)\n x_val_size = _product(x_shape[1:]) # This is used for sparse gradients\n dy_size = _product(dy_shape) * dy_factor\n\n # Allocate 2-D Jacobian, with x dimensions smashed into the first\n # dimension and y dimensions smashed into the 
second.\n jacobian = np.zeros((x_size, dy_size),\n dtype=x.dtype.real_dtype.as_numpy_dtype)\n\n # For each of the entry of dy, we set this to be 1 and\n # everything else to be 0 and compute the backprop -- this will give us one\n # one column of the Jacobian matrix.\n dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype)\n dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype)\n sess = tf.get_default_session()\n for col in range(dy_size):\n dy_data_flat[col] = 1\n if isinstance(dx, tf.IndexedSlices):\n backprop_indices, backprop_values = sess.run(\n [dx.indices, dx.values],\n feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n for i, v in zip(backprop_indices, backprop_values):\n r_begin = i * x_val_size\n r_end = r_begin + x_val_size\n jacobian[r_begin:r_end, col] += v.flat\n else:\n assert isinstance(dx, tf.Tensor), \"dx = \" + str(dx)\n backprop = sess.run(\n dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n jacobian[:, col] = backprop.ravel().view(jacobian.dtype)\n dy_data_flat[col] = 0\n\n # If the output is empty, run the gradients at least once and make sure\n # they produce zeros.\n if not dy_size:\n backprop = sess.run(\n dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n if backprop.shape != x_data.shape:\n raise ValueError(\"Empty gradient has wrong shape: expected %s, got %s\" %\n (x_data.shape, backprop.shape))\n if np.any(backprop):\n raise ValueError(\"Empty tensor with nonzero gradients\")\n\n return jacobian", "def calc_jacobian(\n model: nn.Module,\n latents: torch.Tensor,\n normalize: bool = False,\n eps: float = 1e-8,\n vectorize=False,\n reverse_ad=True,\n norm_range=True,\n norm_diagonal=False,\n) -> torch.Tensor:\n # set to eval mode but remember original state\n in_training: bool = model.training\n model.eval() # otherwise we will get 0 gradients\n with torch.set_grad_enabled(True):\n jacob = []\n input_vars = latents.clone().requires_grad_(True)\n\n output_vars = model(input_vars)\n if not vectorize:\n for i in range(output_vars.shape[1]):\n jacob.append(\n torch.autograd.grad(\n output_vars[:, i : i + 1],\n input_vars,\n create_graph=True,\n grad_outputs=torch.ones(output_vars[:, i : i + 1].shape).to(\n output_vars.device\n ),\n )[0].detach()\n )\n\n jacobian = torch.stack(jacob, 1)\n else:\n from functorch import vmap, jacrev, jacfwd\n\n if reverse_ad is True:\n jac_fn = jacrev\n else:\n jac_fn = jacfwd\n\n sample_jacobian = jac_fn(model.forward, argnums=0)\n jacobian = vmap(\n lambda x: sample_jacobian(torch.unsqueeze(x, 0)), in_dims=0\n )(input_vars).squeeze()\n\n if normalize is True:\n # normalize the Jacobian by making it volume preserving\n # jacobian /= jacobian.det().abs().pow(1 / jacobian.shape[-1]).reshape(-1, 1, 1)\n\n # normalize to make variance to 1\n # norm_factor = (output_vars.std(dim=0) + 1e-8)\n # jacobian /= norm_factor.reshape(1, 1, -1)\n if norm_range is True:\n # normalize range to [0;1]\n dim_range = (\n (output_vars.max(dim=0)[0] - output_vars.min(dim=0)[0])\n .abs()\n .reshape(-1, 1)\n )\n\n jacobian /= dim_range + eps\n elif norm_diagonal is True:\n assert (dim := jacobian.shape[1]) == jacobian.shape[2]\n jacobian /= jacobian[:, (r := torch.arange(dim)), r].unsqueeze(-1) + eps\n\n # set back to original mode\n if in_training is True:\n model.train()\n\n return jacobian", "def _initialize_kindiffeq_matrices(self, kdeqs):\n\n if kdeqs:\n if len(self.q) != len(kdeqs):\n raise ValueError('There must be an equal number of kinematic '\n 'differential 
equations and coordinates.')\n kdeqs = Matrix(kdeqs)\n\n u = self.u\n qdot = self._qdot\n # Dictionaries setting things to zero\n u_zero = dict((i, 0) for i in u)\n uaux_zero = dict((i, 0) for i in self._uaux)\n qdot_zero = dict((i, 0) for i in qdot)\n\n f_k = msubs(kdeqs, u_zero, qdot_zero)\n k_ku = (msubs(kdeqs, qdot_zero) - f_k).jacobian(u)\n k_kqdot = (msubs(kdeqs, u_zero) - f_k).jacobian(qdot)\n\n f_k = k_kqdot.LUsolve(f_k)\n k_ku = k_kqdot.LUsolve(k_ku)\n k_kqdot = eye(len(qdot))\n\n self._qdot_u_map = solve_linear_system_LU(\n Matrix([k_kqdot.T, -(k_ku * u + f_k).T]).T, qdot)\n\n self._f_k = msubs(f_k, uaux_zero)\n self._k_ku = msubs(k_ku, uaux_zero)\n self._k_kqdot = k_kqdot\n else:\n self._qdot_u_map = None\n self._f_k = Matrix()\n self._k_ku = Matrix()\n self._k_kqdot = Matrix()", "def F(self, (k,t), (j,x), **params):\n return 0.*x", "def _grad_j(q_j, A_j, b_j, b_j_norm, a_1_j, a_2_j, m):\n return (A_j.t() @ q_j / (-m)) + (b_j * (a_1_j / b_j_norm + a_2_j))", "def calculateElementCoefficients(self):\n #\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])", "def ddq_a(self, q = np.zeros(2) , dq = np.zeros(2) , T = np.zeros(2) , uD = 0 , t = 0 ): \n \n R = self.R[ int(uD) ]\n B = np.dot( self.jacobian_actuators( q ).T , R.T ) # Transfor to rotor space \n \n H_all = self.H( q ) + np.dot( B , np.dot( self.Ia , B.T ) )\n \n dJ_a = self.jacobian_actuators_diff( q , dq )\n \n C = self.C( q , dq )\n Ca = np.dot( B , np.dot( self.Ia , np.dot( R , dJ_a ) ) )\n \n D = self.D( q , dq )\n Da = np.dot( B , np.dot( self.Da , B.T ) )\n \n CD_all = C + D + Ca + Da\n \n G = self.G( q )\n \n # External forces\n J_e = self.jacobian_endeffector( q )\n f_e = self.F_ext( q , dq )\n \n # Disturbance force\n f_d = self.F_dist( t )\n \n ddq = np.dot( np.linalg.inv( H_all ) , ( np.dot( B , T ) + np.dot( J_e.T , f_e ) - f_d - np.dot( CD_all , dq ) - G ) )\n \n return ddq", "def ddq_a(self, q = np.zeros(2) , dq = np.zeros(2) , T = np.zeros(2) , uD = 0 , t = 0 ): \n \n R = self.R[ int(uD) ]\n B = np.dot( self.jacobian_actuators( q ).T , R.T ) # Transfor to rotor space \n \n H_all = self.H( q ) + np.dot( B , np.dot( self.Ia , B.T ) )\n \n dJ_a = self.jacobian_actuators_diff( q , dq )\n \n C = self.C( q , dq )\n Ca = np.dot( B , np.dot( self.Ia , np.dot( R , dJ_a ) ) )\n \n D = self.D( q , dq )\n Da = np.dot( B , np.dot( self.Da , B.T ) )\n \n CD_all = C + D + Ca + Da\n \n G = 
self.G( q )\n \n # External forces\n J_e = self.jacobian_endeffector( q )\n f_e = self.F_ext( q , dq )\n \n # Disturbance force\n f_d = self.F_dist( t )\n \n ddq = np.dot( np.linalg.inv( H_all ) , ( np.dot( B , T ) + np.dot( J_e.T , f_e ) - f_d - np.dot( CD_all , dq ) - G ) )\n \n return ddq", "def _get_jacobian(tris_pts):\n a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])\n b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])\n J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],\n [b[:, 0], b[:, 1]]])\n return J", "def jacobian_solver(self,\n u: np.ndarray,\n lmbda: float,\n rhs: np.ndarray) -> np.ndarray:\n A = self.lap - lmbda * dia_matrix((self.mass @ np.exp(u), 0),\n self.mass.shape)\n du = np.zeros_like(u)\n du = solve(*condense(A, rhs, D=self.D))\n return du", "def jacobian(expression, wrt, consider_constant=None, disconnected_inputs=\"raise\"):\n\n if not isinstance(expression, Variable):\n raise TypeError(\"jacobian expects a Variable as `expression`\")\n\n if expression.ndim > 1:\n raise ValueError(\n \"jacobian expects a 1 dimensional variable as `expression`.\"\n \" If not use flatten to make it a vector\"\n )\n\n using_list = isinstance(wrt, list)\n using_tuple = isinstance(wrt, tuple)\n\n if isinstance(wrt, (list, tuple)):\n wrt = list(wrt)\n else:\n wrt = [wrt]\n\n if expression.ndim == 0:\n # expression is just a scalar, use grad\n return as_list_or_tuple(\n using_list,\n using_tuple,\n grad(\n expression,\n wrt,\n consider_constant=consider_constant,\n disconnected_inputs=disconnected_inputs,\n ),\n )\n\n def inner_function(*args):\n idx = args[0]\n expr = args[1]\n rvals = []\n for inp in args[2:]:\n rval = grad(\n expr[idx],\n inp,\n consider_constant=consider_constant,\n disconnected_inputs=disconnected_inputs,\n )\n rvals.append(rval)\n return rvals\n\n # Computing the gradients does not affect the random seeds on any random\n # generator used n expression (because during computing gradients we are\n # just backtracking over old values. 
(rp Jan 2012 - if anyone has a\n # counter example please show me)\n jacobs, updates = aesara.scan(\n inner_function,\n sequences=aesara.tensor.arange(expression.shape[0]),\n non_sequences=[expression] + wrt,\n )\n assert not updates, \"Scan has returned a list of updates; this should not happen.\"\n return as_list_or_tuple(using_list, using_tuple, jacobs)", "def __inverse_kinematics(self, guess, target_point):\n\n error = 1.0\n tolerance = 0.05\n\n # Initial Guess - Joint Angles\n thetas = np.matrix(guess) # thetas is list which is contain all axes theta angles.\n target_point = np.matrix(target_point) # X, Y, Z list to matrix for Target Position\n # print(target_point.shape)\n # Jacobian\n self.__calc_jacobian_matrix()\n tf_matrix_first_to_last = self.tf_matrices_list[-1]\n\n error_grad = []\n\n theta_dict = {}\n\n lr = 0.2\n while error > tolerance:\n for i in range(len(np.array(thetas)[0])):\n theta_dict[self.q[i]] = np.array(thetas)[0][i]\n\n theta_dict[self.q[-1]] = self.q[-1]\n\n calculated_target_point = np.matrix(self.get_coords_from_forward_kinematics(self.__forward_kinematics(np.array(thetas)[0])[-1]))\n logger.debug(f'calculated target point is \\n{calculated_target_point}')\n\n diff_wanted_calculated = target_point - calculated_target_point\n\n jacob_mat = np.matrix(self.jacobian_matrix.evalf(subs=theta_dict, chop=True, maxn=4)).astype(np.float64).T\n logger.debug(f'jacobian matrix is\\n{jacob_mat} \\n\\n diff is \\n {diff_wanted_calculated}')\n\n thetas = thetas + lr * (jacob_mat * diff_wanted_calculated.T)\n # thetas = np.array(thetas)[0] # this line's purpose is changing Q from matrix level to array level.\n\n prev_error = error\n\n error = linalg.norm(diff_wanted_calculated)\n\n if error > 10 * tolerance:\n lr = 0.3\n elif error < 10 * tolerance:\n lr = 0.2\n error_grad.append((error - prev_error))\n\n # print(error)\n return np.array(thetas)[0]", "def user_cons_hJ(h, Jac, mbs_data, tsim):\n\n # Example: Compute the expression of h and Jac then assign the values.\n # h[1] = mbs_data.q[1]-mbs_data.q[2]*mbs_data.q[2]\n # Jac[1,1] = 1.\n # Jac[1,2] = -2*mbs_data.q[2].\n # IMPORTANT: NEVER REASSIGN h => h = np.array([0,mbs_data.q[1]-mbs_data.q[2]*mbs_data.q[2],0])\n # NEVER REASSIGN Jac => Jac = np.array([[0,0,0,0],[0,1,-2*mbs_data.q[2],0])\n # Both command will change the values of h, Jac in this function\n # but they will not be modified outside the scope of this function.\n rwt = RwtTrackGeometry(mbs_data, pointer = mbs_data.user_model['addons']['rwt'])\n rwc = RwcMain(pointer = mbs_data.user_model['addons']['rwc'])\n \n rwt.cons_hJ(mbs_data, h, Jac)\n rwc.compute_constraints(mbs_data, h, Jac)\n \n \"\"\"id1 = mbs_data.joint_id[\"R1_caisse1\"]\n id2 = mbs_data.joint_id[\"R1_caisse2\"]\n id3 = mbs_data.joint_id[\"R1_chassis1\"]\n id4 = mbs_data.joint_id[\"R1_chassis2\"]\n\n # define the value of the constraint\n h[1] = (mbs_data.q[id1] + mbs_data.q[id3]) - (mbs_data.q[id2] + mbs_data.q[id4])\n\n # define the value of the jacobian matrix\n Jac[1,id1] = 1\n Jac[1,id2] = -1\n Jac[1,id3] = 1\n Jac[1,id4] = -1\"\"\"\n \n return", "def calculateElementQuadrature(self):\n #\n #get physical locations of quadrature points and jacobian information there\n #assume all components live on the same mesh\n #\n #mwf debug\n #import pdb\n #pdb.set_trace()\n self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,\n self.q['x'])\n if self.movingDomain:\n if self.tLast_mesh != None:\n self.q['xt'][:]=self.q['x']\n self.q['xt']-=self.q['x_last']\n alpha = 1.0/(self.t_mesh - 
self.tLast_mesh)\n self.q['xt']*=alpha\n else:\n self.q['xt'][:]=0.0\n self.q['x_last'][:]=self.q['x']\n self.u[0].femSpace.elementMaps.getJacobianValues(self.elementQuadraturePoints,\n self.q['J'],\n self.q['inverse(J)'],\n self.q['det(J)'])\n self.q['abs(det(J))']=numpy.absolute(self.q['det(J)'])\n #\n # get physical space integration weights\n #\n self.q['dV'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n cfemIntegrals.calculateIntegrationWeights(self.q['abs(det(J))'],\n self.elementQuadratureWeights[('u',0)],\n self.q['dV'])\n for ci in range(self.nc): self.q[('dV_u',ci)] = self.q['dV']\n #\n #get shape information at the quadrature points\n #\n self.testSpace[0].getBasisValues(self.elementQuadraturePoints,\n self.q[('w',0)])\n cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('u',0)],\n self.q['abs(det(J))'],\n self.q[('w',0)],\n self.q[('w*dV',0)])\n cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('m',0)],\n self.q['abs(det(J))'],\n self.q[('w',0)],\n self.q[('w*dV_m',0)])\n self.testSpace[0].getBasisGradientValues(self.elementQuadraturePoints,\n self.q['inverse(J)'],\n self.q[('grad(w)',0)])\n cfemIntegrals.calculateWeightedShapeGradients(self.elementQuadratureWeights[('u',0)],\n self.q['abs(det(J))'],\n self.q[('grad(w)',0)],\n self.q[('grad(w)*dV',0)])\n\n #\n self.ellamDiscretization.updateElementQuadrature(self.q)\n #\n self.coefficients.initializeElementQuadrature(self.timeIntegration.t,self.q)", "def calc_jacobian(*args, **kwargs):\n try:\n tag = kwargs[\"tag\"]\n except:\n tag = 0\n\n try:\n sparse = kwargs[\"sparse\"]\n except:\n sparse = True\n\n if sparse:\n try:\n shape = kwargs[\"shape\"]\n except:\n raise ValueError(\"'shape' should be passed to calculate sparse jacobian!\")\n\n \n options = np.array([0,0,0,0],dtype=int)\n result = ad.colpack.sparse_jac_no_repeat(tag, *args, options=options)\n nnz = result[0]\n ridx = result[1]\n cidx = result[2]\n values = result[3]\n assert nnz > 0\n jac = sp.csr_matrix((values, (ridx, cidx)), shape=shape)\n jac = jac.toarray()\n else:\n jac = ad.jacobian(tag, *args)\n return jac", "def AB_zero_Jy(self):\n c = 1e-8 * Constants.c.to('m/s').value\n f = 1e5 / c * self.lpivot.to('AA').value ** 2 * self.AB_zero_flux.value\n return f * Unit('Jy')", "def jacobianTransformedParameters(self, x):\n temp = self.invLogit(x)\n return (self.upper - self.lower) * temp * (1.0 - temp)" ]
[ "0.7174676", "0.7125965", "0.71125233", "0.702315", "0.6786565", "0.6759826", "0.6759339", "0.66214025", "0.6568994", "0.653416", "0.6530263", "0.6495432", "0.64650875", "0.64634913", "0.64042807", "0.6402122", "0.6401185", "0.6370891", "0.6329893", "0.63101774", "0.63079286", "0.63009334", "0.62985003", "0.62862515", "0.6283873", "0.6275171", "0.6254552", "0.6234334", "0.6226862", "0.6226862", "0.6195395", "0.6185433", "0.61789954", "0.6146487", "0.6125557", "0.61021876", "0.6088697", "0.6077178", "0.6075934", "0.6074088", "0.6068991", "0.60672027", "0.6054552", "0.60276246", "0.6002893", "0.5960055", "0.5957439", "0.59270847", "0.59232354", "0.5920915", "0.59181607", "0.59155315", "0.5901883", "0.5849441", "0.5849073", "0.5846575", "0.5795442", "0.57842314", "0.57525754", "0.57479095", "0.5745773", "0.5719537", "0.57059824", "0.5705483", "0.5691522", "0.5689119", "0.5675022", "0.5657975", "0.5642702", "0.5620168", "0.56111276", "0.56076366", "0.5604953", "0.5595783", "0.55950534", "0.5581109", "0.5574347", "0.5572245", "0.55701566", "0.55652493", "0.55485386", "0.55142987", "0.55008847", "0.5468188", "0.54629594", "0.5451866", "0.5448795", "0.5430216", "0.5425723", "0.5416853", "0.5416853", "0.541124", "0.5407656", "0.54052424", "0.5401478", "0.540089", "0.53836876", "0.53666055", "0.5365047", "0.5354201" ]
0.7850823
0
Performs a line search, reducing the step size until the end effector moves closer to the desired position.
def _line_search(self, finger_id, xdes, q0, dq, max_iter=10, dt=1.0):
    xcurrent = self.forward_kinematics(q0)[finger_id]
    original_error = np.linalg.norm(xdes - xcurrent)
    error = np.inf
    q = q0
    iter = 0
    while error >= original_error:
        q = pinocchio.integrate(self.robot_model, q0, dt * dq)
        q = self._project_onto_constraints(q)
        xcurrent = self.forward_kinematics(q)[finger_id]
        error = np.linalg.norm(xdes - xcurrent)
        dt /= 2
        iter += 1
        if iter == max_iter:
            # Likely at a local minimum
            return q0, original_error, 0
    return q, error, 2 * dt
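For orientation, the positive document above shrinks the integration step dt until the end-effector error drops below the starting error. Below is a minimal self-contained sketch of the same backtracking idea, with a plain Euclidean update standing in for pinocchio.integrate and constraint projection; `fk` and all other names here are illustrative assumptions, not taken from the dataset.

import numpy as np

def backtracking_line_search(fk, xdes, q0, dq, max_iter=10, dt=1.0):
    # fk maps a configuration q to an end-effector position.
    original_error = np.linalg.norm(xdes - fk(q0))
    for _ in range(max_iter):
        q = q0 + dt * dq                      # toy update in place of pinocchio.integrate
        error = np.linalg.norm(xdes - fk(q))
        dt /= 2                               # halve before the check, as in the record
        if error < original_error:
            return q, error, 2 * dt           # 2*dt is the step size that produced q
    return q0, original_error, 0              # no improving step; likely a local minimum

# Toy usage: the "end effector" is the configuration itself.
q, err, step = backtracking_line_search(lambda q: q, np.array([1.0, 0.0]),
                                        np.zeros(2), np.array([2.0, 0.0]))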
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LineSearch(Pos, Dir, dx, EFracTol, M, L, Cut,\n Accel = 1.5, MaxInc = 10., MaxIter = 10000):\n #start the iteration counter\n Iter = 0\n\n #find the normalized direction\n NormDir = Dir / np.sqrt(np.sum(Dir * Dir))\n\n #take the first two steps and compute energies\n Dists = [0., dx]\n PEs = [mdlib.calcenergy(Pos + NormDir * x, M, L, Cut) for x in Dists]\n\n #if the second point is not downhill in energy, back\n #off and take a shorter step until we find one\n while PEs[1] > PEs[0]:\n Iter += 1\n dx = dx * 0.5\n Dists[1] = dx\n PEs[1] = mdlib.calcenergy(Pos + NormDir * dx, M, L, Cut)\n\n #find a third point\n Dists = Dists + [2. * dx]\n PEs = PEs + [mdlib.calcenergy(Pos + NormDir * 2. * dx, M, L, Cut)]\n\n #keep stepping forward until the third point is higher\n #in energy; then we have bracketed a minimum\n while PEs[2] < PEs[1]:\n Iter += 1\n\n #find a fourth point and evaluate energy\n Dists = Dists + [Dists[-1] + dx]\n PEs = PEs + [mdlib.calcenergy(Pos + NormDir * Dists[-1], M, L, Cut)]\n\n #check if we increased too much in energy; if so, back off\n if (PEs[3] - PEs[0]) > MaxInc * (PEs[0] - PEs[2]):\n PEs = PEs[:3]\n Dists = Dists[:3]\n dx = dx * 0.5\n else:\n #shift all of the points over\n PEs = PEs[-3:]\n Dists = Dists[-3:]\n dx = dx * Accel\n\n #we've bracketed a minimum; now we want to find it to high\n #accuracy\n OldPE3 = 1.e300\n while True:\n Iter += 1\n if Iter > MaxIter:\n print(\"Warning: maximum number of iterations reached in line search.\")\n break\n\n #store distances for ease of code-reading\n d0, d1, d2 = Dists\n PE0, PE1, PE2 = PEs\n\n #use a parobolic approximation to estimate the location\n #of the minimum\n d10 = d0 - d1\n d12 = d2 - d1\n Num = d12*d12*(PE0-PE1) - d10*d10*(PE2-PE1)\n Dem = d12*(PE0-PE1) - d10*(PE2-PE1)\n if Dem == 0:\n #parabolic extrapolation won't work; set new dist = 0\n d3 = 0\n else:\n #location of parabolic minimum\n d3 = d1 + 0.5 * Num / Dem\n\n #compute the new potential energy\n PE3 = mdlib.calcenergy(Pos + NormDir * d3, M, L, Cut)\n\n #sometimes the parabolic approximation can fail;\n #check if d3 is out of range < d0 or > d2 or the new energy is higher\n if d3 < d0 or d3 > d2 or PE3 > PE0 or PE3 > PE1 or PE3 > PE2:\n #instead, just compute the new distance by bisecting two\n #of the existing points along the line search\n if abs(d2 - d1) > abs(d0 - d1):\n d3 = 0.5 * (d2 + d1)\n else:\n d3 = 0.5 * (d0 + d1)\n PE3 = mdlib.calcenergy(Pos + NormDir * d3, M, L, Cut)\n\n #decide which three points to keep; we want to keep\n #the three that are closest to the minimum\n if d3 < d1:\n if PE3 < PE1:\n #get rid of point 2\n Dists, PEs = [d0, d3, d1], [PE0, PE3, PE1]\n else:\n #get rid of point 0\n Dists, PEs = [d3, d1, d2], [PE3, PE1, PE2]\n else:\n if PE3 < PE1:\n #get rid of point 0\n Dists, PEs = [d1, d3, d2], [PE1, PE3, PE2]\n else:\n #get rid of point 2\n Dists, PEs = [d0, d1, d3], [PE0, PE1, PE3]\n\n #check how much we've changed\n if abs(OldPE3 - PE3) < EFracTol * abs(PE3):\n #the fractional change is less than the tolerance,\n #so we are done and can exit the loop\n break\n OldPE3 = PE3\n\n #return the position array at the minimum (point 1)\n PosMin = Pos + NormDir * Dists[1]\n PEMin = PEs[1]\n\n #if using visualization, update the display\n if UseVisual:\n if atomvis.Initialized:\n #update the positions\n atomvis.Update(PosMin)\n else:\n #initialize the visualization window\n atomvis.Init(PosMin)\n\n return PEMin, PosMin", "def line_search(update, x0, g0, g, nstep=0, on=True):\n tmp_s = [0]\n tmp_g0 = [g0]\n tmp_phi = 
[torch.norm(g0) ** 2]\n s_norm = torch.norm(x0) / torch.norm(update)\n\n def phi(s, store=True):\n if s == tmp_s[0]:\n return tmp_phi[0] # If the step size is so small... just return something\n x_est = x0 + s * update\n g0_new = g(x_est)\n phi_new = _safe_norm(g0_new) ** 2\n if store:\n tmp_s[0] = s\n tmp_g0[0] = g0_new\n tmp_phi[0] = phi_new\n return phi_new\n\n if on:\n s, phi1, ite = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], amin=1e-2)\n if (not on) or s is None:\n s = 1.0\n ite = 0\n\n x_est = x0 + s * update\n if s == tmp_s[0]:\n g0_new = tmp_g0[0]\n else:\n g0_new = g(x_est)\n return x_est, g0_new, x_est - x0, g0_new - g0, ite", "def _LineSearch(self, disp_vector):\n self.GetDispDeriv(self.disp_mag, disp_vector)\n disp_mag = self.disp_mag\n disp_sign = 1.0 if self.disp_deriv <= 0.0 else -1.0\n disp_mag *= disp_sign\n disp_sign_same = True\n ref_energy = self.mol.e_total\n\n # binary search to find upper bound on displacement magnitude\n self.n_subiter = 0\n while (disp_sign_same):\n self.n_subiter += 1\n self._DisplaceCoords(+1.0 * disp_mag, disp_vector)\n self.GetDispDeriv(disp_mag, disp_vector)\n self._DisplaceCoords(-1.0 * disp_mag, disp_vector)\n if self.mol.e_total > ref_energy:\n disp_mag *= 0.5\n break\n old_disp_sign = disp_sign\n disp_sign = 1.0 if self.disp_deriv <= 0.0 else -1.0\n disp_sign_same = bool(disp_sign == old_disp_sign)\n disp_mag *= 2.0\n self.GetDispDeriv(disp_mag, disp_vector)\n self.AdjustDispMag(self.n_subiter)\n\n # binary search to find value of displacement within bounds\n numer = 1.0\n denom = 2.0\n for i in range(const.NUMLINESEARCHSTEPS):\n self.n_subiter += 1\n test_disp = disp_mag * numer / denom\n self._DisplaceCoords(+1.0 * test_disp, disp_vector)\n self.GetDispDeriv(disp_mag / (2**(-i)), disp_vector)\n self._DisplaceCoords(-1.0 * test_disp, disp_vector)\n direc = 1.0 if self.disp_deriv < 0.0 else -1.0\n numer = 2*numer + direc\n denom = 2*denom\n disp_mag *= numer / denom\n\n # final line search energy minimized molecular coordinates\n self._DisplaceCoords(+1.0 * disp_mag, disp_vector)", "def _linesearch(self):\n pass", "def _vanilla_line_search(self, gamma, maximum_line_search):\n # alpha: Step size\n alpha = 1.\n trajectory_current = np.zeros((self.T, self.n+self.m, 1))\n for _ in range(maximum_line_search): # Line Search if the z value is greater than zero\n trajectory_current = self.dynamic_model.update_traj(self.trajectory, self.K_matrix, self.k_vector, alpha)\n obj_fun_value_current = self.obj_fun.eval_obj_fun(trajectory_current)\n obj_fun_value_delta = obj_fun_value_current-self.obj_fun_value_last\n alpha = alpha * gamma\n if obj_fun_value_delta<0:\n return trajectory_current, obj_fun_value_current\n return self.trajectory, self.obj_fun_value_last", "def linesearch(self):\n alp = self.alims[0]\n da = self.da\n Na = int((self.alims[1]-self.alims[0])/da)+1\n Jcv_prev = np.Inf\n Ncv = self.Nls\n xpmin = np.hstack((self.xlims[0,:],self.plims[0,:]))\n xpmax = np.hstack((self.xlims[1,:],self.plims[1,:]))\n Nxp = self.n+self.n_p\n xps = np.random.uniform(xpmin,xpmax,size=(Ncv,Nxp))\n xs,ps,_ = np.hsplit(xps,np.array([self.n,Nxp]))\n print(\"========================================================\")\n print(\"============= LINE SEARCH OF OPTIMAL ALPHA =============\")\n print(\"========================================================\")\n for k in range(Na):\n self.cvstem0(xs,ps,alp)\n print(\"Optimal value: Jcv =\",\"{:.2f}\".format(self.Jcv),\\\n \"( alpha =\",\"{:.3f}\".format(alp),\")\")\n if Jcv_prev <= self.Jcv:\n alp = alp-da\n 
break\n alp += da\n Jcv_prev = self.Jcv\n self.alp_opt = alp\n print(\"Optimal contraction rate: alpha =\",\"{:.3f}\".format(alp))\n print(\"========================================================\")\n print(\"=========== LINE SEARCH OF OPTIMAL ALPHA END ===========\")\n print(\"========================================================\\n\\n\")\n pass", "def backtracking_line_search(x0, dx, obj, g, stepsize = 1.0, min_stepsize=1e-8,\n alpha=0.2, beta=0.7):\n x = x0\n\n # criterion: stop when f(x + stepsize * dx) < f(x) + \\alpha * stepsize * f'(x)^T dx\n f_term = obj(x)\n grad_term = alpha * np.dot(g.ravel(), dx.ravel())\n\n # decrease stepsize until criterion is met\n # or stop at minimum step size\n while stepsize > min_stepsize:\n fx = obj(x+ stepsize*dx)\n if np.isnan(fx) or fx > f_term + grad_term*stepsize:\n stepsize *= beta\n else:\n break\n\n return stepsize", "def find_line(view, start=0, end=-1, target=0):\r\n if target < 0 or target > view.size():\r\n return -1\r\n\r\n if end == -1: end = view.size()\r\n\r\n lo, hi = start, end\r\n while lo <= hi:\r\n middle = lo + (hi - lo) / 2\r\n if get_line_nr(view, middle) < target:\r\n lo = getEOL(view, middle) + 1\r\n elif get_line_nr(view, middle) > target:\r\n hi = getBOL(view, middle) - 1\r\n else:\r\n return view.full_line(middle)\r\n return -1", "def line_search_step(state, value_and_gradients_function, search_direction,\n grad_tolerance, f_relative_tolerance, x_tolerance):\n dtype = state.position.dtype.base_dtype\n line_search_value_grad_func = _restrict_along_direction(\n value_and_gradients_function, state.position, search_direction)\n derivative_at_start_pt = tf.reduce_sum(input_tensor=state.objective_gradient *\n search_direction)\n ls_result = linesearch.hager_zhang(\n line_search_value_grad_func,\n initial_step_size=tf.convert_to_tensor(value=1, dtype=dtype),\n objective_at_zero=state.objective_value,\n grad_objective_at_zero=derivative_at_start_pt)\n\n state_after_ls = update_fields(\n state,\n failed=~ls_result.converged, # Fail if line search failed to converge.\n num_iterations=state.num_iterations + 1,\n num_objective_evaluations=(\n state.num_objective_evaluations + ls_result.func_evals))\n\n def _do_update_position():\n return _update_position(\n state_after_ls,\n search_direction * ls_result.left_pt,\n ls_result.full_result.f,\n ls_result.full_result.full_gradient, # Extract gradient\n grad_tolerance, f_relative_tolerance, x_tolerance)\n\n return prefer_static.cond(\n state_after_ls.failed,\n true_fn=lambda: state_after_ls,\n false_fn=_do_update_position)", "def _feasibility_line_search(self, gamma, maximum_line_search):\n # alpha: Step size\n alpha = 1.\n trajectory_current = np.zeros((self.T, self.n+self.m, 1))\n for _ in range(maximum_line_search): # Line Search if the z value is greater than zero\n trajectory_current = self.dynamic_model.update_traj(self.trajectory, self.K_matrix, self.k_vector, alpha)\n obj_fun_value_current = self.obj_fun.eval_obj_fun(trajectory_current)\n obj_fun_value_delta = obj_fun_value_current-self.obj_fun_value_last\n alpha = alpha * gamma\n if obj_fun_value_delta<0 and (not np.isnan(obj_fun_value_delta)):\n return trajectory_current, obj_fun_value_current\n return self.trajectory, self.obj_fun_value_last", "def cgd_linesearch(x, error0, direction, error_fcn, h):\n\n # FIXME: Add tests\n\n x = np.asarray(x)\n direction = np.asarray(direction)\n h = np.asarray(h)\n\n direction_n = direction / np.linalg.norm(direction, ord=2)\n error_list = [error0]\n stepsize = h\n maxSteps = 6\n factor = 
np.zeros(1)\n\n for iStep in range(1, maxSteps):\n\n factor = np.concatenate([factor, [2**(iStep-1)]])\n xc = x.copy() + direction_n * stepsize * factor[iStep]\n error, xc = error_fcn(xc) # xc may be changed due to limits\n error_list.append(error)\n\n if error_list[-1] >= error_list[-2]: # end of decline\n if iStep == 1: # no success\n step = 0\n error1 = error0\n\n else: # parabolic\n p = np.polyfit(factor, error_list, 2)\n fx = np.arange(factor[0], factor[-1] + .1, .1)\n fy = np.polyval(p, fx)\n idx = np.argmin(fy)\n fxm = fx[idx]\n xcm = x.copy() + direction_n * stepsize * fxm\n error1, xcm = error_fcn(xcm) # xc may be changed due to limits\n\n if error1 < error_list[iStep - 1]:\n xc = xcm.copy()\n step = fxm\n\n else: # finding Minimum did not work\n xc = x.copy() + direction_n * stepsize * factor[iStep-1] # before last point\n error1, xc = error_fcn(xc) # recalculate error in order to check for limits again\n step = factor[iStep-1]\n\n return xc, error1, step\n\n step = factor[iStep]\n error1 = error_list[iStep]\n\n return xc, error1, step", "def _line_search(self, pi_gain_lo, feed_dict):\n step_size = 1.0\n\n for _ in range(self.line_search_steps):\n if self._terminate:\n return\n\n # Update the policy\n self.sess.run(self.model.update_pi, feed_dict={self.model.pi_opt_conf.lr_ph: step_size})\n\n # Compute the policy surrogate gain and the KL divergence\n kl, pi_gain = self.sess.run([self.model.mean_kl, self.model.pi_gain], feed_dict=feed_dict)\n\n if not np.isfinite(kl) or not np.isfinite(pi_gain):\n logger.warning(\"Non-finite loss values: kl=%f, pi_gain=%f. Shrinking step\", kl, pi_gain)\n elif kl > self.max_kl * 1.5:\n logger.debug(\"Violated KL constraint. Shrinking step\")\n elif pi_gain < pi_gain_lo:\n logger.debug(\"Surrogate objective did not improve. 
Shrinking step\")\n else:\n logger.debug(\"Stepsize OK\")\n break\n\n # Shrink step size\n step_size = step_size * 0.5\n\n else:\n logger.info(\"Line search could not compute a good step\")\n # Reset pi to its initial state\n self.sess.run(self.model.reset_pi)", "def armijo_line_search(self, n_iter:int, coords:torch.tensor, \n st:OptState, energy_helper:EnergyAndGradHelperInterface) -> int:\n \n eta = 4 # the higher eta the closer to the original move will be the line search\n c1 = 1e-4\n max_ls = 5\n \n # directional derivative\n gtd = torch.sum(st.flat_grad * st.d, dim=1) # g * d\n\n line_search_done = torch.zeros(st.n_confs, dtype=torch.int16, device=self.device)\n st.t = self._add_grad(coords, st.t, st.d)\n \n F_new, std = energy_helper.compute_energy()\n \n ls_step = 0\n ls_func_evals = 0\n while ls_step < max_ls:\n ls_func_evals += 1\n bad_steps = F_new > st.loss + c1*st.t*gtd # Armijo condition\n #bad_steps &= (st.status_actives & Status.ALL_CONVERGED) < Status.ALL_CONVERGED\n line_search_done += bad_steps.to(dtype=torch.int16)\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(f'{n_iter}.{ls_step} loss {st.loss[0:5].detach().cpu().numpy()} F_new{F_new[0:5].detach().cpu().numpy()}')\n# log.debug(f' x12 {coords[0:5,0:2,0].detach().cpu().numpy()}') \n \n if bad_steps.sum() == 0:\n break\n \n t_new = st.t.clone()\n if ls_step == 0:\n st.t[bad_steps] = st.t[bad_steps]/eta\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(f' bad {bad_steps} t-t_new {st.t-t_new}')\n \n self._add_grad_wFilter(coords, st.t-t_new, st.d, bad_steps)\n \n # filtering did not save any time\n F_new, std = energy_helper.compute_energy() \n #F_new[bad_steps] = energy_helper.compute_energy_with_filter(bad_steps) \n \n ls_func_evals += 1\n ls_step += 1\n \n ##############################################\n # adjust learning rate so that we do have occasional but not\n # frequent line searches\n \n st.iters_with_no_linsearch[line_search_done == 0] += 1\n st.iters_with_no_linsearch[line_search_done > 0] -= 1\n st.iters_with_no_linsearch[st.iters_with_no_linsearch < 0] = 0\n st.lr[st.iters_with_no_linsearch >= 5] *= 1.2\n st.iters_with_no_linsearch[st.iters_with_no_linsearch >= 5] -= 2\n\n st.lr[line_search_done > 0] = st.t[line_search_done > 0]\n st.lr[st.lr < 0.000001] = 0.000001\n \n# st.iters_with_high_linsearch += line_search_done\n# st.iters_with_high_linsearch[st.iters_with_high_linsearch > 6] = 6\n# # st.iters_with_high_linsearch[line_search_done == 0] -= 1\n# # st.iters_with_high_linsearch[st.iters_with_high_linsearch < 0] = 0\n# st.lr[st.iters_with_high_linsearch >= 4] /= 1.2\n# st.iters_with_high_linsearch[st.iters_with_high_linsearch >= 4] -= 1\n if log.isEnabledFor(logging.DEBUG):\n log.debug(f'lr: {st.lr}')\n \n st.loss = F_new\n st.std = std\n \n if n_iter != self.convergence_opts.max_iter:\n # re-evaluate function only if not in last iteration\n # the reason we do this: in a stochastic setting,\n # no use to re-evaluate that function here\n st.flat_grad = energy_helper.compute_grad().reshape(st.n_confs,-1)\n #st.abs_grad_sum = st.flat_grad.abs().sum(1)\n if log.isEnabledFor(logging.DEBUG):\n log.debug('{} loss: {}, coords[0:5,1,0] {}'.format(\n n_iter, st.loss[0:5].detach().cpu().numpy(), coords[0:5,0,0].detach().cpu().numpy()))\n\n return ls_func_evals", "def find_move_from_line(\n x,\n data,\n overlap_penalty,\n norm_penalty,\n offdiagonal_energy_penalty,\n lagrange_multiplier,\n energy_weights=None,\n max_norm_deviation=0.2,\n):\n N = np.abs(data[\"overlap\"].diagonal(axis1=1, axis2=2))\n Nij = 
np.asarray([np.sqrt(np.outer(a, a)) for a in N])\n nwf = data[\"energy\"].shape[-1]\n if energy_weights is None:\n energy_weights = np.ones(nwf) / nwf\n\n energy = data[\"energy\"] / Nij\n overlap = data[\"overlap\"]\n # print(\"energy cost\", np.sum(energy.diagonal(axis1=1,axis2=2),axis=1))\n # print(\"overlap cost\",np.sum(np.triu(overlap**2,1),axis=(1,2)) )\n # print(\"offdiagonal_energy\", energy)\n # print(\"norm\",np.einsum('ijj->i', (overlap-1)**2 ))\n cost = (\n np.einsum(\"i,nii->n\", energy_weights, energy)\n + overlap_penalty * np.sum(np.triu(overlap**2, 1), axis=(1, 2))\n + np.sum(lagrange_multiplier * np.triu(overlap, 1), axis=(1, 2))\n + offdiagonal_energy_penalty * np.sum(np.triu(energy**2, 1), axis=(1, 2))\n + norm_penalty * np.einsum(\"ijj->i\", (overlap - 1) ** 2)\n )\n\n # good_norms = np.prod(np.einsum('ijj->ij',np.abs(overlap-1) < max_norm_deviation),axis=1)\n # print(\"good norms\", good_norms, 'cost', cost[good_norms])\n xmin = linemin.stable_fit(x, cost)\n return xmin, cost", "def linesearch(f, x0, J0, g0, a, amax, mu1, mu2, p):\r\n J, g, a = bracketing(f, x0, J0, g0, a, amax, mu1, mu2, p)\r\n x = x0+a*p\r\n return x, J, g, a", "def line_search_asrch(fcn, x, f, g, arc, stp, maxfev,\r\n ncur=NEGATIVE_CURVATURE_PARAMETER,\r\n ftol=SUFFICIENT_DECREASE_PARAMETER,\r\n gtol=CURVATURE_CONDITION_PARAMETER,\r\n xtol=X_TOLERENT, stpmin=MINIMUM_STEP_LENGTH,\r\n stpmax=MAXIMUM_STEP_LENGTH, print_flag=False, fname=None,\r\n bisect=0.0):\r\n # parameters\r\n xtrapu = 4\r\n p66 = 0.66\r\n\r\n # flags\r\n is_bracketed = False\r\n info_out_flag = False\r\n # counters\r\n nfev = 0\r\n\r\n # interval width tracker\r\n width = stpmax - stpmin\r\n width1 = 2 * width\r\n\r\n # inital values\r\n [s, ds] = arc(0)\r\n\r\n stx = 0.0\r\n fx = f\r\n dx = np.dot(g, ds)\r\n\r\n finit = fx\r\n ginit = dx\r\n\r\n fp = 0.0\r\n dp = 0.0\r\n\r\n sty = 0.0\r\n fy = 0.0\r\n dy = 0.0\r\n\r\n # formatting & printing\r\n if print_flag or fname is not None:\r\n print_flag = True\r\n header_format = \"{:4} {:6}\" + 5 * \" {:14.14}\" + \"|{:4.4}\\n\"\r\n data_format_1 = \"{:4} {:6}\" + 5 * \" {:14.8g}\"\r\n # data_format_2 = \"|{:4}\\n\"\r\n print(header_format.format(\"nfev\", \"b\", \"stx\", \"sty\", \"stp\", \"fp\", \"dp\", \"case\",\r\n file=fname), end=\"\")\r\n\r\n while True:\r\n if is_bracketed:\r\n stmin = min(stx, sty)\r\n stmax = max(stx, sty)\r\n else:\r\n stmin = stx\r\n stmax = stp + xtrapu * (stp - stx)\r\n\r\n # safeguard the trial step size (make sure step passed in is in legal bounds\r\n stp = max(stp, stpmin)\r\n stp = min(stp, stpmax)\r\n\r\n # If an unusual termination is to occur then let\r\n # stp be the lowest point obtained so far.\r\n if ((is_bracketed and (stp <= stmin or stp >= stmax))\r\n or nfev >= maxfev - 1\r\n or (is_bracketed and stmax - stmin <= xtol * stmax)):\r\n stp = stx\r\n (s, ds) = arc(stp)\r\n # print(\"(s, ds) = \", s, ds)\r\n # Likelihood at new beta\r\n f, g = fcn(x + s)\r\n\r\n fp = float(f)\r\n dp = np.dot(g, ds)\r\n nfev += 1\r\n\r\n if print_flag:\r\n print(data_format_1.format(nfev, is_bracketed, stx, sty, stp, fp, dp), file=fname,\r\n end=\"\")\r\n\r\n # compute modified function values\r\n mstx = stx\r\n mfx = fx - finit - ftol * (ginit * stx + 0.5 * min(ncur, 0) * stx ** 2)\r\n mdx = dx - ftol * (ginit + min(ncur, 0) * stx)\r\n\r\n # mstp = stp\r\n mfp = fp - finit - ftol * (ginit * stp + 0.5 * min(ncur, 0) * stp ** 2)\r\n mdp = dp - ftol * (ginit + min(ncur, 0) * stp)\r\n\r\n msty = sty\r\n mfy = fy - finit - ftol * (ginit * sty + 0.5 * min(ncur, 0) * 
sty ** 2)\r\n mdy = dy - ftol * (ginit + min(ncur, 0) * sty)\r\n\r\n # convergence tests TODO could wrap this up to not repeat myself - not worth for now\r\n # since don't have understanding\r\n\r\n # terminate if rounding errors prevent progress\r\n if is_bracketed and (stp <= stmin or stp >= stmax):\r\n info_out_flag = LineSearchFlags.TERMINATION_ROUNDING_ERROR\r\n\r\n # terminate at stpmax\r\n if stp == stpmax and mfp <= 0 and mdp < 0:\r\n info_out_flag = LineSearchFlags.TERMINATION_STPMAX\r\n\r\n # terminate at stpmin\r\n if stpmin > 0 and stp == stpmin and (mfp > 0 or mdp >= 0):\r\n info_out_flag = LineSearchFlags.TERMINATION_STPMIN\r\n\r\n # terminate if interval is too small\r\n if is_bracketed and (stmax - stmin < xtol * stmax):\r\n info_out_flag = LineSearchFlags.TERMINATION_INTERVAL_TOO_SMALL\r\n\r\n # terminate if reached maximum number of function evaluations\r\n if nfev >= maxfev:\r\n info_out_flag = LineSearchFlags.TERMINATION_MAX_FUNC_EVALS\r\n\r\n # terminate if strong wolfe conditions are met\r\n if (fp <= finit + ftol * (ginit * stp + 0.5 * min(ncur, 0) * stp ** 2)\r\n and abs(dp) <= gtol * abs(ginit + min(ncur, 0) * stp)):\r\n info_out_flag = LineSearchFlags.TERMINATION_STRONG_WOLFE_MET\r\n\r\n # if strong wolfe conditions are met with at == stpmax\r\n if info_out_flag == LineSearchFlags.TERMINATION_STRONG_WOLFE_MET and stp == stpmax:\r\n info_out_flag = LineSearchFlags.TERMINATION_STRONG_WOLFE_AND_STPMAX\r\n\r\n if info_out_flag is not False: # if we have info_out_flag\r\n x = x + s\r\n if print_flag:\r\n print(f\"|t-{LineSearchFlags(info_out_flag).name}\", file=fname)\r\n return (x, f, g, stp, info_out_flag, nfev)\r\n\r\n # update the interval\r\n if mfp > mfx:\r\n # case U1\r\n # stx = stx fx = fx dx = dx\r\n # tODO these values look static per iter sometimes\r\n sty = stp\r\n fy = fp\r\n dy = dp\r\n is_bracketed = True\r\n ucase = 1\r\n elif mfp <= mfx and mdp * (stx - stp) > 0:\r\n # case U2\r\n stx = stp\r\n fx = fp\r\n dx = dp\r\n # sty = sty fy = fy dy = dy\r\n ucase = 2\r\n else: # mfp <= mfx && mdp*(stx-stp) < 0\r\n # case U3\r\n sty = stx\r\n fy = fx\r\n dy = dx\r\n stx = stp\r\n fx = fp\r\n dx = dp\r\n is_bracketed = True\r\n ucase = 3\r\n\r\n # print the case\r\n if print_flag:\r\n print(f\"|u-{ucase}\")\r\n # compute new trial step size\r\n if is_bracketed and bisect:\r\n # bisect if desired\r\n stp = stx + 0.5 * (sty - stx)\r\n else:\r\n # compute new step using interpolation\r\n stp = line_search_astep(\r\n mstx, mfx, mdx, msty, mfy, mdy, stp, mfp,\r\n mdp, is_bracketed, stmin, stmax)\r\n # safeguard the step and update the interval width tracker\r\n if is_bracketed:\r\n if (abs(sty - stx) >= p66 * width1):\r\n stp = stx + 0.5 * (sty - stx)\r\n\r\n width1 = width\r\n width = abs(sty - stx)", "def indexToWin(self, direction, line): \n size = self.size\n if len(line[1:]) != size - 1:\n return None\n\n ## Experiment in avoiding conditional if then statements\n i = 0\n if direction in ['Vertical' , 'Horizontal']:\n # A vertical line is defined by the x coordinate of its points\n # A horizontal line is defined byt the y coordinate of its points\n i = {'Vertical': line[1]%size, 'Horizontal' : line[1]//size }[direction]\n\n return {'D-neg' : [k for k in range(size -1, size**2, size-1)[:-1] if k not in line[1:]][0],\n \n 'D-pos' : [k for k in range(0, size**2, size+1) if k not in line[1:]][0],\n \n 'Vertical' : [k for k in range(i, i + size**2, size) if k not in line[1:]][0],\n \n 'Horizontal': [k for k in range(i*size, i*size +size) if k not in line[:1]][0] } 
[direction]\n\n #Explanation of return statement above:\n #For each line on the grid, the index of its points belong to an arithmetic progression.\n #For example, the first horizontal line's indices are; 0,1,2..size-1 \n #Ex 6x6:\n # 0 1 2 3 4 5\n # 6 7\n # 12 14\n # 18 21\n # 24 28\n # 30 35\n # So for horizontals step size is 1, shift by n to get all others\n # For verticals step size is n, shift by i to get all others\n # For positive diagonal step size is n+1\n # for negative diagonal step size is n-1 ", "def line_moved(self):\n\n # The line is supposed to be moved by hand to the beginning of first wrinkle.\n # The optimal spot is local maximum (not always visible)\n ext_index = self.index_of_drop + int(self.line.value() * 10000)\n ext_value = self.data[ext_index]\n\n p_i, p_f = toolbox_2.get_pressure_change(self.measurement)\n smallest_growing_particle = toolbox_2.minimum_particle_diameter(p_i, p_f, self.saturation_percentage / 100)\n\n n = toolbox_2.particle_count_2(ext_value)\n\n # measurement series 1\n if self.selected_data == 3 and 7 <= self.meas_selected_number <= 17 and self.meas_selected_series == 1:\n index = self.meas_selected_number - 7 # Assumes that first measurement is number 7\n self.smallest_particles[index] = smallest_growing_particle\n self.number_counts[index] = n\n\n self.update_distribution()\n # Update plot\n self.curve_distribution.setData(self.particle_distribution_x, self.particle_distribution_y*1e-10)\n self.curve_distribution_cumulative.setData(self.smallest_particles, self.number_counts*1e-10)\n\n # measurement series 2\n elif self.selected_data == 3 and self.meas_selected_series == 2:\n index = self.meas_selected_number - 1 # begins from 1, 0th measurement is just copy of 8th\n self.number_counts_2[index] = n\n\n self.curve_rotatometer.setData(np.array([4, 6, 8, 10, 12, 14, 16, 18]), self.number_counts_2*1e-10)\n x = np.linspace(3.5, 20, 100)\n self.curve_rotatometer_fit.setData(x, self.number_counts_2[0] * 4 * (1 / x) *1e-10)\n\n #print(\"N\", \"%.2e\"%n, \"dpres\", round(p_i - p_f))", "def pathFinder(M, start, end):\r\n point = M[start-1][end-1]\r\n if point != 0:\r\n pathFinder(M, start, point)\r\n print \"V\" + str(point)\r\n pathFinder(M, point, end)", "def setLinescanPos(self, point):\n self.lineHorizontal.setPos(0, point.y())\n self.lineVertical.setPos(point.x(), 0)\n self.handle.setPos(point)\n self.emitter.signal.emit()\n self.update()", "def offsetline(linen, pattern_result):\n\n if \"nlines\" in pattern_result:\n nlines = pattern_result[\"nlines\"]\n else:\n nlines = 0\n new_linen = linen - nlines - 1\n if new_linen < 0:\n return 0\n else:\n return new_linen", "def _move(self, direction, count, step_size):\n for _ in range(count):\n if not self.has_hit_edge():\n self.position = min(self.scene.step_count - 1, max(0,\n self.position + direction * step_size))\n self._take_measure()\n self.steps_taken += 1\n self.visited_positions.append(self.position)", "def step (result: list, line: str):\n return result + points (result [-1], line)", "def Find_Line_By_XY( self, x, y ):\r\n for i in self.handle_list:\r\n #examine the bounding box of each line\r\n bbox = self.canvas_one.bbox( i.line_handle )\r\n xb1 = bbox[ 0 ]\r\n yb = ( bbox[ 1 ] + bbox[ 3 ] ) / 2\r\n xb2 = bbox[ 2 ]\r\n if x >= xb1 and x <= xb2 and abs( y-yb ) <= cb.ytick / 2:\r\n #found, return handle\r\n return i\r\n #not found return -1\r\n return -1", "def LineSearchXS(F, x, s, dx, ds, L, U, iterates):\n \n L_val = F(x + dx * L, s + ds * L)\n U_val = F(x + dx * U, s + ds * U)\n \n if 
iterates <= 0:\n if L_val < U_val:\n return L\n else:\n return U\n \n \n if L_val < U_val:\n return LineSearchXS(F, x, s, dx, ds, L, (U + L) / 2, iterates - 1)\n else:\n \treturn LineSearchXS(F, x, s, dx, ds, (U + L) / 2, U, iterates - 1)", "def move_to_line_start(self) -> None:\n self.index = self.buffer.get_line_start(self.index)", "def lineDetectThread(self):\n \n while True:\n\n tmpList = self.octo.analog_read_all()\n avg = 0\n for x in tmpList:\n avg = avg + x\n\n avg = avg / len(tmpList)\n\n if avg <= self.cutoff:\n if not self.seenLine: # first time seeing the white line\n print(\"Line detected\")\n \n self.seenLine = True\n \n # change directions of the motors to drive away from the line\n if self.lmd == self.BWD:\n self.lmd = self.FWD\n else:\n self.lmd = self.BWD\n \n if self.rmd == self.BWD:\n self.rmd = self.FWD\n else:\n self.rmd = self.BWD\n\n self.myMotor.set_drive(self.L_MTR, self.lmd, self.lmp)\n self.myMotor.set_drive(self.R_MTR, self.rmd, self.rmp)\n else:\n if self.seenLine:\n print(\"back to black\")\n self.seenLine = False", "def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))", "def line_search(solve_eq, find_jac, x, alpha=1e-4, NRtol=1e-6):\n\n print \"\\n\\tBegin Newton line search method\"\n print \"\\t------------------------------------\"\n finCond = False\n while not finCond:\n # Calculate the newton step, dx\n F_x0 = solve_eq(x)\n j_x0 = find_jac(x)\n dx = linalg.solve(j_x0, -F_x0)\n\n # Define the master function\n f_x0 = real(0.5*dot(conj(F_x0), F_x0))\n\n slope_x0dx = real(-2*f_x0) #-dot(conj(F_x0), F_x0)\n\n # Decide whether to take the Newton Step by Armijo line search method\n # First initialise variables so that first iteration happens \n lam = 1 \n lamPrev = 1 \n f_xn = f_x0 + alpha*lam*slope_x0dx + 1\n f_lam2Prev = 0 # Doesn't matter, will be set before it is used\n f_lamPrev = 0 \n counter = 0\n\n # Now choose a lambda and see if it is good.\n while f_xn >= f_x0 + alpha*lam*slope_x0dx:\n\n if counter == 1:\n # set lambda by a quadratic model for the residual master function f\n lam = - slope_x0dx / 2*(f_xn - f_x0 - slope_x0dx)\n #print \"square model lambda =\", lam\n \n # impose upper and lower bounds on lambda \n if lam > 0.5:\n lam = 0.5\n if lam < 0.1:\n lam = 0.1\n\n elif counter > 1:\n # set lambda by a cubic model for the residual master function f\n abmat = zeros((2,2))\n abmat[0,0] = 1/(lamPrev*lamPrev)\n abmat[0,1] = -1/(lam2Prev*lam2Prev)\n abmat[1,0] = -lam2Prev/(lamPrev*lamPrev)\n abmat[1,1] = lamPrev/(lam2Prev*lam2Prev)\n\n f3vec = zeros(2)\n f3vec[0] = f_lamPrev - f_x0 - slope_x0dx*lamPrev\n f3vec[1] = f_lam2Prev - f_x0 - slope_x0dx*lam2Prev\n\n abvec = (1./(lamPrev-lam2Prev)) * dot(abmat, f3vec)\n aaa = abvec[0]\n bbb = abvec[1]\n lam = (- bbb + sqrt(bbb**2 - 3*aaa*slope_x0dx)) / 3*aaa\n\n # impose upper and lower bounds on lambda \n if lam > 0.5*lamPrev:\n lam = 0.5*lamPrev\n if lam < 0.1*lamPrev:\n lam = 0.1*lamPrev\n\n #print \"cubic model lambda\", lam\n\n if lam < 1e-6:\n print \" loop counter of last step = \", counter-1\n print \"step too small, take full Newton step and hope for the best.\"\n lam = 1\n break\n\n # calculate the residual and master function so we can see if the\n # step was a good one.\n F_xn = solve_eq(x + lam*dx)\n f_xn = 
real(0.5*dot(conj(F_xn), F_xn))\n #print \"\"\" |F_xn| = \"\"\", linalg.norm(F_xn) \n\n # update old values for cubic method\n lam2Prev = lamPrev\n lamPrev = lam\n f_lam2Prev = f_lamPrev\n f_lamPrev = f_xn\n\n counter += 1\n \n\n # change x to the value at the step we just took\n x = x + lam*dx\n\n # Extra symmerterisation step\n x[0:vecLen] = symmetrise(x[0:vecLen])\n x[vecLen:2*vecLen] = symmetrise(x[vecLen:2*vecLen])\n x[2*vecLen:3*vecLen] = symmetrise(x[2*vecLen:3*vecLen])\n x[3*vecLen:4*vecLen] = symmetrise(x[3*vecLen:4*vecLen])\n \n # Print norm and check if we can exit yet.\n L2 = linalg.norm(F_xn)\n print \"\"\"|F_xn| = {0:10.5g}, |dx| = {1:10.5g}, lambda = {2}\"\"\".format(\n L2, linalg.norm(dx), lam)\n\n # Quit if L2 norm is getting huge\n if L2 > 1e50:\n print \"Error: Shooting off to infinity!\"\n exit(1)\n\n if L2 < NRtol:\n print \"Solution found!\"\n finCond = True\n\n\n PSI = xVec[0:vecLen] \n Cxx = xVec[1*vecLen:2*vecLen] \n Cyy = xVec[2*vecLen:3*vecLen] \n Cxy = xVec[3*vecLen:4*vecLen]\n pickle.dump((PSI,Cxx,Cyy,Cxy), open(outFileName, 'w'))\n\n return x", "def do_step(subpixel_steps, normal, candidate_indices, candidate_normals, angle_weight):\n n_options = candidate_indices.shape[0]\n\n if n_options == 0:\n return\n elif n_options == 1:\n # If we have only one valid option, go here for the next iteration\n return candidate_indices.flatten()\n else:\n # There are multiple valid candidates which could be an extension of the line. Compute the score for each.\n scores = score_candidates(normal, subpixel_steps, candidate_normals, angle_weight)\n\n # Update current point with the best scoring point\n return candidate_indices[np.argmin(scores), :]", "def my_line_search(c1, c2, pk, xk, old_x=None, alpha_0=0, alpha_max=1, method=\"sd\"):\n phi0 = phi_function(0, pk, xk)\n dphi0 = phi_prime(pk, xk)\n\n # choose alpha_1\n if old_x is not None and dphi0 != 0 and method == \"sd\":\n alpha_1 = min(1.0, 1.01 * 2 * (rosenbock2Nd(xk, 0) - rosenbock2Nd(old_x, 0)) / dphi0)\n else:\n alpha_1 = 1.0\n\n if alpha_1 <= 0:\n alpha_1 = 1.0\n\n if alpha_max is not None:\n alpha_1 = min(alpha_1, alpha_max)\n\n alpha_vec = [alpha_0, alpha_1]\n\n i = 1\n while True:\n # alpha i = ai\n alpha_i = alpha_vec[i]\n # compute phi(ai)\n phi_i = phi_function(alpha_i, pk, xk)\n # Armijo condition.\n if phi_i > phi0 + c1 * alpha_i * dphi0 \\\n or (i > 1 and phi_function(alpha_i, pk, xk) >= phi_function(alpha_vec[i - 1], pk, xk)):\n return zoom(alpha_low=alpha_vec[i - 1], alpha_high=alpha_vec[i], xk=xk, pk=pk, c1=c1, c2=c2), i\n\n # compute phi prime at alpha i (ai).\n phi_prime_alpha_i = phi_prime(pk, xk + alpha_i * pk)\n # curvature condition.\n if abs(phi_prime_alpha_i) <= -c2 * dphi0:\n return alpha_i, i\n\n if phi_prime_alpha_i >= 0:\n return zoom(alpha_low=alpha_i, alpha_high=alpha_vec[i - 1], xk=xk, pk=pk, c1=c1, c2=c2), i\n\n alpha_vec.append(random.uniform(alpha_i, alpha_max))\n i += 1", "def search(path, f):\n\n started = False\n\n for count, line in enumerate(f):\n number = count + 1\n if search_line(line):\n if not started:\n print config.term.highlight(relpath(path), 'GREEN')\n if config.filenames:\n break\n started = True\n if len(line) <= config.output_limit:\n print '%d:%s' % (number,\n config.term.highlight(line.rstrip('\\n\\r'),\n ('BLACK', 'BG_YELLOW'),\n config.search))\n else:\n print '%d:LINE IS TOO LONG (>%d)' % (number, config.output_limit)\n if started:\n print", "def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))", "def 
free_line(p, eps, s, dps1, dps2, ds):\n px = p[0]\n py = p[1]\n s1x = s[0, 0]\n s1y = s[0, 1]\n s2x = s[1, 0]\n s2y = s[1, 1]\n if s1x == s2x and s1y == s2y:\n if eucl_dist(p, s[0]) > eps:\n lf = [-1, -1]\n else:\n lf = [0, 1]\n else:\n if point_to_seg(p, s[0], s[1], dps1, dps2, ds) > eps:\n # print(\"No Intersection\")\n lf = [-1, -1]\n else:\n segl = eucl_dist(s[0], s[1])\n segl2 = segl * segl\n intersect = circle_line_intersection(px, py, s1x, s1y, s2x, s2y, eps)\n if intersect[0][0] != intersect[1][0] or intersect[0][1] != intersect[1][1]:\n i1x = intersect[0, 0]\n i1y = intersect[0, 1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n\n i2x = intersect[1, 0]\n i2y = intersect[1, 1]\n u2 = (((i2x - s1x) * (s2x - s1x)) + ((i2y - s1y) * (s2y - s1y))) / segl2\n ordered_point = sorted((0, 1, u1, u2))\n lf = ordered_point[1:3]\n else:\n if px == s1x and py == s1y:\n lf = [0, 0]\n elif px == s2x and py == s2y:\n lf = [1, 1]\n else:\n i1x = intersect[0][0]\n i1y = intersect[0][1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n if 0 <= u1 <= 1:\n lf = [u1, u1]\n else:\n lf = [-1, -1]\n return lf", "def line_search(self, Rs_grads, mu_grads, obj_init, r, eps):\n step = 1.\n\n while step > 1e-15:\n\n R_search = [np.clip(R + step*R_grad, 0., np.max(R))\n for (R_grad, R) in Rs_grads]\n mu_search = mu_grads[1] + step*mu_grads[0]\n r_search = mu_search + kron_mvp(R_search, eps)\n obj_search, kl_search, like_search = self.eval_obj(R_search, mu_search,\n r_search)\n if obj_init - obj_search > step:\n pos_def = True\n for R in R_search:\n if np.all(np.linalg.eigvals(R) > 0) == False:\n pos_def = False\n if pos_def:\n return R_search, mu_search, obj_search, step\n step = step * 0.5\n return None", "def findHoughLines():\n global D\n\n # apply Hough transformation to find straight line segments\n # For more information, see:\n # http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.html\n #\n # Here are the options for the Hough transformation\n distance_res = 3 # distance resolution of Hough accumulator, in pixels, was 3\n angle_res = radians(10) # angular resolution of Hough acc., in radians, was 10\n min_votes = 20 # how many votes needed to count a line?, was 20\n min_line_len = 25 # shortest allowable line, in pixels, was 5\n max_gap_len = 40 # pixels, was 30\n \n # The return value is a list of line segments\n Lines = cv.HoughLines2(D.hough,\n D.storage,\n cv.CV_HOUGH_PROBABILISTIC,\n distance_res, \n angle_res,\n min_votes,\n min_line_len,\n max_gap_len)\n\n N = len(Lines)\n #print \"\\n\\n\"\n most_vertr_slope = 0\n most_vertr_line = ()\n most_vertl_slope = 0\n most_vertl_line = ()\n\n most_horiz_slope = float('inf')\n most_horiz_line = ()\n\n for i in range(N):\n line = Lines[i]\n #print \"line[\",i,\"] is\", line\n\n start_pt = line[0]\n end_pt = line[1]\n\n midpoint = ((start_pt[0] + end_pt[0])/2, (start_pt[1] + end_pt[1])/2)\n\n # helpful calls, perhaps:\n cv.Line(D.image, start_pt, end_pt, cv.RGB(0, 255, 0), 1) # 1 == thickness\n cv.Line(D.image, midpoint, midpoint, cv.RGB(0, 0, 255), 4)\n\n run = start_pt[0] - end_pt[0]\n if (run != 0):\n slope = (start_pt[1] - end_pt[1])/float(run)\n else:\n slope = float('inf')\n\n if abs(slope) > abs(most_vertl_slope) and start_pt[0] > CENTER and end_pt[0] > CENTER \\\n and ((start_pt[1] < CENTER and end_pt[1]) > CENTER or (start_pt[1] > CENTER and end_pt[1] < CENTER)):\n most_vertl_slope = slope\n most_vertl_line = line\n if abs(slope) > abs(most_vertr_slope) and start_pt[0] 
< CENTER and end_pt[0] < CENTER \\\n and ((start_pt[1] < CENTER and end_pt[1]) > CENTER or (start_pt[1] > CENTER and end_pt[1] < CENTER)):\n most_vertr_slope = slope\n most_vertr_line = line\n if abs(slope) < abs(most_horiz_slope) and start_pt[1] > CENTER and end_pt[1] > CENTER \\\n and ((start_pt[0] < CENTER and end_pt[0]) > CENTER or (start_pt[0] > CENTER and end_pt[0] < CENTER)):\n\n most_horiz_slope = slope\n most_horiz_line = line\n #print line, slope\n\n if most_vertr_line:\n cv.Line(D.image, most_vertr_line[0], most_vertr_line[1], cv.RGB(255, 255, 255), 3) # 1 == thickness\n if most_vertl_line:\n cv.Line(D.image, most_vertl_line[0], most_vertl_line[1], cv.RGB(255, 255, 255), 3) # 1 == thickness\n if most_horiz_line:\n cv.Line(D.image, most_horiz_line[0], most_horiz_line[1], cv.RGB(255, 0, 100), 3) # 1 == thickness\n\n\n mpwrang = -1 if not most_vertr_line else -atan2(most_vertr_line[0][0]-most_vertr_line[1][0], most_vertr_line[0][1] - most_vertr_line[1][1])\n mpwlang = -1 if not most_vertl_line else -atan2(most_vertl_line[0][0]-most_vertl_line[1][0], most_vertl_line[0][1] - most_vertl_line[1][1])\n mpwfang = -1 if not most_horiz_line else -atan2(most_horiz_line[0][0]-most_horiz_line[1][0], most_horiz_line[0][1] - most_horiz_line[1][1])\n\n mpwrdist = -1 if not most_vertr_line else -most_vertr_line[0][0] + CENTER\n mpwldist = -1 if not most_vertl_line else +most_vertl_line[0][0] - CENTER\n mpwfdist = -1 if not most_horiz_line else most_horiz_line[0][1] - CENTER\n\n data_to_publish = (mpwrang, mpwlang, mpwfang, mpwrdist, mpwldist, mpwfdist)\n D.wallPub.publish(String(str( data_to_publish )))", "def next_line():\r\n set_point(point().next_line())", "def repair_lines(self) -> None:\n if len(self.lines) == 0:\n self.create_lines()\n else:\n for line in self.lines:\n connection = line.connection\n start_component = self.components[connection.start_entity]\n end_component = self.components[connection.end_entity]\n start_pin_location = (\n start_component.location\n + start_component.pin_locations[connection.start_pin]\n )\n end_pin_location = (\n end_component.location\n + end_component.pin_locations[connection.end_pin]\n )\n\n # If the line can be straight we do that\n if (\n start_pin_location.x == end_pin_location.x\n or start_pin_location.y == end_pin_location.y\n ):\n line.locations = [start_pin_location, end_pin_location]\n\n if not (\n start_pin_location == line.locations[0]\n and end_pin_location == line.locations[-1]\n ):\n # Change locations of lines when components move\n if len(line.locations) < 4:\n # Add a bend if the line was previously straight\n x_midpoint = (start_pin_location.x + end_pin_location.x) / 2\n bend_start = Point(x_midpoint, start_pin_location.y)\n bend_end = Point(x_midpoint, end_pin_location.y)\n bends = [bend_start, bend_end]\n line.locations = [start_pin_location, *bends, end_pin_location]\n else:\n # Otherwise, just change the y of the existing points to match\n line.locations[0] = start_pin_location\n line.locations[1].y = start_pin_location.y\n line.locations[-2].y = end_pin_location.y\n line.locations[-1] = end_pin_location", "def expand_line(x0,y0,x1,y1,nx,ny,edge=6):\n def d2(x0,y0,x1,y1):\n \"\"\"squared distance between two points\"\"\"\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)\n def inside(x,e,n):\n \"\"\"return if x is within e and n-e-1\n \"\"\"\n if x < e: return False\n if x > n-e-1: return False\n return True\n # bypass everything\n if False:\n return [x0,y0,x1,y1]\n # pathetic cases\n if x0==x1: return [x0, edge, x1, ny-1-edge]\n if y0==y1: 
return [edge, y0, nx-1-edge, y1]\n # slope and center point of line\n a = (y1-y0)/(x1-x0)\n xc = (x0+x1)/2.0\n yc = (y0+y1)/2.0\n # intersections with the box vertices\n x_e = xc + (edge-yc)/a\n y_e = yc + a*(edge-xc)\n x_n = xc + (ny-edge-1-yc)/a\n y_n = yc + a*(nx-edge-1-xc)\n print \"x,y(0) x,y(1):\",x0,y0,x1,y1\n print \"x,y(e) x,y(n):\",x_e,y_e,x_n,y_n\n e = []\n if inside(x_e,edge,nx): \n e.append(x_e)\n e.append(edge)\n if inside(y_e,edge,ny):\n e.append(edge)\n e.append(y_e)\n if inside(x_n,edge,nx):\n e.append(x_n)\n e.append(ny-edge-1)\n if inside(y_n,edge,ny):\n e.append(nx-edge-1)\n e.append(y_n)\n if len(e) != 4:\n # can happen for small maps?\n msg = \"Math Error in expand_line: \",e\n raise Exception,msg\n return e", "def find_path(self):\n \n if self.line_num != -1:\n return self.line_num\n\n max_line = self.graph.gps_length - 1\n min_line = 0\n #last_id = dg.normalize(self.graph.lines[-1])[0]\n last_id = normalize_simple(self.graph.lines[-1])[0]\n pivot = int((self.trip_id-1)/float(last_id)*self.graph.gps_length)\n #cur_id = dg.normalize(self.graph.lines[pivot])[0]\n cur_id = normalize_simple(self.graph.lines[pivot])[0]\n while cur_id != self.trip_id:\n if cur_id < self.trip_id:\n min_line = pivot\n else:\n max_line = pivot\n #TODO: could make this run in essentially constant time by hopping predetermined distance\n pivot = (min_line + max_line) / 2\n #cur_id = dg.normalize(self.graph.lines[pivot])[0]\n cur_id = normalize_simple(self.graph.lines[pivot])[0]\n\n #while dg.normalize(self.graph.lines[pivot])[0] == self.trip_id:\n while normalize_simple(self.graph.lines[pivot])[0] == self.trip_id:\n pivot -= 1\n\n pivot += 1\n self.line_num = pivot\n return pivot", "def line_search_astep(stx, fx, dx, sty, fy, dy, stp, fp, dp, brackt, stpmin, stpmax):\r\n # parameter\r\n p66 = 0.66 # TODO why a magic constant\r\n\r\n sgnd = dp * (dx / abs(dx))\r\n\r\n if (fp > fx):\r\n # First case: A higher function value. The minimum is bracketed.\r\n # If the cubic step is closer to stx than the quadratic step, the\r\n # cubic step is taken, otherwise the average of the cubic and\r\n # quadratic steps is taken.\r\n\r\n theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp\r\n s = max((abs(theta), abs(dx), abs(dp)))\r\n gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))\r\n if (stp < stx):\r\n gamma = -gamma\r\n\r\n p = (gamma - dx) + theta\r\n q = ((gamma - dx) + gamma) + dp\r\n r = p / q\r\n stpc = stx + r * (stp - stx)\r\n stpq = stx + ((dx / ((fx - fp) / (stp - stx) + dx)) / 2.0) * (stp - stx)\r\n if (abs(stpc - stx) < abs(stpq - stx)):\r\n stpf = stpc\r\n else:\r\n stpf = stpc + (stpq - stpc) / 2.0\r\n\r\n # brackt = true\r\n\r\n elif (sgnd < 0.0):\r\n # Second case: A lower function value and derivatives of opposite\r\n # sign. The minimum is bracketed. 
If the cubic step is farther from\r\n # stp than the secant step, the cubic step is taken, otherwise the\r\n # secant step is taken.\r\n\r\n theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp\r\n s = max((abs(theta), abs(dx), abs(dp)))\r\n gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))\r\n if (stp > stx):\r\n gamma = -gamma\r\n\r\n p = (gamma - dp) + theta\r\n q = ((gamma - dp) + gamma) + dx\r\n r = p / q\r\n stpc = stp + r * (stx - stp)\r\n stpq = stp + (dp / (dp - dx)) * (stx - stp)\r\n if (abs(stpc - stp) > abs(stpq - stp)):\r\n stpf = stpc\r\n else:\r\n stpf = stpq\r\n\r\n # brackt = true\r\n\r\n elif (abs(dp) < abs(dx)):\r\n # Third case: A lower function value, derivatives of the same sign,\r\n # and the magnitude of the derivative decreases.\r\n\r\n # The cubic step is computed only if the cubic ts to infinity\r\n # in the direction of the step or if the minimum of the cubic\r\n # is beyond stp. Otherwise the cubic step is defined to be the\r\n # secant step.\r\n\r\n theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp\r\n s = max((abs(theta), abs(dx), abs(dp)))\r\n\r\n # The case gamma = 0 only arises if the cubic does not t\r\n # to infinity in the direction of the step.\r\n\r\n gamma = s * np.sqrt(max(0.0, (theta / s) ** 2 - (dx / s) * (dp / s)))\r\n if (stp > stx):\r\n gamma = -gamma\r\n\r\n p = (gamma - dp) + theta\r\n q = (gamma + (dx - dp)) + gamma\r\n r = p / q\r\n if (r < 0.0 and gamma != 0.0):\r\n stpc = stp + r * (stx - stp)\r\n elif (stp > stx):\r\n stpc = stpmax\r\n else:\r\n stpc = stpmin\r\n\r\n stpq = stp + (dp / (dp - dx)) * (stx - stp)\r\n\r\n if (brackt):\r\n\r\n # A minimizer has been bracketed. If the cubic step is\r\n # closer to stp than the secant step, the cubic step is\r\n # taken, otherwise the secant step is taken.\r\n\r\n if (abs(stpc - stp) < abs(stpq - stp)):\r\n stpf = stpc\r\n else:\r\n stpf = stpq\r\n\r\n if (stp > stx):\r\n stpf = min(stp + p66 * (sty - stp), stpf)\r\n else:\r\n stpf = max(stp + p66 * (sty - stp), stpf)\r\n\r\n else:\r\n\r\n # A minimizer has not been bracketed. If the cubic step is\r\n # farther from stp than the secant step, the cubic step is\r\n # taken, otherwise the secant step is taken.\r\n\r\n if (abs(stpc - stp) > abs(stpq - stp)):\r\n stpf = stpc\r\n else:\r\n stpf = stpq\r\n\r\n stpf = min(stpmax, stpf)\r\n stpf = max(stpmin, stpf)\r\n\r\n else:\r\n # Fourth case: A lower function value, derivatives of the same sign,\r\n # and the magnitude of the derivative does not decrease. If the\r\n # minimum is not bracketed, the step is either stpmin or stpmax,\r\n # otherwise the cubic step is taken.\r\n\r\n if (brackt):\r\n theta = 3.0 * (fp - fy) / (sty - stp) + dy + dp\r\n s = max((abs(theta), abs(dy), abs(dp)))\r\n gamma = s * np.sqrt((theta / s) ** 2 - (dy / s) * (dp / s))\r\n if (stp > sty):\r\n gamma = -gamma\r\n\r\n p = (gamma - dp) + theta\r\n q = ((gamma - dp) + gamma) + dy\r\n r = p / q\r\n stpc = stp + r * (sty - stp)\r\n stpf = stpc\r\n elif (stp > stx):\r\n stpf = stpmax\r\n else:\r\n stpf = stpmin\r\n return stpf", "def skim_step_seekable(average_length, lines, file):\n # Seek forward average_length * lines bytes\n log.debug(\"Seeking %d forward\", average_length*lines)\n file.seek(average_length*lines, WHENCE_RELATIVE)\n \n # Try and read a line\n \n c = file.read(1)\n if c != '\\n':\n # Part way through a line, skip it\n log.debug(\"Skipping part-line\")\n file.readline()\n\n # We use file.tell() to determine the length because we're after\n # line lengths in bytes, not characters. 
Might differ in UTF16 etc.\n # Might be worth some research here to determine if we can get an\n # accurate byte count out of the line instead.\n \n start = file.tell()\n log.debug(\"Starting line read at %d\", start)\n line = file.readline()\n log.debug(\"Read '%s'\", line)\n end = file.tell()\n log.debug(\"Line read finished at %d\", end)\n \n if start == end:\n # If the file has ended, we're done, break\n log.debug(\"File end encountered, exiting\")\n return (None, 0)\n \n return (line, end-start)", "def refine_center(self, search_size=100, step_size=5, radius_additive=INNER_RADIUS_FULL_SIZE_PHOTO):\n iter = 0\n saves = np.zeros(((search_size*2)**2,3))\n for i_add in xrange(-search_size, search_size, step_size):\n for j_add in xrange(-search_size, search_size, step_size):\n perim_intensities = self.scan_perimeter_intensity(\n self.i_img_center+i_add, self.j_img_center+j_add, self.T, radius_additive, step_size)\n saves[iter, 0] = self.i_img_center+i_add\n saves[iter, 1] = self.j_img_center+j_add\n saves[iter, 2] = perim_intensities[:,3].sum()\n iter += 1\n\n idxs = np.argsort(saves[:,2])\n winner_ij = saves[idxs[-1],:2]\n self.i_img_center = winner_ij[0]\n self.j_img_center = winner_ij[1]\n return(winner_ij[0], winner_ij[1])", "def snappy_endings(lines, max_distance):\n \n # initialize snapped lines with list of original lines\n # snapping points is a MultiPoint object of all vertices\n snapped_lines = [line for line in lines]\n snapping_points = vertices_from_lines(snapped_lines)\n \n \n # isolated endpoints are going to snap to the closest vertex\n isolated_endpoints = find_isolated_endpoints(snapped_lines) \n \n \n # only move isolated endpoints, one by one\n# accum=1\n \n \n count = len(isolated_endpoints)\n print(\"Performing line snapping 3/3\")\n pb = pbar.ProgressBar(count)\n \n for endpoint in isolated_endpoints:\n pb += 1\n # find all vertices within a radius of max_distance as possible\n target = nearest_neighbor_within(snapping_points, endpoint, \n max_distance)\n \n # do nothing if no target point to snap to is found\n if not target:\n continue \n \n # find the LineString to modify within snapped_lines and update it \n for i, snapped_line in enumerate(snapped_lines):\n if endpoint.touches(snapped_line):\n snapped_lines[i] = bend_towards(snapped_line, where=endpoint, \n to=target)\n break\n \n # also update the corresponding snapping_points\n for i, snapping_point in enumerate(snapping_points):\n if endpoint.equals(snapping_point):\n snapping_points[i] = target\n break\n\n # post-processing: remove any resulting lines of length 0\n snapped_lines = [s for s in snapped_lines if s.length > 0]\n del pb\n return snapped_lines", "def _none_line_search(self):\n trajectory_current = self.dynamic_model.update_traj(self.trajectory, self.K_matrix, self.k_vector, 1)\n obj_fun_value_current = self.obj_fun.eval_obj_fun(trajectory_current)\n return trajectory_current, obj_fun_value_current", "def simplify_line_dp(pts, tolerance):\r\n anchor = 0\r\n floater = len(pts) - 1\r\n stack = []\r\n keep = set()\r\n\r\n stack.append((anchor, floater)) \r\n while stack:\r\n anchor, floater = stack.pop()\r\n \r\n # initialize line segment\r\n if pts[floater] != pts[anchor]:\r\n anchorX = float(pts[floater][0] - pts[anchor][0])\r\n anchorY = float(pts[floater][1] - pts[anchor][1])\r\n seg_len = sqrt(anchorX ** 2 + anchorY ** 2)\r\n # get the unit vector\r\n anchorX /= seg_len\r\n anchorY /= seg_len\r\n else:\r\n anchorX = anchorY = seg_len = 0.0\r\n \r\n # inner loop:\r\n max_dist = 0.0\r\n farthest = 
anchor + 1\r\n for i in range(anchor + 1, floater):\r\n dist_to_seg = 0.0\r\n # compare to anchor\r\n vecX = float(pts[i][0] - pts[anchor][0])\r\n vecY = float(pts[i][1] - pts[anchor][1])\r\n seg_len = sqrt( vecX ** 2 + vecY ** 2 )\r\n # dot product:\r\n proj = vecX * anchorX + vecY * anchorY\r\n if proj < 0.0:\r\n dist_to_seg = seg_len\r\n else: \r\n # compare to floater\r\n vecX = float(pts[i][0] - pts[floater][0])\r\n vecY = float(pts[i][1] - pts[floater][1])\r\n seg_len = sqrt( vecX ** 2 + vecY ** 2 )\r\n # dot product:\r\n proj = vecX * (-anchorX) + vecY * (-anchorY)\r\n if proj < 0.0:\r\n dist_to_seg = seg_len\r\n else: # calculate perpendicular distance to line (pythagorean theorem):\r\n dist_to_seg = sqrt(abs(seg_len ** 2 - proj ** 2))\r\n if max_dist < dist_to_seg:\r\n max_dist = dist_to_seg\r\n farthest = i\r\n\r\n if max_dist <= tolerance: # use line segment\r\n keep.add(anchor)\r\n keep.add(floater)\r\n else:\r\n stack.append((anchor, farthest))\r\n stack.append((farthest, floater))\r\n\r\n keep = list(keep)\r\n keep.sort()\r\n return [pts[i] for i in keep]", "def _update_segment(self, segment, leading_segment, path):\n for index, position in enumerate(path):\n if (leading_segment.position - position).magnitude() >= self.follow_distance:\n segment.position = position.copy()\n segment.heading_vector = (leading_segment.position - position).unit()\n return index\n return len(path)", "def find_segment(p, line, start_vertex=0):\n EPS = 1e-9\n for seg in range(start_vertex, len(line)-1):\n if is_near(p, line[seg]):\n return seg, 0\n if line[seg][0] == line[seg+1][0]:\n if not (p[0]-EPS <= line[seg][0] <= p[0]+EPS):\n continue\n px = None\n else:\n px = (p[0] - line[seg][0]) / (line[seg+1][0] - line[seg][0])\n if px is None or (0 <= px <= 1):\n if line[seg][1] == line[seg+1][1]:\n if not (p[1]-EPS <= line[seg][1] <= p[1]+EPS):\n continue\n py = None\n else:\n py = (p[1] - line[seg][1]) / (line[seg+1][1] - line[seg][1])\n if py is None or (0 <= py <= 1):\n if py is None or px is None or (px-EPS <= py <= px+EPS):\n return seg, px or py\n return None, None", "def prob_getLine(img, threshold, line_length, line_gap, width, height, theta):\n\n\n\n # maximum line number to prevent infinite loop\n lines_max = 2 ** 15\n lines = []\n\n # calculate the image diagonal\n imgDiagnal = 2 * np.ceil((np.sqrt(img.shape[0] * img.shape[0] +img.shape[1] * img.shape[1])))\n accum = np.zeros((int(imgDiagnal), int(theta.shape[0])))\n offset = imgDiagnal / 2\n nthetas = theta.shape[0]\n # compute the bins and allocate the accumulator array\n mask = np.zeros((height, width))\n line_end = np.zeros((2, 2))\n\n # compute sine and cosine of angles\n cosinTheta = np.cos(theta)\n sinTheta = np.sin(theta)\n\n # find the nonzero indexes\n yXis, xXis = np.nonzero(img)\n points = list(zip(xXis, yXis))\n # mask all non-zero indexes\n mask[yXis, xXis] = 1\n shift = 16\n\n while 1:\n\n # check if the image is empty, quit if no remaining points\n count = len(points)\n if count == 0:\n break\n\n # select a random non-zero point\n index = random.randint(0,count) % count\n x = points[index][0]\n y = points[index][1]\n\n # remove the pixel from the image\n del points[index]\n\n # if previously eliminated, skip\n if not mask[y, x]:\n continue\n\n #set some constant for the ease of later use\n value = 0\n max_value = threshold - 1\n max_theta = -1\n\n # apply hough transform on point\n for j in range(nthetas):\n accum_idx = int(round((cosinTheta[j] * x + sinTheta[j] * y)) + offset)\n accum[accum_idx, j] += 1\n value = 
accum[accum_idx, j]\n if value > max_value:\n max_value = value\n max_theta = j\n\n #check if the highest value change for this pixel has detected line or not\n if max_value < threshold:\n continue #if less than the threshold, than skip this point\n\n # from the random point walk in opposite directions and find the longest line segment continuous\n a = -sinTheta[max_theta]\n b = cosinTheta[max_theta]\n x0 = x\n y0 = y\n\n # calculate gradient of walks using fixed point math\n xflag = np.fabs(a) > np.fabs(b)\n if xflag:\n if a > 0:\n dx0 = 1\n else:\n dx0 = -1\n dy0 = round(b * (1 << shift) / np.fabs(a))\n y0 = (y0 << shift) + (1 << (shift - 1))\n else:\n if b > 0:\n dy0 = 1\n else:\n dy0 = -1\n dx0 = round(a * (1 << shift) / np.fabs(b))\n x0 = (x0 << shift) + (1 << (shift - 1))\n\n # find the line segment not exceeding the acceptable line gap\n for k in range(2):\n gap = 0\n px = x0\n py = y0\n dx = dx0\n dy = dy0\n if k > 0:\n dx = -dx\n dy = -dy\n while 1:\n if xflag:\n x1 = px\n y1 = int(py) >> shift\n else:\n x1 = int(px) >> shift\n y1 = py\n # check when line exits image boundary\n if x1 < 0 or x1 >= width or y1 < 0 or y1 >= height:\n break\n gap += 1\n # if non-zero point found, continue the line\n if mask[y1, x1]:\n gap = 0\n line_end[k, 1] = y1\n line_end[k, 0] = x1\n # if gap to this point was too large, end the line\n elif gap > line_gap:\n break\n px += dx\n py += dy\n\n\n # confirm line length is acceptable\n acceptableLine = abs(line_end[1, 1] - line_end[0, 1]) >= line_length or \\\n abs(line_end[1, 0] - line_end[0, 0]) >= line_length\n\n # reset the accumulator and points on this line\n for k in range(2):\n px = x0\n py = y0\n dx = dx0\n dy = dy0\n if k > 0:\n dx = -dx\n dy = -dy\n while 1:\n if xflag:\n x1 = px\n y1 = int(py) >> shift\n else:\n x1 = int(px) >> shift\n y1 = py\n # if non-zero point found, continue the line\n if mask[y1, x1]:\n if acceptableLine:\n accum_idx = int(round((cosinTheta[j] * x1 + sinTheta[j] * y1)) + offset)\n accum[accum_idx, max_theta] -= 1\n mask[y1, x1] = 0\n # exit when the point is the line end\n if x1 == line_end[k, 0] and y1 == line_end[k, 1]:\n break\n px += dx\n py += dy\n\n # add line to the result\n if acceptableLine:\n lines.append(((line_end[0, 0], line_end[0, 1]),\n (line_end[1, 0], line_end[1, 1])))\n if len(lines) > lines_max:\n return lines\n\n return lines", "def find_line_through_point(center, theta, length):\n\n r = length\n cx, cy = center\n\n xo = int(r * math.sin(theta))\n yo = int(r * math.cos(theta))\n\n line_start = cx, cy\n line_end = cx + xo, cy + yo\n\n return line_start, line_end", "def do_train_step(self, additional_ops):\n self._total_step_distance = 0\n initial_search_step = self._sess.run(self.initial_search_step) if \\\n is_tensor(self.initial_search_step) else self.initial_search_step\n\n # does step to position on line, which got inferred in the last call of this function\n loss_at_current_position, line_derivative_current_pos, additional_ops_results, norm_of_step_direction \\\n = self._get_loss_directional_deriv_and_save_gradient(additional_ops)\n\n ###\n # endregion\n final_loss =self.binary_line_search(loss_at_current_position, initial_search_step,0, True)\n\n\n self._sess.run(self._increase_global_step_op)\n\n return loss_at_current_position, final_loss, self._total_step_distance, line_derivative_current_pos", "def get_line(start, end): \n # Setup initial conditions\n x1, y1 = start\n x2, y2 = end\n dx = x2 - x1\n dy = y2 - y1\n \n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n \n # 
Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n \n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n \n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n \n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n \n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = np.array((y, x)) if is_steep else np.array((x, y))\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n \n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return np.array(points)", "def line(x1,y1,x2,y2,z_thickness,laser):\r\n\t#Global variables that are used by all algorithms\r\n\tlayers = int(z_thickness/laser[\"z_spacing\"])\r\n\r\n\t#Works out offset when beginning on a new layer\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * laser[\"z_spacing\"]\r\n\ttaper_x,taper_y = offset(x1,y1,x2,y2,taper)\r\n\r\n\t#Works out offset between each parallel scan on the same layer\r\n\tdelta_x,delta_y = offset(x1,y1,x2,y2,laser[\"xy_spacing\"])\r\n\r\n\t#Works out maximum offset from starting line, we don't want to exceed this at any point.\r\n\tmax_taper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (z_thickness) * 2\r\n\tmax_delta_x, max_delta_y = offset(x1,y1,x2,y2,max_taper)\r\n\t#max_delta_x, max_delta_y = 2*max_delta_x, 2*max_delta_y\r\n\r\n\t#Loops through each layer, in which we fit as many parallel raster scans as the maximum offset allows\r\n\tcutlist = []\r\n\tfor a in range(layers):\r\n\t\tnew_x1,new_x2,new_y1,new_y2 = x1 + a*taper_x, x2 + a*taper_x, y1 + a*taper_y, y2 + a*taper_y\r\n\t\ti = 0\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\twhile abs(new_x1-x1) < abs(max_delta_x) or abs(new_y1-y1) < abs(max_delta_y):\r\n\t\t\t#This use of i is to reduce the jump distance between individual scans\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_x1,new_x2,new_y1,new_y2 = new_x1 + delta_x, new_x2 + delta_x, new_y1 + delta_y, new_y2 + delta_y\r\n\t\t\ti = i + 1\r\n\t\t#Having completed one layer, the laser moves down to begin the next layer\r\n\t\tmax_delta_x = max_delta_x - taper_x\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def segment_and_find_positions(self):\n initial_image = self.data\n xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n 
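# note: the dilate/erode rounds above act like a morphological closing; they\n        # bridge gaps in the Canny edges so remove_small_holes below can fill each\n        # detected region into a solid mask\n        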
print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)", "def solve_step(particle_list, step, size):\r\n \r\n # Detect edge-hitting and collision of every particle\r\n for i in range(len(particle_list)):\r\n particle_list[i].compute_refl(step,size)\r\n for j in range(i+1,len(particle_list)):\r\n particle_list[i].compute_coll(particle_list[j],step) \r\n\r\n \r\n # Compute position of every particle \r\n for particle in particle_list:\r\n particle.compute_step(step)", "def forward_pass(self, gamma = 0.5, max_line_search = 50, line_search = \"vanilla\", stopping_method = \"vanilla\", stopping_criterion = 1e-6):\n # Do line search\n if line_search == \"vanilla\":\n self.trajectory, obj_fun_value_current = self._vanilla_line_search(gamma, max_line_search)\n elif line_search == \"feasibility\":\n self.trajectory, obj_fun_value_current = self._feasibility_line_search(gamma, max_line_search)\n elif line_search == None:\n self.trajectory, obj_fun_value_current = self._none_line_search()\n # Check the stopping criterion\n if stopping_method == \"vanilla\":\n is_stop = self._vanilla_stopping_criterion(obj_fun_value_current, stopping_criterion)\n # Do forward pass\n self.C_matrix = self.obj_fun.eval_hessian_obj_fun(self.trajectory)\n 
self.c_vector = self.obj_fun.eval_grad_obj_fun(self.trajectory)\n self.F_matrix = self.dynamic_model.eval_grad_dynamic_model(self.trajectory)\n # Finally update the objective_function_value_last\n self.obj_fun_value_last = obj_fun_value_current\n return obj_fun_value_current, is_stop", "def reverse_search(view, what, start=0, end=-1, flags=0):\n if end == -1:\n end = view.size()\n end = find_eol(view, view.line(end).a)\n\n last_match = None\n\n lo, hi = start, end\n while True:\n middle = (lo + hi) / 2\n line = view.line(middle)\n middle, eol = find_bol(view, line.a), find_eol(view, line.a)\n\n if search_in_range(view, what, middle, hi, flags):\n lo = middle\n elif search_in_range(view, what, lo, middle - 1, flags):\n hi = middle -1\n\n # Don't search forever the same line.\n if last_match and line.contains(last_match):\n match = find_last_match(view, what, lo, hi, flags=flags)\n return view.rowcol(match.begin())[0] + 1\n\n last_match = sublime.Region(line.begin(), line.end())", "def find_isolated_endpoints(lines):\n \n isolated_endpoints = []\n count = len(lines)\n print(\"Finding isolated end points 2/3\")\n pb = pbar.ProgressBar(count)\n for i, line in enumerate(lines):\n pb += 1\n other_lines = lines[:i] + lines[i+1:]\n for q in [0,-1]:\n endpoint = Point(line.coords[q])\n if any(endpoint.touches(another_line) \n for another_line in other_lines):\n continue\n else:\n isolated_endpoints.append(endpoint)\n del pb\n return isolated_endpoints", "def _execute_ins_line(self, ins_line, ins_lcount):\n cursor_pos = 0 # starting cursor position\n val_dict = {} # storage dict for obsname: obsval pairs in line\n # for ii,ins in enumerate(ins_line):\n ii = 0 # counter over instruction entries\n all_markers = True\n line_seps = set([\",\", \" \", \"\\t\"])\n n_ins = len(ins_line) # number of instructions on line\n maxsearch = 500 # maximum number of characters to search when slicing line\n while True:\n if ii >= n_ins:\n break\n ins = ins_line[ii] # extract instruction\n i1 = ins[:1] # first char in instruction\n # primary marker\n if ii == 0 and i1 == self._marker:\n # if first and instruction starts with primary marker\n # search for presence of primary marker e.g. 
~start~\n mstr = ins.replace(self._marker, \"\")\n while True:\n # loop over lines until primary marker is found\n line = self._readline_output() # read line from output\n if line is None:\n self.throw_out_error(\n \"EOF when trying to find primary marker '{0}' from \"\n \"instruction file line {1}\".format(mstr, ins_lcount)\n )\n if mstr in line: # when marker is found break and update\n # cursor position in current line\n break\n # copy a version of line commas replaced\n # (to support comma sep strings)\n rline = line.replace(\",\", \" \").replace(\"\\t\",\"\")\n\n cursor_pos = line.index(mstr) + len(mstr)\n\n # line advance\n elif i1 == \"l\": # if start of instruction is line advance\n try:\n nlines = int(ins[1:]) # try and get advance number\n except Exception as e:\n self.throw_ins_error(\n \"casting line advance to int for \"\n \"instruction '{0}'\".format(ins),\n ins_lcount,\n )\n for i in range(nlines):\n line = self._readline_output()\n if line is None:\n self.throw_out_error(\n \"EOF when trying to read {0} lines for line \"\n \"advance instruction '{1}', from instruction \"\n \"file line number {2}\".format(nlines, ins, ins_lcount)\n )\n # copy a version of line commas replaced\n # (to support comma sep strings)\n rline = line.replace(\",\", \" \")\n elif ins == \"w\": # whole string comparison\n raw = rline[cursor_pos : cursor_pos + maxsearch].split(\n None, 2\n ) # TODO: maybe slow for long strings -- hopefuly maxsearch helps\n if line[cursor_pos] in line_seps:\n raw.insert(0, \"\")\n if len(raw) == 1:\n self.throw_out_error(\n \"no whitespaces found on output line {0} past {1}\".format(\n line, cursor_pos\n )\n )\n # step over current value\n cursor_pos = rline.replace(\"\\t\",\" \").find(\" \", cursor_pos)\n # now find position of next entry\n cursor_pos = rline.find(raw[1], cursor_pos)\n # raw[1]\n # )\n\n elif i1 == \"!\": # indicates obs instruction folows\n oname = ins.replace(\"!\", \"\")\n # look a head for a second/closing marker\n if ii < n_ins - 1 and ins_line[ii + 1] == self._marker:\n # if penultimate instruction and last instruction is\n # primary marker, look for that marker in line\n m = ins_line[ii + 1].replace(self._marker, \"\")\n es = line.find(m, cursor_pos)\n if es == -1: # m not in rest of line\n self.throw_out_error(\n \"secondary marker '{0}' not found from cursor_pos {1}\".format(\n m, cursor_pos\n )\n )\n # read to closing marker\n val_str = line[cursor_pos:es]\n else:\n # find next space in (r)line -- signifies end of entry\n es = rline.find(\" \", cursor_pos)\n if es == -1 or es == cursor_pos:\n # if no space or current position is space\n # use old fashioned split to get value\n # -- this will happen if there are leading blanks before\n # vals in output file (e.g. 
formatted)\n val_str = rline[cursor_pos : cursor_pos + maxsearch].split(\n None, 1\n )[0]\n else:\n # read val (constrained slice is faster for big strings)\n val_str = rline[cursor_pos:es]\n try:\n val = float(val_str)\n except Exception as e:\n if oname != \"dum\":\n self.throw_out_error(\n \"casting string '{0}' to float for instruction '{1}'\".format(\n val_str, ins\n )\n )\n\n if oname != \"dum\":\n val_dict[oname] = val\n ipos = line.find(val_str.strip(), cursor_pos)\n # val_len = len(val_str)\n cursor_pos = ipos + len(val_str) # update cursor\n all_markers = False\n\n elif i1 == self._marker:\n m = ins.replace(self._marker, \"\") # extract just primary marker\n # find position of primary marker in line\n es = line.find(m, cursor_pos)\n if es == -1: # m not in rest of line\n if all_markers:\n ii = 0\n continue\n else:\n self.throw_out_error(\n \"secondary marker '{0}' not found from \"\n \"cursor_pos {1}\".format(m, cursor_pos)\n )\n cursor_pos = es + len(m)\n\n elif i1 == \"(\":\n if \")\" not in ins:\n self.throw_ins_error(\"unmatched ')'\", self._instruction_lcount)\n oname = ins[1:].split(\")\", 1)[0].lower()\n raw = ins.split(\")\")[1]\n if \":\" not in raw:\n self.throw_ins_error(\n \"couldnt find ':' in semi-fixed instruction: '{0}'\".format(ins),\n lcount=self._instruction_lcount,\n )\n raw = raw.split(\":\")\n try:\n s_idx = int(raw[0]) - 1\n except Exception as e:\n self.throw_ins_error(\n \"error converting '{0}' to integer in semi-fixed instruction: '{1}'\".format(\n raw[0], ins\n ),\n lcount=self._instruction_lcount,\n )\n try:\n e_idx = int(raw[1])\n except Exception as e:\n self.throw_ins_error(\n \"error converting '{0}' to integer in semi-fixed instruction: '{1}'\".format(\n raw[1], ins\n ),\n lcount=self._instruction_lcount,\n )\n\n if len(line) < e_idx:\n self.throw_out_error(\n \"output line only {0} chars long, semi-fixed ending col {1}\".format(\n len(line), e_idx\n )\n )\n\n if cursor_pos > e_idx:\n self.throw_out_error(\n \"cursor at {0} has already read past semi-fixed ending col {1}\".format(\n cursor_pos, e_idx\n )\n )\n\n ss_idx = max(cursor_pos, s_idx)\n raw = line[ss_idx : ss_idx + maxsearch].split(\n None, 1\n ) # slpitting only 1 might be margin faster\n rs_idx = line.index(raw[0])\n if rs_idx > e_idx:\n self.throw_out_error(\n \"no non-whitespace chars found in semi-fixed observation {0}\".format(\n ins\n )\n )\n re_idx = rs_idx + len(raw[0])\n val_str = line[rs_idx:re_idx]\n try:\n val = float(val_str)\n except Exception as e:\n if oname != \"dum\":\n self.throw_out_error(\n \"casting string '{0}' to float for instruction '{1}'\".format(\n val_str, ins\n )\n )\n\n if oname != \"dum\":\n val_dict[oname] = val\n cursor_pos = re_idx\n\n elif i1 == \"[\":\n if \"]\" not in ins:\n self.throw_ins_error(\"unmatched ']'\", self._instruction_lcount)\n oname = ins[1:].split(\"]\", 1)[0].lower()\n raw = ins.split(\"]\")[1]\n if \":\" not in raw:\n self.throw_ins_error(\n \"couldnt find ':' in fixed instruction: '{0}'\".format(ins),\n lcount=self._instruction_lcount,\n )\n raw = raw.split(\":\")\n try:\n s_idx = int(raw[0]) - 1\n except Exception as e:\n self.throw_ins_error(\n \"error converting '{0}' to integer in fixed instruction: '{1}'\".format(\n raw[0], ins\n ),\n lcount=self._instruction_lcount,\n )\n try:\n e_idx = int(raw[1])\n except Exception as e:\n self.throw_ins_error(\n \"error converting '{0}' to integer in fixed instruction: '{1}'\".format(\n raw[1], ins\n ),\n lcount=self._instruction_lcount,\n )\n\n if len(line) < e_idx:\n 
self.throw_out_error(\n \"output line only {0} chars long, fixed ending col {1}\".format(\n len(line), e_idx\n )\n )\n\n if cursor_pos > s_idx:\n self.throw_out_error(\n \"cursor at {0} has already read past fixed starting col {1}\".format(\n cursor_pos, e_idx\n )\n )\n\n val_str = line[s_idx:e_idx]\n try:\n val = float(val_str)\n except Exception as e:\n if oname != \"dum\":\n self.throw_out_error(\n \"casting string '{0}' to float for instruction '{1}'\".format(\n val_str, ins\n )\n )\n\n if oname != \"dum\":\n val_dict[oname] = val\n cursor_pos = e_idx\n\n else:\n self.throw_out_error(\n \"unrecognized instruction '{0}' on ins file line {1}\".format(\n ins, ins_lcount\n )\n )\n ii += 1\n return val_dict", "def compute_refl(self, step, size):\r\n r, v, x = self.radius, self.velocity, self.position\r\n projx = step*abs(np.dot(v,np.array([1.,0.])))\r\n projy = step*abs(np.dot(v,np.array([0.,1.])))\r\n if abs(x[0])-r < projx or abs(size-x[0])-r < projx:\r\n self.velocity[0] *= -1\r\n if abs(x[1])-r < projy or abs(size-x[1])-r < projy:\r\n self.velocity[1] *= -1.", "def begining_of_line():\r\n set_point(point().begining_of_line())", "def findInLine(self) -> str:\n raise NotImplementedError", "def draw_lines_slow(orbit_pos, factor):", "def snap_to_line(points, lines, tolerance=100, sindex=None):\n\n # get list of columns to copy from flowlines\n line_columns = lines.columns[lines.columns != \"geometry\"].to_list()\n\n # generate spatial index if it is missing\n if sindex is None:\n sindex = lines.sindex\n # Note: the spatial index is ALWAYS based on the integer index of the\n # geometries and NOT their index\n\n # generate a window around each point\n window = points.bounds + [-tolerance, -tolerance, tolerance, tolerance]\n # get a list of the line ordinal line indexes (integer index, not actual index) for each window\n hits = window.apply(lambda row: list(sindex.intersection(row)), axis=1)\n\n # transpose from a list of hits to one entry per hit\n # this implicitly drops any that did not get hits\n tmp = pd.DataFrame(\n {\n # index of points table\n \"pt_idx\": np.repeat(hits.index, hits.apply(len)),\n # ordinal position of line - access via iloc\n \"line_i\": np.concatenate(hits.values),\n }\n )\n\n # reset the index on lines to get ordinal position, and join to lines and points\n tmp = tmp.join(lines.reset_index(drop=True), on=\"line_i\").join(\n points.geometry.rename(\"point\"), on=\"pt_idx\"\n )\n tmp = gp.GeoDataFrame(tmp, geometry=\"geometry\", crs=points.crs)\n tmp[\"snap_dist\"] = tmp.geometry.distance(gp.GeoSeries(tmp.point))\n\n # drop any that are beyond tolerance and sort by distance\n tmp = tmp.loc[tmp.snap_dist <= tolerance].sort_values(by=[\"pt_idx\", \"snap_dist\"])\n\n # find the nearest line for every point, and count number of lines that are within tolerance\n by_pt = tmp.groupby(\"pt_idx\")\n closest = gp.GeoDataFrame(\n by_pt.first().join(by_pt.size().rename(\"nearby\")), geometry=\"geometry\"\n )\n\n # now snap to the line\n # project() calculates the distance on the line closest to the point\n # interpolate() generates the point actually on the line at that point\n snapped_pt = closest.interpolate(\n closest.geometry.project(gp.GeoSeries(closest.point))\n )\n snapped = gp.GeoDataFrame(\n closest[line_columns + [\"snap_dist\", \"nearby\"]], geometry=snapped_pt\n )\n\n # NOTE: this drops any points that didn't get snapped\n return points.drop(columns=[\"geometry\"]).join(snapped).dropna(subset=[\"geometry\"])", "def find_startpos(self, searched_object:str):\r\n fak = 1 
#< When the figure needs to be pushed to the right -> fak = 1 else fak = 0\r\n # The main figures spwan position beginns at index 14 and ends at size(self.look_up_table) - 9\r\n start_index = 14\r\n y = start_index \r\n end_index = -9\r\n for x in self.look_up_table[start_index : end_index]:\r\n # When the serached object is in the row then get the index of it\r\n if searched_object in x:\r\n x = x.index(searched_object)\r\n break\r\n y += 1\r\n # Pac-Man does not need to push to the right\r\n if searched_object == 'PACMAN':\r\n fak = 0\r\n return x * self.grid_size + fak * self.grid_size // 2, y * self.grid_size", "def vlinecomp(self):\n m_h, c_h = self.fitline(0,2) # Computes the equation for a line joining the points on the outside of the gear on opposites sides of the edm cut\n\n m_v_avg = self.average_grad() # Computes the average gradient of the constructed vertical line\n\n m_v_avg, c_v = self.line_through_point(m_v_avg,4) # Equation of line with average gradient though crack start point\n\n x_intersect,y_intersect = self.intersect_point(m_h, c_h, m_v_avg, c_v)\n\n coord_top = [x_intersect,y_intersect]\n coord_bot = [self.points[4, 0], self.points[4, 1]]\n\n distance = self.distance(coord_bot,coord_top)\n\n return coord_top, coord_bot, distance", "def update(self, lines):\n \n definite = []\n maybe = None\n maybe_slope = np.inf\n \n # sort given lines into \"match\" and \"not\"\n for line in lines:\n # calculate slope (as angle):\n slope = self.slope(*line)\n\n # filter out vertical / horizontal slopes\n if not self.filter_slope(slope):\n continue\n \n # check if it's a good line\n if self.confidence_interval[0] <= slope <= self.confidence_interval[1]:\n self.slopes.append(slope)\n definite.append(line)\n else:\n # keep track of the closest \"bad\" line\n # check if our error is worse than next best\n if not self.rho or (abs(slope-self.rho) > abs(maybe_slope-self.rho)):\n continue\n\n # construct maybe interval:\n maybe_interval = [\n self.rho + self.maybe_degrees, \n self.rho - self.maybe_degrees]\n maybe_interval.sort()\n \n # check if this is within our allowable error (very wide range)\n if maybe_interval[0] <= slope <= maybe_interval[1]:\n maybe = line\n maybe_slope = slope\n \n # if we don't have any matches use the next best guess\n if len(definite) == 0:\n # if we didn't get any maybes just return nothing\n if maybe is None:\n return \n definite.append(maybe)\n self.slopes.append(maybe_slope)\n \n # add matches to our list\n self.matches += definite\n self.matches_per_frame.append(len(definite))\n \n # update with matches from the last N frames\n matches_to_update = sum(self.matches_per_frame[-self.n_lanes:])\n self.calc_confidence(matches_to_update)", "def __quickSearchMarkOccurrences(self, txt):\n aw = self.activeWindow()\n \n lineFrom = 0\n indexFrom = 0\n lineTo = -1\n indexTo = -1\n \n aw.clearSearchIndicators()\n ok = aw.findFirstTarget(txt, False, False, False,\n lineFrom, indexFrom, lineTo, indexTo)\n while ok:\n tgtPos, tgtLen = aw.getFoundTarget()\n aw.setSearchIndicator(tgtPos, tgtLen)\n ok = aw.findNextTarget()", "def go_to(self, value=None):\n self.go_to_this_line = self.line_number.get()\n self.my_text.mark_set(INSERT, str(float(self.go_to_this_line)))\n self.current_area()\n self.my_text.see(INSERT)\n self.searcher.destroy()", "def _update_linetab(self, offset):\n t = self.input\n if offset < 0 or offset > len(t):\n raise IndexError(\"position %d not in 0..%d\" % (offset, len(t)))\n\n # self.__linepos[x] caches the offset of EOL for line x + 1.\n # A virtual 
line is imputed to exist prior to the input ending at\n # offset -1.\n # Lines are indexed internally from zero.\n lpc = self.__linepos\n if not lpc:\n lpc.append(-1)\n\n # Include all lines prior to offset.\n pos = lpc[-1] + 1\n while pos < offset:\n if t[pos] == '\\n':\n lpc.append(pos)\n pos += 1\n\n # Include the line containing offset, if possible.\n while pos < len(t) and lpc[-1] < offset:\n if t[pos] == '\\n':\n lpc.append(pos)\n pos += 1", "def calc_line(start, target, map):\n\t\"\"\" Returns the real world point at the farthest range \"\"\"\n\tdx = abs(target[0] - start[0])\n\tdy = abs(target[1] - start[1])\n\txi = start[0]\n\tyi = start[1]\n\tn = 1 + dx + dy\n\tx_dir = np.sign(target[0] - start[0])\n\ty_dir = np.sign(target[1] - start[1])\n\terror = dx - dy;\n\tdx *= 2\n\tdy *= 2\n\n\tfor i in xrange(n):\n\t\tif map.grid[xi,yi] is not map.empty and map.grid[xi,yi] > 0:\n\t\t\treturn xi, yi\n\n\t\tif error > 0:\n\t\t\txi += x_dir\n\t\t\terror -= dy\n\t\telse:\n\t\t\tyi += y_dir\n\t\t\terror += dx\n\treturn target", "def move_to_line_end(self) -> None:\n self.index = self.buffer.get_line_end(self.index)", "def findLines(self, sequence): \n size = self.size\n lines_in_seq = {'Vertical': {}, 'Horizontal': {}, 'D-pos': {}, 'D-neg': {} }\n\n ###############\n # Evaluate and append a line in a particular direction at a particular coordinate \n def tallyLine(direction, coordinate, move):\n tic = 0\n tac = move[1]\n current_line = lines_in_seq[direction].get(coordinate, 'Empty')\n\n if current_line is not None and current_line is 'Empty':\n lines_in_seq[direction][coordinate] = [tac, move[0]]\n\n elif current_line is not None and current_line[tic] == tac:\n lines_in_seq[direction][coordinate].append(move[0]) if move[0] not in lines_in_seq[direction][coordinate] else None\n\n else:\n lines_in_seq[direction][coordinate] = None\n \n ###############\n # Check if each point in the game sequence belongs to a line or not\n for move in sequence:\n x , y = self.getCoordinates(move[0])\n\n # Tally of horizontal and vertical lines\n for direction in ['Horizontal', 'Vertical']:\n coordinate = {'Vertical': x, 'Horizontal': y}[direction]\n tallyLine(direction, coordinate, move)\n \n # Tally of the two possible diagonal lines\n if x == y: \n tallyLine('D-pos', 0, move)\n if x + y == size - 1:\n tallyLine('D-neg', 0, move)\n\n return lines_in_seq", "def search_the_limit_outwards(p, increment, flow, factor, printIt):\n if not(p.captured):\n exception_msg = 'Sorry, the particle isn\\'t captured. We cannot ' \\\n + 'proceed to the computation :('\n raise Exception(exception_msg)\n\n x0, y0 = p.pos0\n\n new_p = deepcopy(p)\n\n while new_p.captured:\n print('y0 = %e. Still inside the capture domain...' 
% new_p.pos0[1])\n\n pCopy = deepcopy(new_p)\n\n y0 -= increment\n new_pos0 = np.array([x0, y0])\n\n new_p = Particle(p.diameter, p.density, p.birth, p.lifetime,\n new_pos0, None)\n\n new_p.compute_trajectory(flow, factor, printIt)\n\n return pCopy, new_p", "def bend_towards(line, where, to):\n \n if not line.contains(where) and not line.touches(where):\n raise ValueError('line does not contain the point where.')\n \n coords = line.coords[:]\n # easy case: where is (within numeric precision) a vertex of line\n for k, vertex in enumerate(coords):\n if where.almost_equals(Point(vertex)):\n # move coordinates of the vertex to destination\n coords[k] = to.coords[0]\n return LineString(coords)\n \n # hard case: where lies between vertices of line, so\n # find nearest vertex and move that one to point to\n _, min_k = min((where.distance(Point(vertex)), k) \n for k, vertex in enumerate(coords))\n coords[min_k] = to.coords[0]\n return LineString(coords)", "def line(self, x0, y0, x1, y1, color):\n steep = abs(y1 - y0) > abs(x1 - x0)\n if steep:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n dx = x1 - x0\n dy = abs(y1 - y0)\n err = dx // 2\n ystep = 1 if y0 < y1 else -1\n while x0 <= x1:\n if steep:\n self.pixel(y0, x0, color)\n else:\n self.pixel(x0, y0, color)\n err -= dy\n if err < 0:\n y0 += ystep\n err += dx\n x0 += 1", "def seek_line(self, pattern):\n line = next(self.f)\n while not line.startswith(pattern):\n line = next(self.f)\n return line", "def read_centerline(s, x, y, cur, theta):\n if MODE == 2:\n print('MODE 2: READ YOUR OWN RIVER CENTERLINE FROM FILE is selected')\n try:\n centerlinexy = np.loadtxt(FNAME)\n except IOError:\n print('\\'' + FNAME + '\\' not found')\n print('Please place \\'' + FNAME + '\\' in working directory\\n')\n job_done()\n sys.exit(1)\n else:\n return s, x, y, cur, theta\n x = centerlinexy[:, 0]\n y = centerlinexy[:, 1]\n if FLIPSTRM:\n x = x[::-1]\n y = y[::-1]\n# if np.mean(np.abs(x)) > 1e6 or np.mean(np.abs(y)) > 1e6:\n# print('!!! 
centerline X/Y too large, forced to shift toward (0, 0) !!!')\n# print('shifting vector: ('+str(-np.mean(x))+', '+str(-np.mean(y))+')')\n# x -= np.mean(x)\n# y -= np.mean(y)\n length = x.size\n s = np.zeros(length)\n for j in range(1, x.size):\n s[j] = s[j-1] + np.sqrt((x[j]-x[j-1])**2 + (y[j]-y[j-1])**2)\n mean1 = np.around(np.mean(np.diff(s)), decimals=2)\n median1 = np.around(np.median(np.diff(s)), decimals=2)\n mode1 = np.around(mode(np.diff(s))[0][0], decimals=2)\n print('+> Resampling centerline & Calculating curvature...', end='')\n s, x, y, cur, theta = resample_centerline(s, x, y)\n print(' [done]')\n mean2 = np.around(np.mean(np.diff(s)), decimals=2)\n median2 = np.around(np.median(np.diff(s)), decimals=2)\n mode2 = np.around(mode(np.diff(s))[0][0], decimals=2)\n print_resamp_table(mean1, median1, mode1, mean2, median2, mode2)\n print_para_table(s)\n return s, x, y, cur, theta", "def findWithinHorizon(self) -> str:\n raise NotImplementedError", "def findChar(self, position, spaceLength ):\n leer=0 ## numeator of empty column\n Queue=[] ##this will help in serching for neighbours of pixels\n PiksList=[] ##list of balck piksels, of with consist the charakter\n length, high = self.getSize()\n \n while (position < length and self.vLineHistogram(position)==0): #serching for a first not empty line, for given position\n position+=1\n leer+=1\n if position == length: ## check if it is Space or it is End of line\n return position, \"Enter\", 0\n elif leer>=spaceLength:\n return position, \"Space\", 0\n else:\n for i in range(0,high): ##extracting all black pixels from this line\n if self.getPixel(position, i)<128:\n Queue.append((position, i))\n PiksList.append((position, i))\n\n while len(Queue)>0:\n Piksel=Queue.pop(0) ##geting firs element from Queue\n neighbourhood=[(Piksel[0]-1, Piksel[1]+1),(Piksel[0]-1, Piksel[1]),(Piksel[0]-1, Piksel[1]-1),(Piksel[0], Piksel[1]+1),(Piksel[0], Piksel[1]-1),(Piksel[0]+1, Piksel[1]+1),(Piksel[0]+1, Piksel[1]),(Piksel[0]+1, Piksel[1]-1)]\n ##to co wyzej to lista współrzędnych sąsiadów Piksela\n\n for neighbour in neighbourhood: ##cheking neighbourhood of each pixel\n if not(neighbour in PiksList) and (neighbour[0] in range(0,length)) and (neighbour[1] in range(0,high)) and self.getPixel(neighbour[0],neighbour[1])==0:\n Queue.append(neighbour)\n PiksList.append(neighbour)\n \n PiksList.sort() ##sorts list with number of column\n\n \n PiksList=self.addHigherPiks(PiksList) ##adds all piksel over finden pixels\n PiksList.sort()\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1] ## geting number of smalest and biggest column in group\n charLength=position2-position1\n if len(PiksList)>5: ##checkin if there are more then 5 piksels in group to eliminate case, when there are single pixels not eliminated by initial fomating\n if charLength<high: ##check if the length of finden group of pixels isn't bigger then length of tile\n newPosition= position1+(charLength/2) ##new position in the center of finden char to eliminate case, when one char is over the second\n Char=CharFrame(high,high) ##create new CrarFrame object\n \n for el in PiksList: ##making all pixels in PiksList black in ChatFrame object and white in self(LineFrame object)\n Char.putPixel(el[0]-position1,el[1])\n self.makeWhite(el[0],el[1])\n \n Char.reScale(30,30) #scaling CharFrame to the ening size\n \n return newPosition, Char, charLength/2\n\n else: ##length of goup of pixels is too big\n PiksList, Char = reconChar(PiksList,high) ## finding where to divide group of pixels\n 
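# clear the split-off group's pixels from the line image so the next\n            # character scan starts from fresh columns\n            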
for Piks in PiksList:\n self.makeWhite(Piks[0],Piks[1])\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1] ## geting number of smalest and biggest column in group\n charLength=position2-position1\n newPosition= position1+(charLength/2) ##new position in the center of finden char to eliminate case, when one char is over the second\n return newPosition, Char, charLength/2\n else: ##if there is less then 5 pixels in group\n for el in PiksList: ##making all pixels in PiksList white in self(LineFrame object)\n self.makeWhite(el[0],el[1])\n newPosition= position1+(charLength/2)\n return newPosition, \"None\", charLength/2", "def reduce_lines(input_horizontal, input_vertical, min_distance):\n\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n\n # vertical\n for index, (x1,y1,x2,y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if (abs(x1 - x1_b) < min_distance):\n # if the end is further to the top, choose this end\n if (y2_b < y2):\n y2 = y2_b\n # if the start if further to the bottom, choose it\n if (y1_b > y1):\n y1 = y1_b\n\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n\n # taking the average x value for all the lines to get the middle\n x = int(np.mean(x_values))\n output_vertical.append((x,y1,x,y2))\n\n #horizontal\n for index, (x1,y1,x2,y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_horizontal):\n if other_index in seen_horizontal:\n continue\n if (abs(y1 - y1_b) < min_distance):\n # if the start if further to the left, choose this point\n if (x1_b < x1):\n x1 = x1_b\n # if the end is further to the right, choose it\n if (x2_b > x2):\n x2 = x2_b\n\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n\n # taking the average y value for all the lines to get the middle\n y = int(np.mean(y_values))\n output_horizontal.append((x1,y,x2,y))\n\n return (output_vertical, output_horizontal)", "def get_line_error(im): \n ### Crop the picture\n height = len(im)\n width = len(im[0])\n im = im[height/CROP_RATIO:-height/CROP_RATIO, width/CROP_RATIO:-width/CROP_RATIO]\n\n ### thresholding. susceptible to glare, solve with masking tape?\n thresh = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n success, thresh = cv2.threshold(thresh, BW_THRESHOLD, 255, cv2.THRESH_BINARY)\n if not success:\n print \"Could not threshold frame, skipping.\"\n return None\n\n ### edge detection. 
constants here are magic\n canny = cv2.Canny(thresh, 180, 220, apertureSize = 3)\n\n ### contour detection\n contours, _ = cv2.findContours(canny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n\n if len(contours) < 1:\n return None\n\n sorted_contours = sorted(contours, key=lambda x:cv2.arcLength(x,False), reverse=True)\n \n ## JUST FOR TESTING\n # longest contours\n if DEBUG_MODE:\n cv2.drawContours(im,sorted_contours[0:2],-1,(0,255,0),3) # draw longest contour \n cv2.imshow('lines',im)\n k = cv2.waitKey(5)\n if k == 27: \n cv2.destroyAllWindows()\n return None\n\n ### Find x coordinates of endpoints\n if len(sorted_contours) == 0:\n print \"No contours found, skipping\"\n return None\n\n # get points for the longest contours \n mask = numpy.zeros(im.shape,numpy.uint8)\n cv2.drawContours(mask,[sorted_contours[0]],0,255,-1)\n pixelpoints = numpy.transpose(numpy.nonzero(mask)) \n xTop_one = pixelpoints[0][1] # IMPORTANT: pixelpoints is returned in row, column format\n xBottom_one = pixelpoints[-1][1] ## IMPORTANT TODO: assumes points are returned sorted, need to verify\n\n if len(sorted_contours) > 1: # we have more than one contour\n mask = numpy.zeros(im.shape,numpy.uint8)\n cv2.drawContours(mask,[sorted_contours[1]],0,255,-1)\n pixelpoints = numpy.transpose(numpy.nonzero(mask)) \n xTop_two = pixelpoints[0][1] # IMPORTANT: pixelpoints is returned in row, column format\n xBottom_two = pixelpoints[-1][1] ## IMPORTANT TODO: assumes points are returned sorted, need to verify\n\n # average two longest contours if available \n if len(sorted_contours) == 1:\n xTop = xTop_one\n xBottom = xBottom_one\n else:\n xTop = (xTop_one + xTop_two) / 2\n xBottom = (xBottom_one + xBottom_two) / 2\n\n ### Calculate offset to return\n ### (XTop - XBottom) + (XTop - CENTER)\n ### CENTER = TRUE_CENTER - CENTER_OFFSET\n MOST_POSITIVE_VAL = 3*len(im[0])/2 + CENTER_OFFSET\n MOST_NEGATIVE_VAL = -3*len(im[0])/2 + CENTER_OFFSET\n adjusted_midpoint = len(im[0])/2 - CENTER_OFFSET\n\n #unscaled_error = xTop - xBottom + 2*(xTop - adjusted_midpoint)\n unscaled_error = xTop - adjusted_midpoint\n if unscaled_error == 0:\n return 0.0\n\n if unscaled_error > 0:\n scaled_error = float(unscaled_error)/MOST_POSITIVE_VAL\n if abs(scaled_error) > 1.0:\n print \"Warning: scaled_error value greater than 1.0: \" + scaled_error\n return min(scaled_error, 1.0)\n else:\n scaled_error = float(unscaled_error)/abs(MOST_NEGATIVE_VAL)\n if abs(scaled_error) > 1.0:\n print \"Warning: scaled_error value less than -1.0: \" + scaled_error \n return max(scaled_error, -1.0)", "def end_of_line():\r\n set_point(point().end_of_line())", "def move_marker(self, key, command):\n if self.check_array_exit(key=key, command=command):\n if key == 'x' and \\\n self.inspection['y_array'][self.data['x']\n ['current']] - 1 != \\\n self.data['y']['limit']:\n Me.info_message('VF size change caused by depth change.')\n # Update VF array limits\n self.data['y']['limit'] = int(self.inspection['y_array']\n [self.data['x']['current']] - 1)\n # If the number of inspection points across drops\n # below 5 (to 3) move to center, otherwise update\n # to closest location and reset the limit\n if self.inspection['y_array'][self.data['x']['current']] < 5:\n self.data['y']['current'] = 1\n else:\n self.data['y']['current'] = \\\n int(float(self.data['y']['current']) /\n float(self.data['y']['limit']) *\n (self.inspection['y_array']\n [self.data['x']['current']]))\n\n self.update_current_pose(string='marker')\n self.update_markers()\n self.adjust_camera()\n success = True\n else:\n 
success = False\n return success", "def select_cells_by_cut(self,line,start=0,side='left',delta=1.0):\n marks=np.zeros(self.Ncells(),np.bool8)\n\n def test_edge(j):\n cells=self.edges['cells'][j]\n if cells[0]<0 or cells[1]<0:\n return True # don't traverse\n seg=geometry.LineString( cc[cells] )\n return line.intersects(seg)\n\n stack=[start]\n count=0\n\n start_on_left=None\n\n cc=self.cells_center()\n e2c=self.edge_to_cells()\n\n while stack:\n count+=1\n if count%5000==0:\n self.log.info(\"checked on %d/%d edges\"%(count,self.Nedges()))\n\n c=stack.pop()\n\n marks[c]=True\n for j in self.cell_to_edges(c):\n if test_edge(j):\n if start_on_left is None:\n # figure out the orientation\n cells=e2c[j]\n if cells[0]>=0 and cells[1]>=0:\n if cells[0]==c:\n seg=geometry.LineString( cc[cells] )\n else:\n seg=geometry.LineString( cc[cells[::-1]] )\n orientation=orient_intersection(seg,line)\n if orientation>0:\n start_on_left=True\n else:\n start_on_left=False\n continue\n for nbr in self.edges['cells'][j]:\n # redundant but cheap check on nbr sign.\n if nbr>=0 and marks[nbr]==0:\n stack.append(nbr)\n\n # make sure we eventually had a real edge crossing\n assert start_on_left is not None\n\n # would like to know which side of the cut we are on...\n # and invert marks if the request was for the other side\n if (side=='left') != (start_on_left==True):\n marks=~marks\n\n return marks", "def test_split_line_on_loop(self):\n tol = 5e-4\n line = gnx.LineString([(10.8332501, 43.6994487),\n (10.8333313, 43.6995065),\n (10.8331066, 43.6996864),\n (10.8327284, 43.6994203),\n (10.8332501, 43.6994487)])\n for distance in [0.000925456010099422, 0.0, 5.0, 9.967085832788407e-05, 0.0008499479239845902]:\n split_result = gnx.split_line(line, distance)\n self.assertIsNot(split_result, None)\n gnx_tu.assert_coordinates_almost_equals(split_result[0].coords[-1],\n split_result[1].coords[0])\n if distance < line.length:\n self.assertAlmostEquals(split_result[0].length, distance, delta=tol)\n self.assertAlmostEquals(split_result[1].length, line.length - distance, delta=tol)\n else:\n self.assertAlmostEquals(split_result[0].length, line.length, delta=tol)\n self.assertAlmostEquals(split_result[1].length, 0.0, delta=tol)", "def estimate_lines(self):\r\n logger.debug(\"estimate Lines\")\r\n self.filesize = Path(self.fileName).stat().st_size\r\n text = self.textwnd.toPlainText()\r\n linetext = text.split(\"\\n\")[1] + \"\\\\r\\\\n\"\r\n self.linesize = len(linetext.encode('utf-8'))\r\n self.estimated_lines = self.filesize // self.linesize\r\n logger.debug(\"Estimate Lines: {}\".format(self.estimated_lines))\r\n self.statusBar.showMessage(f\"Estimated lines: {self.estimated_lines}\")", "def find_path(self):\n j, i = utl.pixel_coords_to_pos(\n self.xcor(), self.ycor(), self.maze_size)\n level_cpy = copy.deepcopy(self.level.maze)\n self.backtrack(level_cpy, i, j, [])", "def next(self):\n if self.is_done:\n return\n\n self.idx += 1\n\n if self.sweep_line is not None:\n self.remaining_events = self.remaining_events[1:]\n\n if len(self.remaining_events) == 0: # End of everything\n if self.sweep_line is None:\n self.is_done = True\n return\n else:\n self.sweep_line = None\n return\n\n self.sweep_line, self.a_line = self.remaining_events[0]\n\n if self.sweep_line == self.lines[self.a_line][1]: # left\n current_n = self.lines[self.a_line][0]\n current_r = self.lines[self.a_line][2]\n\n for i in self.active_line_segments:\n n, _, r = self.lines[i]\n if r < current_r:\n self.overlap_graph.add_edge(n, current_n)\n # 
self.interval_graph.add_edge(n, current_n)\n\n self.active_line_segments.append(self.a_line)\n\n elif self.sweep_line == self.lines[self.a_line][2]: # right\n self.active_line_segments.remove(self.a_line)", "def next_line(self, context, line):", "def Apply_Line_Filter( self ):\r\n self.system.Filter_By_Protein_Distance( self.filter_distance )", "def highlight_next_match(self):\n self.text.tag_remove('found.focus', '1.0',\n tk.END) # remove existing tag\n try:\n start, end = self.text.tag_nextrange('found', self.start, tk.END)\n self.text.tag_add('found.focus', start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.see(start)\n self.start = end\n except ValueError:\n if self.start != '1.0':\n self.start = '1.0'\n self.text.see('1.0')\n self.highlight_next_match()", "def highlight_next_match(self):\n self.text.tag_remove('found.focus', '1.0',\n tk.END) # remove existing tag\n try:\n start, end = self.text.tag_nextrange('found', self.start, tk.END)\n self.text.tag_add('found.focus', start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.see(start)\n self.start = end\n except ValueError:\n if self.start != '1.0':\n self.start = '1.0'\n self.text.see('1.0')\n self.highlight_next_match()", "def IntersectWithLine(self, , , p_float_6, p_float_7, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def go_to_line(self, value=None):\n self.searcher = Toplevel()\n self.searcher.wm_title(\"Go To Line\")\n self.line_number = Entry(self.searcher)\n on_clicker = Button(self.searcher, command=self.go_to, text=\"Go\")\n self.line_number.pack()\n on_clicker.pack()", "def line(self) -> int:", "def scrollPoint(self):\r\n # productive #onButton\r\n profprint()\r\n self.changeValue()\r\n widget = slicer.modules.NeedleFinderWidget\r\n needle = widget.editNeedleTxtBox.value\r\n # print self.ptNumber\r\n # print needle\r\n coord = [0, 0, 0]\r\n ptName = '.' 
+ str(needle) + '-' + str(self.ptNumber)\r\n # print ptName\r\n modelNode = slicer.util.getNode(ptName)\r\n if modelNode != None:\r\n self.ptNumber = self.ptNumber + 1\r\n if modelNode.GetAttribute(\"ValidationNeedle\") == \"1\":\r\n modelNode.GetFiducialCoordinates(coord)\r\n X = coord[0]\r\n Y = coord[1]\r\n Z = coord[2]\r\n\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\r\n if sRed == None :\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\r\n\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\r\n if sYellow == None :\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\r\n\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n\r\n mYellow = sYellow.GetSliceToRAS()\r\n mYellow.SetElement(0, 3, X)\r\n sYellow.Modified()\r\n sYellow.UpdateMatrices()\r\n\r\n mGreen = sGreen.GetSliceToRAS()\r\n mGreen.SetElement(1, 3, Y)\r\n sGreen.Modified()\r\n sGreen.UpdateMatrices()\r\n\r\n mRed = sRed.GetSliceToRAS()\r\n mRed.SetElement(2, 3, Z)\r\n sRed.Modified()\r\n sRed.UpdateMatrices()\r\n elif self.ptNumber != 0:\r\n self.ptNumber = 0\r\n self.scrollPoint()", "def _MoveToMain(self, input_line, input_lines, unused_output_stream):\n for line in input_lines[input_line - 1:]:\n if line.strip():\n # Skipped all the whitespace.\n break\n\n # Moving on to the next line.\n input_line += 1\n\n return input_line", "def _goto(self, end):\n ## Version with undo-stuff\n go_modes = ( self._drawing,\n self._pencolor,\n self._pensize,\n isinstance(self._fillpath, list))\n screen = self.screen\n undo_entry = (\"go\", self._position, end, go_modes,\n (self.currentLineItem,\n self.currentLine[:],\n screen._pointlist(self.currentLineItem),\n self.items[:])\n )\n if self.undobuffer:\n self.undobuffer.push(undo_entry)\n start = self._position\n if self._speed and screen._tracing == 1:\n diff = (end-start)\n diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2\n nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))\n delta = diff * (1.0/nhops)\n for n in range(1, nhops):\n if n == 1:\n top = True\n else:\n top = False\n self._position = start + delta * n\n if self._drawing:\n screen._drawline(self.drawingLineItem,\n (start, self._position),\n self._pencolor, self._pensize, top)\n self._update()\n if self._drawing:\n screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),\n fill=\"\", width=self._pensize)\n # Myturtle now at end,\n if self._drawing: # now update currentLine\n self.currentLine.append(end)\n if isinstance(self._fillpath, list):\n self._fillpath.append(end)\n ###### vererbung!!!!!!!!!!!!!!!!!!!!!!\n self._position = end\n if self._creatingPoly:\n self._poly.append(end)\n if len(self.currentLine) > 42: # 42! answer to the ultimate question\n # of life, the universe and everything\n self._newLine()\n self._update() #count=True)", "def emission_line_search(line_e, delta_e, incident_energy, element_list=None):\n if xraylib is None:\n raise XraylibNotInstalledError(__name__)\n\n if element_list is None:\n element_list = range(1, 101)\n\n search_list = [XrfElement(item) for item in element_list]\n\n cand_lines = [e.line_near(line_e, delta_e, incident_energy) for e in search_list]\n\n out_dict = dict()\n for e, lines in zip(search_list, cand_lines):\n if lines:\n out_dict[e.sym] = lines\n\n return out_dict" ]
[ "0.68932176", "0.6578148", "0.6477977", "0.6415695", "0.63791597", "0.63132477", "0.6291235", "0.62559795", "0.61942357", "0.6165324", "0.59852433", "0.59499186", "0.58552325", "0.58447623", "0.58173835", "0.57838714", "0.5747636", "0.5531475", "0.5473468", "0.546951", "0.5461822", "0.54303175", "0.5413849", "0.5384173", "0.53791416", "0.53725237", "0.5350976", "0.5339442", "0.5338593", "0.5332258", "0.5307211", "0.53053945", "0.5299419", "0.5290109", "0.5288816", "0.5279989", "0.52793086", "0.5249009", "0.5241272", "0.5215144", "0.52015996", "0.5197061", "0.51939654", "0.51923573", "0.5185961", "0.5179895", "0.5168259", "0.51611984", "0.51556337", "0.5149126", "0.51434946", "0.5126374", "0.51237166", "0.51232713", "0.51213145", "0.51180154", "0.51129127", "0.5110922", "0.51104087", "0.51078117", "0.5093366", "0.50883484", "0.50783074", "0.50763977", "0.5071527", "0.506313", "0.50605404", "0.50549245", "0.50492346", "0.5024714", "0.5023112", "0.5012622", "0.5012463", "0.49977225", "0.49965602", "0.49911416", "0.498723", "0.4974136", "0.49739587", "0.49711296", "0.49672827", "0.49598664", "0.495074", "0.49488056", "0.49396244", "0.4914265", "0.49116877", "0.48979148", "0.48905012", "0.48888645", "0.48879063", "0.48867026", "0.48867026", "0.48846698", "0.48794216", "0.48773235", "0.4876457", "0.48761818", "0.4876006", "0.48721284" ]
0.6214263
8
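Note on the centerline negative above: the resampling snippet builds its arc-length array s with an explicit Python loop over consecutive points. A vectorized NumPy equivalent is sketched below for reference only; it is not part of the dataset, and the function name cumulative_arclength is invented here.

import numpy as np

def cumulative_arclength(x, y):
    # Length of each segment between consecutive centerline points.
    seg = np.hypot(np.diff(x), np.diff(y))
    # Running total with s[0] = 0, matching the loop-based version above.
    return np.concatenate(([0.0], np.cumsum(seg)))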
Computes the direction in which to update joint positions.
def _compute_dq(self, finger_id, xdes, q0):
    Ji = self.compute_jacobian(finger_id, q0)[:3, :]
    frame_id = self.tip_link_ids[finger_id]
    xcurrent = self.data.oMf[frame_id].translation
    Jinv = np.linalg.pinv(Ji)
    return Jinv.dot(xdes - xcurrent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def junction_direction(start_junction: Cell, end_junction: Cell) -> Direction:\n dx = end_junction.column - start_junction.column\n dy = end_junction.row - start_junction.row\n if dy == 0:\n return Direction.E if dx > 0 else Direction.W\n return Direction.S if dy > 0 else Direction.N", "def direction(self):\n len = self.length()\n if len == 0.0:\n uvec = pos.Pos(np.transpose(np.array([0, 0, 0])))\n else:\n uvec = pos.Pos(np.transpose(np.array([(self.end.x - self.start.x) / len,\n (self.end.y - self.start.y) / len,\n (self.end.z - self.start.z) / len])))\n return uvec", "def direction(self):\r\n return 180 - atan2(self.x, self.y)*180/pi", "def move(self, dt):\n lims = self.settings['agent']['jointLimits']\n # print '[move] curr joint Angle:'\n # print self.jointAngle\n # print '[move] curr speed:'\n # print self.speed\n\n J = self.jointAngle + dt * np.array(self.speed)\n self.jointAngle[0] = min(max(J[0], lims[0][0]), lims[0][1])\n self.jointAngle[1] = min(max(J[1], lims[1][0]), lims[1][1])\n self.forward_kinematics()", "def direction(self):\n g = self._grad_f(self._x, *self._args)\n self._calls[1] += 1\n if self._prev_dx is None:\n dx = -g\n else:\n b = max(0, np.dot(g, g - self._prev_g) / np.sum(self._prev_g ** 2))\n dx = -g + b * self._prev_dx\n if np.dot(dx, g) > 0:\n dx = -g\n self._prev_g = g\n self._prev_dx = dx\n return np.nan_to_num(dx)", "def direction(self):\n return atan2d(self.y, self.x)", "def get_normalized_direction(self, direction):\n return round(self.normal_joystick_slope * direction + self.normal_joystick_intercept, 2)", "def direction(self):\n norm=math.sqrt(self.x**2 + self.y**2 + self.z**2)\n return Vector3(self.x/norm, self.y/norm, self.z/norm)", "def direction(self) -> np.ndarray:\n return self._direction", "def move(self):\n if (self._dir is Direction.UP) and (self._y_pos is 0):\n self._dir = Direction.DOWN\n elif (self._dir is Direction.DOWN) and (self._y_pos+self._len is self._bs):\n self._dir = Direction.UP\n elif (self._dir is Direction.LEFT) and (self._x_pos is 0):\n self._dir = Direction.RIGHT\n elif (self._dir is Direction.RIGHT) and (self._x_pos+self._len is self._bs):\n self._dir = Direction.LEFT\n self.change_pos(self._dir)\n return self._dir", "def update_position_direction(self, l):\n\n x = self.x + self.mu * l\n mu = self.mu\n\n return x, mu", "def get_direction(self):\n return self.actual_coordinates[2]", "def direction_angle(self):\n return math.atan2(self.velocity, self.velocity)", "def _joint_angle_control(self):\n\n error = self.target_pos - self.robot_arm_pos\n return self._pd_control(error) + self.torque", "def direction(self):\n if self._is_hit:\n return Direction.NOT_MOVING\n return self._dir", "def find_direction(self):\n\t\tif self.direction == OUTPUT.MOTOR_UP:\n\t\t\tfor floor in xrange(self.currentFloor+1, config.NUM_FLOORS):\n\t\t\t if self.orderQueue.has_order_in_floor(floor):\n\t\t\t\t\treturn OUTPUT.MOTOR_UP\n\t\t\treturn OUTPUT.MOTOR_DOWN\n\t\telse:\n\t\t\tfor floor in xrange(self.currentFloor-1, -1, -1):\n\t\t\t\tif self.orderQueue.has_order_in_floor(floor):\n\t\t\t\t\treturn OUTPUT.MOTOR_DOWN\n\t\t\treturn OUTPUT.MOTOR_UP\n\t\treturn OUTPUT.MOTOR_UP", "def comet_joint_orient(pynodes, aim_axis = None, up_axis = None, up_dir = None, do_auto = False):\n if aim_axis is None:\n aim_axis = [1, 0, 0]\n if up_axis is None:\n up_axis = [0, 0, 1]\n if up_dir is None:\n up_dir = [1, 0, 0]\n # convert to Vector\n aim_axis = pm.dt.Vector(aim_axis)\n up_axis = pm.dt.Vector(up_axis)\n up_dir = pm.dt.Vector(up_dir)\n # Filter supplied pynodes, 
if equal to 0 then return false\n if len(pynodes) == 0:\n return False\n\n # make sure only joint get passed through here\n pynodes = pm.ls(pynodes, type = 'joint')\n\n # init variable prevUp for later use\n prev_up = pm.dt.Vector()\n\n for i, o in enumerate(pynodes):\n parent_point = None\n # first we need to unparent everything and then store that,\n children = o.getChildren()\n for x in children:\n x.setParent(None)\n\n # find parent for later in case we need it\n parent = o.getParent()\n\n # Now if we have a child joint... aim to that\n aim_tgt = None\n for child in children:\n if child.nodeType() == 'joint':\n aim_tgt = child\n break\n\n if aim_tgt:\n # init variable upVec using upDir variable\n up_vec = pm.dt.Vector(up_dir)\n\n # first off... if doAuto is on, we need to guess the cross axis dir\n if do_auto:\n # now since the first joint we want to match the second orientation\n # we kind of hack the things passed in if it is the first joint\n # ie: if the joint doesnt have a parent... or if the parent it has\n # has the 'same' position as itself... then we use the 'next' joints\n # as the up cross calculations\n jnt_point = o.getRotatePivot(space = 'world')\n if parent:\n parent_point.setValue(parent.getRotatePivot(space = 'world'))\n else:\n parent_point = jnt_point.copy()\n aim_tgt_point = aim_tgt.getRotatePivot(space = 'world')\n\n # how close to we consider 'same'?\n tol = 0.0001\n\n point_cond = jnt_point - parent_point\n pos_cond = [abs(x) for x in point_cond.tolist()]\n if not parent or pos_cond[0] <= tol and pos_cond[1] <= tol and pos_cond[2] <= tol:\n # get aimChild\n aim_child = None\n aim_children = aim_tgt.getChildren(type = 'joint')\n if aim_children:\n aim_child = aim_children[0]\n\n # get aimChild vector\n if aim_child:\n aim_child_point = aim_child.getRotatePivot(space = 'world')\n else:\n aim_child_point = pm.dt.Vector()\n\n # find the up vector using child vector of aim target\n up_vec = (jnt_point - aim_tgt_point).cross(aim_child_point - aim_tgt_point)\n else:\n # find the up vector using the parent vector\n up_vec = (parent_point - jnt_point).cross(aim_tgt_point - jnt_point)\n\n # reorient the current joint\n a_cons = pm.aimConstraint(\n aim_tgt, o, aimVector = aim_axis, upVector = up_axis, worldUpVector = up_vec.tolist(),\n worldUpType = 'vector', weight = 1\n )\n pm.delete(a_cons)\n\n # now compare the up we used to the prev one\n current_up = up_vec.normal()\n # dot product for angle between... store for later\n dot = current_up.dot(prev_up)\n prev_up = up_vec\n\n if i > 0 >= dot:\n # adjust the rotation axis 180 if it looks like we have flopped the wrong way!\n # FIXME: some shit need to fix here\n # pm.xform( o, relative = True, objectSpace = True, rotateAxis = True )\n o.rotateX.set(o.rotateX.get() + (aim_axis.x * 180))\n o.rotateY.set(o.rotateY.get() + (aim_axis.y * 180))\n o.rotateZ.set(o.rotateZ.get() + (aim_axis.z * 180))\n\n prev_up *= -1\n elif parent:\n # otherwise if there is no target, just dup orientation of parent...\n transformation.align(o, parent, mode = 'rotate')\n\n # and now finish clearing out joint axis ...\n pm.joint(o, e = True, zeroScaleOrient = True)\n transformation.freeze_transform(o)\n\n # now that we are done ... 
reparent\n if len(children) > 0:\n for x in children:\n x.setParent(o)\n\n return True", "def update_position_direction(self, l):\n\n x = np.sqrt(self.x**2 + l**2 + 2 * l * self.x * self.mu)\n mu = (l + self.x * self.mu) / x\n\n return x, mu", "def get_direction(self):\r\n return self.__direction", "def dmove_joint(self, delta_pos: [list, np.ndarray]) -> [bool, np.ndarray]:\n if not isinstance(delta_pos, np.ndarray):\n delta_pos = np.array(delta_pos)\n abs_pos = np.array(self.get_current_joint_position()) # or self._position_joint_desired ?\n abs_pos += delta_pos\n return self.move_joint(abs_pos)", "def read_direction(self):\n global motor_direction\n with self._lock:\n return motor_direction", "def update_direction(self, move : np.ndarray, direction: np.ndarray):\r\n pos = move.copy()\r\n \r\n\r\n pos += direction\r\n while(self.in_board(pos)):\r\n if self.board[pos[0],pos[1]] == self.turn:\r\n pos -= direction\r\n while((pos != move).any()):\r\n self.board[pos[0], pos[1]] = self.turn\r\n self.count += 1\r\n pos -= direction\r\n break\r\n\r\n elif self.board[pos[0],pos[1]] == 0:\r\n\r\n break\r\n else:\r\n pos += direction", "def calc_relative_direction(ship_dir, ww_dir):\n if ship_dir not in (\"N\", \"E\", \"S\", \"W\"):\n raise Exception(\"Direction not accepted.\")\n ww_360 = ww_dir\n ww_360[ww_360 < 0] = 360 + ww_dir[0]\n if ship_dir in (\"N\"):\n dir_4 = np.full((len(ww_dir), 1), 2)\n dir_4[(ww_dir < 45) | (ww_dir > 315)] = 1\n dir_4[(ww_dir > 135) & (ww_dir < 225)] = 3\n if ship_dir in (\"E\"):\n dir_4 = np.full((len(ww_dir), 1), 2)\n dir_4[(ww_dir > 45) & (ww_dir < 135)] = 1\n dir_4[(ww_dir > 225) & (ww_dir < 315)] = 3\n if ship_dir in (\"W\"):\n dir_4 = np.full((len(ww_dir), 1), 2)\n dir_4[(ww_dir > 45) & (ww_dir < 135)] = 3\n dir_4[(ww_dir > 225) & (ww_dir < 315)] = 1\n if ship_dir in (\"S\"):\n dir_4 = np.full((len(ww_dir), 1), 2)\n dir_4[(ww_dir < 45) | (ww_dir > 315)] = 3\n dir_4[(ww_dir > 135) & (ww_dir < 225)] = 1\n return dir_4", "def get_direction_matrix(self) -> int:", "def __calc_target_angle(self, delta_angle, direction):\n if self.is_reverse:\n direction = not direction\n\n if direction:\n if self.current_angle - delta_angle < 0 or self.current_angle - delta_angle > pi:\n return self.current_angle\n return self.current_angle - delta_angle # this mines (-) for cw.\n else:\n if self.current_angle + delta_angle < 0 or self.current_angle + delta_angle > pi:\n return self.current_angle\n return self.current_angle + delta_angle", "def direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"direction\")", "def move_to_joint_pos_delta(self, cmd):\n curr_q = self.joint_angles()\n joint_names = self.joint_names()\n\n joint_command = dict([(joint, curr_q[joint] + cmd[i])\n for i, joint in enumerate(joint_names)])\n\n self.move_to_joint_positions(joint_command)", "def getDirection(self):\n return self.ray.direction", "def calculate_direction(east, north):\n if not Ensemble.is_bad_velocity(east) and not Ensemble.is_bad_velocity(north):\n bin_dir = (math.atan2(east, north)) * (180.0 / math.pi)\n\n # The range is -180 to 180\n # This moves it to 0 to 360\n if bin_dir < 0.0:\n bin_dir = 360.0 + bin_dir\n\n return bin_dir\n else:\n return Ensemble.BadVelocity", "def set_j(cmd, limb, joints, index, delta):\n joint = joints[index]\n cmd[joint] = delta + limb.joint_angle(joint)", "def direction(self) -> str:\n return pulumi.get(self, \"direction\")", "def get_direction(self):\n return self.direction", "def direction(self) -> pulumi.Input[str]:\n return pulumi.get(self, 
\"direction\")", "def _calc_relative_move_direction(self, char, direction):\n if char in (\"Left\", \"Right\"):\n di = -1 if self.video.hflip else 1\n else:\n di = -1 if self.video.vflip else 1\n return direction * di", "def calculate_head_direction_from_leds(positions, return_as_deg=False):\n X_led1, Y_led1, X_led2, Y_led2 = positions[:, 0], positions[:, 1], positions[:, 2], positions[:, 3]\n # Calculate head direction\n head_direction = np.arctan2(X_led1 - X_led2, Y_led1 - Y_led2)\n # Put in right perspective in relation to the environment\n offset = +np.pi/2\n head_direction = (head_direction + offset + np.pi) % (2*np.pi) - np.pi\n head_direction *= -1\n if return_as_deg:\n head_direction = head_direction * (180 / np.pi)\n\n return head_direction", "def construct_direction(self, embs_source: torch.Tensor, embs_target: torch.Tensor):\n return (embs_target.mean(0) - embs_source.mean(0)).unsqueeze(0)", "def direction(self):\n g = self._grad_f(self._x, *self._args)\n H = self._hess_f(self._x, *self._args)\n self._calls[1:3] += 1\n gr = g\n Hr = g\n damping = 0\n while True:\n try:\n L, _ = cho_factor(Hr, lower=0)\n dx = -cho_solve((L, 0), gr)\n break\n except:\n #print('Ooops... singular Hessian')\n damping = 10 * max(TINY, damping)\n gr = g + damping * self._x\n Hr = H + damping * np.eye(len(self._x))\n return np.nan_to_num(dx)", "def update_player_direction(self,direction):\n pass", "def update_direction(self):\n direction = self.get_direction()\n\n if direction[1] == NORTH[1]:\n self.current_animation = N_ANIM \n elif direction[1] == SOUTH[1]:\n self.current_animation = S_ANIM \n elif direction[0] == EAST[0]:\n self.current_animation = E_ANIM \n else:\n self.current_animation = W_ANIM", "def get_direction(self):\n\n return -1 if self.curr_player == self.PLAYER1 else 1", "def find_upwinding_direction(self):\n self.upwinded_face_cell = []\n for cell_index in range(self.mesh.get_number_of_cells()):\n for [face_index, orientation] in zip(self.mesh.get_cell(cell_index), \n self.mesh.get_cell_normal_orientation(cell_index)):\n if orientation*self.current_velocity[face_index]>0:\n self.upwinded_face_cell.append([face_index, cell_index])\n\n ## Set fractional flow for dirichlet cells set by points\n ## based on up-winded flow direction. If the flow is out \n ## of the cell, this is done automatically in previous \n ## loops. However, for flow into the cell, we must find \n ## the saturation from the cell it points to. 
\n for face_index in self.mesh.get_dirichlet_pointer_faces():\n (cell_index, orientation) = self.mesh.get_dirichlet_pointer(face_index)\n if self.current_velocity[face_index]*orientation<0.:\n self.upwinded_face_cell.append([face_index, cell_index])", "def direction(a:tuple, b:tuple, c:tuple)->int:\n return ((b[1] - a[1]) * (c[0] - b[0])) - ((b[0] - a[0]) * (c[1] - b[1]))", "def getRobotDirection(self):\n return self.direction", "def getRobotDirection(self):\n return self.direction", "def getRobotDirection(self):\n return self.direction\n #raise NotImplementedError", "def getRobotDirection(self):\n return self.direction\n #raise NotImplementedError", "def get_origin_direction(self):\n return self.origin_coordinates[2]", "def get_joint_positions(self, joint_angles ): \n\n\n # current angles\n res_joint_angles = joint_angles.copy() \n\n # detect limits\n maskminus= res_joint_angles > self.joint_lims[:,0]\n maskplus = res_joint_angles < self.joint_lims[:,1]\n \n res_joint_angles = res_joint_angles*(maskplus*maskminus) \n res_joint_angles += self.joint_lims[:,0]*(np.logical_not(maskminus) )\n res_joint_angles += self.joint_lims[:,1]*(np.logical_not(maskplus) )\n \n # mirror\n if self.mirror :\n res_joint_angles = -res_joint_angles\n res_joint_angles[0] += np.pi \n \n # calculate x coords of arm edges.\n # the x-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n x = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.cos( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # trabslate to the x origin \n x = np.hstack([self.origin[0], x+self.origin[0]])\n\n # calculate y coords of arm edges.\n # the y-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n y = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.sin( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # translate to the y origin \n y = np.hstack([self.origin[1], y+self.origin[1]])\n\n pos = np.array([x, y]).T\n \n return (pos, res_joint_angles)", "def head_direction(self, target, useAvoidance=False, verbose=False, turnSpeed=1, door = False):\n\n endVector = target\n\n speedLeft, speedRight = self.target_to_left_right_speeds(endVector)\n\n self.logger.debug(\"endVector: %s\" % repr(endVector))\n self.logger.debug(\"Wheel speeds: %d, %d\" % (speedLeft, speedRight))", "def getDirectionalMovement(self, currentLocation, direction):\n # get the maximum diagonal vs single line movements based on the robot type\n if self.isCrusader:\n # Crusader can move a maximum of 3 blocks in a single line and 2 blocks in a diagonal line\n singleLineMovementSpeed = 3\n diagonalMovementSpeed = 2\n else:\n # all other units can move a maximum of 2 blocks in a single line and 1 block in a diagonal line\n singleLineMovementSpeed = 2\n diagonalMovementSpeed = 1\n\n # set the x and y directional movements to the current direction's x and y. 
These will be used to store the maximum movement speed of the robot in a given direction\n xDirectionalMovement = direction[0]\n yDirectionalMovement = direction[1]\n\n # if neither x or y are 0, then the unit is traveling diagonally so we multiply the values by the diagonal movement speed\n if xDirectionalMovement != 0 and yDirectionalMovement != 0:\n xDirectionalMovement *= diagonalMovementSpeed\n yDirectionalMovement *= diagonalMovementSpeed\n else:\n # else we multiply the movement values by the single line movement speed\n xDirectionalMovement *= singleLineMovementSpeed\n yDirectionalMovement *= singleLineMovementSpeed\n\n # store the values of how far the current location is from the target location\n xOffset = self.targetLocation[0] - currentLocation[0]\n yOffset = self.targetLocation[1] - currentLocation[1]\n\n # if the distance from \n if self.isCrusader:\n if xDirectionalMovement != 0:\n if (xOffset > 0 and xOffset < 3) or (xOffset < 0 and xOffset > -3):\n xDirectionalMovement = xOffset\n\n if yDirectionalMovement != 0:\n if (yOffset > 0 and yOffset < 3) or (yOffset < 0 and yOffset > -3):\n yDirectionalMovement = yOffset\n else:\n if xDirectionalMovement != 0 and (xOffset == 1 or xOffset == -1):\n xDirectionalMovement = xOffset\n\n if yDirectionalMovement != 0 and (yOffset == 1 or yOffset == -1):\n yDirectionalMovement = yOffset\n\n return (xDirectionalMovement, yDirectionalMovement)", "def update(self):\n self.updateCount = self.updateCount + 1\n if self.updateCount > self.updateCountMax:\n\n # update previous positions\n for i in range(self.length - 1, 0, -1):\n self.x[i] = self.x[i - 1]\n self.y[i] = self.y[i - 1]\n\n # update position of player : party lead\n if self.direction == 0:\n self.x[0] = self.x[0] + self.step\n if self.direction == 1:\n self.x[0] = self.x[0] - self.step\n if self.direction == 2:\n self.y[0] = self.y[0] - self.step\n if self.direction == 3:\n self.y[0] = self.y[0] + self.step\n\n self.updateCount = 0", "def joint_angles_callback(self, msg):\n\t\t\t# read the current joint angles from the robot\n\t\t\tpos_curr = np.array([msg.joint1,msg.joint2,msg.joint3,msg.joint4,msg.joint5,msg.joint6,msg.joint7]).reshape((7,1))\n\n\t\t\t# convert to radians\n\t\t\tpos_curr = pos_curr*(math.pi/180.0)\n\n\t\t\t# update torque from PID based on current position\n\t\t\tself.torque = self.Impedence_control(pos_curr)", "def direction(self):\n return self.cfg.direction", "def motors__Direction(self, speed_l, speed_r):\n\n if speed_l >= 0:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n\n if speed_r >= 0:\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n else :\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)", "def direction(self):\n _direction = self._custom.get(\"direction\")\n if _direction is not None:\n return _direction\n\n _direction = self._infer_direction()\n self._custom[\"direction\"] = _direction\n\n return _direction", "def direction(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"direction\")", "def getDirection(self):\n if 'N' in str(self.trip_update.trip.trip_id):\n direction = 'northbound'\n if 'S' in str(self.trip_update.trip.trip_id):\n direction = 'southbound'\n return direction", "def _get_direction(self, action, direction):\n left = [2,3,1,0]\n right = [3,2,0,1]\n if direction == 0:\n new_direction = action\n elif direction == -1:\n new_direction = left[action]\n elif direction == 1:\n new_direction = 
right[action]\n else:\n raise Exception(\"getDir received an unspecified case\")\n return new_direction", "def direction(self):\n return None if not bool(self.relation) else (self.s_end <= self.o_start)", "def _vector_direction(self,\n desired_pos : np.array,\n actual_pos : np.array) -> np.array:\n return desired_pos - actual_pos", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")", "def forward_kinematics(joint_angle_state,num_dof,link_length):\n\t#initialize the xpos and ypos\n\txpos = 0\n\typos = 0\n\tfor i in range(1,num_dof+1):\n\t\txpos += (link_length*np.cos(np.sum(joint_angle_state[0,:i])))\n\t\typos += (link_length*np.sin(np.sum(joint_angle_state[0,:i])))\n\treturn (int(xpos),int(ypos))", "def get_direction(curr_pos, next_pos):\n if curr_pos == next_pos:\n return 'CLEAN'\n\n v_dist = next_pos[0] - curr_pos[0]\n h_dist = next_pos[1] - curr_pos[1]\n\n if h_dist != 0:\n if h_dist < 0:\n return 'LEFT'\n else:\n return 'RIGHT'\n else:\n if v_dist < 0:\n return 'UP'\n else:\n return 'DOWN'", "def direction(self):\n return self._direction.copy()", "def _route_to_goal(self, position, orientation):\n _, (_x,_y) = self._calc_torus_distance(position, self.goal)\n move = None\n\n if orientation == 'up':\n if self.goal[1] > position[1] and _y > 0:\n move = 'move'\n elif self.goal[1] < position[1] and _y < 1:\n move = 'move'\n elif self.goal[0] > position[0]:\n if _x > 0:\n move = 'left'\n else:\n move = 'right'\n else:\n if _x > 0:\n move = 'right'\n else:\n move = 'left'\n\n if orientation == 'down':\n if self.goal[1] < position[1] and _y > 0:\n move = 'move'\n elif self.goal[1] > position[1] and _y < 1:\n move = 'move'\n elif self.goal[0] > position[0]:\n if _x > 0:\n move = 'right'\n else:\n move = 'left'\n else:\n if _x > 0:\n move = 'left'\n else:\n move = 'right'\n\n if orientation == 'right':\n if self.goal[0] < position[0] and _x > 0:\n move = 'move'\n elif self.goal[0] > position[0] and _x < 1:\n move = 'move'\n elif self.goal[1] > position[1]:\n if _y > 0:\n move = 'left'\n else:\n move = 'right'\n else:\n if _y > 0:\n move = 'right'\n else:\n move = 'left'\n\n if orientation == 'left':\n if self.goal[0] > position[0] and _x > 0:\n move = 'move'\n elif self.goal[0] < position[0] and _x < 1:\n move = 'move'\n elif self.goal[1] > position[1]:\n if _y > 0:\n move = 'right'\n else:\n move = 'left'\n else:\n if _y > 0:\n move = 'left'\n else:\n move = 'right'\n\n return move", "def __get_new_direction(self):\n return fabs(self.random_generator.normal(self.__direction_mean,\n self.__direction_deviation))", "def change_direction(self, direction):\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.key == pygame.K_UP:\r\n if self.direction == [0, 1]:\r\n self.direction == [0, 1]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [0, -1]\r\n return self.direction\r\n elif event.key == pygame.K_DOWN:\r\n if self.direction == [0, -1]:\r\n self.direction == [0, -1]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [0, 1]\r\n return self.direction\r\n elif event.key == 
pygame.K_LEFT:\r\n if self.direction == [1, 0]:\r\n self.direction == [1, 0]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [-1, 0]\r\n return self.direction\r\n elif event.key == pygame.K_RIGHT:\r\n if self.direction == [-1, 0]:\r\n self.direction == [-1, 0]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [1, 0]\r\n return self.direction", "def get_direction(self):\n is_direction_correct = False\n while not is_direction_correct:\n direction = random.randint(0, 2)\n if direction == 0:\n self.turtle.left(90)\n elif direction == 1:\n self.turtle.right(90)\n else:\n self.turtle.right(0)\n is_direction_correct = self.check_boundary()", "def update_direction(self, update_data: dict):\n if self.on_update_direction:\n self.on_update_direction(self, update_data)", "def right(self):\r\n z = len(direction_tuple)\r\n if self.d in direction_tuple:\r\n index = direction_tuple.index(self.d)\r\n if index == (z-1):\r\n self.d = direction_tuple[0]\r\n else:\r\n self.d = direction_tuple[index + 1]\r\n else:\r\n print(\"NO VALID ROBOT POSITION\")", "def get_desired_joint_position(self):\n return self._position_joint_desired", "def get_next_moving_direction(self, nearest_zombie_pos=None):\n if nearest_zombie_pos is None:\n nearest_zombie_pos=(uniform(-1.0, 1.0), uniform(-1.0, 1.0))\n\n vector = (self.pos[0]-nearest_zombie_pos[0], \\\n self.pos[1]-nearest_zombie_pos[1])\n magnitude = math.sqrt(vector[0]**2 + vector[1]**2)\n direction_vector = (vector[0]/magnitude, vector[1]/magnitude)\n return direction_vector", "def translate_direction(self):\n xpart = math.sin(self.direction)\n ypart = math.cos(self.direction)\n if ypart > 0:\n print(\"oben \", end='')\n else:\n print(\"unten \", end='')\n if xpart > 0:\n print(\"rechts\")\n else:\n print(\"links\")", "def set_direction(self, dir):\n if dir == 0:\n self.direction = [0, -1]\n elif dir == 1:\n self.direction = [1, 0]\n elif dir == 2:\n self.direction = [0, 1]\n elif dir == 3:\n self.direction = [-1, 0]", "def direction(point0, point1):\n d = [0, 0, 0]\n vector = [point1[0] - point0[0], point1[1] - point0[1]]\n d[1] = math.atan2(vector[1], vector[0])\n while d[1] <= -np.pi / 2:\n d[1] += np.pi\n return d", "def motor_angles(self):\n return np.asarray(self._robot_state.position)", "def getDirection(self):\n return self.listener.direction", "def dirToIncrement(direction):\n # assume upperleft corner is (0,0)\n if direction == 'up':\n return (0,-1)\n elif direction == 'down':\n return (0,1)\n elif direction == 'left':\n return (-1,0)\n elif direction == 'right':\n return (1,0)\n vertical, horizontal = direction.split()\n v1, h1 = dirToIncrement(vertical)\n v2, h2 = dirToIncrement(horizontal)\n return (v1+v2, h1+h2)", "def coordnav(self,dx,dy,dth): \n self.cg_ant = np.array(self.cg)\n self.coord[0] = self.coord[0] - self.cg[0]\n self.coord[1] = self.coord[1] - self.cg[1] \n self.Rz = rotation.matrix([0,0,1],dth)\n self.coord = np.dot(self.Rz,self.coord)\n \n self.coord[0] = self.coord[0] + self.cg[0] + dx\n self.coord[1] = self.coord[1] + self.cg[1] + dy \n \n self.px = self.coord[:,self.px_index]\n self.Bx = self.px-self.cg \n self.basex = self.Bx/math.sqrt(np.dot(self.Bx,self.Bx))", "def move(self, twist: Optional[Twist] = None):\n if twist is None:\n left = right = 0\n self.navigation_goal = None\n else:\n linear = np.clip(twist.linear.x, -1, 1)\n angular = np.clip(twist.angular.x, -1, 1)\n left, right = (linear - angular) / 2, (linear + angular) / 2\n # # always give a robot the full velocity at least on one side\n # if 
(greater := max(abs(left), abs(right))) > 0:\n # left, right = left / greater, right / greater\n\n self.locomotion_lock.acquire()\n self.v_left = SPEEDUP * left\n self.v_right = SPEEDUP * right\n self.locomotion_lock.release()", "def compute_angle(self, direction):\n scaled_cosine = self.w1.dot(direction) # ||direction|| cos(theta)\n scaled_sine = self.w2.dot(direction) # ||direction|| sin(theta)\n return np.arctan2(scaled_sine, scaled_cosine)", "def move_in_direction(self, direction):\n if direction == NORTH:\n self.__position[y] += 1\n elif direction == NORTHEAST:\n self.__position[x] += 1\n self.__position[y] += 1\n elif direction == EAST:\n self.__position[x] += 1\n elif direction == SOUTHEAST:\n self.__position[x] += 1\n self.__position[y] -= 1\n elif direction == SOUTH:\n self.__position[y] -= 1\n elif direction == SOUTHWEST:\n self.__position[x] -= 1\n self.__position[y] -= 1\n elif direction == WEST:\n self.__position[x] -= 1\n elif direction == NORTHWEST:\n self.__position[x] -= 1\n self.__position[y] += 1", "def turn_to_endpoint(previous_direction, w_real, d, d_traveled):\n theta = math.atan((D_ENDPOINT-d_traveled)/w_real)\n phi = np.pi/2 - theta\n\n if previous_direction==0:\n # drone yaws to the right to the direction of the endpoint\n yaw(phi)\n else:\n # drone yaws to the left to the direction of the endpoint\n yaw(-phi)\n phi = -phi\n\n return phi", "def compute_direction(self, feats):\n if feats.name == \"ARNC\":\n if feats[\"z-score\"] < -1.5:\n return Directions.long_dir\n elif feats[\"z-score\"] > 1.5:\n return Directions.short_dir\n elif feats.name == \"UNG\":\n if feats[\"z-score\"] < -1.5:\n return Directions.short_dir\n elif feats[\"z-score\"] > 1.5:\n return Directions.long_dir", "def get_relative_direction(self, source, destination, orientation):\r\n direction = {}\r\n direction['North'] = {}\r\n direction['North']['North'] = 'Straight'\r\n direction['North']['East'] = 'Right'\r\n direction['North']['West'] = 'Left'\r\n direction['East'] = {}\r\n direction['East']['East'] = 'Straight'\r\n direction['East']['North'] = 'Left'\r\n direction['East']['South'] = 'Right'\r\n direction['South'] = {}\r\n direction['South']['South'] = 'Straight'\r\n direction['South']['East'] = 'Left'\r\n direction['South']['West'] = 'Right'\r\n direction['West'] = {}\r\n direction['West']['West'] = 'Straight'\r\n direction['West']['South'] = 'Left'\r\n direction['West']['North'] = 'Right'\r\n if type(orientation) != str or orientation not in ['North', 'East', 'South', 'West']:\r\n return None\r\n final_orientation = self.get_orientation_from_to(source, destination)\r\n if orientation == final_orientation:\r\n return 'Straight'\r\n else:\r\n print orientation\r\n print final_orientation\r\n return direction[orientation][final_orientation]", "def direction(self,four_dir=False):\n a = self.angle()\n if a is None:\n return None\n\n if four_dir:\n if a >= -45 and a <= 45:\n return \"UP\"\n elif a >= 45 and a <= 135:\n return \"RIGHT\"\n elif a >= 135 or a <= -135:\n return \"DOWN\"\n elif a >= -135 and a <= -45:\n return \"LEFT\"\n else:\n raise RuntimeError(\"Couldn't figure out %f\" % a)\n else:\n if a >= -22.5 and a <= 22.5:\n return \"UP\"\n elif a >= 22.5 and a <= 67.5:\n return \"UP-RIGHT\"\n elif a >= 67.5 and a <= 112.5:\n return \"RIGHT\"\n elif a >= 112.5 and a <= 157.5:\n return \"DOWN-RIGHT\"\n elif a >= 157.5 or a <= -157.5:\n return \"DOWN\"\n elif a >= -157.5 and a <= -112.5:\n return \"DOWN-LEFT\"\n elif a >= -112.5 and a <= -67.5:\n return \"LEFT\"\n elif a >= -67.5 and a <= 
-22.5:\n return \"UP-LEFT\"\n else:\n raise RuntimeError(\"Couldn't figure out %f\" % a)", "def update_position(position, direction):\n\n\tif direction == 'left':\n\t\treturn [position[0], position[1] - 1]\n\tif direction == 'right':\n\t\treturn [position[0], position[1] + 1]\n\tif direction == 'down':\n\t\treturn [position[0] + 1, position[1]]\n\tif direction == 'up':\n\t\treturn [position[0] - 1, position[1]]\n\treturn [-1, -1]", "async def direction(self, value) -> str:\n if value is None:\n return \"N\"\n\n direction_array = [\n \"N\",\n \"NNE\",\n \"NE\",\n \"ENE\",\n \"E\",\n \"ESE\",\n \"SE\",\n \"SSE\",\n \"S\",\n \"SSW\",\n \"SW\",\n \"WSW\",\n \"W\",\n \"WNW\",\n \"NW\",\n \"NNW\",\n \"N\",\n ]\n direction_str = direction_array[int((value + 11.25) / 22.5)]\n return self._translations[\"wind_dir\"][direction_str]", "def getMovement(self):\n # store the robot's current location and set the directional movement to 0,0 so that the robot won't move by default\n currentLocation = (self.me['x'], self.me['y'])\n directionalMovement = (0,0)\n\n # ensure that target location is not none and not equal to the current location\n if self.targetLocation and not currentLocation == self.targetLocation:\n\n # store the direction, directional movement, and the new map location we will trying to move the robot to this round\n direction = self.getDirection(currentLocation, self.targetLocation)\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # store the current direction for use later\n initialDirection = direction\n\n # by default, the robot is ready to move in the event that the new map location is already passable\n readyToMove = True\n\n # while the new map location is not passable\n while not self.isPassable(newLocation):\n # if unit is a crusader moving diagonally at their fastest pace, set their directional movement to (1,1)\n if self.isCrusader and directionalMovement[0] == 2 and directionalMovement[1] == 2:\n directionalMovement[0] = 1\n directionalMovement[1] = 1\n # or if the unit is traveling faster than 1 block East\n elif directionalMovement[0] > 1:\n # lower the unit's movement East by 1 block\n directionalMovement[0] -= 1\n # or if the unit is traveling faster than 1 block West\n elif directionalMovement[0] < -1:\n # lower the unit's movement West by 1 block\n directionalMovement[0] += 1\n # or if the unit is traveling faster than 1 block South\n elif directionalMovement[1] > 1:\n # lower the unit's movement South by 1 block\n directionalMovement[1] -= 1\n # or if the unit is traveling faster than 1 block North\n elif directionalMovement[1] < -1:\n # lower the unit's movement North by 1 block\n directionalMovement[1] += 1\n # else the unit is already moving the shortest distance they can in the current direction\n else:\n # rotate the robots direction clockwise and proceed\n direction = self.getRotatedDirection(direction, 1)\n\n # if we ened up facing the same direction we started in\n if direction == initialDirection:\n # let the code know we're not ready to move\n readyToMove = False\n # break out of the while loop\n break\n\n # overwrite the directional movement with a new one based on the direction we just got\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n\n # overwrite the new location with the location we get from the directional movement we just got\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # if the 
robot ended up not being ready to move\n if not readyToMove:\n # change the directional movement back to (0,0) so that it doesn't move\n directionalMovement = (0,0)\n else :\n self.targetLocation = self.getRandomPassableLocation()\n # return the directional movement\n return directionalMovement", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def direction(self) -> int:\n return self._direction", "def _step_direction(self, rho, phi, direction_reading, *args, **kwargs):\r\n condition = kwargs['obj'] is not None\\\r\n and rho <= self.range\\\r\n and phi <= self.aperture #<= 3*np.pi/self.n_sectors\r\n if direction_reading is None:\r\n direction_reading = 0.\r\n # import pdb; pdb.set_trace()\r\n if condition and direction_reading == 0.0:\r\n my_pos = self.get_position(self.sensors_idx[args[0]]) + np.r_[0, 0, 0.1] #+ np.r_[0, 0, 0.017]\r\n tar_post = kwargs['obj'].position + np.r_[0, 0, 0.07] # my_pos[2]]\r\n ray_res = p.rayTest(my_pos, tar_post, physicsClientId=self.sensor_owner.physics_client)[0][0]\r\n # signal_strength = self.propagation(rho, phi)\r\n if ray_res == kwargs['obj'].id:\r\n direction_reading = 1.\r\n return direction_reading", "def get_dir_of_obst(cur_pos: Node, cur_heading: float, obst_to_follow: ObstacleSegment) -> Dir:\n # find closest obstacle boundary point\n closest_dist = math.inf\n closest_bp = obst_to_follow.boundary_points[0]\n for bp in obst_to_follow.get_boundary_points():\n d = cur_pos.dist_2d(bp)\n if d <= closest_dist:\n closest_bp = bp\n closest_dist = d\n\n a = cur_pos.as_node_2d()\n b = cur_pos + Node.from_array(rotate(np.array([0, 0]), np.array([1, 0]), cur_heading))\n p = closest_bp.as_node_2d()\n\n # make a origin\n b = b - a\n p = p - a\n c_p = np.cross(b.as_ndarray_2d(), p.as_ndarray_2d())\n if np.all(c_p > 0):\n return Dir.LEFT\n else:\n return Dir.RIGHT", "def calculate_heading_direction_from_position(positions, smoothing=False, return_as_deg=False):\n X, Y = positions[:, 0], positions[:, 1]\n # Smooth diffs instead of speeds directly\n Xdiff = np.diff(X)\n Ydiff = np.diff(Y)\n if smoothing:\n Xdiff = smooth_signal(Xdiff, smoothing)\n Ydiff = smooth_signal(Ydiff, smoothing)\n # Calculate heading direction\n heading_direction = np.arctan2(Ydiff, Xdiff)\n heading_direction = np.append(heading_direction, heading_direction[-1])\n if return_as_deg:\n heading_direction = heading_direction * (180 / np.pi)\n\n return heading_direction", "def getBotRightVelocity(self):\n\t\tif len(self.prevPositions) < 2:\n\t\t\tself.velocity = (0,0,0)\n\t\telse:\n\t\t\ttime = self.position[2] - self.prevPositions[len(self.prevPositions)-1][2]\n\t\t\txdist = self.position[1][0] - self.prevPositions[len(self.prevPositions)-1][1][0]\n\t\t\tydist = self.position[1][1] - self.prevPositions[len(self.prevPositions)-1][1][1]\n\t\t\tself.velocity = (xdist,ydist,time.total_seconds())\n\t\treturn self.velocity", "def current_direction(self):\n return self._attributes.get(\"current_direction\")", "def get_goal_direction(self, cur_goal):\n\t\trho_robot = math.atan2(cur_goal.y - self.cur_pos.y, cur_goal.x - self.cur_pos.x)\n\n\t\tyaw_err = rho_robot - self.rotation\n\t\tif yaw_err < 0:\n\t\t\tself.cur_action = 'tr'\n\t\telse:\n\t\t\tself.cur_action = 
'tl'\n\t\tself.next_action_time = rospy.Time.now() + rospy.Duration(abs(yaw_err) / self.angular_speed)", "def get_angle_rad_between_joints(joint_a: Joint2D, joint_b: Joint2D) -> float:\n return math.atan2(joint_a.y - joint_b.y, joint_a.x - joint_b.x)", "def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))", "def get_direction(pi_values):\n if pi_values == (0, 0, 0, 0):\n return 'do not move'\n pi_right, pi_up, pi_left, pi_down = pi_values\n pi_sum = sum((pi_right, pi_up, pi_left, pi_down))\n\n p_right = pi_right / pi_sum\n p_up = pi_up / pi_sum\n p_left = pi_left / pi_sum\n p_down = pi_down / pi_sum\n\n return np.random.choice(\n ('right', 'up', 'left', 'down'),\n p=[p_right, p_up, p_left, p_down])", "def GetCGStepDir(self):\n if self.n_iter <= 1:\n self.hvec = self.mol.g_total\n gamma = 0.0\n else:\n v1 = self.traj.grad[-1] - self.traj.grad[-2]\n v1 = v1.reshape((1, const.NUMDIM * self.mol.n_atoms))\n v2 = self.traj.grad[-1].reshape((const.NUMDIM, self.mol.n_atoms, 1))\n gamma = numpy.linalg.norm(numpy.dot(v1, v2))\n gamma *= 1.0 / numpy.linalg.norm(self.traj.grad[-1])**2\n self.hvec = self.mol.g_total + gamma * self.hvec\n self.step_dir = self.hvec" ]
[ "0.66263056", "0.64241725", "0.6403347", "0.6384829", "0.635541", "0.6334753", "0.62950736", "0.6242469", "0.6156684", "0.61190027", "0.6118653", "0.6086555", "0.60604405", "0.59984165", "0.5993906", "0.5980935", "0.59729487", "0.59629905", "0.59500116", "0.5931701", "0.590662", "0.58862895", "0.58595294", "0.5856711", "0.5843115", "0.5835779", "0.5829806", "0.5822836", "0.5820924", "0.5812013", "0.5810164", "0.57964545", "0.57873756", "0.5761599", "0.5761255", "0.57542366", "0.5747684", "0.574381", "0.5743554", "0.57274693", "0.5727316", "0.5686406", "0.56828964", "0.56828964", "0.5672305", "0.5672305", "0.5671456", "0.5669859", "0.5665756", "0.5643113", "0.5636472", "0.56340665", "0.5631199", "0.562689", "0.56176996", "0.56088686", "0.5599692", "0.559684", "0.55908096", "0.55895734", "0.55877846", "0.55841184", "0.5582723", "0.55687314", "0.55615866", "0.5552442", "0.55450046", "0.55294347", "0.55281556", "0.5526004", "0.5519666", "0.55157775", "0.548434", "0.5480678", "0.5474303", "0.5467319", "0.54595673", "0.54578173", "0.5440314", "0.5434078", "0.5433077", "0.5418265", "0.5397833", "0.53960496", "0.53929716", "0.5390294", "0.5383909", "0.5375977", "0.5372015", "0.53687024", "0.53636444", "0.53631204", "0.5355761", "0.5355041", "0.534519", "0.5337984", "0.5328253", "0.53159", "0.53072935", "0.5301656", "0.52974576" ]
0.0
-1
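Note on the record above: _compute_dq implements a resolved-rate update, mapping the Cartesian error through the Jacobian pseudoinverse, dq = pinv(J) (xdes - xcurrent). A self-contained toy version for a planar two-link arm with unit link lengths is sketched below; the helper names (pinv_direction, fk, jacobian) and all parameter values are invented for illustration and are not from the dataset.

import numpy as np

def pinv_direction(J, x_des, x_cur):
    # Least-squares joint-space direction toward the task-space target,
    # mirroring _compute_dq above: dq = pinv(J) @ (x_des - x_cur).
    return np.linalg.pinv(J) @ (x_des - x_cur)

def fk(q):
    # End-effector position of a planar 2R arm with unit link lengths.
    return np.array([np.cos(q[0]) + np.cos(q[0] + q[1]),
                     np.sin(q[0]) + np.sin(q[0] + q[1])])

def jacobian(q):
    # Analytic Jacobian of fk with respect to the two joint angles.
    s1, s12 = np.sin(q[0]), np.sin(q[0] + q[1])
    c1, c12 = np.cos(q[0]), np.cos(q[0] + q[1])
    return np.array([[-s1 - s12, -s12],
                     [ c1 + c12,  c12]])

q = np.array([0.3, 0.5])
x_des = np.array([1.2, 0.7])
for _ in range(20):
    # Damped step along the pseudoinverse direction.
    q = q + 0.5 * pinv_direction(jacobian(q), x_des, fk(q))
# q now places fk(q) close to x_des.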
Compute the joint positions which approximately result in a given end effector position.
def inverse_kinematics(self, finger_id, xdes, q0, tol=0.001, max_iter=20):
    iter = 0
    q = self._project_onto_constraints(q0)
    xcurrent = self.forward_kinematics(q)[finger_id]
    error = np.linalg.norm(xdes - xcurrent)
    dt = 1.0
    prev_error = np.inf
    while error > tol and (prev_error - error) > 1e-5 and iter < max_iter:
        dq = self._compute_dq(finger_id, xdes, q)
        # start the line search with a step size a bit larger than the
        # previous step size.
        dt = min(1.0, 2 * dt)
        prev_error = error
        q, error, dt = self._line_search(finger_id, xdes, q, dq, dt=dt)
        iter += 1
    if error > tol:
        return None
    return q
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_joint_positions(self, joint_angles ): \n\n\n # current angles\n res_joint_angles = joint_angles.copy() \n\n # detect limits\n maskminus= res_joint_angles > self.joint_lims[:,0]\n maskplus = res_joint_angles < self.joint_lims[:,1]\n \n res_joint_angles = res_joint_angles*(maskplus*maskminus) \n res_joint_angles += self.joint_lims[:,0]*(np.logical_not(maskminus) )\n res_joint_angles += self.joint_lims[:,1]*(np.logical_not(maskplus) )\n \n # mirror\n if self.mirror :\n res_joint_angles = -res_joint_angles\n res_joint_angles[0] += np.pi \n \n # calculate x coords of arm edges.\n # the x-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n x = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.cos( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # trabslate to the x origin \n x = np.hstack([self.origin[0], x+self.origin[0]])\n\n # calculate y coords of arm edges.\n # the y-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n y = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.sin( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # translate to the y origin \n y = np.hstack([self.origin[1], y+self.origin[1]])\n\n pos = np.array([x, y]).T\n \n return (pos, res_joint_angles)", "def end_effectors_pos(self):\n def relative_pos_in_egocentric_frame(physics):\n end_effector = physics.bind(self._entity.end_effectors).xpos\n torso = physics.bind(self._entity.root_body).xpos\n xmat = np.reshape(physics.bind(self._entity.root_body).xmat, (3, 3))\n return np.reshape(np.dot(end_effector - torso, xmat), -1)\n return observable.Generic(relative_pos_in_egocentric_frame)", "def jacobian_cost(self, joint_angles: dict, ee_goals) -> np.ndarray:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = ee_goals.keys()\n J = np.zeros(self.n)\n for (\n ee\n ) in end_effector_nodes: # iterate through end-effector nodes, assumes sorted\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. 
joint locations\n t_ee = self.get_pose(joint_angles, ee).trans\n dg_ee_x = t_ee[0] - ee_goals[ee].trans[0]\n dg_ee_y = t_ee[1] - ee_goals[ee].trans[1]\n for (pdx, joint_p) in enumerate(ee_path): # algorithm fills Jac per column\n p_idx = int(joint_p[1:]) - 1\n for jdx in range(pdx, len(ee_path)):\n node_jdx = ee_path[jdx]\n theta_jdx = sum([joint_angles[key] for key in ee_path[0 : jdx + 1]])\n J[p_idx] += (\n 2.0\n * self.a[node_jdx]\n * (-dg_ee_x * np.sin(theta_jdx) + dg_ee_y * np.cos(theta_jdx))\n )\n\n return J", "def fk(arm,base=np.identity(4),joint_num=-1):\n\n pEE = base # Cumulative pose of the End Effector \n # (initially set up as the base of the robot)\n if joint_num==-1:\n for joint in arm:\n pEE=np.dot(pEE, joint.dhMatrix())\n else:\n for i in range(joint_num):\n pEE=np.dot(pEE, arm[i].dhMatrix())\n\n return pEE", "def get_unhindered_positions(self, endposition):\n pass", "def connect(ends):\n d = np.diff(ends, axis=0)[0]\n j = np.argmax(np.abs(d))\n D = d[j]\n aD = np.abs(D)\n return ends[0] + (np.outer(np.arange(aD + 1), d) + (aD >> 1)) // aD", "def _calculate_h(self, end):\n return PATH_COST * (abs(self.x - end.x) + abs(self.y - end.y))", "def get_current_joint_position(self) -> list:\n joint_positions = get_joint_positions(self.body, self.joints[:self.DoF])\n for i in range(self.DoF):\n if self.JOINT_TYPES[i] == 'P':\n # get the unscaled joint position\n joint_positions[i] /= self.scaling\n return joint_positions", "def joint_variables(self, G: nx.Graph, T_final: dict = None) -> np.ndarray:\n # TODO: make this more readable\n tol = 1e-10\n q_zero = list_to_variable_dict(self.n * [0])\n kinematic_map = self.kinematic_map\n parents = self.parents\n get_pose = self.get_pose\n\n T = {}\n T[\"p0\"] = self.T_base\n theta = {}\n\n for ee in self.end_effectors:\n path = kinematic_map[\"p0\"][ee[0]][1:]\n axis_length = self.axis_length\n for node in path:\n aux_node = f\"q{node[1:]}\"\n pred = [u for u in parents.predecessors(node)]\n\n T_prev = T[pred[0]]\n\n T_prev_0 = get_pose(q_zero, pred[0])\n T_0 = get_pose(q_zero, node)\n T_rel = T_prev_0.inv().dot(T_0)\n T_0_q = get_pose(q_zero, node).dot(trans_axis(axis_length, \"z\"))\n T_rel_q = T_prev_0.inv().dot(T_0_q)\n\n p = G.nodes[node][POS] - T_prev.trans\n q = G.nodes[aux_node][POS] - T_prev.trans\n ps = T_prev.inv().as_matrix()[:3, :3].dot(p)\n qs = T_prev.inv().as_matrix()[:3, :3].dot(q)\n\n zs = skew(np.array([0, 0, 1]))\n cp = (T_rel.trans - ps) + zs.dot(zs).dot(T_rel.trans)\n cq = (T_rel_q.trans - qs) + zs.dot(zs).dot(T_rel_q.trans)\n ap = zs.dot(T_rel.trans)\n aq = zs.dot(T_rel_q.trans)\n bp = zs.dot(zs).dot(T_rel.trans)\n bq = zs.dot(zs).dot(T_rel_q.trans)\n\n c0 = cp.dot(cp) + cq.dot(cq)\n c1 = 2 * (cp.dot(ap) + cq.dot(aq))\n c2 = 2 * (cp.dot(bp) + cq.dot(bq))\n c3 = ap.dot(ap) + aq.dot(aq)\n c4 = bp.dot(bp) + bq.dot(bq)\n c5 = 2 * (ap.dot(bp) + aq.dot(bq))\n\n # poly = [c0 -c2 +c4, 2*c1 - 2*c5, 2*c0 + 4*c3 -2*c4, 2*c1 + 2*c5, c0 + c2 + c4]\n diff = np.array(\n [\n c1 - c5,\n 2 * c2 + 4 * c3 - 4 * c4,\n 3 * c1 + 3 * c5,\n 8 * c2 + 4 * c3 - 4 * c4,\n -4 * c1 + 4 * c5,\n ]\n )\n if all(diff < tol):\n theta[node] = 0\n else:\n sols = np.roots(\n diff\n ) # solutions to the Whaba problem for fixed axis\n\n def error_test(x):\n if abs(x.imag) > 0:\n return 1e6\n x = -2 * arctan2(x.real, 1)\n return (\n c0\n + c1 * sin(x)\n - c2 * cos(x)\n + c3 * sin(x) ** 2\n + c4 * cos(x) ** 2\n - c5 * sin(2 * x) / 2\n )\n\n sol = min(sols, key=error_test)\n theta[node] = -2 * arctan2(sol.real, 1)\n\n T[node] = 
(T_prev.dot(rot_axis(theta[node], \"z\"))).dot(T_rel)\n\n if T_final is None:\n return theta\n\n if (\n T_final[ee[0]] is not None\n and norm(cross(T_rel.trans, np.array([0, 0, 1]))) < tol\n ):\n T_th = (T[node]).inv().dot(T_final[ee[0]]).as_matrix()\n theta[ee[0]] += np.arctan2(T_th[1, 0], T_th[0, 0])\n\n return theta", "def jac_pos(self):\n J = self.sim.data.get_body_jacp(self.end_effector)\n J = J.reshape(3, -1)[:, 0:7].T\n return J", "def get_unhindered_positions(self, endposition):\n current_position = self.position\n potential_positions = { \n 'diag1' : [],\n 'diag2' : [],\n 'diag3' : [],\n 'diag4' : []\n }\n space_down = current_position[0]-1\n space_up = self.ncols-current_position[0]\n space_right = self.nrows - (ord('H')-ord(current_position[1]))\n space_left = ord(current_position[1]) - ord('A')\n\n for i in range(1, space_down+1):\n diag1 = (current_position[0]-i, chr(ord(current_position[1])+i))\n diag2 = (current_position[0]-i, chr(ord(current_position[1])-i))\n if self.pos_within_bounds(diag1):\n potential_positions['diag1'].append(diag1)\n if self.pos_within_bounds(diag2):\n potential_positions['diag2'].append(diag2)\n\n for i in range(1, space_up+1):\n diag3 = (current_position[0]+i, chr(ord(current_position[1])+i))\n diag4 = (current_position[0]+i, chr(ord(current_position[1])-i))\n if self.pos_within_bounds(diag3):\n potential_positions['diag3'].append(diag3)\n if self.pos_within_bounds(diag4):\n potential_positions['diag4'].append(diag4)\n \n for direction, square in potential_positions.items():\n if tuple(endposition) in square:\n return potential_positions[direction]", "def compute_joint_error_position(self, current_position, target_position):\n \n # helper variables\n tmp_c = []\n tmp_t = [] \n \n for x in range(0,20):\n tmp_c.append(current_position[x].joint_target)\n tmp_t.append(math.radians (target_position[x].joint_target) )\n \n # Compute the norm of the error\n error = numpy.linalg.norm( numpy.array(tmp_c) - numpy.array(tmp_t) )\n \n #print error \n\n return error", "def joint_pairs(self):\n return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], #17 body keypoints\n [20-3, 23-3], [21-3, 24-3], [22-3, 25-3], [26-3, 42-3], [27-3, 41-3], [28-3, 40-3], [29-3, 39-3], [30-3, 38-3], \n [31-3, 37-3], [32-3, 36-3], [33-3, 35-3], [43-3, 52-3], [44-3, 51-3], [45-3, 50-3], [46-3, 49-3], [47-3, 48-3], \n [62-3, 71-3], [63-3, 70-3], [64-3, 69-3], [65-3, 68-3], [66-3, 73-3], [67-3, 72-3], [57-3, 61-3], [58-3, 60-3],\n [74-3, 80-3], [75-3, 79-3], [76-3, 78-3], [87-3, 89-3], [93-3, 91-3], [86-3, 90-3], [85-3, 81-3], [84-3, 82-3],\n [94-3, 115-3], [95-3, 116-3], [96-3, 117-3], [97-3, 118-3], [98-3, 119-3], [99-3, 120-3], [100-3, 121-3],\n [101-3, 122-3], [102-3, 123-3], [103-3, 124-3], [104-3, 125-3], [105-3, 126-3], [106-3, 127-3], [107-3, 128-3],\n [108-3, 129-3], [109-3, 130-3], [110-3, 131-3], [111-3, 132-3], [112-3, 133-3], [113-3, 134-3], [114-3, 135-3]]", "def jacobian_linear(self, joint_angles: dict, query_frame: str = \"\") -> np.ndarray:\n\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = []\n for ee in self.end_effectors: # get p nodes in end-effectors\n if ee[0][0] == \"p\":\n end_effector_nodes += [ee[0]]\n if ee[1][0] == \"p\":\n end_effector_nodes += [ee[1]]\n\n node_names = [\n name for name in self.structure if name[0] == \"p\"\n ] # list of p node ids\n\n # Ts = self.get_full_pose_fast_lambdify(joint_angles) # all frame poses\n Ts = self.get_all_poses(joint_angles) # all frame poses\n 
Ts[\"p0\"] = np.eye(4)\n\n J = np.zeros([0, len(node_names) - 1])\n for ee in end_effector_nodes: # iterate through end-effector nodes\n ee_path = kinematic_map[ee][:-1] # no last node, only phys. joint locations\n\n T_0_ee = Ts[ee] # ee frame\n p_ee = T_0_ee[0:3, -1] # ee position\n\n Jp_t = np.zeros([3, len(node_names) - 1]) # translation jac for theta\n Jp_al = np.zeros([3, len(node_names) - 1]) # translation jac alpha\n for joint in ee_path: # algorithm fills Jac per column\n T_0_i = Ts[joint]\n z_hat_i = T_0_i[:3, 2]\n x_hat_i = T_0_i[:3, 0]\n p_i = T_0_i[:3, -1]\n j_idx = node_names.index(joint)\n Jp_t[:, j_idx] = np.cross(z_hat_i, p_ee - p_i)\n Jp_al[:, j_idx] = np.cross(x_hat_i, p_ee - p_i)\n\n J_ee = np.vstack([Jp_t, Jp_al])\n J = np.vstack([J, J_ee]) # stack big jac for multiple ee\n\n return J", "def get_end_vertices(self):\n # Note that concatenating two vertices needs to make a\n # vertices for the frame.\n extesion_fraction = self.extesion_fraction\n\n corx = extesion_fraction*2.\n cory = 1./(1. - corx)\n x1, y1, w, h = 0, 0, 1, 1\n x2, y2 = x1 + w, y1 + h\n dw, dh = w*extesion_fraction, h*extesion_fraction*cory\n\n if self.extend in [\"min\", \"both\"]:\n bottom = [(x1, y1),\n (x1+w/2., y1-dh),\n (x2, y1)]\n else:\n bottom = [(x1, y1),\n (x2, y1)]\n\n if self.extend in [\"max\", \"both\"]:\n top = [(x2, y2),\n (x1+w/2., y2+dh),\n (x1, y2)]\n else:\n top = [(x2, y2),\n (x1, y2)]\n\n if self.orientation == \"horizontal\":\n bottom = [(y,x) for (x,y) in bottom]\n top = [(y,x) for (x,y) in top]\n\n return bottom, top", "def end_tensor(end_vector, # type: Union[np.ndarray, List[int]]\n center_in, # type: List[int]\n center_out, # type: List[int]\n surround_in, # type: List[int]\n surround_out, # type: List[int]\n attractor_function=__linear_function_generator, # type: Callable[[], Callable[[Real], Real]]\n size=3\n ):\n ndim = len(end_vector)\n assert ndim >= 1\n if not isinstance(end_vector, np.ndarray):\n end_vector = np.asarray(end_vector)\n\n attractor_function = attractor_function()\n\n center_surround = np.zeros(shape=[size for _ in range(ndim)] + [len(center_out), len(center_in)])\n zero_centered = np.ndarray(shape=[size for _ in range(ndim)])\n\n for tup in itertools.product(*[range(size) for _ in range(ndim)]):\n tup_vec = np.asarray(tup) - np.asarray([size / 2 for _ in range(ndim)])\n angle_dist = (m.acos(\n np.dot(tup_vec, end_vector) / (np.linalg.norm(tup_vec) * np.linalg.norm(end_vector))) - m.pi / 2.0) / m.pi\n zero_centered[tup] = attractor_function(angle_dist * m.pi)\n\n __normalize_center_surround(zero_centered)\n\n for tup in itertools.product(*[range(size) for _ in range(ndim)]):\n center_surround[tup] = [[surround_out[o] * surround_in[i] * abs(zero_centered[tup]) if zero_centered[tup] < 0\n else center_surround[tuple(tup + (o, i))]\n for o in range(len(surround_out))] for i in range(len(surround_in))]\n center_surround[tup] = [[center_out[o] * center_in[i] * abs(zero_centered[tup]) if zero_centered[tup] >= 0\n else center_surround[tuple(tup + (i, o))]\n for o in range(len(center_out))] for i in range(len(center_in))]\n\n return center_surround", "def p2p_xyz(start_point, end_point, top_left_cor, cellsize, dem):\n start_cell = (int((start_point[0] - top_left_cor[0]) / cellsize[0]),\n int((start_point[1] - top_left_cor[1]) / cellsize[1]))\n end_cell = (int((end_point[0] - top_left_cor[0]) / cellsize[0]),\n int((end_point[1] - top_left_cor[1]) / cellsize[1]))\n cells = misc.get_line(start_cell, end_cell) \n pnts = []\n elev = []\n \n dem_elv = dem[:,1]\n dem_indx 
= dem[:,2:4]\n\n for cell in cells:\n x = top_left_cor[0] + cell[0] * cellsize[0] + cellsize[0] / 2\n y = top_left_cor[1] + cell[1] * cellsize[1] + cellsize[1] / 2\n #xy_indx=[str(cell[0]),str(cell[1])]\n z_indx=np.logical_and(np.equal(dem_indx[:,0],cell[0]),np.equal(dem_indx[:,1],cell[1]))\n try:\n z=dem_elv[z_indx][0]\n except (np.sum(z_indx)>1):\n print(\"Oops! That was more than one indices in dem matching the query index (in getCellValue)\")\n #z_indx = [i for i,j in enumerate(dem_indx) if j == xy_indx]\n z = float(dem_elv[z_indx])\n pnts.append((x, y))\n elev.append(z)\n return pnts, elev", "def enumerate_joint_ask(X, e, P):\n Q = ProbDist(X) ## A probability distribution for X, initially empty\n Y = [v for v in P.variables if v != X and v not in e]\n for xi in P.values(X):\n Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)\n return Q.normalize()", "def target_position(self, time):\n # get joint positions and use fk to get end effector position?\n # ar_tag from topic\n\n cur_pos = self.target_velocity(time)*time + self.start_pos\n\n self.points_generated.append(cur_pos)\n #print(self.start_pos)\n # print(cur_pos)\n return cur_pos", "def get_desired_joint_position(self):\n return self._position_joint_desired", "def moveit_cartesian_path(start_pos, start_quat,\n delta_xyz, moveit_group,\n eef_step, jump_threshold=0.0):\n start_pos = np.array(start_pos).flatten()\n\n delta_xyz = np.array(delta_xyz).flatten()\n end_pos = start_pos + delta_xyz\n moveit_waypoints = []\n wpose = moveit_group.get_current_pose().pose\n wpose.position.x = start_pos[0]\n wpose.position.y = start_pos[1]\n wpose.position.z = start_pos[2]\n wpose.orientation.x = start_quat[0]\n wpose.orientation.y = start_quat[1]\n wpose.orientation.z = start_quat[2]\n wpose.orientation.w = start_quat[3]\n moveit_waypoints.append(copy.deepcopy(wpose))\n\n wpose.position.x = end_pos[0]\n wpose.position.y = end_pos[1]\n wpose.position.z = end_pos[2]\n wpose.orientation.x = start_quat[0]\n wpose.orientation.y = start_quat[1]\n wpose.orientation.z = start_quat[2]\n wpose.orientation.w = start_quat[3]\n moveit_waypoints.append(copy.deepcopy(wpose))\n\n (plan, fraction) = moveit_group.compute_cartesian_path(\n moveit_waypoints, # waypoints to follow\n eef_step, # eef_step\n jump_threshold) # jump_threshold\n return plan", "def _solve_joints(self, bicep_angle: float, forearm_angle: float):\n bicep_length_x = self.bicep_length*math.cos(math.radians(bicep_angle))\n bicep_length_y = self.bicep_length*math.sin(math.radians(bicep_angle))\n forearm_length_x = self.forearm_length*math.cos(math.radians(bicep_angle + forearm_angle))\n forearm_length_y = self.forearm_length*math.sin(math.radians(bicep_angle + forearm_angle))\n elbow = (bicep_length_x, bicep_length_y)\n hand = (bicep_length_x + forearm_length_x, bicep_length_y + forearm_length_y)\n return elbow, hand", "def _generate_end_position(self):\n end_position = []\n new_row = []\n\n for i in range(1, self.PUZZLE_NUM_ROWS * self.PUZZLE_NUM_COLUMNS + 1):\n new_row.append(i)\n if len(new_row) == self.PUZZLE_NUM_COLUMNS:\n end_position.append(new_row)\n new_row = []\n\n end_position[-1][-1] = 0\n return end_position", "def estimate_foe(start, end):\n A = np.zeros(start.shape)\n diff = end - start\n A[:, 0] = diff[:, 1]\n A[:, 1] = -diff[:, 0]\n b = start[:, 0] * diff[:, 1] - start[:, 1] * diff[:, 0]\n foe = np.dot(np.linalg.inv(np.dot(A.T, A)), np.dot(A.T, b))\n foe = (int(foe[0]),int(foe[1]))\n return foe", "def compute_fk_position(self, jpos, tgt_frame):\n if isinstance(jpos, list):\n jpos = 
np.array(jpos)\n jpos = jpos.flatten()\n if jpos.size != self.arm_dof:\n raise ValueError('Length of the joint angles '\n 'does not match the robot DOF')\n assert jpos.size == self.arm_dof\n kdl_jnt_angles = joints_to_kdl(jpos)\n\n kdl_end_frame = kdl.Frame()\n idx = self.arm_link_names.index(tgt_frame) + 1\n fg = self._fk_solver_pos.JntToCart(kdl_jnt_angles,\n kdl_end_frame,\n idx)\n if fg < 0:\n raise ValueError('KDL Pos JntToCart error!')\n pose = kdl_frame_to_numpy(kdl_end_frame)\n pos = pose[:3, 3].flatten()\n rot = pose[:3, :3]\n return pos, rot", "def create_joint_trajectory(start_position, end_position,\n duration_of_trajectory, frequency_of_trajectory):\n\n frequency_of_ros_messages = frequency_of_trajectory # in Hz.\n number_of_way_points = duration_of_trajectory * frequency_of_ros_messages\n number_of_joints = start_position.__len__()\n trajectory = np.zeros((number_of_joints, number_of_way_points))\n\n for i in xrange(number_of_joints):\n trajectory[i] = np.linspace(start_position[i], end_position[i],\n number_of_way_points)\n trajectory = trajectory.T.copy()\n vel_lims = np.diff(trajectory, axis=0)\n #Because this is discrete differentiation,\n # the last value is missing: len(vel_lims) = len(trajectory) - 1\n # so we just repeat the last calculated velocity.\n vel_lims = np.append(vel_lims, [[x for x in vel_lims[-1,:]]], axis = 0)\n vel_lims = vel_lims * frequency_of_trajectory\n vel_lims = np.absolute(vel_lims)\n\n if vel_lims.all() > 1.0:\n raise ValueError(\"One or more of the values in the specified velocities\"\n \"Exceed 1 rad / second. The robot won't like this.\"\n \"Adjust the trajectory so that each point can be \"\n \"reached without exceeding this limit.\")\n return trajectory, vel_lims", "def hessian_cost(self, joint_angles: dict, ee_goals) -> np.ndarray:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = ee_goals.keys()\n H = np.zeros((self.n, self.n))\n for (\n ee\n ) in end_effector_nodes: # iterate through end-effector nodes, assumes sorted\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. 
joint locations\n t_ee = self.get_pose(joint_angles, ee).trans\n dg_ee_x = t_ee[0] - ee_goals[ee].trans[0]\n dg_ee_y = t_ee[1] - ee_goals[ee].trans[1]\n for (pdx, joint_p) in enumerate(ee_path): # algorithm fills Hess per column\n p_idx = int(joint_p[1:]) - 1\n sin_p_term = 0.0\n cos_p_term = 0.0\n for jdx in range(pdx, len(ee_path)):\n node_jdx = ee_path[jdx]\n theta_jdx = sum([joint_angles[key] for key in ee_path[0 : jdx + 1]])\n sin_p_term += self.a[node_jdx] * np.sin(theta_jdx)\n cos_p_term += self.a[node_jdx] * np.cos(theta_jdx)\n\n for (qdx, joint_q) in enumerate(\n ee_path[pdx:]\n ): # TODO: check if starting from pdx works\n qdx = qdx + pdx\n q_idx = int(joint_q[1:]) - 1\n sin_q_term = 0.0\n cos_q_term = 0.0\n for kdx in range(qdx, len(ee_path)):\n node_kdx = ee_path[kdx]\n theta_kdx = sum(\n [joint_angles[key] for key in ee_path[0 : kdx + 1]]\n )\n sin_q_term += self.a[node_kdx] * np.sin(theta_kdx)\n cos_q_term += self.a[node_kdx] * np.cos(theta_kdx)\n\n # assert(q_idx >= p_idx)\n H[p_idx, q_idx] += (\n 2.0 * sin_q_term * sin_p_term\n - 2.0 * dg_ee_x * cos_q_term\n + 2.0 * cos_p_term * cos_q_term\n - 2.0 * dg_ee_y * sin_q_term\n )\n\n return H + H.T - np.diag(np.diag(H))", "def get_joints(joint_listener): \n if LOCAL_TEST: # dummy\n return np.array([-0.5596, 0.5123, 0.5575, -1.6929, 0.2937, 1.6097, -1.237, 0.04, 0.04])\n else:\n joints = joint_listener.joint_position\n print('robot joints', joints)\n return joints", "def get_unhindered_positions(self, endposition):\n current_position = self.position\n potential_positions = potential_positions = {\n 'left' : [], \n 'right' : [],\n 'up' : [], \n 'down' : []\n }\n space_down = current_position[0]-1\n space_up = self.ncols-current_position[0]\n space_right = self.nrows - (ord('H')-ord(current_position[1]))\n space_left = ord(current_position[1]) - ord('A')\n\n for i in range(1, space_down+1):\n pos = (current_position[0]-i, current_position[1])\n if self.pos_within_bounds(pos):\n potential_positions['down'].append(pos)\n\n for i in range(1, space_up+1):\n pos = (current_position[0]+i, current_position[1])\n if self.pos_within_bounds(pos):\n potential_positions['up'].append(pos)\n \n for i in range(1, space_left+1):\n pos = (current_position[0], chr(ord(current_position[1])-i))\n if self.pos_within_bounds(pos):\n potential_positions['left'].append(pos)\n\n for i in range(1, space_right+1):\n pos = (current_position[0], chr(ord(current_position[1])+i))\n if self.pos_within_bounds(pos):\n potential_positions['right'].append(pos)\n\n for direction, square in potential_positions.items():\n if tuple(endposition) in square:\n return potential_positions[direction]", "def _get_output_steps_to_beam_indices(self, end_state: Tensor, beam_prev_indices: Tensor) ->List[int]:\n present_position = int(end_state[1])\n beam_index = int(end_state[2])\n beam_indices = torch.jit.annotate(List[int], [])\n while present_position >= 0:\n beam_indices.insert(0, beam_index)\n beam_index = int(beam_prev_indices[present_position][beam_index])\n present_position = present_position - 1\n return beam_indices", "def joint_trajectory(theta_start, theta_end, Tf, N, method):\n\n N = int(N)\n timegap = Tf / (N - 1.0) # N points, N-1 line segments\n traj = np.zeros((len(theta_start), N)) # intitialize the trajectory matrix, 1D joint vars, 2D each time instance\n\n # for each line segment, from 0 to T, calculate the corresponding s value (0to1)\n for i in range(N):\n if method == 3:\n s = cubic_time_scaling(Tf, timegap * i)\n else:\n s = quintic_time_scaling(Tf, timegap * 
i)\n traj[:, i] = s * np.array(theta_end) + (1 - s) * np.array(theta_start) # xi = x_start + (0.whatever fraction s)(x_end-x_start)\n traj = np.array(traj).T\n return traj", "def jacobian_linear_symb(\n self, joint_angles: dict, pose_term=False, ee_keys=None\n ) -> dict:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n\n if ee_keys is None:\n end_effector_nodes = []\n for ee in self.end_effectors: # get p nodes in end-effectors\n if ee[0][0] == \"p\":\n end_effector_nodes += [ee[0]]\n else:\n end_effector_nodes += [ee[1]]\n else:\n end_effector_nodes = ee_keys\n # Ts = self.get_all_poses_symb(joint_angles) # all frame poses\n Ts = self.get_all_poses(joint_angles) # all frame poses\n J = {} # np.zeros([0, len(node_names) - 1])\n for ee in end_effector_nodes: # iterate through end-effector nodes\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. joint locations\n\n T_0_ee = Ts[ee].as_matrix() # ee frame\n if pose_term:\n dZ = np.array([0.0, 0.0, 1.0])\n p_ee = T_0_ee[0:3, 0:3] @ dZ + T_0_ee[0:3, -1]\n else:\n p_ee = T_0_ee[0:3, -1] # ee position\n\n Jp = np.zeros([3, self.n], dtype=object) # translation jac\n for joint in ee_path: # algorithm fills Jac per column\n T_0_i = Ts[list(self.parents.predecessors(joint))[0]].as_matrix()\n z_hat_i = T_0_i[:3, 2]\n if pose_term:\n p_i = T_0_i[0:3, 0:3] @ dZ + T_0_i[0:3, -1]\n else:\n p_i = T_0_i[:3, -1]\n j_idx = int(joint[1:]) - 1 # node_names.index(joint) - 1\n Jp[:, j_idx] = cross_symb(z_hat_i, p_ee - p_i)\n J[ee] = Jp\n return J", "def joint_limits_upper_constraint(q,ee_pos):\n return self.max_angles - q", "def backtrack_to_start_to_draw_purpose(board, end):\r\n cell = board.at(end)\r\n # print(cell)\r\n path = []\r\n lis = []\r\n while cell != None:\r\n path.append(cell)\r\n cell = cell.path_from\r\n for i in path[-1:]:\r\n for j in i.position:\r\n lis.append(j)\r\n\r\n return path", "def get_return_from_grasp_joint_trajectory(self, start_joints, target_pose, n_steps=40):\n assert len(start_joints) == len(self.joint_indices)\n assert target_pose.frame.count('base_link') == 1\n \n # set active manipulator and start joint positions\n self.robot.SetDOFValues(start_joints, self.joint_indices)\n \n # initialize trajopt inputs\n rave_pose = tfx.pose(self.sim.transform_from_to(target_pose.matrix, target_pose.frame, 'world'))\n quat = rave_pose.orientation\n xyz = rave_pose.position\n quat_target = [quat.w, quat.x, quat.y, quat.z]\n xyz_target = [xyz.x, xyz.y, xyz.z]\n rave_mat = rave.matrixFromPose(np.r_[quat_target, xyz_target])\n \n request = self._get_return_from_grasp_trajopt_request(xyz_target, quat_target, n_steps)\n \n # convert dictionary into json-formatted string\n s = json.dumps(request) \n # create object that stores optimization problem\n prob = trajoptpy.ConstructProblem(s, self.sim.env)\n \n tool_link = self.robot.GetLink(self.tool_frame)\n def penalize_low_height(x):\n self.robot.SetDOFValues(x, self.joint_indices, False)\n z = tool_link.GetTransform()[2,3]\n return max(0, 10.0 - z)\n\n for t in xrange(n_steps-2):\n prob.AddErrorCost(penalize_low_height, [(t,j) for j in xrange(len(self.joint_indices))], \"ABS\", \"PENALIZE_LOW_HEIGHT_%i\"%t)\n \n # do optimization\n result = trajoptpy.OptimizeProblem(prob)\n \n self.robot.SetDOFValues(start_joints, self.joint_indices)\n prob.SetRobotActiveDOFs() # set robot DOFs to DOFs in optimization problem\n num_upsampled_collisions = self._num_collisions(result.GetTraj())\n print('Number of collisions: {0}'.format(num_upsampled_collisions))\n 
self.robot.SetDOFValues(start_joints, self.joint_indices)\n if num_upsampled_collisions > 2:\n return None\n else:\n return result.GetTraj()", "def joint_pairs(self):\n return ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))", "def compute_end_point(self):\n\n L = self.level\n P = L.prob\n\n # check if Mth node is equal to right point and do_coll_update is false, perform a simple copy\n if self.coll.right_is_node and not self.params.do_coll_update:\n # a copy is sufficient\n L.uend = P.dtype_u(L.u[-1])\n else:\n # start with u0 and add integral over the full interval (using coll.weights)\n L.uend = P.dtype_u(L.u[0])\n for m in range(self.coll.num_nodes):\n L.uend += L.dt * self.coll.weights[m] * L.f[m + 1]\n # add up tau correction of the full interval (last entry)\n if L.tau[-1] is not None:\n L.uend += L.tau[-1]\n\n return None", "def test_calcAngles_joint_centers(self):\n _,joint_centers = pycgmCalc.calcAngles(self.motion_data, vsk=self.cal_SM, frame=0,\\\n formatData=False, splitAnglesAxis=False, returnjoints=True)\n #Verify that several the expected joint_centers are returned.\n expected_Front_Head = np.array([ 255.19071198, 406.12081909, 1721.92053223])\n expected_LHip = np.array([182.57097863, 339.43231855, 935.52900126])\n expected_RHand = np.array([ 859.80614366, 517.28239823, 1051.97278944])\n expected_Thorax = np.array([256.149810236564, 364.3090603933987, 1459.6553639290375])\n expected_LKnee = np.array([143.55478579, 279.90370346, 524.78408753])\n expected_result = [expected_Front_Head, expected_LHip, expected_RHand, expected_Thorax, expected_LKnee]\n result = [\n joint_centers[0]['Front_Head'],\n joint_centers[0]['LHip'],\n joint_centers[0]['RHand'],\n joint_centers[0]['Thorax'],\n joint_centers[0]['LKnee']\n ]\n np.testing.assert_almost_equal(result, expected_result, self.rounding_precision)", "def _body_coord(self):\r\n cth = np.cos(self.theta)\r\n sth = np.sin(self.theta)\r\n M = self.P - 0.5 * np.diag(self.lengths)\r\n # stores the vector from the center of mass to the nose\r\n c2n = np.array([np.dot(M[self.nose], cth), np.dot(M[self.nose], sth)])\r\n # absolute position of nose\r\n T = -self.pos_cm - c2n - self.goal\r\n # rotating coordinate such that nose is axis-aligned (nose frame)\r\n # (no effect when \\theta_{nose} = 0)\r\n c2n_x = np.array([cth[self.nose], sth[self.nose]])\r\n c2n_y = np.array([-sth[self.nose], cth[self.nose]])\r\n Tcn = np.array([np.sum(T * c2n_x), np.sum(T * c2n_y)])\r\n\r\n # velocity at each joint relative to center of mass velocity\r\n vx = -np.dot(M, sth * self.dtheta)\r\n vy = np.dot(M, cth * self.dtheta)\r\n # velocity at nose (world frame) relative to center of mass velocity\r\n v2n = np.array([vx[self.nose], vy[self.nose]])\r\n # rotating nose velocity to be in nose frame\r\n Vcn = np.array([np.sum((self.v_cm + v2n) * c2n_x),\r\n np.sum((self.v_cm + v2n) * c2n_y)])\r\n # angles should be in [-pi, pi]\r\n ang = np.mod(\r\n self.theta[1:] - self.theta[:-1] + np.pi,\r\n 2 * np.pi) - np.pi\r\n return Tcn, ang, Vcn, self.dtheta", "def rel_kin(self, joints): # kinematic term\n order1 = [9, 5, 20, 1, 2]\n order2 = [8, 6, 4, 20, 3] # joints' order\n order3 = [10, 4, 8, 0, 20]\n refer1 = [5, 6, 4, 2, 0] # kinseg's order\n refer2 = [6, 5, 4, 3, 1]\n\n segrel = defaultdict(lambda: int(0))\n result = []\n cnts = np.zeros(21)\n\n for i in xrange(len(order1)):\n A = np.array([joints[order1[i]].Position.x, joints[order1[i]].Position.y, joints[order1[i]].Position.z])\n B = np.array([joints[order2[i]].Position.x, joints[order2[i]].Position.y, 
joints[order2[i]].Position.z])\n C = np.array([joints[order3[i]].Position.x, joints[order3[i]].Position.y, joints[order3[i]].Position.z])\n\n tmp = min(np.abs(np.linalg.norm(A-B)*100-self.kinseg[refer1[i]])/self.kinseg[refer1[i]], 1)\n segrel[order1[i]] += tmp\n segrel[order2[i]] += tmp\n\n tmp = min(np.abs(np.linalg.norm(A-C)*100-self.kinseg[refer2[i]])/self.kinseg[refer2[i]], 1)\n segrel[order1[i]] += tmp\n segrel[order3[i]] += tmp\n\n cnts[order1[i]] += 2\n cnts[order2[i]] += 1\n cnts[order3[i]] += 1\n\n for i in self.trg_jorder:\n result.append(1-(segrel[i]/cnts[i]))\n\n return result", "def compute_position(self, goal: np.ndarray) -> Any:\n return NotImplementedError", "def get_vehicle_end_index(self):\n return [len(self.matrix) - 1 for i in range(len(self.vehicles))]", "def affected_end(self):\n types = {alt.type for alt in self.ALT} # set!\n BAD_MIX = {INS, SV, BND, SYMBOLIC} # don't mix well with others\n if (BAD_MIX & types) and len(types) == 1 and list(types)[0] == INS:\n # Only insertions, return 0-based position right of first base\n return self.POS # right of first base\n else: # Return 0-based end position, behind last REF base\n return (self.POS - 1) + len(self.REF)", "def calculateExteriorElementBoundaryQuadrature(self):\n #\n #get physical locations of element boundary quadrature points\n #\n #assume all components live on the same mesh\n self.u[0].femSpace.elementMaps.getValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,\n self.ebqe['x'])\n #\n #get metric tensor and unit normals\n #\n if self.movingDomain:\n if self.tLast_mesh != None:\n self.ebqe['xt'][:]=self.ebqe['x']\n self.ebqe['xt']-=self.ebqe['x_last']\n alpha = 1.0/(self.t_mesh - self.tLast_mesh)\n self.ebqe['xt']*=alpha\n else:\n self.ebqe['xt'][:]=0.0\n self.ebqe['x_last'][:]=self.ebqe['x']\n self.u[0].femSpace.elementMaps.getJacobianValuesGlobalExteriorTrace_movingDomain(self.elementBoundaryQuadraturePoints,\n self.ebqe['xt'],\n self.ebqe['inverse(J)'],\n self.ebqe['g'],\n self.ebqe['sqrt(det(g))'],\n self.ebqe['n'])\n else:\n self.u[0].femSpace.elementMaps.getJacobianValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,\n self.ebqe['inverse(J)'],\n self.ebqe['g'],\n self.ebqe['sqrt(det(g))'],\n self.ebqe['n'])\n #now map the physical points back to the reference element\n #assume all components live on same mesh\n self.u[0].femSpace.elementMaps.getInverseValuesGlobalExteriorTrace(self.ebqe['inverse(J)'],self.ebqe['x'],self.ebqe['hat(x)'])\n #\n #since the points on the reference boundary may be reordered on many right element boundaries, we\n #have to use an array of reference boundary points on all element boundaries\n #first copy the left reference element boundary quadrature points from the reference element boundary\n self.testSpace[0].getBasisValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,\n self.ebqe[('w',0)])\n cfemIntegrals.calculateWeightedShapeGlobalExteriorTrace(self.mesh.exteriorElementBoundariesArray,\n self.mesh.elementBoundaryElementsArray,\n self.mesh.elementBoundaryLocalElementBoundariesArray,\n self.elementBoundaryQuadratureWeights[('f',0)],\n self.ebqe['sqrt(det(g))'],\n self.ebqe[('w',0)],\n self.ebqe[('w*dS_f',0)])\n self.u[0].femSpace.getBasisGradientValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,\n self.ebqe['inverse(J)'],\n self.ebqe[('grad(v)',0)])\n #setup flux boundary conditions\n self.fluxBoundaryConditionsObjectsDict = dict([(cj,FluxBoundaryConditions(self.mesh,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 
self.ebqe[('x')],\n self.advectiveFluxBoundaryConditionsSetterDict[cj],\n self.diffusiveFluxBoundaryConditionsSetterDictDict[cj]))\n for cj in self.advectiveFluxBoundaryConditionsSetterDict.keys()])\n self.ebqe['dS'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n cfemIntegrals.calculateIntegrationWeights(self.ebqe['sqrt(det(g))'],\n self.elementBoundaryQuadratureWeights[('u',0)],\n self.ebqe['dS'])\n for ci in range(self.nc): self.ebqe[('dS',ci)] = self.ebqe['dS']\n #\n self.ellamDiscretization.calculateExteriorElementBoundaryQuadrature(self.ebqe)\n #\n self.coefficients.initializeGlobalExteriorElementBoundaryQuadrature(self.timeIntegration.t,self.ebqe)", "def _handle_bounds_speedj(self):\n inside_bound, inside_buffer_bound, mat, xyz = self._check_bound(self._qt_[-1])\n inside_angle_bound = np.all(self._angles_low <= self._qt_[-1, self._joint_indices]) and \\\n np.all(self._qt_[-1, self._joint_indices] <= self._angles_high)\n if inside_bound:\n self.return_point = None\n if inside_angle_bound:\n self.angle_return_point = False\n if not inside_bound:\n if self.return_point is None:\n # we are outside the bounds and return point wasn't computed yet\n print(\"outside box bound\")\n xyz = np.clip(xyz, self._end_effector_low + self._box_bound_buffer,\n self._end_effector_high - self._box_bound_buffer)\n mat[:3, 3] = xyz\n ref_pos = self._q_ref.copy()\n ref_pos[self._joint_indices] = self._q_[-1, self._joint_indices]\n solutions = ur_utils.inverse_near(mat, wrist_desired=self._q_ref[-1], ref_pos=ref_pos,\n params=self._ik_params)\n servoj_q = self._q_ref.copy()\n if len(solutions) == 0:\n servoj_q[self._joint_indices] = self._q_[-1, self._joint_indices]\n else:\n servoj_q[self._joint_indices] = solutions[0][self._joint_indices]\n self.return_point = servoj_q[self._joint_indices]\n # Speed at which arm approaches the boundary. The faster this speed,\n # the larger opposite acceleration we need to apply in order to slow down\n self.init_boundary_speed = np.max(np.abs(self._qd_.copy()))\n # if return point is already computed, keep going to it, no need\n # to recompute it at every time step\n self._cmd_ = self.return_point - self._q_[0][self._joint_indices]\n # Take the direction to return point and normalize it to have norm 0.1\n if np.linalg.norm(self._cmd_) != 0:\n self._cmd_ /= np.linalg.norm(self._cmd_) / 0.1\n\n self._speedj_packet[1:1 + 6][self._joint_indices] = self._cmd_\n # This acceleration guarantees that we won't move beyond\n # the bounds by more than 0.05 radian on each joint. 
This\n # follows from kinematics equations.\n accel_to_apply = np.max(np.abs(self._qd_)) * self.init_boundary_speed / 0.05\n\n # self.boundary_packet[1:1 + 6][self.joint_indices] = self.return_point\n # self.actuator_comms['UR5'].actuator_buffer.write(self.reset_packet)\n # time.sleep(1.0)\n self._speedj_packet[-2] = np.clip(accel_to_apply, 2.0, 5.0)\n self._actuation_packet_['UR5'] = self._speedj_packet\n self._cmd_.fill(0.0)\n self._cmd_prev_.fill(0.0)\n self._first_deriv_.fill(0.0)\n\n elif not inside_angle_bound:\n # if return point is already computed, keep going to it, no need\n self.rel_indices = self._joint_indices\n cur_pos = self._q_[0][self._joint_indices]\n clipped_pos = np.clip(cur_pos, self._angles_low + self._angle_bound_buffer,\n self._angles_high - self._angle_bound_buffer)\n # a point within the box to which we will be returning\n affected_joints = np.where(clipped_pos != cur_pos)\n if not self.angle_return_point:\n print(\"outside of angle bound on joints %r\" % (list(affected_joints[0])))\n self.angle_return_point = True\n self._cmd_[affected_joints] = np.sign(clipped_pos - cur_pos)[affected_joints]*np.max(np.abs(self._cmd_))\n self._speedj_packet[1:1 + 6][self._joint_indices] = self._cmd_\n self._actuation_packet_['UR5'] = self._speedj_packet", "def get_neighb_coords(self, i, ci):\n j = self.conn[i][ci]\n rj = self.xyz[j].copy()\n if self.periodic:\n if self.use_pconn:\n img = self.pconn[i][ci]\n rj += np.dot(img, self.cell)\n else:\n all_rj = rj + self.images_cellvec\n all_r = all_rj - self.xyz[i]\n all_d = np.sqrt(np.add.reduce(all_r*all_r,1))\n closest = np.argsort(all_d)[0]\n return all_rj[closest]\n return rj", "def compute_observation(self):\n robotPos, robotOrn = p.getBasePositionAndOrientation(self.botId)\n robotEuler = p.getEulerFromQuaternion(robotOrn)\n linear, angular = p.getBaseVelocity(self.botId)\n return (np.array([robotEuler[0],angular[0],self.vt], dtype='float32'))", "def end_points(self, origin, destination):\n # origin and destination are components with bounding-boxes\n # direction is a 2 char code representing starting and ending directions\n # 'h' horizontal, 'v' vertical\n o_coords = origin.bounding_coords()\n d_coords = destination.bounding_coords()\n\n start = {\n \"h\": core.Coords(o_coords.x2, o_coords.y1 + origin.height / 2),\n \"v\": core.Coords(origin.x + (o_coords.x2 - o_coords.x1) / 2, o_coords.y2),\n }\n end = {\n \"h\": core.Coords(d_coords.x1, d_coords.y1 + destination.height / 2),\n \"v\": core.Coords(\n destination.x + (d_coords.x2 - d_coords.x1) / 2, d_coords.y1\n ),\n }\n self.start = start[self.direction[0]]\n self.end = end[self.direction[-1]]\n return (self.start, self.end)", "def comet_joint_orient(pynodes, aim_axis = None, up_axis = None, up_dir = None, do_auto = False):\n if aim_axis is None:\n aim_axis = [1, 0, 0]\n if up_axis is None:\n up_axis = [0, 0, 1]\n if up_dir is None:\n up_dir = [1, 0, 0]\n # convert to Vector\n aim_axis = pm.dt.Vector(aim_axis)\n up_axis = pm.dt.Vector(up_axis)\n up_dir = pm.dt.Vector(up_dir)\n # Filter supplied pynodes, if equal to 0 then return false\n if len(pynodes) == 0:\n return False\n\n # make sure only joint get passed through here\n pynodes = pm.ls(pynodes, type = 'joint')\n\n # init variable prevUp for later use\n prev_up = pm.dt.Vector()\n\n for i, o in enumerate(pynodes):\n parent_point = None\n # first we need to unparent everything and then store that,\n children = o.getChildren()\n for x in children:\n x.setParent(None)\n\n # find parent for later in case we need it\n parent = 
o.getParent()\n\n # Now if we have a child joint... aim to that\n aim_tgt = None\n for child in children:\n if child.nodeType() == 'joint':\n aim_tgt = child\n break\n\n if aim_tgt:\n # init variable upVec using upDir variable\n up_vec = pm.dt.Vector(up_dir)\n\n # first off... if doAuto is on, we need to guess the cross axis dir\n if do_auto:\n # now since the first joint we want to match the second orientation\n # we kind of hack the things passed in if it is the first joint\n # ie: if the joint doesnt have a parent... or if the parent it has\n # has the 'same' position as itself... then we use the 'next' joints\n # as the up cross calculations\n jnt_point = o.getRotatePivot(space = 'world')\n if parent:\n parent_point.setValue(parent.getRotatePivot(space = 'world'))\n else:\n parent_point = jnt_point.copy()\n aim_tgt_point = aim_tgt.getRotatePivot(space = 'world')\n\n # how close to we consider 'same'?\n tol = 0.0001\n\n point_cond = jnt_point - parent_point\n pos_cond = [abs(x) for x in point_cond.tolist()]\n if not parent or pos_cond[0] <= tol and pos_cond[1] <= tol and pos_cond[2] <= tol:\n # get aimChild\n aim_child = None\n aim_children = aim_tgt.getChildren(type = 'joint')\n if aim_children:\n aim_child = aim_children[0]\n\n # get aimChild vector\n if aim_child:\n aim_child_point = aim_child.getRotatePivot(space = 'world')\n else:\n aim_child_point = pm.dt.Vector()\n\n # find the up vector using child vector of aim target\n up_vec = (jnt_point - aim_tgt_point).cross(aim_child_point - aim_tgt_point)\n else:\n # find the up vector using the parent vector\n up_vec = (parent_point - jnt_point).cross(aim_tgt_point - jnt_point)\n\n # reorient the current joint\n a_cons = pm.aimConstraint(\n aim_tgt, o, aimVector = aim_axis, upVector = up_axis, worldUpVector = up_vec.tolist(),\n worldUpType = 'vector', weight = 1\n )\n pm.delete(a_cons)\n\n # now compare the up we used to the prev one\n current_up = up_vec.normal()\n # dot product for angle between... store for later\n dot = current_up.dot(prev_up)\n prev_up = up_vec\n\n if i > 0 >= dot:\n # adjust the rotation axis 180 if it looks like we have flopped the wrong way!\n # FIXME: some shit need to fix here\n # pm.xform( o, relative = True, objectSpace = True, rotateAxis = True )\n o.rotateX.set(o.rotateX.get() + (aim_axis.x * 180))\n o.rotateY.set(o.rotateY.get() + (aim_axis.y * 180))\n o.rotateZ.set(o.rotateZ.get() + (aim_axis.z * 180))\n\n prev_up *= -1\n elif parent:\n # otherwise if there is no target, just dup orientation of parent...\n transformation.align(o, parent, mode = 'rotate')\n\n # and now finish clearing out joint axis ...\n pm.joint(o, e = True, zeroScaleOrient = True)\n transformation.freeze_transform(o)\n\n # now that we are done ... 
reparent\n if len(children) > 0:\n for x in children:\n x.setParent(o)\n\n return True", "def startEndPoints(mazz):\n for i in range (len(mazz)):\n for j in range (len(mazz[i])):\n if mazz[i][j] == 6:\n startx = i\n starty = j\n elif mazz[i][j] == 7:\n endx = i\n endy = j\n return startx, starty, endx, endy", "def _get_observation_upper_bound(self):\n upper_bound = np.zeros(self._get_observation_dimension())\n num_motors = self.rex.num_motors\n upper_bound[0:num_motors] = math.pi # Joint angle.\n upper_bound[num_motors:2 * num_motors] = motor.MOTOR_SPEED_LIMIT # Joint velocity.\n upper_bound[2 * num_motors:3 * num_motors] = motor.OBSERVED_TORQUE_LIMIT # Joint torque.\n upper_bound[3 * num_motors:-7] = 1.0 # Quaternion of base orientation.\n upper_bound[-7] = 1.0 # ratio in [0,1]\n upper_bound[-6:-2] = [1.0, 1.0, 1.0, 1.0] # sin in [-1, 1]\n upper_bound[-2:] = [self.max_speed, self.max_side_speed]\n\n return upper_bound", "def get_fk(self, joints):\n\n header = Header()\n header.frame_id = self.group.get_planning_frame()\n\n robot_state = self.robot.get_current_state()\n robot_state.joint_state.position = joints\n\n links = [self.group.get_end_effector_link()]\n\n return self.fk_solver(header, links, robot_state).pose_stamped[0]", "def trigger_points(self, eouts, elens):\n bs, xmax, _ = eouts.size()\n log_probs = torch.log_softmax(self.output(eouts), dim=-1)\n best_paths = log_probs.argmax(-1)\n hyps = []\n for b in range(bs):\n indices = [best_paths[b, t].item() for t in range(elens[b])]\n collapsed_indices = [x[0] for x in groupby(indices)]\n best_hyp = [x for x in filter(lambda x: x != self.blank, collapsed_indices)]\n hyps.append(best_hyp)\n ymax = max([len(h) for h in hyps])\n trigger_points_pred = log_probs.new_zeros((bs, ymax + 1), dtype=torch.int32)\n for b in range(bs):\n n_triggers = 0\n for t in range(elens[b]):\n token_idx = best_paths[b, t]\n if token_idx == self.blank:\n continue\n if not (t == 0 or token_idx != best_paths[b, t - 1]):\n continue\n trigger_points_pred[b, n_triggers] = t\n n_triggers += 1\n return trigger_points_pred", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n y = []\r\n dp_scores = []\r\n back_pointer = []\r\n\r\n for i in xrange(N):\r\n dp_scores.append([])\r\n back_pointer.append([])\r\n for j in xrange(L):\r\n if (i == 0):\r\n score = start_scores[j] + emission_scores[0, j]\r\n back = -1\r\n else:\r\n max = dp_scores[i-1][0] + trans_scores[0, j]\r\n back = 0\r\n for k in xrange(L):\r\n if (dp_scores[i-1][k] + trans_scores[k, j] > max):\r\n max = dp_scores[i-1][k] + trans_scores[k, j]\r\n back = k\r\n score = max + emission_scores[i, j]\r\n dp_scores[i].append(score)\r\n back_pointer[i].append(back)\r\n\r\n s = dp_scores[N-1][0] + end_scores[0]\r\n back = 0\r\n for k in xrange(L):\r\n if (dp_scores[N-1][k] + end_scores[k] > s):\r\n s = dp_scores[N-1][k] + end_scores[k]\r\n back = k\r\n\r\n y.append(back)\r\n for i in range(N-1, 0, -1):\r\n y.append(back_pointer[i][back])\r\n back = back_pointer[i][back]\r\n y.reverse()\r\n\r\n return (s, y)", "def get_move_indexes(i, j):\n return (i, j), (j, n - 1 - i), (n - 1 - 
i, n - 1 - j), (n - 1 - j, i)", "def _findExonEnd(self, exonRecs, iBlkStart):\n iBlkEnd = iBlkStart + 1\n while (iBlkEnd < len(exonRecs)) and (self._tGapSize(exonRecs, iBlkEnd) < minIntronSize):\n iBlkEnd += 1\n return iBlkEnd, exonRecs[iBlkEnd - 1].end - exonRecs[iBlkStart].start", "def\tbegin_end(env, blc):\n\n\tb_e = np.empty((blc.shape[0], 2))\n\tinf = 0\n\tp = 0\n\twin = env.win_over\n\tb_e[0, 0] = inf\n\tif blc[0] + win <= blc[-1]:\n\t\tb_e[0, 1] = blc[0] + win\n\telse:\n\t\tb_e[0, 1] = blc[-1]\n\tif blc.shape[0] == 1:\n\t\tb_e[0, 1] = blc[0]\n\t\treturn (b_e)\n\tfor k in range(1, blc.shape[0] - 1):\n\t\tinf = blc[k - 1] - win\n\t\tb_e[k, 0] = inf\n\t\tif blc[k] + win <= blc[-1]:\n\t\t\tb_e[k, 1] = blc[k] + win\n\t\telse:\n\t\t\tb_e[k, 1] = blc[-1]\n\tb_e[blc.shape[0] - 1, 0] = blc[-2] - win\n\tb_e[blc.shape[0] - 1, 1] = blc[-1]\n\tneg = np.where(b_e < 0)[0]\n\tif neg.shape[0]:\n\t\tb_e = b_e[neg[-1]:]\n\t\tb_e[0, 0] = 0\n\treturn (b_e)", "def get_bend_port_distances(bend: Component) -> Tuple[float64, float64]:\n p0, p1 = bend.ports.values()\n return abs(p0.x - p1.x), abs(p0.y - p1.y)", "def end_effectors(self) -> list:\n if not hasattr(self, \"_end_effectors\"):\n S = self.structure\n self._end_effectors = [\n [x, y]\n for x in S\n if S.out_degree(x) == 0\n for y in S.predecessors(x)\n if DIST in S[y][x]\n if S[y][x][DIST] < np.inf\n ]\n\n return self._end_effectors", "def end_effectors(self) -> list:\n if not hasattr(self, \"_end_effectors\"):\n S = self.structure\n self._end_effectors = [\n [x, y]\n for x in S\n if S.out_degree(x) == 0\n for y in S.predecessors(x)\n if DIST in S[y][x]\n if S[y][x][DIST] < np.inf\n ]\n\n return self._end_effectors", "def getPos(self,len,end,nodes):\n start=end\n if self.count==nodes:\n last=len\n else:\n last=end+(int)(len/(nodes+1))\n self.count+=1\n return (start,last)", "def heuristicValueOfPosition(currPositions):\n hVal = 0;\n\n for y in range(1, n+1): #1,2,3\n for x in range(1, n+1):\n val = currPositions[y][x];\n if ((val == 0) or (goalPositions[val] == (y,x))): #val 0 means blank\n continue;\n else:\n hVal += abs(y-goalPositions[val][0]) + abs(x-goalPositions[val][1])\n\n return hVal;", "def get_ecc(self):\n mu_mass = G*(self._mm + self._sm)\n h_mom = self.sp_ang_mom()\n vel = self.getvel_xyz()\n pos = self.getpos_xyz()\n e_vec = 1.0/mu_mass*(np.cross(vel, h_mom) -\n mu_mass*pos/np.linalg.norm(pos))\n return e_vec", "def __path_to_end(self) -> List[List[int]]:\n predecessors = self.__predecessors_list()\n path = []\n\n row_exit, col_exit = Player.find_exit_position(self.__labyrinth)\n dest = self.__convert_position(row_exit, col_exit)\n\n v = dest\n\n path.append([v // 10, v % 10])\n\n while predecessors[v] != -1:\n path.append(predecessors[v])\n v = self.__convert_position(predecessors[v][0], predecessors[v][1])\n\n return path[::-1]", "def end_point(self) -> Vec3:\n v = list(self.vertices([self.dxf.end_angle]))\n return v[0]", "def reach_gradient(self):\n step_size = 0.05\n min_step_size = 0.001\n moved_closer = True\n while_loop_counter = 0\n max_steps = 100\n old_total_cost = 10\n epsilon = 0.005\n\n # While moved closer and not reached minimum step size\n while moved_closer and step_size > min_step_size:\n while_loop_counter += 1\n # Set a maximum number of steps per change to see progress - used for testing\n if while_loop_counter > max_steps:\n break\n new_total_cost = 0\n text = \"\"\n i = 0\n\n # Go through each joint within the arm\n for joint_key, joint_value in self.joint_angles.items():\n # Text to show for each joint change\n 
text += str(self.joint_names[i]) + \" \"\n i += 1\n\n # Old endpoint values\n old_value = joint_value\n\n # Update joints in ROS with current self.joint_angle values\n self.update_angles()\n self.get_current_ee_pose() # Old endpoint\n\n # Determine cost from current end effector to target\n old_cost = self.cost(self.arm_endpoint)\n\n # Gradient of old values\n gradient = self.gradient(joint_key)\n if gradient > 0: # Determine direction of gradient\n direction = 1\n else:\n direction = -1\n\n # Determine new angle value based on gradient\n self.joint_angles[joint_key] = (old_value - direction * step_size)\n\n if self.joint_angles[joint_key] < self.joint_min[joint_key]:\n self.joint_angles[joint_key] = self.joint_min[joint_key]\n elif self.joint_angles[joint_key] > self.joint_max[joint_key]:\n self.joint_angles[joint_key] = self.joint_max[joint_key]\n\n # Update joint angle values within ROS and get new endpoint value\n self.update_angles()\n self.get_current_ee_pose()\n\n # Determine cost from current end effector to target\n new_cost = self.cost(self.arm_endpoint)\n\n # Determine the cost of\n if new_cost > old_cost:\n self.joint_angles[joint_key] = old_value\n new_total_cost += old_cost\n text += \": No change \\n\"\n else:\n text += \": Improved by \" + str(direction * step_size) + \"\\n\"\n new_total_cost += new_cost\n\n # Display change of each joint through text\n print(\"Robot part changes: \\n\", text)\n self.cost_values += [new_total_cost]\n\n # Check if improved from previous position\n if old_total_cost < new_total_cost:\n step_size -= .01\n moved_closer = False\n else:\n moved_closer = True\n\n print(\"abs(old_total_cost - new_total_cost): \", abs(old_total_cost - new_total_cost))\n print(\"new_total_cost: \", new_total_cost)\n # If changes are less than epsilon, we stop\n if abs(old_total_cost - new_total_cost) < epsilon:\n break\n old_total_cost = new_total_cost\n\n # Save new joint angle values\n save_file = \"/OptimizedAngles.csv\"\n print(\"Saving new joint angles at \", save_file)\n self.save_new_joint_angles(save_file)", "def at_b (self):\n self.argc = int((len(n.coord[0]))/2)\n self.pts_con = np.array(self.coord[:,self.argc:len(n.coord[0])])\n\n self.xd = self.xdi\n self.zd = self.zdi \n \n for i, x in enumerate(self.xdi):\n self.aux_con = self.pts_con[0] - x \n self.arg1 = np.argmin(abs(self.aux_con)) \n \n if (self.aux_con[self.arg1] < 0 and self.arg1 == 0) or (self.aux_con[self.arg1] > 0 and self.arg1 == len(self.aux_con)-1):\n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif (self.aux_con[self.arg1] > 0 and self.aux_con[self.arg1+1] > self.aux_con[self.arg1]): #(self.aux_con[self.arg1] < 0 and self.aux_con[self.arg1-1] > self.aux_con[self.arg1]) or \n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif self.aux_con[self.arg1] < 0:\n #print(self.arg1)\n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 - 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1])\n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n\n elif self.aux_con[self.arg1] > 0:\n #print(self.arg1) \n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 + 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1]) \n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n \n #print('Defensa {0}\\n{1}: {2}\\n{3}: 
{4}'.format(i,self.arg1,self.aux_con[self.arg1],self.arg2,self.aux_con[self.arg2])) \n \n #self.yd = self.yd\n self.b = np.array([self.xd,self.yd,self.zd])\n #self.b.loc[:,('y')] = self.b.loc[:,('y')] ", "def endtoend(d):\n dists = distance_matrix(d)\n avgdists = np.array([np.diag(dists, i).mean() for i in range(dists.shape[0])])\n return avgdists", "def calculate_coefficients(self, start, end):\n A = np.array([\n [self.deltaT**3, self.deltaT**4, self.deltaT**5],\n [3 * self.deltaT**2, 4 * self.deltaT**3, 5 * self.deltaT**4],\n [6 * self.deltaT, 12 * self.deltaT**2, 20 * self.deltaT**3],\n ])\n\n a_0, a_1, a_2 = start[0], start[1], start[2] / 2.0\n c_0 = a_0 + a_1 * self.deltaT + a_2 * self.deltaT**2\n c_1 = a_1 + 2 * a_2 * self.deltaT\n c_2 = 2 * a_2\n\n B = np.array([\n end[0] - c_0,\n end[1] - c_1,\n end[2] - c_2\n ])\n\n a_3_4_5 = np.linalg.solve(A, B)\n coeff = np.concatenate((np.array([a_0, a_1, a_2]), a_3_4_5))\n\n return coeff", "def lendiag(self):\n if self.y <= self.x:\n return self.y\n else:\n return self.x", "def get_frame_joints(self, frame):\n joints = frame[(self.POS_SIZE + self.ROT_SIZE):].copy()\n return joints", "def get_end_pos(cls, start_pos, dimensions, left=False, up=False):\n dx = (dimensions[0] - 1) * cls.width\n if left: dx = -dx\n dy = (dimensions[1] -1 ) * cls.height\n if up: dy = -dy\n \n end_pos = start_pos[0] + dx, start_pos[1] + dy\n return end_pos", "def find(self, ends):\n\n if self.function(ends[0]) * self.function(ends[1]) > 0:\n raise ValueError('Sign of function at both the end points must be opposite.')\n \n a = min(ends)\n b = max(ends)\n\n while (b - a > self.epsilon):\n mid = (a + b) / 2\n if self.function(mid) * self.function(a) > 0:\n a = mid\n else:\n b = mid\n \n return (a + b) / 2", "def evalBottomGuideEndDisp(length: float,\n bottomcurve_radius: float,\n tendonDistFromAxis: float,\n tendonOrientationDF: float)-> np.ndarray:\n horizontalDispAlongCurve = tendonDistFromAxis*sin(tendonOrientationDF)\n return np.array((\n tendonDistFromAxis*cos(tendonOrientationDF),\n tendonDistFromAxis*sin(tendonOrientationDF),\n -sqrt(bottomcurve_radius**2 - horizontalDispAlongCurve**2) + bottomcurve_radius - length/2\n ))", "def get_ee_points_velocities(ref_jacobian, ee_points, ref_rot, joint_velocities):\n ref_jacobians_trans = ref_jacobian[:3, :]\n ref_jacobians_rot = ref_jacobian[3:, :]\n ee_velocities_trans = np.dot(ref_jacobians_trans, joint_velocities)\n ee_velocities_rot = np.dot(ref_jacobians_rot, joint_velocities)\n ee_velocities = ee_velocities_trans + np.cross(ee_velocities_rot.reshape(1, 3),\n ref_rot.dot(ee_points.T).T)\n return ee_velocities.reshape(-1)", "def joints_torque(self):\r\n return self._arm.joints_torque", "def determine_move_position(self):\n green_probs = []\n net_size = len(self.net)\n adjacents = self.net[self.current_pos].adjacents\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in adjacents:\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i-1, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += 
self.enemy_net[i-1].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[4][0]\n green_probs.append((i, accum))\n #Returns the position in which the probability of\n #obtaining green when measuring is the lowest.\n return min(green_probs, key=itemgetter(1))[0]", "def _handle_bounds_servoj(self):\n inside_bound, inside_buffer_bound, mat, xyz = self._check_bound(self._qt_[-1])\n inside_angle_bound = np.all(self._angles_low <= self._qt_[-1, self._joint_indices]) and \\\n np.all(self._qt_[-1, self._joint_indices] <= self._angles_high)\n\n if inside_bound and inside_angle_bound:\n self.return_point = None\n return\n\n if self.return_point is None:\n # we are outside the bounds and return point wasn't computed yet\n if inside_bound and not inside_angle_bound:\n print(\"outside of angle bound\")\n self.rel_indices = self._joint_indices\n self._cmd_ = self._q_[0][self._joint_indices]\n self._cmd_ = np.clip(self._cmd_, self._angles_low + self._angle_bound_buffer,\n self._angles_high - self._angle_bound_buffer)\n # a point within the box to which we will be returning\n self.return_point = self._cmd_.copy()\n # Speed at which arm approaches the boundary. The faster this speed,\n # the larger opposite acceleration we need to apply in order to slow down\n self.init_boundary_speed = np.max(np.abs(self._qd_.copy()))\n\n else:\n print(\"outside box bound\")\n xyz = np.clip(xyz, self._end_effector_low + self._box_bound_buffer,\n self._end_effector_high - self._box_bound_buffer)\n mat[:3, 3] = xyz\n ref_pos = self._q_ref.copy()\n ref_pos[self._joint_indices] = self._q_[-1, self._joint_indices]\n solutions = ur_utils.inverse_near(mat, wrist_desired=self._q_ref[-1], ref_pos=ref_pos,\n params=self._ik_params)\n servoj_q = self._q_ref.copy()\n if len(solutions) == 0:\n servoj_q[self._joint_indices] = self._q_[-1, self._joint_indices]\n else:\n servoj_q[self._joint_indices] = solutions[0][self._joint_indices]\n self.return_point = servoj_q[self._joint_indices]\n # Speed at which arm approaches the boundary. The faster this speed,\n # the larger opposite acceleration we need to apply in order to slow down\n self.init_boundary_speed = np.max(np.abs(self._qd_.copy()))\n # if return point is already computed, keep going to it, no need\n # to recompute it at every time step\n self._cmd_ = self.return_point - self._q_[0][self._joint_indices]\n # Take the direction to return point and normalize it to have norm 0.1\n if np.linalg.norm(self._cmd_) != 0:\n self._cmd_ /= np.linalg.norm(self._cmd_) / 0.1\n\n self._speedj_packet[1:1 + 6][self._joint_indices] = self._cmd_\n # This acceleration guarantees that we won't move beyond\n # the bounds by more than 0.05 radian on each joint. 
This\n # follows from kinematics equations.\n accel_to_apply = np.max(np.abs(self._qd_)) * self.init_boundary_speed / 0.05\n\n # self.boundary_packet[1:1 + 6][self.joint_indices] = self.return_point\n # self.actuator_comms['UR5'].actuator_buffer.write(self.reset_packet)\n # time.sleep(1.0)\n self._speedj_packet[-2] = np.clip(accel_to_apply, 2.0, 5.0)\n self._actuation_packet_['UR5'] = self._speedj_packet\n self._cmd_.fill(0.0)\n self._cmd_prev_.fill(0.0)\n self._first_deriv_.fill(0.0)", "def __inverse_kinematics(self, guess, target_point):\n\n error = 1.0\n tolerance = 0.05\n\n # Initial Guess - Joint Angles\n thetas = np.matrix(guess) # thetas is list which is contain all axes theta angles.\n target_point = np.matrix(target_point) # X, Y, Z list to matrix for Target Position\n # print(target_point.shape)\n # Jacobian\n self.__calc_jacobian_matrix()\n tf_matrix_first_to_last = self.tf_matrices_list[-1]\n\n error_grad = []\n\n theta_dict = {}\n\n lr = 0.2\n while error > tolerance:\n for i in range(len(np.array(thetas)[0])):\n theta_dict[self.q[i]] = np.array(thetas)[0][i]\n\n theta_dict[self.q[-1]] = self.q[-1]\n\n calculated_target_point = np.matrix(self.get_coords_from_forward_kinematics(self.__forward_kinematics(np.array(thetas)[0])[-1]))\n logger.debug(f'calculated target point is \\n{calculated_target_point}')\n\n diff_wanted_calculated = target_point - calculated_target_point\n\n jacob_mat = np.matrix(self.jacobian_matrix.evalf(subs=theta_dict, chop=True, maxn=4)).astype(np.float64).T\n logger.debug(f'jacobian matrix is\\n{jacob_mat} \\n\\n diff is \\n {diff_wanted_calculated}')\n\n thetas = thetas + lr * (jacob_mat * diff_wanted_calculated.T)\n # thetas = np.array(thetas)[0] # this line's purpose is changing Q from matrix level to array level.\n\n prev_error = error\n\n error = linalg.norm(diff_wanted_calculated)\n\n if error > 10 * tolerance:\n lr = 0.3\n elif error < 10 * tolerance:\n lr = 0.2\n error_grad.append((error - prev_error))\n\n # print(error)\n return np.array(thetas)[0]", "def generate_obstacle_point(start, end):\n top_left = (start[0], start[1] - _OBSTACLE_SIZE)\n top_right = (end[0], end[1] - _OBSTACLE_SIZE)\n return start, end, top_right, top_left", "def compute_fk_velocity(self, jpos, jvel, tgt_frame):\n if isinstance(jpos, list):\n jpos = np.array(jpos)\n if isinstance(jvel, list):\n jvel = np.array(jvel)\n kdl_end_frame = kdl.FrameVel()\n kdl_jnt_angles = joints_to_kdl(jpos)\n kdl_jnt_vels = joints_to_kdl(jvel)\n kdl_jnt_qvels = kdl.JntArrayVel(kdl_jnt_angles, kdl_jnt_vels)\n idx = self.arm_link_names.index(tgt_frame) + 1\n fg = self._fk_solver_vel.JntToCart(kdl_jnt_qvels,\n kdl_end_frame,\n idx)\n if fg < 0:\n raise ValueError('KDL Vel JntToCart error!')\n end_twist = kdl_end_frame.GetTwist()\n return np.array([end_twist[0], end_twist[1], end_twist[2],\n end_twist[3], end_twist[4], end_twist[5]])", "def _inverse_kinematics(self):\n q_2 = np.arccos(\n (\n (self._x_obj_0 - self._jnt_lengths[2]) ** 2\n + self._y_obj_0 ** 2\n - self._jnt_lengths[0] ** 2\n - self._jnt_lengths[1] ** 2\n )\n / (2 * self._jnt_lengths[0] * self._jnt_lengths[1])\n )\n psi = np.arcsin(\n self._jnt_lengths[1] * np.sin(q_2)\n / np.sqrt(\n (self._x_obj_0 - self._jnt_lengths[2]) ** 2\n + self._y_obj_0 ** 2\n )\n )\n q_1 = (np.arctan2(self._x_obj_0 - self._jnt_lengths[2], self._y_obj_0)\n - psi)\n q_3 = np.pi / 2 - q_1 - q_2\n return np.array([q_1, q_2, q_3])", "def _robot_jpos_getter(self):\n return np.array(self.env._joint_positions)", "def run_viterbi(emission_scores, trans_scores, 
start_scores, end_scores):\r\n\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n #T - Score matrix same as in assignement pdf\r\n T = np.zeros(shape=(L,N))\r\n #Back pointers - to store the previous best tag for word at (i-1)th position\r\n #that resulted into current best tag for (i)th word \r\n back_pointer = np.full((L,N), -1)\r\n\r\n for i in xrange(L):\r\n emission = emission_scores[0][i]\r\n combined = emission + start_scores[i]\r\n T[i][0] = combined\r\n\r\n # Loop over all the words in a sequesnce\r\n for i in xrange(1, N):\r\n # Loop over all the tags for the word at index i \r\n for j in xrange(L):\r\n # Varibale for maximum tag score from previous word (word at i-1)\r\n tmp_max = float('-inf')\r\n tmp_max_idx = -1\r\n #Emission value of word at idx i from state (i.e tag) j\r\n emission = emission_scores[i][j]\r\n #Loop over all the possibile tags for previous word T[tag (1..L), word at i-1]\r\n #and get max among them. Store the corresponding back pointer for there T[tag (1..L), word at i-1]\r\n for k in xrange(L):\r\n transition = trans_scores[k][j]\r\n prev_path = T[k][i-1]\r\n combined = transition + prev_path\r\n if (tmp_max < combined):\r\n tmp_max = combined\r\n tmp_max_idx = k\r\n\r\n back_pointer[j][i] = tmp_max_idx\r\n T[j][i] = tmp_max + emission\r\n\r\n # Doing this step outside because if N == 1 then above loop will not run\r\n # Variable for maximum tag score\r\n tag_max = float('-inf')\r\n # Variable for back pointer(previous T[tag, word])\r\n tag_max_idx = -1\r\n for i in xrange(L):\r\n T[i][N-1] = T[i][N-1] + end_scores[i]\r\n if (tag_max < T[i][N-1]):\r\n tag_max = T[i][N-1]\r\n tag_max_idx = i\r\n # print(\"Max tag -> \" + str(tag_max_idx))\r\n\r\n #Variable to track the path length - should be equal to N\r\n path_length = 0\r\n #Variable to back track on the tags\r\n tag_idx = tag_max_idx\r\n #Varibale to track the word index in N\r\n word_idx = N-1 \r\n #Path strored using backtracking\r\n y = []\r\n\r\n #Getting the best path using backtracking on back_pointers\r\n while path_length != N-1:\r\n y.append(back_pointer[tag_idx][word_idx])\r\n tag_idx = back_pointer[tag_idx][word_idx]\r\n word_idx = word_idx - 1\r\n path_length = path_length + 1\r\n\r\n #Reversing the backtracked path\r\n y = y[::-1]\r\n #Adding the tag for the last word idx in N\r\n y.append(tag_max_idx)\r\n # print(\"Path -> \" + str(y))\r\n\r\n return (tag_max, y)", "def getEePointsJacobians(refJacobian, eePoints, refRot, numberOfJoints):\n eePoints = np.asarray(eePoints)\n refJacobiansTrans = refJacobian[:3, :]\n refJacobiansRot = refJacobian[3:, :]\n endEffectorPointsRot = np.expand_dims(refRot.dot(eePoints.T).T, axis=1)\n eePointsJacTrans = np.tile(refJacobiansTrans, (eePoints.shape[0], 1)) + \\\n np.cross(refJacobiansRot.T, endEffectorPointsRot).transpose(\n (0, 2, 1)).reshape(-1, numberOfJoints)\n eePointsJacRot = np.tile(refJacobiansRot, (eePoints.shape[0], 1))\n return eePointsJacTrans, eePointsJacRot", "def get_first_quadrant(self):\n num_copies_x = ceil(self.max_x / self.room_x)\n num_copies_x = int(num_copies_x)\n num_copies_y = ceil(self.max_y / self.room_y)\n num_copies_y = int(num_copies_y)\n\n player_exp_x = []\n player_exp_y = []\n guard_exp_x = []\n guard_exp_y = []\n # Loop expands along the x axis\n for i in range(0, num_copies_x + 1, 1):\n temp_player_y_list = []\n temp_guard_y_list = []\n r_x = 
self.room_x * i\n\n if len(player_exp_x) == 0:\n n_p_p_x = self.player_x\n else:\n n_p_p_x = (r_x - player_exp_x[-1][0]) + r_x\n player_exp_x.append([n_p_p_x, self.player_y, 1])\n\n if len(guard_exp_x) == 0:\n n_g_p_x = self.guard_x\n else:\n n_g_p_x = (r_x - guard_exp_x[-1][0]) + r_x\n guard_exp_x.append([n_g_p_x, self.guard_y, 7])\n\n # Loop expands along the x axis\n for j in range(1, num_copies_y + 1, 1):\n r_y = self.room_y * j\n if len(temp_guard_y_list) == 0:\n n_g_p_y = (r_y - self.guard_y) + r_y\n temp_guard_y_list.append(n_g_p_y)\n else:\n n_g_p_y = (r_y - temp_guard_y_list[-1]) + r_y\n temp_guard_y_list.append(n_g_p_y)\n guard_exp_y.append([n_g_p_x, n_g_p_y, 7])\n\n if len(temp_player_y_list) == 0:\n n_p_p_y = (r_y - self.player_y) + r_y\n temp_player_y_list.append(n_p_p_y)\n else:\n n_p_p_y = (r_y - temp_player_y_list[-1]) + r_y\n temp_player_y_list.append(n_p_p_y)\n player_exp_y.append([n_p_p_x, n_p_p_y, 1])\n\n return player_exp_x + guard_exp_x + player_exp_y + guard_exp_y", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\n \r\n trans_scores += start_scores\n back_ptrs = np.zeros_like(emission_scores,dtype=np.int32)\n emission_scores += start_scores\r\n em_scores = np.zeros_like(emission_scores)\n em_scores[0] = start_scores+emission_scores[0]\n \n for k in range(1,N):\n transition_plus_score =trans_scores+np.expand_dims(em_scores[k-1],1)\n back_ptrs[k] =np.argmax(transition_plus_score,0)\n em_scores[k] =np.max(transition_plus_score,0)+emission_scores[k]\n \n v = [np.argmax(end_scores+em_scores[-1])]\n v_score = np.max(end_scores+em_scores[-1])\n\n for back_ptr in reversed(back_ptrs[1:]):\n v.append(back_ptr[v[-1]])\n v.reverse()\n return v_score,v", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n # SHAPES \r\n # N = 5, L = 3\r\n # emission_scores = (5,3), trans_scores = (3,3)\r\n # start_scores = (3,), end_scores = (3,)\r\n\r\n # Creating the transition DP matrix\r\n T = [[0 for _ in range(N)] for _ in range(L)]\r\n backpointers = [[0 for _ in range(N)] for _ in range(L)]\r\n\r\n # Filling the first column\r\n for row in range(L):\r\n T[row][0] = emission_scores[0][row] + start_scores[row] # emission_scores matrix is (N X L)\r\n \r\n # Filling the rest of the transition matrix\r\n for col in range(1, N):\r\n for row in range(L):\r\n prev_list = []\r\n for prev_label in range(L):\r\n prev_list.append(trans_scores[prev_label, row] + T[prev_label][col-1])\r\n T[row][col] = max(prev_list) + emission_scores[col][row] \r\n backpointers[row][col] = np.argmax(prev_list)\r\n\r\n # Filling the last column\r\n for row in range(L):\r\n T[row][N-1] += end_scores[row]\r\n\r\n # print for debug\r\n # print \"T\"\r\n # for i in T:\r\n # print i\r\n \r\n # print \r\n # print\r\n\r\n # print \"B\"\r\n # for i in backpointers:\r\n # print i\r\n\r\n # Finding max score in last column of T matrix\r\n T = np.array(T)\r\n score = np.asscalar(np.max(T[:,N-1]))\r\n location = np.asscalar(np.argmax(T[:,N-1]))\r\n\r\n # Getting best sequence from right to left using backpointers\r\n y = [location]\r\n for col in 
range(N-1, 0, -1):\r\n y.insert(0, backpointers[location][col])\r\n location = backpointers[location][col]\r\n\r\n '''\r\n y = []\r\n for i in xrange(N):\r\n # stupid sequence\r\n y.append(i % L)\r\n # score set to 0\r\n return (0.0, y)\r\n '''\r\n return (score, y)", "def _get_joint_positions_all(self, abs_input: [list, np.ndarray]):\n return np.copy(abs_input)", "def _endx(self, parents):\n ALPHA = (1.-2*0.35**2)**0.5/2.\n BETA = 0.35/(self.n_gene-1)**0.5\n\n child = np.empty(self.n_gene+1)\n\n t1 = (parents[1, :self.n_gene]-parents[0, :self.n_gene]) / 2.\n t2 = np.random.normal(scale=ALPHA) * (\n parents[1, :self.n_gene] - parents[0, :self.n_gene]\n )\n t3 = np.sum(\n np.random.normal(scale=BETA, size=self.n_gene)[:, np.newaxis]\n * (\n parents[2:, :self.n_gene] - (\n np.sum(parents[2:, :self.n_gene], axis=0) / self.n_gene\n )\n ), axis=0\n )\n child[:self.n_gene] = t1 + t2 + t3\n\n return child", "def interpolate_to_parent(self, start, end, linspace_count):\n \n v = end - start\n length = norm(v)\n v = v / length # Make v a unit vector\n l = np.linspace(0, length, linspace_count) \n\n return np.array([start[i] + v[i] * l for i in range(3)])", "def calc_nearest_ind(self, robot_pose):\n pass", "def get_ending_direction_vector(self):\n\n total_length = len(self.pixel_list)\n\n if total_length < 2:\n return None\n elif total_length < 15:\n delta_x = self.pixel_list[-1].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-1].y - self.pixel_list[0].y\n return delta_y, delta_x\n else:\n delta_x = self.pixel_list[-15].x - self.pixel_list[-1].x\n delta_y = self.pixel_list[-15].y - self.pixel_list[-1].y\n return delta_y, delta_x", "def joint_angles(self, arm):\n self.main(1, arm.angles)\n # self.distance_service = rospy.Service('distance', Distance, self.distance)\n return Joint_anglesResponse(True)", "def get_imin(x1, x2, y, k=1, normalize=None, norm=np.inf):\n\n if normalize:\n y = normalize(y)\n\n y_tree = cKDTree(y)\n\n n = len(y)\n i_spec = np.zeros((2, n))\n\n for jj, x in enumerate([x1, x2]):\n\n if normalize:\n x = normalize(x)\n\n # construct state array for the joint processes:\n xy = np.c_[x,y]\n\n # store data pts in kd-trees for efficient nearest neighbour computations\n # TODO: choose a better leaf size\n x_tree = cKDTree(x)\n xy_tree = cKDTree(xy)\n\n # kth nearest neighbour distances for every state\n # query with k=k+1 to return the nearest neighbour, not counting the data point itself\n # dist, idx = xy_tree.query(xy, k=k+1, p=norm)\n dist, idx = xy_tree.query(xy, k=k+1, p=np.inf)\n epsilon = dist[:, -1]\n\n # for each point, count the number of neighbours\n # whose distance in the x-subspace is strictly < epsilon\n # repeat for the y subspace\n nx = np.empty(n, dtype=np.int)\n ny = np.empty(n, dtype=np.int)\n for ii in xrange(N):\n # nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n # ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n\n i_spec[jj] = digamma(k) - digamma(nx+1) + digamma(ny+1) + digamma(n) # version (1)\n\n i_min = np.mean(np.min(i_spec, 0))\n\n return i_min", "def inv_kin(self, ee_pos=None):\n\n if ee_pos != None:\n self.ee_pos = ee_pos\n else:\n ee_pos = self.ee_pos\n \n def distance_to_default(q, *args):\n \"\"\"Objective function to minimize\n Calculates the euclidean distance through joint space to the\n default arm 
configuration. The weight list allows the penalty of\n each joint being away from the resting position to be scaled\n differently, such that the arm tries to stay closer to resting\n state more for higher weighted joints than those with a lower\n weight.\n\n q : np.array\n the list of current joint angles\n\n returns : scalar\n euclidean distance to the default arm position\n \"\"\"\n # weights found with trial and error,\n # get some wrist bend, but not much\n weight = [1, 1, 1.3]\n return np.sqrt(np.sum([(qi - q0i)**2 * wi\n for qi, q0i, wi in zip(q, self.q0, weight)]))\n\n def x_constraint(q,ee_pos):\n \"\"\"Returns the corresponding hand self.ee_pos coordinates for\n a given set of joint angle values [shoulder, elbow, wrist],\n and the above defined arm segment lengths, L\n\n q : np.array\n the list of current joint angles\n self.ee_pos : np.array\n current self.ee_pos position (not used)\n\n returns : np.array\n the difference between current and desired x position\n \"\"\"\n x = (self.length[0]*np.cos(q[0]) + self.length[1]*np.cos(q[0]+q[1]) +\n self.length[2]*np.cos(np.sum(q))) - self.ee_pos[0]\n return x\n\n def y_constraint(q,ee_pos):\n \"\"\"Returns the corresponding hand self.ee_pos coordinates for\n a given set of joint angle values [shoulder, elbow, wrist],\n and the above defined arm segment lengths, L\n\n q : np.array\n the list of current joint angles\n self.ee_pos : np.array\n current self.ee_pos position (not used)\n returns : np.array\n the difference between current and desired y position\n \"\"\"\n y = (self.length[0]*np.sin(q[0]) + self.length[1]*np.sin(q[0]+q[1]) +\n self.length[2]*np.sin(np.sum(q))) - self.ee_pos[1]\n return y\n\n def joint_limits_upper_constraint(q,ee_pos):\n \"\"\"Used in the function minimization such that the output from\n this function must be greater than 0 to be successfully passed.\n\n q : np.array\n the current joint angles\n self.ee_pos : np.array\n current self.ee_pos position (not used)\n\n returns : np.array\n all > 0 if constraint matched\n \"\"\"\n return self.max_angles - q\n\n def joint_limits_lower_constraint(q,ee_pos):\n \"\"\"Used in the function minimization such that the output from\n this function must be greater than 0 to be successfully passed.\n\n q : np.array\n the current joint angles\n self.ee_pos : np.array\n current self.ee_pos position (not used)\n\n returns : np.array\n all > 0 if constraint matched\n \"\"\"\n return q - self.min_angles\n\n return scipy.optimize.fmin_slsqp(\n func=distance_to_default,\n x0=self.q,\n eqcons=[x_constraint,\n y_constraint],\n # uncomment to add in min / max angles for the joints\n # ieqcons=[joint_limits_upper_constraint,\n # joint_limits_lower_constraint],\n args=(self.ee_pos,),\n iprint=0) # iprint=0 suppresses output", "def ortho_line_cut(self):\n x_mid_left, y_mid_left = self.midpoint(0,1) # Computes the mid point of the LHS face of the edm cut\n x_mid_right, y_mid_right = self.midpoint(2,3) # Computes the mid point of the RHS face of the edm cut\n\n ave_grad = self.average_grad()\n m_horizontal = -1/ave_grad #90 degrees rotation of the vertical line average gradient\n\n horizontal_eq_c = y_mid_right - m_horizontal*x_mid_right # y offset of horizontal line\n vertical_eq_left_c = y_mid_left - ave_grad * x_mid_left # y offset of vertical line on left side\n\n x_intersect, y_intersect = self.intersect_point(m_horizontal, horizontal_eq_c, ave_grad,vertical_eq_left_c)\n\n\n coordleft = [x_intersect, y_intersect]\n coordright =[x_mid_right, y_mid_right]\n\n dist = self.distance(coordleft, 
coordright)\n\n return coordleft, coordright, dist", "def joint_variables(self, G: nx.Graph, T_final: dict = None) -> np.ndarray:\n R = {\"p0\": SO3.identity()}\n joint_variables = {}\n for u, v, dat in self.structure.edges(data=DIST):\n if dat:\n diff_uv = G.nodes[v][POS] - G.nodes[u][POS]\n len_uv = np.linalg.norm(diff_uv)\n\n sol = np.linalg.lstsq(len_uv * R[u].as_matrix(), diff_uv)\n sol = sol[0]\n\n theta_idx = np.math.atan2(sol[1], sol[0]) + pi / 2\n Rz = SO3.rotz(theta_idx)\n\n alpha_idx = abs(np.math.acos(min(sol[2], 1)))\n Rx = SO3.rotx(alpha_idx)\n\n joint_variables[v] = [wraptopi(theta_idx), alpha_idx]\n R[v] = R[u].dot(Rz.dot(Rx))\n\n return joint_variables", "def _get_end_points(self, segmented_instances, i, stats, idx):\n\n end_points=[]\n\n # find all points intersecting the bbox\n #(tl_x, th_y, width, height, area)\n label_num=i+1\n leftmost_x = stats['bbox'][i][cv2.CC_STAT_LEFT]\n topmost_y = stats['bbox'][i][cv2.CC_STAT_TOP]\n width = stats['bbox'][i][cv2.CC_STAT_WIDTH]\n height = stats['bbox'][i][cv2.CC_STAT_HEIGHT]\n bottom_most_y = topmost_y + height-1\n right_most_x = leftmost_x + width-1\n\n segmented_instances_copy=segmented_instances.copy()\n edge_points = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs[segmented_instances==label_num]=255\n cv2.rectangle(segmented_instances_copy,(leftmost_x, topmost_y), (right_most_x, bottom_most_y), 150, 2)\n\n #Get all points for the current stem segment\n label_points = np.argwhere(segmented_instances.copy()==label_num)\n\n # upper points from (tl_x,th_y) to (th_x, th_y) that instersect with the upper edge of the bouding box\n upper_points = [i for i in label_points if i[0]==topmost_y and i[1]>=leftmost_x and i[1]<=right_most_x]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(upper_points, edge_points, segs, 1)\n center_upper_pts = sorted(self._get_centeroids(x_pts))\n\n # left side points from (tl_x, tl_y) to (tl_x, th_y) that instersect with the left edge of the bouding box\n left_points = [i for i in label_points if i[1]==leftmost_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(left_points, edge_points, segs, 0)\n center_left_pts = sorted(self._get_centeroids(x_pts))\n\n #right side points form (th_x, tl_y) to (th_x, th_y) that instersect with the right edge of the bouding box\n right_points = [i for i in label_points if i[1]==right_most_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(right_points, edge_points, segs, 0)\n center_right_pts = sorted(self._get_centeroids(x_pts))\n\n #bottom points from (tl_x, tl_y) to (th_x,tl_y)\n bottom_points = [i for i in label_points if i[1]>=leftmost_x and i[1]<=right_most_x and i[0]==bottom_most_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(bottom_points, edge_points, segs, 1)\n center_bottom_pts = sorted(self._get_centeroids(x_pts))\n\n # If there are corner edges, get the centroid of that\n center_x_lb, center_y_lb, center_left_pts, center_bottom_pts = self._get_corner_centers(center_left_pts, \\\n center_bottom_pts, bottom_most_y, leftmost_x)\n if (center_x_lb != None) and (center_y_lb != None):\n end_points.append([center_x_lb, center_y_lb])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n 
end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n # If there are corner edges, get the centroid of that\n center_x_ur, center_y_ur, center_right_pts, center_upper_pts = self._get_corner_centers(center_right_pts, \\\n center_upper_pts, topmost_y, right_most_x)\n if (center_x_ur != None) and (center_y_ur != None):\n end_points.append([center_x_ur, center_y_ur])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n # If there are corner edges, get the centroid of that\n center_x_ul, center_y_ul, center_left_pts, center_upper_pts = self._get_corner_centers(center_left_pts, \\\n center_upper_pts, topmost_y, leftmost_x)\n if (center_x_ul != None) and (center_y_ul != None):\n end_points.append([center_x_ul, center_y_ul])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n\n # If there are corner edges, get the centroid of that\n center_x_br, center_y_br, center_right_pts, center_bottom_pts = self._get_corner_centers(center_right_pts, \\\n center_bottom_pts, bottom_most_y, right_most_x)\n if (center_x_br != None) and (center_y_br != None):\n end_points.append([center_x_br, center_y_br])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n #self.showme(segmented_instances_copy, 'bbox')\n\n return end_points", "def get_exterior(self, x, y, x1, x2, bottom, head_y):\n fx1 = x+(x-x1)*8\n fx2 = x+(x-x2)*8\n # compute bounding ellipse; and intersection with body outline\n cv2.ellipse(self.ellipse_finder, ((x/mscale,y/mscale), ((fx1-fx2)/mscale, (2*(bottom-head_y))/mscale), 0), 255,-1 )\n intersection = np.bitwise_and(255-self.ellipse_finder, self.median_finder)\n # find external blobs\n im2, out_contours, out_hierarchy = cv2.findContours(intersection,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n return out_contours, out_hierarchy, fx1-fx2" ]
[ "0.6462823", "0.63993484", "0.5633511", "0.5578069", "0.5572044", "0.55286443", "0.549767", "0.54915667", "0.5486143", "0.5462283", "0.5461075", "0.54595315", "0.54163367", "0.5408067", "0.5399364", "0.53867376", "0.53730583", "0.536113", "0.533599", "0.5330091", "0.5328494", "0.5319201", "0.53152406", "0.5295751", "0.5288994", "0.52836466", "0.52835757", "0.52709764", "0.526558", "0.5255029", "0.5250141", "0.5206064", "0.5205125", "0.5193275", "0.5190355", "0.51846075", "0.51632285", "0.5141516", "0.5126835", "0.5121687", "0.5116427", "0.51126635", "0.5078218", "0.50752306", "0.5065359", "0.50528884", "0.5047486", "0.504582", "0.5044543", "0.5036272", "0.5026307", "0.5015012", "0.50148976", "0.50040954", "0.49992245", "0.49958342", "0.49935743", "0.49911764", "0.49881643", "0.4980434", "0.4980434", "0.49780747", "0.49752676", "0.49728394", "0.49715993", "0.4971583", "0.49675456", "0.49673098", "0.49567753", "0.49555266", "0.4954187", "0.49531457", "0.49510938", "0.49409878", "0.49396205", "0.49262637", "0.492414", "0.49201354", "0.4914983", "0.49141097", "0.49099877", "0.48908836", "0.48833558", "0.48725563", "0.48718724", "0.4869381", "0.48673844", "0.48662364", "0.48627448", "0.48590475", "0.4858755", "0.48361635", "0.48333922", "0.4832488", "0.48283625", "0.48243064", "0.4824188", "0.48235804", "0.48216158", "0.48202258", "0.4817702" ]
0.0
-1
Return the first element of a 2-tuple. >>> x([1,2]) 1
def x(a): return a[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def single_element_tuple():\n single = (1,)\n print(type(single)) # <type 'tuple'>", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def second(xs):\n if not xs:\n return None\n return xs[1]", "def second(xs):\n if not xs:\n return None\n return xs[1]", "def first(pair):\n\treturn pair[0]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def single_structure_tuple(x,strucutre_id):\n if isinstance(x,tuple):\n assert len(x)==2;\"tuple input must have two elements\"\n return (x[0][:,strucutre_id,:],x[1][:,strucutre_id,:])\n else:\n return x[:,strucutre_id,:]", "def first(xs):\n if not xs:\n return None\n return xs[0]", "def first(xs):\n if not xs:\n return None\n return xs[0]", "def tuple(x):\n pass", "def return_first(x):\r\n if x == []:\r\n return ''\r\n else:\r\n return x[0]", "def second(pair):\n\treturn pair[1]", "def first(items):\r\n return items[0]", "def first(x):\n try:\n x = x.to_series()\n except AttributeError:\n pass\n return list(x)[0]", "def _flatten_one(x):\n return x[0] if is_iterable(x) else x", "def give_me_a_tuple():\n my_tuple = ('p','e','r','m','i','t')\n return my_tuple\n pass", "def _sfn(x):\n if len(x) == 1:\n return x[0]\n return fn(*x)", "def _get_first_element(cls, d):\n\n t = np.where(d[:, 2] > 0)[0]\n if len(t):\n return d[t[0], 0], d[t[0], 1], t[0]\n return None, None, None", "def getfirst(s):\n return s[0] if isinstance(s, list) else s", "def x(self):\n return self[0]", "def key(self, x):\r\n return tuple(x)", "def take_first(info):\n return info[0]", "def tuple_from_sequence(*args):\n return tuple(args)", "def tuple_map(x):\n return x * 2", "def extract(l):\n if l is None: return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None", "def first(items):\n return next(iter(items or []), None)", "def __tuple_to_scalar(tuple_value):\n if isinstance(tuple_value, tuple) and len(tuple_value)==1:\n return tuple_value[0]\n else:\n return tuple_value", "def _tuple_from_one_or_two_ints(self, v):\n try:\n a, b = [int(x) for x in v]\n except TypeError:\n a, b = int(v), int(v)\n return (a,b)", "def second(self) -> Element:\n return typing.cast(Element, self[1])", "def element_to_tuple(list_of_elements):\n return list(map(lambda x: tuple(x), list_of_elements))", "def first(l):\n return next(iter(l), None)", "def get_only_element_from_collection(one_element_collection):\n if len(one_element_collection) != 1:\n raise AssertionError(u'Expected a collection with exactly one element, but got: {}'\n .format(one_element_collection))\n return funcy.first(one_element_collection)", "def get_first(self):\n return self.A[1][0] if self.n > 0 else None", "def get_first_item(checklist):\r\n return checklist['items'][0]", "def first(iterable: t.Iterable[T]) -> T:\n return next(iter(iterable))", "def first(self) -> Element:\n return typing.cast(Element, self[0])", "def _list_to_tuple(v):\n if isinstance(v, list):\n return tuple(v)\n return v", "def get_only(seq: Iterable[T]) -> T:\n it = iter(seq)\n try:\n first_element = it.__next__()\n # we use the sentinel approach rather than the usual (evil) Python \"attempt can catch the\n # exception\" approach to 
avoid raising zillions of spurious exceptions on the expected\n # code path, which makes debugging a pain\n sentinel = object()\n second_element = next(it, sentinel)\n if second_element is sentinel:\n return first_element\n else:\n got_msg: str\n if isinstance(seq, Sized):\n got_msg = str_list_limited(seq, limit=10)\n else:\n got_msg = f\"{first_element!r}, {second_element!r}, and possibly more.\"\n raise ValueError(f\"Expected one item in sequence but got {got_msg}\")\n except StopIteration:\n raise ValueError(\"Expected one item in sequence but got none\")", "def _get_first(details: CallableDetails) -> CallableArg:\n return details.args[0]", "def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._element # front aligned with head of list", "def identity(*args):\n return args if len(args) > 1 else args[0]", "def hd(lst):\n return lst[0] if lst else None", "def firstMove(board):\r\n x = board.size / 2\r\n return (x, x)", "def head(array) -> T:\n return array[0]", "def get_value_tuple_outer_function(index, tuple_input):\n return (tuple_input[0][index],\n tuple_input[1][index],\n tuple_input[2][index])", "def get_first_element(dataset):\n return dataset.first()", "def easy_unpack_my(elements):\n try:\n res = tuple(elements[i] for i in [0, 2, -2])\n except IndexError:\n res = 0\n return res", "def _first(self) -> Tuple[np.ndarray, np.ndarray, ModelGeneratorBase]:\n pass", "def first(seq):\n try: # try iterator interface\n return seq.next()\n except AttributeError:\n pass\n try: # seq is no iterator, try indexed lookup\n return seq[0]\n except IndexError:\n pass\n raise TypeError(\n \"Argument to `first()` method needs to be iterator or sequence.\")", "def pair(first, second):\n return [first, second]", "def fetchone(cursor):\n\t# type: (Cursor, ) -> Any\n\n\trows = cursor.fetchall()\n\tif len(rows) == 0:\n\t\traise NoResult(\"No result found\")\n\telif len(rows) == 1:\n\t\treturn rows[0]\n\telse:\n\t\traise InconsistentState(\"More than one result found\")", "def try_tuple(obj):\n # type: (Union[T, Tuple[T]]) -> Tuple[T]\n if isinstance(obj, tuple):\n return obj\n\n return obj, # NOTE the comma, made into tuple", "def pick_first(self, x, y):\n\n return self.cast(x, x.type.combine(y.type))", "def head(self, xes):\n return xes[0]", "def astuple(self):\n try:\n return tuple([x.astuple() for x in self])\n except Exception:\n pass\n return tuple([x for x in self])", "def tup(item, ret_is_single=False):\r\n #return true for iterables, except for strings, which is what we want\r\n if hasattr(item, '__iter__'):\r\n return (item, False) if ret_is_single else item\r\n else:\r\n return ((item,), True) if ret_is_single else (item,)", "def _ensure_iterable(x):\n if isinstance(x[0], Iterable):\n if len(x) > 1:\n raise TypeError(\"Either Iterable or variable argument list expected\")\n return x[0]\n else:\n return x", "def visit_Tuple(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__tuple__'), node.elts)\n return node", "def flatten(x): # przerobić na lambda?\n if x==[]:\n return None\n else:\n return x[0]", "def P_(x, y):\r\n return (x, y)", "def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._next._element # front aligned with head of list", "def first(seq):\n return next(iter(seq))", "def get_first_item(videos):\n\n return next(iter(videos or []), None)", "def aind(x):\n\treturn tuple(x.T)", "def identity_filter(element_tuple):\r\n\treturn element_tuple", 
"def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]:\n return tuple([l_[x] for x in i])", "def __getitem__(self, index):\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n raise IndexError", "def take_second(info):\n return info[1]", "def first(self):\n return _(self._[0])", "def vec2tuple(x):\n return (x.x, x.y, x.z)", "def easy_unpack(elements: Tuple[int]) -> Tuple[int]:\n\n return itemgetter(0, 2, -2)(elements)", "def first_last(item):\n # first_element = item[0]\n # last_element = item[len(item) - 1]\n # if type(item) == list:\n # return [first_element, last_element]\n # if type(item) == tuple:\n # return (first_element, last_element)\n # if type(item) == str:\n # return first_element + last_element\n\n # return item[:1] + item[-1:] \n \n return item[0: len(item): len(item)-1]", "def first(s):\n assert is_link(s), 'fist only applies to a linked list.'\n assert s != empty, 'empty linked list has no first element.'\n return s[0]", "def first2(x, y):\n y = np.asarray(y)\n return y[np.argsort(x)][0]", "def x(self):\n return self._arr[0]", "def parse(arg: Tuple[str, str, str, str, str]) -> Tuple[str, str, str]:\n return (arg[2], arg[3], arg[4])", "def first(s):\n assert is_link(s), 'first only applies to linked lists.'\n assert s != empty, 'empty linked list has no first element.'\n return s[0]", "def project(self, x):\n return (x, x) # TODO Your code goes here.", "def _get(self, (y, x)):\n return self[y][x]", "def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples", "def first(s):\n assert is_link(s), \"first only applies to linked lists.\"\n assert s != empty, \"empty linked list has no first element.\"\n return s[0]", "def popArg(args):\n if len(args) == 0:\n return (None, args)\n nextArg = args[0]\n args = args[1:]\n return (nextArg, args)", "def unpack_all_equal_tuple(t):\n if not isinstance(t, tuple):\n return t\n\n assert all(x == t[0] for x in t)\n return t[0]", "def __call__ (self, x):\n try:\n return self._lookup[x - 1]\n except IndexError:\n message = '{} not in permutation. 
Must be between 1 and {}.'\n raise IndexError(message.format(x, len(self)))", "def first(collection):\n return next(iter(collection))", "def first(collection):\n return next(iter(collection))", "def get(s: Iterable[T]) -> T:\n return next(iter(s))", "def retrieve_data_tuple(self):\n return ((42,))", "def index_to_tuple(self, index):\n if index < 0:\n index = self.num_items + index\n assert index >= 0 and index < self.num_items\n\n return self.indices[index]", "def _transform_inputs(x):\n if not isinstance(x, (list, tuple)):\n return x\n assert len(x) > 0\n return x[-1]", "def return_first_item(func):\n\n # Define the wrapper function.\n def wrapper(self, *args, **kwargs):\n\n # Execute the decorated method with the provided arguments.\n result = func(self, *args, **kwargs)\n\n # If the function returned a result and that result is a list then\n # return the first item on that list.\n if result and isinstance(result, list):\n result = result[0]\n\n return result\n\n return wrapper", "def convert_to_tuple(v):\n if not isinstance(v, tuple):\n return tuple(v)\n else:\n return v", "def tuple(self) -> tuple:\n return tuple(self)", "def one(self):\n return next(iter(self), None)", "def firstElement(self):\n return self.top()", "def first(self):\n if self.is_empty():\n raise Empty(\"List is empty!\")\n return self._header._next._element", "def pop_first_arg(argv):\n for arg in argv:\n if not arg.startswith('-'):\n argv.remove(arg)\n return (arg, argv)\n\n return (None, argv)" ]
[ "0.7628563", "0.7628563", "0.70851874", "0.6855407", "0.6824074", "0.6824074", "0.68147033", "0.6742139", "0.6742139", "0.6360675", "0.63022053", "0.63022053", "0.62847096", "0.62811184", "0.62070453", "0.6096625", "0.60256463", "0.6006833", "0.59457403", "0.5892296", "0.5847164", "0.58175504", "0.5778301", "0.5728105", "0.57054967", "0.569324", "0.56838566", "0.56570625", "0.56556493", "0.56485033", "0.5637585", "0.5636039", "0.5619627", "0.5612777", "0.55828965", "0.55768", "0.5573989", "0.5553067", "0.5537594", "0.55251414", "0.55221456", "0.5518657", "0.54852265", "0.5481925", "0.5464605", "0.5463466", "0.54615915", "0.5460893", "0.54498744", "0.54493195", "0.54469216", "0.5446488", "0.54053175", "0.54050773", "0.5403222", "0.54023606", "0.53963584", "0.53914964", "0.53729063", "0.53598475", "0.53536355", "0.5347252", "0.5334872", "0.5330845", "0.5325149", "0.5324077", "0.5318189", "0.5314992", "0.53143454", "0.5312873", "0.5298198", "0.52895695", "0.52873296", "0.52840364", "0.52785397", "0.52663386", "0.52577466", "0.5253106", "0.5250338", "0.52496433", "0.52486944", "0.52481043", "0.52422357", "0.5238056", "0.5236511", "0.5236008", "0.5234331", "0.5226396", "0.5226396", "0.52244323", "0.5210247", "0.5203924", "0.52038413", "0.51797706", "0.5178927", "0.5178878", "0.51760066", "0.51730216", "0.5161182", "0.5160664" ]
0.6782777
7
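A brief aside on how a record like the one above reads: the query field appears to be the positive document's docstring with its doctest flattened onto a single line by the dump. A minimal sketch, assuming that reading (an assumption, not documented behavior), which restores the lost line breaks and re-runs the doctest; the function name x and its body are taken verbatim from the record's document field:

import doctest

def x(a):
    """Return the first element of a 2-tuple.

    >>> x([1, 2])
    1
    """
    return a[0]

# TestResults(failed=0, attempted=1) confirms the query's inline doctest
# still passes against the stored document exactly as written.
print(doctest.testmod())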
Return the second element of a 2-tuple. >>> y([1,2]) 2
def y(a): return a[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def second(xs):\n if not xs:\n return None\n return xs[1]", "def second(xs):\n if not xs:\n return None\n return xs[1]", "def second(pair):\n\treturn pair[1]", "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def _get(self, (y, x)):\n return self[y][x]", "def second(self) -> Element:\n return typing.cast(Element, self[1])", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def pick_second(self, x, y):\n\n return self.cast(y, x.type.combine(y.type))", "def take_second(info):\n return info[1]", "def get(self, x, y):\n i = self.map[y][x]\n return self.get(i)", "def single_element_tuple():\n single = (1,)\n print(type(single)) # <type 'tuple'>", "def P_(x, y):\r\n return (x, y)", "def x(a):\n return a[0]", "def tuple(x):\n pass", "def secondMember(x, bsp, i, j=0):\n\ty = source(x)*bsp(x, i=i)\n\treturn y", "def __getitem__(self, index):\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n raise IndexError", "def give_me_a_tuple():\n my_tuple = ('p','e','r','m','i','t')\n return my_tuple\n pass", "def p2(self):\n return tuple(self.rect[2:])", "def single_structure_tuple(x,strucutre_id):\n if isinstance(x,tuple):\n assert len(x)==2;\"tuple input must have two elements\"\n return (x[0][:,strucutre_id,:],x[1][:,strucutre_id,:])\n else:\n return x[:,strucutre_id,:]", "def tuple_map(x):\n return x * 2", "def first2(x, y):\n y = np.asarray(y)\n return y[np.argsort(x)][0]", "def x2(self):\n return self._x2", "def pair(first, second):\n return [first, second]", "def Rt(X):\n return X[:2,:2], X[:2, 2]", "def y2(self):\n return self._y2", "def __getitem__(self, index):\n x = self._input_data[index]\n if self._output_data is None:\n return x, x\n else:\n y = self._output_data[index]\n return x, y", "def _tuple_from_one_or_two_ints(self, v):\n try:\n a, b = [int(x) for x in v]\n except TypeError:\n a, b = int(v), int(v)\n return (a,b)", "def y(self):\n return self[1]", "def y(self, x):\n return x", "def get_value_tuple_outer_function(index, tuple_input):\n return (tuple_input[0][index],\n tuple_input[1][index],\n tuple_input[2][index])", "def first(pair):\n\treturn pair[0]", "def retrieve_data_tuple(self):\n return ((42,))", "def last2(x, y):\n y = np.asarray(y)\n return y[np.argsort(x)][-1]", "def y(self, value=None):\n if isinstance(value, (int, float)):\n self[1] = value\n else:\n if value is not None:\n raise TypeError(\"Cannot be set to {}\".format(type(value)))\n return self[1]", "def try_tuple(obj):\n # type: (Union[T, Tuple[T]]) -> Tuple[T]\n if isinstance(obj, tuple):\n return obj\n\n return obj, # NOTE the comma, made into tuple", "def get(self):\n return (self.x,self.y);", "def easy_unpack_my(elements):\n try:\n res = tuple(elements[i] for i in [0, 2, -2])\n except IndexError:\n res = 0\n return res", "def key(self, x):\r\n return tuple(x)", "def getPixel (self, x, y):\r\n return self.image [y][x]", "def get(self) -> tuple:", "def popArg(args):\n if len(args) == 0:\n return (None, args)\n nextArg = args[0]\n args = args[1:]\n return (nextArg, args)", "def __getitem__(self, index):\n if isinstance(index, (tuple, list)) and len(index) == 2:\n return self.cells[index[1]][index[0]]\n return self.cells[index]", "def 
y(self):\n return self._arr[1]", "def second(self) -> int:\r\n return self._second", "def twoaxes(self, y):\r\n X = [y] if np.isscalar(y[0]) else y\r\n N2 = len(X[0]) // 2\r\n f = [1e6 * sum(x[0:N2]**2) + sum(x[N2:]**2) for x in X]\r\n return f if len(f) > 1 else f[0]", "def retrieve_result(self, x, y):\n pass", "def visit_Tuple(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__tuple__'), node.elts)\n return node", "def get_substr(self, y, x1, x2):\n return self.lines[y][x1 : x2]", "def __tuple_to_scalar(tuple_value):\n if isinstance(tuple_value, tuple) and len(tuple_value)==1:\n return tuple_value[0]\n else:\n return tuple_value", "def _value_at_axis(value, axis):\n if not isinstance(value, (list, tuple)):\n return value\n if len(value) == 1:\n return value[0]\n else:\n return value[axis]", "def get_two_armies(self) -> tuple:\n\n if(len(self.armies) < 2):\n print(\"Could not choose an army. must have more than one army on the list\")\n raise Exception\n\n while(True):\n first = R.randint(0, len(self.armies)-1)\n second = R.randint(0, len(self.armies)-1)\n\n if(first != second):\n break\n\n return (self.armies[first], self.armies[second])", "def relay_tuple_getitem(c, t, idx):\n assert idx.is_constant(int)\n return relay.expr.TupleGetItem(c.ref(t), idx.value)", "def getAxisTuple(axis):", "def extract(l):\n if l is None: return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None", "def easy_unpack(elements: Tuple[int]) -> Tuple[int]:\n\n return itemgetter(0, 2, -2)(elements)", "def _coord_to_tuple(self, coord):\n if isinstance(coord, str):\n return tuple(float(k) for k in coord[1:-1].split(', '))\n else:\n assert len(coord) == 2\n return coord", "def pick_first(self, x, y):\n\n return self.cast(x, x.type.combine(y.type))", "def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]:\n return tuple([l_[x] for x in i])", "def calc_cell(self, x, y):\n if x < 1 or x > self.length or y < 1 or y > self.length:\n raise IndexError\n n = (x - 1) * self.length + y - 1\n return self.lst[n][1]", "def getP2(self):\n return self.points[1]", "def _list_to_tuple(v):\n if isinstance(v, list):\n return tuple(v)\n return v", "def _get_pt_tuple(pnt1, pnt2):\n return tuple(map(_map_x_dim(tuple(pnt1)), pnt2))", "def __getitem__(self, index):\n if isinstance(index, int):\n return list.__getitem__(self, index)\n if isinstance(index, tuple):\n return list.__getitem__(self, index[0])[index[1]]\n raise TypeError, \"Table indices must be int or tuple\"", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def __getitem__(self, j):\n\t\treturn self._coords[j]", "def getPair(self, args):\r\n return self.name, self.getValue(args)", "def get(s: Iterable[T]) -> T:\n return next(iter(s))", "def __getitem__(self, index: int) -> float:\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n else:\n raise IndexError", "def second(a, b):", "def second_path(self):\n\t\treturn self.args[2]", "def __getitem__(self, key):\n return tuple(self._mapping[key])", "def other_entry(self):\r\n l = self.other_entries()\r\n assert len(l) == 1\r\n return l[0][1]", "def x(self):\n return self[0]", "def foo_2(x, y):\n\tif x > y:\n\t\treturn x\n\treturn y", "def _get_none(self, x, y):\n try:\n return self[x, y]\n except ArrayError:\n return None", "def hinted_tuple_hook(obj):\n if '__tuple__' in obj:\n return tuple(obj['items'])\n return obj", "def get_cell(self, x, y):\n if y < 0 
or y >= len(self.g): return None\n if x < 0 or x >= len(self.g[y]): return None\n return self.g[y][x]", "def get(self) -> Tuple[str, Tuple]:\n # TODO:\n pass", "def studying_tuple():\r\n tuple_one = ()\r\n tuple_two = tuple()\r\n print(\"Creating empty tuple: \", tuple_one)\r\n print(\"Creating empty tuple: \", tuple_two)\r\n tuple_two = tuple(range(10))\r\n print(\"Tuple created using range: \", tuple_two)\r\n tuple_one = (11, 22, 33, 44, 55, 66, 77)\r\n print(\"Created tuple is: \", tuple_one)\r\n print(\"Accessing element using element index,tuple_one[2]: \", tuple_one[2])\r\n # returns tuple\r\n sliced_elements = tuple_one[2:]\r\n print(\"Sliced elements from the tuple are: \", sliced_elements)\r\n tuple_two = (1, 2, 3, 4, \"tom\", 3.14, 7+4j, True)\r\n print(\"New tuple is: \", tuple_two)\r\n three_tuple = 3, 4.6, \"dog\"\r\n print(\"New tuple is: \", three_tuple)\r\n string_tuple = (\"hello\",)\r\n # <class 'tuple'>\r\n print(\"Type of tuple is: \", type(string_tuple))\r\n data_tuple = (\"mouse\", [8, 4, 6], (1, 2, 3), 110, 134, 167, \"mouse\")\r\n print(\"Original data tuple is: \", data_tuple)\r\n # using nested indexing accessing tuple element\r\n print(\"Element data_tuple[0][3] we got is: \", data_tuple[0][3])\r\n print(\"Element data_tuple[1][1] we got is: \", data_tuple[1][1])\r\n # negative indexing\r\n print(\"Accessing last element from tuple using negative index: \",\r\n data_tuple[-1])\r\n print(\"Use index method to get index of element,data_tuple.index(\\\"mouse\\\"): \",\r\n data_tuple.index(\"mouse\"))\r\n print(\"Use count method to get number of count of element,data_tuple.count(\\\"mouse\\\"): \",\r\n data_tuple.count(\"mouse\"))\r\n # join two tuples\r\n tuple_one = (\"a\", \"b\", \"c\")\r\n tuple_two = (1, 2, 3)\r\n tuple_three = tuple_one+tuple_two\r\n print(\"tuple_one is: \", tuple_one)\r\n print(\"tuple_two is: \", tuple_two)\r\n print(\"Joined tuple of above two tuple is: \", tuple_three)\r\n print(\"Length of tuple_three is,len(tuple_three): \", len(tuple_three))\r\n print(\"We can delete entire tuple using \\\" del data_tuple \\\"\")\r\n return", "def add_get_tuple(self, input_name, index, name=None):\n return self._build_op('get_tuple', [input_name], name=name, attr={'index': index})", "def _transform_point(self, x, y):\n return (x, y)", "def parse(arg: Tuple[str, str, str, str, str]) -> Tuple[str, str, str]:\n return (arg[2], arg[3], arg[4])", "def y2(self, y2=None):\n\n if y2 is None:\n return self._y2\n else:\n if not isinstance(y2, int) and not isinstance(y2, float):\n raise TypeError(\"y2 must be numeric, not '%s'\" % y2)\n self._y2 = y2", "def tuple_from_sequence(*args):\n return tuple(args)", "def get_sample(x, y):\n return noise[x][y]", "def room_xy(room, x, y, value=None):\n return room[x][y]", "def __getitem__(self, index):\n return index, super().__getitem__(index)", "def y2(self):\n return self._y + self._y2", "def coord (i, j):\r\n return j, i", "def get(self, x, y=None):\n edges = self.edges.setdefault(x,{})\n if y is None:\n return edges\n else:\n return edges.get(y, math.inf)", "def case(self, x, y):\n return self.grille[x][y]", "def x2(self, x2=None):\n\n if x2 is None:\n return self._x2\n else:\n if not isinstance(x2, int) and not isinstance(x2, float):\n raise TypeError(\"x2 must be numeric, not '%s'\" % x2)\n self._x2 = x2", "def __getitem__(self, data):\n i,j = data\n return self._data[i][j]", "def getTuple(self):\n return self.position.exportToTuple()", "def arg2(self) -> int:\n if self.command_type() not in (\n CommandType.push,\n 
CommandType.pop,\n CommandType.function,\n CommandType.call,\n ):\n raise RuntimeError('Cannot call arg2 on command type: %s' % self.command_type().value)\n\n return int(self.current_command_split[2])", "def getByLable(self, a, b):\n\t\treturn self.matrix[self.access[a]][self.access[b]]", "def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)" ]
[ "0.68817425", "0.68817425", "0.6846556", "0.67233163", "0.67233163", "0.6483205", "0.62575674", "0.6175765", "0.60955626", "0.60955626", "0.60284346", "0.59068465", "0.57835966", "0.57772356", "0.57753384", "0.5761288", "0.570475", "0.5695715", "0.5689088", "0.568156", "0.56749314", "0.5633721", "0.5557852", "0.5532588", "0.5458863", "0.5448019", "0.5414589", "0.5352322", "0.5340934", "0.53265786", "0.53212565", "0.53122145", "0.53105396", "0.5310256", "0.53067535", "0.52893555", "0.52661616", "0.52656734", "0.52266437", "0.52086437", "0.51995254", "0.51876235", "0.5183957", "0.5181199", "0.5179069", "0.51669544", "0.51638526", "0.5159012", "0.5153408", "0.5152366", "0.51437855", "0.5142978", "0.5142834", "0.5140046", "0.51341766", "0.5125775", "0.51182455", "0.511737", "0.51096845", "0.510093", "0.5099762", "0.5096865", "0.50936615", "0.50908786", "0.50899583", "0.5081977", "0.508174", "0.5071511", "0.50659704", "0.5064966", "0.5061577", "0.50594354", "0.5059427", "0.5052633", "0.50510675", "0.5037052", "0.5035394", "0.502749", "0.50156295", "0.5015554", "0.5012766", "0.5003684", "0.4999431", "0.49903595", "0.49815106", "0.4976937", "0.4968053", "0.49609736", "0.49609426", "0.49577174", "0.49438936", "0.49418417", "0.49358925", "0.49296108", "0.49197114", "0.49158052", "0.49141008", "0.49037278", "0.48978984", "0.48958135" ]
0.6603884
5
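The metadata field of each record declares a single training objective, a (query, document, negatives) triplet, and the negative_scores list lines up entry-for-entry with negatives. A hedged sketch of one way such a record could be consumed; the alignment of scores[i] to negatives[i] is an assumption read off the equal list lengths, not documented behavior, and the field values below are abbreviated and rounded from the y record above:

# Abbreviated copies of the record's fields (full strings elided).
query = "Return the second element of a 2-tuple. >>> y([1,2]) 2"
document = "def y(a): return a[1]"
negatives = ["def second(xs): ...", "def second(pair): ...", "def x(a): ..."]
negative_scores = [0.6882, 0.6847, 0.5761]

# Pick the hardest negative (highest similarity score) to form a triplet.
hardest_score, hardest_negative = max(zip(negative_scores, negatives))
triplet = (query, document, hardest_negative)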
Euclidean distance (in pixels). >>> distance( (1,1),(2,2) ) == math.sqrt(2) True
def distance(a,b): return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def euclidean_distance(x, y):\n x1, y1 = x\n x2, y2 = y\n return sqrt((x1 - x2)**2 + (y1 - y2)**2)", "def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.power(x1 - x2, 2)))", "def euclidean_distance(x1, y1, x2, y2):\n distance = math.sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2))\n return distance", "def euclideanDistance(x1,y1,x2,y2):\n distance = math.sqrt(abs(math.pow((x2-x1),2)) + abs(math.pow((y2-y1),2)))\n return distance", "def euclidean_distance(a, b):\n return np.linalg.norm(a - b)", "def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))", "def euclidean_distance(point1, point2):\n return np.linalg.norm(np.array(point1) - np.array(point2))", "def euclidean(p1, p2):\n return p1.distance(p2)", "def euclidean_distance(a, b):\n return sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n\n distance = np.linalg.norm(x - y)\n\n return distance", "def EuclideanDistance( self, a, b ):\n return sqrt( self.EuclideanDistanceSq(a,b) )", "def euclidean_distance(point_one, point_two):\n return np.linalg.norm(point_one-point_two)", "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def euclidean_distance(x1, x2):\n return (x2[0] - x1[0])**2 + (x2[1] - x1[1])**2", "def euclidian_distance(x1, y1, x2, y2):\n distance = sqrt(pow((x1-x2), 2)+(pow((y1-y2), 2)))\n return distance", "def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def euclidean_distance(x1, x2):\n\tdistance = 0\n\t# Squared distance between each coordinate\n\tfor i in range(len(x1)):\n\t\tdistance += pow((x1[i], x2[i]), 2)\n\treturn math.sqrt(distance)", "def euclidean_distance(point1, point2):\n\n return math.sqrt(sum([(x - y) ** 2 for x, y in zip(point1, point2)]))", "def euclidean_distance(p1, p2):\n dist = np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n return dist", "def distance(x,y):\n return np.sqrt( np.power(np.array(x) - np.array(y), 2).sum() )", "def euclidean_distance(x, y):\n return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))", "def dist_euclidean(line, i1, i2):\n return sqrt((line[i1][0] - line[i2][0]) ** 2 + (line[i1][1] - line[i2][1]) ** 2)", "def get_euclidean_distance(p1, p2):\n return np.sqrt(np.power((p2[0] - p1[0]), 2) + np.power((p2[1] - p1[1]), 2))", "def euclidean(x,y): \n\treturn np.sqrt(np.sum((x-y)**2))", "def compute_euclidean_dist(vec1, vec2):\r\n assert len(vec1) == len(vec2)\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.sqrt(np.sum(np.square(vec2 - vec1)))", "def dist(x1, x2, distance):\n if distance == 'l2':\n return np.sqrt(np.sum(np.square(x1 - x2)))\n elif distance == 'squared_l2':\n return np.sum(np.square(x1 - x2))\n else:\n raise Exception(\"The distance '%s' is not supported.\" % distance)", "def euclideanDistance(loc1, loc2):\n # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)\n return math.sqrt((loc1[1]-loc2[1])**2+(loc1[0]-loc2[0])**2)\n # END_YOUR_CODE", "def euclidean_distance(square_one: tuple, square_two: tuple):\n return math.sqrt((square_one[0] - square_two[0]) ** 2 + (square_one[1] - square_two[1]) 
** 2)", "def euclidean_distance(start, end):\n\n value = np.sqrt(np.sum(np.square(np.subtract(start, end)), axis=-1))\n return value", "def euclideanDistance(loc1, loc2):\n return math.sqrt(sum([(a - b) ** 2 for a, b in zip(loc1, loc2)]))", "def test_euclidean_distance_Ndimension(self):\n\n self.assertEqual(15, euclidean_distance([0, 0, 0], [10, 10, 5]))\n self.assertEqual(15, euclidean_distance([0, 0, 0], [-10, -10, -5]))\n\n self.assertEqual(17, euclidean_distance([0, 0, 0, 0], [10, 10, 8, 5]))\n self.assertEqual(17, euclidean_distance([0, 0, 0, 0], [-10, -10, -8, -5]))\n\n self.assertEqual(8, euclidean_distance([0, 0, 0, 0, 0], [5, 1, 1, 1, 6]))\n self.assertEqual(8, euclidean_distance([0, 0, 0, 0, 0], [-5, -1, -1, -1, -6]))", "def _euclidian_distance(self, x1, x2):\n a= x1-x2\n a2 = a**2\n b = np.sum(a2, axis=1)\n c = np.sqrt(b)\n return c", "def distance(x1, y1, x2, y2):\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5", "def euclidean_distance(x, y):\n distance = 0\n for i, j in zip(x, y):\n distance += (i - j) ** 2\n return math.sqrt(distance)", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n distance_vector: np.ndarray = x - y\n distance = compute_norm(distance_vector)\n return distance", "def euclidean(x, y):\n return np.sqrt(np.sum((x - y) ** 2))", "def euclidean_dist(X, y):\n return np.sqrt(np.sum((X - y) ** 2, 1)) # broadcasted calculations", "def euclidean_distance(pred, squared=False, eps=1e-12):\n pred_square = pred.pow(2).sum(dim=-1) # (N, )\n prod = torch.mm(pred, pred.t()) # (N, N)\n distance = (pred_square.unsqueeze(1) + pred_square.unsqueeze(0) -\n 2 * prod).clamp(min=eps) # (N, N)\n\n if not squared:\n distance = distance.sqrt()\n\n distance = distance.clone()\n distance[range(len(prod)), range(len(prod))] = 0\n return distance", "def square_distance(a, b):\n return np.sum((a-b)**2)", "def get_euclidean_distance(self, x_coord_1, x_coord_2, y_coord_1, y_coord_2):\r\n\r\n return math.sqrt(((x_coord_1 - x_coord_2) ** 2) + \\\r\n ((y_coord_1 - y_coord_2) ** 2))", "def calculate_euclidean_distance(self, matrix, input, output_neuron):\n result = 0\n\n # Loop over all input data.\n diff = input - matrix[output_neuron]\n return np.sqrt(sum(diff*diff))", "def euclidean(x, y):\n ed = np.sqrt(np.sum((x-y)**2))\n # print ed\n return ed", "def euclidianDistance(row1, row2):\n\n dist = 0.0\n for i in range(len(row1)-1):\n dist += (row1[i] - row2[i])**2\n \n return sqrt(dist)", "def euclidean_distance(self, point: List[int]) -> float:\n return sqrt(point[0] ** 2 + point[1] ** 2)", "def euclidean_distance(a: Tuple[float, ...], b: Tuple[float, ...]) -> float:\n assert len(a) == len(b)\n return sqrt(sum(pow(x[0] - x[1], 2) for x in zip(a, b)))", "def distance(coord1, coord2):\n \n return sqrt((coord1[0]-coord2[0])**2+\n (coord1[1]-coord2[1])**2+\n (coord1[2]-coord2[2])**2)", "def get_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)", "def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)", "def euclidean_distance(arr1,arr2):\n distance = np.sqrt(np.sum((arr1 - arr2)**2))\n return distance", "def _dist(x, y):\n return np.sqrt(np.mean(np.square(x - y)))", "def euclidean_distance(p1, p2):\n distance = 0\n for i in range(len(p1)-1):\n distance += (p1[i]-p2[i])**(2)\n return sqrt(distance)", "def distance_checker(xyz1, xyz2):\n return math.sqrt((xyz1[0] - xyz2[0])**2 + (xyz1[1] - xyz2[1])**2 +\n (xyz1[2] - xyz2[2])**2)", "def eucl_dist(x_0, y_0, x_1, y_1):\n return sqrt((x_1 - x_0)**2 + (y_1 - y_0)**2)", "def 
euclidean_distance(self, other_point):\n return sqrt((self.x - other_point.x)**2 + (self.y - other_point.y)**2)", "def distance(rgb1, rgb2):\n diffs = np.array(rgb1) - np.array(rgb2)\n return math.sqrt(np.sum(diffs**2))", "def distance(rgb1: Tuple[int, int, int], rgb2: Tuple[int, int, int]) -> float:\n r = rgb1[0] - rgb2[0]\n g = rgb1[1] - rgb2[1]\n b = rgb1[2] - rgb2[2]\n return math.sqrt(r**2 + g**2 + b**2)", "def euclidean_dist(ss1, ss2):\n lat1, lon1 = ss1.centroid\n lat2, lon2 = ss2.centroid\n\n return sqrt((lat1 - lat2)**2 + (lon1 - lon2)**2)", "def _euclidean_distance(self, points_a, points_b):\n assert len(points_a.shape) == 2\n assert len(points_b.shape) == 2\n\n transpose_b = points_b.T\n dot = np.dot(points_a, transpose_b)\n\n a_mode_sq = np.tile(\n (points_a ** 2).sum(-1, keepdims=True), (1, points_b.shape[0]))\n b_mode_sq = np.tile((transpose_b ** 2).sum(0, keepdims=True),\n (points_a.shape[0], 1))\n\n distance = np.sqrt(a_mode_sq + b_mode_sq - 2 * dot)\n return distance", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def euclidean_distance(list1, list2):\n # Make sure we're working with lists\n # Sorry, no other iterables are permitted\n assert isinstance(list1, list)\n assert isinstance(list2, list)\n\n dist = 0\n\n # 'zip' is a Python builtin, documented at\n # <http://www.python.org/doc/lib/built-in-funcs.html>\n for item1, item2 in zip(list1, list2):\n dist += (item2 - item1)**2\n return math.sqrt(dist)", "def distance(self, x2, y2):\r\n return math.sqrt((x2 - self.x) ** 2 + (y2 - self.y) ** 2)", "def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def euclidian_distance(stroke1, stroke2):\n\n x1 = np.array(stroke1.x)\n x2 = np.array(stroke2.x)\n y1 = np.array(stroke1.y)\n y2 = np.array(stroke2.y)\n\n d = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n m = d - np.min(d)\n if np.mean(m) < 0:\n return 0, 0\n else:\n return np.mean(d), np.mean(m)", "def distance(XYZ1=np.array([0, 0, 0], dtype='float32'),\n XYZ2=np.array([1, 1, 1], dtype='float32')):\n a=XYZ2-XYZ1\n b=a**2\n c=b.sum()\n return np.sqrt(c)", "def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)", "def _distance(coord1, coord2):\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist*xdist + ydist*ydist)", "def euclidean_distance(self):\n return sqrt(pow((self.goal_pose.x - self.ground_truth_pose.x), 2) +\n pow((self.goal_pose.y - self.ground_truth_pose.y), 2))", "def distance((x,y,z),(x0,y0,z0)):\n return sqrt((x-x0)**2+(y-y0)**2+(z-z0)**2)", "def distance(p1, p2):\n return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)", "def _nn_euclidean_distance(x, y):\n distances = _pdist(x, y)\n return np.maximum(0.0, distances.min(axis=0))", "def _nn_euclidean_distance(x, y):\n distances = _pdist(x, y)\n return np.maximum(0.0, distances.min(axis=0))", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def euclideanDistance(a, b):\n vec = [pow(a[i] - b[i], 2) for i in range(len(a)) if None not in [a[i],b[i]]]\n return (sum(vec) / len(vec)) if len(vec) > 0 else NaN", "def _distance(point_a: tuple, point_b: tuple):\n # rgb values\n x1, y1, z1 = point_a\n x2, y2, z2 = point_b\n\n # distances\n dx = x1 - x2\n dy = y1 - y2\n dz = z1 - z2\n\n # final distance\n return sqrt(dx**2 + dy**2 + dz**2)", "def euclidian_distance(x: np.arrays, y: np.arrays):\r\n diff = x - np.mean(y, axis=0)\r\n return np.sqrt(np.dot(diff.T, 
diff))", "def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))", "def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[index]-s2[index])**2\n \n return math.sqrt(tmpsum)", "def euclidean_distance(vector1, vector2):\n e_dist = [(v1 - v2) ** 2 for v1, v2 in zip(vector1, vector2)]\n e_dist = math.sqrt(sum(e_dist))\n return e_dist", "def distance(self, x: int, y: int) -> float:\n return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def distance(x: int, y: int, a: int, b: int) -> float:\n return ((x - a) ** 2 + (y - b) ** 2) ** .5", "def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)", "def distance(X, Y):\n\n return math.sqrt(np.sum((X-Y)**2))", "def distance(self,coord_1, coord_2):\n return np.sqrt(np.sum((np.array(coord_1)-np.array(coord_2))**2))", "def distance(x1, y1, x2, y2):\n dist = ((x1-x2)**2 + (y1-y2)**2)**0.5\n return dist", "def DISTANCE(x,y,x2=0,y2=0):\n\treturn sqrt((x-x2)*(x-x2)+(y-y2)*(y-y2))", "def euclidean_distance(data1, data2):\n #Convert data into numpy array\n array1 = np.array(data1)\n array2 = np.array(data2)\n \n #Create distance array\n dist_array = np.sqrt(np.sum((array2-array1)**2, axis=1))\n \n #Reshape array before return results\n return np.reshape(dist_array, [len(dist_array),1])", "def __distance(start_x, start_y, end_x, end_y):\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance", "def distance(p1, p2):\n return math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)", "def distance(a1, a2):\n [x1, y1, z1] = a1\n [x2, y2, z2] = a2\n\n return math.sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)", "def distance(a, b):\n return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)", "def euclidean_distance(vector_x, vector_y):\n if len(vector_x) != len(vector_y):\n raise Exception('Vectors must be same dimensions')\n return math.sqrt(sum((vector_x[dim] - vector_y[dim]) ** 2 for dim in range(len(vector_x))))", "def distance(a, b):\n return (np.sum((a - b)**2))**0.5", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))", "def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)", "def euclidian_distance(p):\n return(np.sqrt(sum([(p[0][i]-p[1][i])**2 for i, _ in enumerate(p)])))", "def _euclideanDistance(A, B):\n if len(A) != len(B):\n raise ValueError(\"A and B must have the same number of dimensions\")\n sqr_dist = 0\n for i in range(len(A)):\n sqr_dist += (A[i] - B[i])**2\n return np.sqrt(sqr_dist)" ]
[ "0.73406076", "0.7278529", "0.7243951", "0.7199394", "0.71825296", "0.7171505", "0.71208745", "0.7119449", "0.7014406", "0.6984854", "0.69682425", "0.69490933", "0.694145", "0.69366956", "0.69258237", "0.6921075", "0.6908294", "0.68853986", "0.68835557", "0.68756497", "0.6842449", "0.6838545", "0.6832998", "0.6801225", "0.6791674", "0.67895573", "0.67835706", "0.67649865", "0.6743123", "0.6741848", "0.67387414", "0.6738642", "0.6733243", "0.67299277", "0.6721555", "0.67181486", "0.67178416", "0.67136693", "0.67016584", "0.6700291", "0.6696712", "0.66937786", "0.66827035", "0.667923", "0.66445756", "0.6639561", "0.66245794", "0.66222435", "0.65561783", "0.6541609", "0.6535502", "0.6516878", "0.6515811", "0.6512158", "0.65072954", "0.6502012", "0.6498344", "0.6494", "0.6483181", "0.64785075", "0.6472602", "0.64686227", "0.6467172", "0.64664537", "0.64651066", "0.6455185", "0.6449013", "0.6447178", "0.6447032", "0.64436805", "0.6438832", "0.64329696", "0.64317924", "0.64317924", "0.6427897", "0.6417032", "0.6416153", "0.63987476", "0.639824", "0.6386913", "0.6362629", "0.63590485", "0.63553023", "0.6351933", "0.63499737", "0.6346408", "0.6345497", "0.63420963", "0.63406944", "0.63380146", "0.63368154", "0.6333405", "0.633035", "0.6329958", "0.6324855", "0.63229364", "0.6313493", "0.63020915", "0.6301684", "0.62949383" ]
0.6488884
58
Creates an initial design of n_samples drawn from a Latin hypercube.
def sample_latin_hypercube(low, high, n_samples, rng=None): if rng is None: rng = np.random.RandomState(np.random.randint(0, 10000)) n_dims = low.shape[0] samples = [] for i in range(n_dims): if isinstance(low[i], numbers.Integral): sample = random.sample(range(low[i], high[i]), n_samples) elif isinstance(low[i], numbers.Real): lower_bound = low[i] upper_bound = high[i] sample = lower_bound + rng.uniform(0, 1, n_samples) * (upper_bound - lower_bound) else: raise ValueError('Latin hypercube sampling can only draw from types int and real,' ' got {}!'.format(type(low[i]))) samples.append(sample) samples = np.array(samples, dtype=object) for i in range(n_dims): rng.shuffle(samples[i, :]) return samples.T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_hypercube(samples, N):\n\n np.random.seed(4654562)\n hypercube = lhs(N, samples=samples)\n\n return hypercube", "def latin_hypercube(n_pts, dim):\n X = np.zeros((n_pts, dim))\n centers = (1.0 + 2.0 * np.arange(0.0, n_pts)) / float(2 * n_pts)\n for i in range(dim): # Shuffle the center locataions for each dimension.\n X[:, i] = centers[np.random.permutation(n_pts)]\n\n # Add some perturbations within each box\n pert = np.random.uniform(-1.0, 1.0, (n_pts, dim)) / float(2 * n_pts)\n X += pert\n return X", "def generate_latin_hypercube(samples, param_dict, class_root, seed=10):\n # Set random seed\n random.seed(seed)\n\n # Create dictionary to hold sampled parameter values\n sample_points = {}\n for key in param_dict.keys():\n sample_points[key] = np.zeros(samples)\n Ndim = len(param_dict.keys())\n pnames = [key for key in param_dict.keys()]\n\n # List of indices for each dimension\n l = [range(samples) for j in range(Ndim)]\n\n # Generate samples until there are no indices left to choose\n for i in range(samples):\n\n # Randomly choose index and then remove the number that was chosen\n # (Latin hypercubes require at most one item per row and column)\n for j, p in enumerate(pnames):\n pmin, pmax = param_dict[p]\n idx = random.choice(l[j])\n\n # Get value at this sample point (add 0.5 to idx get bin centroid)\n sample_points[p][i] = pmin + (pmax - pmin) \\\n * (idx + 0.5) / float(samples)\n l[j].remove(idx) # Remove choice from list (sampling w/o replacement)\n\n return sample_points", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def latin_hypercube(n_pts, mins, maxs):\n #return a latin_hypercube\n design = lhs(np.size(maxs), samples=n_pts)\n for i in range(2):\n design[:, i] = design[:, i] * (maxs[i]-mins[i]) + mins[i]\n return design", "def latin_hypercube_sampler(n=1, indim=1, bounds=None, rng=None):\r\n rng = ensure_rng(rng)\r\n if bounds is None:\r\n bounds = np.zeros((indim, 2))\r\n bounds[:,1] = 1. 
\r\n # Divide each dimension into `n` equal intervals\r\n hypercubes = np.linspace(bounds[:,0], bounds[:,1], n+1)\r\n \r\n l = hypercubes[:-1,:].reshape(-1,)\r\n u = hypercubes[1:,:].reshape(-1,)\r\n _x = rng.uniform(l,u, (1, indim*n)).reshape(n, indim)\r\n x = _x\r\n for j in range(indim):\r\n x[:,j] = _x[rng.permutation(n), j]\r\n return x", "def lhs_start(hyperbounds, n_samples, rng=None):\n low_bounds = []\n high_bounds = []\n for bound in hyperbounds:\n low_bounds.append(bound[0])\n high_bounds.append(bound[1])\n\n low_bounds = np.array(low_bounds, dtype=object)\n high_bounds = np.array(high_bounds, dtype=object)\n\n samples = sample_latin_hypercube(low_bounds, high_bounds, n_samples, rng=rng)\n samples = samples.tolist()\n return samples", "def n_cube(self, dim_n):\n if dim_n == 1:\n return Polyhedron(vertices = [[1],[-1]])\n\n pre_cube = polytopes.n_cube(dim_n-1)\n vertices = [];\n for pre_v in pre_cube.vertex_generator():\n vertices.append( [ 1] + [v for v in pre_v] );\n vertices.append( [-1] + [v for v in pre_v] );\n return Polyhedron(vertices = vertices)", "def __init__(self, n, prey_cnt=0, predator_cnt=0):\n # print n, prey_cnt, predator_cnt\n self.grid_size = n\n self.grid = []\n for i in range(n):\n row = [0]*n # row is a list of n zeros\n self.grid.append(row)\n self.init_animals(prey_cnt, predator_cnt)", "def construct_initial_sample(n):\n sample_normal = np.random.normal(size=(n, 3))\n sample_radius = np.linalg.norm(sample_normal, axis=1, keepdims=True)\n sample_cartesian = sample_normal / sample_radius\n sample_polar = cartesian_to_polar(sample_cartesian)\n return np.reshape(sample_polar[:, 1:3], (-1))", "def create_samples(self):\n self._samples = self.load_samples()\n self.modify_samples()", "def sample(self, n_samples):\n with torch.no_grad():\n z = torch.randn((n_samples, self.z_dim))\n samples = self.decoder(z)\n im_size = int(np.sqrt(self.input_dim))\n samples = samples.view(-1, 1, im_size, im_size)\n\n return samples", "def test_2_1_3D_cube_init(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0), (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5)]\n\n nn_checks = {\n (1, 1, 1): [(1, 1, 0), (0, 1, 1), (1, 0, 0), (0, 0, 1), (1, 0, 1),\n (0.5, 0.5, 0.5), (0, 1, 0)],\n (1, 0, 1): [(1, 0, 0), (0, 0, 1), (0, 0, 0), (0.5, 0.5, 0.5),\n (1, 1, 1)],\n (0.5, 0.5, 0.5): [(1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0, 0),\n (0, 0, 1), (1, 0, 1), (0, 0, 0), (1, 1, 1)]}\n\n init_triangulation(3, 0, check, nn_checks)", "def initialiser(N, dimensions = 2):\r\n \r\n #shape for correct dimensions\r\n shape = tuple([N]) * dimensions\r\n \r\n #randomise spins\r\n lattice = np.random.choice([1,-1], size = shape)\r\n \r\n return lattice", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)", "def __init__(self, n_features):\n self.W = np.random.randn(n_features)\n self.b = np.random.randn()", "def _make_random_matrix(self, n_components, n_features):", "def createCube():\n subjects, detections, antigen = getAxes()\n cube = np.full([len(subjects), len(detections), len(antigen)], np.nan)\n\n IGG = importIGG()\n glycan, dfGlycan = importGlycan()\n glyCube = np.full([len(subjects), len(glycan)], np.nan)\n\n for k, curAnti in enumerate(antigen):\n lumx = importLuminex(curAnti)\n\n for _, row in lumx.iterrows():\n i = subjects.index(row[\"subject\"])\n j = detections.index(row[\"variable\"])\n cube[i, j, k] = row[\"value\"]\n\n for _, row in dfGlycan.iterrows():\n i = 
subjects.index(row[\"subject\"])\n j = glycan.index(row[\"variable\"])\n glyCube[i, j] = row[\"value\"]\n\n # Add IgG data on the end as another detection\n for _, row in IGG.iterrows():\n i = subjects.index(row[\"subject\"])\n k = antigen.index(row[\"variable\"])\n cube[i, -1, k] = row[\"value\"]\n\n # Clip to 0 as there are a few strongly negative outliers\n cube = np.clip(cube, 1.0, None)\n glyCube = np.clip(glyCube, 0.1, None)\n\n cube = np.log10(cube)\n glyCube = np.log10(glyCube)\n\n # Mean center each measurement\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n cube -= np.nanmean(cube, axis=0)\n glyCube -= np.nanmean(glyCube, axis=0)\n\n # Check that there are no slices with completely missing data\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 1)))\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 2)))\n assert ~np.any(np.all(np.isnan(cube), axis=(1, 2)))\n\n glyCube *= np.sqrt(np.nanvar(cube) / np.nanvar(glyCube))\n return cube, glyCube", "def generate_samples(self, n_samples=100):\n \t\t\n\t\t#how many times should ancestral sampling be run\n\t\t#n_samples\n prior_samples=[]\n for i in range(0,n_samples):\n prior_sample = self.prior.get_samples(\n n_latent_nodes=self.n_latent_nodes,\n n_gibbs_sampling_steps=100, \n sampling_mode=\"gibbs_ancestral\")\n prior_sample = torch.cat(prior_sample)\n prior_samples.append(prior_sample)\n prior_samples=torch.stack(prior_samples)\n # prior_samples = tf.slice(prior_samples, [0, 0], [num_samples, -1])\n output_activations = self.decoder.decode(prior_samples)\n output_activations = output_activations+self._train_bias\n output_distribution = Bernoulli(logit=output_activations)\n output=torch.sigmoid(output_distribution.logits)\n # output_activations[0] = output_activations[0] + self.train_bias\n # output_dist = FactorialBernoulliUtil(output_activations)\n # output_samples = tf.nn.sigmoid(output_dist.logit_mu)\n # print(\"--- \",\"end VAE::generate_samples()\")\n return output", "def __create_sample_data__(npts = 20):\n\t#data function\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\t#make grid\n\txs = np.linspace(0, 2*20, 2*npts + 1)\n\tys = np.linspace(0, 20, npts + 1)\n\t(xgrid, ygrid) = np.meshgrid(xs, ys)\n\tzgrid = wavy(xgrid, ygrid)\n\t\n\treturn (xgrid, ygrid, zgrid)", "def generate_in(self, n_samples, start_sample=0):\n pass", "def random_LatinHypercube(seeds, n, skip_seeds=0):\n Nseeds = len(seeds)\n rndms = np.zeros((Nseeds, n), dtype='float64')\n # define arrays here and re-use them later\n samples = np.zeros(n, dtype='float64')\n perms = np.zeros(n, dtype='float64')\n\n for seed_i in range(skip_seeds, Nseeds, 1):\n # set the seed\n np.random.seed(seeds[seed_i])\n\n # draw the random numbers and re-generate permutations array\n for i in range(n):\n samples[i] = np.random.uniform(0., 1.)\n perms[i] = i + 1\n\n # in-place shuffle permutations\n np.random.shuffle(perms)\n\n for j in range(n):\n rndms[seed_i, j] = (perms[j] - samples[j]) / float(n)\n\n return rndms", "def sample(h, seed_ix, n):\n x = np.zeros((vocab_size, 1))\n x[seed_ix] = 1\n ixes = []\n for t in xrange(n):\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n y = np.dot(Why, h) + by\n p = np.exp(y) / np.sum(np.exp(y))\n ix = np.argmax(p)#np.random.choice(range(vocab_size), p=p.ravel())\n x = np.zeros((vocab_size, 1))\n x[ix] = 1\n ixes.append(ix)\n return ixes", "def __init__(self, nonogram_size):\n # create random id\n self.nonogram_id = uuid.uuid4()\n self.row_numbers = [(2), (2), (2)]\n 
self.column_numbers = [(1, 1), (3), (1)]\n self.nonogram_size = nonogram_size\n self.grid = Nonogram.create_rand_grid(nonogram_size)\n #TODO\n self.fitness = 999", "def sample(h, seed_ix, n):\n x = np.zeros((vocab_size,1))\n x[seed_ix] = 1\n ixes = []\n for t in xrange(n):\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n y = np.dot(Why, h) + by\n p = np.exp(y) / np.sum(np.exp(y))\n ix = np.random.choice(range(vocab_size), p=p.ravel())\n x = np.zeros((vocab_size,1))\n x[ix] = 1\n ixes.append(ix)\n return ixes", "def _reconstruct(self, num_samples=None):", "def setUp(self):\n self.cube = _create_2d_cube()", "def get_raw_seedling(datasource, n):\n matrix = MatrixDict()\n \n for lang, sent in SEEDLING[datasource].sents():\n features = sent2ngrams(sent, n=n)\n matrix.setdefault(lang, Counter()).update(features)\n \n return matrix", "def initial_samples(lb, ub, method, numSamp):\r\n if not len(lb) == len(ub):\r\n raise AssertionError('Lower and upper bounds have different #s of design variables in initial_samples function.')\r\n assert method == 'random' or method == 'nolh' or method == 'nolh-rp' or method == 'nolh-cdr' or method == 'lhc' or method == 'rand-wor', 'An invalid method was specified for the initial_samples.'\r\n assert (method == 'nolh' or method == 'nolh-rp' or method == 'nolh-cdr') and len(ub) >= 2 and len(ub) <= 29, 'The Phase space dimensions are outside of the bounds for initial_samples.'\r\n for case in Switch(method):\r\n if case('random'):\r\n s = np.zeros((numSamp, len(lb)))\r\n for i in range(0, numSamp, 1):\r\n s[i, :] = lb + (ub - lb) * rand(len(lb))\r\n\r\n break\r\n if case('rand-wor'):\r\n s = np.zeros((numSamp, len(lb)))\r\n for i in range(0, numSamp, 1):\r\n s[i, :] = choice(len(ub), size=len(ub), replace=False)\r\n\r\n break\r\n if case('nolh'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf = range(q)\r\n if r != 0:\r\n remove = range(dim - r, dim)\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('nolh-rp'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf = random.sample(range(q), q)\r\n if r != 0:\r\n remove = random.sample(range(q - 1), r)\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('nolh-cdr'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf, remove = get_cdr_permutations(len(ub))\r\n if remove != []:\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('lhc'):\r\n tmp = lhs(len(lb), samples=numSamp, criterion='center')\r\n s = np.array([ list(lb + (ub - lb) * tmp[i, :]) for i in range(len(tmp[:, 0]))\r\n ])\r\n break\r\n if case():\r\n print 'Somehow you evaded my assert statement - good job!',\r\n print ' However, you still need to use a valid method string.'\r\n\r\n return s", "def test_1_1_2D_cube_init(self): # TODO: REMOVE FUNC AFTER SPLIT\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5)]\n\n nn_checks = {(0.5, 0.5): [(0, 1), (1, 0), (0, 0), (1, 1)],\n (0, 1): [(0, 0), (1, 1), (0.5, 0.5)]}\n\n init_triangulation(2, 0, check, nn_checks)", "def generate(net, z, maxlen=50, im=None, init=None, use_end=True):\n caption = lm_tools.sample(net, z['word_dict'], z['index_dict'], num=maxlen, Im=im, initial=init, use_end=use_end)\n print ' '.join(caption)", "def 
create_samples(self):\n for s_id in range(len(self.data[\"sample\"])):\n self.samples.add(Sample(s_id, [self.data[key][s_id] for key in self.data.keys() if key not in WRONG_KEYS],\n self.data[\"label\"][s_id]))", "def __init__(self, n):\n self.rows = [0 for _ in range(n)]\n self.columns = [0 for _ in range(n)]\n # First diagonal x+y, second y-x\n self.diagonal = [0, 0]\n self.score = {1: 1, 2: n+1}\n self.win = {1: n, 2: (n+1)*n}\n self.size = n", "def set_up_threshold_cube():\n test_data = 50*np.arange(16).reshape(4, 4)\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"surface_altitude\",\n units=\"m\",\n dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])\n return test_cube", "def initialize(self, n_samples=1, kl_scaling=None, *args, **kwargs):\n if kl_scaling is None:\n kl_scaling = {}\n if n_samples <= 0:\n raise ValueError(\n \"n_samples should be greater than zero: {}\".format(n_samples))\n\n self.n_samples = n_samples\n self.kl_scaling = kl_scaling\n return super(elbo_optimizer, self).initialize(*args, **kwargs)", "def __init__(self, num_prealloc_samples=0):\n self.num_prealloc_samples_ = num_prealloc_samples\n if self.num_prealloc_samples_ > 0:\n self._preallocate_samples()", "def initialize(self, numSamples, sampleMethod):\n initSamples = initial_samples(self.lb, self.ub, sampleMethod, numSamples)\n if sum(self.xID) != 0:\n xUB = [\n self.ub[np.where(self.xID == 1)[0][0]]] * len(self.xID)\n xSamples = initial_samples([0] * len(self.xID), xUB, 'rand-wor', numSamples)\n for var in range(len(self.varType)):\n if 'i' in self.varType[var] or 'd' in self.varType[var]:\n initSamples[:, var] = np.rint(initSamples[:, var])\n\n if sum(self.xID) != 0:\n initSamples = initSamples * (self.cID + self.iID + self.dID) + xSamples * self.xID\n return initSamples", "def setUp(self):\n # generate lattice\n self.lattice = lattice.Lattice()\n self.lattice.addAtom(\"He\", [0,0,0], 0)\n self.lattice.addAtom(\"He\", [2,0,0], 0)\n self.lattice.addAtom(\"He\", [0,2,0], 0)\n self.lattice.addAtom(\"He\", [0,0,2], 0)\n self.lattice.addAtom(\"He\", [9,9,9], 0)\n self.lattice.addAtom(\"He\", [2,2,0], 0)\n self.lattice.addAtom(\"He\", [2,0,2], 0)\n self.lattice.addAtom(\"He\", [0,2,2], 0)\n self.lattice.addAtom(\"He\", [2,2,2], 0)\n \n # indexes of cluster atoms\n self.bigClusterIndexes = [0,1,2,3,5,6,7,8]\n self.smallClusterIndexes = [4]\n \n # filter\n self.filter = clusterFilter.ClusterFilter(\"Cluster\")", "def make_sample_workspace(self, **kwargs):\n function = \"name=LinearBackground, A0=0.1;name=Lorentzian, PeakCentre=0.5, Amplitude=2, FWHM=0.1\"\n sample = self.make_dummy_workspace(function, output_name='sample', **kwargs)\n return sample", "def _sample_schechter(x0, alpha, x_min, size=100, max_iter=1000):\n out = []\n n = 0\n num_iter = 0\n while (n<size) & (num_iter<max_iter):\n x = np.random.gamma(scale=x0, shape=alpha+2, size=size)\n x = x[x>x_min]\n u = np.random.uniform(size=x.size)\n x = x[u<x_min/x]\n out.append(x)\n n+=x.size\n num_iter += 1\n\n if num_iter >= max_iter:\n msg = (\"The maximum number of iterations reached.\",\n \"Random variates may not be representitive.\",\n \"Try increasing `max_iter`.\")\n print(msg)\n\n return np.concatenate(out)[:size]", "def generate_samples(self, nsamples):\n assert self.trained, \"Model must first be fitted to some data.\"\n logger.debug(f'Generate synthetic dataset of size 
{nsamples}')\n synthetic_data, _ = self.gm.sample(nsamples)\n return synthetic_data", "def __init__(self, initial, size, horizontalChunks, verticalChunks, goal = \"\"):\n\t\tself.initial = initial\n\t\tself.size = size\n\t\tself.horChunks = horizontalChunks\n\t\tself.verChunks = verticalChunks\n\n\t\t# Goal holds the solution, once we find it.\n\t\tself.goal = goal\n\n\t\t# For a puzzle of size n, initializes blank n x n 2d array\n\t\tself.graph = [[0 for x in range(self.size)] for x in range(self.size)] \n\t\tfor i in range (0,self.size):\n\t\t\tfor j in range (0,self.size):\n\t\t\t\tself.graph[i][j] = initial[i*self.size + j] \n\t\tself.initial = \"\"", "def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def sample(h, seed_ix, n):\n x = np.zeros((vocab_size, 1))\n x[seed_ix] = 1\n ixes = []\n H = hidden_size\n\n for t in range(n):\n W = np.dot(Wxh, x) # hidden state, shape(300, 1)\n U = np.dot(Whh, h) # hidden state, shape(300, 100)\n z = sigmoid(W[:H] + U[:H] + bh[:H])\n r = sigmoid(W[H:H * 2] + U[H:H * 2] + bh[H:H * 2])\n r_ = r * U[H * 2:]\n h_ = np.tanh(W[H * 2:] + r_)\n h = z * h + (1 - z) * h_\n\n y = np.dot(Why, h) + by\n p = np.exp(y) / np.sum(np.exp(y))\n ix = np.random.choice(range(vocab_size), p=p.ravel())\n x = np.zeros((vocab_size, 1))\n x[ix] = 1\n ixes.append(ix)\n return ixes", "def randomgrid(self, n):\n lam = np.random.random((n, 3))\n return self.normalize(lam)", "def _make_test_cube(long_name):\n cs = GeogCS(EARTH_RADIUS)\n data = np.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]])\n cube = Cube(data, long_name=long_name)\n x_coord = DimCoord(\n np.linspace(-45.0, 45.0, 3), \"latitude\", units=\"degrees\", coord_system=cs\n )\n y_coord = DimCoord(\n np.linspace(120, 180, 3), \"longitude\", units=\"degrees\", coord_system=cs\n )\n cube.add_dim_coord(x_coord, 0)\n cube.add_dim_coord(y_coord, 1)\n return cube", "def __init__(self, n_rows: int = 2, n_columns: int = 2):\n self.set_uniform(n_rows, n_columns)", "def __init__(self, n):\n self.rows = [0] * n\n self.cols = [0] * n\n self.diagonal1 = 0\n self.diagonal2 = 0\n self.n = n", "def initial_setup(N,L,dim):\r\n r = np.zeros((3,N))\r\n #n = int(np.rint((N/3)**(1/dim)))\r\n #nz = int(n+n)\r\n x1 = np.linspace(0+L/n/2,L-L/n/2,n)\r\n d0 = 1.0e-10 #intial distance between hydrogen and oxygen\r\n x2 = x1 + d0\r\n x3 = x1 - d0/np.sqrt(2)\r\n z = np.zeros(3*n)\r\n xy = np.zeros(3*n)\r\n for ii in range(n):\r\n z[ii*3+1] = x1[ii]\r\n z[ii*3+2] = x2[ii]\r\n z[ii*3] = x3[ii]\r\n xy[ii*3] = x1[ii]\r\n xy[ii*3+1] = x1[ii]\r\n xy[ii*3+2] = x1[ii]\r\n rx, ry,rz = np.meshgrid(x1,x1,z)\r\n r[0,:] = np.reshape(rx,N)\r\n r[1,:] = np.reshape(ry,N)\r\n r[2,:] = np.reshape(rz,N)\r\n r[0,0::3] += d0/np.sqrt(2) #add additional offset to one Oxygen\r\n return r", "def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)", "def random_sample(self, n):\n indices = random.sample(xrange(np.shape(self.data)[0]), n)\n table = DataTable(self.data[indices], 
self.dims, self.legends, self.tags.copy())\n return table", "def __init__(self, i, h, o):\n self.Wz = np.random.randn(i + h, h)\n self.bz = np.zeros((1, h))\n self.Wr = np.random.randn(i + h, h)\n self.br = np.zeros((1, h))\n self.Wh = np.random.randn(i + h, h)\n self.bh = np.zeros((1, h))\n self.Wy = np.random.randn(h, o)\n self.by = np.zeros((1, o))", "def __init__(self, n):\n self.n = n\n self.rows = [0 for _ in range(n)]\n self.colums = [0 for _ in range(n)]\n self.diag = [0,0]", "def gen_data(npt, typ, ndim, rstate=None):\n mid = .5 # i'm placing in unit cube\n if typ == 'ball':\n r0 = 0.5\n pts = genball(npt, ndim, rstate=rstate) * r0 + mid\n volume = (np.pi**(ndim / 2) / scipy.special.gamma(ndim / 2 + 1) *\n r0**ndim)\n elif typ == 'pin':\n w = 0.01\n a = 1\n pts = np.zeros((npt, ndim))\n pts[:, 1:] = genball(npt, ndim - 1, rstate=rstate) * w + mid\n pts[:, 0] = (rstate.uniform(size=npt) - 0.5) * a + mid\n volume = (np.pi**((ndim - 1) / 2) /\n scipy.special.gamma((ndim - 1) / 2 + 1) * w**(ndim - 1) * a)\n elif typ == 'torus':\n w = 0.01\n r0 = 0.45\n pts = np.zeros((npt, ndim))\n pts[:, :2] = genshell(r0 - w / 2, r0 + w / 2, npt, 2,\n rstate=rstate) + mid\n pts[:,\n 2:] = (rstate.uniform(size=(npt, ndim - 2)) * 2 - 1) * w / 2 + mid\n volume = w**(ndim - 2) * np.pi * ((r0 + w / 2)**2 - (r0 - w / 2)**2)\n elif typ == 'cylinder':\n w = 0.01\n r0 = 0.45\n a = 1\n pts = np.zeros((npt, ndim))\n pts[:, :2] = genshell(r0 - w / 2, r0 + w / 2, npt, 2,\n rstate=rstate) + mid\n pts[:, 2:] = rstate.uniform(size=(npt, ndim - 2)) * a\n volume = np.pi * ((r0 + w / 2)**2 - (r0 - w / 2)**2)\n elif typ == 'shell':\n r1 = 0.45\n r2 = 0.46\n pts = genshell(r1, r2, npt, ndim, rstate=rstate) + mid\n volume = (np.pi**(ndim / 2) / scipy.special.gamma(ndim / 2 + 1) *\n (r2**ndim - r1**ndim))\n else:\n raise RuntimeError('unknown', typ)\n return pts, volume", "def __init__(self, n):\n self.row = [0] * n\n self.col = [0] * n\n self.diagonal = 0\n self.antidiagonal = 0\n self.winning = False", "def __init__(self, number_of_cheeses, number_of_stools):\n self.model = TOAHModel(number_of_stools)\n self.model.fill_first_stool(number_of_cheeses)", "def pseudo_sample(self):\n return (torch.zeros(1, 1, 28, 28), None)", "def generate_initial_sample(pmin, pmax, ntemps, nwalkers):\n\n npl = pmin.npl\n nobs = pmin.nobs\n\n assert npl == pmax.npl, 'Number of planets must agree in prior bounds'\n assert nobs == pmax.nobs, 'Number of observations must agree in prior bounds'\n\n N = pmin.shape[-1]\n\n samps=params.Parameters(arr=np.zeros((ntemps, nwalkers, N)), nobs=nobs, npl=npl)\n\n V=samps.V\n tau=samps.tau\n sigma=samps.sigma\n sigma0=samps.sigma0\n for i in range(nobs):\n V[:,:,i] = nr.uniform(low=pmin.V[i], high=pmax.V[i], size=(ntemps, nwalkers))\n tau[:,:,i] = draw_logarithmic(low=pmin.tau[i], high=pmax.tau[i], size=(ntemps,nwalkers))\n sigma[:,:,i] = draw_logarithmic(low=pmin.sigma[i], high=pmax.sigma[i], size=(ntemps,nwalkers))\n sigma0[:,:,i] = draw_logarithmic(low=pmin.sigma[i], high=pmax.sigma[i], size=(ntemps, nwalkers))\n samps.V=np.squeeze(V)\n samps.tau = np.squeeze(tau)\n samps.sigma = np.squeeze(sigma)\n samps.sigma0 = np.squeeze(sigma0)\n\n if npl >= 1:\n samps.K = np.squeeze(draw_logarithmic(low=pmin.K[0], high=pmax.K[0], size=(ntemps, nwalkers, npl)))\n\n # Make sure that periods are increasing\n samps.n = np.squeeze(np.sort(draw_logarithmic(low=pmin.n, high=pmax.n, size=(ntemps,nwalkers,npl)))[:,:,::-1])\n\n samps.e = np.squeeze(nr.uniform(low=0.0, high=1.0, size=(ntemps, nwalkers,npl)))\n samps.chi = 
np.squeeze(nr.uniform(low=0.0, high=1.0, size=(ntemps, nwalkers,npl)))\n samps.omega = np.squeeze(nr.uniform(low=0.0, high=2.0*np.pi, size=(ntemps, nwalkers,npl)))\n\n return samps", "def random_sample_object_of_size(\n self, n: int, **parameters: int\n ) -> CombinatorialObjectType:", "def test_4_1_5D_cube_init(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0),\n (1, 1, 1, 0, 0), (1, 1, 1, 1, 0), (1, 1, 1, 0, 1),\n (1, 1, 0, 1, 0),\n (1, 1, 0, 1, 1), (1, 1, 0, 0, 1), (1, 0, 1, 0, 0),\n (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1),\n (1, 0, 0, 0, 1), (0, 1, 0, 0, 0), (0, 1, 1, 0, 0),\n (0, 1, 1, 1, 0),\n (0, 1, 1, 1, 1), (0, 1, 1, 0, 1), (0, 1, 0, 1, 0),\n (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1),\n (0, 0, 1, 0, 1), (0, 0, 0, 1, 0), (0, 0, 0, 1, 1),\n (0, 0, 0, 0, 1),\n (0.5, 0.5, 0.5, 0.5, 0.5)]\n\n nn_checks = {(0, 1, 0, 1, 1): [(0, 0, 0, 0, 0), (\n 0.5, 0.5, 0.5, 0.5, 0.5), (0, 0, 0, 1, 1), (1, 1, 0, 1, 1),\n (0, 1, 0, 0, 0),\n (0, 1, 0, 0, 1),\n (0, 1, 0, 1, 0),\n (0, 0, 0, 0, 1),\n (1, 1, 1, 1, 1),\n (0, 1, 1, 1, 1),\n (0, 0, 0, 1, 0)]}\n\n init_triangulation(5, 0, check, nn_checks)", "def sample(self, n_samples):\n\n z = sample_prior((n_samples,) + self.flow.z_shape)\n ldj = torch.zeros(z.size(0))\n\n z, ldj = self.flow (z, ldj, reverse=True)\n z, ldj = self.logit_normalize(z, ldj, reverse=True)\n\n return z", "def __init__(self, dimension, n):\n self.dimension = dimension\n self.n = n\n self.basis = None", "def __init__(self, n=1):\n vertices = [Vertex(i) for i in range(n)]\n for vertex in vertices:\n self.add_vertex(vertex)\n self.populate_graph()", "def __init__(self, n_samples=1000, n_features=4):\n self.n_samples = 1000\n self.n_features = 4\n self.forest = []", "def main(n_samples):\n uso = usolib.uso.uar(N)\n lst = [usolib.randomfacet.randomfacet_sample(uso, N) for i in range(n_samples)]\n return sum(lst) / float(n_samples)", "def generate( k, n, scale = 10, prior = \"uniform\" ): \n\n if prior == \"uniform\":\n # Each topic is a multinomial generated from a Dirichlet\n topics = sc.column_stack( [ dirichlet( sc.ones( n ) * scale\n ) for i in xrange( k ) ] )\n # We also draw the weights of each topic\n weights = dirichlet( sc.ones(k) * scale ) \n\n return TopicModel( weights, topics )\n else:\n # TODO: Support the anchor word assumption.\n raise NotImplementedError", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def setUp(self):\n self.grid = SudukuGrid(BaseCase)\n for i in range(81):\n self.grid[i] = SudukuAlphabet.VALUES[(i+(i//9)*3+i//27)%9]", "def latent_space(size):\n n = torch.randn(size, 100)\n return n", "def __init__(self, n: int):\n self.n = n\n self.rows_1 = [0 for _ in range(n + 1)]\n self.rows_2 = [0 for _ in range(n + 1)]\n self.cols_1 = [0 for _ in range(n + 1)]\n self.cols_2 = [0 for _ in range(n + 1)]\n self.diag1 = [0 for _ in range(n + 1)]\n self.diag2 = [0 for _ in range(n + 1)]", "def sample(self, n=1):\n raise NotImplementedError", "def test_3_1_4D_cube_init(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0), (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1),\n (1, 0, 0, 1), (0, 1, 0, 0), (0, 1, 1, 0), (0, 1, 1, 1),\n (0, 1, 0, 1), (0, 0, 1, 0), (0, 0, 1, 1), (0, 0, 0, 1),\n (0.5, 0.5, 0.5, 0.5)]\n nn_checks = {(0, 1, 1, 0): [(1, 1, 1, 0), (0, 1, 1, 1), (1, 1, 1, 1),\n (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 0),\n (0.5, 0.5, 0.5, 
0.5)],\n (0.5, 0.5, 0.5, 0.5): [(1, 1, 0, 1), (1, 0, 1, 1),\n (1, 1, 1, 0), (1, 0, 0, 0),\n (1, 1, 0, 0), (1, 0, 1, 0),\n (0, 1, 1, 1), (0, 0, 0, 1),\n (1, 1, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0), (0, 0, 1, 0),\n (0, 0, 0, 0), (0, 1, 1, 0),\n (0, 1, 0, 1), (0, 0, 1, 1)],\n (1, 0, 0, 0): [(1, 1, 0, 1), (1, 0, 1, 1), (1, 1, 1, 0),\n (1, 1, 0, 0), (1, 0, 1, 0), (1, 1, 1, 1),\n (1, 0, 0, 1), (0, 0, 0, 0),\n (0.5, 0.5, 0.5, 0.5)]}\n\n init_triangulation(4, 0, check, nn_checks)", "def generate_synth_data(n):", "def generaCubo(self):\r\n #Use Panda predefined format for vertex coordinate only\r\n format = GeomVertexFormat.getV3()\r\n \r\n #Build Vertex data using the created format. Vertex will never change so I use Static attribute \r\n vdata = GeomVertexData('CuboData', format, Geom.UHStatic)\r\n \r\n #I will have to write vertex data so I create a writer for these data\r\n vertex = GeomVertexWriter(vdata, 'vertex')\r\n \r\n #I now use the writer to add vertex data\r\n vertex.addData3f(0, 0, 0)\r\n vertex.addData3f(1, 1, 1)\r\n vertex.addData3f(0, 1, 1)\r\n vertex.addData3f(0, 1, 0)\r\n vertex.addData3f(0, 0, 1)\r\n vertex.addData3f(1, 0, 0)\r\n vertex.addData3f(1, 0, 1)\r\n vertex.addData3f(1, 1, 0)\r\n \r\n #I now create 12 triangles\r\n prim = GeomTriangles(Geom.UHStatic)\r\n\r\n #and then I add vertex to them\r\n #Next time use addVertices(0,1,2) !!!\r\n prim.addVertex(7)\r\n prim.addVertex(0)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(3)\r\n prim.addVertex(0)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(2)\r\n prim.addVertex(6)\r\n prim.addVertex(4)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(6)\r\n prim.addVertex(2)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(7)\r\n prim.addVertex(2)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(2)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(3)\r\n prim.addVertex(4)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(2)\r\n prim.addVertex(4)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(0)\r\n prim.addVertex(6)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(4)\r\n prim.addVertex(6)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(5)\r\n prim.addVertex(1)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(6)\r\n prim.addVertex(1)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n #Create a Geom to bing vertex data to primitives\r\n geom = Geom(vdata)\r\n geom.addPrimitive(prim)\r\n\r\n #Create a node for the Geom in order to be able to render it\r\n node = GeomNode('gnode')\r\n node.addGeom(geom)\r\n\r\n #Adde the node to the scene graph == render it!\r\n nodePath = render.attachNewNode(node)\r\n \r\n #is this needed?\r\n nodePath.setPos( 0, 5, 0)\r\n \r\n self.camera.lookAt(nodePath)\r\n \r\n base.setBackgroundColor( .0, .0, .0 )\r\n \r\n taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")", "def _generate(self, **kwargs):\n N = self.parameter_schema['N']\n parameter_count = len(self._parameter_names)\n common_override_kwargs = {}\n override_kwargs = self._sampler_overrides(common_override_kwargs)\n if kwargs:\n kwargs.update(override_kwargs)\n else:\n kwargs = override_kwargs\n __import__(\"SALib.sample\", fromlist=[self.sampler_class])\n sampler = getattr(SALib.sample, self.sampler_class)\n problem = self.parameter_schema[\"problem\"]\n self._samples = sampler.sample(problem, N, 
**kwargs)\n self._samples = numpy.unique(self._samples, axis=0)\n super()._generate()", "def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.__iteration_number = kwargs['iteration_number']\n self.__particles = [\n PSOParticle(**kwargs, bit_generator=self._random)\n for _ in range(kwargs['particles'])\n ]\n\n # The library stores particles in the visualizer .... groan\n positions = [particle.position for particle in self.__particles]\n self._visualizer = NoVisualizer(**kwargs)\n self._visualizer.add_data(positions=positions)", "def construct_data_LHS(model,num_pts,seed=None):\n num_dims = model.num_dims\n rv_trans = define_random_variable_transformation_hydromad(model)\n pts = latin_hypercube_design( num_pts, num_dims, seed )\n # returns points on [0,1] but need pts on [-1,1]\n pts = 2*pts-1.\n pts = rv_trans.map_from_canonical_distributions( pts )\n vals = model.evaluate_set( pts )\n numpy.savetxt( 'pts.txt', pts, delimiter = ',' )\n numpy.savetxt( 'vals.txt', vals, delimiter = ',' )", "def create_samples(self):\n sample_list = []\n genes = []\n for record in range(len(self.data_dict[\"samples\"])):\n sample_id = self.data_dict[\"samples\"][record]\n genes_cols = list(self.data_dict.keys())[2:]\n for gene in genes_cols:\n genes.append(self.data_dict[gene][record])\n label = self.data_dict[\"type\"][record]\n sample_list.append(Sample(sample_id, genes, label))\n genes = []\n return sample_list", "def __init__(self, n):\n self._n = n\n self._grid = [[False] * n for _ in range(n)]\n # create sites for n-by-n grid and 2 \"virtual\" sites for top and bottom\n # self._uf = QuickFindUF(n * n + 2)\n self._uf = WeightedQuickUnionUF(n * n + 2) # QuickFindUF(n * n + 2)\n # connect top and bottom virtual sites with respecting sides of grid\n self._top_idx = n * n\n self._bottom_idx = n * n + 1\n for i in range(n):\n self._uf.union(self._top_idx, i)\n self._uf.union(self._bottom_idx, (n - 1) * n + i)", "def load_hypercube(fname):\n # Get header\n f = open(fname, 'r')\n hdr = f.readline()[2:-1].split(\" \")\n f.close()\n\n # Load data\n dat = np.loadtxt(fname).T\n\n # Build dict\n sample_points = {}\n for i in range(len(hdr)):\n sample_points[hdr[i]] = dat[i]\n return sample_points", "def generate_samples(mu1,cov,number_of_samples):\n samples = np.random.multivariate_normal(mu1, cov,number_of_samples)\n return samples", "def __init__(self, cube_size, time_range):\n\n # cubesize is in z,y,x for interactions with tile/image data\n self.zdim, self.ydim, self.xdim = self.cubesize = [cube_size[2], cube_size[1], cube_size[0]]\n self.time_range = time_range\n self._newcube = False", "def n_simplex(self, dim_n=3, project = True):\n verts = permutations([0 for i in range(dim_n)] + [1])\n if project: verts = [Polytopes.project_1(x) for x in verts]\n return Polyhedron(vertices = verts)", "def __init__(self, n_samples, max_iter=1000, verbose=1, eps=1e-3):\n\n\t\tself.sparam = dict()\n\t\twith open('config.json') as config_file:\n\t\t\tself.sparam = json.load(config_file)\n\n\t\tself.C = self.sparam['C']\n\t\tself.sizePsi = self.sparam['sizePsi']\n\t\tself.num_classes = self.sparam['num_classes']\n\t\t#self.w = np.random.rand(sparam['sizePsi'],1)\n\t\tself.w = np.zeros((self.sparam['sizePsi'],1))\n\t\tself.tempw = np.zeros((self.sparam['sizePsi'],1))\n\t\t#self.tempw = np.random.rand(self.sparam['sizePsi'], 1)\n\t\t#self.tempw = np.random.rand(self.sparam['sizePsi'])\n\t\t#self.tempw[0:self.sizePsi/2] = np.zeros(self.sizePsi/2)\n\t\t#self.tempw = self.tempw.reshape(self.sizePsi, 1)\n\t\t#print 
np.sum(self.tempw)\n\t\tself.w_changed = False\n\t\tself.n = n_samples\n\t\tself.max_iter = max_iter\n\t\tself.verbose = verbose\n\t\tself.eps = eps\n\t\tself.alphas = []\n\t\tself.losses = []", "def generate_samples(self,n_samples=100):\n rnd_input=torch.randn((n_samples,self._reparam_nodes[1]))\n zeta=rnd_input \n # rnd_input=torch.where((rnd_input>0.5),torch.ones(rnd_input.size()),torch.zeros(rnd_input.size()))\n # print(rnd_input) \n # output, mu, logvar, zeta=self.forward(rnd_input)\n # mu = self._reparam_layers['mu'](rnd_input)\n # logvar = self._reparam_layers['var'](rnd_input)\n # zeta = self.reparameterize(mu, logvar)\n output = self.decoder.decode(zeta)\n return output", "def reconstructions_sample(self, n=()):\n self.assert_sampled()\n return [[j.sample(sample_shape=n, seed=self.randint).numpy()\n for j in i]\n for i in self._reconstructions]", "def _generate(self, **kwargs):\n self._samples = numpy.array(list(itertools.product(*self.parameter_schema.values())), dtype=object)\n super()._generate()", "def resample(self):\n self.X = np.random.uniform(-1, 1, (100, 2))\n self.generate_y()\n self.phi = None\n self.K = 0\n self.centers = None\n self.cluster_sizes = None\n self.g = None\n self.gamma = None", "def __init__(self, n):\n self.n = n\n self.w = np.zeros(self.n)\n self.z = np.zeros(self.n)\n self.F = 0\n self.M = 0", "def __init__(self, i, h, o):\n self.Wh = np.random.normal(size=(i+h, h))\n self.Wy = np.random.normal(size=(h, o))\n self.bh = np.zeros((1, h))\n self.by = np.zeros((1, o))", "def _init_empty_polyhedron(self):\n self._ambient_dim = 0\n\n self._Vrepresentation = Sequence([])\n self._Vrepresentation.set_immutable()\n \n self._Hrepresentation = Sequence([])\n Equation(self, [-1]);\n self._Hrepresentation.set_immutable()\n\n self._V_adjacency_matrix = matrix(ZZ, 0, 0, 0)\n self._V_adjacency_matrix.set_immutable()\n\n self._H_adjacency_matrix = matrix(ZZ, 1, 1, 0)\n self._H_adjacency_matrix.set_immutable()", "def create_start_data(self):\n\t\tdef inputMesh(feature_size):\n\t\t\tc1= np.expand_dims(np.array([0,-0.9]),0)\n\t\t\tc2= np.expand_dims(np.array([-0.9,0.9]),0)\n\t\t\tc3= np.expand_dims(np.array([0.9,0.9]),0)\n\t\t\tx1 = np.expand_dims(np.pad(np.array([0,-0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx2 = np.expand_dims(np.pad(np.array([-0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx3 = np.expand_dims(np.pad(np.array([0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tedge_index = np.transpose(np.array([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]])) # COO format\n\t\t\treturn np.concatenate((c1,c2,c3),axis=0), np.concatenate((x1,x2,x3),axis=0),edge_index\n\n\t\tc, x, edge_index = inputMesh(self.params.feature_size)# x is c with zeros appended, x=f ..pixel2mesh\n\t\tdata_list_x = []\n\t\tdata_list_c = []\n\t\tdata_list_pid = []\n\t\tfor i in range(self.params.batch_size):\n\t\t\tdata_list_x.append(Data(x=torch.Tensor(x).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_c.append(Data(x=torch.Tensor(c).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_pid.append(Data(x=torch.zeros(c.shape[0],1).type(dtypeL).requires_grad_(False)))\n\t\tbatch_x = Batch.from_data_list(data_list_x)\n\t\tbatch_c = Batch.from_data_list(data_list_c)\n\t\tbatch_pid = Batch.from_data_list(data_list_pid)\n\t\treturn batch_x, batch_c, batch_pid", "def labelledCube(self, dim=None, sample=None):\n if dim is None:\n dim = self.D\n if sample is None:\n 
sample = range(1, int(self.poolSize)+1)\n \n all_labels = list(it.product(*(range(self.slices),) * dim))\n self.sample_labels = set(random.sample(all_labels, k= len(sample)))\n labelled_sample = {label : sample for label, sample in zip(self.sample_labels, sample)}\n self.text[\"labelledSamples\"] = labelled_sample\n return labelled_sample", "def __init__(self, n_in, n_out):\n self.W = np.random.randn(n_in, n_out) * 0.1\n self.b = np.zeros(n_out)", "def view_surface_rec(self, x, n_max=1000, random_state=42, title=None, dataset_name=None):\n if self.comet_exp is not None:\n # If comet_exp is set, use different backend to avoid display errors on clusters\n matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\n import matplotlib.pyplot as plt\n from grae.data.manifolds import set_axes_equal\n\n np.random.seed(random_state)\n\n x_hat = self.reconstruct(x)\n x, y = x.numpy()\n\n if x.shape[0] > n_max:\n sample_mask = np.random.choice(x.shape[0], size=n_max, replace=False)\n x_hat = x_hat[sample_mask]\n x = x[sample_mask]\n y = y[sample_mask]\n\n scene_dict = dict(SwissRoll=(0, 0), Mammoth=(-15, 90), ToroidalHelices=(30, 0))\n if dataset_name in scene_dict:\n tilt, rotation = scene_dict[dataset_name]\n else:\n tilt, rotation = 0, 0\n\n # set up a figure twice as wide as it is tall\n fig = plt.figure(figsize=plt.figaspect(0.5))\n\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n ax.view_init(tilt, rotation)\n ax.set_title('Input')\n ax.scatter(*x.T, c=y, cmap='jet', edgecolor='k')\n set_axes_equal(ax)\n\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n\n ax.view_init(tilt, rotation)\n ax.set_title('Reconstruction')\n ax.scatter(*x_hat.T, c=y, cmap='jet', edgecolor='k')\n set_axes_equal(ax)\n\n\n if title is not None:\n fig.suptitle(title, fontsize=20)\n\n if self.comet_exp is not None:\n self.comet_exp.log_figure(figure=plt, figure_name=title)\n plt.clf()\n else:\n plt.show()", "def __init__(self,nback=1,ntokens_pm=2,ntokens_og=3,stimdim=2,seed=99):\n np.random.seed(seed)\n tr.manual_seed(seed)\n self.nback = nback\n # embedding\n self.ntokens_pm = ntokens_pm\n self.ntokens_og = ntokens_og\n self.stimdim = stimdim\n # emat\n self.randomize_emat()\n return None", "def test_gan_qiskit(n, Database):\n mini = np.min(Database)\n maxi = np.max(Database)\n h = (maxi - mini) / (2 ** n)\n bins = [[k for d in Database if mini + h * k < d < mini + h * (k + 1)] for k in range(2 ** n)]\n interv = [mini + h * k for k in range(2 ** n)]\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = 10598\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n gan_test = QGAN(Database, num_qubits=[n], snapshot_dir=None,\n quantum_instance=quantum_instance, batch_size=int(len(Database) / 20), num_epochs=300)\n gan_test.train()\n samp, bins_var = gan_test.generator.get_output(gan_test.quantum_instance, shots=4096)\n\n compar = [len(b) / len(Database) for b in bins]\n if len(interv) == len(compar):\n plt.plot(interv, compar)\n\n plt.plot(interv, bins_var)\n\n plt.show()", "def init_homographies(self, homography_path, n_images):\n with open(homography_path) as f:\n h_data = f.readlines()\n h_scale = h_data[0].rstrip().split(' ')\n self.h_scale_x = int(h_scale[1])\n self.h_scale_y = int(h_scale[2])\n h_bounds = h_data[1].rstrip().split(' ')\n self.h_bounds_x = [float(h_bounds[1]), float(h_bounds[2])]\n self.h_bounds_y = [float(h_bounds[3]), float(h_bounds[4])]\n homographies = h_data[2:2 + n_images]\n homographies = 
[torch.from_numpy(np.array(line.rstrip().split(' ')).astype(np.float32).reshape(3, 3)) for line\n in\n homographies]\n self.homographies = homographies", "def sample(self, n):\n raise NotImplementedError" ]
[ "0.7344228", "0.70358026", "0.6742598", "0.66756856", "0.66756856", "0.66473216", "0.6490043", "0.6219821", "0.61849946", "0.5989805", "0.5899154", "0.5826263", "0.57939756", "0.5752896", "0.57472134", "0.5707989", "0.5667523", "0.5647223", "0.5642459", "0.5631831", "0.55860597", "0.55722594", "0.55670935", "0.55549943", "0.5521048", "0.55015755", "0.5500793", "0.548168", "0.54805875", "0.54661304", "0.5465828", "0.54652905", "0.54336905", "0.53997153", "0.53947407", "0.53863263", "0.53831786", "0.53694826", "0.536645", "0.53449196", "0.532726", "0.5314865", "0.52996874", "0.52964103", "0.5285389", "0.5283856", "0.5274502", "0.52640295", "0.52618", "0.52577335", "0.5249758", "0.52486694", "0.5232152", "0.5227412", "0.5224593", "0.5215586", "0.5215308", "0.52136815", "0.5211935", "0.52034837", "0.5198511", "0.51967734", "0.5193326", "0.51876485", "0.518466", "0.5181856", "0.51705223", "0.51680934", "0.516793", "0.51662", "0.51651675", "0.51615375", "0.51598644", "0.51565355", "0.5154698", "0.51540935", "0.51501155", "0.51463443", "0.5144513", "0.51418734", "0.5138205", "0.5137083", "0.51350915", "0.5134563", "0.5134352", "0.5129879", "0.51263607", "0.5126178", "0.5121261", "0.5114322", "0.5112676", "0.5104572", "0.5104282", "0.5101256", "0.50980407", "0.50959635", "0.5089722", "0.5076849", "0.5075957", "0.50732213" ]
0.6458857
7
Creates the initial search space using Latin hypercube sampling.
def lhs_start(hyperbounds, n_samples, rng=None): low_bounds = [] high_bounds = [] for bound in hyperbounds: low_bounds.append(bound[0]) high_bounds.append(bound[1]) low_bounds = np.array(low_bounds, dtype=object) high_bounds = np.array(high_bounds, dtype=object) samples = sample_latin_hypercube(low_bounds, high_bounds, n_samples, rng=rng) samples = samples.tolist() return samples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_latin_hypercube(samples, param_dict, class_root, seed=10):\n # Set random seed\n random.seed(seed)\n\n # Create dictionary to hold sampled parameter values\n sample_points = {}\n for key in param_dict.keys():\n sample_points[key] = np.zeros(samples)\n Ndim = len(param_dict.keys())\n pnames = [key for key in param_dict.keys()]\n\n # List of indices for each dimension\n l = [range(samples) for j in range(Ndim)]\n\n # Generate samples until there are no indices left to choose\n for i in range(samples):\n\n # Randomly choose index and then remove the number that was chosen\n # (Latin hypercubes require at most one item per row and column)\n for j, p in enumerate(pnames):\n pmin, pmax = param_dict[p]\n idx = random.choice(l[j])\n\n # Get value at this sample point (add 0.5 to idx get bin centroid)\n sample_points[p][i] = pmin + (pmax - pmin) \\\n * (idx + 0.5) / float(samples)\n l[j].remove(idx) # Remove choice from list (sampling w/o replacement)\n\n return sample_points", "def latin_hypercube(n_pts, dim):\n X = np.zeros((n_pts, dim))\n centers = (1.0 + 2.0 * np.arange(0.0, n_pts)) / float(2 * n_pts)\n for i in range(dim): # Shuffle the center locataions for each dimension.\n X[:, i] = centers[np.random.permutation(n_pts)]\n\n # Add some perturbations within each box\n pert = np.random.uniform(-1.0, 1.0, (n_pts, dim)) / float(2 * n_pts)\n X += pert\n return X", "def latin_hypercube(n_pts, mins, maxs):\n #return a latin_hypercube\n design = lhs(np.size(maxs), samples=n_pts)\n for i in range(2):\n design[:, i] = design[:, i] * (maxs[i]-mins[i]) + mins[i]\n return design", "def localInitialize(self):\n SVL = self.readFromROM()\n self._generateQuadsAndPolys(SVL)\n #print out the setup for each variable.\n msg = self.printTag+' INTERPOLATION INFO:\\n'\n msg += ' Variable | Distribution | Quadrature | Polynomials\\n'\n for v in self.quadDict:\n msg += ' '+' | '.join([v,self.distDict[v].type,self.quadDict[v].type,self.polyDict[v].type])+'\\n'\n msg += ' Polynomial Set Degree: '+str(self.maxPolyOrder)+'\\n'\n msg += ' Polynomial Set Type : '+str(SVL.indexSetType)+'\\n'\n self.raiseADebug(msg)\n\n self.raiseADebug('Starting index set generation...')\n self.indexSet = IndexSets.factory.returnInstance(SVL.indexSetType)\n self.indexSet.initialize(self.features, self.importanceDict, self.maxPolyOrder)\n if self.indexSet.type=='Custom':\n self.indexSet.setPoints(SVL.indexSetVals)\n\n self.sparseGrid = Quadratures.factory.returnInstance(self.sparseGridType)\n self.raiseADebug(f'Starting {self.sparseGridType} sparse grid generation...')\n self.sparseGrid.initialize(self.features, self.indexSet, self.dists, self.quadDict, self.jobHandler)\n\n if self.writeOut is not None:\n msg = self.sparseGrid.__csv__()\n outFile = open(self.writeOut,'w')\n outFile.writelines(msg)\n outFile.close()\n\n self.limit=len(self.sparseGrid)\n self.raiseADebug(f'Size of Sparse Grid: {self.limit}')\n self.raiseADebug('Finished sampler generation.')\n\n self.raiseADebug('indexset:',self.indexSet)\n for SVL in self.ROM.supervisedContainer:\n SVL.initialize({'SG': self.sparseGrid,\n 'dists': self.dists,\n 'quads': self.quadDict,\n 'polys': self.polyDict,\n 'iSet': self.indexSet})", "def setUp(self):\n self.grid = SudukuGrid(BaseCase)\n for i in range(81):\n self.grid[i] = SudukuAlphabet.VALUES[(i+(i//9)*3+i//27)%9]", "def sample_latin_hypercube(low, high, n_samples, rng=None):\n if rng is None:\n rng = np.random.RandomState(np.random.randint(0, 10000))\n\n n_dims = low.shape[0]\n\n samples = []\n for i in 
range(n_dims):\n if isinstance(low[i], numbers.Integral):\n sample = random.sample(range(low[i], high[i]), n_samples)\n elif isinstance(low[i], numbers.Real):\n lower_bound = low[i]\n upper_bound = high[i]\n sample = lower_bound + rng.uniform(0, 1, n_samples) * (upper_bound - lower_bound)\n else:\n raise ValueError('Latin hypercube sampling can only draw from types int and real,'\n ' got {}!'.format(type(low[i])))\n\n samples.append(sample)\n\n samples = np.array(samples, dtype=object)\n\n for i in range(n_dims):\n rng.shuffle(samples[i, :])\n\n return samples.T", "def latin_hypercube_sampler(n=1, indim=1, bounds=None, rng=None):\r\n rng = ensure_rng(rng)\r\n if bounds is None:\r\n bounds = np.zeros((indim, 2))\r\n bounds[:,1] = 1. \r\n # Divide each dimension into `n` equal intervals\r\n hypercubes = np.linspace(bounds[:,0], bounds[:,1], n+1)\r\n \r\n l = hypercubes[:-1,:].reshape(-1,)\r\n u = hypercubes[1:,:].reshape(-1,)\r\n _x = rng.uniform(l,u, (1, indim*n)).reshape(n, indim)\r\n x = _x\r\n for j in range(indim):\r\n x[:,j] = _x[rng.permutation(n), j]\r\n return x", "def initialize(self):\n#TODO: choose user defined START position\n values_type = np.dtype(float)\n self.visual_field = np.zeros(self.number_of_locs, dtype=values_type)\n self.weighted_sums = np.zeros(self.number_of_locs, dtype=values_type)\n self.prior_prob = 1.0 / np.prod(self.number_of_locs)\n self.post_probs = np.full(\n self.number_of_locs, self.prior_prob, dtype=values_type\n )\n starting_location = np.array(START)\n self.focus = get_index_of_in(starting_location,self.senzory_map)\n self.target_location = [\n x for x in xrange(self.number_of_locs) if x != self.focus\n ][random.randint(0,self.number_of_locs-2)]", "def __init__(self, initial, size, horizontalChunks, verticalChunks, goal = \"\"):\n\t\tself.initial = initial\n\t\tself.size = size\n\t\tself.horChunks = horizontalChunks\n\t\tself.verChunks = verticalChunks\n\n\t\t# Goal holds the solution, once we find it.\n\t\tself.goal = goal\n\n\t\t# For a puzzle of size n, initializes blank n x n 2d array\n\t\tself.graph = [[0 for x in range(self.size)] for x in range(self.size)] \n\t\tfor i in range (0,self.size):\n\t\t\tfor j in range (0,self.size):\n\t\t\t\tself.graph[i][j] = initial[i*self.size + j] \n\t\tself.initial = \"\"", "def gen_hypercube(samples, N):\n\n np.random.seed(4654562)\n hypercube = lhs(N, samples=samples)\n\n return hypercube", "def __init__(self):\n super().__init__()\n self.type = 'SparseGridCollocationSampler'\n self.printTag = 'SAMPLER '+self.type.upper()\n self.maxPolyOrder = None #L, the relative maximum polynomial order to use in any dimension\n self.indexSetType = None #TP, TD, or HC; the type of index set to use\n self.polyDict = {} #varName-indexed dict of polynomial types\n self.quadDict = {} #varName-indexed dict of quadrature types\n self.importanceDict = {} #varName-indexed dict of importance weights\n self.maxPolyOrder = None #integer, relative maximum polynomial order to be used in any one dimension\n self.lastOutput = None #pointer to output dataObjects object\n self.ROM = None #pointer to ROM\n self.jobHandler = None #pointer to job handler for parallel runs\n self.doInParallel = True #compute sparse grid in parallel flag, recommended True\n self.dists = {} #Contains the instance of the distribution to be used. 
keys are the variable names\n self.writeOut = None\n self.indexSet = None\n self.sparseGrid = None\n self.features = None\n self.sparseGridType = None\n self.addAssemblerObject('ROM', InputData.Quantity.one)", "def initialise():\n _initialiseGlobals()\n for pop in AnadPartOfPerspectiveDb.Iterator():\n _addToKnowledge(pop)\n return", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)", "def setUp(self):\n # generate lattice\n self.lattice = lattice.Lattice()\n self.lattice.addAtom(\"He\", [0,0,0], 0)\n self.lattice.addAtom(\"He\", [2,0,0], 0)\n self.lattice.addAtom(\"He\", [0,2,0], 0)\n self.lattice.addAtom(\"He\", [0,0,2], 0)\n self.lattice.addAtom(\"He\", [9,9,9], 0)\n self.lattice.addAtom(\"He\", [2,2,0], 0)\n self.lattice.addAtom(\"He\", [2,0,2], 0)\n self.lattice.addAtom(\"He\", [0,2,2], 0)\n self.lattice.addAtom(\"He\", [2,2,2], 0)\n \n # indexes of cluster atoms\n self.bigClusterIndexes = [0,1,2,3,5,6,7,8]\n self.smallClusterIndexes = [4]\n \n # filter\n self.filter = clusterFilter.ClusterFilter(\"Cluster\")", "def main():\n\n parser = argparse.ArgumentParser(description='Create a new Wordsearch')\n parser.add_argument('size', type=grid_size_type,\n help=\"height and width of our wordsearch grid (min: 3)\")\n parser.add_argument('wordfile', type=argparse.FileType('r'),\n help=\"file including words to search for\")\n parser_args = parser.parse_args()\n\n new_matrix = Matrix(parser_args.size)\n\n words_to_find = create_word_list_from_file(parser_args.wordfile, parser_args.size)\n\n words_found = []\n for word in words_to_find:\n if word not in words_found and word in new_matrix:\n words_found.append(word)\n\n print(\"\\n{}\\n\\n{}\\n\".format(new_matrix, \" \".join(sorted(words_found))))", "def at_object_creation(self):\r\n with open(\"./commands/CSW15.txt\") as word_file:\r\n self.db.csw15 = set(word.strip().upper() for word in word_file)\r\n self.db.centre = \"\" \r\n self.db.tiledict = {'A' : 9,\r\n 'B' : 2,\r\n 'C' : 2,\r\n 'D' : 4,\r\n 'E' : 12,\r\n 'F' : 2,\r\n 'G' : 3,\r\n 'H' : 2,\r\n 'I' : 9,\r\n 'J' : 1,\r\n 'K' : 1,\r\n 'L' : 4,\r\n 'M' : 2,\r\n 'N' : 6,\r\n 'O' : 8,\r\n 'P' : 2,\r\n 'Q' : 1,\r\n 'R' : 6,\r\n 'S' : 4,\r\n 'T' : 6,\r\n 'U' : 4,\r\n 'V' : 2,\r\n 'W' : 2,\r\n 'X' : 1,\r\n 'Y' : 2,\r\n 'Z' : 1,\r\n '?' : 0\r\n } #removing blanks from play; blanks make it very slow. 
Change here, in dict\r\n self.db.tilestring = list(''.join([L*self.db.tiledict[L] for L in string.ascii_uppercase+'?']))", "def setUp(self):\n np.random.seed(1234)\n\n _TEST_FILE_NAME = 'AHN3.las'\n _TEST_DATA_SOURCE = 'testdata'\n\n _CYLINDER = InfiniteCylinder(4)\n _PC_260807 = load(os.path.join(_TEST_DATA_SOURCE, _TEST_FILE_NAME))\n _PC_1000 = copy_point_cloud(_PC_260807, array_mask=(\n np.random.choice(range(len(_PC_260807[keys.point]['x']['data'])), size=1000, replace=False)))\n _1000_NEIGHBORHOODS_IN_260807 = list(compute_neighbors.compute_neighborhoods(_PC_260807, _PC_1000, _CYLINDER))\n\n self.point_cloud = _PC_260807\n self.neigh = _1000_NEIGHBORHOODS_IN_260807", "def convert_searchspace(self, hyperparameter):\n LOG.debug(\"convert input parameter\\n\\n\\t{}\\n\".format(pformat(hyperparameter)))\n searchspace = [[], []]\n for name, param in hyperparameter.items():\n if param[\"domain\"] != \"categorical\" and \"frequency\" not in param.keys():\n param[\"frequency\"] = DEFAULTGRIDFREQUENCY\n warnings.warn(\"No frequency field found, used default gridsearch frequency {}\".format(DEFAULTGRIDFREQUENCY))\n\n if param[\"domain\"] == \"categorical\":\n searchspace[0].append(name)\n searchspace[1].append(param[\"data\"])\n elif param[\"domain\"] == \"uniform\":\n searchspace[0].append(name)\n searchspace[1].append(get_uniform_axis_sample(param[\"data\"][0],\n param[\"data\"][1],\n param[\"frequency\"],\n param[\"type\"]))\n elif param[\"domain\"] == \"normal\":\n searchspace[0].append(name)\n searchspace[1].append(get_gaussian_axis_sample(param[\"data\"][0],\n param[\"data\"][1],\n param[\"frequency\"],\n param[\"type\"]))\n elif param[\"domain\"] == \"loguniform\":\n searchspace[0].append(name)\n searchspace[1].append(get_logarithmic_axis_sample(param[\"data\"][0],\n param[\"data\"][1],\n param[\"frequency\"],\n param[\"type\"]))\n return searchspace", "def initialize(self, search_space, names, outer_i=None):\n name = search_space.name\n names = copy.deepcopy(names)\n names.append(name)\n output_dim = self.cells[-1].hidden_size\n\n num_inner = self.search_space.eval_(search_space.num_inner, **locals())\n if len(num_inner) > 1:\n key = f'{\"_\".join(names[:-1])}_{len(num_inner)}_{name}s'\n add_if_doesnt_exist(self.policies, key, nn.Linear(output_dim, len(num_inner)))\n add_if_doesnt_exist(self.values, key, nn.Linear(output_dim, len(num_inner)))\n\n add_increment(self.embedding_index, f'{name}_start')\n add_increment(self.embedding_index, f'{name}_end')\n\n self.adapt(search_space.outer.items(), names, outer_i)\n\n for i in range(max(num_inner)):\n add_increment(self.embedding_index, f'{i+1}_{name}s')\n if isinstance(search_space.inner, (list, tuple)):\n for space in search_space.inner: self.initialize(space, names, i)\n elif isinstance(search_space.inner, SearchSpace):\n self.initialize(search_space.inner, names, i)\n else:\n assert isinstance(search_space.inner, dict), \\\n 'Inner search space must be either list, dict or SearchSpace.'\n self.adapt(search_space.inner.items(), names, outer_i)\n add_increment(self.embedding_index, f'{name}_inner_done')", "def initialize(self):\n self.SIZE = self.vectors.shape[0]\n # todo can use max distance to allocation farthest apart points\n self.centroids = self.vectors[[random.randint(1, self.SIZE) for x in range(self.K)], :]", "def grid_search(self):\n\t\t''' common settings without grid-search '''\n\t\tbinary_rele, unknown_as_zero = False, False\n\t\tcommon_data_dict = dict(data_id=self.data_id, dir_data=self.dir_data, min_docs=10, 
min_rele=1,\n\t\t\t\t\t\t\t\tunknown_as_zero=unknown_as_zero, binary_rele=binary_rele)\n\n\t\tdata_meta = get_data_meta(data_id=self.data_id) # add meta-information\n\t\tcommon_data_dict.update(data_meta)\n\n\t\t''' some settings for grid-search '''\n\t\tchoice_presort = [True] if self.debug else [True]\n\t\tchoice_sample_rankings_per_q = [1] if self.debug else [1] # number of sample rankings per query\n\t\tchoice_scale_data, choice_scaler_id, choice_scaler_level = get_default_scaler_setting(data_id=self.data_id, grid_search=True)\n\n\t\tfor scale_data, scaler_id, scaler_level, presort, sample_rankings_per_q in product(choice_scale_data,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_scaler_id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_scaler_level,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_presort,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_sample_rankings_per_q):\n\n\t\t\tself.data_dict = dict(presort=presort, sample_rankings_per_q=sample_rankings_per_q,\n\t\t\t\t\t\t\t\t scale_data=scale_data, scaler_id=scaler_id, scaler_level=scaler_level)\n\t\t\tself.data_dict.update(common_data_dict)\n\t\t\tyield self.data_dict", "def initialize( self, layout, numGhostAgents=1000 ):\n self.data.initialize(layout, numGhostAgents) ##self.data is defined in the Grid() class of game.py REF112.It creates an initial game state from a layout array (see layout.py).", "def __init__(self, center_words, context_words, neg_samples): \n self.center_words = center_words\n self.context_words = context_words\n self.neg_samples = neg_samples\n # The index of the data the batch should start from. \n self.data_index = 0", "def test_init_experiment(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n\n exp_ass = LAss.exp_assistants[name]\n\n assert_equal(exp_ass.optimizer, optimizer)\n assert_is_none(exp_ass.optimizer_arguments, None)\n assert_equal(exp_ass.experiment.minimization_problem, minimization)\n with assert_raises(ValueError):\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)", "def __init__(self, hyperparameters, total_dim, num_is):\n self._dim = total_dim # dimension of IS \\times search space\n self._num_is = num_is # Number of information sources, then including 0th IS (truth), size of hyper should be dim * (num_is+1).\n # Note: it's not (dim+1)*(num_is+1) because dimension of search space is (dim-1), plus the multiplication factor param is dim\n self.set_hyperparameters(hyperparameters)", "def build_index(self):\n # Init the HNSWLIB index\n self.create_index()\n logger.info(f\"Building HNSWLIB index, max_elements: {len(self.corpus)}\")\n logger.debug(f\"Parameters Required: M: {self.M}\")\n logger.debug(f\"Parameters Required: ef_construction: {self.ef_construction}\")\n logger.debug(f\"Parameters Required: ef(>topn): {self.ef}\")\n\n # Then we train the index to find a suitable clustering\n self.index.add_items(self.corpus_embeddings, list(range(len(self.corpus_embeddings))))", "def init_embedding(size=50):\n vector = np.random.normal(0.0, 0.01, size)\n return vector", "def __init__(self):\n # better to be a prime number, less collision\n self.key_space = 2069\n self.hash_table = [Bucket() for i in range(self.key_space)]", "def create_index(args, client):\n policy = {}\n 
client.index_geo2dsphere_create(args.nspace, args.set,\n LOCBIN, LOCNDX, policy)\n client.index_integer_create(args.nspace, args.set,\n HSHBIN, HSHNDX, policy)", "def initializeDistribution(self):\n self.checkDistParams()\n\n self.lowerBound = min(self.mapping.keys())\n self.upperBound = max(self.mapping.keys())", "def __init__(self, lower=True, num_norm=True,\n use_char=True, initial_vocab=None):\n self._num_norm = num_norm\n self._use_char = use_char\n \n # TODO: check how to use this\n self._node_vocab = Vocabulary(lower=False)\n self._word_vocab = Vocabulary(lower=lower)\n self._char_vocab = Vocabulary(lower=False)\n #TODO: check usability\n self._label_vocab = Vocabulary(lower=False, unk_token=False)\n\n if initial_vocab:\n self._word_vocab.add_documents([initial_vocab])\n self._char_vocab.add_documents(initial_vocab)", "def init_from_scratch(args, train_exs, dev_exs):\n \n # Build a dictionary from the data sqls+queries (train/dev splits)\n logger.info('-' * 100)\n logger.info('Build vocab')\n \n vocab = build_vocab(train_exs + dev_exs, args)\n logger.info('Num words = %d' % vocab.size())\n \n # Initialize model\n model = CopyNet(args, vocab)\n logger.info('-' * 100)\n logger.info('Model Architecture')\n logger.info(model)\n if args.embedding_file:\n model.load_embeddings(vocab.tokens(), args.embedding_file)\n \n return model, vocab", "def setUp(self):\n self._vocab = np.array([\"one\", \"two\", \"three\", \"four\",\n \"five\", \"six\", \"seven\", \"eight\", \"nine\"])\n self._embedding_dim = 2\n\n self._default_config = {\n \"vocab\": self._vocab,\n \"embedding_dim\": self._embedding_dim,\n \"position_encoding\": None\n }", "def init(x_in):\n global public_keys, secret_keys, x\n x = func.get_bits(x_in)\n\n public_keys, secret_keys = [], []\n\n elgamal.init_g_p_q()\n for i in range(3):\n create_keys(i)", "def main(self):\n grid = self.make_game_grid()\n print(self.grid_size, ' by ', self.grid_size, 'grid')\n trie = self.retrieve_trie()\n if not trie:\n trie = self.read_in_file(self.file)\n self.persist_trie(trie)\n\n all_possible_words = []\n # left to right rows\n all_possible_words = all_possible_words + self.search_columns_and_rows(grid)\n # right to left rows\n all_possible_words = all_possible_words + self.search_columns_and_rows(grid, reverse=True, transpose=False)\n # left to right columns\n all_possible_words = all_possible_words + self.search_columns_and_rows(grid, reverse=False, transpose=True)\n # right to left columns\n all_possible_words = all_possible_words + self.search_columns_and_rows(grid, reverse=True, transpose=True)\n\n # handle all possible sun sets of the array row\n all_possible_words = self.all_words(all_possible_words)\n # get diagonal letters top to bottom\n all_possible_words = all_possible_words + self.get_diagonal_words(grid)\n # get diagonal letters bottom to top\n all_possible_words = all_possible_words + self.get_diagonal_words(grid, reverse=True)\n ans = self.check_words_in_trie(trie, all_possible_words)\n self.sorted_words = sorted(ans, key=len)\n if self.sorted_words:\n print(\"The number of words in the solution is: %s.\" % (len(ans),))\n print(\"The shortest word in the solution is: %s.\" % (self.sorted_words[0],))\n print(\"The longest word in the solution is: %s.\" % (self.sorted_words[-1],))\n print('the possible words in this grid are ', self.sorted_words)\n return self.sorted_words", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def _initialize(self):\n for doc_index, doc in 
enumerate(self.document):\n temp_word_topic_matrix = []\n for word in doc:\n if word in self.word2id.keys():\n start_topic_index = np.random.randint(0, self.K)\n temp_word_topic_matrix.append(start_topic_index)\n self.doc_topic_matrix[doc_index, start_topic_index] += 1\n self.topic_word_matrix[start_topic_index, self.word2id[word]] += 1\n self.topic_matrix[start_topic_index] += 1\n self.current_word_topic_matrix.append(temp_word_topic_matrix)", "def test_1_1_2D_cube_init(self): # TODO: REMOVE FUNC AFTER SPLIT\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5)]\n\n nn_checks = {(0.5, 0.5): [(0, 1), (1, 0), (0, 0), (1, 1)],\n (0, 1): [(0, 0), (1, 1), (0.5, 0.5)]}\n\n init_triangulation(2, 0, check, nn_checks)", "def __init__(self, input_n, input_t, input_language, input_epochs, input_embedding_type, input_clusters_num,\n input_training_data, input_evaluation_data, input_hunits_lower, input_hunits_upper,\n input_embedding_dim_lower, input_embedding_dim_upper, input_c, input_iterations):\n self.n = input_n\n self.t = input_t\n self.language = input_language\n self.epochs = input_epochs\n self.embedding_type = input_embedding_type\n self.clusters_num = input_clusters_num\n self.training_data = input_training_data\n self.evaluation_data = input_evaluation_data\n self.hunits_lower = input_hunits_lower\n self.hunits_upper = input_hunits_upper\n self.embedding_dim_lower = input_embedding_dim_lower\n self.embedding_dim_upper = input_embedding_dim_upper\n self.c = input_c\n self.iterations = input_iterations\n\n # Setting self.lambda to the number of the parameters of the largest possible model\n word_segmenter = WordSegmenter(input_name=\"temp\", input_n=50, input_t=10000,\n input_clusters_num=self.clusters_num,\n input_embedding_dim=self.embedding_dim_upper, input_hunits=self.hunits_upper,\n input_dropout_rate=0.2, input_output_dim=4, input_epochs=1,\n input_training_data=self.training_data,\n input_evaluation_data=self.evaluation_data, input_language=self.language,\n input_embedding_type=self.embedding_type)\n word_segmenter.train_model()\n self.lam = 1/word_segmenter.model.count_params()", "def init_algorithm(self):\n pass", "def create_initial_grid():\n\n\tgrid = {(x, y) : ' + ' for x in range(8) for y in range(8)}\n\n\t# Define initial positions \n\tgrid[(3,3)] = colors.RED + \"[I]\" + colors.STOP\n\tgrid[(4,3)] = colors.GREEN + \"[A]\" + colors.STOP\n\tgrid[(3,4)] = colors.GREEN + \"[A]\" + colors.STOP\n\tgrid[(4,4)] = colors.RED + \"[I]\" + colors.STOP\n\n\treturn grid", "def test_2_1_3D_cube_init(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0), (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5)]\n\n nn_checks = {\n (1, 1, 1): [(1, 1, 0), (0, 1, 1), (1, 0, 0), (0, 0, 1), (1, 0, 1),\n (0.5, 0.5, 0.5), (0, 1, 0)],\n (1, 0, 1): [(1, 0, 0), (0, 0, 1), (0, 0, 0), (0.5, 0.5, 0.5),\n (1, 1, 1)],\n (0.5, 0.5, 0.5): [(1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0, 0),\n (0, 0, 1), (1, 0, 1), (0, 0, 0), (1, 1, 1)]}\n\n init_triangulation(3, 0, check, nn_checks)", "def __init__( self ):\n self.NQ = 16\n self.Nbranches = 3\n self.NatomsUC = 1\n self.dim = 3\n self.QVectors = np.zeros( ( self.NQ , 3 ) )\n self.MakeQVectors()\n self.EigenVectors = np.zeros( [ self.NQ , \n self.Nbranches ,\n self.NatomsUC , \n self.dim ] )\n self.MakeEigenVectors()", "def __init__(self):\n self.space = 1000\n self.hash_table = [Node(-1, -1)] * self.space", "def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n 
self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center", "def _init_vocab(self):\n self._word2idx = {}\n self._idx2word = {}\n self.freqs = {}\n self.vocab_size = 0\n\n self._add_word(self.pad_word)\n self._add_word(self.start_word)\n self._add_word(self.end_word)\n self._add_word(self.unk_word)\n\n self.start_word_idx = self.stoi(self.start_word)\n self.end_word_idx = self.stoi(self.end_word)\n self.unk_word_idx = self.stoi(self.unk_word)\n self.pad_word_idx = self.stoi(self.pad_word)\n\n self._special_tokens = {\n 'bos_token': self.start_word,\n 'cls_token': self.start_word,\n 'eos_token': self.end_word,\n 'sep_token': self.end_word,\n 'pad_token': self.pad_word,\n 'unk_token': self.unk_word,\n }\n\n self._special_ids = {\n 'bos_token_id': self.start_word_idx,\n 'cls_token_id': self.start_word_idx,\n 'eos_token_id': self.end_word_idx,\n 'sep_token_id': self.end_word_idx,\n 'pad_token_id': self.pad_word_idx,\n 'unk_token_id': self.unk_word_idx,\n }\n\n self.cls_token_id = self.bos_token_id = self.start_word_idx\n self.eos_token_id = self.sep_token_id = self.end_word_idx\n self.pad_token_id = self.pad_word_idx\n self.unk_token_id = self.unk_word_idx\n\n self.cls_token = self.bos_token = self.start_word\n self.eos_token = self.sep_token = self.end_word\n self.pad_token = self.pad_word\n self.unk_token = self.unk_word", "def initialize_grid(self):\n self.grid = np.zeros([self.N, self.N, self.N])\n return self.grid", "def setup_StandardGCMCSphereSampler():\n # Make variables global so that they can be used\n global std_gcmc_sphere_sampler\n global std_gcmc_sphere_simulation\n\n pdb = PDBFile(utils.get_data_file(os.path.join('tests', 'bpti-ghosts.pdb')))\n ff = ForceField('amber14-all.xml', 'amber14/tip3p.xml')\n system = ff.createSystem(pdb.topology, nonbondedMethod=PME, nonbondedCutoff=12 * angstroms,\n constraints=HBonds)\n\n ref_atoms = [{'name': 'CA', 'resname': 'TYR', 'resid': '10'},\n {'name': 'CA', 'resname': 'ASN', 'resid': '43'}]\n\n std_gcmc_sphere_sampler = samplers.StandardGCMCSphereSampler(system=system, topology=pdb.topology,\n temperature=300*kelvin, referenceAtoms=ref_atoms,\n sphereRadius=4*angstroms,\n ghostFile=os.path.join(outdir, 'bpti-ghost-wats.txt'),\n log=os.path.join(outdir, 'stdgcmcspheresampler.log'))\n\n # Define a simulation\n integrator = NonequilibriumLangevinIntegrator(temperature=300*kelvin, collision_rate=1./picosecond, timestep=2.*femtoseconds)\n\n try:\n platform = Platform.getPlatformByName('CUDA')\n except:\n try:\n platform = Platform.getPlatformByName('OpenCL')\n except:\n platform = Platform.getPlatformByName('CPU')\n\n std_gcmc_sphere_simulation = Simulation(pdb.topology, system, integrator, platform)\n std_gcmc_sphere_simulation.context.setPositions(pdb.positions)\n std_gcmc_sphere_simulation.context.setVelocitiesToTemperature(300*kelvin)\n std_gcmc_sphere_simulation.context.setPeriodicBoxVectors(*pdb.topology.getPeriodicBoxVectors())\n\n # Set up the sampler\n std_gcmc_sphere_sampler.initialise(std_gcmc_sphere_simulation.context, [3054, 3055, 3056, 3057, 3058])\n\n return None", "def create_sparseDB():\n datas = data.Kmercount_to_matrix()\n datas.run()\n print('***Sparse matrix created***')", "def initialize(self):\n self.U = range(self.K)\n self.H = np.identity(self.rank)\n temp = 0\n self.S = np.zeros([self.rank, self.rank, self.K])\n for k in range(self.K):\n self.S[:, :, k] = np.identity(self.rank)\n temp += self.X[k].T.dot(self.X[k])\n [eigval, 
eigvec] = np.linalg.eig(temp)\n self.V = eigvec[:, range(self.rank)]", "def create_index():", "def __init__(self, initial, goal=None):\n \n #fill the grid with random numbers\n from random import randint as IA\n \n #in switches we must keep track of what numbers are here at the start\n #to prevent switching them.\n initialNumber = [[1 for x in range(size)] for y in range(size)]\n \n for i in range(size):\n for j in range(size):\n if(initial[i][j] == 0):\n x = IA(1,9)\n initialNumber[i][j] = 0\n while(not isLegalInBox(initial,i,j,x)):\n x = IA(1,9)\n initial[i][j] = x\n \n self.initialNumber = initialNumber\n self.initial = initial", "def setUp(self):\n self.cube = _create_2d_cube()", "def _train(self):\n return np.zeros(1, 10)", "def __init__(self, vocabulary_size=1000):\n self.vocabulary_size = vocabulary_size", "def setUp(self):\n problem = setup_house_L(size=(40, 40))\n\n env = MetroLayoutEnv()\n\n costfn = objectives.ConstraintsHeur(problem,\n wmap={'AspectConstraint':0.1,\n 'AreaConstraint': 2\n },\n default=1.)\n\n model = algo.MetropolisHastings(env, costfn)\n\n self.exp = SimpleMH(\n env,\n problem,\n model=model,\n cost_fn=costfn,\n num_iter=1000,\n initializer=PointsInBound(problem, env, size=3, seed=69)\n )", "def init(self):\n\t\tfrom splat_to_db import splat_to_db\n\t\tfrom visualize.clustering_test import clustering_test\n\t\tfrom codense.codense2db import codense2db\n\t\tself.splat_to_db_instance = splat_to_db()\n\t\tself.clustering_test_instance = clustering_test()\n\t\tself.codense2db_instance = codense2db()\n\t\t\n\t\tif not os.path.isdir(self.dir_files):\n\t\t\tos.makedirs(self.dir_files)\n\t\telse:\n\t\t\tsys.stderr.write(\"Warning, directory %s already exists.\\n\"%(self.dir_files))\n\t\tself.tmpinfname = os.path.join(self.dir_files, 'input')\n\t\tself.tmpoutfname = os.path.join(self.dir_files, 'output')\n\t\t\n\t\tself.crack_dict = {1: crack_by_modes(self.debug),\n\t\t\t2:crack_by_splat(self.debug)}\n\t\tself.argument1_dict = {1: self.clustering_test_instance,\n\t\t\t2: self.splat_to_db_instance}\n\t\t\n\t\t#two descending tables\n\t\tself.splat_table = '%ss'%self.table\n\t\tself.mcl_table = self.splat_table.replace('splat','mcl')\n\t\tif self.mcl_table == self.splat_table:\n\t\t\tsys.stderr.write(\"Error: new splat and mcl tables have the same name, %s\\n\"%self.splat_table)\n\t\t\tsys.exit(2)", "def _init_dataset(self):\n chars = set()\n with open(self.file_path + \"/words.txt\", 'r') as input_file:\n for line in input_file:\n line_split = line.strip().split('\\t')\n file_name = self.file_path+\"/words/\"+line_split[1]\n gt_text = line_split[0]\n chars = chars.union(set(list(gt_text)))\n self.samples.append((file_name, gt_text))\n input_file.close()\n\n self.char_set = sorted(list(chars))", "def initialize(cls):\n return cls( *([0.]*cls._parsize) )", "def __init__(self):\n self.words = None\n self.letters = None\n self.a = None\n self.nwords = None\n self.nletters = None", "def set_up_threshold_cube():\n test_data = 50*np.arange(16).reshape(4, 4)\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"surface_altitude\",\n units=\"m\",\n dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])\n return test_cube", "def setup_spatial():\n arena_size = \"default\"\n\n output_dict = {\n \"arena_size\": arena_size,\n }\n return output_dict", "def __init__(self, neighbourhood, algorithm, iterations, 
set_up):\n self.input = neighbourhood\n self.algorithm = algorithm\n self.set_up = set_up\n self.iterations = int(iterations)\n self.configs = self.get_configs()\n self.houses = self.load_houses()\n self.big_iterations = -1\n self.small_iterations = 0\n self.caps = []\n self.batteries = {}\n self.lowest = 99999\n self.index = 0\n self.run_algorithm()", "def __init__(self):\n self.grid = {}\n for i in range(21):\n self.grid[i] = [' ']*21\n self._len_x = len(self.grid[0])\n self._len_y = len(self.grid)\n self.forbidden_tiles = []\n self.allowed_tiles = []\n self.exit = None\n self.entrance = None", "def initial_samples(lb, ub, method, numSamp):\r\n if not len(lb) == len(ub):\r\n raise AssertionError('Lower and upper bounds have different #s of design variables in initial_samples function.')\r\n assert method == 'random' or method == 'nolh' or method == 'nolh-rp' or method == 'nolh-cdr' or method == 'lhc' or method == 'rand-wor', 'An invalid method was specified for the initial_samples.'\r\n assert (method == 'nolh' or method == 'nolh-rp' or method == 'nolh-cdr') and len(ub) >= 2 and len(ub) <= 29, 'The Phase space dimensions are outside of the bounds for initial_samples.'\r\n for case in Switch(method):\r\n if case('random'):\r\n s = np.zeros((numSamp, len(lb)))\r\n for i in range(0, numSamp, 1):\r\n s[i, :] = lb + (ub - lb) * rand(len(lb))\r\n\r\n break\r\n if case('rand-wor'):\r\n s = np.zeros((numSamp, len(lb)))\r\n for i in range(0, numSamp, 1):\r\n s[i, :] = choice(len(ub), size=len(ub), replace=False)\r\n\r\n break\r\n if case('nolh'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf = range(q)\r\n if r != 0:\r\n remove = range(dim - r, dim)\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('nolh-rp'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf = random.sample(range(q), q)\r\n if r != 0:\r\n remove = random.sample(range(q - 1), r)\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('nolh-cdr'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf, remove = get_cdr_permutations(len(ub))\r\n if remove != []:\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('lhc'):\r\n tmp = lhs(len(lb), samples=numSamp, criterion='center')\r\n s = np.array([ list(lb + (ub - lb) * tmp[i, :]) for i in range(len(tmp[:, 0]))\r\n ])\r\n break\r\n if case():\r\n print 'Somehow you evaded my assert statement - good job!',\r\n print ' However, you still need to use a valid method string.'\r\n\r\n return s", "def initialize(self):\n self.n_words = len(self.vocab)\n self.n_docs = len(self.documents)\n\n # Initialize the three count matrices.\n # The (i,j) entry of self.nmz is the number of words in document i assigned to topic j.\n self.nmz = np.zeros((self.n_docs, self.n_topics))\n # The (i,j) entry of self.nzw is the number of times term j is assigned to topic i.\n self.nzw = np.zeros((self.n_topics, self.n_words))\n # The (i)-th entry is the number of times topic i is assigned in the corpus.\n self.nz = np.zeros(self.n_topics)\n\n # Initialize the topic assignment dictionary.\n self.topics = {} # key-value pairs of form (m,i):z\n\n for m in range(self.n_docs):\n for i in self.documents[m]:\n # Get random topic assignment, i.e. 
z is a random integer in the range of topics\n z = np.random.randint(self.n_topics)\n # Increment count matrices\n self.nmz[m,z] += 1\n self.nzw[z,self.documents[m][i]] += 1\n self.nz[z] += 1\n # Store topic assignment\n self.topics[(m,i)] = z", "def initGrid( self, name, suffix, n, ni, nj, ifields=[],rfields=[], lsize=10):\n #print \"initGrid: initializing %s\"%(name)\n self.name = name\n self.suffix = suffix\n self.n = n\n self.ni = ni\n self.nj = nj\n self.ifields = ifields\n self.rfields = rfields\n #print \"ifields=%s\\nrfields=%s\\nlsize=%d\"%(temp_ifields, temp_rfields, lsize)\n self.lgrid = attributevector.AttributeVector( ifields, rfields, lsize )\n #print \"allocating a temp array...\"\n temp = Numeric.zeros( lsize, Numeric.Float64 )\n #temp = -9999.0\n #print \"Filling real fields with default values...\"\n for f in rfields:\n #print \"\\tFilling field\",f,\":\",\n self.lgrid.importRAttr( f, temp )\n #print \"... OK!\"\n print \"initGrid: Initialized Grid!\"\n # setup av complete\n return", "def initialize_beam(self, prefix):\n\n def _from_none():\n \"\"\" Initializes a beam from scratch. \"\"\"\n beam_prefix = np.array([[self.vocab.go_id]] * self.opt.batch_size, dtype=np.int32)\n return beam_prefix\n\n def _from_string(string_prefix):\n \"\"\" Initializes a beam from an existing string prefix. \"\"\"\n idx_prefix = index_sentence(string_prefix, self.vocab, self.opt)\n beam_prefix = np.concatenate(\n [np.array([[self.vocab.eos_id]] * self.opt.batch_size, dtype=np.int32), idx_prefix], 1)\n return beam_prefix\n\n beam = list()\n # Initialize beam entries; the number is determined by the specified beam width\n # Beam entries are tuples of the form (generated sequence, sequence probability)\n for _ in range(self.opt.beam_width):\n if prefix is None:\n beam_init = _from_none()\n else:\n beam_init = _from_string(prefix)\n beam_tpl = (np.array([[1.0]] * self.opt.batch_size, dtype=np.float32), beam_init)\n beam.append(beam_tpl)\n\n return beam", "def __init__(self):\n self.bigramCounts = collections.defaultdict(lambda : 0)\n self.trigramCounts = collections.defaultdict(lambda : 0)\n self.unigramCounts = collections.defaultdict(lambda : 1)\n self.continuationCounts = collections.defaultdict(lambda: 0)\n self.followingCounts = collections.defaultdict(lambda: 0)\n self.total = 1\n self.totalBigramCounts = 0\n print \"Training Language Model...\"\n self.train(brown.sents())\n print \"--Training Complete--\"", "def GenerateInitialSolution():\n c = random.random()*C\n count = 0\n while np.count_nonzero(alpha) < gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == 1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n while np.count_nonzero(alpha) < 2*gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == -1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n return alpha", "def __init__(self, quad_corpus, translations, train_ratio=0.9):\n a = list(quad_corpus)\n random.seed(1337)\n random.shuffle(a)\n t = int(train_ratio * len(a))\n self.a = a[:t]\n self.b = a[t:]\n self.translations = translations\n self.i = 0", "def setup_StandardGCMCSystemSampler():\n # Make variables global so that they can be used\n global std_gcmc_system_sampler\n global std_gcmc_system_simulation\n\n pdb = PDBFile(utils.get_data_file(os.path.join('tests', 'water-ghosts.pdb')))\n ff = ForceField('tip3p.xml')\n system = 
ff.createSystem(pdb.topology, nonbondedMethod=PME, nonbondedCutoff=12 * angstroms,\n constraints=HBonds)\n\n std_gcmc_system_sampler = samplers.StandardGCMCSystemSampler(system=system, topology=pdb.topology,\n temperature=300*kelvin,\n boxVectors=np.array(pdb.topology.getPeriodicBoxVectors()),\n ghostFile=os.path.join(outdir, 'water-ghost-wats.txt'),\n log=os.path.join(outdir, 'stdgcmcsystemsampler.log'))\n\n # Define a simulation\n integrator = NonequilibriumLangevinIntegrator(temperature=300*kelvin, collision_rate=1./picosecond, timestep=2.*femtoseconds)\n\n try:\n platform = Platform.getPlatformByName('CUDA')\n except:\n try:\n platform = Platform.getPlatformByName('OpenCL')\n except:\n platform = Platform.getPlatformByName('CPU')\n\n std_gcmc_system_simulation = Simulation(pdb.topology, system, integrator, platform)\n std_gcmc_system_simulation.context.setPositions(pdb.positions)\n std_gcmc_system_simulation.context.setVelocitiesToTemperature(300*kelvin)\n std_gcmc_system_simulation.context.setPeriodicBoxVectors(*pdb.topology.getPeriodicBoxVectors())\n\n # Set up the sampler\n std_gcmc_system_sampler.initialise(std_gcmc_system_simulation.context, [2094, 2095, 2096, 2097, 2098])\n\n return None", "def __init__(self, Q, initSol, tenure, scaleFactor, timeout):\n this = _tabu_search.new_TabuSearch(Q, initSol, tenure, scaleFactor, timeout)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def init():\n # analyzer es utilizado para interactuar con el modelo\n analyzer = model.newAnalyzer()\n return analyzer", "def _suggest_samples(dataset: Dataset, settings: ZoomOptSettings) -> np.ndarray:\n\n if settings.batch < 1:\n raise ValueError(f\"Use batch size at least 1. (Was {settings.batch}).\") # pragma: no cover\n\n continuous_dict, categorical_dict = dataset.parameter_space\n\n # If any categorical variable is present, we raise an exception. In theory they should be represented by one-hot\n # encodings, but I'm not sure how to retrieve the bounds of this space and do optimization within it (the\n # best way is probably to optimize it in an unconstrained space and map it to one-hot vectors using softmax).\n # Moreover, in BayesOpt there is iteration over contexts.\n if categorical_dict:\n raise NotImplementedError(\"This method doesn't work with categorical inputs right now.\") # pragma: no cover\n\n # It seems that continuous_dict.values() contains pandas series instead of tuples, so we need to map over it\n # to retrieve the parameter space\n original_space: Hypercuboid = [(a, b) for a, b in continuous_dict.values()]\n\n # Find the location of the optimum. 
We will shrink the space around it\n optimum: np.ndarray = _get_optimum_location(dataset)\n\n # Estimate how many optimization iterations were performed.\n step_number: int = settings.n_step or _estimate_step_number(\n n_points=len(dataset.output_array), batch_size=settings.batch\n )\n\n # Convert to per-batch shrinking factor if a per-iteration shrinking factor supplied\n per_batch_shrinking_factor = (\n settings.shrinking_factor ** settings.batch if settings.shrink_per_iter else settings.shrinking_factor\n )\n\n # Calculate by what factor each dimension of the hypercube should be shrunk\n shrinking_factor_per_dim: float = _calculate_shrinking_factor(\n initial_shrinking_factor=per_batch_shrinking_factor, step_number=step_number, n_dim=len(original_space)\n )\n\n # Shrink the space\n new_space: Hypercuboid = [\n shrink_interval(\n shrinking_factor=shrinking_factor_per_dim, interval=interval, shrinking_anchor=optimum_coordinate\n )\n for interval, optimum_coordinate in zip(original_space, optimum)\n ]\n\n # The shrunk space may be out of the original bounds (e.g. if the maximum was close to the boundary).\n # Translate it.\n new_space = _move_to_original_bounds(new_space=new_space, original_space=original_space)\n\n # Sample the new space to get a batch of new suggestions.\n parameter_space = ParameterSpace([ContinuousParameter(f\"x{i}\", low, upp) for i, (low, upp) in enumerate(new_space)])\n\n return designs.suggest_samples(\n parameter_space=parameter_space, design_type=settings.design, point_count=settings.batch\n )", "def create_samples(self):\n self._samples = self.load_samples()\n self.modify_samples()", "def __init__(self, settings,study):\n \n # Store the study #\n ###################\n \n self._study = study\n self._parameters_size = self._study.geometry.parameters_size\n \n # Read settings #\n ################# \n if hasattr(settings, 'global_sample_function'):\n # Use given function and ignore bounds\n self._global_sample_function = settings.global_sample_function\n self._global_parameters_bounds = None\n else:\n # If no function, use uniform rand with given boundaries if provided. 
If not, assume [0,1]\n if hasattr(settings, 'global_parameters_bounds'):\n self._global_parameters_bounds = np.array(settings.global_parameters_bounds)\n else:\n self._global_parameters_bounds = [(0, 1)]*self._parameters_size\n \n self._global_sample_function = lambda: self._global_parameters_bounds[:,0] + (self._global_parameters_bounds[:,1]-self._global_parameters_bounds[:,0])*np.random.rand(1,self._parameters_size).flatten()\n \n\n if hasattr(settings, 'global_result_constraint'):\n self._global_result_constraint = settings.global_result_constraint\n else:\n self._global_result_constraint = None \n \n if hasattr(settings, 'local_result_constraint'):\n self._local_result_constraint = settings.local_result_constraint\n else:\n self._local_result_constraint = None\n \n if hasattr(settings, 'local_max_iterations'):\n self._local_max_iterations = settings.local_max_iterations\n else:\n self._local_max_iterations = 50\n \n if hasattr(settings, 'local_method'):\n self._local_method = settings.local_method\n else:\n self._local_method = 'L-BFGS-B'\n \n if hasattr(settings, 'local_scaling_factor'):\n self._local_scaling_factor = settings.local_scaling_factor\n else:\n self._local_scaling_factor = 1\n \n if hasattr(settings, 'local_ftol'):\n self._local_ftol = settings.local_ftol\n else:\n self._local_ftol = 1e-5\n \n if hasattr(settings, 'local_pgtol'):\n self._local_pgtol = settings.local_pgtol\n else:\n self._local_pgtol = 1e-5\n \n # Wavelength settings for lumopt \n if hasattr(settings, 'local_wavelength_start'):\n self._local_wavelength_start = settings.local_wavelength_start\n else:\n self._local_wavelength_start = 1550e-9\n \n if hasattr(settings, 'local_wavelength_stop'):\n self._local_wavelength_stop = settings.local_wavelength_stop\n else:\n self._local_wavelength_stop = 1550e-9\n \n if hasattr(settings, 'local_wavelength_points'):\n self._local_wavelength_points = settings.local_wavelength_points\n else:\n self._local_wavelength_points = 1\n \n # Keep track of the latest random restart. Run a first simulation with\n # the initial parameters already stored in the geometry\n self._new_param = None", "def initialize(self, corpus):\n if self.id2word is None:\n logger.info(\"no word id mapping provided; initializing from corpus, assuming identity\")\n self.id2word = utils.dict_from_corpus(corpus)\n self.num_terms = len(self.id2word)\n elif self.id2word:\n self.num_terms = 1 + max(self.id2word)\n else:\n self.num_terms = 0\n\n shape = self.num_topics, self.num_terms\n logger.info(\"constructing %s random matrix\", str(shape))\n # Now construct the projection matrix itself.\n # Here i use a particular form, derived in \"Achlioptas: Database-friendly random projection\",\n # and his (1) scenario of Theorem 1.1 in particular (all entries are +1/-1).\n randmat = 1 - 2 * np.random.binomial(1, 0.5, shape) # convert from 0/1 to +1/-1\n # convert from int32 to floats, for faster multiplications\n self.projection = np.asfortranarray(randmat, dtype=np.float32)\n # TODO: check whether the Fortran-order shenanigans still make sense. 
In the original\n # code (~2010), this made a BIG difference for np BLAS implementations; perhaps now the wrappers\n # are smarter and this is no longer needed?", "def create_individual(self):\n self.genes = np.random.rand(self.chromosome_size)\n self.personal_best = self.genes.copy", "def __init__(self, name, grid):\n self.name = name\n self.space_dimensions = grid.dimensions\n self.step_dimension = grid.stepping_dim\n self.dtype = grid.dtype\n\n # All known solutions and grids in this context\n self.solutions = []\n self.grids = {}", "def __init__(self, corpus, n_grams, min_length):\n self.grams = {}\n self.n_grams = n_grams\n self.corpus = corpus\n self.min_length = min_length\n self.sequences()", "def __init__(self, min_freq = 20):\n\n # A dict that maps a word to an index.\n self.word_to_idx = {}\n\n # A dict that maps an index to it's word.\n self.idx_to_word = {}\n\n # Store the min_freq.\n self.min_freq = min_freq", "def __init__(self, n, sents, corpus='', gamma=None, addone=True):\n self.n = n\n self.smoothingtechnique = 'Interpolated (Jelinek Mercer) Smoothing'\n self.gamma = gamma\n self.addone = addone\n self.counts = counts = defaultdict(int)\n self.gamma_flag = True\n self.corpus = corpus\n # way more efficient than use set unions\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = list(set(voc))\n\n if gamma is None:\n self.gamma_flag = False\n\n # if not gamma given\n if not self.gamma_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent for training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n\n for sent in train_sents:\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(train_sents)\n # variable only for tests\n self.tocounts = counts\n # search the gamma that gives lower perplexity\n gamma_candidates = [i*50 for i in range(1, 15)]\n # xs is a list with (gamma, perplexity)\n xs = []\n sents = train_sents\n for aux_gamma in gamma_candidates:\n self.gamma = aux_gamma\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_gamma, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.gamma = xs[0][0]\n with open('old-stuff/interpolated_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Gamma: {}\\n'.format(self.gamma))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n else:\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n sents = list(map((lambda x: x + ['</s>']), sents))\n\n for sent in sents:\n # counts now holds all k-grams for 0 < k < n + 1\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(sents)", "def _sinit(cls):\n SGR = oq.plugin.get('SciGraph')\n IXR = oq.plugin.get('InterLex')\n #sgr.verbose = True\n for rc in (SGR, IXR):\n rc.known_inverses += (\n ('hasPart:', 'partOf:'),\n ('NIFRID:has_proper_part', 'NIFRID:proper_part_of'))\n\n sgr = SGR(apiEndpoint=auth.get('scigraph-api'))\n ixr = IXR(readonly=True)\n ixr.Graph = OntGraph\n 
cls.query_init(sgr, ixr) # = oq.OntQuery(sgr, ixr, instrumented=OntTerm)\n [cls.repr_level(verbose=False) for _ in range(2)]", "def _localGenerateAssembler(self,initDict):\n Grid._localGenerateAssembler(self, initDict)\n self.jobHandler = initDict['internal']['jobHandler']\n self.dists = self.transformDistDict()\n # Do a distributions check for ND\n # This sampler only accept ND distributions with variable transformation defined in this sampler\n for dist in self.dists.values():\n if isinstance(dist, Distributions.NDimensionalDistributions):\n self.raiseAnError(IOError, 'ND Dists contain the variables in the original input space are not supported for this sampler!')", "def setUp(self):\n\n self._hash_bins = 10\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"embedding_dim\": self._embedding_dim\n }", "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "def main():\n grid = make_grid(3, 3) # change to 3x3\n dictionary = get_dictionary(\"words.txt\")\n words = search(grid, dictionary)\n display_words(words)", "def _make_test_cube(long_name):\n cs = GeogCS(EARTH_RADIUS)\n data = np.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]])\n cube = Cube(data, long_name=long_name)\n x_coord = DimCoord(\n np.linspace(-45.0, 45.0, 3), \"latitude\", units=\"degrees\", coord_system=cs\n )\n y_coord = DimCoord(\n np.linspace(120, 180, 3), \"longitude\", units=\"degrees\", coord_system=cs\n )\n cube.add_dim_coord(x_coord, 0)\n cube.add_dim_coord(y_coord, 1)\n return cube", "def build_index(path, chunk_size):\n physical_files = set()\n boundaries = []\n examples_needed = 0\n for sgf in find_sgfs(path):\n physical_files.add(sgf.locator.physical_file)\n if examples_needed == 0:\n # The start of this SGF is a chunk boundary.\n boundaries.append(Pointer(sgf.locator, 0))\n examples_needed = chunk_size\n game_record = Sgf_game.from_string(sgf.contents)\n num_positions = len(_sequence(game_record))\n if examples_needed < num_positions:\n # The start of the next chunk is inside this SGF.\n boundaries.append(Pointer(sgf.locator, examples_needed))\n remaining_examples = num_positions - examples_needed\n examples_needed = chunk_size - remaining_examples\n else:\n # This SGF is entirely contained within the current chunk.\n examples_needed -= num_positions\n\n return CorpusIndex(physical_files, chunk_size, boundaries)", "def __initialise_smart(self, X, args):\n\t\tcentroids = np.zeros((self.K,self.D))\n\t\tif X.shape[0] > 10*self.K:\n\t\t\tdata = X[:10*self.K,:]\n\t\telse:\n\t\t\tdata = X\n\t\tN = data.shape[0]\n\n\t\t\t#choosing centroids\n\t\t\t#points are chosen from dataset with farhtest point clustering\n\t\tran_index = np.random.choice(N)\n\t\tcentroids[0,:] = data[ran_index]\n\n\t\tfor k in range(1,self.K):\n\t\t\tdistances = np.zeros((N,k)) #(N,K)\n\t\t\tfor k_prime in range(k):\n\t\t\t\tdistances[:,k_prime] = np.sum(np.square(data - centroids[k_prime,:]), axis =1) #(N,K')\n\t\t\tdistances = np.min(distances, 
axis = 1) #(N,)\n\t\t\tdistances /= np.sum(distances) #normalizing distances to make it a prob vector\n\t\t\tnext_cl_arg = np.random.choice(range(data.shape[0]), p = distances) #chosen argument for the next cluster center\n\t\t\tcentroids[k,:] = data[next_cl_arg,:]\n\n\t\tvar = np.var(X, axis = 0) #(D,)\n\n\t\t\t#computing initial responsibilities\n\t\tr_0 = np.zeros((X.shape[0],self.K))\n\t\tfor k in range(self.K):\n\t\t\tr_0[:,k] = np.sum(np.divide(np.square(X - centroids[k,:]), var), axis = 1) + 1e-5\n\t\tr_0 = np.divide(r_0.T, np.sum(r_0,axis=1)).T\n\n\t\tself.gating.fit(X,r_0, *args)\n\n\t\treturn r_0", "def createAllSG():\n\tfor info in conf_HVM:\n\t\tec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])\n\t\tcreateSG(ec2,'SG-Cassandra-'+info['region']+'-'+info['zone'],CASSANDRA_RULES)", "def construct_random_initial(self):\n x = np.random.random((self._crv_size, self._bound))\n return x", "def __init__(self, n, prey_cnt=0, predator_cnt=0):\n # print n, prey_cnt, predator_cnt\n self.grid_size = n\n self.grid = []\n for i in range(n):\n row = [0]*n # row is a list of n zeros\n self.grid.append(row)\n self.init_animals(prey_cnt, predator_cnt)", "def _init_empty_polyhedron(self):\n self._ambient_dim = 0\n\n self._Vrepresentation = Sequence([])\n self._Vrepresentation.set_immutable()\n \n self._Hrepresentation = Sequence([])\n Equation(self, [-1]);\n self._Hrepresentation.set_immutable()\n\n self._V_adjacency_matrix = matrix(ZZ, 0, 0, 0)\n self._V_adjacency_matrix.set_immutable()\n\n self._H_adjacency_matrix = matrix(ZZ, 1, 1, 0)\n self._H_adjacency_matrix.set_immutable()", "def _initialize_corpus(self):\n vocab = self.vocab # vocab is the word vector\n theta = self.theta # theta is the model parameter\n corpus = self.corpus\n\n for line in corpus:\n for word in line:\n if word not in vocab:\n vocab[word] = init_vector(self.n)\n theta[word] = init_vector(self.n)\n\n if self.verbose:\n print(f\"{len(vocab)} words have been loaded\")", "def init(self):\n self._es.create_index_template(\n name=DATASETS_INDEX_NAME,\n template=DATASETS_INDEX_TEMPLATE,\n force_recreate=True,\n )\n self._es.create_index(DATASETS_INDEX_NAME)", "def __init__(self, n):\n self._n = n\n self._grid = [[False] * n for _ in range(n)]\n # create sites for n-by-n grid and 2 \"virtual\" sites for top and bottom\n # self._uf = QuickFindUF(n * n + 2)\n self._uf = WeightedQuickUnionUF(n * n + 2) # QuickFindUF(n * n + 2)\n # connect top and bottom virtual sites with respecting sides of grid\n self._top_idx = n * n\n self._bottom_idx = n * n + 1\n for i in range(n):\n self._uf.union(self._top_idx, i)\n self._uf.union(self._bottom_idx, (n - 1) * n + i)", "def test_hunger_grid_create(self):\n self.grid = Hunger_Grid.hunger_grid()\n self.grid.newGrid = Hunger_Grid.hunger_grid().create_hunger_grid(M=30, N=30, P_LAVA = 1.0)\n self.assertTrue(self.grid.newGrid.size == 900, \"Grid size is incorrect\")\n self.assertTrue(self.grid.newGrid[2, 2] == 1, \"Lava chance is not acting correctly\")\n self.assertTrue(self.grid.newGrid[-3, -3] == 1, \"Lava chance is not acting correctly\")", "def initialize(self):\n # Initializing the counter and distribution.\n for k in range(0, self.topic_number,1):\n self.topic_term_count_matrix[k]= [0.0] * self.term_number\n self.topic_distribution_over_term[k] = [0.0] * self.term_number\n self.sum_topic_by_term_count[k] = 0.0\n for m in range(0, self.document_number,1):\n self.document_topic_count_matrix[m] = [0.0] * self.topic_number\n self.document_distribution_over_topic[m] = [0.0] * 
self.topic_number\n self.sum_document_by_topic_count[m] = 0.0\n\n # Initializing topics assigned to all words of all documents.\n for m in range(0, self.document_number, 1):\n N = len(self.documents[m])\n self.word_topic_assignment[m] = [-1] * N\n for n in range(0, N,1):\n topic = int(random.uniform(0,1) * self.topic_number)\n self.document_topic_count_matrix[m][topic] += 1.0\n self.topic_term_count_matrix[topic][self.documents[m][n]] += 1.0\n self.sum_topic_by_term_count[topic] += 1.0\n self.word_topic_assignment[m][n] = topic\n self.sum_document_by_topic_count[m] = N" ]
[ "0.6223337", "0.59628797", "0.58912903", "0.5874907", "0.5854502", "0.5754657", "0.5733271", "0.56222767", "0.5502466", "0.54998386", "0.54974604", "0.5460404", "0.5426823", "0.5370437", "0.53134114", "0.5280649", "0.52736545", "0.5264173", "0.52439547", "0.52239805", "0.52133995", "0.5201464", "0.5197781", "0.5195287", "0.5166887", "0.51579237", "0.5157122", "0.5151636", "0.5146957", "0.5110705", "0.51000965", "0.5098655", "0.509602", "0.5092681", "0.50834566", "0.50803155", "0.5068113", "0.5067704", "0.5064384", "0.50620085", "0.5060049", "0.5052792", "0.50522333", "0.5037757", "0.50341415", "0.5033885", "0.5029221", "0.502805", "0.5027424", "0.5025511", "0.5024078", "0.50147194", "0.50089437", "0.5004268", "0.5002548", "0.49922487", "0.49869925", "0.4986569", "0.49761832", "0.4975849", "0.49675873", "0.496703", "0.4966215", "0.49642715", "0.49641946", "0.49612823", "0.49590996", "0.49575084", "0.4950198", "0.4941925", "0.49405187", "0.49303788", "0.4929549", "0.49191633", "0.49177888", "0.49147114", "0.49141252", "0.491141", "0.49017224", "0.4901646", "0.48998806", "0.48997557", "0.48994607", "0.48976502", "0.48962414", "0.48951176", "0.48930106", "0.48927853", "0.4889207", "0.4889196", "0.488467", "0.4880462", "0.48791718", "0.4877827", "0.48759046", "0.48751605", "0.48726475", "0.48701224", "0.48685074", "0.48641506" ]
0.6189054
1
merges station letters into single list of station names
def stations(station_let):
    stat = [''] * np.size(station_let, 0)
    for i in range(len(stat)):
        for j in range(4):
            if station_let[i][j] is not np.ma.masked:
                stat[i] += station_let[i][j]
    return stat
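For illustration only, a minimal driver for the stations helper above; the letter array, its mask layout, and the expected output are assumptions for this sketch, not part of the record:

import numpy as np

# Hypothetical input: one row per station, up to four single-letter slots;
# unused slots are masked out, mirroring what stations() expects.
letters = np.ma.array(
    [["K", "I", "P", "X"],
     ["H", "N", "X", "X"]],
    mask=[[False, False, False, True],
          [False, False, True, True]],
)

print(stations(letters))  # expected: ['KIP', 'HN']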
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_state_names_and_abbreviations():\n lst=[]\n for k,v in us_state_abbrev.items():\n lst.append(v)\n lst = sorted(lst[:10])\n state = sorted(states)\n print(lst+state[-10:])\n return", "def combine_state_names_and_abbreviations():\n return sorted(us_state_abbrev.values())[:10] + sorted(states)[-10:]", "def get_station_names(self):\n station_names = []\n for wrapper in self.soup.find_all(\"div\", {\"class\": \"stop-wrapper\"}):\n station_name = ' '.join(wrapper.find(\"h3\").text.split(' ')[:-1])\n station_names.append(station_name)\n return np.array(station_names).T", "def stations():\n\n return station_list", "def getStationsName(self) :\n names = []\n for sts in self._stations :\n names.append(sts.getName())\n\n return names", "def stationabbreviation(station):\n stations = {'Utrecht': 'Ut',\n 'Amsterdam Centraal': 'asd'}\n if station in stations:\n return stations[station]", "def canon_station_name(s, line):\n s = s.strip()\n s = re.sub('^Heathrow$', 'Heathrow Terminals 1, 2, 3', s)\n s = re.sub('^Olympia$', 'Kensington (Olympia)', s)\n s = re.sub('^Warwick Ave$', 'Warwick Avenue', s)\n s = re.sub('^Camden$', 'Camden Town', s)\n s = re.sub('^Central$', 'Finchley Central', s) # They say \"Between Central and East Finchley\"\n s = re.sub('\\s*Platform \\d$', '', s)\n s = s + ' Station'\n s = s.replace('(Bakerloo)', 'Bakerloo').replace('Earls', 'Earl\\'s') \\\n .replace(' fast ', ' ') \\\n .replace('\\xe2\\x80\\x99', \"'\") \\\n .replace('St ', 'St. ') \\\n .replace('Elephant and Castle', 'Elephant &amp; Castle') \\\n .replace('Lambeth Station', 'Lambeth North Station') \\\n .replace('Chalfont Station', 'Chalfont &amp; Latimer Station') \\\n .replace('West Brompon', 'West Brompton') \\\n .replace('Picadilly Circus', 'Piccadilly Circus') \\\n .replace('High Barent', 'High Barnet') \\\n .replace('Bartnet', 'Barnet') \\\n .replace('Faringdon', 'Farringdon') \\\n .replace('Turnham Greens', 'Turnham Green') \\\n .replace('Ruilsip', 'Ruislip') \\\n .replace('Dagemham', 'Dagenham') \\\n .replace('Edgware Road (H &amp; C)', 'Edgware Road Circle') \\\n .replace('Hammersmith (Circle and H&amp;C)', 'Hammersmith') \\\n .replace('Shepherds Bush (Central Line)', \"Shepherd's Bush\") \\\n .replace('Terminals 123', 'Terminals 1, 2, 3').replace('Terminal 1,2,3', 'Terminals 1, 2, 3') \\\n .replace('Woodford Junction', 'Woodford') \\\n .replace(\"King's Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace(\"Kings Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace('Central Finchley', 'Finchley Central').replace('District and Picc', 'D &amp; P') \\\n .replace('South Fields', 'Southfields') \\\n .replace('Regents Park', \"Regent's Park\") \\\n .replace('Bromley-by-Bow', \"Bromley-By-Bow\") \\\n .replace('Brent Oak', 'Burnt Oak') \\\n .replace('St. Johns Wood', \"St. 
John's Wood\") \\\n .replace('Totteridge and Whetstone', 'Totteridge &amp; Whetstone') \\\n .replace('Newbury Park Loop', 'Newbury Park') \\\n .replace('Harrow-on-the-Hill', 'Harrow on the Hill')\n if s == 'Edgware Road Station' and line == 'B':\n s = 'Edgware Road Bakerloo Station'\n if s == 'Edgware Road Station' and line != 'B':\n s = 'Edgware Road Circle Station'\n return s", "def station_list() -> List[Dict]:\n return STATIONS", "def train_stations(self) -> List[str]:\n return sorted([train_info['HE'] for train_info in train_api.stations_info.values()])", "def parse_station_name (station_name):\n try:\n _,chinese_name,code,full_pinyin,short_pinyin = station_name.split('|')\n except ValueError:\n # print(station_name)\n _,chinese_name,code,full_pinyin,short_pinyin,_ = station_name.split('|')\n return {chinese_name:code,full_pinyin:code,short_pinyin:code}", "def stations():\n\n # Query all Stations\n station_results = session.query(Station.station).all()\n\n # Convert list of tuples into normal list\n all_station_names = list(np.ravel(station_results))\n\n return jsonify(all_station_names)", "def satname(rocketsatname):\n \n # split the rocket and satellite name at the bullet\n names = rocketsatname.split('•')\n \n # remove spaces around satellite name\n namefull = names[1].strip()\n \n # return the satellite's name\n return namefull", "def getSecondStrand(sequences):\n compDNA = []\n for dna in sequences:\n compDNAAux = dna.replace('A', 't')\n compDNAAux = compDNAAux.replace('T', 'a')\n compDNAAux = compDNAAux.replace('C', 'g')\n compDNAAux = compDNAAux.replace('G', 'c')\n compDNA.append(compDNAAux.upper())\n\n for i in range(0, len(compDNA)):\n compDNA[i] = compDNA[i][::-1]\n\n return compDNA", "def stations():\n # Return a JSON list of stations from the dataset\n session = Session(engine)\n stations = session.query(Station.name).all()\n\n # Convert list of tuples into normal list\n station_names = list(np.ravel(stations))\n\n return jsonify(station_names)", "def get_data_list_name(name):\n last = name[-1]\n if last in 'y':\n if last in 'a,e,i,o,u,y':\n name = name[0:-1] + 'ies'\n else:\n name += 's'\n elif last in 'ou':\n name += 'es'\n elif last == 'f':\n name = name[0:-1] + 'ves'\n elif name[-2:-1] == 'fe':\n name = name[0:-2] + 'ves'\n elif last in ['s', 'ss', 'x', 'sh', 'ch']:\n name += 'es'\n else:\n name += 's'\n return name", "def extract_names(register):\n names = []\n for i in range(len(register) - 1): # len() -> no of columns\n first_name = str(register.iloc[i][2]).capitalize()\n last_name = str(register.iloc[i][1]).upper()\n name = last_name + ' ' + first_name\n names.append(name)\n names = list(set(names))\n return names", "def station_name(f):\n return f.split('/')[1].split('_')[0]", "def extract_full_names(people):\n result = []\n \n for lst in names:\n x = ''\n for name in lst.values():\n x += ' ' + name \n x = x[1:] \n result.append(x)\n return result", "def miss_station(all_stations,stations):\n\tdiff = len(all_stations)-len(stations)\n k=0\n i=0\n miss_stations = ['']*diff\n a = all_stations[:]\n a.sort()\n s = stations[:]\n s.sort()\n while i < len(stations):\n while a[i] != s[i]:\n miss_stations[k]=a[i]\n del a[i]\n k+=1\n i+=1\n\treturn miss_stations", "def name_supply(stems=string.ascii_lowercase, drop_zero=True):\n k = 0\n while 1:\n for a in stems:\n yield a+str(k) if (k or not drop_zero) else a\n k = k+1", "def currentAntennaNames(carmaOnly=False) :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n cname = i.carmaAntennaName\n tname = 
i.typedAntennaName\n if (carmaOnly) :\n names = i.carmaAntennaName\n else :\n names = \"%s(%s)\" %(cname,tname)\n namelist.append(names)\n return namelist", "def stations():\n results = session.query(Station.name).all()\n station_names = list(np.ravel(results))\n\n return jsonify(station_names)", "def watershedlist():\n opts = watersheds_db()\n return [(opts[opt]['name'] + ' (' + opts[opt]['delineation'] + ')', opt) for opt in opts]", "def allstrings2(alphabet, length):\n c = []\n for i in range(length):\n c = [[x]+y for x in alphabet for y in c or [[]]]\n for value in c:\n \tfvalue = ''.join(value)\n \tprint fvalue\n return \"\"", "def formatTickers(self):\n availableTickers = []\n for pair in self.allPairs:\n if not pair.upper()[0:3] in availableTickers:\n availableTickers.append(pair.upper()[0:3])\n if not pair.upper()[3:] in availableTickers:\n availableTickers.append(pair.upper()[3:])\n return availableTickers", "def formatTickers(self):\n availableTickers = []\n for pair in self.allPairs:\n if not pair.upper()[0:3] in availableTickers:\n availableTickers.append(pair.upper()[0:3])\n if not pair.upper()[3:] in availableTickers:\n availableTickers.append(pair.upper()[3:])\n return availableTickers", "def create_station_list(self):\n sorted_station_list = sorted(self.station_dict, key=self.station_dict.get)\n\n return sorted_station_list", "def get_uniprot_names(uniprot_result):\n name_lines = [l for l in uniprot_result.split('\\n') if l.startswith('DE')]\n\n names = []\n\n for nm_line in name_lines:\n if 'Full=' in nm_line:\n names.append(nm_line.split('Full=')[-1][:-1])\n elif 'Short=' in nm_line:\n names.append(nm_line.split('Short=')[-1][:-1])\n\n return names", "def get_and_clean_student_list(students):\r\n\r\n students = split_by_comma_and_whitespace(students)\r\n students = [unicode(s.strip()) for s in students]\r\n students = [s for s in students if s != '']\r\n students_lc = [x.lower() for x in students]\r\n\r\n return students, students_lc", "def impute_station_name(tags):\n\n if not tags.get(\"StationName\"):\n try:\n tags[\"StationName\"] = tags[\"DeviceSerialNumber\"]\n except:\n try:\n tags[\"StationName\"] = tags[\"X-Ray Radiation Dose Report\"][\"Device Observer UID\"]\n except:\n tags[\"StationName\"] = \"Unknown\"\n logger = logging.getLogger(\"DcmSimplify\")\n logger.warning('No station name identified')\n\n return tags", "def make_label_names(name_lsit):\n\n hover_label_names = []\n for x in range(len(name_lsit)):\n temp1 = name_lsit[x]\n hover_label_names.append(temp1)\n\n return hover_label_names", "def deabbreviate(self, st):\n\t\tabbrs = {'gws': 'greater western sydney giants',\n\t\t\t\t 'gwsg': 'greater western sydney giants',\n\t\t\t\t 'afl': 'australian football league',\n\t\t\t\t 'nrc': 'national rugby championship',\n\t\t\t\t 'nrl': 'national rugby league',\n\t\t\t\t 'syd': 'sydney',\n\t\t\t\t 'mel': 'melbourne',\n\t\t\t\t 'melb': 'melbourne',\n\t\t\t\t 'bris': 'brisbane',\n\t\t\t\t 'brisb': 'brisbane',\n\t\t\t\t 'gc': 'gold coast',\n\t\t\t\t 'adel': 'adelaide',\n\t\t\t\t 'canb': 'canberra',\n\t\t\t\t 'mt': 'mount',\n\t\t\t\t 'utd': 'united',\n\t\t\t\t 'cty': 'city',\n\t\t\t\t 'football club': 'fc',\n\t\t\t\t 'snr': 'senior',\n\t\t\t\t 'jr': 'junion',\n\t\t\t\t 'nsw': 'new south wales' ,\n\t\t\t\t 'vic': 'victoria',\n\t\t\t\t 'tas' : 'tasmania',\n\t\t\t\t 'sa': 'south australia',\n\t\t\t\t 'wa': 'western australia',\n\t\t\t\t 'act': 'australian capital territory',\n\t\t\t\t 'nt': 'northern territory',\n\t\t\t\t 'qld': 'queensland',\n\t\t\t\t 'champs': 
'championships', \n\t\t\t\t 'champ': 'championship', \n\t\t\t\t 'soc': 'society',\n\t\t\t\t 'ent': 'entertainment',\n\t\t\t\t 'intl': 'international', \n\t\t\t\t 'int': 'international', \n\t\t\t\t 'aust': 'australian'}\n\n\t\t# first replace full state names by abbreviations;\n\t\tfor ab in abbrs:\n\t\t\tst = re.sub(r'\\b' + ab + r'\\b', abbrs[ab], st)\n\n\t\treturn st", "def get_luns(pairs: list) -> list:\n return [pair.split() for pair in pairs]", "def typedAntennaNames() :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n namelist.append( i.typedAntennaName )\n return namelist", "def clean_plant_list(plant_list_in):\r\n\tfull_plants = [plant for plant in plants if '.' not in plant] #remove abbreviation\r\n\t#print(full_plants)\r\n\r\n\treturn list(set(full_plants)) # return unique names as list\r", "def state2stations(state):\r\n state = state[:2].upper()\r\n for row in rows():\r\n if row[5]==state:\r\n yield tuple(row)", "def get_aliases_string(trembl_list):\n aliases_list = []\n\n for row in trembl_list:\n psimi_trembl = \"trembl:\" + row[1]\n aliases_list.append(psimi_trembl)\n\n return \"|\".join(aliases_list)", "def __lettersToString(self, words):\r\n \r\n li = []\r\n \r\n for word in words:\r\n li.append(\"\".join(word))\r\n \r\n return li", "def build_next_stations(stations):\n\n station_0_bikes = stations[0]['bikesAvailable']\n station_1_bikes = stations[1]['bikesAvailable']\n\n return f\"On station {stations[0]['name']} is {station_0_bikes} \" \\\n f\"bike{'s' if station_0_bikes > 1 else ''} available and on station\" \\\n f\"{stations[1]['name']} is {station_1_bikes} \" \\\n f\"bike{'s' if station_1_bikes > 1 else ''} available. Goodbye and happy cycling!\"", "def FormatName(X):\n full = [v.split(\"OS\")[0].strip() for v in X.iloc[:, 0]]\n gene = [v.split(\"GN=\")[1].split(\" PE\")[0].strip() for v in X.iloc[:, 0]]\n return full, gene", "def merge_speeches(speeches_list):\n\n # Create a new string variable\n speeches_string = ''\n\n # Iterate over speeches in the given list\n for speech in speeches_list:\n # Append the speech and add a single space at the end\n speeches_string += speech + ' '\n\n return speeches_string", "def make_market_street(start):\r\n return [start]", "def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names", "def get_words_from_sysets(synset):\n synlist = []\n for s in synset:\n syns = s.lemmas()[0].name()\n synlist.append(syns)\n return synlist", "def nameList(self):\r\n return [self.name.lower(), self.code] + self._otherNames", "def __stringToLetters(self, words):\r\n li = []\r\n \r\n for word in words:\r\n li.append(list(word))\r\n \r\n return li", "def tapeToList(tape):\n tapelist = []\n if tape[0] != 'b' and tape[1] != 'b':\n tape = 'bbbb' + tape\n for i in range(5):\n tape = tape + 'b'\n for c in tape:\n tapelist.append(c)\n return tapelist", "def squeeze(word):\n return ''.join(x[0] for x in groupby(word))", "def stations():\n # Query all station names from dataset\n station_list = session.query(Measurement.station).distinct().all()\n all_stations = list(np.ravel(station_list))\n\n return jsonify(all_stations)", "def _toStr(toList):\n\n names = [formataddr(i) for i in zip(*toList)]\n return ', '.join(names)", "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def allstrings2(alphabet, length):\n\n c = []\n for i in range(length):\n c = [[x]+y for x in 
alphabet for y in c or [[]]]\n\n return c", "def hgvs2single(s):\n _validate_str(s)\n t = re_protein.findall(s)\n return [\"{}{}{}\".format(AA_CODES[m[1]], m[2], AA_CODES[m[3]]) for m in t]", "def list_stations(pdbfile):\n try:\n pdb = WriteableParmDB(pdbfile)\n return sorted(set(name.split(\":\")[-1] for name in pdb.getNames()))\n finally:\n pdb = None", "def keep_lowercase(str_list):", "def get_vos(mappings):\n regex = re.compile(\"^/(\\w+)/\")\n patterns = (m.pattern for m in mappings)\n matches = filter(None, (regex.match(p) for p in patterns))\n vo_groups = set(m.group(1).lower() for m in matches)\n\n return vo_groups", "def build_list(city):\n\tcity_text = get_city_text(city)\n\tcity_list = []\n\tcity_text = city_text.split( )\n\tbad_chars = '(){}[]\".,1234567890 '\n\n\tfor word in city_text:\n\t\tword = word.strip(bad_chars)\n\t\tif word not in city_list:\n\t\t\tcity_list.append(word)\n\treturn city_list", "def get_station_name(self, station=0):\n return self.statuslist()[station][1]", "def getStationName(analyzer,stationId):\n name = model.getStationName(analyzer,stationId)\n return name", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def neuronyms(input_str, k):\n n = len(input_str)\n result = []\n\n for length in range(k, n-k+1):\n for start in range (1, n - length):\n prefix = input_str[:start]\n suffix = input_str[(start+length):]\n res_str = prefix+str(length)+suffix\n result.append(res_str)\n\n return result", "def update_city_name(name):\r\n if ', WA' or ',WA' in name:\r\n name = name.rstrip (', WA')\r\n return string.capwords(name)", "def eng_with_sub(self, eng: list, subword: list) -> list:\n subwords = subword + eng[0]\n while [] in subwords:\n subwords.remove([])\n out = \" \".join('%s' % id for id in subwords).split()\n return out", "def generate(seats: List[str]) -> List[str]:\n m = len(seats)\n n = len(seats[0]) if m else 0\n\n regen = [[\".\" for _ in range(n)] for _ in range(m)]\n\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"L\":\n regen[i][j] = \"#\" if check_empty(seats, i, j) else \"L\"\n if seats[i][j] == \"#\":\n regen[i][j] = \"L\" if check_occ(seats, i, j) else \"#\"\n \n for i in range(m):\n regen[i] = \"\".join(regen[i])\n\n return regen", "def convert_angstroms_nm(d_angstroms):\n return d_angstroms/nm_angstroms", "def fix_name_wga(artist):\n comma = artist.find(\",\")\n\n return \" \".join([artist[comma + 1:].strip(), artist[:comma].strip()]) if comma != -1 else artist", "def acgt_to_string(s: list[list[str]]) -> list[list[str]]:\r\n s_out = [[\"\"] for i in range(len(s))]\r\n for i in range(len(s) - 1):\r\n h = \"\"\r\n for j in range(len(s[i])):\r\n if s[i][j] == 0:\r\n h += \"00\"\r\n if s[i][j] == 1:\r\n h += \"01\"\r\n if s[i][j] == 2:\r\n h += \"10\"\r\n if s[i][j] == 3:\r\n h += \"11\"\r\n s_out[i][0] = h\r\n return s_out", "def parse_streetname(self):\n index = self.index\n \n name = \"\"\n for i in range(4):\n if index + i == self.length:\n break\n if self.words[index+i]['word'] == ',':\n break\n # Hack\n if self.words[index+i]['word'] == 'doctor':\n self.words[index+i]['word'] = 'drive'\n break\n try:\n word = sttype[self.words[index+i]['word']]\n break\n except:\n try:\n word = vocab[self.words[index+i]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n break\n if name != '':\n name += ' ' + word['lemma'][0]\n else:\n name = word['lemma'][0]\n except: \n if self.words[index+i]['word'][-2:] in [ 'th', 'st', 'nd', 'rd' ]:\n name = 
self.words[index+i]['word'][:-2]\n else:\n self.index += i\n _dir, _n = self.parse_streetdir()\n self.index -= i\n if _dir:\n break\n if name != '':\n name += ' ' + self.words[index+i]['word']\n else:\n name = self.words[index+i]['word']\n \n if i == 0 or i == 4:\n return None, 0\n else:\n return name, i", "def get_alternate_names(self, alt_list):\n self.alternates = [a.name for a in alt_list if a.raga == self.name]", "def get_raga_swaras(self, mapping):\n self.aro_swaras = [mapping[s][0] for s in self.aro]\n self.ava_swaras = [mapping[s][0] for s in self.ava]\n aro = [\"s\\'\" if sw == 's.' else sw for sw in self.aro]\n ava = [\"s\\'\" if sw == 's.' else sw for sw in self.ava]\n self.aro_seq = '|'.join([s.upper().strip() for s in aro])\n self.ava_seq = '|'.join([s.upper().strip() for s in ava])", "def merge(decks):\n\ta = (mergesort(decks))\n\treturn ''.join([item[1] for item in a]) #if type(item[1]) == str])", "def song_by_word(ans):\r\n songs_list = \"\"\r\n ans = ans.lower()\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n song = str(song)\r\n if ans in song.lower():\r\n songs_list += song + \", \"\r\n return songs_list[:-2]", "def seperate_Loc_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n if state in us_state_abbrev:\n res.append(us_state_abbrev[state])\n return res, list(values)", "def get_whole_nato_alphabet_string(mappings):\n def tuple_to_string(letter_word_pair):\n \"\"\"Convert a tuple to a mapping string.\"\"\"\n letter, word = letter_word_pair\n return '{letter}: {word}'.format(letter=letter, word=word)\n\n items = mappings.items()\n sorted_items = sorted(mappings.items())\n return '\\n'.join(map(tuple_to_string, sorted_items))", "def get_station_features(cls, station_row):\n features = station_row[2].lower(), station_row[7], station_row[8]\n return features", "def seperate_City_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n city = elem[0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append(city)\n return res, list(values)", "def _repair_names_universal(\n names: Iterable[str], quiet: bool = False, base0_: bool = None\n) -> List[str]:\n min_names = _repair_names_minimal(names)\n neat_names = [re.sub(r\"[^\\w]\", \"_\", name) for name in min_names]\n new_names = _repair_names_unique(\n neat_names,\n quiet=True,\n sanitizer=lambda name: (\n f\"_{name}\"\n if keyword.iskeyword(name) or (name and name[0].isdigit())\n else name\n ),\n base0_=base0_,\n )\n if not quiet:\n changed_names = [\n (orig_name, new_name)\n for orig_name, new_name in zip(names, new_names)\n if orig_name != new_name\n ]\n _log_changed_names(changed_names)\n return new_names", "def get_processed_stations(out_dir):\n lista = [ f.split('_')[0] for f in os.listdir(out_dir) if '.nc' in f ]\n #print('Skipping these stations: ' , lista )\n return lista", "def encode_word(word: str) -> List[str]:\n inner_letters = word[1:-1]\n inner_letters = shuffle(inner_letters)\n return [word[0], *inner_letters, word[-1]]", "def encode_san_dns_names(self, san):\n dns_names = []\n for dns_name in san:\n dns_names.append(x509.DNSName(dns_name))\n return dns_names", "def expand_abbrevs(name):\n key = 
name.upper()\n for abbrev, word in ABBREVS.iteritems():\n key = re.sub(abbrev, word, key)\n \n #Remove (.*) from the street name\n key = re.sub(r'\\(.*?(:?\\)|$)', '', key)\n \n #Unify names\n key = NUMBER_IN_NAMES_REGEX.sub(lambda i: i.group(1) + \" \", key)\n key = re.sub(u\"Ё\", u\"Е\", key)\n key = re.sub(u\"[\\\"'«»№]\", u\" \", key)\n\n # remove \"им\" prefix\n key = re.sub(ur'[^\\s]ИМ[\\.\\s]+', u' ', key)\n\n #Change name parts order\n words = key.split(r\" \")\n words.sort()\n key = \" \".join(words)\n\n key = re.sub(u\"\\s+\", u\" \", key).strip()\n\n logging.debug(\"Street name %s was converted to %s\" % (name, key))\n \n return key", "def append_sitename(strs,site):\n strs = [x+' site:'+site for x in strs]\n return strs", "def create_word(char_list):", "def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]", "def normalize(address):\n replacement = re.sub('\\W+', SEPARATOR, address.lower())\n\n processed = []\n for p in replacement.split(SEPARATOR):\n if not p:\n continue\n\n if p in ABBRS:\n processed.append(ABBRS[p])\n else:\n processed.append(p)\n\n processed.sort()\n\n normalized = SEPARATOR.join(processed)\n return normalized", "def format_words(words):\n return sorted(words, key=str.lower)", "def get_room_names(soup: bs4.BeautifulSoup) -> Iterable[str]:\n return set(x.string for x in soup.Lecture.find_all(\"RaumBez\"))", "def essay_to_wordlist(essay_v, remove_stopwords):\n essay_v = re.sub(\"[^a-zA-Z]\", \" \", essay_v)\n words = essay_v.lower().split()\n if remove_stopwords:\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n return (words)", "def metaphlan_krona_string(input):\n s = []\n for f in input:\n name = bn(f).replace(\"_pe.krona\", \"\").replace(\"_se.krona\", \"\")\n s.append(f\"{f},{name}\")\n return \" \".join(s)", "def fill_stempool(self):\n tokens = [apply_word_tokenize(x) for x in self.df['name']]\n\n flatten1 = itertools.chain.from_iterable\n flat = list(flatten1(tokens))\n\n stems = [self.stemmer.stem(x) for x in flat]\n\n return set(stems)", "def get_stations(nordic_file_names, output_level=0):\n stations = []\n for file in nordic_file_names:\n new_stations = get_event_stations(file, output_level)\n\n if new_stations == -1:\n continue\n\n for x in new_stations:\n if x not in stations:\n stations.append(x)\n\n return sorted(stations)", "def list_stations(intent, session):\n stations = location.get_stations(config.bikes_api)\n street_name = intent['slots']['street_name']['value']\n possible = location.matching_station_list(stations,\n street_name,\n exact=True)\n street_name = street_name.capitalize()\n\n if len(possible) == 0:\n return reply.build(\"I didn't find any stations on %s.\" % street_name,\n is_end=True)\n elif len(possible) == 1:\n sta_name = location.text_to_speech(possible[0]['name'])\n return reply.build(\"There's only one: the %s \"\n \"station.\" % sta_name,\n card_title=(\"%s Stations on %s\" %\n (config.network_name, street_name)),\n card_text=(\"One station on %s: %s\" %\n (street_name, possible[0]['name'])),\n is_end=True)\n else:\n last_name = location.text_to_speech(possible[-1]['name'])\n speech = \"There are %d stations on %s: \" % (len(possible),\n street_name)\n speech += (', '.join([location.text_to_speech(p['name'])\n for p in possible[:-1]]) +\n ', and %s' % last_name)\n card_text = (\"The following %d stations are on %s:\\n%s\" %\n (len(possible), street_name,\n '\\n'.join(p['name'] for p in possible)))\n return reply.build(speech,\n card_title=(\"%s 
Stations on %s\" %\n (config.network_name, street_name)),\n card_text=card_text,\n is_end=True)", "def TransformNames(self) -> _n_2_t_0[str]:", "def remove_duplicate_chars(w):\n return ''.join(c for c, _ in itertools.groupby(w))", "def split_name(fullname):", "def get_initials(name):\n\n # your code here\n result = ''\n na = name.find(' NA ')\n if na != -1:\n name = name[:na] + name[na+3:]\n name += ' '\n while name.find(' ') != -1:\n result += name[0] + '.'\n name = name[name.find(' ')+1:]\n print(result)", "def build_subway(**lines):\n for key in lines.keys():\n # print key\n value = lines[key]\n lines[key] = value.split()\n\n for key in lines.keys():\n stations.update(set(lines[key]))\n system = {}\n for station in stations:\n next_station = {}\n for key in lines:\n if station in lines[key]:\n line = lines[key]\n idx = line.index(station)\n if idx == 0:\n if next_station.has_key(line[1]): \n temp = next_station[line[1]]\n temp.append(key)\n next_station[line[1]] = temp\n else:\n next_station[line[1]] = [key]\n elif idx == len(line)-1:\n if next_station.has_key(line[idx-1]): \n temp = next_station[line[idx-1]]\n temp.append(key)\n next_station[line[idx-1]] = temp\n else:\n next_station[line[idx-1]] = [key]\n else:\n if next_station.has_key(line[idx-1]): \n temp = next_station[line[idx-1]]\n temp.append(key)\n next_station[line[idx-1]] = temp\n else:\n next_station[line[idx-1]] = [key]\n if next_station.has_key(line[idx+1]): \n temp = next_station[line[idx+1]]\n temp.append(key)\n next_station[line[idx+1]] = temp\n else:\n next_station[line[idx+1]] = [key]\n system[station] = next_station\n return system", "def Student_names(l:list)->list:\n result=[]\n for s in l:\n result.append(s.name)\n return result", "def meronym(self, sense=0):\n s = self._synset(self.text, sense=sense)\n\n if not s:\n return []\n\n return s.member_meronyms()", "def rem_str(prelist,names):\n \n for prefix in prelist:\n names=[name.replace(prefix,'') for name in names]\n \n return names" ]
[ "0.59636515", "0.5915971", "0.58542436", "0.5853559", "0.5729847", "0.5717873", "0.57039684", "0.5570993", "0.54327935", "0.54008317", "0.53579676", "0.5210223", "0.51921755", "0.5177079", "0.5170437", "0.51636785", "0.5162297", "0.514331", "0.5133783", "0.51176363", "0.5116946", "0.50965136", "0.50916845", "0.5085611", "0.5071022", "0.5071022", "0.50691146", "0.5064639", "0.5062359", "0.5034298", "0.50284106", "0.50264263", "0.5021686", "0.5017494", "0.50084084", "0.50024664", "0.5001962", "0.4988385", "0.49543414", "0.49521515", "0.4945231", "0.49293977", "0.4927092", "0.49176168", "0.4915139", "0.4900497", "0.48996523", "0.48945412", "0.48939404", "0.48938602", "0.48938423", "0.4890943", "0.48908943", "0.48907763", "0.48831102", "0.4878899", "0.4859294", "0.48584524", "0.48543245", "0.48507634", "0.48482257", "0.48480356", "0.48465312", "0.48421136", "0.4840037", "0.48369643", "0.483175", "0.4829894", "0.48257753", "0.4816988", "0.4809957", "0.48069483", "0.48032123", "0.47986624", "0.4792884", "0.47901314", "0.47772747", "0.47727233", "0.47709304", "0.47648045", "0.47610003", "0.47574395", "0.4750958", "0.4746842", "0.47467065", "0.47427657", "0.47415423", "0.47390294", "0.47340626", "0.4728951", "0.4720255", "0.47182348", "0.47155702", "0.47151142", "0.47138458", "0.47078738", "0.47067863", "0.4705929", "0.47038886", "0.47014362" ]
0.53781
10
converts time to gmt, appends to list
def gmt(time):
    from datetime import datetime  # needed for utcfromtimestamp
    # pre-allocate one slot per timestamp
    gmt = [0]*time.size
    for i in range(time.size):
        # convert each Unix epoch value to a GMT/UTC date string
        gmt[i] = datetime.utcfromtimestamp(time[i]).strftime('%Y-%m-%d %H:%M:%S')
    return gmt
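A minimal usage sketch, assuming "time" arrives as a NumPy array of Unix epoch seconds (the .size attribute above implies an array-like input):

import numpy as np

epochs = np.array([0, 1609459200])  # 1970-01-01 and 2021-01-01, both 00:00:00 UTC
print(gmt(epochs))  # ['1970-01-01 00:00:00', '2021-01-01 00:00:00']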
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_time(self):\r\n\r\n curr_time = datetime.datetime.now()\r\n time = []\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.second)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.minute)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.hour)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.day)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.month)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.year - 2000)])\r\n return time", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def get_times():\n global times\n global times_list\n base_url = \"http://www.crawleymosque.com/\"\n r = requests.get(base_url)\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n\n times_list = []\n for salah_time in soup.find_all(class_=\"prayer-start\"):\n times_list.append(salah_time.contents[0].strip())\n\n print(times_list)\n times = []\n for i in times_list:\n datetime_object = datetime.strptime(i, \"%I:%M %p\")\n just_time = datetime.time(datetime_object)\n times.append(just_time)\n\n print(times)\n\n # spam = Label(root, text=\"checking for spam\")\n # spam.place(x=460, y=110)", "def time_to_hour_and_minute(time):\n return [time // 60, time % 60]", "def convert_time(time):\n\n s = time.split()[0]\n s_h = int(s.split(':')[0])\n\n am_pm = s.split(':')[1][-2:]\n if s_h == 12:\n s_h = s_h - 12\n if am_pm == 'PM':\n s_h = s_h + 12\n s_h = s_h + 1\n\n e = time.split()[2]\n e_h = int(e.split(':')[0])\n\n am_pm = e.split(':')[1][-2:]\n if e_h == 12:\n e_h = e_h - 12\n if am_pm == 'PM':\n e_h = e_h + 12\n e_h = e_h + 1\n\n hour_list = range(s_h, e_h + 1)\n return hour_list", "def filter_lower_datetime(time, list_time):\n return [t for t in list_time if t <= time]", "def convert_time(self, t_variable):\n date_list = []\n times = self.dataset[t_variable].values\n\n for time in times:\n try:\n time = pd.to_datetime(str(time))\n date_list.append(time.strftime('%Y-%m-%dT%H:%M:%SZ'))\n except ValueError as ve:\n print(\"Error parsing and converting '%s' variable object to CovJSON compliant string.\" % (t_variable), ve)\n\n return date_list", "def convert_seconds_to_readable(self, time_value):\n time_readable = []\n for value in time_value:\n time_readable_mini = time.strftime('%I:%M:%S%p', time.localtime(value))\n time_readable.append(time_readable_mini)\n mylog.debug('Converting %s to %s' % (value, time_readable_mini))\n return time_readable", "def get_time(t):\n return [time.clock()-t[0], time.time()-t[1]]", "def list_times(self, start: int = None, end: int = None) -> List:\n return [i.time for i in self.data[start:end]]", "def get_timestamped_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__string_list):\n ret_list.append(self.__timestamp_list[i].strftime(\"%Y-%m-%d %H:%M:%S\")+\" \"+self.__string_list[i])\n i += 1\n return ret_list", "def get_time_strs(self):\n\n log(\"Getting time strings starting at {}\".format(self._t0))\n tz = dt.timezone.utc\n mkdt = lambda n: dt.datetime.fromtimestamp(\n self._t0 - (self._delta * n),\n tz=tz\n )\n ns = range(self._frames, 0, -1)\n return [mkdt(n).strftime('%Y%m%d%H%M') for n in ns]", "def get_times(my_vars):\n base_time = my_vars['base_time'].getValue()\n try:\n times=my_vars['time']\n except KeyError:\n 
times = my_vars['time_offset']\n\n ts = []\n for time in times:\n temp = datetime.utcfromtimestamp(base_time+time)\n if (temp.minute == 0) :\n ts.append(temp)\n return ts", "def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:", "async def _timein_list(self):\n\t\t\n\t\tmessage = 'Favourites\\n```Name: Timezones\\n'\n\t\t\n\t\tfor fav in self.favourites:\n\t\t\tmessage += fav + ': '\n\t\t\tmessage += self.favourites[fav].replace(',', ', ').replace('_', ' ') + '\\n'\n\t\t\n\t\tmessage += '```'\n\t\tawait self.bot.say(message)", "def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second", "def add_timecard(self,time,name):\n id = self.find_employee_id(name)\n if id in self.timecard:\n self.timecard[id].append(time)\n else:\n self.timecard[id] = [time]\n return self.timecard", "def sort_time(self):\n self.entries.sort(key=lambda x: x.date_stamp_utc)", "def get_current_time():\n cur_time = datetime.datetime.now() + offset_time\n return [cur_time.year, cur_time.month, cur_time.day, cur_time.hour, cur_time.min, cur_time.second]", "def _splitTime(self, time): \n if (time):\n x = re.split(\"[-\\/\\s:]\", time)\n else:\n x = []\n # Pad the list to four elements (year,month,day,hour)\n while (len(x) < 4):\n x.append(None)\n return x", "def order_by_ftime(tasks_lst):\n return sorted(tasks_lst, key=lambda task: task[1])", "def conv_time(l, h):\n\t# Function modified from post on ActiveState by John Nielsen\n\n\t#converts 64-bit integer specifying the number of 100-nanosecond\n\t#intervals which have passed since January 1, 1601.\n\t#This 64-bit value is split into the\n\t#two 32 bits stored in the structure.\n\td = 116444736000000000L \n\n\t# Some LNK files do not have time field populated \n\tif l + h != 0:\n\t\tnewTime = (((long(h) << 32) + long(l)) - d)/10000000 \n\telse:\n\t\tnewTime = 0\n\n\treturn time.strftime(\"%Y/%m/%d %H:%M:%S %a\", time.localtime(newTime))", "def add_times_of_travels(dfs_splited: list, time: datetime.datetime) -> list:\n results = google_api_request(dfs_splited, time)\n logger.debug(\"Ready answer for request\")\n print(results)\n travel_times = [[i.get('duration', {}).get('value')\n for i in result['rows'][0]['elements']]\n for result in results]\n logger.debug(\"Times of travel extracted\")\n\n return [df.assign(time_sec=time_t)\n for df, time_t in zip(dfs_splited, travel_times)]", "def time_to_view(view, edit, fmt):\n for s in view.sel():\n if s.empty():\n view.insert(edit, s.a, time.strftime(fmt))\n else:\n view.replace(edit, s, time.strftime(fmt))", "def get_timestring_from_int(time_array, format=\"%H:%M:%S\"):\n list = []\n for value in time_array:\n list.append((value, int2dt(value, 1).strftime(format)))\n return list", "def add_time(t):\n\n times.append(t)\n\n # update number display to show real time\n number_display.time = t\n number_display.update()\n\n # generate new scramble and update scramble_image\n new_scramble = generate_scramble(int(settings['puzzle']),\n int(settings['scramble-length']))\n scrambles.append(new_scramble)\n scramble_image.clear()\n scramble_image.chars = char(new_scramble)\n\n ao5, ao12 = update_stats()\n\n with open(session_file.string, 'a') as f:\n if len(times) == 1:\n f.write(f'{add_zero(t)}\\t{ao5}\\t{ao12}\\t{new_scramble}')\n else:\n f.write(f'\\n{add_zero(t)}\\t{ao5}\\t{ao12}\\t{new_scramble}')", "def get_time(text_time):\n # return 
Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]", "def get_teams_and_schedule():\n start_time = timedelta(hours=19)\n time_to_add = timedelta(minutes=15)\n teams = session.query(Team).all()\n\n for team in teams:\n team.time = str(start_time)\n start_time += time_to_add\n yield team", "def gprmc_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[2] == 'V':\r\n return\r\n raw_date = gps[9]\r\n time = ''\r\n date = raw_date[0:2]\r\n month = raw_date[2:4]\r\n year = raw_date[4:]\r\n #modify year if reaches year 2100\r\n time += date + '/' + month + '/20' + year\r\n return [time]", "def datetime_to_list(date):\n return [date.year, date.month, date.day,\n date.hour, date.minute, date.second]", "def datetime_to_list(date):\n return [date.year, date.month, date.day,\n date.hour, date.minute, date.second]", "def time_calculator(seconds):\n seconds = int(seconds)\n days = seconds // 86400\n hours = seconds % 86400 // 3600\n minutes = seconds % 86400 % 3600 // 60\n seconds = seconds % 86400 % 3600 % 60\n time_list = [days, hours, minutes, seconds] # Created a list that stores these variables in order\n return time_list", "def time_convert(intime):\n Nt = intime.shape[0]\n outtime = []\n for t in range(Nt):\n timestr = ''.join([intime[t,:][~intime[t,:].mask].data[i].decode('utf-8') for i in range(len(intime[t,:][~intime[t,:].mask].data))])\n outtime.append(datetime.strptime(timestr, '%Y-%m-%d_%H:%M:%S'))\n return outtime", "def time(self, start_time):\n \n TIME_LIST.append((time.time() - start_time))\n print(\"--- %s seconds ---\" % (time.time() - start_time))", "def time_settime(currenttime):\r\n\r\n time_query_times.append((getruntime(), currenttime))", "def record_time(times, enabled, *args):\n if not enabled:\n yield\n else:\n start = time.time()\n yield\n end = time.time()\n times.append((' '.join(args), start, end))", "def generate_time_data(self):\n # generate random dates and append to a list\n sd = self.start_date\n ed = self.end_date\n dates = [random_date(start=sd, end=ed) for d in range(0, self.obs)]\n\n # convert to ISO 8601 format and update \"Local Time\" field\n self.output['Local Time'] = map(lambda x: x.isoformat(), dates)", "def start_time():\n t = [time.clock(), time.time()]\n return t", "def format_time(self, time):\n hh = time[0:2]\n mm = time[2:4]\n ss = time[4:]\n return \"%s:%s:%s UTC\" % (hh,mm,ss)", "def process_timecards(self):\n timecard = open('timecards.txt','r')\n time_temp = []\n time = []\n for line in timecard:\n time_temp.append(line)\n for i in time_temp:\n time.append(i.split(','))\n for i in time:\n for q in range(len(i)):\n if q == 0:\n pass\n else:\n i[q] = float(i[q])\n for i in time:\n for q in range(len(i)):\n self.timecard[i[0]] = i[1:]\n #print(self.timecard)\n return self.timecard", "def convert_time_to_seconds(self, time_value):\n time_epoch = []\n mylog.debug('Converting %s to epoch time' % time_value)\n for value in time_value:\n try:\n pattern = ' %I:%M:%S%p'\n time_epoch_mini = int(time.mktime(time.strptime(value, pattern))) \n time_epoch.append(time_epoch_mini)\n except:\n mylog.debug('%s Does not seem to be in format with leading space' % value)\n try:\n pattern = '%I:%M:%S%p'\n time_epoch_mini = int(time.mktime(time.strptime(value, pattern))) \n time_epoch.append(time_epoch_mini)\n except:\n mylog.debug('%s Does not appear to be 
in format without leading space' % value)\n return time_epoch", "def __call__(self, x: Sequence[datetime]) -> Sequence[str]:\n if self.tz is not None:\n x = [d.astimezone(self.tz) for d in x]\n return [d.strftime(self.fmt) for d in x]", "def add_gigasecond(time = datetime(1, 1, 1, 0, 0, 0)): # -> datetime() object\n time += timedelta(seconds = 10 ** 9)\n return time", "def format_time(self, time):\n hours = time // 3600\n time = time - hours*3600\n minutes = time // 60\n seconds = time - minutes*60\n return ('%d:%d:%d' %(hours, minutes, seconds))", "def word_time(word, elapsed_time):\n return [word, elapsed_time]", "def sort_time(cls):\n CloudCtx.objCloudCtx.sort(key=lambda x: datetime.strptime(x.modTs, \"%d-%m-%Y %I:%M:%S %p\"), reverse=True)\n for elem in CloudCtx.objCloudCtx:\n print(elem.display_cloud_ctx())", "def add_minutes(self):\n r = self.minute + self.value\n x = int((r / 60))\n\n self.hour = self.hour + x\n self.minute = r - (60 * x)\n\n cycles = int(self.hour / 12)\n if cycles > 0:\n if (cycles % 2) == 0:\n pass\n else:\n if self.meridiem == 'AM':\n self.meridiem = 'PM'\n else:\n self.meridiem = 'AM'\n\n self.hour = self.hour - cycles * 12\n if self.hour == 0:\n self.hour = 1\n\n if self.minute < 10:\n self.minute = str(0) + str(self.minute)\n\n new_time: str = str(self.hour) + ':' + str(self.minute) + ' ' + self.meridiem.upper()\n return new_time", "def makeChronList(self):\n from operator import itemgetter\n ## make list of msg lists in the format accespted by reconstructLine\n self.outData_temp = [] # this will be in chronological order\n for sens in self.outData:\n if sens is not 'header':\n for meas in self.outData[sens]:\n for time in self.outData[sens][meas]:\n value = self.outData[sens][meas][time]\n thismsg = [time, sens, meas, str(value)] # leave time as float for sorting\n self.outData_temp.append(thismsg)\n self.outData_temp.sort(key=itemgetter(0)) # sort by first index\n for msg in self.outData_temp: # now we can make time a string\n msg[0] = str(msg[0])", "def oneTimepoint(timepoint):\n\tt = []\n\tfor vs in timepoint:\n\t\tt.append((timepoint.attrib.get('CollectionTime'), vs[0].text, vs[1].text))\n\treturn(t)", "def Time(row):\r\n try:\r\n timeadd = dt.datetime.strptime(row['TickIssueTime'], '%H:%M').time()\r\n except:\r\n timeadd = dt.datetime.strptime('00:00', '%H:%M').time()\r\n\r\n newtime = dt.datetime.combine(dt.datetime.strptime(row['TickIssueDate'], '%Y-%m-%d %H:%M:%S') , timeadd)\r\n return newtime", "def add_timedelta_to_time():\n dt = datetime.datetime.combine(datetime.date.today(), datetime.time(12, 30, 5)) + datetime.timedelta(hours=1)\n t = dt.time()\n print(t) # 13:30:05", "def _add_time(time_to_add: int):\n store.time += time_to_add", "def time_iterator(\n *,\n first_time: datetime.datetime,\n last_time: datetime.datetime,\n resolution: int,\n timezone: datetime.timezone,\n) -> Generator[datetime.datetime, None, None]:\n\n current_time = first_time\n while current_time < last_time:\n yield current_time.replace(tzinfo=timezone)\n current_time += relativedelta(\n hours=resolution // 3600,\n minutes=(resolution // 60) % 60,\n seconds=resolution % 60,\n )", "def dump_datetime(value):\n if value is None:\n return\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def get_target_timestamps(self):\n times=[]\n curr = self.begin_ts\n while curr<=self.end_ts:\n times.append(curr)\n curr = curr + 24 * 60 * 60\n return times", "def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n 
for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]", "def offset(self):\n\n offsetList = ['12 am', '1 am', '2 am', '3 am', '4 am', '5 am', '6 am', '7 am', '8 am', '9 am',\n '10 am', '11 am', '12 pm', '1 pm', '2 pm', '3 pm', '4 pm', '5 pm', '6 pm', '7 pm',\n '8 pm', '9 pm', '10 pm', '11 pm', '12 pm']\n\n firstTimeHour = self.firstTime.time().hour\n print ('First Time Hour:', firstTimeHour)\n\n m2 = str(self.firstTime.time())\n m2 = datetime.datetime.strptime(m2, '%I:%M %p')\n print(m2)", "def want_timeframe(self, timeframe):\n self._wanted.append(timeframe)", "def format_start_time(self, data):\n return data", "def get_fetch_times(last_fetch):\n now = get_now()\n times = list()\n time_format = \"%Y-%m-%dT%H:%M:%SZ\"\n if isinstance(last_fetch, str):\n times.append(last_fetch)\n last_fetch = datetime.strptime(last_fetch, time_format)\n elif isinstance(last_fetch, datetime):\n times.append(last_fetch.strftime(time_format))\n while now - last_fetch > timedelta(minutes=59):\n last_fetch += timedelta(minutes=59)\n times.append(last_fetch.strftime(time_format))\n times.append(now.strftime(time_format))\n return times", "def datetime_timeplotxml(self):\n if self.time:\n return self.date.strftime(\"%b %d %Y\") + \" \" + self.time.strftime(\"%H:%M:%S\") + \" GMT\"\n else:\n return self.date.strftime(\"%b %d %Y\") + \" \" + \"00:00:00\" + \" GMT\"", "def getTimeStamps():\n\n # Initialize\n results = dict()\n\n # UT time\n ut = utils.getUT(pointing=True).split()\n results['utday'] = ut[0]\n results['ut'] = float(ut[1])\n\n # year/month/day/second\n utStamp = time.gmtime()\n utHour = maybeAddAZero(utStamp[3])\n utMin = maybeAddAZero(utStamp[4])\n utSec = maybeAddAZero(utStamp[5])\n results['timeLab'] = ''.join([commands.yearMonthDay(),'_',utHour,utMin,utSec])\n\n # Done\n return results", "def get_ph_time(as_array=False):\n utc = timezone('UTC')\n phtz = timezone('Asia/Manila')\n now = utc.localize(datetime.utcnow())\n now = now.astimezone(phtz)\n if as_array:\n return [now.year, now.month, now.day, now.hour, now.minute, now.second]\n else:\n return datetime(now.year, now.month, now.day, now.hour, now.minute, now.second)", "def render_time(dt):\n return dt.strftime('%H:%M:%S')", "def addtomemorycollectiontime(self, datetime):\n self._memorycollectiontime.append(datetime)", "def createdatv(times):\n t0=datetime.datetime.strptime(times[0].tostring().decode('utf-8'), '%Y-%m-%d_%H:%M:%S')\n t1=datetime.datetime.strptime(times[1].tostring().decode('utf-8'), '%Y-%m-%d_%H:%M:%S')\n ts=(t1-t0).total_seconds()\n datev=[]\n for i in range(0,len(times)):\n datev.append(t0+datetime.timedelta(seconds=i*ts))\n return(datev)", "def localize_time(self, apitime):\n return self.feedzone.localize(apitime).astimezone(self.localzone)", "def parse_time(text):\n\n # When keyword is 'in' adds values to time\n if text[-3] == 'in':\n remind_time = time.gmtime(int(text[-2]) * int(text[-1]) + time.time())\n # Otherwise try to parse time as written\n else:\n remind_time = text[-1].replace(':', ' ') \\\n + \" \" \\\n + time.strftime(\"%m/%d/%y\", time.gmtime(time.time()))\n remind_time = time.strptime(remind_time, \"%H %M %m/%d/%y\")\n return remind_time", "def OPCtimetransform(data, to):\n \n remove_times = []\n outtimes = []\n times = 
{'ms':[],'SS':[],'MM':[],'HH':[]}\n\n for i in range(0, len(data)):\n times['HH'] = 0\n times['MM'] = 0\n times['SS'] = 0\n times['ms'] = 0\n\n item = data[i]\n \n try:\n if len(item.split('.')[1]) < 2:\n item += '0'\n except IndexError:\n item += '.00'\n if len(item) < 9:\n item = item.zfill(9)\n if int(item[:2]) > 23:\n item = '0' + item\n \n # remove items with extra zero (2319010.00 to 231910)\n if len(item) > 9:\n olditem = item\n newitem = item[:4] + item[5:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n item = newitem\n else:\n pass\n try:\n md = dt.datetime.strptime(item, \"%H%M%S.%f\")\n \n # round off items which exceed 59 minutes or 59 seconds \n # (i.e. 146001 to 150001.)\n except ValueError:\n \n try:\n times['HH'] = int(item[0:2])\n times['MM'] = int(item[2:4])\n times['SS'] = int(item[4:6])\n times['ms'] = int(item[7:9])\n except ValueError:\n print(i, item)\n\n if times['SS'] > 59:\n times['MM'] += 1\n times['SS'] = 0\n if times['MM'] > 59:\n times['HH'] += 1\n times['MM'] = 0\n # discard items which exceed 23 hours\n if times['HH'] > 23:\n times['HH'] = 23\n print( ('resetting value %s')%(item) )\n \n\n md = dt.datetime(1900,1,1,times['HH'], times['MM'], times['SS']) \n\n \n outtimes.append( dt.datetime.strftime(md, to) )\n\n return outtimes", "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")])\n\n return data_output", "def render_date_time_with_relative_into(into, date_time, add_ago):\n into.append(format(date_time, DATETIME_FORMAT_CODE))\n \n into.append(' [*')\n into.append(elapsed_time(date_time))\n if add_ago:\n into.append(' ago')\n into.append('*]')\n \n return into", "def constructTimeLineItem(self):\n\t\treturn", "def get_hours_and_minutes(time):\n\n # Get the total seconds of the time object\n totalseconds = time.total_seconds()\n \n # Find the total hours in the time object\n totalhours = totalseconds//3600\n # Find the total minutes in the time object\n totalminutes = (totalseconds%3600) // 60 \n\n # Return a list composing of the total hours and minutes\n return [totalhours,totalminutes]", "def call_list_timestamp(timestamp):\n return datetime.datetime.utcfromtimestamp(timestamp).isoformat()", "def render_todays_listings(request, context, callsigns):\n context['listings_matrix'] = []\n for callsign in callsigns:\n whats_on_today_url = settings.SODOR_ENDPOINT + 'tvss/' + callsign + '/today/'\n data = requests.get(whats_on_today_url, headers={'X-PBSAUTH': settings.TVSS_KEY})\n\n if data.status_code == 200:\n jd = data.json()\n # have to loop through and covert the goofy timestamps into datetime objects\n for f in jd['feeds']:\n for l in f['listings']:\n l['start_time_obj'] = datetime.datetime.strptime(l['start_time'], \"%H%M\")\n l['callsign'] = callsign\n context['listings_matrix'].append(jd)\n return context", "def currentTime():\n return strftime(\"%H:%M:%S\", time.localtime())", "def _hour_to_time(num: int):\n return datetime.datetime.now().replace(hour=num).strftime(\"%-I %p\")", "def time_form(gdf):\n gdf['time'] = gdf['time'].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n \n return gdf", "def set_ga_timestamp(self, time: int):\n for cl in self:\n cl.tga = time", "def _get_time(self): \n\t\t# need to variable-ize the version ??? 
\n\t\ttime = self.root.find('.//{http://www.opengis.net/kml/2.2}when').text\n\t\t## strip off last 5 chars, ie '.135Z in '2015-08-01T00:06:29.135Z'\n\t\tutc = dateutil.tz.tzutc() \n\t\tcentral = dateutil.tz.gettz('America/Chicago')\n\t\ttime = datetime.datetime.strptime(time[:-5], '%Y-%m-%dT%H:%M:%S')\n\t\ttime = time.replace(tzinfo=utc)\n\t\tself.time = time.astimezone(central)", "def get_timestamps(self) -> List[datetime.datetime]:\n return [activity.timestamp for activity in self.activities]", "def _update_time_cursor(self):\n for line in self.timeLines:\n line.setValue(self.playbackTime)", "def output_format(times_list):\n formatted_free_times = []\n for i in times_list:\n fmt_str = \"{} to {}.\".format(\n i[0].format('ddd, MMM D, h:mm a'),\n i[1].format('ddd, MMM D, h:mm a'))\n formatted_free_times.append(fmt_str)\n return formatted_free_times", "def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:\n if self.single_scene:\n return [time_interval[0]] # type: ignore[index, list-item]\n\n timestamps = get_available_timestamps(\n bbox=bbox,\n time_interval=time_interval,\n data_collection=self.data_collection,\n maxcc=self.maxcc,\n config=self.config,\n )\n\n return self.timestamp_filter(timestamps, self.time_difference)", "def getTimes():", "def getTimes():", "def getTimes():", "def addTimes(time1, time2):\n t1 = timedelta(hours=time1.hour, minutes=time1.minute, seconds=time1.second)\n t2 = timedelta(hours=time2.hour, minutes=time2.minute, seconds=time2.second)\n t3 = t1 + t2\n return (datetime.min + t3).time()", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def round_trip_time(self):\n ...", "def scale_time_to(recs, unit):\n\n for r in recs:\n if unit == 'd':\n r.t = [t / 3600 / 24 for t in r.time]\n elif unit == 'hours':\n r.t = [t / 3600 for t in r.time]\n elif unit == 'min':\n r.t = [t / 60 for t in r.time]\n elif unit in ('s', 'sec'):\n r.t = r.time\n else:\n Exception('Wrong time unit')\n\n Records.time_unit = unit\n Records.time_label = 'Time (' + unit + ')'", "def determineTimes():\r\n tm = getLocalTime()\r\n startFadeUpTime = utime.localtime(utime.mktime((tm[0], tm[1], tm[2], WAKEUP_TUPLE[0],\r\n WAKEUP_TUPLE[1] - FADE_TIME, tm[5], tm[6], tm[7])))\r\n startFadeDownTime = utime.localtime(utime.mktime((tm[0], tm[1], tm[2], WAKEUP_TUPLE[0],\r\n WAKEUP_TUPLE[1] + LIT_LENGTH, tm[5], tm[6], tm[7])))\r\n return [startFadeUpTime[3:5], startFadeDownTime[3:5]]", "def format_time_sortkey(self, data):\n return self.input['start_time'].time().strftime('%H%M').lstrip('0')", "def round_time(self, time):\n hour, mins, _ = time.split(\":\")\n return '{:02d}:00:00'.format(int(hour)+1 ) if int(mins) >= 30 else '{:02d}:00:00'.format(int(hour))", "def _groupDate(item):\n return item.time.date()", "def get_time_attr_map(t):\n now = datetime.datetime.now()\n if t + datetime.timedelta(hours=3) > now:\n return get_map(\"main_list_white\")\n if t + datetime.timedelta(days=3) > now:\n return get_map(\"main_list_lg\")\n else:\n return get_map(\"main_list_dg\")", "def 
planwatch(self, hours=12):\n post = {'mytime': str(hours)}\n response = self._get_page('planwatch.php', post=post)\n soup = bs4.BeautifulSoup(response.text, 'html5lib')\n results = soup.find('ul', {'id': 'new_plan_list'})\n new_plans = results.findAll('div', {'class': 'newplan'})\n resultlist = []\n for div in new_plans:\n user = div.find('a', {'class': 'planlove'}).contents[0]\n time = div.find('span').contents[0]\n time = parse_plans_date(time, tz_name=self.server_tz)\n resultlist.append((user, time))\n return resultlist" ]
[ "0.6536344", "0.61749583", "0.5915965", "0.57811874", "0.5765255", "0.5675775", "0.5667193", "0.5663627", "0.5660941", "0.5621869", "0.5613156", "0.55986845", "0.5576497", "0.5538767", "0.55233485", "0.55079854", "0.5443805", "0.5407144", "0.53734505", "0.53541595", "0.53422564", "0.5310349", "0.5274767", "0.5265787", "0.52646846", "0.5240537", "0.52287173", "0.5223971", "0.5217923", "0.52029425", "0.52029425", "0.5164896", "0.5162162", "0.5159103", "0.5147681", "0.51302767", "0.51286936", "0.5104627", "0.5104495", "0.5088883", "0.50818616", "0.5075457", "0.5064261", "0.506147", "0.5049036", "0.504405", "0.50338143", "0.50313014", "0.49972877", "0.49830714", "0.49816045", "0.4980034", "0.49789843", "0.4977556", "0.4967999", "0.49594393", "0.49569255", "0.4954857", "0.49540606", "0.4950274", "0.49422488", "0.49363834", "0.49332118", "0.49319202", "0.49293098", "0.49247378", "0.49205348", "0.4915798", "0.49111706", "0.49042174", "0.49003845", "0.48878545", "0.48783308", "0.48723876", "0.4871808", "0.4868223", "0.48548976", "0.48515105", "0.48377123", "0.48303837", "0.4826693", "0.48263168", "0.48211282", "0.48169085", "0.48163712", "0.48163712", "0.48163712", "0.48123205", "0.4810726", "0.4810726", "0.4810726", "0.4810726", "0.48086488", "0.48051482", "0.47955707", "0.47931612", "0.47929242", "0.47883552", "0.4782844", "0.47748497" ]
0.72068673
0
finds stations that don't have predictand data and appends them to a list
def miss_station(all_stations, stations):
    # count of stations with no predictand data
    diff = len(all_stations) - len(stations)
    k = 0
    i = 0
    miss_stations = ['']*diff
    # sort copies of both lists so they can be walked in parallel
    a = all_stations[:]
    a.sort()
    s = stations[:]
    s.sort()
    while i < len(stations):
        # every name in a with no counterpart in s is a missing station
        while a[i] != s[i]:
            miss_stations[k] = a[i]
            del a[i]
            k += 1
        i += 1
    # pick up any missing stations that sort after the last matched name
    miss_stations[k:] = a[len(stations):]
    return miss_stations
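A quick illustrative call, assuming station names are plain strings and every entry of stations also appears in all_stations:

all_stations = ['AKL', 'CHC', 'DUD', 'WLG']
with_data = ['CHC', 'WLG']
print(miss_station(all_stations, with_data))  # ['AKL', 'DUD']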
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stations():\n\n return station_list", "def prep_stations(url):\n stations = []\n _stations = requests.get(url).json()\n\n for _station in _stations['stationBeanList']:\n if _station['statusKey'] == 1:\n stations.append([_station['stationName'], _station['id'],\n _station['availableDocks'], _station['totalDocks'],\n _station['latitude'], _station['longitude']])\n\n return stations", "def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}", "def get_processed_stations(out_dir):\n lista = [ f.split('_')[0] for f in os.listdir(out_dir) if '.nc' in f ]\n #print('Skipping these stations: ' , lista )\n return lista", "def train_stations(self) -> List[str]:\n return sorted([train_info['HE'] for train_info in train_api.stations_info.values()])", "def stations(station_let):\n\tstat = ['']*np.size(station_let,0)\n\tfor i in range(len(stat)):\n\t\tfor j in range(4):\n\t\t\tif station_let[i][j] is not np.ma.masked:\n\t\t\t\tstat[i]+=station_let[i][j]\n\treturn stat", "def _build_stations(self, stop_list):\n # stations = [] TODO: What is this for\n dists = self._euclidian_distances(stop_list)\n stations = self._calculate_y_lines(dists)\n return stations", "async def get_train_stations(self, latitude: float, longitude: float,\n valid_stations=None) -> list:\n params = {\n 'location': '{},{}'.format(latitude, longitude),\n 'key': self.api_key,\n 'type': \"train_station\",\n \"radius\": 1600\n }\n\n logging.info(\"Getting train stations near (%f, %f)\", latitude, longitude)\n\n async with aiohttp.ClientSession() as session:\n async with session.post('https://maps.googleapis.com/maps/api/place/nearbysearch/json',\n params=params) as response:\n if response.status == HTTPStatus.OK:\n payload = await response.json()\n if payload['status'] == 'OK':\n if valid_stations:\n return [result[\"name\"] for result in payload[\"results\"]\n if result[\"name\"] in valid_stations]\n\n return [result[\"name\"] for result in payload[\"results\"]]\n\n return []", "def all(self, skip_cache=False):\n now = _time_ms(datetime.datetime.utcnow())\n if skip_cache or now - self._last_updated > CACHE_LIMIT:\n self._process_stations()\n return self._stations_lst", "def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations", "def station_list() -> List[Dict]:\n return STATIONS", "def test_get_closest_stations(self):\n\t\tpoint = \"POINT(40.71911552 -74.00666661)\"\n\t\tstations = set(server.get_closest_stations(point))\n\t\t# find the 
closest stations, make them a set of objects see if sets intersect completely", "def create_list() -> List[Optional[float]]:\n return [None] * num_stations", "def getStations(self) :\n return self._stations", "def get_all_stations(session: Session) -> List[Row]:\n return session.query(PlanningWeatherStation.station_code).all()", "def stations(self):\n for stat in sorted(self.station_records):\n yield self.station_records[stat]", "def read_noaa_stations(self):\n # wget -c http://weather.noaa.gov/data/nsd_bbsss.txt\n #72;656;KSFD;Winner, Bob Wiley Field Airport;SD;United States;4;43-23-26N;099-50-33W;;;619;;\n #93;246;NZRO;Rotorua Aerodrome;;New Zealand;5;38-07S;176-19E;38-07S;176-19E;285;294;\n #block;synop;icao;name;?;country;??;lat;lon;lat2;lon2;height;?;\n #0 1 2 3 4 5 6 7 8 9 10 11 12\n if not os.path.exists(self.noaa_filename):\n LOGGER.warning('could not find noaa file \"%s\"', self.noaa_filename)\n return self.known_stations\n count = 0\n with open(self.noaa_filename, 'r') as csvfile:\n stationreader = csv.reader(csvfile, delimiter=';')\n for row in stationreader:\n station_id = '{}{}'.format(row[0], row[1])\n station_id_icao = row[2].strip().upper()\n data = noaa_station_data_from_row(row)\n if data is not None:\n count += 1\n self.known_stations[station_id] = data\n if len(station_id_icao) == 4 and station_id_icao.isalpha():\n self.known_stations[station_id_icao] = data\n self.noaa_file_age = os.path.getmtime(self.noaa_filename)\n LOGGER.info(' Loaded %i noaa station records from \"%s\"', count, self.noaa_filename)\n return self.known_stations", "def _add_data(self, model_stations: Iterable[model.Station],\n validate_prefix: str = \"\") -> int:\n valid_station_count = 0\n jreast_merged_codes: dict[model.StationID, str] = load_csv_as_mapping(\n DIR_CURATED / \"jreast_merged_codes.csv\",\n itemgetter(\"sta_id\"),\n itemgetter(\"code\")\n )\n\n # Add data from model stations\n for model_sta in model_stations:\n is_invalid = False\n should_validate = model_sta.id.startswith(validate_prefix)\n\n # Find a matching geo_sta\n geo_sta = self.by_id.get(model_sta.id)\n if not geo_sta:\n if should_validate:\n self.logger.critical(f\"{Color.RED}geo.osm is missing station \"\n f\"{Color.MAGENTA}{model_sta.id}{Color.RESET}\")\n self.valid = False\n continue\n\n # Find a name\n name_id = last_part(geo_sta.id)\n geo_sta.name = self.names.get(name_id)\n if geo_sta.name is None and should_validate:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Copy stop_code\n geo_sta.code = model_sta.code\n\n # Check if station was valid\n if is_invalid:\n self.valid = False\n elif should_validate:\n valid_station_count += 1\n\n # Generate codes and names for mother stations\n for sta in self.by_id.values():\n if not sta.children:\n continue\n\n name_id = last_part(sta.id)\n sta.name = self.names.get(name_id)\n if not sta.name:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Get children codes\n children_codes = []\n jreast_merged_code = jreast_merged_codes.get(sta.id)\n if jreast_merged_code:\n children_codes.append(jreast_merged_code)\n\n for child in sta.children:\n # Ignore JR-East child codes if there's a JR-East merged code\n if child.id.startswith(\"JR-East\") and jreast_merged_code:\n continue\n elif child.code:\n children_codes.append(child.code)\n\n sta.code = \"/\".join(children_codes)\n\n return 
valid_station_count", "def stations(self):\n stations = []\n f = self._fetch(Citibike.STATION_URL)\n data = json.load(f)\n if 'stationBeanList' not in data or len(data['stationBeanList']) == 0:\n raise BadResponse('Station Fetch Failed', data)\n for station in data['stationBeanList']:\n stations.append(Station._from_json(station))\n logging.debug(\"Retrieved %d stations\" % len(stations))\n return stations", "async def get_stations() -> List[WeatherStation]:\n # Check if we're really using the api, or loading from pre-generated files.\n use_wfwx = config.get('USE_WFWX') == 'True'\n if use_wfwx:\n return await _get_stations_remote()\n return _get_stations_local()", "def get_all_stations(engine): \n # Query db\n sql = (\"SELECT DISTINCT a.station_id, \"\n \" a.station_code, \"\n \" a.station_name, \"\n \" c.station_type, \"\n \" d.latitude, \"\n \" d.longitude \"\n \"FROM nivadatabase.projects_stations a, \"\n \" nivadatabase.stations b, \"\n \" nivadatabase.station_types c, \"\n \" niva_geometry.sample_points d \"\n \"WHERE a.station_id = b.station_id \"\n \"AND b.station_type_id = c.station_type_id \"\n \"AND b.geom_ref_id = d.sample_point_id \"\n \"ORDER BY a.station_id\")\n df = pd.read_sql(sql, engine)\n\n return df", "def get_stations(self):\n return self.__request('stations')['stations']", "def get_stations(base_url, hts, mtype):\n stns1 = ws.site_list(base_url, hts, location='LatLong') # There's a problem with Hilltop that requires running the site list without a measurement first...\n stns1 = ws.site_list(base_url, hts, location='LatLong', measurement=mtype)\n stns2 = stns1[(stns1.lat > -47.5) & (stns1.lat < -34) & (stns1.lon > 166) & (stns1.lon < 179)].dropna().copy()\n stns2.rename(columns={'SiteName': 'ref'}, inplace=True)\n\n return stns2", "def create_station_list(self):\n sorted_station_list = sorted(self.station_dict, key=self.station_dict.get)\n\n return sorted_station_list", "def stations(self):\n try:\n stations_api = requests.get(self._stations_url)\n stations = {}\n for station in stations_api.json():\n station_id = station['id']\n station_name = station['name']\n stations[station_id] = station_name\n\n return stations\n except (RequestException, KeyError) as exc:\n LOG.error('could not read from api: %s', exc)\n raise SlfError('could not read from api: %s' % exc) from None", "def list_stations(intent, session):\n stations = location.get_stations(config.bikes_api)\n street_name = intent['slots']['street_name']['value']\n possible = location.matching_station_list(stations,\n street_name,\n exact=True)\n street_name = street_name.capitalize()\n\n if len(possible) == 0:\n return reply.build(\"I didn't find any stations on %s.\" % street_name,\n is_end=True)\n elif len(possible) == 1:\n sta_name = location.text_to_speech(possible[0]['name'])\n return reply.build(\"There's only one: the %s \"\n \"station.\" % sta_name,\n card_title=(\"%s Stations on %s\" %\n (config.network_name, street_name)),\n card_text=(\"One station on %s: %s\" %\n (street_name, possible[0]['name'])),\n is_end=True)\n else:\n last_name = location.text_to_speech(possible[-1]['name'])\n speech = \"There are %d stations on %s: \" % (len(possible),\n street_name)\n speech += (', '.join([location.text_to_speech(p['name'])\n for p in possible[:-1]]) +\n ', and %s' % last_name)\n card_text = (\"The following %d stations are on %s:\\n%s\" %\n (len(possible), street_name,\n '\\n'.join(p['name'] for p in possible)))\n return reply.build(speech,\n card_title=(\"%s Stations on %s\" %\n (config.network_name, 
street_name)),\n card_text=card_text,\n is_end=True)", "def get_tasks_that_fit_station(self, station: Station) -> TaskList:\n return TaskList([task for task in self._tasks if station.can_fit(task)])", "def stations():\n # Query all stations before a given date 2017\n results = session.query(Measurement.date, Measurement.tobs).filter(func.strftime(\"%Y\", Measurement.date) >= \"2017\").all()\n all_results = list(np.ravel(results))\n \n return jsonify(all_results)", "def read_table_stations(self):\n if not os.path.exists(self.station_table_filename):\n LOGGER.warning('could not find station.table file \"%s\"', self.station_table_filename)\n return self.known_stations\n count = 0\n with open(self.station_table_filename, 'r') as textfile:\n lines = textfile.read().split(LF)\n for line in lines:\n station_id, data = read_table_station_from_line(line)\n if station_id is not None:\n self.known_stations[station_id] = data\n count += 1\n self.station_file_age = os.path.getmtime(self.station_table_filename)\n LOGGER.info(' Loaded %i station records from \"%s\"', count, self.station_table_filename)\n return self.known_stations", "def _get_stations_local() -> List[dict]:\n LOGGER.info('Using pre-generated json to retrieve station list')\n with open(weather_stations_file_path) as weather_stations_file:\n json_data = json.load(weather_stations_file)\n return json_data['weather_stations']", "def search_station(st):\n\n res = []\n for key, val in _STATIONS.items():\n score = fuzz.token_set_ratio(st, key)\n res.append(\n {\n 'station': key,\n 'score': score,\n 'station_id': val\n }\n )\n if not res:\n return {}\n else:\n res = sorted(res, key=lambda k: k['score'], reverse=True)\n res = res[0]\n return res", "def get_stations():\n response = requests.get('https://api.hh.ru/metro/160')\n todos = json.loads(response.text)\n colors = {'CD0505': 'red'}\n all_stations_one_line = []\n\n for i in todos['lines']:\n all_stations_one_line = []\n\n for j in i['stations']:\n one_station = station.station()\n one_station.set_name(j['name'])\n one_station.set_color(colors.get(i['hex_color']))\n one_station.set_lat(j['lat'])\n one_station.set_lng(j['lng'])\n all_stations_one_line.append(one_station)\n return all_stations_one_line", "def stations():\n # Query all station names from dataset\n station_list = session.query(Measurement.station).distinct().all()\n all_stations = list(np.ravel(station_list))\n\n return jsonify(all_stations)", "def get_station_graph(start_station_id, end_station_list):\n start_station_graph = []\n for i in range(10):\n if end_station_list[i] is not None:\n start_station_graph.append((start_station_id, end_station_list[i]))\n return start_station_graph", "def get_stations(nordic_file_names, output_level=0):\n stations = []\n for file in nordic_file_names:\n new_stations = get_event_stations(file, output_level)\n\n if new_stations == -1:\n continue\n\n for x in new_stations:\n if x not in stations:\n stations.append(x)\n\n return sorted(stations)", "def get_all_station_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = 
np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n num = demand_data[:, 0, -2, np.newaxis] # todo check meaning here, get quick and slow feature\n\n raw_data = np.concatenate((num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, columns=GENERAL_HEADER)\n\n file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n os.remove(file_path)\n csv_data.to_csv(file_path)\n pass", "def get_zipcode_stations(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n neighborhood_stations = text(\r\n \"\"\"\r\n SELECT\r\n \"name\" as name,\r\n \"addressStreet\" as address,\r\n \"bikesAvailable\" as available_bikes,\r\n v.geom as geom,\r\n ST_X(v.geom) as lon, ST_Y(v.geom)as lat\r\n FROM indego_rt1130 as v\r\n JOIN philly_zipcode as n\r\n ON ST_Intersects(v.geom, n.geom)\r\n WHERE n.code = :name\r\n \"\"\"\r\n )\r\n stations = gpd.read_postgis(neighborhood_stations, con=engine, params={\"name\": name})\r\n return stations", "def cluster_stations(stations, empty='empty'):\n if empty == 'empty':\n tocluster = [i for i in stations if (i[3] - i[2])/float(i[3]) < .2]\n else:\n tocluster = [i for i in stations if (i[2])/float(i[3]) < .2]\n cl = KMeansClustering([(i[4], i[5]) for i in tocluster])\n clusters = cl.getclusters(4)\n\n # Note that this returns a list of lists of lat/long tuples. 
We're\n # going to have to re-associate them back to the rest of the stations\n\n clustered = []\n for ix, i in enumerate(clusters):\n for j in i:\n for k in tocluster:\n if (j[0], j[1]) == (k[4], k[5]):\n clustered.append([k[0], k[1], k[2],\n k[3], k[4], k[5], ix+1])\n\n return clustered", "def get_station_boroughs(self):\\", "def _get_next_available_train_list(self, context) -> Tuple[List, datetime.datetime]:\n try:\n for day in self._next_week:\n trains = list(train_api.get_available_trains(origin_station_id=context.user_data['origin_station_id'],\n dest_station_id=context.user_data['dest_station_id'],\n date=day))\n if len(trains) > 0:\n return trains, day\n\n except (ValueError, AttributeError):\n traceback.print_exc()\n raise RuntimeError(\"general error\")", "def getstationaryobslist(self):\n\n stationaryobslist = [self.__tablecm]\n return stationaryobslist", "def get_data(self):\n if not self.form.submit():\n return False\n\n parser = XMLParser(huge_tree=True)\n doc = etree.fromstring(self.form.raw_data, parser=parser)\n # There are a few stations with multiple generator id's, separated by '\\n' so\n # capture them and add each as a separate entry.\n for detail in doc.xpath(\"//*[local-name()='Detail']\"):\n stt = Station(detail)\n if '\\n' in stt.generator_id:\n ids = [x.strip() for x in stt.generator_id.split('\\n')]\n stt.generator_id = ids[0]\n for _id in ids[1:]:\n _st = copy.copy(stt)\n _st.generator_id = _id\n self.stations.append(_st)\n self.stations.append(stt)\n return len(self.stations) > 0", "async def get_stations_by_codes(station_codes: List[int]) -> List[WeatherStation]:\n use_wfwx = config.get('USE_WFWX') == 'True'\n if use_wfwx:\n return await _get_stations_by_codes_remote(station_codes)\n return _get_stations_by_codes_local(station_codes)", "def get_stations(self, limit=250):\n\n endpoint = \"/station/getStations\"\n response = self._send(endpoint, \"POST\", {\"pageSize\": limit})\n stations = response.json()[\"stations\"]\n return stations", "def get_neigh_demand(city):\n\n # get station set S with more than 10 charge equipment\n static_file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n static_feature = pd.read_csv(static_file_path, header=0)\n station_set = set(static_feature[static_feature.num >= 10].index)\n\n # calculate 10 nearest neighborhoods for each station, sort by distance and store their index, get a map\n neighbor_distance_map = {}\n matrix_distance = np.load(exp_data_path + os.sep + 'similarity' + os.sep + 'similarity_distance_{}_numpy.npy'.format(city), allow_pickle=True)\n all_distance_map = {i: [] for i in range(station_count[city])}\n for i in range(station_count[city]):\n if i not in station_set:\n continue\n for j in range(station_count[city]):\n if j not in station_set:\n continue\n all_distance_map[i].append((j, matrix_distance[i][j]))\n all_distance_map[i].sort(key=lambda x : x[1], reverse=True)\n neighbor_distance_map[i] = [idx for idx, distance in all_distance_map[i][:10]]\n\n # 11 times header, get static neighborhood feature for each station(in S), get csv: neighbor_feature_{city}.csv\n ALL_HEADER = ['index']\n ALL_HEADER.extend(GENERAL_HEADER)\n for i in range(10):\n for j in GENERAL_HEADER:\n ALL_HEADER.append('{}_{}'.format(j, i))\n\n raw_data = np.empty((len(neighbor_distance_map), len(ALL_HEADER)))\n for i, idx in enumerate(neighbor_distance_map.keys()):\n raw_data[i][0] = idx\n raw_data[i][1:1+len(GENERAL_HEADER)] = static_feature.iloc[idx]['num':'mall']\n for j in range(10):\n 
neighbor_idx = neighbor_distance_map[idx][j]\n raw_data[i][1+len(GENERAL_HEADER)*(j+1):1+len(GENERAL_HEADER)*(j+2)] = static_feature.iloc[neighbor_idx]['num':'mall']\n neighbor_feature_data = pd.DataFrame(raw_data, columns=ALL_HEADER)\n print('neighbor feature')\n print(neighbor_feature_data)\n\n neighbor_feature_path = exp_data_path + os.sep + 'static' + os.sep + 'static_neighor_feature_{}.csv'.format(city)\n if os.path.exists(neighbor_feature_path):\n os.remove(neighbor_feature_path)\n neighbor_feature_data.to_csv(neighbor_feature_path)\n\n # create final csv(11 times header with basic info(time_index + time_embed_index))\n # if index in S, fill basic info, neighbor_feature and demand\n\n demand = np.load(exp_data_path + os.sep + 'station' + os.sep + 'demand_{}.npy'.format(city), allow_pickle=True)\n time_count = demand.shape[1]\n\n DEMAND_HEADER = []\n DEMAND_HEADER.extend(ALL_HEADER)\n DEMAND_HEADER.extend(['time_index', 'time_embed', 'demand'])\n neighbor_demand_raw_data = np.empty(((len(neighbor_distance_map)*time_count, len(DEMAND_HEADER))))\n\n # get time map like {\"0800\": 1, \"0830\": 2, ....}\n time_index_map = np.load(exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy', allow_pickle=True)\n time_index_map = dict(time_index_map.tolist())\n time_map = {t: i for i, t in enumerate(sorted(set([k[-4:] for k in time_index_map['rev_index'].keys()])))}\n\n cur_idx = 0\n for time_idx in range(time_count):\n time_embed_idx = time_map[time_index_map['index'][time_idx][-4:]]\n for station_idx in station_set:\n neighbor_demand_raw_data[cur_idx][0:len(ALL_HEADER)] = neighbor_feature_data.loc[neighbor_feature_data['index']==station_idx, 'index':'mall_9']\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)] = time_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+1] = time_embed_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+2] = demand[station_idx][time_idx][-1]\n # todo add slow demand and quick demand here\n cur_idx = cur_idx + 1\n print(cur_idx, neighbor_demand_raw_data.shape)\n\n neighbor_demand_data = pd.DataFrame(neighbor_demand_raw_data, columns=DEMAND_HEADER)\n print('neighbor demand')\n print(neighbor_demand_data)\n\n neighbor_demand_path = exp_data_path + os.sep + 'static' + os.sep + 'neighbor_demand_{}.csv'.format(city)\n if os.path.exists(neighbor_demand_path):\n os.remove(neighbor_demand_path)\n neighbor_demand_data.to_csv(neighbor_demand_path)", "def station_analysis(data):\n unique_stations = list(set(data['start_station_name'].tolist() + data['end_station_name'].tolist()))\n\n station_counter = {station : 0 for station in unique_stations}\n for index, row in data.iterrows():\n station_counter[row['start_station_name']] += 1\n\n print('List of all stations:')\n print(unique_stations)\n\n keys = list(station_counter.keys())\n vals = list(station_counter.values())\n indexArr = np.argsort(list(station_counter.values()))\n popularStations = []\n for i in reversed(indexArr):\n popularStations.append((keys[i], vals[i]))\n\n stations1, journeys = zip(*popularStations[0:10])\n plt.bar(stations1, journeys, 0.1)\n\n plt.xticks(stations1, rotation='vertical')\n plt.title('Popular stations')\n plt.xlabel('Station names')\n plt.ylabel('Journeys')\n\n plt.show()\n return station_counter", "def gatherStationData():\n flist = list_files()\n station_dics = {}\n print(\"Reading in csv data...\")\n for f_in in flist:\n start,end = find_timespan(f_in)\n station = station_name(f=f_in)\n print(\"File: {0} Station: {1} {2}--{3}\".format(f_in, \n station, start, end))\n 
station_dics[station] = read_precip(fname=f_in, \n label=station, start_year=start, end_year=end)\n data_list = []\n for s in station_dics:\n data_list.append(station_dics[s]) \n return pd.concat(data_list,axis=1)", "async def _get_stations_remote() -> List[WeatherStation]:\n LOGGER.info('Using WFWX to retrieve station list')\n async with ClientSession() as session:\n # Get the authentication header\n header = await _get_auth_header(session)\n stations = []\n # Iterate through \"raw\" station data.\n async for raw_station in _fetch_raw_stations(session, header, BuildQueryAllStations()):\n # If the station is valid, add it to our list of stations.\n if _is_station_valid(raw_station):\n LOGGER.info('Processing raw_station %d',\n int(raw_station['stationCode']))\n stations.append(_parse_station(raw_station))\n LOGGER.debug('total stations: %d', len(stations))\n return stations", "def stations():\n # Query \n results = session.query(Station.station).all()\n \n list = []\n for result in results:\n list.append(result)\n return jsonify(list)", "def stations_dict(self):\n return self.__stations_dict", "def stations():\n # Create a link to the session\n session = Session(engine)\n \n # Query all station records\n results = session.query(Stations.station, Stations.name).all()\n \n session.close()\n\n # Create a dictionary from the query results\n all_stations = []\n for station, name in results:\n station_dict = {}\n station_dict[\"station\"] = station\n station_dict[\"name\"] = name\n all_stations.append(station_dict)\n \n return jsonify(all_stations)", "def stations ():\n # Query all passengers\n Stns= session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).all()\n\n allStationns = list(np.ravel(Stns))\n\n return jsonify(allStations)", "def get_station_entrances(self):\n station_entrances = []\n for wrapper in self.soup.find_all(\"div\", {\"class\": \"stop-wrapper\"}):\n text = wrapper.find(\"span\").text\n if text == '' or text is None:\n entrance = ''\n else:\n entrance = text.split(',')[0].lstrip().rstrip()\n station_entrances.append(entrance)\n return np.array(station_entrances).T", "def get_station_lines(self):\n station_lines = []\n for wrapper in self.soup.find_all(\"div\", {\"class\": \"stop-wrapper\"}):\n lines = wrapper.find(\"h3\").text.split(' ')[-1][1:-1]\n station_lines.append(lines)\n return np.array(station_lines).T", "def stations():\n list_of_stations = session.query(Station.station, Station.name)\n all_stations = []\n for s, n in list_of_stations:\n station_dict = {}\n station_dict[\"station\"] = s\n station_dict[\"name\"] = n\n all_stations.append(station_dict)\n return jsonify(all_stations)", "def stations():\n results = session.query(Measurement.station).\\\n group_by(Measurement.station).all()\n\n return jsonify(results)", "def __save_all():\n \n # Use directory listing from stilt-web data. 
Ignore stations that\n # may be in the queue but are not finished yet.\n allStations = [s for s in os.listdir(CPC.STILTPATH) if os.path.exists(CPC.STILTPATH + s)]\n\n \n # read lis of ICOS stations\n icosStations = cpstation.getIdList()\n icosStations = list(icosStations['id'][icosStations.theme=='AS'])\n \n # dictionary to return\n stations = {}\n\n # fill dictionary with ICOS station id, latitude, longitude and altitude\n for ist in tqdm(sorted(allStations)):\n \n stations[ist] = {}\n # get filename of link (original stiltweb directory structure) and extract location information\n \n loc_ident = os.readlink(CPC.STILTPATH+ist)\n clon = loc_ident[-13:-6]\n lon = float(clon[:-1])\n if clon[-1:] == 'W':\n lon = -lon\n clat = loc_ident[-20:-14]\n lat = float(clat[:-1])\n if clat[-1:] == 'S':\n lat = -lat\n alt = int(loc_ident[-5:])\n\n stations[ist]['lat']=lat\n stations[ist]['lon']=lon\n stations[ist]['alt']=alt\n stations[ist]['locIdent']=os.path.split(loc_ident)[-1]\n \n # set the name and id\n stations[ist]['id'] = ist\n \n # set a flag if it is an ICOS station\n stn = ist[0:3].upper()\n if stn in icosStations:\n stations[ist]['icos'] = cpstation.get(stn).info()\n lat = stations[ist]['icos']['lat']\n lon = stations[ist]['icos']['lon']\n else:\n stations[ist]['icos'] = False \n lat = stations[ist]['lat']\n lon = stations[ist]['lon']\n \n stations[ist]['geoinfo'] = country.get(latlon=[lat,lon])\n \n return stations", "def mock_get_all_stations(__):\n return all_station_codes", "def stations():\n\t\n\n\tstationquery = session.query(Station.station).all()\n\n\tstationlist = list(np.ravel(stationquery))\n\t\n\treturn jsonify(stationlist)", "def Find_nearest_dwd_stations(inpt_data,\r\n date_start='20051201',\r\n date_end='20201231',\r\n dwd_time_format='%Y%m%d%H',\r\n data_category='air_temperature',\r\n temp_resolution='hourly',\r\n no_of_nearest_stations=4,\r\n memory_save=True,\r\n Output='True'):\r\n if isinstance(data_category,list):\r\n if len(list(data_category)) > 1:\r\n print(\r\n 'Currently only one dwd category allowed, please run function multiple times for each category'\r\n )\r\n return None\r\n \r\n #convert time to datetime\r\n dt_start=datetime.strptime(date_start,'%Y%m%d')\r\n dt_end=datetime.strptime(date_end,'%Y%m%d')\r\n print('Start quering data from DWD')\r\n #define the database folder\r\n pypath = os.path.dirname(os.path.abspath(__file__))\r\n table_dir = pypath + '\\\\' + 'tables'\r\n dbase_dir = pypath + '\\\\' + 'dbase' \r\n #%% we check all available stations and create a valid list\r\n filename_stations=update_stationlist(time_res='hourly',dbase_dir=table_dir)\r\n stations_all=pd.read_csv(filename_stations, dtype={'STATIONS_ID': object})\r\n # delete all stations which do not cover the category\r\n dwd_stations=stations_all[stations_all[data_category]==True].copy()\r\n #correct to datetime\r\n dwd_stations['date_end']=pd.to_datetime(stations_all.date_end,format='%Y%m%d')\r\n dwd_stations['date_start']=pd.to_datetime(stations_all.date_start,format='%Y%m%d')\r\n # clean to stations which cover the campaign time #dt_low <= dt <= dt_high:\r\n dwd_stations=dwd_stations[(dwd_stations.date_start<=dt_start) & (dwd_stations.date_end>=dt_end)]\r\n #make a geodataframe out of it\r\n dwd_stations=gpd.GeoDataFrame(dwd_stations,geometry=gpd.points_from_xy(dwd_stations.geo_lon, dwd_stations.geo_lat))\r\n \r\n #loop through all rows to get the n closest points\r\n distances=pd.DataFrame()\r\n for _, station in dwd_stations.iterrows():\r\n 
distances[station.STATIONS_ID]=inpt_data.distance(station.geometry)\r\n \r\n #%% get the n stations with smallest distance and update database\r\n id_nearest_stations=distances.apply(lambda s: s.nsmallest(no_of_nearest_stations).index.tolist(), axis=1).values.tolist() #station ids\r\n #get them as unique values by sum a list of lists https://bit.ly/353iZQB\r\n id_dwd_stations=list(set(sum(id_nearest_stations,[])))\r\n \r\n #update the database\r\n db_dwd_stations=import_stations(time_res=temp_resolution,time_format=dwd_time_format,campaign_time=[dt_start,dt_end],data_category=data_category,station_ids=id_dwd_stations,dbase_dir=dbase_dir,Output=Output,table_dir=table_dir,memory_save=memory_save)\r\n \r\n #distance of nearest stattions\r\n dist_nearest_stations=pd.DataFrame(np.sort(distances.values)[:,:no_of_nearest_stations]).values.tolist() #distances themself\r\n #create new columns in the input data\r\n station_col_nm=list()\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_station_'+str(i))\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_distance_'+str(i))\r\n #create new dataframe\r\n distance_data=pd.concat([pd.DataFrame(id_nearest_stations).astype(int),pd.DataFrame(dist_nearest_stations)],axis=1)\r\n distance_data.columns=station_col_nm\r\n #add to main dataset\r\n inpt_data=pd.concat([inpt_data, distance_data],axis=1) \r\n \r\n return inpt_data,db_dwd_stations", "def getEmpty(self):\n emptyList = []\n for spot in self.parkingSpots:\n if spot.status == 'empty':\n emptyList.append(spot)\n return emptyList", "def all_stations(self, provider: ID) -> List[StationInfo]:\n srv_key = self.__stations_key(provider=provider)\n value = self.get(name=srv_key)\n if value is None:\n return []\n js = utf8_decode(data=value)\n array = json_decode(string=js)\n return StationInfo.convert(array=array)", "def getSyntheticShiftLists(nmrProject):\n \n #NBNB Filtering on isSimulated might be enough, but is it reliable?\n\n if not nmrProject:\n return []\n \n result = [x for x in nmrProject.sortedMeasurementLists()\n if x.className=='ShiftList'\n and x.findFirstExperiment() is None\n #and x.isSimulated # disabled pending check on how to set it\n ]\n \n return result", "def statuslist(self):\n self.getfullstatus()\n response = self.geturl('js')\n if not response:\n return None\n data = response.json()\n states = data[\"sn\"]\n stations = list(zip(range(0, data[\"nstations\"]),\n self.lastfullresponse[\"stations\"][\"snames\"],\n ['ON' if x==1 else 'OFF' for x in states]))\n return stations", "def reload(self):\n self.known_stations = {}\n self.read_noaa_stations()\n self.read_table_stations()\n self.last_reload_check_time = datetime.datetime.utcnow()\n LOGGER.info('Have %s known stations', len(self.known_stations.keys()))", "def get_wrf_stations(pool):\n\n wrfv3_stations = {}\n\n connection = pool.connection()\n try:\n with connection.cursor() as cursor:\n sql_statement = \"SELECT `id`, `name` FROM `station` WHERE `id` like %s\"\n row_count = cursor.execute(sql_statement, \"11_____\")\n if row_count > 0:\n results = cursor.fetchall()\n for dict in results:\n wrfv3_stations[dict.get(\"name\")] = dict.get(\"id\")\n return wrfv3_stations\n else:\n return None\n except Exception as exception:\n error_message = \"Retrieving wrf stations failed\"\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()", "def get_station(div=0, wd=0, abbrev=None, 
as_dataframe=False,\n input_file=None, output_file=None, suds_cache=None):\n # ensure div is a list of ints\n if type(div) is not list: div = [div]\n # ensure wd is a list\n if type(wd) is not list: wd = [wd]\n # ensure name is a homogeneous list of str if it exists\n if abbrev is not None and type(abbrev) is not list:\n abbrev = [abbrev]\n assert all(type(a) is str for a in abbrev)\n\n stations = []\n if input_file is None:\n # use the Co DWR SOAP service\n suds_client = _get_client(CODWR_WSDL_URL)\n\n # get the water division/districts\n dists = get_water_district(div, wd, as_dataframe=False, suds_cache=suds_cache)\n if dists is None:\n # no matching division/district(s)\n return None\n\n if abbrev is None:\n for d in dists:\n # get the stations for the division/district\n sites = suds_client.service.GetSMSTransmittingStations(d['div'], d['wd'])\n if sites is None:\n return None\n\n # get the parameters for the stations\n sparms = suds_client.service.GetSMSTransmittingStationVariables(d['div'], d['wd'])\n if sparms is None:\n # hmmm - we have stations but no parameters...\n raise ValueError(\"Service returned no parameters for transmitting station(s).\")\n\n # the SOAP service returns each parameter for a station as a\n # separate row <abbrev, parameter> which we will compact into\n # a dict {abbrev,[parameter,parameter,...]}\n params = {}\n for sp in sparms.StationVariables:\n spd = dict(sp)\n if spd['abbrev'] not in params:\n params[spd['abbrev']] = []\n params[spd['abbrev']].append(spd['variable'])\n\n # build up the complete station description (including the water\n # district name and parameters) and add it to the station list\n # to the station list\n for site in sites.Station:\n sited = dict(site)\n sited['waterDistrictName'] = d['waterDistrictName']\n sited['parameters'] = params[sited['abbrev']]\n stations.append(sited)\n\n else:\n for a in abbrev:\n site = suds_client.service.GetSMSTransmittingStations(0, 0, a)\n if site is not None:\n sited = dict(site)\n\n for d in dists:\n if d['div'] == sited['div'] and d['wd'] == sited['wd']:\n sited['waterDistrictName'] = d['waterDistrictName']\n break\n\n # retrieve the station parameters and attach them to the\n # station information\n sparms = suds_client.service.GetSMSTransmittingStationVariables(sited['div'],\n sited['wd'],\n sited['abbrev'])\n if sparms is None:\n # hmmm - we have stations but no parameters...\n raise ValueError(\"Service returned no parameters for station \"\n + sited['abbrev'])\n for sp in sparms.StationVariables:\n spd = dict(sp)\n if sited['parameters'] is None:\n sited['parameters'] = []\n assert(spd['abbrev'] == sited['abbrev'])\n sited['parameters'].append(spd['variable'])\n\n else:\n # retrieve the list of sites in the specified file\n print(\"Nothing yet\")\n\n if as_dataframe is True:\n stations = pd.DataFrame(stations)\n\n return stations if len(stations) > 0 else None", "def stations():\n session = Session(engine)\n # Query all Stations\n stations = session.query(Station.station).all()\n\n # Convert list of tuples into normal list\n all_stations = list(np.ravel(stations))\n\n return jsonify(all_stations)", "def add_stations(stations, pool):\n\n for station in stations:\n\n print(add_station(pool=pool, name=station.get('name'), latitude=station.get('latitude'),\n longitude=station.get('longitude'), station_type=station.get('station_type'),\n description=station.get('description')))\n print(station.get('name'))", "def get_all_stations():\n latest_scraping_time = db.session \\\n 
.query(func.max(DublinBike.scraping_time)) \\\n .one()[0]\n\n stations = db.session.query(DublinBike) \\\n .filter(DublinBike.scraping_time == latest_scraping_time) \\\n .order_by(DublinBike.number.asc()) \\\n .all()\n\n return jsonify({\n 'data': [station.serialize for station in stations]\n })", "def get_station_model_predictions(\n session: Session,\n station_codes: List,\n model: str,\n start_date: datetime.datetime,\n end_date: datetime.datetime) -> List[\n Union[WeatherStationModelPrediction, PredictionModelRunTimestamp, PredictionModel]]:\n query = session.query(WeatherStationModelPrediction, PredictionModelRunTimestamp, PredictionModel).\\\n filter(WeatherStationModelPrediction.station_code.in_(station_codes)).\\\n filter(WeatherStationModelPrediction.prediction_timestamp >= start_date).\\\n filter(WeatherStationModelPrediction.prediction_timestamp <= end_date).\\\n filter(PredictionModelRunTimestamp.id ==\n WeatherStationModelPrediction.prediction_model_run_timestamp_id).\\\n filter(PredictionModelRunTimestamp.prediction_model_id == PredictionModel.id,\n PredictionModel.abbreviation == model).\\\n order_by(WeatherStationModelPrediction.station_code).\\\n order_by(WeatherStationModelPrediction.prediction_timestamp).\\\n order_by(PredictionModelRunTimestamp.prediction_run_timestamp.asc())\n return query", "def stations():\n\n # Query all Stations\n station_results = session.query(Station.station).all()\n\n # Convert list of tuples into normal list\n all_station_names = list(np.ravel(station_results))\n\n return jsonify(all_station_names)", "def stations(): \n # creating the Docstring\n session = Session(engine)\n\n # creat the Query stations\n\n stations_qu = session.query(measurement.station).group_by(measurement.station).all()\n\n # Converting the list of tuples into a normal list\n stations_qu_dict = list(np.ravel(stations_qu))\n session.close()\n\n return jsonify(stations_qu_dict)", "def stations():\n\n station_results = session.query(Stations.station, Stations.name).all()\n\n station_data = []\n for row in station_results:\n station_dict = {}\n station_dict[\"station\"] = row.station\n station_dict[\"name\"] = row.name\n station_data.append(station_dict)\n\n return jsonify(station_data)", "def processStationInfo(obs_loc_df, source, st_list=None):\n if not st_list:\n st_list = dict()\n st_data = obs_loc_df['station_id']\n lat_data = obs_loc_df['latitude (degree)']\n lon_data = obs_loc_df['longitude (degree)']\n\n for k, station_name in enumerate(st_data):\n if station_name in st_list:\n pass\n else:\n st_list[station_name] = dict()\n st_list[station_name][\"lat\"] = lat_data[k]\n st_list[station_name][\"source\"] = source\n st_list[station_name][\"lon\"] = lon_data[k]\n print(station_name)\n\n print(\"Number of stations in bbox {}\".format(len(st_list.keys())))\n return st_list", "def get_flo2d_output_stations(pool, flo2d_model):\n\n flo2d_output_stations = {}\n\n id_pattern = '{}_____'.format((str(flo2d_model.value)).split('0')[0])\n\n connection = pool.connection()\n try:\n with connection.cursor() as cursor:\n sql_statement = \"SELECT * FROM `station` WHERE `id` like %s\"\n row_count = cursor.execute(sql_statement, id_pattern)\n if row_count > 0:\n results = cursor.fetchall()\n for dict in results:\n station_name = dict.get(\"name\")\n flo2d_output_stations[station_name.split(\"_\")[0]] = [dict.get(\"id\"), dict.get(\"latitude\"),\n dict.get(\"longitude\"),\n '_'.join(station_name.split('_')[1:])]\n return flo2d_output_stations\n else:\n return None\n except Exception as 
exception:\n error_message = \"Retrieving flo2d output stations failed\"\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()", "def run():\n\n # Build list of tuples of station names and distance \n stations = build_station_list()\n p = (52.2053, 0.1218)\n by_distance = stations_by_distance(stations, p)\n for n in range(10):\n print(by_distance[n])\n for n in range(10):\n i = len(by_distance) - 10 + n\n print(by_distance[i])", "def _automatic_training_set(self, n_cutouts=100):\n dstl = Load_DSTL()\n np.random.seed(42)\n for ii in range(n_cutouts):\n # Get region around a shape\n triples, mask, ind_shape, img_dim = dstl.extract_region(object_class=self.class_type, image_id='6120_2_2', buffer_size=10)\n if self.radius is not None:\n triples, mask = self._sliding_window(triples.reshape(*img_dim, 3), mask.reshape(img_dim), window_radius=self.radius)\n # Add to Feature Matrix\n if ii == 0:\n features = triples\n labels = mask\n else:\n features = np.vstack([features, triples])\n labels = np.hstack([labels, mask])\n return features, labels", "def create_training_set_blending():\n stime = time.time()\n indexes = []\n y_train = np.zeros((nb_places * nb_pis,))\n x_train = np.zeros((nb_places * nb_pis, nb_clfs))\n for i, place_id in enumerate(place_ids):\n # 1. Get the relevance ratings for the place (y).\n ratings = get_relevance_ratings(place_id, connection=c_study, cursor=cur_study)\n if len(ratings) == 0: # filter the ratings.\n continue\n r = [np.mean(ratings[pi_id]['ratings']) if pi_id in ratings else 0 for pi_id in pis_ids]\n for k, pi_id in enumerate(pis_ids):\n cl = 0\n if r[k] >= 4:\n cl = 1\n y_train[i * nb_pis + k] = cl # int(np.ceil(r[k]))\n indexes.append((place_id, pi_id))\n\n # 2. Get the predictions from the models for the place (x).\n for j, clf in enumerate(clfs):\n predictions = clf._get_prediction(place_id)\n p = [predictions[pi_id]['score'] if pi_id in predictions else 0 for pi_id in pis_ids]\n for k in range(nb_pis):\n x_train[i * nb_pis + k, j] = p[k]\n print(\"[.] 
Done with x_train: %s, y_train: %s, indexes: %s (%.2f)\" % (x_train.shape, y_train.shape, len(indexes), time.time()-stime))\n return x_train, y_train, indexes", "def get_station_names(self):\n station_names = []\n for wrapper in self.soup.find_all(\"div\", {\"class\": \"stop-wrapper\"}):\n station_name = ' '.join(wrapper.find(\"h3\").text.split(' ')[:-1])\n station_names.append(station_name)\n return np.array(station_names).T", "def get_top_station_set(city):\n s = {}\n for file in os.listdir(exp_data_path + os.sep + 'station' + os.sep + city):\n with open(exp_data_path + os.sep + 'station' + os.sep + city + os.sep + file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0] not in s:\n s[row[0]] = 1\n else:\n s[row[0]] = s[row[0]] + 1\n\n sort_s = dict(sorted(s.items(), key=lambda x : x[1], reverse=True))\n first = True\n res = []\n for k, v in sort_s.items():\n if first:\n top = v\n first = False\n if top - v <= 30:\n res.append(k)\n print('before', len(sort_s))\n print('after', len(res))\n\n # restore new map [old_index, new_index]\n list_remap = {}\n new_index = 0\n for index in range(0, data_length[city]):\n if str(index) in res:\n list_remap[index] = new_index\n new_index = new_index + 1\n\n # print(list_remap)\n check_path(exp_data_path + os.sep + 'station_list')\n file_name = exp_data_path + os.sep + 'station_list' + os.sep + 'list_remap_{}'.format(city) + '.npy'\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, list_remap)", "def test_nearest_filter(self):\n for airport, reports, count in (\n (True, True, 6),\n (True, False, 16),\n (False, True, 6),\n (False, False, 30),\n ):\n stations = station.nearest(30, -80, 30, airport, reports, 1.5)\n self.assertEqual(len(stations), count)", "def stations():\n \n # Query all the stations\n results = session.query(Station).all()\n\n # Create a dictionary to append the station data\n stations_info = []\n for stations in results:\n stations_dict = {}\n stations_dict[\"Station\"] = stations.station\n stations_dict[\"Station Name\"] = stations.name\n stations_dict[\"Latitude\"] = stations.latitude\n stations_dict[\"Longitude\"] = stations.longitude\n stations_dict[\"Elevation\"] = stations.elevation\n all_stations.append(stations_dict)\n \n return jsonify(stations_info)", "def stations():\n \n station_result = session.query(Station.station).all()\n stations = []\n # Convert list of tuples into normal list\n stations = list(np.ravel(station_result))\n return jsonify(stations)", "def stations():\n results = session.query(Station.station).all()\n stations = list(np.revel(results))\n return jsonify(stations)", "def stations():\n # Return a JSON list of stations from the dataset\n session = Session(engine)\n stations = session.query(Station.name).all()\n\n # Convert list of tuples into normal list\n station_names = list(np.ravel(stations))\n\n return jsonify(station_names)", "def inlezen_beginstation(stations):\n beginstation = input(\"Wat is je beginstation? : \")\n\n while beginstation not in stations:\n print(\"Geen correcte invoer.. 
Probeer opnieuw\")\n\n return beginstation", "def rm_duplicate(Sta_all, address):\n \n sta_all = []\n saved = []\n \n for i in Sta_all:\n if i[2] == '--' or i[2] == ' ':\n i[2] = ''\n for j in range(0, len(i)):\n if i[j] != str(i[j]):\n i[j] = str(i[j]) \n if len(i) == 7:\n sta_all.append(str(i[0] + '_' + i[1] + '_' + i[2] + '_' + \\\n i[3] + '_' + i[4] + '_' + i[5] + '_' + i[6]))\n elif len(i) == 8:\n sta_all.append(str(i[0] + '_' + i[1] + '_' + i[2] + '_' + \\\n i[3] + '_' + i[4] + '_' + i[5] + '_' + i[6]\\\n + '_' + i[7]))\n \n sta_ev = read_station_event(address)\n ls_saved_stas = sta_ev[0]\n \n for i in range(0, len(ls_saved_stas)):\n sta_info = ls_saved_stas[i]\n saved.append(sta_info[0] + '_' + sta_info[1] + '_' + \\\n sta_info[2] + '_' + sta_info[3])\n \n Stas_req = sta_all\n \n len_all_sta = len(sta_all)\n num = []\n for i in range(0, len(saved)):\n for j in range(0, len(Stas_req)):\n if saved[i] in Stas_req[j]:\n num.append(j)\n\n num.sort(reverse=True)\n for i in num:\n del Stas_req[i] \n \n for m in range(0, len(Stas_req)):\n Stas_req[m] = Stas_req[m].split('_')\n \n Stas_req.sort()\n \n print '------------------------------------------'\n print 'Info:'\n print 'Number of all saved stations: ' + str(len(saved))\n print 'Number of all available stations: ' + str(len_all_sta)\n print 'Number of stations to update for: ' + str(len(Stas_req))\n print '------------------------------------------'\n \n return Stas_req", "def stations():\r\n # Query all passengers\r\n results = session.query(Station.station, \r\n Station.name, \r\n Station.latitude,\r\n Station.longitude,\r\n Station.elevation).all()\r\n\r\n return jsonify(results)", "def __len__(self):\n return len(self.stations)", "def stations():\n # Create link from Python to db\n session = Session(engine)\n\n # Query stations.\n stations = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()\n\n session.close()\n\n # Convert to a dictionary.\n all_stations = []\n for station, name, latitude, longitude, elevation in stations:\n station_dict = {}\n station_dict[\"station\"] = station\n station_dict[\"name\"] = name\n station_dict[\"latitude\"] = latitude\n station_dict[\"longitude\"] = longitude\n station_dict[\"elevation\"] = elevation\n all_stations.append(station_dict)\n\n # Return JSON\n return jsonify(all_stations)", "def add_station(self, station):\n self.__stations.append(station)", "def _load_stations(self, nodes: List[OSMNode]) -> None:\n # Process OSM nodes into intermediate stations\n grouped_stations: defaultdict[str, list[IntermediateStation]] = defaultdict(list)\n\n # Iterate thru nodes while popping them from the provided list\n # to allow used nodes to bne garbage collected.\n while nodes:\n node = nodes.pop()\n name_id = node.tags[\"name\"]\n grouped_stations[name_id].append(IntermediateStation(\n node.id,\n name_id,\n node.lat,\n node.lon,\n [k for (k, v) in node.tags.items() if \".\" in k and v == \"yes\"],\n node.tags.get(\"merged\") == \"all\",\n ))\n\n # Convert the intermediate representations to GeoStation\n # (again popping from grouped_stations to allow intermediate representation to be gc-ed)\n while grouped_stations:\n name_id, stations = grouped_stations.popitem()\n merged_all_node = get_merged_all_node(stations)\n\n if len(stations) == 1 and len(stations[0].routes) == 1:\n # Case 1 - one station and one line.\n sta = stations[0]\n sta_id = sta.routes[0] + \".\" + name_id\n self.by_id[sta_id] = GeoStation(sta_id, sta.lat, sta.lon)\n\n elif 
len(stations) == 1:\n # Case 2 - one station and multiple lines.\n # Simple parent-children structure, all in one location.\n sta = stations[0]\n parent = GeoStation(\"Merged.\" + name_id, sta.lat, sta.lon)\n self.by_id[parent.id] = parent\n\n for route in sta.routes:\n child = GeoStation(route + \".\" + name_id, sta.lat, sta.lon, parent=parent)\n self.by_id[child.id] = child\n parent.children.append(child)\n\n elif merged_all_node:\n # Case 3: many nodes, but all under one parent\n parent = GeoStation(\"Merged.\" + name_id, merged_all_node.lat, merged_all_node.lon)\n self.by_id[parent.id] = parent\n\n for ista in stations:\n for route in ista.routes:\n child = GeoStation(route + \".\" + name_id, ista.lat, ista.lon,\n parent=parent)\n self.by_id[child.id] = child\n parent.children.append(child)\n\n else:\n # Case 4: many nodes, no parent-of-all\n needs_merged_no = count_multiple_routes(stations) > 1\n merged_no = 1\n\n for sta in stations:\n if len(sta.routes) == 1:\n # Case 4.1 - single line - behavior as in case 1\n sta_id = sta.routes[0] + \".\" + name_id\n self.by_id[sta_id] = GeoStation(sta_id, sta.lat, sta.lon)\n\n else:\n # Case 4.2 - multiple lines - behavior as in case 2\n parent_prefix = \"Merged.\"\n if needs_merged_no:\n parent_prefix = f\"Merged.{merged_no}.\"\n merged_no += 1\n\n parent = GeoStation(parent_prefix + name_id, sta.lat, sta.lon)\n self.by_id[parent.id] = parent\n\n for route in sta.routes:\n child = GeoStation(route + \".\" + name_id, sta.lat, sta.lon,\n parent=parent)\n self.by_id[child.id] = child\n parent.children.append(child)", "def station_from_lat_lon(lat, lon, stations, n_nearest=3):\n lat, lon = float(lat), float(lon)\n distances = [(distance(lat, lon, st['lat'], st['lon']), st)\n for st in stations\n if (st['is_renting'] and st['is_installed'])]\n distances = sorted(distances)\n return [pair[1] for pair in distances[:n_nearest]]", "def stations():\n \n session = Session(engine)\n # Query to bring all stations\n results = pd.DataFrame(session.query(S.id.label('ID'),S.station.label('Station'),S.name.label('Name'),\\\n S.latitude.label('Latitude'),S.longitude.label('Longitude'), \\\n S.elevation.label('Elevation')).all())\n \n session.close()\n \n # Create a dictionary from the row data of the dataframe and return it as a JSON\n return jsonify(results.to_dict(orient = 'records'))", "def _parse_departures(self, data, stop, servernow):\n servernow.replace(second=0, microsecond=0)\n results = []\n departures = data.findall('./itdDeparture')\n for departure in departures:\n # Get Line Information\n origin, destination, line, ridenum, ridedir, canceled = self._parse_mot(departure.find('./itdServingLine'))\n\n if departure.find('./genAttrList/genAttrElem[value=\"HIGHSPEEDTRAIN\"]') is not None:\n line.linetype = LineType('train.longdistance.highspeed')\n elif departure.find('./genAttrList/genAttrElem[value=\"LONG_DISTANCE_TRAINS\"]') is not None:\n line.linetype = LineType('train.longdistance')\n\n # if ridenum is None:\n # ridedata = departure.find('./itdServingTrip')\n # if ridedata is not None:\n # ridenum = ridedata.attrib.get('tripCode', None)\n # if ridenum is not None:\n # ridenum = ridenum.strip()\n\n # Build Ride Objekt with known stops\n ride = Ride(line, ridenum)\n ride.direction = ridedir\n ride.canceled = canceled\n\n train_line = line.linetype in self.train_station_lines\n\n # todo: take delay and add it to next stops\n mypoint = self._parse_trip_point(departure, train_line=train_line)\n\n before_delay = None\n if mypoint.arrival:\n before_delay = 
mypoint.arrival.delay\n after_delay = None\n if mypoint.departure:\n after_delay = mypoint.departure.delay\n\n delay = None\n if departure.find('./itdServingLine/itdNoTrain'):\n delay = departure.find('./itdServingLine/itdNoTrain').attrib.get('delay', None)\n if delay is not None:\n delay = timedelta(minutes=delay)\n\n if delay is not None:\n if (mypoint.arrival and servernow < mypoint.arrival.livetime) or (mypoint.departure and servernow < mypoint.departure.livetime):\n before_delay = delay\n else:\n after_delay = delay\n\n prevs = False\n for pointdata in departure.findall('./itdPrevStopSeq/itdPoint'):\n point = self._parse_trip_point(pointdata, train_line=train_line)\n if point is not None:\n if before_delay is not None:\n if point.arrival is not None and point.arrival.delay is None and point.arrival.time + before_delay >= servernow:\n point.arrival.delay = before_delay\n if point.departure is not None and point.departure.delay is None and point.departure.time + before_delay >= servernow:\n point.departure.delay = before_delay\n prevs = True\n ride.append(point)\n\n pointer = ride.append(mypoint)\n\n onwards = False\n for pointdata in departure.findall('./itdOnwardStopSeq/itdPoint'):\n point = self._parse_trip_point(pointdata, train_line=train_line)\n if point is not None:\n if after_delay is not None:\n if point.arrival is not None and point.arrival.delay is None and point.arrival.time + after_delay >= servernow:\n point.arrival.delay = after_delay\n if point.departure is not None and point.departure.delay is None and point.departure.time + after_delay >= servernow:\n point.departure.delay = after_delay\n onwards = True\n ride.append(point)\n\n if not prevs and not onwards:\n ride.prepend(None)\n if origin is not None:\n ride.prepend(TimeAndPlace(Platform(origin)))\n\n ride.append(None)\n if destination is not None:\n ride.append(TimeAndPlace(Platform(destination)))\n\n # Return RideSegment from the Station we depart from on\n results.append(ride[pointer:])\n return Ride.Results(results)", "def __getpredictors_distance(self, staname, distance):\n\n distfromsta = distance[staname]\n del distfromsta[staname] # remove the station to be fill from the dataframe\n distfromsta = distfromsta.sort_values()\n\n stations = self.network.getsta(distfromsta.index.values)\n # station = self.network.getsta(staname)\n\n # Only 3 closest stations\n # sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 2\n\n # Use all stations\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n # sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 3\n # sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 4\n\n # Only 3 closest stations\n # sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 1\n\n # using all stations\n sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2names = [(i.getpara('stanames'), 
e.getpara('stanames')) for i, e in\n zip(stations[0:-2], stations[2:])] # selction predictors with spacing 1\n\n # sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 1\n # sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 1\n\n selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]\n selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if x]\n\n return selection, selectionnames", "def sort_bike_stations(bike_stations, location):\n\n stations = bike_stations.copy()\n\n for index, station in enumerate(stations):\n station_location = (station[\"lat\"], station[\"lon\"])\n dist = distance.distance(station_location, location).m\n stations[index][\"distance\"] = dist\n\n stations = sorted(stations, key=lambda station: station[\"distance\"])\n stations = list(filter(lambda station: station[\"bikesAvailable\"] > 0, stations))\n\n return stations", "def stations():\n # Query all stations\n\n stations = session.query(Station.station).all()\n all_stations = list(np.ravel(stations))\n\n return jsonify(all_stations)", "def stations():\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n results = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()\n\n session.close()\n\n stations = []\n for result in results:\n station_dict = {}\n station_dict[\"station\"] = result.station\n station_dict[\"name\"] = result.name\n station_dict[\"latitude\"] = result.latitude\n station_dict[\"longitude\"] = result.longitude\n station_dict[\"elevation\"] = result.elevation\n stations.append(station_dict)\n \n return jsonify(stations)" ]
[ "0.6918483", "0.63394576", "0.6295055", "0.6175655", "0.6134076", "0.6036043", "0.59990704", "0.5990439", "0.5939404", "0.59325945", "0.5899833", "0.5873434", "0.58216053", "0.5731162", "0.5703947", "0.5658584", "0.5635071", "0.563275", "0.56268054", "0.5625375", "0.56150836", "0.5578745", "0.5568343", "0.5566908", "0.5550945", "0.5497827", "0.5480448", "0.54793215", "0.5447455", "0.54438853", "0.54423", "0.5408923", "0.53940755", "0.537816", "0.53723323", "0.53699684", "0.5344327", "0.53432655", "0.53240126", "0.53021026", "0.52799815", "0.5248857", "0.5243314", "0.5238324", "0.5230706", "0.5225993", "0.52098584", "0.52077234", "0.5183783", "0.5183574", "0.51826656", "0.51740634", "0.515469", "0.5153958", "0.5152967", "0.5143183", "0.514239", "0.51363176", "0.5127989", "0.51132345", "0.51124007", "0.5106552", "0.51044154", "0.5102174", "0.5086586", "0.508632", "0.5083396", "0.50797653", "0.5066607", "0.50582135", "0.5058194", "0.50395805", "0.50383264", "0.5037706", "0.5029194", "0.50290066", "0.5027434", "0.5024481", "0.5021352", "0.50211394", "0.50206435", "0.5019944", "0.50107074", "0.50099236", "0.50091255", "0.49914712", "0.49885646", "0.4980024", "0.49778178", "0.49762672", "0.49662644", "0.49654043", "0.49487618", "0.4948441", "0.49457708", "0.4945759", "0.49422902", "0.49404457", "0.4934721", "0.49289748" ]
0.66159266
1
builds and saves dataframe to be used for graphs
def dataframe():\n    #allows function to access station, gmt, and miss_station functions\n    global stations\n    global gmt\n    global miss_station
\n\n    #read predictor file\n    control = cfg.read_yaml('../registry/graphs.yaml')\n    pred_ctrl = cfg.read_yaml(cfg.get_config_path(control.pred_file))\n    predd_ctrl = cfg.read_yaml(cfg.get_config_path(control.predd_file))
\n\n    #get file paths and update database\n    predictor_file_path = control.predictor_file_path\n    predictand_file_path = control.predictand_file_path\n    pred_file_id = update(predictor_file_path)\n    predd_file_id = update(predictand_file_path)
\n\n    #store lead time and date range\n    lead_time = control.lead_time\n    date_range = control.date_range
\n\n    #get info for fetch many dates\n    start,end,stride = read_pred.parse_range(date_range)\n    fcst_ref_time = control.date_range[0].split('-')[0][-2:]
\n\n    #initialize list of predictors\n    pred_list = pred_ctrl.predictors\n    predictor = []
\n\n    #loops through predictors to build camps data objects\n    for entry_dict in pred_list:\n        #formats metadata\n        pred = create.preprocess_entries(entry_dict, fcst_ref_time)
\n\n        #adds info to metadata that's not currently being stored\n        pred.search_metadata['reserved2'] = lead_time*3600\n        pred.search_metadata['file_id'] = pred_file_id\n        pred.search_metadata['reserved1'] = 'vector'
\n\n        #build camps data objects for each day\n        variable = fetch_many_dates(predictor_file_path,start,end,stride,pred.search_metadata)
\n\n        #appends all data to single camps object\n        if variable[0] is not None:\n            var = variable[0]\n            arrs = []\n            for i in range(len(variable)):\n                arrs.append(variable[i].data)\n            var.data = np.stack(arrs)\n            predictor.append(var)
\n\n    #initializes list of predictands\n    predd_list = predd_ctrl.predictands\n    predictand = []
\n\n    #loops through predictands to build camps data objects\n    for entry_dict in predd_list:\n        #formats metadata\n        vertical_coordinate = entry_dict.pop('Vertical_Coordinate')\n        entry_dict['file_id'] = predd_file_id
\n\n        #build camps objects for each day\n        variable = fetch_many_dates(predictand_file_path,start, end, stride, entry_dict)
\n\n        #append all data to single camps object\n        var = variable[0]\n        arrs = []\n        for i in range(len(variable)):\n            arrs.append(variable[i].data)\n        try:\n            var.data = np.stack(arrs)\n            predictand.append(var)\n        except:\n            print("Can't read " + variable.name)
\n\n    #getting predictor station and time data\n    predr = Dataset(predictor_file_path[0])\n    predr_stat = predr.variables['station'][:]\n    if lead_time == 3:\n        predr_time = predr.variables['OM__phenomenonTimeInstant'][:]\n    elif lead_time == 6:\n        predr_time = predr.variables['OM__phenomenonTimeInstant1'][:]\n    elif lead_time == 12:\n        predr_time = predr.variables['OM__phenomenonTimeInstant2'][:]\n    predr.close()
\n\n    #reformatting predictor station and time data\n    predr_stations = stations(predr_stat)\n    predr_gmt = gmt(predr_time)
\n\n    #getting predictand station and time data\n    predd = Dataset(predictand_file_path[0])\n    predd_stat = predd.variables['station'][:]\n    predd_time = predd.variables['OM__resultTime'][:]\n    predd.close()
\n\n    #reformatting predictand station and time data\n    predd_stations = stations(predd_stat)\n    predd_gmt = gmt(predd_time)
\n\n    #choosing predictand observations that line up with predictor time\n    hour = (predictor[0].metadata['FcstTime_hour']/3600) + lead_time\n    days = len(predd_gmt)/24\n    predd_hours = [0]*days\n    k=0\n    for i in range(len(predd_gmt)):\n        if i%24 == hour:\n            predd_hours[k]=predd_gmt[i]\n            k+=1
\n\n    #catches when GFS data doesn't cover the last day of the month\n    if len(predr_gmt) < len(predd_hours):\n        predd_hours = predd_hours[:-1]
\n\n    #find missing stations\n    miss_stations = miss_station(predr_stations,predd_stations)\n    stations = predd_stations
\n\n    #station and time array\n    info = [['',''] for k in range(len(predr_gmt)*len(stations))]\n    for i in range(len(predr_gmt)):\n        for j in range(len(stations)):\n            k = i*len(stations)+j\n            info[k][0]=predr_gmt[i]\n            info[k][1]=stations[j]
\n\n    #create column names\n    names = ['']*(len(predictor)+len(predictand)+2)\n    names[0]='Time'\n    names[1]='Station'
\n\n    #creating array\n    arr = np.zeros((len(stations)*len(predr_gmt),len(predictor)+len(predictand)))
\n\n    #adding predictor data\n    for i in range(len(predictor)):\n        #remove lead time and forecast reference time from variable name\n        #and add variable name to column list of final dataframe\n        if lead_time == 12:\n            names[i+2]='GFS_'+predictor[i].get_variable_name()[:-11]\n        else:\n            names[i+2]='GFS_'+predictor[i].get_variable_name()[:-10]
\n\n        #create pandas dataframe of data and sort alphabetically by station name\n        predictor[i].data = np.squeeze(predictor[i].data,axis=2)\n        predictor[i].data = pd.DataFrame(predictor[i].data,columns=predr_stations,index=predr_gmt)\n        predictor[i].data = predictor[i].data.reindex(sorted(predictor[i].data.columns),axis=1)
\n\n        #remove stations with no predictand data\n        k=0\n        a=miss_stations[:]\n        for j in predictor[i].data.columns:\n            if not a:\n                break\n            if j==a[k]:\n                predictor[i].data=predictor[i].data.drop(j,axis=1)\n                del a[k]
\n\n        #add data to final dataframe\n        for b in range(len(predr_gmt)):\n            for c in range(len(stations)):\n                k = b*len(stations)+c\n                arr[k][i] = predictor[i].data.iloc[b][c]
\n\n    #add predictand data\n    for i in range(len(predictand)):\n        #removing extra underscore, adding variable name to column names\n        names[len(predictor)+2+i]='METAR_'+predictand[i].get_variable_name()[:-1]
\n\n        #resize array and create pandas dataframe\n        predictand[i].data = np.squeeze(predictand[i].data,axis=2)\n        predictand[i].data = pd.DataFrame(predictand[i].data,columns=predd_stations,index=predd_hours)\n        predictand[i].data = predictand[i].data.reindex(sorted(predictand[i].data.columns),axis=1)
\n\n        #remove extra days of predictand data\n        predictand[i].data = predictand[i].data.iloc[0:len(predr_time),:]
\n\n        #add predictand data to array\n        for b in range(len(predr_gmt)):\n            for c in range(len(stations)):\n                k = b*len(stations)+c\n                val = predictand[i].data.iloc[b][c]\n                #catch metar fill data\n                if val == 9999:\n                    val = np.nan\n                arr[k][len(predictor)+i]=val
\n\n    #add station and time data to array and save as csv\n    data = np.concatenate([info,arr],axis = 1)\n    to_save = pd.DataFrame(data,columns=names)\n    to_save.to_csv(str(start)+'_'+str(end)+'_'+str(lead_time)+'hrs.csv')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataframe(self):\n\n df = pd.DataFrame({'date': [],\n 'RUN': [],\n 'CLONE': [],\n 'GEN': pd.Series(0, index=[], dtype='int'),\n 'frame': pd.Series([], index=[], dtype='int'),\n 'time (ns)': [] }) # set the index\n df.set_index('date')\n print(df)\n\n # Save the DataFrame to disk\n\n ### create a file handle to store the data in (a dict-like) HDF5 format\n store = pd.HDFStore(self.dataframe_path)\n print(store)\n store.put('df', df)\n return store", "def __create_data_frame(self, soup):\n self.__data_frame = pd.read_html(str(soup))[0]\n timestamp = self.__navigate_rows(soup)\n # rename dataframe columns by columns name in sqlite\n self.__data_frame = self.__data_frame.rename(\n columns=self.__columns_name)\n self.__data_frame['time'] = pd.Series(timestamp)\n self.__data_frame['chg_perc'] = self.__data_frame['chg_perc'].\\\n str.replace('%', '')\n self.__data_frame['created_date'] = datetime.now()\n # save_file(self.__name_file, self.__data_frame.to_string())", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! 
')", "def save_dataframe(state: State):\n\n try:\n state.games.df.to_csv(ROOT_PATH + \"/results/data/raw_data.csv\")\n LOGGER.debug(\"Successfully saved data in ../results/data/\")\n\n except Exception as e:\n LOGGER.error(f\"Could not save dataframe file - {e}\")", "def data_frame_creator(self):\n sequence_folder = [\n '/SEQ1', '/SEQ2', '/SEQ3', '/SEQ4', '/SEQ5', '/SEQ6'\n ]\n rgb_folder = ['/RGBLeft/', '/RGBRight/']\n depth_folder = ['/DepthLeft/', '/DepthRight/']\n segmentation_folder = ['/GTLeft/', '/GTright/']\n rgb_dir = [\n self.dataset_dir + sequence_f + rgb_f for rgb_f in rgb_folder\n for sequence_f in sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_dir + sequence_f + depth_f\n for depth_f in depth_folder\n for sequence_f in sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_dir + sequence_f + segmentation_f\n for segmentation_f in segmentation_folder\n for sequence_f in sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1, random_state=123)\n\n return pd.DataFrame(dataset)", "def make_output_df(self):\n df = pd.concat([pd.DataFrame(dat) for dat in [self.qdata, self.pdata]], axis=1)\n columns = np.hstack(([['{}{}'.format(x, c) for c in self.actions] for x in ['q', 'p']]))\n df.columns = columns\n df.insert(0, 'trial', np.arange(1, df.shape[0]+1))\n df['choice'] = self.choices\n df['feedback'] = self.feedback\n# r = np.array(self.bandits.rvalues)\n# p = np.array(self.bandits.preward)\n df['optimal'] = self.demand\n df.insert(0, 'agent', 1)\n self.data = df.copy()", "def data_frame_creator(self):\n\n rgb_dir = [\n self.dataset_address + sequence_f + rgb_f\n for rgb_f in self.rgb_folder for sequence_f in self.sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_address + sequence_f + depth_f\n for depth_f in self.depth_folder\n for sequence_f in self.sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_address + sequence_f + segmentation_f\n for segmentation_f in self.segmentation_folder\n for sequence_f in self.sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1)\n\n return pd.DataFrame(dataset)", "def df():\n fs.df()", "def make_dataset(self, df, **kwargs):\n\t\treturn df", "def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df", "def save_to_dataframe(self):\n titles, years, months, days, authors = list(), list(), list(), list(), list()\n for doc in self.results[\"documents\"]:\n titles.append(doc['title'])\n years.append(doc['year'])\n months.append(doc['month'])\n days.append(doc['day'])\n authors.append(doc['authors'])\n return pd.DataFrame({\"title\": titles, \"years\": years, \"months\": months, \"days\": days, 
\"author\": authors})", "def save_df(data_frame, file_path):\r\n data_frame.to_csv(file_path)\r\n return None", "def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)", "def build_df(path_orig = r'.\\chest_xray', orig_file_ext = 'jpeg', path_seg = r'.\\segmentation', seg_file_ext = 'png', save_path = '.\\df_all.csv'):\n \n read_df = 'C'\n list_df = [] \n \n if os.path.exists(save_path):\n read_df = input('DataFrame was found, would you like to read it (R) or recreate it (C) (default Read)?\\n') or 'R'\n if read_df == 'R':\n df = pd.read_csv(save_path, index_col = 0)\n return df\n \n if read_df == 'C':\n for dirname, _, filenames in os.walk(path_orig):\n for filename in tqdm(filenames, disable=len(filenames)==0):\n if ('.' + orig_file_ext) in filename:\n list_val = []\n list_val.append('PNEUMONIA' if 'PNEUMONIA' in dirname else 'NORMAL')\n list_val.append(1 if 'PNEUMONIA' in dirname else 0)\n list_val.append('bacteria' if 'bacteria' in filename.lower() else 'virus' if 'virus' in filename.lower() else 'normal')\n list_val.append(1 if 'bacteria' in filename.lower() else 2 if 'virus' in filename.lower() else 0)\n list_val.append(filename)\n list_val.append(os.path.join(dirname, filename)) \n list_val.append(filename.replace(orig_file_ext, seg_file_ext))\n list_val.append(os.path.join(dirname.replace(path_orig, path_seg), filename.replace(orig_file_ext, seg_file_ext)))\n list_df.append(list_val)\n\n df = pd.DataFrame(list_df, columns = ['Label_name', 'Label_int', 'Label_pathology', 'Label_pathology_int', 'Filename_orig', 'Filepath_orig', 'Filename_seg', 'Filepath_seg'])\n df.to_csv(save_path)\n \n print('Done')\n \n return df", "def save_data(self) -> None:\n # Construct a grid in physical space\n rvals = np.logspace(start=-3,\n stop=2.5,\n num=21,\n endpoint=True)\n # Compute C, D, K1 and F on that grid\n Cvals = np.array([self.compute_C(r, Suppression.RAW) for r in rvals])\n Dvals = np.array([self.compute_D(r, Suppression.RAW) for r in rvals])\n K1vals = np.array([self.compute_K1(r, Suppression.RAW) for r in rvals])\n Fvals = np.array([self.compute_F(r, Suppression.RAW) for r in rvals])\n # Save them to file\n df = pd.DataFrame([rvals, Cvals[:, 0], Dvals[:, 0], K1vals[:, 0], Fvals[:, 0],\n Cvals[:, 1], Dvals[:, 1], K1vals[:, 1], Fvals[:, 1]]).transpose()\n df.columns = ['r', 'C(r)', 'D(r)', 'K1(r)', 'F(r)', 'dC(r)', 'dD(r)', 'dK1(r)', 'dF(r)']\n df.to_csv(self.file_path(self.filename + '.csv'), index=False)", "def build_dataframe(self):\n #Freq 0.0 2.5\n #ElementID NodeID Item\n #6901 6901 angle 0.000000+0.000000j 0.000000+0.000000j\n # sc 13.847674-0.461543j 13.855294-0.462052j\n # sd 0.625892-0.020861j 0.623742-0.020717j\n # se -12.178029+0.405894j -12.185331+0.406381j\n # sf 1.043753-0.034788j 1.046222-0.034953j\n # 6904 angle 0.000000+0.000000j 0.000000+0.000000j\n # sc -1.660571-0.416504j -1.663256-0.416978j\n # sd -2.790551+0.024178j -2.789738+0.024356j\n # se 0.627616+0.450933j 0.629571+0.451455j\n # sf 1.757596+0.010251j 1.756053+0.010121j\n #6902 6901 angle 0.000000+0.000000j 0.000000+0.000000j\n headers = self.headers\n column_names, column_values = 
self._build_dataframe_transient_header()\n self.data_frame = self._build_pandas_transient_element_node(\n column_values, column_names,\n headers, self.element_node, self.data)", "def build_dataframe() -> pd.DataFrame:\n df = pd.DataFrame(\n np.random.randint(0, 1000, size=(1000, 6)), columns=list(\"ABCDEF\")\n )\n\n return df", "def construct_data_frame(self) -> pd.DataFrame:\n data_frame = self.base_data_frame[\n [self.name_col, self.description_col]\n ].reset_index()\n data_frame.columns = [\"label_encoder\", \"name\", \"description\"]\n\n return data_frame.set_index(\"label_encoder\")", "def make_dataframe(self, dataframe_path, corpus_path):\n directory_list = os.listdir(corpus_path)\n pub_year = []\n pii = []\n doi = []\n title = []\n authors = []\n num_authors = []\n abstract = []\n journal_name = []\n\n for i in trange(len(directory_list)):\n directory = directory_list[i]\n json_dict = self.load_journal_json(f'{corpus_path}/{directory}/{directory}.json')\n\n for year in json_dict:\n for pub in json_dict[year]:\n pub_year.append(year)\n pii.append(json_dict[year][pub]['pii'])\n doi.append(json_dict[year][pub]['doi'])\n title.append(json_dict[year][pub]['title'])\n authors.append(json_dict[year][pub]['authors'])\n num_authors.append(json_dict[year][pub]['num_authors'])\n abstract.append(json_dict[year][pub]['description'])\n journal_name.append(directory)\n\n columns = ['pub_year', 'pii', 'doi', 'title', 'authors', 'num_authors', 'abstract', 'journal_name']\n df = pd.DataFrame(np.array([pub_year, pii, doi, title, authors, num_authors, abstract, journal_name], dtype=object).transpose(), columns=columns)\n df.to_pickle(dataframe_path + '/dataframe_from_CorpusGenerator' +'.pkl')", "def to_frame(self) -> DataFrame:\n if not self.is_initialized:\n _logger.info(\"Grid has not been initialized. 
Ensure to run DataGrid.initialize()\")\n return DataFrame()\n\n return self._post_process()", "def create_geneIDsDF():\n datas=data.plfam_to_matrix()\n datas.run()\n print('***Dataframe created***')", "def output_df(outdict, out_file):\n\tcols = ['#chrom', 'source', 'feature', 'chromStart', 'chromEnd', 'score', 'strand', 'frame', 'transcript_id']\n\tcolOut = ['#chrom', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'transcript_id']\n\tgtfDF = pd.DataFrame(columns=cols)\n\n\tfor trsp in outdict:\n\t\tgtfDF = gtfDF.append(outdict[trsp], ignore_index=True)\n\t\t\n\tgtfDF.columns = colOut\n\t# print gtfDF.head()\n\tgtfDF.to_csv(out_file, compression='gzip', sep='\\t', index=False)", "def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)", "def df(self):\n\n # populate dataframe with level data\n columns = {\n \"z\": self.z(),\n \"z_level_qc\": self.z_level_qc(),\n \"z_unc\": self.z_unc(),\n \"t\": self.t(),\n \"t_level_qc\": self.t_level_qc(),\n \"t_unc\": self.t_unc(),\n \"s\": self.s(),\n \"s_level_qc\": self.s_level_qc(),\n \"s_unc\": self.s_unc(),\n \"oxygen\": self.oxygen(),\n \"phosphate\": self.phosphate(),\n \"silicate\": self.silicate(),\n \"pH\": self.pH(),\n \"p\": self.p()\n }\n\n df = pd.DataFrame(columns)\n\n # record profile data in a metadata object on the dataframe\n df.attrs[\"latitude\"] = self.latitude()\n df.attrs[\"latitude_unc\"] = self.latitude_unc()\n df.attrs[\"longitude\"] = self.longitude()\n df.attrs[\"longitude_unc\"] = self.longitude_unc()\n df.attrs[\"uid\"] = self.uid()\n df.attrs[\"n_levels\"] = self.n_levels()\n df.attrs[\"year\"] = self.year()\n df.attrs[\"month\"] = self.month()\n df.attrs[\"day\"] = self.day()\n df.attrs[\"time\"] = self.time()\n df.attrs[\"cruise\"] = self.cruise()\n df.attrs[\"probe_type\"] = self.probe_type()\n df.attrs[\"originator_flag_type\"] = self.originator_flag_type()\n df.attrs[\"PIs\"] = self.PIs()\n df.attrs[\"originator_station\"] = self.originator_station()\n df.attrs[\"originator_cruise\"] = self.originator_cruise()\n df.attrs[\"t_metadata\"] = self.t_metadata()\n df.attrs[\"s_metadata\"] = self.s_metadata()\n\n return df", "def save(df, out_file):\n print('------------< save >------------')\n out_path = './data'\n makedirs(out_path, exist_ok=True)\n print(f'path: {out_path}/{out_file}')\n print(f'shape: {df.shape}')\n df.to_csv(f'{out_path}/{out_file}', index=False)\n print('--------------------------------')", "def data_visualization_general(data):\n path_to_save = str(Path(__file__).parent.parent) + '/jupyter_notebook/'\n key = list(data.keys())[0] # we do this way because keys() return a dict-keys which is not subscriptable\n df = pd.DataFrame(data[key])\n file_name = key + \".csv\"\n df.to_csv(os.path.join(path_to_save, file_name))\n # we save it in the jupyter_notebook folder so that it will be easier to show data on jupyter notebook", "def save_data(df, database_filename):\n engine = create_engine('sqlite:///' +database_filename)\n df.to_sql('Project2', engine, index=False)", "def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df", "def sourceToDataframe(self):\n df = pd.read_excel(self.filename)\n df.columns = df.iloc[10]\n df = df.drop(df.index[:11])\n self.df = df #makes this df accessible to 
the whole class now\n self.insertODN()\n display(df.head())", "def data_frame_creator(self):\n\n return pd.DataFrame()", "def dataframe():\n headers = get_headers()\n headers = {'headers': headers}\n headers = pd.DataFrame.from_dict(headers, orient='index')\n headers = headers.replace(r'\\n', ' ', regex=True)\n headers = headers.replace(r'\\r', ' ', regex=True)\n headers = headers.replace(r'\\t', ' ', regex=True)\n headers = headers.replace(r'\\\\t', ' ', regex=True)\n headers = headers.replace(r' ', ' ', regex=True)\n headers = headers.replace(r' ', ' ', regex=True)\n\n paragraphs = get_paragraphs()\n paragraphs = {'paragraphs': paragraphs}\n paragraphs = pd.DataFrame.from_dict(paragraphs, orient='index')\n paragraphs = paragraphs.replace(r'\\n', ' ', regex=True)\n paragraphs = paragraphs.replace(r'\\r', ' ', regex=True)\n paragraphs = paragraphs.replace(r'\\t', ' ', regex=True)\n paragraphs = paragraphs.replace(r'\\\\t', ' ', regex=True)\n paragraphs = paragraphs.replace(r' ', ' ', regex=True)\n paragraphs = paragraphs.replace(r' ', ' ', regex=True)\n\n return headers.to_csv('headers.csv', index=False), paragraphs.to_csv('paragraphs.csv', index=False)", "def execute(self):\n try:\n self.data_frame.write.mode('append').format(self.file_format).save(self.location)\n return self.data_frame\n except AnalysisException as exp:\n raise", "def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)", "def create_data_frame(self):\n column_names = Annotations.create_columns(self.headers, self.annot_types)\n dtypes = Annotations.get_dtypes_for_group_annots(self.headers, self.annot_types)\n df = self.open_file(\n self.file_path,\n open_as=\"dataframe\",\n # Coerce values in group annotations\n converters=dtypes,\n # Header/column names\n names=self.headers,\n # Prevent pandas from reading first 2 lines in file\n # since they're passed in with param 'names'\n skiprows=2,\n )[0]\n self.file = Annotations.convert_header_to_multi_index(df, column_names)", "def createDataFrame(path):\n df = pd.read_csv(path)\n df = df[['planet_name', 'planet_mass', 'orbital_radius', 'host_name', \n 'spectral_type', 'stellar_age', 'stellar_radius', \n 'stellar_mass', 'stellar_temperature', 'stellar_luminosity', \n 'optical_magnitude', 'near_ir_magnitude', \n 'stellar_surface_gravity', 'stellar_metallicity']]\n \n df = df.dropna(subset=['spectral_type'])\n df.spectral_type = df.spectral_type.str[0:1]\n df.spectral_type = df.spectral_type.str.strip()\n classification = np.array(['O','B','A','F','G','K','M'])\n df = df[df.spectral_type.isin(classification)]\n df.insert(4, \"amount_of_planets\", 0)\n 
df.amount_of_planets = df.groupby('host_name')['host_name'].transform('count')\n \n df.planet_mass = np.log10(df.planet_mass)\n df.orbital_radius = np.log10(df.orbital_radius)\n \n df = df.sort_values(by=['host_name'])\n df = df.reset_index(drop=True) \n \n return df", "def create_data_table(df: pd.DataFrame) -> pd.DataFrame:\n\n df = df.copy()\n\n # Normalize times by labeling all of today's data with its future label, 00:00\n # tomorrow (as that's the timestamp marking the end of the 24-hour data collection\n # period). No need to adjust data not from today; it's already been adjusted and is\n # labeled with the date whose 00:00 marked the end of data collection (i.e., data\n # generated on Mar 20 is labeled Mar 21).\n normalized_dates = df[Columns.DATE].dt.normalize()\n is_at_midnight = df[Columns.DATE] == normalized_dates\n df.loc[~is_at_midnight, Columns.DATE] = normalized_dates[\n ~is_at_midnight\n ] + pd.Timedelta(days=1)\n df[Columns.DATE] = df[Columns.DATE].dt.strftime(r\"%Y-%m-%d\")\n\n df = df.drop(\n columns=[\n Columns.IS_STATE,\n Columns.LOCATION_NAME,\n Columns.OUTBREAK_START_DATE_COL,\n Columns.DAYS_SINCE_OUTBREAK,\n Columns.POPULATION,\n Columns.STAGE,\n Columns.COUNT_TYPE,\n ]\n )\n\n df = (\n df.pivot_table(\n index=[\n c\n for c in df.columns\n if c not in [Columns.CASE_TYPE, Columns.CASE_COUNT]\n ],\n columns=Columns.CASE_TYPE,\n values=Columns.CASE_COUNT,\n aggfunc=\"first\",\n )\n .reset_index()\n .sort_values([Columns.COUNTRY, Columns.STATE, Columns.DATE])\n )\n\n for col in CaseInfo.get_info_items_for(\n InfoField.CASE_TYPE, count=Counting.TOTAL_CASES\n ):\n df[col] = pd.to_numeric(df[col], downcast=\"integer\")\n\n # save_path = Paths.DATA / \"data_table.csv\"\n # df.to_csv(save_path, index=False)\n # print(f\"Saved data to {save_path.relative_to(Paths.ROOT)}\")\n\n return df", "def save_fitted_dataframe(comp_data_df, filename):\r\n comp_data_df.to_csv('{}.csv'.format(filename))\r\n return 0", "def generateDataFrame(self):\n labelArray = []\n\n # At this level ignored files are excluded\n for item in self.added:\n self.folderTree.append(item)\n\n for item in self.folderTree:\n if item in self.modified:\n labelArray.append('modified')\n elif item in self.deleted:\n labelArray.append('deleted')\n elif item in self.ignored:\n labelArray.append('ignored')\n elif item in self.added:\n labelArray.append('added')\n else:\n labelArray.append('baseFile')\n\n df = pd.DataFrame(list(zip(self.folderTree, labelArray)), \\\n columns=['File', 'Type'])\n self.fileDataFrame = df", "def _get_outputdf(self):\n keys = self.info_df['Trace'].values.tolist()\n frame = deepcopy([line.df for line in self.info_df['Line'].values.tolist()])\n for i in range(len(frame)):\n df = frame[i]\n num = list(range(len(df)))\n angle_gr = list(map(deg2grad,df['Angle Horizontal'].values))\n df.insert(0,'Number', ['s' + '-'.join(x) + 'gr' for x in zip(map(str,num),map(str,map(int,angle_gr)))])\n df.insert(1, 'Name', keys[i])\n return pd.concat(frame, keys=keys, join='inner', ignore_index=True)", "def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))", "def gen_main_df(add_list: list):\r\n # 由Bert 计算得来的 sentiment信息\r\n if 'sentiment' not in 
data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sentiment')\r\n sentiment = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'daily_svm_sentiment_6class' , 'csv')[0],\r\n 'date', ['0'], 'sentiment') # 'daily_svm_sentiment_2class' '0', '1', '2', '3', '4', '5'\r\n data_manipulator.add_column(sentiment)\r\n # 中国CPI指数\r\n if 'cpi' in add_list and 'cpi' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('cpi')\r\n cpi = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'CPI', 'csv')[0],\r\n '日期', ['最新值', '涨跌幅', '近3月涨跌幅'], 'CPI')\r\n data_manipulator.add_column(cpi)\r\n # 上海银行间同业拆放利率\r\n if 'shibor' in add_list and 'shibor' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shibor')\r\n shibor = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'shibor', 'csv')[0],\r\n 'date', ['on', '1w', '2w', '1m', '3m'], 'Shibor')\r\n data_manipulator.add_column(shibor)\r\n # 上证综指\r\n if 'shangzheng' in add_list and 'shangzheng' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shangzheng')\r\n shangzheng = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng', 'csv')[0],\r\n 'trade_date', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount',\r\n 'total_mv', 'float_mv', 'total_share', 'float_share',\r\n 'free_share', 'turnover_rate', 'turnover_rate_f', 'pe',\r\n 'pe_ttm', 'pb'],\r\n 'ShangZheng')\r\n data_manipulator.add_column(shangzheng)\r\n data_manipulator.shift_columns(['ShangZheng_pct_chg'], (-1,),\r\n add=True) # name has changed to shift-1_ShangZheng_pct_chg\r\n data_manipulator.rank_df_column(['shift-1_ShangZheng_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n shangzheng_30min = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng_index_30min', 'csv')[0],\r\n 'trade_time', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount'],\r\n 'ShangZheng_30min')\r\n data_manipulator.news_df_add_column(shangzheng_30min)\r\n data_manipulator.shift_minute_columns(['ShangZheng_30min_pct_chg'], (-1,),\r\n add=True)\r\n data_manipulator.rank_minute_df_columns(['shift-1_ShangZheng_30min_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n\r\n # M2 广义货币量\r\n if 'm2' in add_list and 'm2' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('m2')\r\n m2 = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'M2', 'csv')[0],\r\n '月份', ['M2数量(亿元)', 'M2同比增长', 'M2环比增长'], 'M2')\r\n m2 = data_manipulator.complement_df(m2, 'date')\r\n data_manipulator.add_column(m2)\r\n\r\n # 人民币美元汇率\r\n if 'rmb_usd' in add_list and 'rmb_usd' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('rmb_usd')\r\n rmb_usd = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'RMB_USD', 'csv')[0],\r\n 'trade_date',\r\n ['bid_open', 'bid_close', 'bid_high', 'bid_low', 'ask_open',\r\n 'ask_close', 'ask_high', 'ask_low', 'tick_qty'], 'exchange')\r\n data_manipulator.add_column(rmb_usd)\r\n\r\n # 沪港通 沪深通 到岸 离岸资金流\r\n if 'fund_flow' in add_list and 'fund_flow' not in data_manipulator.used_measure_list:\r\n 
data_manipulator.used_measure_list.append('fund_flow')\r\n fund_flow = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'fund_flow', 'csv')[0],\r\n 'trade_date', ['north_money', 'south_money'], 'fund_flow')\r\n data_manipulator.add_column(fund_flow)\r\n\r\n # 债券回购日行情\r\n if 'repo' in add_list and 'repo' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('repo')\r\n repo = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'repo', 'csv')[0],\r\n 'trade_date', ['repo_maturity', 'open', 'high', 'low', 'close',\r\n 'amount'], 'repo', data_manipulator.cut_time_string,\r\n (0, 10,))\r\n repo = data_manipulator.select_col_group_by(repo, 'repo_repo_maturity', ['GC001', 'GC007', 'GC014', 'GC028'],\r\n 'date')\r\n data_manipulator.add_column(repo)\r\n\r\n # 新浪新闻\r\n if 'sina_news' in add_list and 'sina_news' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sina_news')\r\n columns_type = {'create_time': str, 'text': str}\r\n sina_news = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'sina', 'csv')[0],\r\n 'create_time', ['text', ], 'sina', dtypes=columns_type)\r\n data_manipulator.add_change_news('sina', (7, 9), columns_type, sina_news, time_col_name='create_time')\r\n data_manipulator.add_minute_change_news('sina', columns_type, sina_news, time_col_name='create_time')\r\n if 'scale' in add_list:\r\n data_manipulator.scaling_col()\r\n if 'clear' in add_list:\r\n data_manipulator.clear()", "def data(self):\n dfdata = pd.concat([self.weights, self.returns, self.category], axis=1)\n dfdata.columns = ['weights', 'returns', self.category_name]\n if self.period is not None:\n dfdata['date'] = self.period\n return dfdata", "def create_query_csv(self):\n\n self.query_df.to_csv(self.query_output_file)", "def run(self) -> DataFrame:\n with self.create_census_api_session():\n logger.info('Retrieving variables...')\n variables: Variables = self.get_variables()\n logger.info('Retrieving ACS tables...')\n tables = self.get_tables()\n\n # Add geometry\n gazetteer_files: List[GazetteerFile] = []\n shapefiles: List[Shapefile] = []\n if self.geometry == 'points':\n logger.info('Retrieving Gazetteer files...')\n gazetteer_files.extend(self.get_gazetteer_files())\n elif self.geometry == 'polygons':\n logger.info('Retrieving shapefiles...')\n shapefiles.extend(self.get_shapefiles())\n dataframe = self.assemble_dataframe(variables, tables, gazetteer_files, shapefiles)\n return dataframe", "def build_graphs(df: pd.DataFrame, ir_dir: Path, graph_dir: Path):\n for _, row in df.iterrows():\n with open(ir_dir / f\"{row['name']}.ll\") as f:\n ir = f.read()\n graph = pg.from_llvm_ir(ir)\n graph.features.feature[\"devmap_label\"].int64_list.value[:] = [row[\"label\"]]\n graph.features.feature[\"wgsize\"].int64_list.value[:] = [row[\"wgsize\"]]\n graph.features.feature[\"transfer_bytes\"].int64_list.value[:] = [\n row[\"transfer_bytes\"]\n ]\n graph.features.feature[\"wgsize_log1p\"].float_list.value[:] = [\n row[\"wgsize_log1p\"]\n ]\n graph.features.feature[\"transfer_bytes_log1p\"].float_list.value[:] = [\n row[\"transfer_bytes_log1p\"]\n ]\n pbutil.ToFile(\n graph, graph_dir / f\"{row['name']}.ProgramGraph.pb\", exist_ok=False\n )", "def write(self):\n \n self.df.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/mls.csv')", "def to_df(self):\n if self.shape > 1:\n range_str = [s for s in range(self.shape)]\n iterables = [self.columns, 
range_str]\n multiindex = pd.MultiIndex.from_product(iterables, names=['song', 'frame'])\n # multiindex = [i for i in itertools.product(self.columns, range_str, repeat=1)]\n df = pd.DataFrame(columns=multiindex, index=self.columns, dtype=np.float64)\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n for s in range_str:\n df.loc[c_1][c_2, s] = self.dict_[c_1][c_2][s]\n df = df.T\n else:\n df = pd.DataFrame(columns=self.columns + ['song'], dtype=np.float64)\n df['song'] = self.columns\n df = df.set_index('song')\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n df.loc[c_1, c_2] = self.max_diff(c_1, c_2)\n\n return df", "def create_sample_dataframe():\n ax_readings = []\n ay_readings = []\n az_readings = []\n mx_readings = []\n my_readings = []\n mz_readings = []\n gx_readings = []\n gy_readings = []\n gz_readings = []\n activity_list = [LABELS_NAMES[0] for _ in range(SEGMENT_TIME_SIZE)]\n\n\n for _ in range(SEGMENT_TIME_SIZE):\n ax_readings.append(random.uniform(-10,10))\n ay_readings.append(random.uniform(-10,10))\n az_readings.append(random.uniform(-10,10))\n mx_readings.append(random.uniform(-10,10))\n my_readings.append(random.uniform(-10,10))\n mz_readings.append(random.uniform(-10,10))\n gx_readings.append(random.uniform(-10,10))\n gy_readings.append(random.uniform(-10,10))\n gz_readings.append(random.uniform(-10,10))\n\n data_dict = {\n COLUMN_NAMES[0]: activity_list, COLUMN_NAMES[1]: ax_readings,\n COLUMN_NAMES[2]: ay_readings, COLUMN_NAMES[3]: az_readings,\n COLUMN_NAMES[4]: gx_readings, COLUMN_NAMES[5]: gy_readings,\n COLUMN_NAMES[6]: gz_readings, COLUMN_NAMES[7]: mx_readings,\n COLUMN_NAMES[8]: my_readings, COLUMN_NAMES[9]: mz_readings\n }\n\n df = pd.DataFrame(data=data_dict)\n return df", "def export_data(self, pth):\n self.cleanup_allowed = False\n self.train_df.to_csv(os.path.join(pth, \"train.csv\"))\n self.valid_df.to_csv(os.path.join(pth, \"valid.csv\"))\n self.test_df.to_csv(os.path.join(pth, \"test.csv\"))", "def save_dataframe(dataframe, filename):\n with open(filename, \"w\", encoding=\"utf8\") as outfile: \n dataframe.to_csv(outfile, sep=\",\")", "def create_data_frame_for_figures(\n results_path, save_path, results_folder_name, is_baseline=False\n):\n\n # Columns will be loading in data for\n\n columns = [\n \"filename\",\n \"sim_id\",\n \"virtual_patient_num\",\n \"sensor_num\",\n \"patient_scenario_filename\",\n \"age\",\n \"ylw\",\n \"cir\",\n \"isf\",\n \"sbr\",\n \"starting_bg\",\n \"starting_bg_sensor\",\n \"true_bolus\",\n \"initial_bias\",\n \"bias_norm_factor\",\n \"bias_drift_oscillations\",\n \"bias_drift_range_start\",\n \"bias_drift_range_end\",\n \"noise_coefficient\",\n \"delay\",\n \"bias_drift_type\",\n \"bias_type\",\n \"noise_per_sensor\",\n \"noise\",\n \"bias_factor\",\n \"phi_drift\",\n \"drift_multiplier\",\n \"drift_multiplier_start\",\n \"drift_multiplier_end\",\n \"noise_max\",\n \"mard\",\n \"mbe\",\n \"bg_test_condition\",\n \"analysis_type\",\n \"lbgi\",\n \"lbgi_risk_score\",\n \"dkai\",\n \"dkai_risk_score\",\n \"hbgi\",\n \"bgri\",\n \"percent_lt_54\",\n ]\n\n # Blank list to keep track of scenarios removed because settings are outside of clinical bounds.\n removed_scenarios = []\n\n # Blank list for adding data to\n data = []\n\n # Iterate through each of the files\n for i, filename in enumerate(\n sorted(os.listdir(results_path))\n ): # [0:100])): #(for testing)\n # Identify file is simulation file\n if filename.endswith(\".tsv\"):\n\n print(i, filename)\n\n # Read in that simulation data to a dataframe\n simulation_df 
= pd.read_csv(os.path.join(results_path, filename), sep=\"\\t\")\n\n # Check that the first two bg values are equal\n assert (\n simulation_df.loc[0][\"bg\"] == simulation_df.loc[1][\"bg\"]\n ), \"First two BG values of simulation are not equal\"\n\n # Find and read in the corresponding json data\n f = open(os.path.join(results_path, filename.replace(\".tsv\", \".json\")), \"r\")\n simulation_characteristics_json_data = json.loads(f.read())\n\n # Get the scenario settings characteristics so can check whether outside clinical bounds\n cir = simulation_characteristics_json_data[\"patient\"][\"config\"][\n \"carb_ratio_schedule\"\n ][\"schedule\"][0][\"setting\"].replace(\" g\", \"\")\n\n isf = simulation_characteristics_json_data[\"patient\"][\"config\"][\n \"insulin_sensitivity_schedule\"\n ][\"schedule\"][0][\"setting\"].replace(\" m\", \"\")\n\n sbr = simulation_characteristics_json_data[\"patient\"][\"config\"][\n \"basal_schedule\"\n ][\"schedule\"][0][\"setting\"].replace(\" U\", \"\")\n\n # If any of the settings are outside clinical bounds, add to removed scnearios list and do not load\n # into aggregated dataframe\n if settings_outside_clinical_bounds(cir, isf, sbr):\n print(filename + \" has settings outside clinical bounds.\")\n removed_scenarios.append([filename, cir, isf, sbr])\n\n # Otherwise add that data to data list\n else:\n\n # Get a row of data for that particular simulation and scenario characteristics\n data_row = get_data(\n filename,\n simulation_df,\n simulation_characteristics_json_data,\n baseline=is_baseline,\n )\n\n # Confirm that the length of the returned data matches the number of columns for\n # ultimate aggregate datafrrame\n assert len(data_row) == len(\n columns\n ), \"length of returned data does not match number of columns\"\n\n data.append(data_row)\n\n # Create and save dataframe of removed scenarios\n removed_scenarios_df = pd.DataFrame(\n removed_scenarios, columns=[\"filename\", \"cir\", \"isf\", \"sbr\"]\n )\n removed_scenarios_filename = \"{}_{}_{}_{}\".format(\n \"removed_scenarios_df\", results_folder_name, utc_string, code_version\n )\n\n removed_scenarios_df.to_csv(\n path_or_buf=os.path.join(save_path, removed_scenarios_filename + \".csv\"),\n index=False,\n )\n\n # Create the results dataframe\n results_df = pd.DataFrame(data, columns=columns)\n\n # Clean up the results dataframe\n results_df = clean_up_results_df(results_df)\n\n # Save the results dataframe to a csv\n\n results_df_filename = \"{}_{}_{}_{}\".format(\n \"results_df\", results_folder_name, utc_string, code_version\n )\n\n results_df.to_csv(\n path_or_buf=os.path.join(save_path, results_df_filename + \".csv\"),\n index=False,\n )\n\n return results_df", "def create_model_csv(self):\n\n self.model_df.to_csv(self.model_output_file)", "def to_df(self):\n from ..df import DataFrame\n\n return DataFrame(self)", "def __generate_data_table__(self):\n # | - __generate_data_table__\n rows_list = []\n for job in self.job_var_lst:\n revisions = self.job_revision_number(job)\n for revision in range(revisions + 1)[1:]:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop in job:\n entry_param_dict[prop[\"property\"]] = prop[\"value\"]\n\n entry_param_dict[\"variable_list\"] = job\n entry_param_dict[\"path\"] = self.var_lst_to_path(job)\n\n entry_param_dict[\"max_revision\"] = revisions\n entry_param_dict[\"revision_number\"] = revision\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def 
generate_graph_feature(self):\n traj_graph_feature = [traj.get_graph_feature() for traj in self.trajectories]\n self.df_graph_feature = pd.DataFrame(traj_graph_feature)\n self.df_graph_feature[\"LABEL\"] = self.df[\"LABEL\"]\n return self.df_graph_feature", "def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)", "def create_df(Varr, Iarr, POA, T, mode):\n df = pd.DataFrame()\n df['voltage'] = Varr\n df['current'] = Iarr\n df['E'] = POA\n df['T'] = T\n df['mode'] = mode\n return df", "def create_df(datadir: str, ext: str='txt') -> pd.DataFrame:\n\n datalist = []\n for name in os.listdir(datadir):\n filename = '/'.join([datadir, name])\n if os.path.isfile(filename) and ext in name[-len(ext):]:\n row_data = []\n content = read_file.read_file(filename)\n row_data.append(read_file.extract_name(content))\n row_data.append(read_file.extract_year(content))\n row_data.append(read_file.extract_form_factor(content))\n row_data.append(read_file.extract_max_power(content))\n row_data.append(read_file.extract_min_power(content))\n row_data.append(read_file.extract_cpu_speed(content))\n row_data.append(read_file.extract_core_num(content))\n for ind in range(10, 100, 10):\n row_data.append(read_file.extract_int_power(content, ind))\n datalist.append(row_data)\n\n return pd.DataFrame(data=datalist, columns=[\n 'Name', 'Year', 'FormFac', 'MaxPower', 'IdlePower', 'CPU speed',\n 'NumCores'\n ]+[''.join([str(ind), '%Power']) for ind in range(10, 100, 10)])", "def save_data(df, database_filename):\n # Create a database connection \n engine = create_engine('sqlite:///' + database_filename)\n \n # Insert df into DisasterCategories table\n df.to_sql('DisasterCategories', engine, index=False)", "def save_data(df: pd.DataFrame, database_filename: str) -> None:\n engine = create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql(Path(database_filename).stem, engine, index=False, if_exists=\"replace\")", "def save_data(df, database_filename):\n engine = create_engine('sqlite:///'+database_filename)\n df.to_sql('disasterdata', engine, index=False)", "def make_dataframes(self):\n self._data_frame_30days = pd.DataFrame(self._all30_dict, index=SIRPPipeline.data_frame_INDEX).transpose()\n self._data_frame_60days = pd.DataFrame(self._all60_dict, index=SIRPPipeline.data_frame_INDEX).transpose()\n self._data_frame_90days = pd.DataFrame(self._all90_dict, index=SIRPPipeline.data_frame_INDEX).transpose()\n self._data_frame_counts = pd.DataFrame(\n {\n \"Created\": {\"totals\": self._data_frame_30days.count()[\"Created\"]},\n \"Closed\": {\"totals\": self._data_frame_30days.count()[\"Closed\"]},\n \"Owner\": (self._data_frame_30days[\"Owner\"].value_counts().to_dict()),\n \"Resolution\": (self._data_frame_30days[\"Resolution\"].value_counts().to_dict()),\n \"Severity\": (self._data_frame_30days[\"Severity\"].value_counts().to_dict()),\n },\n index=self.counts_frame_INDEX,\n )\n self._data_frame_counts.fillna(0, inplace=True)", "def dataFrame(self):\n\n memory_file = StringIO(initial_value=self.sparql_result.decode('utf-8'), newline='\\n')\n reader = DictReader(memory_file)\n\n schema = 
StructType(\n list(map(lambda f: StructField(f, StringType()), reader.fieldnames))\n )\n\n data = list(map(lambda d: [d[f] for f in reader.fieldnames], list(reader)))\n\n return self.spark.createDataFrame(data, schema)", "def run(self, *args, **kw):\n super(GeopandasWriter, self).run(*args, **kw)\n data = self.get_input_data()\n data.to_file(self.file_name, self.format)", "def write_df_to_db(df, db_path):\n print \"Writing to 'results' table in db: \", db_path\n conn = sqlite3.connect(db_path)\n df.to_sql(\"results\", con=conn,if_exists='replace')", "def create_catalog_dataframe(save_dataframe):\n if not save_dataframe:\n return pd.read_pickle(f'{path_dictionary[\"catalog_dataframe_grouped_path\"]}')\n\n catalog_items = DbHelper.get_all_catalog_items()\n\n # Create dataframe to put all information together\n columns = ['user_id', 'item_id', 'session_id', 'window_size_x', 'window_size_y', 'page_size_x', 'page_size_y', 'catalog_item_list', 'user_log_list']\n catalog_items_df = pd.DataFrame(catalog_items, columns=columns)\n\n # Clean 'Catalog Items' that user see during a session\n catalog_items_df['catalog_item_list'] = catalog_items_df.apply(clean_objects_listed, axis=1)\n\n # Clean Log Files\n catalog_items_df['user_log_list'] = catalog_items_df.apply(clean_logs, axis=1)\n\n # Get Catalog Items that user hover or has a click action, her/his mouse\n catalog_items_df = get_interacted_catalog_items(catalog_items_df)\n\n # Label the catalog items as 0\n catalog_items_df['catalog_item_list'] = catalog_items_df.apply(label_page_type, axis=1)\n\n catalog_items_df_grouped = catalog_items_df.groupby(['user_id', 'session_id'], as_index=False).agg(lambda x: list(x))\n catalog_items_df_grouped.drop(['item_id', 'window_size_x', 'window_size_y', 'page_size_x', 'page_size_y'], axis=1, inplace=True)\n\n if save_dataframe:\n catalog_items_df.to_pickle(f'{path_dictionary[\"path_raw_catalog_dataframe\"]}')\n catalog_items_df_grouped.to_pickle(f'{path_dictionary[\"path_catalog_dataframe\"]}')\n catalog_items_df_grouped.to_csv(f'{path_dictionary[\"path_catalog_csv\"]}', index=False, sep='|')\n return catalog_items_df_grouped", "def makeDF(csv_path):\n DF = pd.read_csv(csv_path)\n\n DF['height'] = DF.apply(lambda DF: abs(DF['ymax'] - DF['ymin']), axis=1)\n DF['width'] = DF.apply(lambda DF: abs(DF['xmax'] - DF['xmin']), axis=1)\n DF['objArea'] = DF.apply(lambda DF: (DF['width'] * DF['height']), axis=1)\n imageArea = 2704 * 1524\n DF['objPortion'] = DF.apply(lambda DF: (DF['objArea'] / imageArea), axis=1)\n\n # DF.to_csv('/NewDF.csv')\n DF.to_json('json_annot_all.json')\n\n # Looking at the first 5 rows to get the insigt on the data.\n print(DF.head(5))\n print(DF.label.unique())\n return DF", "def make_df(self):\n # read in file\n df = pd.read_csv(self.data_file)\n cols_to_drop = [f'view{x}' for x in range(1,4)]+['response']\n # subtract loc3 viewing from location of interest\n df[self.label_key] = df[self.predictor] - df['view3']\n df.drop(cols_to_drop, axis=1, inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n return df", "def export_data(self):\r\n stocks = {}\r\n headings = ['Security', 'Price', 'Change', 'Change %', '52 Week', 'Market Cap']\r\n\r\n for data in range(6):\r\n for items in self.root.main.treeview.get_children():\r\n values = self.root.main.treeview.item(items, 'values')\r\n if headings[data] not in stocks:\r\n stocks[headings[data]] = []\r\n stocks.get(headings[data]).append(values[data])\r\n\r\n df = pd.DataFrame(stocks, columns=headings)\r\n path = 
tk.filedialog.asksaveasfilename(title='Save File As...',\r\n filetypes=((\"CComma-separated values (.csv)\", \"*.csv\"), (\"Text Document(.txt)\", \"*.txt\")))\r\n\r\n if not path:\r\n return\r\n else:\r\n df.to_excel(path, index=False, header=True)", "def dataframe(self):\n df = pd.DataFrame({'x':self.x, 'y':self.y, 'd':self.d})\n\n if self.z is not None:\n for k, v in self.z.items():\n df[k] = v\n\n return df", "def save(self):\n\t\t# save self.dfAnalysis\n\t\tcsvPath = self._getSavePath()\n\t\tprint('saving:', csvPath)\n\t\tself.dfAnalysis.to_csv(csvPath)", "def data_solution_save(sol, column_names, file_name_prefix='vaccination_data_solution'):\n df = pd.DataFrame(sol)\n df.columns = column_names\n string_date = str(datetime.date(datetime.now()))\n file_name = file_name_prefix + string_date + \".pkl\" #\n df.to_pickle(file_name)\n return", "def save_data(dataset_path: str,\n graphs: np.ndarray,\n influence_features: np.ndarray,\n labels: np.ndarray,\n distances: np.ndarray,\n embeddings: np.ndarray,\n own_company: np.ndarray,\n data_splits: np.ndarray,\n family_flag: np.ndarray) -> None:\n df_graphs = pd.concat(\n [pd.DataFrame(graph, dtype=int)\n for graph in graphs],\n keys=range(graphs.shape[0]),\n names=['observation id', 'neighbor id'])\n df_graphs.columns.name = 'neighbor id'\n\n df_normalized_embedding = pd.concat(\n [pd.DataFrame(normalized_embedding, dtype=float)\n for normalized_embedding in embeddings],\n keys=range(graphs.shape[0]),\n names=['observation id', 'neighbor id'])\n df_normalized_embedding.columns.name = 'embedding dimension'\n\n df_influence = pd.concat(\n [pd.DataFrame(influence_feature,\n columns=['influence', 'ego id'], dtype=int)\n for influence_feature in influence_features],\n keys=range(influence_features.shape[0]),\n names=['observation id', 'neighbor id'])\n\n df_labels = pd.Series(labels, name='label')\n df_labels.index.name = 'observation id'\n\n df_data_splits = pd.Series(data_splits, name='data_split')\n df_data_splits.index.name = 'observation id'\n\n df_family_flag = pd.Series(family_flag, name='family_flag')\n df_family_flag.index.name = 'observation id'\n\n df_distances = pd.Series(distances, name='distance')\n df_distances.index.name = 'observation id'\n\n df_own_company = pd.Series(own_company, name='own_company')\n df_own_company.index.name = 'observation id'\n\n dataset = os.path.basename(dataset_path[:-1])\n save_path = f'./public/data/{dataset}'\n os.makedirs(save_path, exist_ok=True)\n df_normalized_embedding.to_csv(save_path + '/Public_Normalized_Embedding.csv')\n # np.save(save_path + '/Public_Normalized_Embedding', embeddings)\n # torch.save(embeddings.detach(), save_path + '/Public_Normalized_Embedding_tensor.pt')\n df_influence.to_csv(save_path + '/Public_Influence_Features.csv')\n df_labels.to_csv(save_path + '/Public_Labels.csv')\n df_graphs.to_csv(save_path + '/Public_Graphs.csv')\n df_distances.to_csv(save_path + '/Public_Distances.csv')\n df_own_company.to_csv(save_path + '/Public_Own_Company_flag.csv')\n df_data_splits.to_csv(save_path + '/Public_Dataset_splits.csv')\n df_family_flag.to_csv(save_path + '/Public_Family_flag.csv')", "def execute(self):\n try:\n self.data_frame.write.partitionBy(self.partition_by).mode('append') \\\n .format(self.file_format).save(self.location)\n return self.data_frame\n except AnalysisException as exp:\n raise", "def return_combine_df_for_graph():\n\n\tkey = '18c95216b1230de68164158aeb02e2c2'\n\t# bade login with key\n\tbase = Dashboard(key)\n\t# get csv with write vars\n\tstart 
=os.path.dirname(os.path.realpath(sys.argv[0])) \n\tpath = os.path.join(start, 'Fred Graphs')\n\t# this path was used for flask,anothe day to fix this one \n\t#path = '/home/mike/Documents/coding_all/ModelApp/app/Fred Graphs'\n\t#base.write_fred_data_to_csv_from_dict(fred_econ_data, path)\n\n\t# convert csv to dict to use\n\tdf_dict = base.saved_csvs_to_dict(fred_econ_data.keys(), path)\n\n\t# skipped step, drop down, whyich can be typed into bc at some point will have list of all vars\n\t# and display graph indivusal with relevent data (type, seaonailty) displayed liek fed\n\n\t# next combine wanted vars to single df\n\tcombined_df = base.get_full_group_df(df_dict, time_interval='6M', group_unit='mean')\n\t# get spreads for IR rates\n\tcols_against = ['10 YR Treasury','Moody Aaa Yield','Moody Baa Yield','30 Year Mortgage', ]\n\tbase_col = 'Federal Funds Rate'\n\tspread_dict = base.get_yield_spreads_new(combined_df, cols_against, base_col, graph='no')\n\tspread_dict['date'] = combined_df.index\n\tcombined_spread_df = pd.DataFrame.from_dict(spread_dict)\n\tcombined_spread_df = combined_spread_df.set_index('date')\n\tcombined_spread_df.index = pd.to_datetime(combined_spread_df.index)\n\treturn combined_df, combined_spread_df", "def fetch_training_df(df):\n\n gen_df = df.copy()\n gen_df.drop(['artist_name', 'title', 'release'], axis=1, inplace=True)\n return gen_df", "def generate_DataFrame(file_path):\n # print (\"Generating DataFrame\")\n __log(1, 'Generating DataFrame....')\n\n df = pd.read_csv(file_path)\n df = df.rename(columns=lambda x: x.strip())\n df = df.dropna()\n\n for i in list(df.keys()):\n df[i] = df[i].apply(cleaning)\n\n # print (\"DataFrame Generated Successfully\")\n __log(1, 'DataFrame Generated Sucessfully.')\n return df", "def createDataframe(filepath_train, filepath_test, domain, type_dataset):\n \n print(\"Reading pickle files...\")\n #read pickle files\n with open(filepath_train, \"rb\") as pkl_file:\n traindata = pickle.load(pkl_file)\n \n with open(filepath_test, \"rb\") as pkl_file:\n testdata = pickle.load(pkl_file)\n \n if domain == 'lopen':\n d = '.D450: Lopen en zich verplaatsen'\n l = 'FAC '\n elif domain == 'stemming':\n d = '.B152: Stemming'\n l = 'STM '\n elif domain == 'beroep':\n d = '.D840-859: Beroep en werk'\n l = 'BER '\n elif domain == 'inspanningstolerantie':\n d = '.B455: Inspanningstolerantie'\n l = 'INS '\n \n df_train, df_list_tr = completeDataframe(traindata)\n df_test, df_list_te = completeDataframe(testdata)\n \n #get note id's from keys in dataframe\n if type_dataset == 'covid':\n ids= []\n list_keys = df_test['key'].tolist()\n for key in list_keys:\n y = key.split('--')[3]\n ids.append(y)\n df_test['note_id'] = ids\n \n if type_dataset == 'noncovid':\n ids = []\n for instance in df_list_te:\n le = len(str(instance[2]))\n if instance[3] == \"['']\":\n ids.append(instance[0].split('---')[1])\n else:\n ids.append(instance[0].split('---')[1][:-le])\n df_test['note_id'] = ids\n \n \n df_train[domain] = 0\n df_train['level'] = 'None'\n df_test[domain] = 0\n df_test['level'] = 'None'\n \n #Add domain labels to a seperate column\n df_train['domain'][df_train['labels'].apply(lambda x: d in x)] = d\n df_train['level'][df_train['labels'].apply(lambda x: l+'0'in x)] = 0\n df_train['level'][df_train['labels'].apply(lambda x: l+'1'in x)] = 1\n df_train['level'][df_train['labels'].apply(lambda x: l+'2'in x)] = 2\n df_train['level'][df_train['labels'].apply(lambda x: l+'3'in x)] = 3\n df_train['level'][df_train['labels'].apply(lambda x: l+'4'in x)] = 4\n 
df_train['level'][df_train['labels'].apply(lambda x: l+'5'in x)] = 5\n \n df_test['domain'][df_test['labels'].apply(lambda x: d in x)] = d\n df_test['level'][df_test['labels'].apply(lambda x: l+'0'in x)] = 0\n df_test['level'][df_test['labels'].apply(lambda x: l+'1'in x)] = 1\n df_test['level'][df_test['labels'].apply(lambda x: l+'2'in x)] = 2\n df_test['level'][df_test['labels'].apply(lambda x: l+'3'in x)] = 3\n df_test['level'][df_test['labels'].apply(lambda x: l+'4'in x)] = 4\n df_test['level'][df_test['labels'].apply(lambda x: l+'5'in x)] = 5\n\n df_train.loc[df_train['domain'] == d, domain] = 1\n df_test.loc[df_test['domain'] == d, domain] = 1\n \n \n print(\"Filtering dataframes...\")\n #filter dataframe\n del_rows_tr, filtered_df_train = filterDataframe(df_train)\n #only select instances where there is an entry for level\n df_selection_train = filtered_df_train[(filtered_df_train[domain] == 1) & (filtered_df_train['level'] != 'None')]\n\n #test1\n del_rows_te, filtered_df_test = filterDataframe(df_test)\n #gold output\n df_selection_test = filtered_df_test[(df_test[domain] == 1) & (filtered_df_test['level'] != 'None')]\n \n return(df_selection_train, df_selection_test)", "def build(self):\n list_of_mafs = []\n maf_generator = self.get_dataframe()\n\n for maf_as_dict in maf_generator:\n list_of_mafs.extend(maf_as_dict)\n\n reporting_path = os.path.join(app.config.get('REPORTING_ROOT_PATH'), app.config.get('REPORTING_PATH'), 'global')\n combined_maf = None\n try:\n combined_maf = pandas.DataFrame(list_of_mafs)\n except Exception as e:\n logger.error(f'Problem creating dataframe from list of dicts: {str(e)}')\n try:\n combined_maf.to_csv(\n os.path.join(reporting_path, f'{self.method}_combined_maf.tsv'),\n sep=\"\\t\",\n encoding='utf-8',\n index='false'\n )\n except Exception as e:\n # bad practice here catching base exception, but the pandas documentation did not reveal what errors or\n # exceptions to expect\n logger.error(f'Problem writing the combined maf file to csv:{str(e)}')\n abort(500)", "def save_data(df, database_filepath):\n # create a database connect\n conn = sqlite3.connect(database_filepath)\n # replace .db with empty space for new table name\n table_name = database_filepath.replace('.db', '')\n \n return df.to_sql(table_name, con=conn, if_exists='replace', index=False)", "def to_df(self) -> pd.DataFrame:\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]", "def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]", "def Make_DF(File=\"N1_50\", Period=\"\", Truth=False, Save=False):\n Folder = \"/scratch2/Master_krilangs/Trilepton_Ntuples/Skimslim/\"\n suffix = \"_merged_processed\"\n print(\"\\u0332\".join(File + \" \"))\n\n if Period == \"18\":\n SubFolder = \"data18_mc16e/\"\n elif Period == \"17\":\n SubFolder = \"data17_mc16d/\"\n elif Period == \"1516\":\n SubFolder = \"data1516_mc16a/\"\n else:\n SubFolder = \"\"\n\n \n if File == \"N1_50\":\n file50 = \"../myfile_allevents\"\n tree = uproot.open(Folder+file50+\".root\")[\"mytree\"]\n df = tree.pandas.df(flatten = False)\n del(tree) # Free up memory\n\n elif File == \"N1_150\":\n file150 = \"myfile_VERTEX_LFC_150_200_250\"\n tree = uproot.open(Folder + 
file150 + \".root\")[\"mytree\"]\n df = tree.pandas.df(flatten = False)\n del(tree) # Free up memory\n df = df.drop([df.index[11071], df.index[23774], df.index[60373], df.index[40743]])\n\n elif File == \"N1_450\":\n file450 = \"myfile_VERTEX_LFC_450_500_550\"\n tree = uproot.open(Folder + file450 + \".root\")[\"mytree\"]\n df = tree.pandas.df(flatten = False)\n del(tree) # Free up memory\n df = df.drop([df.index[14678], df.index[26355], df.index[39870], df.index[76527], df.index[60540], df.index[125862]])\n\n else:\n file = SubFolder + File + suffix\n tree = uproot.open(Folder + file + \".root\")[File + \"_NoSys\"]\n df_tree = tree.pandas.df(flatten = False)\n del(tree) # Free up memory\n df = df_tree.iloc[:,64:74]\n del(df_tree) # Free up memory\n if File == \"diboson3L\":\n df = df.drop([df.index[178412]])\n if File == \"diboson4L\":\n df = df.drop([df.index[3826200]])\n if File == \"topOther\":\n df = df.drop([df.index[3281191]])\n if File == \"ttbar\":\n df = df.drop([df.index[5690459], df.index[3723599]])\n\n newdf = lepaugmentation(df, 4, Truth)\n del(df) # Free up memory\n\n if Save:\n print(\"Save:\")\n newdf.to_hdf(\"Trilepton_ML.h5\", key=File+Period) # Save dataframe to file.\n\n print(newdf.info(verbose=True))\n #print(newdf[\"target\"].value_counts())\n \n del(newdf) # Free up memory", "def as_DF(self):\n\n gs_df = pd.DataFrame(self.P, columns=self.xvec, index=self.yvec)\n gs_df.columns.name = 'x'\n gs_df.index.name = 'y'\n\n return gs_df", "def save(self):\r\n self.df_app_data = self.df_app_data.to_csv(\"app_data.csv\", index=False)", "def df_builder(path):\n\n ###CHANGE FILE ENDING (.csv or .csv.gz)\n all_files = glob.glob(\n os.path.join(path, \"probe_data_I210.201710*.csv\")) # advisable to use os.path.join as this makes concatenation OS independent\n df_from_each_file = (pd.read_csv(f) for f in all_files)\n return pd.concat(df_from_each_file, ignore_index=True)", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def make_stats_df(self):\n columns = ['DATE', 'TEAM', 'teamId', 'R', 'HR', 'RBI', 'SBN', 'OBP', \n 'K', 'QS', 'SV', 'ERA', 'WHIP', 'MOVES', 'CHANGE']\n trimmed_table = self.parse_soup(self.stats)\n self.df_stats = pd.DataFrame(trimmed_table, columns=columns) \n # load season standings csv from file\n try: # if it already exists\n df = pd.read_csv('2016_stats.csv', index_col=0)\n except OSError:\n df = pd.DataFrame(columns=columns) # if it doesn't already exist\n df = df.append(self.df_stats)\n df.to_csv('2016_stats.csv')", "def glue_table(name: str, df: pd.DataFrame, build_path=\"_build\"):\n\n if not os.path.exists(build_path):\n os.mkdir(build_path)\n df.to_excel(os.path.join(build_path, f\"{name}.xlsx\"))\n\n glue(name, df)", "def create_dataframe(ids, names, p_links, c_links, cl_links):\n try:\n dict = {'ID':ids, 'Name': names, 'Photo':p_links, 'Flag':c_links, 'Club Logo':cl_links}\n df = pd.DataFrame(dict)\n return df\n except Exception as 
e:\n print(\"Exception creating or storing the dataframe: \" + str(e))", "def __call__(self):\n\n # create dataframes of relevant sections from the INP\n for ix, sect in enumerate(self.config['inp_sections']):\n if ix == 0:\n df = create_dataframeINP(self.inp.path, sect, comment_cols=False)\n else:\n df_other = create_dataframeINP(self.inp.path, sect, comment_cols=False)\n df = df.join(df_other)\n\n if self.rpt:\n for rpt_sect in self.config['rpt_sections']:\n df = df.join(create_dataframeRPT(self.rpt.path, rpt_sect))\n\n # add conduit coordinates\n xys = df.apply(lambda r: get_link_coords(r, self.inp.coordinates, self.inp.vertices), axis=1)\n df = df.assign(coords=xys.map(lambda x: x[0]))\n\n # make inlet/outlet node IDs string type\n df.InletNode = df.InletNode.astype(str)\n df.OutletNode = df.OutletNode.astype(str)\n\n return df", "def to_pandas(self):\n pass", "def to_pandas(self):\n pass", "def save_prediction(self, meta, y_pred, y, filename):\n df = pd.DataFrame(meta)\n df['y_pred'] = y_pred\n df['y'] = y\n print(df)\n df.loc[:, 'id'] = df.index\n self.df_to_csv(df, filename, store_header=False)", "def dataframe(self):\n return self.generator.dataframe", "def prepare_data(args):\n logger.info('Loading dataframe from %s' % args.newspath)\n df = pd.read_csv(args.newspath, encoding='gb18030')\n logger.info('Dataframe size: %d observations %d features after loaded' % (df.shape[0], df.shape[1]))\n\n # exclude rows with column source == NaN\n logger.info('Data cleansing...')\n df = df[~pd.isna(df['source'])]\n logger.info('Dataframe size: %d observations %d features after data cleansing' % (df.shape[0], df.shape[1]))\n\n # split the dataframe into training set and test set\n logger.info('Making training set & test set...')\n train_set, test_set = split_data(df)\n logger.info('Traning set size: %d' % train_set.shape[0])\n logger.info('Test set size: %d' % test_set.shape[0])\n\n # save the train set and test set to picke files\n logger.info('Save dataframes to files...')\n train_set.to_pickle(args.trainpath)\n test_set.to_pickle(args.testpath)", "def save(df, save_preprocessed_dataframe_path, name):\n\n df.to_csv(save_preprocessed_dataframe_path + name + '.csv', index=False)", "def get_training_dataframe(dataset_path, two_class=True, get_node_df=False):\n training_df = pd.DataFrame()\n graph_dict = {}\n\n\n rows_for_training_df = []\n \n if get_node_df:\n node_df = pd.DataFrame()\n rows_for_node_df = []\n\n\n # add conspiracy graphs to dataframe\n conspiracy_graphs = []\n for i in range(1,271): # 270 total\n graph_id = 'consp'+str(i)\n conspiracy_path = dataset_path + \"5g_corona_conspiracy/\"\n nodes = pd.read_csv(conspiracy_path + str(i)+ \"/nodes.csv\")\n edges = open(conspiracy_path + str(i)+ \"/edges.txt\")\n\n features, G = prepare_graph(1, nodes, edges, graph_id)\n rows_for_training_df.append(features)\n if get_node_df:\n prep_node_info(rows_for_node_df, nodes, graph_id)\n edges.close()\n conspiracy_graphs.append(G)\n graph_dict['conspiracy_graphs'] = conspiracy_graphs\n\n non_conspiracy_graphs = []\n for i in range(1,1661): # 1660 total\n graph_id = 'non_consp'+str(i)\n path = dataset_path + \"non_conspiracy/\"\n nodes = pd.read_csv(path + str(i)+ \"/nodes.csv\")\n edges = open(path + str(i)+ \"/edges.txt\")\n\n label = 0 if two_class else 3\n features, G = prepare_graph(label, nodes, edges, graph_id)\n rows_for_training_df.append(features)\n if get_node_df:\n prep_node_info(rows_for_node_df, nodes, graph_id)\n edges.close()\n edges.close()\n non_conspiracy_graphs.append(G)\n 
graph_dict['non_conspiracy_graphs'] = non_conspiracy_graphs\n\n other_conspiracy_graphs = []\n for i in range(1,398): # 397 total\n graph_id = 'other_consp'+str(i)\n path = dataset_path + \"other_conspiracy/\"\n nodes = pd.read_csv(path + str(i)+ \"/nodes.csv\")\n edges = open(path + str(i)+ \"/edges.txt\")\n\n label = 0 if two_class else 2\n features, G = prepare_graph(label, nodes, edges, graph_id)\n rows_for_training_df.append(features)\n if get_node_df:\n prep_node_info(rows_for_node_df, nodes, graph_id)\n edges.close()\n edges.close()\n other_conspiracy_graphs.append(G)\n graph_dict['other_conspiracy_graphs'] = other_conspiracy_graphs\n\n training_df = training_df.append(rows_for_training_df)\n if get_node_df:\n node_df = node_df.append(rows_for_node_df)\n return training_df, graph_dict, node_df\n return training_df, graph_dict", "def to_archive(self, filename):\n with pd.HDFStore(filename, mode=\"w\") as store:\n if self.size() > 0:\n df = pd.DataFrame.from_records(self.data.to_records(index=True))\n else:\n df = pd.DataFrame(columns=self.columns, index=self.data.index)\n store.put(ARCH_KEY, df)\n metadata = {\n k: v\n for k, v in self.__dict__.items()\n if k not in [\"data\", \"filelist\", \"conf\", \"cats\"]\n }\n metadata[\"conf\"] = getattr(self.conf, \"to_dict\", lambda: {})()\n store.get_storer(ARCH_KEY).attrs.metadata = metadata\n # Store DataFrame with categorisation data\n if self.is_categorised:\n df_cat = pd.DataFrame.from_records(self.cats.astype(\"uint8\").to_records(index=True))\n store.put(ARCH_KEY_CAT, df_cat)", "def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df" ]
[ "0.72918093", "0.6938951", "0.69203115", "0.67623717", "0.6709177", "0.661987", "0.66142595", "0.6613234", "0.65876186", "0.65471715", "0.65268254", "0.6518715", "0.6476695", "0.6442566", "0.63830763", "0.63486266", "0.6345487", "0.6316515", "0.6284121", "0.6281206", "0.6278438", "0.62686926", "0.62654364", "0.62378913", "0.6235741", "0.62254065", "0.6189013", "0.61483115", "0.6141178", "0.6137117", "0.6122946", "0.61114436", "0.61020136", "0.60915416", "0.6089382", "0.6072203", "0.6060114", "0.6057092", "0.6056945", "0.605357", "0.6052877", "0.6048948", "0.6031402", "0.6025524", "0.6002891", "0.59991556", "0.59950984", "0.5993048", "0.5986284", "0.59854627", "0.5983826", "0.59786975", "0.5974424", "0.5972987", "0.5967404", "0.59553546", "0.5950378", "0.59476876", "0.59466547", "0.5946154", "0.5943337", "0.5938696", "0.5937919", "0.5936918", "0.5934892", "0.5933923", "0.5932648", "0.59184736", "0.59182835", "0.5914071", "0.59135026", "0.5909076", "0.5908037", "0.5901011", "0.5899025", "0.5898912", "0.58956623", "0.58896893", "0.58884835", "0.58822256", "0.5874656", "0.58743656", "0.587427", "0.5873021", "0.5872103", "0.58699733", "0.58693355", "0.58660054", "0.58625996", "0.5859404", "0.58514446", "0.5850162", "0.5850162", "0.58459675", "0.58406836", "0.58389467", "0.5832244", "0.5831809", "0.5830671", "0.58274925" ]
0.6171512
27
Hexlify raw text, return hexlified text.
def hexlify(text):
    if six.PY3:
        text = text.encode('utf-8')

    hexlified = binascii.hexlify(text)

    if six.PY3:
        hexlified = hexlified.decode('utf-8')

    return hexlified
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unhexlify(text):\n unhexlified = binascii.unhexlify(text)\n\n if six.PY3:\n unhexlified = unhexlified.decode('utf-8')\n\n return unhexlified", "def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])", "def normalize(self, text):\n\n return binascii.hexlify(text)", "def to_hex(text):\n return ' '.join([hex(ord(char)) for char in unicode(text, 'UTF-8')])", "def encrypt(text):\r\n\r\n cipher = fuzz(text)\r\n return hexify(cipher)", "def test_unhexlify_not_python():\n assert '' == uflash.unhexlify(\n ':020000040003F7\\n:10E000000000000000000000000000000000000010')", "def test_hexlify():\n result = uflash.hexlify(TEST_SCRIPT)\n lines = result.split()\n # The first line should be the extended linear address, ox0003\n assert lines[0] == ':020000040003F7'\n # There should be the expected number of lines.\n assert len(lines) == 5", "def hex(self):\n return binascii.hexlify(self.data)", "def test_unhexlify():\n hexlified = uflash.hexlify(TEST_SCRIPT)\n unhexlified = uflash.unhexlify(hexlified)\n assert unhexlified == TEST_SCRIPT.decode('utf-8')", "def hexlify(self: str, verbose=False):\n nbytes = len(_chunk_bs(self))\n buf = b''\n strlen = ''\n for b in to_bytes(_chunk_bs(self)):\n buf+=b\n# for s in _from_list(_chunk_bs(self)):\n# strlen+=f'{ _bit_length(s): 02d}'\n if verbose:\n for n in range(nbytes):\n strlen += f'{_bit_length(_from_list(_chunk_bs(self))[n])} @[{n}] '\n print(strlen)\n return buf", "def _hexlify(data):\n if data is None:\n return None\n elif isinstance(data, bytes) or isinstance(data, bytearray):\n return data.hex()\n elif isinstance(data, list):\n return [_hexlify(item) for item in data]\n elif isinstance(data, dict):\n return {k: _hexlify(v) for k, v in data.items()}\n else:\n return data", "def hexify_word(word):\r\n\r\n return ''.join([str(hex(ord(c))[2::]) for c in word])", "def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")", "def as_hex(self):\n return binascii.hexlify(self.as_bytes()).decode('ascii')", "def hex(cls, x):\n return c_hex(x)", "def _dehex(s):\n import re\n import binascii\n\n # Remove all non-hexadecimal digits\n s = re.sub(br'[^a-fA-F\\d]', b'', s)\n # binscii.unhexlify works in Python 2 and Python 3 (unlike\n # thing.decode('hex')).\n return binascii.unhexlify(s)", "def test_unhexlify_bad_unicode():\n assert '' == uflash.unhexlify(\n ':020000040003F7\\n:10E000004D50FFFFFFFFFFFFFFFFFFFFFFFFFFFFFF')", "def hexstring(self):\n if self.current != b\"<\":\n self.on_parser_error(\"Hexadecimal string expected\")\n self.next()\n token = b''\n self.maybe_spaces_or_comments()\n while self.is_hex_digit:\n token += self.next()\n self.maybe_spaces_or_comments()\n\n ch = self.next()\n if ch != b'>':\n self.on_parser_error(\"Wrong hexadecimal string\")\n if len(token) % 2:\n # if there is an odd number of digits - the last one should be assumed 0\n token += b'0'\n return HexString(token.decode(DEFAULT_ENCODING).upper())", "def preprocess_hashes(tex):\n blocks = catlist()\n rx = hash_rx\n m = rx.search(tex)\n while m:\n if len(m.group(2)) > 40:\n tex2htm.warn(\"Possible runaway hash: {}\".format(text_sample(m.group(2))))\n raise(None)\n blocks.append(tex[:m.start()])\n blocks.append(re.sub(r'(^|[^\\\\])%', r'\\1\\%', m.group(0)))\n tex = tex[m.end():]\n m = rx.search(tex)\n blocks.append(tex)\n return \"\".join(blocks)", "def ascii_to_phred64(c):\r\n return ascii_to_phred(c, 64)", "def remove_hex(text): \n return re.sub(r'&.*?;', r'', text)", "def test_embed_hex():\n python = 
uflash.hexlify(TEST_SCRIPT)\n result = uflash.embed_hex(uflash._RUNTIME, python)\n # The resulting hex should be of the expected length.\n assert len(result) == len(python) + len(uflash._RUNTIME) + 1 # +1 for \\n\n # The hex should end with a newline '\\n'\n assert result[-1:] == '\\n'\n # The Python hex should be in the correct location.\n py_list = python.split()\n result_list = result.split()\n start_of_python_from_end = len(py_list) + 5\n start_of_python = len(result_list) - start_of_python_from_end\n assert result_list[start_of_python:-5] == py_list\n # The firmware should enclose the Python correctly.\n firmware_list = uflash._RUNTIME.split()\n assert firmware_list[:-5] == result_list[:-start_of_python_from_end]\n assert firmware_list[-5:] == result_list[-5:]", "def hex(string):\n return string.encode('hex')", "def tohex(data: str) -> str:\n match = re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)\n if match:\n return data.lower()\n match = re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data)\n if not match:\n raise ValueError(f\"Required hex of the form `0x` or `H` found {data}\")\n match = re.match(r\"^[0-9a-fA-F]+\", data)\n return f\"0x{match.group().lower()}\"", "def entity_encode_hex(input, errors='strict'):\n output = ''\n for character in input:\n if character in ('&', '<', '>'):\n output += \"&#x%s;\" % character.encode('hex')\n else:\n output += character\n\n return (output, len(input))", "def str_sha(raw_sha):\n return hexlify(raw_sha)[:12]", "def hexify(c):\n try:\n s = c.encode(\"utf-8\").encode(\"hex\")\n except UnicodeDecodeError:\n s = 0\n n = len(s)\n if n <= 2: return s\n a = ' - '.join([s[i:i+2] for i in range(0,n,2)])\n return a[:-1]", "def hexdigest(self):\n # bytes.hex() is simpler, but not available For Python <= 3.4\n return \"\".join(\"{0:0>2x}\".format(b) for b in self.digest())", "def _encode_text(self):\n\n print(f\"Hex encode; received message is {self.message}\")\n return self.message.encode(\"utf-8\").hex()", "def toHexa(data):\n\tresult = \"\"\n\tif isBytes(data):\n\t\tdata = data.decode(\"latin-1\")\n\tfor i in data:\n\t\tresult += \"\\\\x%02X\"%ord(i)\n\treturn result", "def hexify(buffer):\n return ''.join('%02x' % ord(c) for c in buffer)", "def hexdump(data):\n\n def is_hexdump_printable(b):\n return b in b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\\\|\\'\";:/?.,<>'\n\n lines = []\n chunks = (data[i*16:i*16+16] for i in range((len(data) + 15) // 16))\n\n for i, chunk in enumerate(chunks):\n hexblock = ['{:02x}'.format(b) for b in chunk]\n left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])\n asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' 
for b in chunk)\n lines.append('{:08x} {:23} {:23} |{}|'.format(i*16, left, right, asciiblock))\n\n return '\\n'.join(lines)", "def _encode_code(self, text):\r\n replacements = [\r\n # Encode all ampersands; HTML entities are not\r\n # entities within a Markdown code span.\r\n ('&', '&amp;'),\r\n # Do the angle bracket song and dance:\r\n ('<', '&lt;'),\r\n ('>', '&gt;'),\r\n ]\r\n for before, after in replacements:\r\n text = text.replace(before, after)\r\n hashed = _hash_text(text)\r\n self._escape_table[text] = hashed\r\n return hashed", "def test_hexlify_empty_script():\n assert uflash.hexlify('') == ''", "def hash_coloured_escapes(text):\n ansi_code = int(sha256(text.encode(\"utf-8\")).hexdigest(), 16) % 230\n prefix, suffix = colored(\"SPLIT\", ansi_code=ansi_code).split(\"SPLIT\")\n return prefix, suffix", "def _hex2bin(cls, h):\n b = []\n while h:\n if h[0] in ' \\n': # ignore whitespace\n h = h[1:]\n elif h[0] in '0123456789abcdef': # hex byte\n b.append(int(h[:2], 16))\n h = h[2:]\n elif h[0] == '.': # for chunk type\n b.extend(ord(h[i]) for i in range(1,5))\n h = h[5:]\n else: # for PNG magic\n b.append(ord(h[0]))\n h = h[1:]\n return ''.join(map(chr,b))", "def __set_has_hexadecimal(text=str):\n reg_ex = constants.HEXADECIMAL_REG_EX_PATTERN\n if reg_ex.search(text) is None:\n return text\n return reg_ex.sub(constants.QUESTION_HAS_HEXADECIMAL_KEY, text)", "def sha256_hexoutput(in_str):\r\n return sha256(in_str.encode('ascii')).hexdigest()", "def to_ascii(text):\n return re.sub(r'[^\\x00-\\x7F]+', ' ', text)", "def test_ascii_to_phred(self):\r\n self.assertEqual(ascii_to_phred('x', 120), 0)\r\n self.assertEqual(ascii_to_phred('x', 119), 1)", "def genHexStr(instr: str) -> str:\n\n return hashlib.md5(instr.encode(\"utf-8\")).hexdigest()", "def hexdigest(self):\n return \"\".join(\"%02x\" % ord(x)\n for x in MegaCrypto.a32_to_str(self.digest()))", "def hex_form(hash):\n final_hash = ''\n for i in range(len(hash)):\n final_hash += format(hash[i], '02x')\n return final_hash", "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def entity_decode_hex(input, errors='strict'):\n if _is_unicode(input):\n if '%' not in input:\n return s\n bits = _asciire.split(input)\n res = [bits[0]]\n append = res.append\n for i in range(1, len(bits), 2):\n append(unquote(str(bits[i])).decode('latin1'))\n append(bits[i + 1])\n return (''.join(res), len(input))\n\n preamble_regex = re.compile(r\"&#x\", flags=re.I)\n bits = preamble_regex.split(input)\n # fastpath\n if len(bits) == 1:\n return input\n res = [bits[0]]\n append = res.append\n for item in bits[1:]:\n try:\n append(_hextochr[item[:2]])\n append(item[3:])\n except KeyError:\n append('&#x')\n append(item)\n append(';')\n\n return (''.join(res), len(input))", "def handle_hex_stream(data):\n stream = extract_stream(data)\n if stream is not False: # stream can actually be null (HexStream = \\\\x00\\\\x00). 
so we explicitly check for False\n is_binary = not all(c in string.printable for c in stream)\n return stream.encode(\"base64\") if is_binary else stream\n return data", "def hash(self, text):\n hashval = 0\n for i in xrange(0, len(text)):\n hashval += ord(text[i])**i\n return hashval", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s)\n return h.hexdigest()", "def _convert_hex(self, hex_value):\n if not isinstance(hex_value, str):\n raise TypeError(\"given hex value must be str\")\n m = HEX_RE.match(hex_value)\n if m is None:\n raise ValueError(\"given string does not seem to be Python hex\")\n sign_char, base, exp_sign, exp = [m.group(i) for i in range(1,5)]\n new_sign = \"+\" if sign_char is None else sign_char\n # Line below converts exp to hex value. The \"0x\" prefix is removed \n # with [2:]. The exponent is padded with (too many) zeros (Stata \n # requires 3 digits), and reduced to last 3 digits with [-3:].\n new_exp = (\"000\" + hex(int(exp))[2:])[-3:]\n return \"\".join((new_sign, base, 'X', exp_sign, new_exp))", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def hex_str (self):\n return \"#%02X%02X%02X\"%(self._intern[0],self._intern[1],self._intern[2])", "def convertirHexadecimal(self):\n self.convertir(lambda c: hex(ord(c))[2:], sep=' ')", "def hex_hash(s):\n if not s:\n return '0'\n s = s.encode('utf-8')\n return '{:x}'.format(adler32(s) & 0xffffffff)", "def _hash_content(data):\n return hashlib.sha512(str(data).encode('utf-8')).hexdigest()", "def ascii_convert(the_bytes: bytes):\n return ANSI_ESCAPE_B.sub(rb\"\", the_bytes).decode(\"utf-8\")", "def b2a_hex(data):\n\n return binascii.b2a_hex(data)", "def unH(s):\n return ''.join([chr(int(s[i:i+2],16)) for i in range(2, len(s),2)])", "def as_hex(self, *, align='left'):\n return self.as_bytes(align=align).hex()", "def bytes_to_hexstr(data: bytes) -> str:\n return ''.join(int_to_hexstr(byte) for byte in data)", "def from_hex_str(value):\n \n return SHex(value)", "def toHex(self):\n return hexlify(self.serialize()).decode(\"utf-8\")", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode())\n return h.hexdigest()", "def HashForText (text):\n if isinstance(text, six.text_type):\n text = text.encode('utf-8')\n return __Hasher(text).hexdigest()", "def test_bytes_to_intel_hex():\n data = [1, 2, 3, 4, 5]\n expected_hex_str = \"\\n\".join([\":050000000102030405EC\", INTEL_HEX_EOF])\n\n result = cmds._bytes_to_intel_hex(data=data)\n\n assert expected_hex_str == result", "def hexbyte(string):\n#\treturn repr(string)\n\ts = \"\"\n\tfor i in string:\n\t\tif (ord(i) >= ord('A') and ord(i) <= ord('z')) \\\n\t\t\tor (ord(i) >= ord('0') and ord(i) <= ord('9')) \\\n\t\t\tor (ord(i) == ord(\" \")):\n\t\t\ts += \"%s\" % i\n\t\telse:\n\t\t\ts += \"\\\\x%02x\" % ord(i)\n\n#\t\ts += \" \"\n\treturn s", "def hex_str (self):\n return \"#%02X%02X%02X\"%(self.r, self.g, self.b)", "def hex(space, w_val):\n return space.hex(w_val)", "def hexDump(bytes):\n for i in range(len(bytes)):\n sys.stdout.write(\"%2x \" % (ord(bytes[i])))\n if (i+1) % 8 == 0:\n print repr(bytes[i-7:i+1])\n\n if(len(bytes) % 8 != 0):\n print string.rjust(\"\", 11), repr(bytes[i-7:i+1])", "def wkb_hex(self): # -> str:\n ...", "def test_phred_to_ascii(self):\r\n self.assertEqual(phred_to_ascii(0, 120), 'x')\r\n self.assertEqual(phred_to_ascii(1, 119), 'x')", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def encode_hex(b):\n if isinstance(b, str):\n b = bytes(b, \"utf-8\")\n if 
isinstance(b, bytes):\n return str(hexlify(b), \"utf-8\")\n raise TypeError(\"Value must be an instance of str or bytes\")", "def bytes_to_hex(s):\n\n return s.encode(\"hex\")", "def _hashsanitize(bytesin):\n # Used for converting raw byte data into a hex string. If the byte isn't a hex digit, use nothing instead.\n return \"\".join([x if x.lower() in 'abcdef0123456789' else '' for x in bytesin])", "def text(self, text: str) -> bytes:\n\n buffer = text.encode(\"utf-8\")\n return struct.pack(\">i\", len(buffer)) + buffer", "def hx(i):\n a = hex(i)[2:]\n if len(a)<2: a = ''.join(['0',a])\n return a", "def dump_hex(output_file, input_file):\n\n with open(input_file, 'rb') as ifile, open(output_file, 'wb') as ofile:\n while True:\n word = ifile.read(4)\n if not word:\n break\n\n ofile.write(binascii.hexlify(word))\n ofile.write(b'\\n')", "def decode_and_hexlify_hashes(hash_str: str) -> typing.Union[str, None]:\n\n return binascii.hexlify(base64.b64decode(hash_str.encode())).decode() if hash_str else None", "def longtohex(n):\n\n plain=(re.match(r\"0x([0-9A-Fa-f]*)l?$\", hex(n), re.I).group(1)).lower()\n return \"0x\" + plain", "def w__format_hex(self, string):\n d = map(None, string)\n d = map(ord, d)\n d = map(lambda x: \"%02x\" % x, d)\n return ' '.join(d)", "def to_h(self):\n return str(self).encode('hex')", "def hash_coloured(text):\n ansi_code = int(sha256(text.encode(\"utf-8\")).hexdigest(), 16) % 230\n return colored(text, ansi_code=ansi_code)", "def codeline_to_hexadecimal(codeline, data):\n # Extract each of the three locations in turn, the first two as data\n # items and the third as an address\n first_op = data[codeline[0]][0]\n second_op = data[codeline[1]][0]\n third_op = codeline[2]\n # Create a binary string from the operands\n binary_string = \"{0}{1}{2}00\".format(\n format(first_op, \"010b\"), format(second_op, \"010b\"),\n format(third_op, \"010b\"))\n # Take the binary string and return it as a 32-bit hexadecimal string.\n return format(int(binary_string, base=2), \"#010x\")", "def base64_to_hex(b64_string):\n # Remove hex encoding\n unencoded_string = base64.b64decode(b64_string)\n # Encode base64 and return\n return binascii.hexlify(unencoded_string)", "def c_hex(x):\n #print(\"c_hex\", x, type(x))\n h = hex(x & ((1 << INT_BITS) - 1))\n while h[-1:] == \"L\": h = h[:-1] # for python 2\n return h + UINT_SUFFIX", "def encodeText(text):\r\n#\treturn repr( quote_plus(text.replace(\"'\", '\"')) )\r\n\ttry:\r\n\t\treturn repr( quote_plus(text.replace(\"'\", '\"').encode('utf-8')) )\r\n\texcept:\r\n\t\tlogError(\"encodeText()\")\r\n\treturn repr(text.replace(\"'\", '\"'))", "def print_as_hex(s):\n print(\":\".join(\"{0:x}\".format(ord(c)) for c in s))", "def string_raw(self):\n return \"x%x\" % self.encoded", "def encodeHex(s):\n encoder = codecs.getencoder('hex_codec')\n hex, length = encoder(s)\n assert len(s) == length, \"Hex encoding incomplete\"\n return hex", "def printable_hash(h):\n return int(h).to_bytes(32, byteorder='big', signed=False).hex()", "def elf_hash(s):\n h = 0\n for c in s:\n h = (h << 4) + ord(c)\n t = (h & 0xF0000000)\n if t != 0:\n h = h ^ (t >> 24)\n h = h & ~t\n return h", "def int2hex(n: int) -> str:", "def test_phred_to_ascii64(self):\r\n self.assertEqual(phred_to_ascii64(0), '@')\r\n self.assertEqual(phred_to_ascii64(30), '^')", "def test_bytes_to_pretty_hex():\n data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n expected = (\n \"0000 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 \"\n \"|................|\\n\"\n )\n\n result = 
cmds._bytes_to_pretty_hex(data=data)\n\n assert expected == result", "def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode", "def hash(plainString):\n result = plainString\n for i in range(0,12):\n result = hashHelp(result)\n return result", "def hex_dump(string):\n return ' '.join([\"%0.2X\" % ord(x) for x in string])", "def look(source_path):\r\n hex_data_formated = get(source_path, \"f\")\r\n hex_list = hex_data_formated.split(\" \")\r\n\r\n result = \" 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F\\n\"\r\n for index, value in enumerate(hex_list):\r\n if index == 0:\r\n result += (\"0x000000 \" + value + \" \")\r\n\r\n elif index % 16 == 15:\r\n address = hex(index + 1)\r\n result += (value + \"\\n\" + address[0:2] + address[2:].zfill(6) + \" \")\r\n\r\n elif index % 16 == 7:\r\n result += (value + \" \")\r\n\r\n else:\r\n result += (value + \" \")\r\n\r\n print(result)", "def hexcode(self):\n hexc = \"#%.02X%.02X%.02X\" % (int(self.rgb_255[0]), int(self.rgb_255[1]), int(self.rgb_255[2]))\n return hexc", "def ansi_escape(text: object) -> str:\n return str(text).replace(\"\\x1b\", \"?\").replace(\"\\b\", \"?\")" ]
[ "0.7632533", "0.7439492", "0.70764095", "0.65433866", "0.6380229", "0.61830765", "0.61594963", "0.61447966", "0.6132453", "0.6124988", "0.61229116", "0.6114684", "0.59884095", "0.5947459", "0.59325373", "0.5788667", "0.5782058", "0.57663274", "0.5741124", "0.57352805", "0.57184196", "0.569126", "0.5679554", "0.56763494", "0.5665832", "0.56610286", "0.5636232", "0.56199336", "0.5613361", "0.56061125", "0.55995613", "0.5596987", "0.5582064", "0.5571382", "0.55577147", "0.55570304", "0.5555", "0.5551178", "0.55373037", "0.5524953", "0.5517788", "0.5492711", "0.5448082", "0.54381806", "0.54351413", "0.5428333", "0.54275894", "0.54261357", "0.54196656", "0.5413973", "0.5410879", "0.54056716", "0.540348", "0.53913975", "0.53872806", "0.5387104", "0.53835076", "0.53742814", "0.53737146", "0.53733015", "0.5371563", "0.53584415", "0.5357567", "0.5348881", "0.53463143", "0.53372604", "0.5333345", "0.53272647", "0.5326326", "0.5326077", "0.5325715", "0.53086215", "0.52947664", "0.5287426", "0.52853036", "0.52722824", "0.5266037", "0.5263579", "0.52596605", "0.5252476", "0.52445513", "0.5228706", "0.52240354", "0.5220531", "0.5208807", "0.5201498", "0.51985955", "0.51960015", "0.5190079", "0.5186943", "0.51836264", "0.518356", "0.5178052", "0.51759917", "0.51697165", "0.5167198", "0.51614773", "0.51598424", "0.5152149", "0.5143026" ]
0.78595716
0
Unhexlify raw text, return unhexlified text.
def unhexlify(text):
    unhexlified = binascii.unhexlify(text)

    if six.PY3:
        unhexlified = unhexlified.decode('utf-8')

    return unhexlified
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hexlify(text):\n if six.PY3:\n text = text.encode('utf-8')\n\n hexlified = binascii.hexlify(text)\n\n if six.PY3:\n hexlified = hexlified.decode('utf-8')\n\n return hexlified", "def test_unhexlify_not_python():\n assert '' == uflash.unhexlify(\n ':020000040003F7\\n:10E000000000000000000000000000000000000010')", "def test_unhexlify():\n hexlified = uflash.hexlify(TEST_SCRIPT)\n unhexlified = uflash.unhexlify(hexlified)\n assert unhexlified == TEST_SCRIPT.decode('utf-8')", "def normalize(self, text):\n\n return binascii.hexlify(text)", "def test_unhexlify_bad_unicode():\n assert '' == uflash.unhexlify(\n ':020000040003F7\\n:10E000004D50FFFFFFFFFFFFFFFFFFFFFFFFFFFFFF')", "def _hexlify(data):\n if data is None:\n return None\n elif isinstance(data, bytes) or isinstance(data, bytearray):\n return data.hex()\n elif isinstance(data, list):\n return [_hexlify(item) for item in data]\n elif isinstance(data, dict):\n return {k: _hexlify(v) for k, v in data.items()}\n else:\n return data", "def test_hexlify():\n result = uflash.hexlify(TEST_SCRIPT)\n lines = result.split()\n # The first line should be the extended linear address, ox0003\n assert lines[0] == ':020000040003F7'\n # There should be the expected number of lines.\n assert len(lines) == 5", "def _dehex(s):\n import re\n import binascii\n\n # Remove all non-hexadecimal digits\n s = re.sub(br'[^a-fA-F\\d]', b'', s)\n # binscii.unhexlify works in Python 2 and Python 3 (unlike\n # thing.decode('hex')).\n return binascii.unhexlify(s)", "def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])", "def test_hexlify_empty_script():\n assert uflash.hexlify('') == ''", "def CUnescape(text):\n # type: (str) -> bytes\n\n def ReplaceHex(m):\n # Only replace the match if the number of leading back slashes is odd. 
i.e.\n # the slash itself is not escaped.\n if len(m.group(1)) & 1:\n return m.group(1) + 'x0' + m.group(2)\n return m.group(0)\n\n # This is required because the 'string_escape' encoding doesn't\n # allow single-digit hex escapes (like '\\xf').\n result = _CUNESCAPE_HEX.sub(ReplaceHex, text)\n\n return (result.encode('utf-8') # Make it bytes to allow decode.\n .decode('unicode_escape')\n # Make it bytes again to return the proper type.\n .encode('raw_unicode_escape'))", "def unH(s):\n return ''.join([chr(int(s[i:i+2],16)) for i in range(2, len(s),2)])", "def str_sha(raw_sha):\n return hexlify(raw_sha)[:12]", "def hexlify(self: str, verbose=False):\n nbytes = len(_chunk_bs(self))\n buf = b''\n strlen = ''\n for b in to_bytes(_chunk_bs(self)):\n buf+=b\n# for s in _from_list(_chunk_bs(self)):\n# strlen+=f'{ _bit_length(s): 02d}'\n if verbose:\n for n in range(nbytes):\n strlen += f'{_bit_length(_from_list(_chunk_bs(self))[n])} @[{n}] '\n print(strlen)\n return buf", "def unescape(msg):\n skip = False\n unescaped = bytearray()\n\n for i in range(len(msg)):\n\n if not skip and msg[i] is 0x7D:\n\n if not (i + 1) >= len(msg):\n unescaped.append(msg[i + 1] ^ 0x20)\n skip = True\n\n elif not skip:\n unescaped.append(msg[i])\n else:\n skip = False\n\n return unescaped", "def decode_and_hexlify_hashes(hash_str: str) -> typing.Union[str, None]:\n\n return binascii.hexlify(base64.b64decode(hash_str.encode())).decode() if hash_str else None", "def _grab_unascii(self):\r\n unascii = \"\"\r\n while self._char != -1 and not self._char in \"\\x00\\t\\r\\n\":\r\n unascii += self._char\r\n self._get_char()\r\n return unascii", "def unescape(input):\n output=atpic.cleaner_escape.unescape(input)\n return output", "def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")", "def bh2u(x: bytes) -> str:\n return hfu(x).decode('ascii')", "def _hashsanitize(bytesin):\n # Used for converting raw byte data into a hex string. 
If the byte isn't a hex digit, use nothing instead.\n return \"\".join([x if x.lower() in 'abcdef0123456789' else '' for x in bytesin])", "def entity_decode_hex(input, errors='strict'):\n if _is_unicode(input):\n if '%' not in input:\n return s\n bits = _asciire.split(input)\n res = [bits[0]]\n append = res.append\n for i in range(1, len(bits), 2):\n append(unquote(str(bits[i])).decode('latin1'))\n append(bits[i + 1])\n return (''.join(res), len(input))\n\n preamble_regex = re.compile(r\"&#x\", flags=re.I)\n bits = preamble_regex.split(input)\n # fastpath\n if len(bits) == 1:\n return input\n res = [bits[0]]\n append = res.append\n for item in bits[1:]:\n try:\n append(_hextochr[item[:2]])\n append(item[3:])\n except KeyError:\n append('&#x')\n append(item)\n append(';')\n\n return (''.join(res), len(input))", "def unescape(escaped_string):\n\n hex_msg = \"^x not followed by a valid 2-digit hex number\"\n\n token_start = 0\n l = len(escaped_string)\n i = 0\n output = []\n while i < l:\n c = escaped_string[i]\n\n if c in _unprintables:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, \"unprintable character\",\n token_start, i)\n elif c != \"^\":\n output.append(c)\n else:\n if i == l - 1:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, \"caret at end of string\",\n token_start, i)\n i += 1\n next_c = escaped_string[i]\n if next_c not in \"'\\\"^x\":\n if next_c in _unprintables:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string,\n \"^ followed by unprintable character\",\n token_start, i)\n else:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string,\n \"^ followed by invalid character %s\" % (next_c,),\n token_start, i)\n if next_c != 'x':\n output.append(next_c)\n else:\n if i >= l - 2:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, hex_msg, token_start, i)\n i += 1\n hex1 = escaped_string[i]\n i += 1\n hex2 = escaped_string[i]\n if hex1 not in _ALLOWED_SAMPLE_HEX_DIGITS:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, hex_msg, token_start, i - 1)\n if hex2 not in _ALLOWED_SAMPLE_HEX_DIGITS:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, hex_msg, token_start, i)\n val = int(hex1 + hex2, 16)\n output.append(chr(val))\n # incrementing i should happen at the end of the loop body for\n # all paths\n i += 1\n return ''.join(output)", "def unobscure(obscured: bytes) -> bytes:\n return decompress(b64d(obscured))", "def remove_hex(text): \n return re.sub(r'&.*?;', r'', text)", "def to_hex(text):\n return ' '.join([hex(ord(char)) for char in unicode(text, 'UTF-8')])", "def cook(raw):\n if sys.version_info[0] < 3:\n # python 2\n if isinstance(raw, str):\n try:\n cooked = raw.decode('utf-8')\n except UnicodeDecodeError:\n cooked = raw.decode('ascii', 'ignore')\n else:\n cooked = raw\n else:\n # python 3\n if isinstance(raw, bytes):\n try:\n cooked = raw.decode('utf-8')\n except UnicodeDecodeError:\n cooked = raw.decode('ascii', 'ignore')\n else:\n cooked = raw\n return cooked", "def unescape(text):\r\n\r\n def fixup(m):\r\n text = m.group(0)\r\n if text[:2] == '&#':\r\n try:\r\n if text[:3] == '&#x':\r\n return unichr(int(text[3:-1], 16)).encode('utf-8')\r\n return unichr(int(text[2:-1])).encode('utf-8')\r\n except ValueError:\r\n logger.info('error de valor')\r\n\r\n else:\r\n try:\r\n import htmlentitydefs\r\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode('utf-8')\r\n except 
KeyError:\r\n logger.info('keyerror')\r\n except:\r\n pass\r\n\r\n return text\r\n\r\n return re.sub('&#?\\\\w+;', fixup, text)", "def str_to_raw(s):\n raw_map = {8:r'\\b', 7:r'\\a', 12:r'\\f', 10:r'\\n', 13:r'\\r', 9:r'\\t', 11:r'\\v'}\n return r''.join(i if ord(i) > 32 else raw_map.get(ord(i), i) for i in s)", "def hex(self):\n return binascii.hexlify(self.data)", "def to_ascii(text):\n return re.sub(r'[^\\x00-\\x7F]+', ' ', text)", "def force_ascii(text):\n return \"\".join([c for c in text if ord(c) < 128])", "def unescape(text):\n import re, htmlentitydefs\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character ref\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1],1))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n #named entity\n try:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text\n return re.sub(\"&#?\\w+;\", fixup, text)", "def handle_hex_stream(data):\n stream = extract_stream(data)\n if stream is not False: # stream can actually be null (HexStream = \\\\x00\\\\x00). so we explicitly check for False\n is_binary = not all(c in string.printable for c in stream)\n return stream.encode(\"base64\") if is_binary else stream\n return data", "def b2a(b):\n return binascii.hexlify(b)", "def hexify_word(word):\r\n\r\n return ''.join([str(hex(ord(c))[2::]) for c in word])", "def html_unescape(text):\n return html.unescape(text)", "def test_embed_hex():\n python = uflash.hexlify(TEST_SCRIPT)\n result = uflash.embed_hex(uflash._RUNTIME, python)\n # The resulting hex should be of the expected length.\n assert len(result) == len(python) + len(uflash._RUNTIME) + 1 # +1 for \\n\n # The hex should end with a newline '\\n'\n assert result[-1:] == '\\n'\n # The Python hex should be in the correct location.\n py_list = python.split()\n result_list = result.split()\n start_of_python_from_end = len(py_list) + 5\n start_of_python = len(result_list) - start_of_python_from_end\n assert result_list[start_of_python:-5] == py_list\n # The firmware should enclose the Python correctly.\n firmware_list = uflash._RUNTIME.split()\n assert firmware_list[:-5] == result_list[:-start_of_python_from_end]\n assert firmware_list[-5:] == result_list[-5:]", "def UnescapeHTMLEntities(self, data):\n if '#39' not in htmlentitydefs.name2codepoint:\n htmlentitydefs.name2codepoint['#39'] = 39\n return re.sub('&(%s);' % '|'.join(htmlentitydefs.name2codepoint),\n lambda m: unichr(htmlentitydefs.name2codepoint[m.group(1)]),\n data)", "def ascii_convert(the_bytes: bytes):\n return ANSI_ESCAPE_B.sub(rb\"\", the_bytes).decode(\"utf-8\")", "def _unquote(src, encoding=\"utf-8\"):\n return urllib.unquote(src).decode(encoding)", "def de_hex(msg):\n try:\n return bytes.fromhex(msg).decode('utf-8')\n except (UnicodeDecodeError, ValueError):\n print('Invalid hexadecimal-encoded string')", "def decode_high(self, text):\n h = HTMLParser()\n text = '&#%s;' % text\n return h.unescape(text)", "def parse_sysex_string(s):\n return binascii.unhexlify(s.replace(' ', ''))", "def normalize_input_shellcode(shellcode):\n shellcode = shellcode.replace(' ', '')\n shellcode = shellcode.replace('\\\\x', '')\n shellcode = shellcode.replace('\\\\X', '')\n return shellcode", "def HtmlUnescape(text):\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n 
try:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)", "def __unicode_to_ascii(text):\n line = unicodedata.normalize('NFKD', text)\n return ''.join(c for c in line if not unicodedata.combining(c))", "def _hex2bin(cls, h):\n b = []\n while h:\n if h[0] in ' \\n': # ignore whitespace\n h = h[1:]\n elif h[0] in '0123456789abcdef': # hex byte\n b.append(int(h[:2], 16))\n h = h[2:]\n elif h[0] == '.': # for chunk type\n b.extend(ord(h[i]) for i in range(1,5))\n h = h[5:]\n else: # for PNG magic\n b.append(ord(h[0]))\n h = h[1:]\n return ''.join(map(chr,b))", "def unescape(text):\n if isinstance(text, list):\n for i, t in enumerate(text):\n t = t.replace(r'&amp;', r'\\&')\n t = t.replace(r'&lt;', r'<')\n t = t.replace(r'&gt;', r'>')\n text[i] = t\n else:\n text = text.replace(r'&amp;', r'\\&')\n text = text.replace(r'&lt;', r'<')\n text = text.replace(r'&gt;', r'>')\n return text", "def removeNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else '' for i in text])", "def unhexchar(c):\n c = c[:1]\n if \"abcdefABCDEF0123456789\".find(c) != -1:\n return int(c,16)\n else:\n return None", "def tohex(data: str) -> str:\n match = re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)\n if match:\n return data.lower()\n match = re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data)\n if not match:\n raise ValueError(f\"Required hex of the form `0x` or `H` found {data}\")\n match = re.match(r\"^[0-9a-fA-F]+\", data)\n return f\"0x{match.group().lower()}\"", "def strip_escape(data):\n m = excel_escape_re.match(data)\n if m:\n return m.group(1)\n else:\n return data", "def _unescape_str(text):\n text = text.decode('utf-8') if isinstance(text, six.binary_type) else text\n tokens = []\n i = 0\n basicstr_re = re.compile(r'[^\"\\\\\\000-\\037]*')\n unicode_re = re.compile(r'[uU]((?<=u)[a-fA-F0-9]{4}|(?<=U)[a-fA-F0-9]{8})')\n escapes = {\n 'b': '\\b',\n 't': '\\t',\n 'n': '\\n',\n 'f': '\\f',\n 'r': '\\r',\n '\\\\': '\\\\',\n '\"': '\"',\n '/': '/',\n \"'\": \"'\"\n }\n while True:\n m = basicstr_re.match(text, i)\n i = m.end()\n tokens.append(m.group())\n if i == len(text) or text[i] != '\\\\':\n break\n else:\n i += 1\n if unicode_re.match(text, i):\n m = unicode_re.match(text, i)\n i = m.end()\n tokens.append(six.unichr(int(m.group(1), 16)))\n else:\n if text[i] not in escapes:\n raise BadEscapeCharacter\n tokens.append(escapes[text[i]])\n i += 1\n return ''.join(tokens)", "def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r", "def preprocess_hashes(tex):\n blocks = catlist()\n rx = hash_rx\n m = rx.search(tex)\n while m:\n if len(m.group(2)) > 40:\n tex2htm.warn(\"Possible runaway hash: {}\".format(text_sample(m.group(2))))\n raise(None)\n blocks.append(tex[:m.start()])\n blocks.append(re.sub(r'(^|[^\\\\])%', r'\\1\\%', m.group(0)))\n tex = tex[m.end():]\n m = rx.search(tex)\n blocks.append(tex)\n return \"\".join(blocks)", "def normalizeRawFromHeader(value):\n return value.replace('\\n', '').replace('\\r', '').strip()", "def normalize(text):\n text = text.encode('utf-8')\n # Python idiom to remove extraneous spaces\n text = ' '.join(text.split())\n return text.strip('\"')", "def _pythonifyString(s):\n\n if \"\\x00\" in s:\n s = s[:s.index(\"\\x00\")]\n return s", "def encrypt(text):\r\n\r\n cipher = fuzz(text)\r\n return hexify(cipher)", "def toHexa(data):\n\tresult = \"\"\n\tif isBytes(data):\n\t\tdata = 
data.decode(\"latin-1\")\n\tfor i in data:\n\t\tresult += \"\\\\x%02X\"%ord(i)\n\treturn result", "def sanitize(buf,\n backspaces=['\\x08\\x1b[K', '\\x08 \\x08'],\n escape_regex=re.compile(r'\\x1b(\\[|\\]|\\(|\\))[;?0-9]*[0-9A-Za-z](.*\\x07)?')):\n # Filter out control characters\n\n # First, handle the backspaces.\n for backspace in backspaces:\n try:\n while True:\n ind = buf.index(backspace)\n buf = ''.join((buf[0:ind-1],buf[ind+len(backspace):]))\n except:\n pass\n\n strip_escapes = escape_regex.sub('',buf)\n\n # strip non-printable ASCII characters\n\n clean = ''.join([x for x in strip_escapes if is_printable(x)])\n return clean", "def unquote(cls, value):\n if six.PY2:\n return unquote(value).decode(\"utf8\")\n else:\n return unquote(value.decode(\"ascii\"))", "def base64_to_hex(b64_string):\n # Remove hex encoding\n unencoded_string = base64.b64decode(b64_string)\n # Encode base64 and return\n return binascii.hexlify(unencoded_string)", "def unquote_safe(s, unsafe_list):\n # note: this build utf8 raw strings ,then does a .decode('utf8') at the end.\n # as a result it's doing .encode('utf8') on each block of the string as it's processed.\n res = _utf8(s).split('%')\n for i in xrange(1, len(res)):\n item = res[i]\n try:\n raw_chr = _hextochr[item[:2]]\n if raw_chr in unsafe_list or ord(raw_chr) < 20:\n # leave it unescaped (but uppercase the percent escape)\n res[i] = '%' + item[:2].upper() + item[2:]\n else:\n res[i] = raw_chr + item[2:]\n except KeyError:\n res[i] = '%' + item\n except UnicodeDecodeError:\n # note: i'm not sure what this does\n res[i] = unichr(int(item[:2], 16)) + item[2:]\n o = \"\".join(res)\n return _unicode(o)", "def a2b(a):\n return binascii.unhexlify(a)", "def ascii_to_phred64(c):\r\n return ascii_to_phred(c, 64)", "def parse_text(text):\n return str(str(text).encode(\"ascii\", \"ignore\")).replace(\"\\\\n\",\"\\n\").replace(\"b'\",\"\")", "def surrogate(text):\n if isinstance(text, bytes):\n return text.decode('utf-8', errors='surrogateescape')\n return text", "def prepare_for_hashing(text):\n if not text:\n return ''\n return text.translate(CHARS_TO_DELETE).lower()", "def string_raw(self):\n return \"x%x\" % self.encoded", "def as_hex(self):\n return binascii.hexlify(self.as_bytes()).decode('ascii')", "def clean_string(text: str, ascii_only=False) -> str:\n done = False\n while text and not done:\n done = True\n if ((text[0] == '\"' and text[-1] == '\"') or\n (text[0] == '[' and text[-1] == ']')):\n text = text[1:-1]\n done = False\n if text[:2] == \"u'\" and text[-1] == \"'\":\n text = text[2:-1]\n done = False\n if ascii_only:\n try:\n # Python v3.7\n if text.isascii(): # type: ignore\n return text\n except AttributeError:\n # Python less than v3.7\n pass\n return ''.join(filter(lambda c: ord(c) >= 32 and ord(c) < 0x7F,\n list(text)))\n return text", "def removeUnicode(text):\n text = re.sub(r'(\\\\u[0-9A-Fa-f]+)',r'', text) \n text = re.sub(r'[^\\x00-\\x7f]',r'',text)\n return text", "def _unascii(s):\n\n # make the fast path fast: if there are no matches in the string, the\n # whole thing is ascii. On python 2, that means we're done. 
On python 3,\n # we have to turn it into a bytes, which is quickest with encode('utf-8')\n m = _U_ESCAPE.search(s)\n if not m:\n return s if PY2 else s.encode('utf-8')\n\n # appending to a string (or a bytes) is slooow, so we accumulate sections\n # of string result in 'chunks', and join them all together later.\n # (It doesn't seem to make much difference whether we accumulate\n # utf8-encoded bytes, or strings which we utf-8 encode after rejoining)\n #\n chunks = []\n\n # 'pos' tracks the index in 's' that we have processed into 'chunks' so\n # far.\n pos = 0\n\n while m:\n start = m.start()\n end = m.end()\n\n g = m.group(1)\n\n if g is None:\n # escaped backslash: pass it through along with anything before the\n # match\n chunks.append(s[pos:end])\n else:\n # \\uNNNN, but we have to watch out for surrogate pairs.\n #\n # On python 2, str.encode(\"utf-8\") will decode utf-16 surrogates\n # before re-encoding, so it's fine for us to pass the surrogates\n # through. (Indeed we must, to deal with UCS-2 python builds, per\n # https://github.com/matrix-org/python-canonicaljson/issues/12).\n #\n # On python 3, str.encode(\"utf-8\") complains about surrogates, so\n # we have to unpack them.\n c = int(g, 16)\n\n if c < 0x20:\n # leave as a \\uNNNN escape\n chunks.append(s[pos:end])\n else:\n if PY3: # pragma nocover\n if c & 0xfc00 == 0xd800 and s[end:end + 2] == '\\\\u':\n esc2 = s[end + 2:end + 6]\n c2 = int(esc2, 16)\n if c2 & 0xfc00 == 0xdc00:\n c = 0x10000 + (((c - 0xd800) << 10) |\n (c2 - 0xdc00))\n end += 6\n\n chunks.append(s[pos:start])\n chunks.append(unichr(c))\n\n pos = end\n m = _U_ESCAPE.search(s, pos)\n\n # pass through anything after the last match\n chunks.append(s[pos:])\n\n return (''.join(chunks)).encode(\"utf-8\")", "def unicode2ascii(_unicrap):\n xlate = {0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',\n 0xc6:'Ae', 0xc7:'C',\n 0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E',\n 0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',\n 0xd0:'Th', 0xd1:'N',\n 0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',\n 0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',\n 0xdd:'Y', 0xde:'th', 0xdf:'ss',\n 0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',\n 0xe6:'ae', 0xe7:'c',\n 0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e',\n 0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',\n 0xf0:'th', 0xf1:'n',\n 0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',\n 0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',\n 0xfd:'y', 0xfe:'th', 0xff:'y',\n 0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',\n 0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',\n 0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',\n 0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',\n 0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:\"'\",\n 0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',\n 0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',\n 0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',\n 0xd7:'*', 0xf7:'/'\n }\n\n s = \"\"\n for i in _unicrap:\n ordi = ord(i)\n if ordi in xlate:\n s += xlate[ordi]\n elif ordi >= 0x80:\n pass\n else:\n s += str(i)\n return s", "def hex_to_unichr(hex_string):\n if (hex_string is None) or (len(hex_string) < 1):\n return None\n if hex_string.startswith(\"U+\"):\n hex_string = hex_string[2:]\n return int_to_unichr(int(hex_string, base=16))", "def unphred_string(phred):\n arr = [(ord(c) - 33) / 30. 
for c in phred]\n return arr", "def tidy_string(s: str\n ) -> str:\n s = s.encode('ascii', errors='ignore').decode(FORMAT)\n s = s.replace(\"\\r\", \"\").replace(\"\\t\", \"\").replace('\\n', '') \n return s", "def obscure(data: bytes) -> bytes:\n return b64e(compress(data, 9))", "def decode_bytes(data: bytearray) -> str:\n pattern = re.compile('\\r', re.UNICODE)\n res = data.decode('utf-8', 'ignore')\n res = pattern.sub('', res)\n return res", "def unescape(s):\n\n\tif s is None:\n\t\treturn \"\"\n\n\t# html entities\n\ts = s.replace(\"&#13;\", \"\\r\")\n\n\t# standard html\n\ts = s.replace(\"&lt;\", \"<\")\n\ts = s.replace(\"&gt;\", \">\")\n\ts = s.replace(\"&amp;\", \"&\") # this has to be last\n\n\treturn s", "def escapeDecode(s: unicode) -> unicode:\n ...", "def rl_unescape_prompt(prompt: str) -> str:\n if rl_type == RlType.GNU:\n escape_start = \"\\x01\"\n escape_end = \"\\x02\"\n prompt = prompt.replace(escape_start, \"\").replace(escape_end, \"\")\n\n return prompt", "def unicoder(string):\n\treturn \"\\x00\".join(string) + \"\\x00\"", "def text2Int(text):\n return reduce(lambda x, y : (x << 8) + y, map(ord, text))", "def unescape(t):\r\n return (t\r\n .replace(\"&amp;\", \"&\").replace(\"&lt;\", \"<\").replace(\"&gt;\", \">\")\r\n .replace(\"&#39;\", \"´\").replace(\"&quot;\", '\"').replace('&apos;',\"'\")\r\n )", "def html_unescape(text):\n\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return chr(int(text[3:-1], 16))\n else:\n return chr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = chr(html.entities.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)", "def decode(text: str) -> str:\n # Reverse of reverse is original text.\n return encode(text)", "def clean_up_text(text):\n text = html.unescape(text)\n return remove_emoji(text)", "def value_convert(x):\n try:\n return x.decode(\"ascii\")\n except UnicodeDecodeError:\n return x.hex()", "def unquote(uri):\r\n uri = uri.encode('ascii')\r\n unquoted = urllib_unquote(uri)\r\n return unquoted.decode('utf-8')", "def unicode_unquote(value):\n return unquote(value).decode('utf-8')", "def test_unquote(self):\n fwa = FakeWikiArchivo('abcd <a href=\"/wiki/f%C3%B3u\">FooBar</a> dcba')\n _, r = self.peishranc(fwa)\n self.assertEqual(r, [(u'fóu', SCORE_PEISHRANC)])", "def hexify(c):\n try:\n s = c.encode(\"utf-8\").encode(\"hex\")\n except UnicodeDecodeError:\n s = 0\n n = len(s)\n if n <= 2: return s\n a = ' - '.join([s[i:i+2] for i in range(0,n,2)])\n return a[:-1]", "def decode_hex(self, s):\n return self.transcode(int(s, 16))", "def _decode_html_entities(text: str) -> str:\n return html.unescape(text)", "def remove_non_ascii(text):\n return re.sub(r'[^\\x00-\\x7F]', ' ', text)", "def unescape_tweet(tweet):\r\n return html.unescape(tweet)", "def __html_unescape(self, text):\n\n return re.sub(\"&(%s);\" % \"|\".join(name2codepoint),\n lambda m: unichr(name2codepoint[m.group(1)]),\n text)" ]
[ "0.76805043", "0.724818", "0.71128637", "0.6909091", "0.6862392", "0.6219935", "0.6197366", "0.59515", "0.5922873", "0.5843483", "0.57907474", "0.5650561", "0.56349534", "0.56267136", "0.55028135", "0.550243", "0.5497304", "0.54252976", "0.5407516", "0.53313655", "0.52876633", "0.52724916", "0.5268687", "0.5219308", "0.52061766", "0.52057064", "0.5189475", "0.5174156", "0.5166434", "0.51478815", "0.5142549", "0.511327", "0.50778574", "0.5072602", "0.50717604", "0.5070542", "0.5068844", "0.5062356", "0.5055262", "0.50538003", "0.50488925", "0.50438184", "0.5035686", "0.50311035", "0.5029688", "0.5028371", "0.5016538", "0.5012723", "0.49692258", "0.49406317", "0.493382", "0.49318412", "0.49298275", "0.49275565", "0.49159658", "0.4913919", "0.49047682", "0.49008802", "0.4898744", "0.48972988", "0.48834342", "0.48784578", "0.48740408", "0.4845163", "0.4843533", "0.48340705", "0.48280346", "0.482675", "0.4822948", "0.4820369", "0.48159528", "0.481389", "0.47928926", "0.47904122", "0.4782441", "0.4782262", "0.4782021", "0.4766663", "0.47497153", "0.47476533", "0.47440577", "0.47425053", "0.4738442", "0.47365698", "0.47270638", "0.4722092", "0.47148982", "0.4713723", "0.47133064", "0.47078067", "0.4705975", "0.46987578", "0.46878996", "0.46837682", "0.46822342", "0.46759978", "0.46759155", "0.46717483", "0.4670215", "0.46678558" ]
0.8604057
0
Parse a line of text from the plot_data file.
def parse_line(self, line):
    if line[0] == "#":
        return False
    parts = [x.strip() for x in line.strip().split(",")]
    self.unix_time = int(parts[0])
    self.cycles_done = int(parts[1])
    self.cur_path = int(parts[2])
    self.paths_total = int(parts[3])
    self.pending_total = int(parts[4])
    self.pending_favs = int(parts[5])
    self.map_size = float(parts[6].replace("%",""))
    self.unique_crashes = int(parts[7])
    self.unique_hangs = int(parts[8])
    self.max_depth = int(parts[9])
    self.execs_per_sec = float(parts[10])
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_plot_cmd(self, line):\n line, any_vars = self.find_vars_in_str(line)\n words = line.split()\n words = self.fix_words(words)\n\n # Parse line\n has_out_var = False\n if len(words) == 6:\n has_out_var = True\n _, plot_type, _, in_data, _, out_data = words\n else: _, plot_type, _, in_data = words\n\n in_data = getattr(self, in_data)\n plot_fnc = f_dicts.plot_fncs[plot_type]\n\n if has_out_var:\n OutVar = plot_fnc(in_data)\n else: plot_fnc(in_data)\n\n if has_out_var:\n self.set_var(out_data,\n {plot_type: OutVar}, {})", "def parse_point(self, text_line):\n record_type = self.substr(text_line, sps21point['RECORD_ID'][0], sps21point['RECORD_ID'][1]).strip()\n if record_type not in (SRC_DATA_RECORD, RCV_DATA_RECORD):\n return\n self.set_definition(sps21point)\n return self.parse(text_line)", "def read_line(line):\n label = line[0:11]\n text = line[11:]\n y = 1 if label == '__label__2 ' else 0\n return text, y", "def parse_line(self, line):\n raise NotImplementedError", "def parse_line(self, line):\n success = self.parser.handle_line(line)\n if success:\n self.data.update()\n else:\n self.bot.log(\"didn't handle line: '{}'\".format(line))", "def _parseLine(self, line, delimiter = \":\"):\r\n\t\tsplt = line.split(delimiter)\r\n\t\tinVec = self._parseVec(splt[0])\r\n\t\toutVec = self._parseVec(splt[1])\r\n\t\tif (len(splt) == 2):\r\n\t\t\tlabel = \"\"\r\n\t\telse:\r\n\t\t\tlabel = splt[2]\r\n\t\tself.data.append({'in':inVec, 'out':outVec, 'label':label})", "def parse_data(fp):\n pass", "def parse(cls, line):\r\n raise NotImplementedError", "def parse_line(line):\n return parse('#{id_:d} @ {x:d},{y:d}: {w:d}x{h:d}', line)", "def parseLine(self, line):\n # Removes surrounding whitespace\n line = self.separateElements(line)\n if len(line) == 0: return\n # Checks if the line is a label declaration\n if line[0].lower() == \"label\":\n # --Validates the line\n if len(line) != 2: raise Exception(\"Invalid Label\")\n if len(line[1]) < 2: raise Exception(\"Invalid Label\") \n if line[1][-1] != ':': raise Exception(\"Invalid Label\")\n # Gets the label name\n labelName = line[1][:-1]\n\n # Creates a new symbol entry for the label, the pointer refers to the memory location of the label\n # It defaults to the location of the label in the instruction sequence\n self.symbolTable.append({ \"type\": \"LABEL\", \"name\": labelName, \"pointer\": len(self.instructionList) * 4})\n # Checks if the line is data declaration\n elif line[0].lower() == \"data\" or (line[0].lower()[:4] == \"data\" and line[0][4] == \"[\"):\n # Removes the DATA tag from the data\n line[0] = line[0][4:]\n # --Validates the line\n if len(line) < 2: raise Exception(\"Invalid DATA\")\n # Gets the data name\n dataName = line[1]\n # Stores the data length\n dataLength = 4 # A word\n # Gets any default data\n defaultData = 0\n # Stores the data type\n dataType = \"int\"\n if len(line) == 3:\n if line[2][0] == \"\\\"\" and line[2][-1] == \"\\\"\":\n dataType = \"string\"\n defaultData = line[2][1:-1]\n dataLength = len(defaultData)\n elif line[2].isnumeric():\n defaultData = line[2]\n elif line[2][-1] == 'f' and line[2][:-1].isnumeric():\n dataType = \"float\"\n defaultData = line[2][0]\n # Checks if a data length was given\n if len(line[0]) > 2 and (line[0][0] == \"[\" and line[0][-1] == \"]\"):\n data = line[0][1:-1]\n if not data.isnumeric(): raise TypeError(\"Invalid data length type\")\n dataLength = int(data)\n\n # Creates a new symbol entry for the data\n self.symbolTable.append({ \"type\": \"DATA\", \"name\": dataName, 
\"default\": defaultData, \"dataType\": dataType, \"length\": dataLength})\n # The line is most likely an instruction\n else:\n # --Validates the line\n #Stores the control bits\n controlBits = 1 << 5 # Sets it to 0b100000\n # Checks if the first element is control bits\n if line[0][0] == \"{\" and line[0][-1] == \"}\": # First element is control bits\n # Separates the two sections of the control bits\n controlSections = line[0].split(':')\n #Goes through the characters and constructs the control bits for the instruction\n carryBits = controlSections[0].lower()\n carryFlag = int('c' in carryBits)\n zeroFlag = int('z' in carryBits)\n negativeFlag = int('n' in carryBits)\n signedOverflowFlag = int('s' in carryBits)\n #Gets the conditions bits\n if len(controlSections) == 2:\n conditionBits = controlSections[1].lower()\n isAnd = int('x' in conditionBits)\n isOne = int('1' in conditionBits)\n #Sets the last two bits on controlBits to the conditionBits\n controlBits ^= isAnd << 1\n controlBits ^= isOne\n # Constructs the control bits section\n controlBits ^= carryFlag << 5\n controlBits ^= zeroFlag << 4\n controlBits ^= negativeFlag << 3\n controlBits ^= signedOverflowFlag << 2\n # Removes the control bits section from the line\n line.pop(0)\n # Performs this check as the controlbits element gets removed (if it existed) and so the length of the elments could be zerp\n if len(line) == 0: raise Exception(\"Invalid Instruction\")\n # --The first element is the instruction\n # Identifies the instruction from the mnemonic using the lookup table\n if line[0] in self.InstructionLookupTable:\n ins = self.InstructionLookupTable[line[0]]\n insCode = ins[\"code\"]\n insControlBits = ins['controlBits'] if ins['controlBits'] else controlBits\n # Creates a representation of the instruction, this is stored in the instructionList and is assembled later\n instrucitonRepr = {\n \"code\": insCode,\n \"controlBits\": insControlBits,\n }\n # Parses the arguments given and stores the operandStruct returned in the instruciton representation\n if len(line) > 1: instrucitonRepr[\"operand\"] = self.parseArgs(line[1:], insCode)\n self.instructionList.append(instrucitonRepr)", "def line_parser(path):\n lines = []\n with open(path, 'r') as input:\n lines = [line.rstrip().split(',') for line in input]\n lines = [\n [[float(x1), float(y1)],\n [float(x2), float(y2)]] \n for x1, y1, x2, y2 in lines]\n return lines", "def parse_string_line(self, data_line):\n if data_line:\n data_line = data_line.rstrip()\n if data_line:\n if data_line[0] == '#':\n extraparams = json.loads(data_line[1:])\n if 'glyph_cap_line' in extraparams:\n self.__capline = extraparams['glyph_cap_line']\n if 'glyph_base_line' in extraparams:\n self.__baseline = extraparams['glyph_base_line']\n if 'glyph_bottom_line' in extraparams:\n self.__bottomline = extraparams['glyph_bottom_line']\n elif len(data_line) > 9:\n strokes = []\n xmin = xmax = ymin = ymax = None\n # individual strokes are stored separated by <space>+R\n # starting at col 11\n for s in split(data_line[10:], ' R'):\n if len(s):\n stroke = list(zip(map(self.__char2val, s[::2]), map(self.__char2val, s[1::2])))\n xmin = min(stroke + ([xmin] if xmin else []), key=lambda t: t[0])\n ymin = min(stroke + ([ymin] if ymin else []), key=lambda t: t[1])\n xmax = max(stroke + ([xmax] if xmax else []), key=lambda t: t[0])\n ymax = max(stroke + ([ymax] if ymax else []), key=lambda t: t[1])\n strokes.append(stroke)\n self.__charcode = int(data_line[0:5])\n self.__left_side = self.__char2val(data_line[8])\n 
self.__right_side = self.__char2val(data_line[9])\n self.__strokes = strokes\n self.__xmin, self.__ymin, self.__xmax, self.__ymax = (xmin[0], ymin[1], xmax[0], ymax[1]) if strokes else (0, 0, 0, 0)\n return True\n return False", "def _process_data_file(self):\n \n with open(self.data_file, 'r') as f:\n self.description = f.readline().strip()\n data = np.loadtxt(self.data_file, skiprows=1)\n\n return data", "def parse_file(self):\n # read the first line in the file\n line = self._stream_handle.readline()\n\n while line:\n # check for a data line or a dcl logger line we specifically ignore\n data_match = DATA_LINE_MATCHER.match(line)\n ignore_match = IGNORE_LINE_MATCHER.match(line)\n\n if data_match:\n # found a data line, extract this particle\n # DCL controller timestamp is the port_timestamp\n dcl_controller_timestamp = data_match.groups()[DCL_TIMESTAMP_GROUP]\n port_timestamp = dcl_time_to_ntp(dcl_controller_timestamp)\n\n particle = self._extract_sample(self.particle_class,\n None,\n data_match,\n port_timestamp=port_timestamp,\n preferred_ts=DataParticleKey.PORT_TIMESTAMP)\n\n self._record_buffer.append(particle)\n\n elif not ignore_match:\n # we found a line with an unknown format, call an exception\n error_message = 'Found line with unknown format %s' % line\n log.warn(error_message)\n self._exception_callback(SampleException(error_message))\n\n # read the next line\n line = self._stream_handle.readline()", "def parse_point(line):\n return json.loads(line)", "def parse_line(self, line):\n if self.signal_eof:\n return \"\"\n\n match = re.search(\"^([\\w\\s]+from) ([^:]+):(\\d+)(:|,)$\", line)\n if match:\n return self.parse_line_from(match)\n\n match = re.search(\"^([^:]+):(?:((?:\\d+:)?\\d+):)?(?:(error|warning|note):)?(.+)$\", line)\n if match:\n return self.parse_line_err(match)\n\n return line", "def line_to_data(line):\n elems = line.strip().split(\"\\t\")\n assert len(elems) in [1,2]\n text = None\n label = None\n if len(elems) == 1:\n text = elems[0]\n if len(elems) == 2:\n text = elems[0]\n label = elems[1]\n return (text, label)", "def __parse_position_data(self):\n self.add_debug('Parse position data ...')\n\n for i in range(len(self._lines)):\n if self.has_errors(): break\n line = self._lines[i]\n if len(line) < 1: continue\n if self.TIMESTAMP_MARKER in line: continue\n if self.RACK_BARCODE_MARKER in line: continue\n\n msg = 'Unexpected content in line %i: %s' % (i + 1, line)\n if not self.SEPARATOR in line: self.add_error(msg)\n tokens = line.split(self.SEPARATOR)\n if not len(tokens) == 2: self.add_error(msg)\n if self.has_errors(): continue\n\n pos_label = tokens[0].strip()\n if self.position_map.has_key(pos_label):\n msg = 'Duplicate position label \"%s\"' % (pos_label)\n self.add_error(msg)\n if self.has_errors(): continue\n\n tube_barcode = tokens[1].strip()\n if tube_barcode == self.NO_TUBE_PLACEHOLDER: tube_barcode = None\n self.position_map[pos_label] = tube_barcode", "def parse_dataset(self, data):\n pass", "def csv_parser(lines): \n\n data_points = []\n for line in lines:\n items = line.strip().split(\",\")\n try: #will fail on header line in file\n data_points.append(map(float, items[1:])) #first item is the label\n except ValueError: #must be the header\n continue\n return data_points", "def _parse_txt(path, n_channels):\n f = open(path)\n lines = f.readlines()\n f.close()\n\n geom = np.zeros((0, 2))\n\n for i, line in zip(range(n_channels), lines):\n line = line.replace('\\r', '')\n line = line.replace('\\n', '')\n row = line.split(' ')\n geom = 
np.vstack((geom, row[:2])).astype('float')\n\n return geom", "def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == \"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. Do that here.\n return False\n\n return parsed_data", "def parse_text(self):\n text = self.get_data()\n line1 = text[0]\n index_list = [0]\n start_index = 3\n for i in range(1, len(text)):\n\n if line1.startswith('*'):\n index_list, start_index = self.star_parser(index_list, line1)\n elif line1.startswith('.'):\n start_index = self.dot_parser(start_index, line1, text, i)\n else:\n print \"\".rjust(start_index) + line1\n line1 = text[i]\n # Parse the last line\n if text[-1].startswith('*'):\n self.star_parser(index_list, text[-1])\n elif text[-1].startswith('.'):\n print '-'.rjust(start_index) + text[-1].lstrip('.')\n else:\n print \"\".rjust(start_index) + text[-1]", "def process_line(line):\n [label, text] = line.split('\\t')\n return text.split()", "def parse_line(line: str) -> str:\n return line", "def parse_line(line: str) -> str:\n return line", "def parse_line(line: str) -> str:\n return line", "def parse_line(line: str) -> str:\n return line", "def parse_text(self, text):\n self._text_paragraph = text.split(\"\\n\")\n self._render()", "def process_line(self, line, data):\n return data", "def isLineData(self, line):\n\n if line is None or line.strip().startswith('#'):\n return False, None, 0\n\n dataType = self.getDataType()\n\n if dataType == 'Y':\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n newYValues = []\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n newYValues.append(yValue)\n except ValueError:\n pass\n\n return True, 'Y', len(newYValues)\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n elif dataType == 'XY':\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n 
xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n else:\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n numberValues = 0\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n numberValues += 1\n except ValueError:\n pass\n\n return True, 'Y', numberValues\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n return False, None, 0", "def parse_string(self, data):\n pass", "def _parse(self, lines):\n global VELSCALE\n self.title = lines[0].strip()\n self.time = None\n\n try:\n words = lines[1].split()\n self.natom = int(words[0])\n except (IndexError, ValueError):\n raise TypeError('Unrecognized file type [%s]' % self.filename)\n\n if len(words) >= 2:\n self.time = float(words[1]) * units.picoseconds\n\n if len(lines) == int(ceil(self.natom / 2.0) + 2):\n hasbox = hasvels = False\n self.boxVectors = self.velocities = None\n elif self.natom in (1, 2) and len(lines) == 4:\n # This is the _only_ case where line counting does not work -- there\n # is either 1 or 2 atoms and there are 4 lines. The 1st 3 lines are\n # the title, natom/time, and coordinates. The 4th are almost always\n # velocities since Amber does not make it easy to make a periodic\n # system with only 2 atoms. If natom is 1, the 4th line is either a\n # velocity (3 #'s) or a box (6 #'s). If natom is 2, it is a bit\n # ambiguous. However, velocities (which are scaled by 20.445) have a\n # ~0% chance of being 60+, so we can pretty easily tell if the last\n # line has box dimensions and angles or velocities. 
I cannot\n # envision a _plausible_ scenario where the detection here will fail\n # in real life.\n line = lines[3]\n if self.natom == 1:\n tmp = [line[i:i+12] for i in range(0, 72, 12) if line[i:i+12]]\n if len(tmp) == 3:\n hasvels = True\n hasbox = False\n self.boxVectors = False\n elif len(tmp) == 6:\n hasbox = True\n hasvels = False\n self.velocities = None\n else:\n raise TypeError('Unrecognized line in restart file %s' %\n self.filename)\n else:\n # Ambiguous case\n tmp = [float(line[i:i+12]) >= 60.0 for i in range(0, 72, 12)]\n if any(tmp):\n hasbox = True\n hasvels = False\n self.velocities = False\n else:\n hasvels = True\n hasbox = False\n self.boxVectors = False\n elif len(lines) == int(ceil(self.natom / 2.0) + 3):\n hasbox = True\n hasvels = False\n self.velocities = None\n elif len(lines) == int(2 * ceil(self.natom / 2.0) + 2):\n hasbox = False\n self.boxVectors = None\n hasvels = True\n elif len(lines) == int(2 * ceil(self.natom / 2.0) + 3):\n hasbox = hasvels = True\n else:\n raise TypeError('Badly formatted restart file. Has %d lines '\n 'for %d atoms.' % (len(self.lines), self.natom))\n\n if self._asNumpy:\n coordinates = np.zeros((self.natom, 3), np.float32)\n if hasvels:\n velocities = np.zeros((self.natom, 3), np.float32)\n else:\n coordinates = [Vec3(0.0, 0.0, 0.0) for i in range(self.natom)]\n if hasvels:\n velocities = [Vec3(0.0, 0.0, 0.0) for i in range(self.natom)]\n\n # Now it's time to parse. Coordinates first\n startline = 2\n endline = startline + int(ceil(self.natom / 2.0))\n idx = 0\n for i in range(startline, endline):\n line = lines[i]\n x = float(line[ 0:12])\n y = float(line[12:24])\n z = float(line[24:36])\n coordinates[idx] = Vec3(x, y, z)\n idx += 1\n if idx < self.natom:\n x = float(line[36:48])\n y = float(line[48:60])\n z = float(line[60:72])\n coordinates[idx] = Vec3(x, y, z)\n idx += 1\n self.coordinates = units.Quantity(coordinates, units.angstroms)\n startline = endline\n # Now it's time to parse velocities if we have them\n if hasvels:\n endline = startline + int(ceil(self.natom / 2.0))\n idx = 0\n for i in range(startline, endline):\n line = lines[i]\n x = float(line[ 0:12]) * VELSCALE\n y = float(line[12:24]) * VELSCALE\n z = float(line[24:36]) * VELSCALE\n velocities[idx] = Vec3(x, y, z)\n idx += 1\n if idx < self.natom:\n x = float(line[36:48]) * VELSCALE\n y = float(line[48:60]) * VELSCALE\n z = float(line[60:72]) * VELSCALE\n velocities[idx] = Vec3(x, y, z)\n idx += 1\n startline = endline\n self.velocities = units.Quantity(velocities,\n units.angstroms/units.picoseconds)\n if hasbox:\n line = lines[startline]\n try:\n tmp = [float(line[i:i+12]) for i in range(0, 72, 12)]\n except (IndexError, ValueError):\n raise ValueError('Could not parse box line in %s' %\n self.filename)\n lengths = tmp[:3] * units.angstroms\n angles = tmp[3:] * units.degrees\n self.boxVectors = computePeriodicBoxVectors(lengths[0], lengths[1],\n lengths[2], angles[0], angles[1], angles[2])", "def parse_report_line(self,line):\n\n report = self.new_police_report()\n report['original_text'] = line\n \n #\n # extract month and day\n match_date = REPORT_DATE_REGEXP.search(line)\n assert(match_date)\n start_index=match_date.start('month')\n stop_index=match_date.end('month')\n report['date_month'] = int(line[start_index:stop_index])\n\n start_index=match_date.start('day')\n stop_index=match_date.end('day')\n report['date_day'] = int(line[start_index:stop_index])\n\n my_logger.debug('extracted date (%d/%d)' % (report['date_month'],report['date_day']))\n\n 
#############################################\n # extract location & scale\n line = line[0:match_date.start('month')-1] # truncate after start of date\n \n #\n # trim off preceding html and trailing comma\n start_index=line.rfind('>')+1\n assert(start_index>0)\n\n stop_index=line.rfind(',',start_index)\n \n if stop_index >= 2:\n #\n # found a comma, \n line = line[start_index:stop_index]\n else:\n #\n # no comma found\n line = line[start_index:]\n my_logger.debug('truncated string: (%s)' % line)\n report['address']=line\n #\n # try to determine which case:\n # a block\n # an exact address\n # an establishment\n # an intersection\n # special cases, like: \"downtown mountain view\"\n # \n\n if (BLOCK_REGEXP.match(line)!=None):\n my_logger.debug('BLOCK detected')\n report['map_scale']=mapscale.BLOCK\n elif (INTERSECTION_REGEXP.match(line)!=None):\n my_logger.debug('INTERSECTION detected')\n report['map_scale']=mapscale.INTERSECTION\n elif (EXACT_REGEXP.match(line)!=None):\n my_logger.debug('EXACT detected')\n report['map_scale']=mapscale.EXACT\n else:\n #\n # must be manually assigned\n report['map_scale']=mapscale.OTHER\n\n\n return report", "def parse(self, f):\n lines = []\n for line in f:\n _line = line.split(\"//\")[0].strip()\n if _line.startswith(\"(\"): # is a label\n label_name = _line[1:-1]\n self.labels[label_name] = len(lines) # line number / address of label\n elif _line:\n lines.append(_line)\n # else: it's just a whitespace/comment line (ignore)\n return lines", "def parse(text):\n parts = [int(part) for part in text.strip().split(',')]\n point = Point(*parts)\n actual = \"{},{},{},{}\".format(point.x, point.y, point.z, point.t)\n assert actual == text, diff(actual, text)\n return point", "def __parse(self):\n lines = self.data.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n if line[0] == '#':\n continue\n tokens = line.split(\"\\t\")\n time_str = tokens[self.timecol]\n if time_str.find('start:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n elif time_str.find('end:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n break\n else:\n duration = float(tokens[6])\n fms = int(tokens[2])\n hfms = int(tokens[3])\n svs = int(tokens[4])\n self.calls.append((fms, hfms, svs))\n self.durations.append(duration)\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.length = (self.times[len(self.times) - 1] -\\\n self.times[0]).seconds", "def plot(self, plotType):\n # Build plotting data\n self.data_x_axis = []\n self.data_y_axis = []\n for i in range(0, self.csv_data_table.rowCount()):\n value = self.csv_data_table.item(i, self.selected_columns[0]).text()\n self.data_x_axis.append(value)\n value = self.csv_data_table.item(i, self.selected_columns[1]).text()\n self.data_y_axis.append(value)\n\n self.label_x_axis = self.csv_data_table.horizontalHeaderItem(self.selected_columns[0]).text()\n self.label_y_axis = self.csv_data_table.horizontalHeaderItem(self.selected_columns[1]).text()\n\n # Avoid duplication of resources if already allocated\n if self.figure is None:\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n\n # self.plot_frame_horizontal.addStretch()\n self.plot_frame_horizontal.addWidget(self.canvas)\n # self.plot_frame_horizontal.addStretch()\n\n # Ensures 
only 2 tabs at max are open at a time - file and plot tabs respectively\n if self.tabWidget.count() == 1:\n self.tabWidget.insertTab(1, self.plot_page_tab, \"Plot\")\n\n self.tabWidget.setCurrentIndex(1)\n\n # Set plot type (1,2,3 => order according to scatter, scatter-line, line)\n self.plotType = plotType\n\n # Convert the data to np arrays if it is purely numerical\n try:\n for i in range(0, len(self.data_x_axis)):\n if self.data_x_axis[i] == '':\n self.data_x_axis[i] = 0\n if self.data_y_axis[i] == '':\n self.data_y_axis[i] = 0\n\n self.data_x_axis[i] = self.coerce_str_to_number(self.data_x_axis[i])\n self.data_y_axis[i] = self.coerce_str_to_number(self.data_y_axis[i])\n\n self.data_x_axis = np.array(self.data_x_axis)\n self.data_y_axis = np.array(self.data_y_axis)\n\n print(self.data_x_axis)\n print(self.data_y_axis)\n\n print(\"In specialized plotting\")\n\n except:\n pass\n # Dont attempt the conversion, directly plot\n print(\"In generic plotting\")\n\n self.draw_plot(self.data_x_axis, self.data_y_axis, self.label_x_axis, self.label_y_axis)", "def check_plot_command(self, line):\n err_msg = \"The plot command takes the syntax:\\n\\n\"\n err_msg += \"\\t'plot <plot type> from <data name> as <plot name>'\"\n err_msg += \"\\n\\n\\t\\t\\tOR\\n\\n\\t'plot <plot type> from <data name>'\"\n\n line, any_vars = self.find_vars_in_str(line)\n\n # Check syntax\n words = line.split()\n words = self.fix_words(words)\n self.E_str = \"check_plot_command\"\n has_out_var = False\n if len(words) != 4:\n if len(words) != 6:\n self.print_error(err_msg)\n else:\n has_out_var = True\n _, plot_type, _, in_data, _, out_data = words\n else:\n _, plot_type, _, in_data = words\n\n # Check we can plot the thing asked for\n if plot_type not in f_dicts.plot_fncs:\n err_msg = f\"I don't know how to plot '{words[1]}'.\"\n err_msg += \"\\n\\nFor a full list of plots that can be done see below:\\n\\t* \"\n err_msg += \"\\n\\t* \".join(list(f_dicts.plot_fncs.keys()))\n self.print_error(err_msg)\n\n if has_out_var:\n metadata = f_dicts.plot_fncs[words[1]].metadata\n self.set_var(out_data, \"^EMPTY^\", metadata)\n return out_data\n\n return None", "def readInput(fileName):\n with open(fileName, 'r') as file:\n\n plotArray = []\n for line in file:\n plotArray.append(list(line.strip()))\n\n return plotArray", "def parsePoint(line):\n parts = line.split(\",\")\n return LabeledPoint(parts[0], [parts[1], parts[2]])", "def read_line(l):\n return [read_float(l[s]) for s in slices['data']]", "def parse(self, data):\n raise NotImplementedError", "def parse_line(self, line: str) -> None:\n self._count += 1", "def parse_datum( self, data ):\n return data", "def parse_datum( self, data ):\n return data", "def plot(self, plotType):\n # Build plotting data\n self.data_x_axis = []\n self.data_y_axis = []\n for i in range(0, self.csv_data_table.rowCount()):\n value = self.csv_data_table.item(i, self.selected_columns[0]).text()\n self.data_x_axis.append(value)\n value = self.csv_data_table.item(i, self.selected_columns[1]).text()\n self.data_y_axis.append(value)\n\n self.label_x_axis = self.csv_data_table.horizontalHeaderItem(self.selected_columns[0]).text()\n self.label_y_axis = self.csv_data_table.horizontalHeaderItem(self.selected_columns[1]).text()\n\n # Avoid duplication of resources if already allocated\n if self.figure is None:\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n\n # self.plot_frame_horizontal.addStretch()\n self.plot_frame_horizontal.addWidget(self.canvas)\n # 
self.plot_frame_horizontal.addStretch()\n\n # Ensures only 2 tabs at max are open at a time - file and plot tabs respectively\n if self.tabWidget.count() == 1:\n self.tabWidget.insertTab(1, self.plot_page_tab, \"Plot\")\n\n self.tabWidget.setCurrentIndex(1)\n\n self.plotType = plotType\n\n try:\n for i in range(0, len(self.data_x_axis)):\n if self.data_x_axis[i] == '':\n self.data_x_axis[i] = 0\n if self.data_y_axis[i] == '':\n self.data_y_axis[i] = 0\n\n self.data_x_axis[i] = self.strToNumber(self.data_x_axis[i])\n self.data_y_axis[i] = self.strToNumber(self.data_y_axis[i])\n\n self.data_x_axis = np.array(self.data_x_axis)\n self.data_y_axis = np.array(self.data_y_axis)\n\n print(self.data_x_axis)\n print(self.data_y_axis)\n\n except:\n pass\n print(\"In generic plotting\")\n\n self.drawPlot(self.data_x_axis, self.data_y_axis, self.label_x_axis, self.label_y_axis)", "def _find_first_data_point(self, lines):\r\n\r\n for i in range(len(lines)):\r\n if lines[i][0] in ['+', '-']:\r\n return i\r\n\r\n raise DataFormatError(\"No data found in file. Check data format spec.\")", "def parse_line(data, line, keyword):\n if '[' in line:\n pos = re.search(r'\\[(.*)\\]', line).group(1).split(':')\n pos = sorted([int(i.strip()) for i in pos])\n for i in range(pos[0], pos[1]):\n for j in re.search(r'\\](.*);', line).group(1).split(','):\n data[keyword].append(j.strip() + f'[{i}]')\n else:\n for i in re.search(rf'{keyword}(.*);', line).group(1).split(','):\n data[keyword].append(i.strip())", "def cli_text_scatter_plot(sep, graph_width, graph_height, input_file):\n values = []\n for line in input_file:\n try:\n x, y = line.split(sep)\n values.append((float(x), float(y)))\n except ValueError:\n click.echo(f'Warning: \"{line.strip()}\" could not be parsed', err=True)\n\n click.echo(text_scatter_plot(\n values, graph_width=graph_width, graph_height=graph_height\n ))", "def Parse(filename):\n\n f = open(filename, 'r')\n\n metadata = Metadata()\n data = [] # array of dataset\n dataset = None\n\n for num, line in enumerate(f):\n try:\n line = line.strip()\n if not line: continue\n\n if not metadata.complete:\n metadata.Parse(line)\n continue\n\n if re.match('[a-z_]', line):\n continue\n\n if line.startswith('# StopWatch'): # Start of a new dataset\n if dataset:\n if dataset.summary:\n metadata.UpdateWith(dataset)\n else:\n data.append(dataset)\n\n dataset = DataSet(line)\n continue\n\n if line.startswith('#'):\n continue\n\n # must be data at this stage\n try:\n (time, value) = line.split(None, 1)\n except ValueError:\n print 'skipping line %d: %s' % (num, line)\n continue\n\n if dataset and not dataset.summary:\n dataset.Add(float(time), float(value))\n\n except Exception:\n print 'Error parsing line %d' % num, sys.exc_info()[0]\n raise\n data.append(dataset)\n if not metadata.complete:\n print \"\"\"Error missing metadata. 
Did you mount debugfs?\n [adb shell mount -t debugfs none /sys/kernel/debug]\"\"\"\n sys.exit(1)\n return (metadata, data)", "def parse(self):\n try:\n self.open_file()\n lines = list(self._file)\n\n if len(lines) > 0:\n text = ''.join(lines)\n regex = 'Song \\d+\\nStart (\\d+:\\d+:\\d+)\\nEnd (\\d+:\\d+:\\d+)\\nLength (\\d+.\\d+)'\n match = re.findall(regex, text)\n if len(match):\n starts = []\n ends = []\n lengths = []\n\n for i in range(len(match)):\n starts.append(match[i][0])\n ends.append(match[i][1])\n lengths.append(float(match[i][2]))\n\n for i in range(len(match)):\n self.debug_data.append({\n 'start':starts[i],'end':ends[i],'length':lengths[i]})\n\n match = re.search('T\\d_S(\\d{4})_.*.txt', self._filepath)\n if match:\n self._experiment_metadata['session_id'] = int(match.groups()[0])\n else:\n raise EIMParsingError(\"No valid session id found in filename %s\" % self._filepath)\n\n finally:\n if self._file and not self._file.closed:\n self.close_file()", "def parseFileLine(self, line):\n c = line.strip().split(\":\")\n return (c[0], c[1], c[2], c[3])", "def parse(self, f):\n \n for line in f:\n self.parse_line(line)", "def logplot(in_dir, fname, xlim, ylim, title):\n\n with open(in_dir + fname,'r') as logfile:\n lf_lines = logfile.readlines()\n\n traj_x = []\n traj_y = []\n\n for row in lf_lines:\n if row[:4] == 'pose':\n #print(float(row[10:-2]))\n tup = row[7:]\n sep_pos = tup.find(' , ')\n traj_x.append(float(tup[:sep_pos]))\n traj_y.append(float(tup[sep_pos+3:]))\n\n liveplot(traj_x, traj_y, xlim, ylim, title)", "def parse_line(line : str) -> Tuple[str, str, int]:\n line = line[:-1] # Strips newline character\n question_end = line.find(';')\n question = line[:question_end]\n\n line = line[question_end+1:]\n answer_end = line.find(';')\n answer = line[:answer_end]\n\n points = int(line[answer_end+1:])\n\n return question, answer, points", "def parse_movie(self, line):\n pass", "def _parse_data(self):\n for i, val in enumerate(self.values.keys()):\n x_, y_ = [], []\n xy = self.values[val]\n for value in self.values.index:\n x_.append(xy[value][0])\n y_.append(xy[value][1])\n\n self.set_and_get(\"x_\", val, x_)\n self.set_and_get(\"y_\", val, y_)", "def retrieve_plot_data(self):\n spec2nexus.specplot.LinePlotter.retrieve_plot_data(self)\n\n if self.signal in self.data:\n # can't plot negative Y on log scale\n # Alternative to raising NotPlottable would be\n # to remove any data where Y <= 0\n if min(self.data[self.signal]) <= 0:\n msg = \"cannot plot Y<0: \" + str(self.scan)\n raise spec2nexus.specplot.NotPlottable(msg)\n\n # in the uascan, a name for the sample is given in `self.scan.comments[0]`\n self.set_y_log(True)\n self.set_plot_subtitle(\n \"#%s uascan: %s\" % (str(self.scan.scanNum), self.scan.comments[0])\n )", "def parse_new_mwdata_line(L):\n data = L.split()\n if len(data) != len(column_names['mwdata']):\n print(\"mwdata line {} has only {} fields, skipping\".format(L, len(data)))\n return\n label, record = parse_line_label_cols(L)\n\n def decode_col(col, decoder): # use for columns which may have '?'\n return None if col in ['?', 'None'] else decoder(col)\n\n record['rank'] = decode_col(data[4], int)\n record['rank_bounds'] = decode_col(data[5], decode_int_list)\n record['analytic_rank'] = decode_col(data[6], int)\n record['ngens'] = int(data[7])\n record['gens'] = decode_points_one2many(data[8])\n record['heights'] = data[9]\n #record['reg'] = decode_col(data[10], RealNumber) if record['ngens'] else 1\n record['reg'] = data[10] if record['ngens'] else 1\n 
record['torsion_order'] = nt = int(data[11])\n record['torsion_primes'] = ZZ(nt).prime_divisors()\n record['torsion_structure'] = decode_int_list(data[12])\n record['torsion_gens'] = decode_points_one2many(data[13])\n if len(data) == 17:\n #record['omega'] = decode_col(data[14], RealNumber)\n #record['Lvalue'] = decode_col(data[15], RealNumber)\n record['sha'] = decode_col(data[16], int)\n record['omega'] = data[14]\n record['Lvalue'] = data[15]\n else:\n record['omega'] = None\n record['Lvalue'] = None\n record['sha'] = None\n return label, record", "def tag_parser(file_path: str):\n with open(file_path) as f:\n t = f.read()\n t = t.split(\"Points =\\n\")[1]\n t = t.replace(\" 0.1 1 1 \\\"Marker\\\"\", \"\")\n t = t.replace(\";\", \"\")\n t = t.replace(\" \\n\", \"\\n\")\n t = t[1:]\n t = StringIO(t)\n\n return np.genfromtxt(t, delimiter=' ')", "def parse_mwdata_line(L):\n data = L.split()\n # if len(data)!=14:\n # print(\"mwdata line {} does not have 14 fields, skipping\".format(L))\n # return\n label, record = parse_line_label_cols(L)\n\n r = data[4]\n record['rank'] = None if r == '?' else int(r)\n r = data[5]\n record['rank_bounds'] = '?' if r == '?' else [int(rb) for rb in r[1:-1].split(\",\")]\n r = data[6]\n record['analytic_rank'] = None if r == '?' else int(r)\n record['ngens'] = int(data[7])\n gens = data[8]\n record['gens'] = [] if gens == '[]' else gens.replace(\"[[[\", \"[[\").replace(\"]]]\", \"]]\").replace(\"]],[[\", \"]];[[\").split(\";\")\n record['heights'] = data[9]\n record['reg'] = data[10]\n record['torsion_order'] = nt = int(data[11])\n ts = data[12]\n record['torsion_structure'] = [] if ts == '[]' else [int(t) for t in ts[1:-1].split(\",\")]\n record['torsion_primes'] = ZZ(nt).prime_divisors()\n record['torsion_gens'] = decode_points_one2many(data[13])\n\n record['omega'] = None\n record['Lvalue'] = None\n record['sha'] = None\n\n return label, record", "def _parse_line(line):\n\n number_pattern = '(\\d+(?:\\.\\d+)?)'\n line_pattern = '^\\s+%s\\s+$' % ('\\s+'.join([number_pattern for x in range(10)]))\n\n match = re.match(line_pattern, line)\n if match:\n print(match.groups())\n return match.groups()\n # if there are no matches\n return None", "def parse_line(self, line, time_shift=0.0):\n raise NotImplementedError(\"must be defined by subclass\")", "def datareader(self, path):\n\n f = open(path, 'r')\n data = f.read()\n data = data.split('\\n')\n data_tmp = []\n for idx in range(len(data)):\n if str(data[idx]).find('@data') >= 0:\n data_tmp = data[idx + 1:]\n break\n res = []\n for record in data_tmp:\n record = record.split(',')\n record = map(float, record)\n res.append(record)\n return res", "def loadLine(self, line):\n self.text.append(nlp(line[0]))\n self.full_text = self.full_text + \" \" + line[0]\n self.rec_paths.append(line[1])\n with contextlib.closing(wave.open(self.rec_paths[-1],'r')) as f:\n frames = f.getnframes()\n rate = f.getframerate()\n self.durations.append(frames / float(rate))\n\n # self.media.append(self.instance.media_new(self.rec_paths[-1]))\n self.total_duration = self.calculateTotalDuration(self.durations)", "def ParseWeatherData(parrData):\n ClearDisplay()\n for i in range(0, len(parrData)):\n if i < (len(parrData) - 1):\n DisplayMsg(parrData[i], int(i * 8))\n else:\n FinalLine = parrData[i].split(\": \")\n DisplayMsg(FinalLine[0], 40)\n DisplayMsg('{:^16}'.format(FinalLine[1]), 48)\n DrawHLine(Width, 37)\n display.show()", "def _analyzeLine(self, line, date, data):\n user = re.search(': (.*)', line).group().split()[1]\n 
data[user].add(ConvertTime(date, line))", "def parse_file(line, position):\n movement = line.split(\" \")\n if movement[0] == \"SUS\":\n position[\"x\"] += float(movement[1])\n elif movement[0] == \"JOS\":\n position[\"x\"] -= float(movement[1])\n elif movement[0] == \"STANGA\":\n position[\"y\"] -= float(movement[1])\n elif movement[0] == \"DREAPTA\":\n position[\"y\"] += float(movement[1])\n else:\n print(\"Incorrect input\")", "def parse(self):\r\n # open the file\r\n # try to find the first line with Time =\r\n # send the file to a recursive residualParser\r\n try:\r\n with open(self.filepath, 'rb') as f:\r\n for line in f:\r\n if line.startswith('Time ='):\r\n self.timestep = self.__getTime(line)\r\n self.__residuals[self.timestep] = {}\r\n self.__parseResiduals(f)\r\n except Exception as e:\r\n raise 'Failed to parse {}:\\n\\t{}'.format(self.filepath, e)", "def read_text(self, text):\n if isinstance(text, (str, unicode)):\n lines = text.split('\\n')\n else:\n lines = text\n\n for line in lines:\n l = line.strip()\n\n if line == ST_POS0:\n self._state = ST_POS0\n elif line == ST_TRNS:\n self._state = ST_TRNS\n elif line == ST_POS0:\n self._state = ST_POS0\n else:\n self._parse_line(line)", "def parse_position_line(line):\n\n match = Response.regex_position.search(line)\n if match is not None:\n result = dict(\n x=float(match.group(\"x\")),\n y=float(match.group(\"y\")),\n z=float(match.group(\"z\")),\n )\n if match.group(\"e\") is not None:\n # report contains only one E\n result[\"e\"] = float(match.group(\"e\"))\n\n elif match.group(\"es\") is not None:\n # report contains individual entries for multiple extruders (\"E0:... E1:... E2:...\")\n es = match.group(\"es\")\n for m in Response.regex_e_positions.finditer(es):\n result[\"e{}\".format(m.group(\"id\"))] = float(m.group(\"value\"))\n\n else:\n # apparently no E at all, should never happen but let's still handle this\n return None\n\n return result\n\n return None", "def _parse_coords(self):\n\n coords = []\n\n while True:\n try:\n _, x, y = self._lines.current.split()\n coords.append((float(x), float(y)))\n except ValueError:\n break\n\n try:\n next(self._lines)\n except StopIteration:\n break\n\n return coords", "def _build_data_from_text(self, text):\n try:\n record = json.loads(text)\n except Exception as e:\n logging.error(f\"Exception: {e}\")\n logging.error(f\"datapoint: {text}\")\n raise e\n return record", "def parse_line(self, line):\n\n kv_match = self.kv_rex.match(line)\n\n if kv_match:\n kv_dict = kv_match.groupdict()\n kv_key = kv_dict['key']\n kv_value = kv_dict['value']\n\n if 'time_' in kv_key:\n kv_unit = kv_dict['unit']\n\n if not kv_key in self.kv_times:\n self.kv_times[kv_key] = {'unit': kv_unit, 'values': []}\n\n self.kv_times[kv_key]['values'].append(float(kv_value))\n else:\n if not kv_key in self.kv_counts:\n self.kv_counts[kv_key] = 0.0\n\n self.kv_counts[kv_key] += float(kv_value)", "def line_to_data( line ):\n data = [ ]\n if '>>>' in line:\n print(line)\n return data\n\n secs = filter( None, line.split(',') )\n for i, x in enumerate( secs ):\n try:\n data.append( float(x.strip()) )\n except Exception as e:\n data = None\n # print( data )\n return data", "def populate_plot(self, plot, data):\n\n # Determine which type of line gets which color\n color_map = {\n 'REF': Category20c_20[16],\n 'REF1': Category20c_20[16],\n 'REF2': Category20c_20[16],\n 'REF3': Category20c_20[16],\n 'REF4': Category20c_20[16],\n 'SCRIBE_LINE': Category20c_20[0],\n 'SCRIBE_LINE1': Category20c_20[0],\n 'SCRIBE_LINE2': 
Category20c_20[1],\n 'SCRIBE_LINE3': Category20c_20[2],\n 'SCRIBE_LINE4': Category20c_20[3],\n 'BUSBAR_LINE': Category20c_20[4],\n 'BUSBAR_LINE1': Category20c_20[4],\n 'BUSBAR_LINE2': Category20c_20[5],\n 'BUSBAR_LINE3': Category20c_20[6],\n 'BUSBAR_LINE4': Category20c_20[7],\n 'EDGEDEL_LINE': Category20c_20[8],\n 'EDGEDEL_LINE1': Category20c_20[8],\n 'EDGEDEL_LINE2': Category20c_20[9],\n 'EDGEDEL_LINE3': Category20c_20[10],\n 'EDGEDEL_LINE4': Category20c_20[11]\n }\n\n # Color of the non cutting line\n radius = 13\n line_width = 3\n\n scatter_points = {}\n for line in data:\n group_name = line.get_line_type() + line.get_recipe()\n sp = line.get_starting_point()\n ep = line.get_endpoint()\n\n # Sort scatter points\n if group_name not in scatter_points:\n scatter_points[group_name] = {\n 'x': [sp[0], ep[0]],\n 'y': [sp[1], ep[1]]\n }\n else:\n scatter_points[group_name]['x'].append(sp[0])\n scatter_points[group_name]['x'].append(ep[0])\n scatter_points[group_name]['y'].append(sp[1])\n scatter_points[group_name]['y'].append(ep[1])\n\n # Cutting line\n plot.line(\n [sp[0], ep[0]],\n [sp[1], ep[1]],\n color=color_map[group_name],\n line_width=line_width\n )\n\n # Add a scatter plot for every group\n for group_name, group in scatter_points.items():\n plot.scatter(\n group['x'],\n group['y'],\n color=color_map[group_name],\n radius=radius,\n legend=group_name\n )\n\n # Add travel lines\n for line in range(len(data) - 1):\n # Get the endpoint of the current line, as well as the starting\n # point of the next line\n ep0 = data[line].get_endpoint()\n sp1 = data[line + 1].get_starting_point()\n\n # Plot the travel line (non-cutting line)\n plot.line(\n [\n ep0[0],\n sp1[0]],\n [\n ep0[1],\n sp1[1]\n ],\n color='black',\n legend='Non Cutting'\n )\n\n return plot", "def isDataLine(line):\n if len(line) > 1:\n return line[0] != \"#\"\n return False", "def isDataLine(line):\n if len(line) > 1:\n return line[0] != \"#\"\n return False", "def _parse(self, line):\n comd, value = cmd.parse(line, SERVER_PREFIX)\n player = self.player\n if comd == 'play':\n audio, seek = cmd.separate(value)\n player.playFile(audio, seek=float(seek))\n newline = 'Player started playing %s' %(value)\n elif comd == 'seek':\n value = float(value)\n player.seekto(value)\n newline = 'Forwared to %d seconds' %(value)\n elif comd == 'pause':\n player.pause()\n newline = 'Player Paused'\n elif comd == 'volume':\n value = float(value)\n player.volume = value\n newline = 'Volume changed to %d' %(value)\n else:\n return line\n return newline", "def read_text(filename):\n with open(filename, 'r') as f:\n com = f.readline()[0]\n wavelength, flux = np.loadtxt(filename, unpack=True,\n usecols=(0, 1), comments=com)\n return wavelength, flux", "def __parse(self):\n lines = self.file.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n tokens = line.split()\n if tokens[0] == \"#start\":\n trial_name = tokens[1]\n trial = Trial(trial_name)\n self.trials[trial_name] = trial\n elif tokens[0] == \"#end\":\n continue\n else:\n date_str = tokens[0] + \" \" + tokens[1]\n date = datetime.strptime(date_str, \"%m/%d/%y %H:%M:%S\")\n sound_file = line[18:-1].strip()\n event = Event(date, sound_file, 0)\n trial.addevent(event)", "def parse_pdb(self, line):\n if line is not None:\n self.original_text.append(line.rstrip(\"\\r\\n\"))", "def make_line_plot(data, x_label=\"Data\", y_label=\"Data Point\"):\n\n y = data\n x = range(len(y))\n\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.plot(x, y)\n plt.show()", "def parse(cls, data):\n raise 
NotImplementedError", "def parse_galrep_line(L):\n data = L.split(maxsplit=1)\n record = parse_galrep_data_string(\"\" if len(data) == 1 else data[1])\n record['label'] = label = data[0]\n return label, record", "def function2():\r\n with open('data.txt', 'r') as file:\r\n read_data = file.read()\r\n data = read_data.split()\r\n line_chart = pygal.Line()\r\n line_chart.title = data[26]\r\n line_chart.x_labels = map(str, range(2554, 2558))\r\n line_chart.add(data[27], [int(data[28]), int(data[29]), int(data[30]), int(data[31])])\r\n line_chart.add(data[32], [int(data[33]), int(data[34]), int(data[35]), int(data[36])])\r\n line_chart.add(data[37], [int(data[38]), int(data[38]), int(data[40]), int(data[41])])\r\n line_chart.add(data[42], [int(data[43]), int(data[44]), int(data[45]), int(data[46])])\r\n line_chart.add(data[47], [int(data[48]), int(data[49]), int(data[50]), int(data[51])])\r\n line_chart.render_to_file('02.svg')", "def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array", "def parse_lines(self, start_line=0, end_line=False):\n if end_line is False: end_line = len(self.file_ltxt)\n\n lines = self.file_ltxt\n self.E_str = \"parse_lines\"\n self.line_num = start_line\n\n # Loop over lines and parse\n while self.line_num < end_line:\n line = lines[self.line_num].strip()\n\n if line == \"echo\": print(\"\")\n\n # Parse any variables\n elif self.line_declarations['variable'](line):\n self.parse_variable_line(line)\n\n # Parse any file loading commands\n elif self.line_declarations['load'](line):\n self.parse_load_cmd(line)\n\n # Parse any file loading commands\n elif self.line_declarations['plot'](line):\n self.parse_plot_cmd(line)\n\n # Parse any file loading commands\n elif self.line_declarations['write'](line):\n self.parse_write_cmd(line)\n\n # Parse any math commands\n elif self.line_declarations['math'](line):\n self.parse_math_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['echo'](line):\n self.parse_echo_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['calc'](line):\n self.parse_calc_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['set'](line):\n self.parse_set_cmd(line)\n\n # Parse any shell commands\n elif self.line_declarations['shell'](line):\n self.parse_shell_cmd()\n\n # Parse any for loop commands\n elif self.line_declarations['for'](line):\n self.parse_for_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['script'](line):\n self.parse_script_cmd(line)\n\n elif self.line_declarations['inline_code'](line):\n getattr(self, f\"parse_{line.split()[0]}_cmd\")(line)\n\n elif self.line_declarations['if'](line):\n self.parse_if_cmd(line)\n\n # elif self.line_declarations['splice'](line):\n # self.parse_splice_cmd(line)\n\n elif self.line_declarations['glue'](line):\n self.parse_glue_cmd(line)\n\n elif self.line_declarations['exit'](line):\n print(\"\\n\\nStopped Code -exit was called.\")\n raise SystemExit\n\n # The end of control statements\n elif '}' in line:\n pass\n\n # Print a warning about unknown line\n else:\n self.print_warning(f\"I don't understand a line: '{line}'\")\n\n self.line_num += 1", "def get_data(dataf):\n with open(dataf) as f:\n label = []\n e_val = []\n for line in f:\n label.append(float(line.split()[1]))\n e_val.append(-1 * float(line.split()[0]))\n return label, e_val", "def parse_line(self, atline: List, list_of_lines: List, part: PART, afix: AFIX, resi: RESI) -> None:\n uvals = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n self.name = atline[0][:4] # Atom 
names are limited to 4 characters\n for n, u in enumerate(atline[6:12]):\n uvals[n] = float(u)\n self.uvals_orig = uvals[:]\n self.set_uvals(uvals)\n self._line_numbers = list_of_lines\n self.part = part\n self.afix = afix\n self.resi = resi\n self._get_part_and_occupation(atline)\n self.x, self.y, self.z = self._get_atom_coordinates(atline)\n self.xc, self.yc, self.zc = self._cell.o * Array(self.frac_coords)\n if abs(self.uvals[1]) > 0.0 and self.uvals[2] == 0.0 and self.shx.hklf: # qpeaks are always behind hklf\n self.peak_height = uvals[1]\n self.qpeak = True\n if self.shx.end: # After 'END' can only be Q-peaks!\n self.qpeak = True\n self.sfac_num = int(atline[1])\n self.shx.fvars.set_fvar_usage(self.fvar)\n self.Ucif = self.set_ucif(uvals)\n # TODO: I am still unsure if this these are correct values:\n # self.Ustar = self.Ucif * self._cell.N * self._cell.N.T\n # self.Ucart = self.Ustar * self._cell.o * self._cell.o.T\n # self.Ueq = self.set_ueq(uvals)\n # self.Uiso = self.Ueq\n # transformed_u = self.transform_u_by_symmetry(2)\n # print(self.name, [round(x, 6) for x in transformed_u], self.frac_coords)", "def parse_relation(self, text_line):\n record_type = self.substr(text_line, sps21point['RECORD_ID'][0], sps21point['RECORD_ID'][1]).strip()\n if record_type not in REL_DATA_RECORD:\n return\n\n self.set_definition(sps21relation)\n return self.parse(text_line)", "def read_data(self,data_array):\n value_type = self.config['value_type']\n try:\n curr_value = data_array[self.index]\n except:\n curr_value = 'None'\n \n #if value is none\n if curr_value.strip() == 'None':\n #break line\n curr_value = np.nan\n else:\n value_type = self.str_to_command(value_type)\n curr_value = value_type(curr_value)\n\n # TODO: Read time and not log id\n try:\n time = int(data_array[0])\n except:\n time = self.x[-1] + 1\n\n self.x = np.append(self.x,time)\n self.y = np.append(self.y,curr_value)\n\n if(self.auto_scale and len(self.x) > self.config[\"limit\"]):\n #delete old values\n self.x = np.delete(self.x, 0)\n self.y = np.delete(self.y, 0)\n\n self.set_fig_color(curr_value)", "def parse_data(name):\n with open(name) as f:\n lines = f.read().splitlines()\n lines = filter(lambda x: x.split(' ')[0].isdigit(), lines)\n lx = [int(p.split(' ')[1]) for p in lines]\n ly = [int(p.split(' ')[2]) for p in lines]\n return lx, ly", "def parser_txt_file(self, content):\n ai_cpu_str = str(content.replace(b'\\n\\x00', b' ___ ').replace(b'\\x00', b' ___ '))[2:-1]\n ai_cpu_lines = ai_cpu_str.split(\" ___ \")\n result_list = list()\n ai_cpu_total_time_summary = 0\n # Node serial number.\n serial_number = 1\n for i in range(len(ai_cpu_lines) - 1):\n node_line = ai_cpu_lines[i]\n thread_line = ai_cpu_lines[i + 1]\n if \"Node\" in node_line and \"Thread\" in thread_line:\n # Get the node data from node_line\n result = self._get_kernel_result(\n serial_number,\n node_line.split(','),\n thread_line.split(',')\n )\n\n if result is None:\n continue\n\n result_list.append(result)\n # Calculate the total time.\n total_time = result[2]\n ai_cpu_total_time_summary += total_time\n # Increase node serial number.\n serial_number += 1\n elif \"Node\" in node_line and \"Thread\" not in thread_line:\n node_type_name = node_line.split(',')[0].split(':')[-1]\n logger.warning(\"The node type:%s cannot find thread data\", node_type_name)\n return ai_cpu_total_time_summary, result_list", "def parse_feature(self, feature_key, lines):\n ...", "def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')", "def 
readlogfile(file_name):\n F = open(file_name, \"r\")\n M = F.readlines()\n F.close()\n x = []\n y = []\n \n for i in range(len(M)):\n if find(M[i], \"#\") <0:\n # Since the x,y are recorded as, e.g., '[398:402,300:304]'\n x.append(float(M[i].split('[')[1].split(':')[0]) + 2.0)\n y.append(float(M[i].split(',')[1].split(':')[0]) + 2.0)\n \n # Notice this function allows multiple stars to be chosen. For our purposes here,\n # we need pick only one, and so return the first values in the list.\n return x[0], y[0]", "def load_regular_coord_by_line(line):\n elems = line.split('\\t')\n if len(elems) < 4:\n elems = line.split(',')\n if len(elems) < 4:\n elems = line.split(' ')\n\n [X1, Y1, W, H] = elems[0:4]\n coord_regular = [int(X1), int(Y1), int(W), int(H)]\n return coord_regular", "def parse(self):\n\t\tfirst = None\n\t\tf = open(self.input_file)\n\t\tfor line in f.readlines():\n\t\t\tif line.startswith(\"#\"):\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tflow,t,sequence,size = line.split()\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t# append data to a list of tuples\n\t\t\tflow = int(flow)\n\t\t\tt = float(t)\n\t\t\tsequence = int(sequence)\n\t\t\tif size == \"x\":\n\t\t\t\tcontinue\n\t\t\tsize = int(size)\n\t\t\tif not size == 0:\n\t\t\t\tif flow == 1:\n\t\t\t\t\tself.data1.append((t,sequence,size))\n\t\t\t\telif flow == 2:\n\t\t\t\t\tself.data2.append((t,sequence,size))\n\t\t\t\telif flow == 3:\n\t\t\t\t\tself.data3.append((t, sequence, size))\n\t\t\t\telif flow == 4:\n\t\t\t\t\tself.data4.append((t, sequence, size))\n\t\t\t\telif flow == 5:\n\t\t\t\t\tself.data5.append((t, sequence, size))\n\t\t\t\telse:\n\t\t\t\t\tprint \"Erroneous data: \",flow, t, sequence, size\n\t\t\t# Keep track of the minimum and maximum time seen\n\t\t\tif not self.min_time or t < self.min_time:\n\t\t\t\tself.min_time = t\n\t\t\tif not self.max_time or t > self.max_time:\n\t\t\t\tself.max_time = t\n\n\t\t\t# print len(self.data1),len(self.data2),len(self.data3),len(self.data4),len(self.data5)" ]
[ "0.6670215", "0.6484195", "0.6202049", "0.61103636", "0.5995822", "0.5897944", "0.5888218", "0.58159584", "0.57317466", "0.5645726", "0.56055975", "0.55672276", "0.55463046", "0.55388415", "0.55194986", "0.55111915", "0.5509744", "0.55055726", "0.5483326", "0.54770184", "0.5465886", "0.54570895", "0.54524744", "0.5423845", "0.5398041", "0.5398041", "0.5398041", "0.5398041", "0.5397984", "0.53813803", "0.5368919", "0.53672004", "0.535464", "0.5333608", "0.53215057", "0.5316749", "0.5315787", "0.529568", "0.5292059", "0.52672243", "0.5265506", "0.5248204", "0.5237473", "0.5235293", "0.52241886", "0.52241886", "0.5223019", "0.52171373", "0.5215269", "0.52010906", "0.51854026", "0.51793504", "0.5178843", "0.5166506", "0.516077", "0.51564825", "0.51531285", "0.51522624", "0.5148474", "0.51424754", "0.51416415", "0.51373637", "0.5136216", "0.5099763", "0.5097705", "0.5083215", "0.50815076", "0.5073259", "0.5066409", "0.5066381", "0.5060576", "0.50597644", "0.5058827", "0.5056352", "0.5052406", "0.5051651", "0.5051203", "0.50432134", "0.50432134", "0.5042337", "0.5033903", "0.5025639", "0.50222784", "0.50205165", "0.50095886", "0.5008532", "0.5004431", "0.50043684", "0.500129", "0.4996022", "0.49944782", "0.4993242", "0.49883732", "0.49874756", "0.49865717", "0.49836966", "0.49739528", "0.4973132", "0.49689922", "0.49523634" ]
0.49969345
89
Obtains the record in the set with the time closest to the given $unix_time. If this record is not $within the correct number of seconds, an exception is raised.
def get_record(self, unix_time, within):
        if len(self.records) <= 0:
            raise Exception("No records in this set")

        # Start with the first record as the provisional closest match.
        r = self.records[0]
        closest_record = r
        closest_delta = abs(r.unix_time - unix_time)
        # Scan the remaining records, keeping the smallest absolute time delta.
        for r in self.records[1:]:
            delta = abs(r.unix_time - unix_time)
            if delta < closest_delta:
                closest_record = r
                closest_delta = delta

        # Reject the match if it falls outside the allowed window.
        if closest_delta > within:
            raise Exception("Closest record to %d was %d (delta=%d) which exceeds limit of %d" % (unix_time, closest_record.unix_time, closest_delta, within))

        return closest_record
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_closest_record(self, time):\n dist = 10000000\n record = -1\n # TODO: optimise a bit\n for i, itime in enumerate(self.times):\n if (abs(time-itime)) < dist:\n dist = abs(time-itime)\n record = i\n\n return record", "def find_nearest_time(self, time):\n\n idx = np.searchsorted(self.times, time, side=\"left\")\n if idx > 0 and (idx == len(self.times) or math.fabs(time - self.times[idx-1]) < math.fabs(time - self.times[idx])):\n return self.times[idx-1]\n else:\n return self.times[idx]", "def closest_time(self, when):\n try:\n return np.argmin(np.abs(self.time.datetime - when))\n except AttributeError:\n self.load_time()\n return np.argmin(np.abs(self.time.datetime - when))", "def nearest_time_sql(self, t):\n if self.verbose:\n sys.stderr.write('SQL Time %s' % t)\n self.cursor.execute('SELECT * FROM tracklog WHERE dt <= ? ORDER BY dt DESC LIMIT 1', (t,))\n d = self.cursor.fetchone()\n if d != None:\n t0 = [datetime.strptime(d[0],'%Y-%m-%d %H:%M:%S')]\n t0.extend(d[1:])\n else:\n t0 = None\n self.cursor.fetchall()\n self.cursor.execute('SELECT * FROM tracklog WHERE dt >= ? ORDER BY dt LIMIT 1', (t,))\n d2 = self.cursor.fetchone()\n if d2 != None:\n t1 = [datetime.strptime(d2[0],'%Y-%m-%d %H:%M:%S')]\n t1.extend(d2[1:])\n else:\n t1 = None\n self.cursor.fetchall()\n if self.verbose:\n sys.stderr.write('SQL Resuls %s %s' % (t0,t1))\n if t0 == None or t1 == None:\n return None\n return t0,t1", "def nearest():\n\n # Time this functions.\n timer = coils.Timer()\n\n # Parse the URL parameter \"time\".\n errors = list()\n try:\n tstamp_query = flask.request.args.get('time')\n time_query = coils.string2time(tstamp_query)\n assert time_query != None\n except:\n errors.append('Failed to parse \"time\" parameter.')\n\n # Bail on any errors.\n if errors:\n return flask.jsonify(errors=errors)\n \n return flask.jsonify(\n result=getNearestTime(time_query),\n elapsed=timer.get().total_seconds(),\n )", "def closest_to(self, a, b):\n diff_a = abs(a.ts - self.ts)\n diff_b = abs(b.ts - self.ts)\n if diff_a < diff_b and diff_a < TIME_THRESHOLD:\n return a\n elif diff_b < TIME_THRESHOLD:\n return b\n return None", "def _nearest_datetime(self, datetime_list, target_datetime):\n if not datetime_list:\n raise errors.ParserError(\n \"Input parameter datetime_list length is zero. Required\"\n \" parameters: [datetime.datetime], datetime.datetime\")\n work_list = [entry for entry in datetime_list if entry < target_datetime]\n if not work_list:\n raise errors.ParserError(\n \"work_list length is zero. 
Entries in datetime_list\"\n \" {} are not < target_datetime {}\".format(datetime_list,\n target_datetime))\n return min(\n work_list,\n key=lambda datetime_entry: abs(datetime_entry - target_datetime))", "def findNearestTime(foamCase, time):\n times = list(getTimeFolders(foamCase,returnType=\"float\"))\n strTimes = np.array(getTimeFolders(foamCase,returnType=\"string\"))\n if time in times:\n try:\n intTime = int(strTimes[times.index(time)])\n return int(time)\n except:\n return time\n else:\n nearestTime = times[np.argmin(np.abs(np.array(times)-time))]\n print(\"Time %f is not available, choosing nearest time %f\" % ( time, nearestTime))\n try:\n intTime = int(strTimes[times.index(nearestTime)])\n return int(nearestTime)\n except:\n return nearestTime", "def bisect(self, dtime, b=0): # pylint: disable=invalid-name\n return self._collection.bisect(dtime, b)", "def getNearestTime(time_query):\n\n # Convert datetime object to string, for lookup in database.\n tstamp_query = coils.time2string(time_query)\n\n # Retrieve image timestamps.\n try:\n tstamp_left = db.session.query(mapping.Image.time).\\\n filter(mapping.Image.time <= tstamp_query).\\\n order_by(mapping.Image.time.desc()).limit(1)\n tstamp_left = tstamp_left[0].time\n delta_left = abs(coils.string2time(tstamp_left) - time_query)\n except:\n tstamp_left = None\n delta_left = dt.timedelta.max\n \n try:\n tstamp_right = db.session.query(mapping.Image.time).\\\n filter(mapping.Image.time >= tstamp_query).\\\n order_by(mapping.Image.time).limit(1)\n tstamp_right = tstamp_right[0].time\n delta_right = abs(coils.string2time(tstamp_right) - time_query)\n except:\n tstamp_right = None\n delta_right = dt.timedelta.max\n \n # The nearest value has the smallest delta from the query.\n result = tstamp_left if (delta_left < delta_right) else tstamp_right\n return result", "def mempool_assert_relative_time_exceeds(\n condition: ConditionWithArgs, unspent: CoinRecord, timestamp: uint64\n) -> Optional[Err]:\n try:\n expected_seconds = int_from_bytes(condition.vars[0])\n except ValueError:\n return Err.INVALID_CONDITION\n\n if timestamp is None:\n timestamp = uint64(int(time.time()))\n if timestamp < expected_seconds + unspent.timestamp:\n return Err.ASSERT_SECONDS_RELATIVE_FAILED\n return None", "def findontarget(starttime, event_list):\n for r in event_list:\n if r[0]==18 and r[1]>starttime: return r[1]\n return None", "def getElemAfterTime(self, stamp):\n newer = [msg for (msg, time) in zip(self.cache_msgs, self.cache_times)\n if time >= stamp]\n if not newer:\n return None\n return newer[0]", "def locate_nearest_event(self):\n nearest_event_date = ''\n min = 1000000\n today = self.get_today()\n event_array = self.events.keys()\n for event_date in event_array:\n event_date = self.date_to_operate_format(event_date)\n if int(event_date) - int(today) > 0:\n if int(event_date) - int(today) < min:\n min = int(event_date) - int(today)\n nearest_event_date = event_date\n\n nearest_event = '0'\n if len(event_array) > 0:\n nearest_event = self.change_format_to_database_index(nearest_event_date)\n\n return nearest_event", "def getElemBeforeTime(self, stamp):\n older = [msg for (msg, time) in zip(self.cache_msgs, self.cache_times)\n if time <= stamp]\n if not older:\n return None\n return older[-1]", "def mempool_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp: uint64) -> Optional[Err]:\n try:\n expected_seconds = int_from_bytes(condition.vars[0])\n except ValueError:\n return Err.INVALID_CONDITION\n\n if timestamp is None:\n timestamp 
= uint64(int(time.time()))\n if timestamp < expected_seconds:\n return Err.ASSERT_SECONDS_ABSOLUTE_FAILED\n return None", "def find_above(self, time, level):\n\n if self.get(time) >= level:\n return time\n ix = self._trace.bisect_right(time)\n for t, lvl in self._trace.items()[ix:]:\n if lvl >= level:\n return t\n return None", "def get_prev_time(time, c_type=None, c_pk=None):\n\n # TODO if efficiency is an issue here, make occ_times a generator and\n # keep a queue (collections.deque) of the relevant times; this should\n # work because we're moving through them in sequence so could safely\n # discard those before the current time as we go\n\n if c_pk:\n def filter_func(obj):\n return obj['colloquialism__pk'] == c_pk\n elif c_type:\n def filter_func(obj):\n return obj['colloquialism__type'] == c_type\n else:\n def filter_func(obj):\n return True\n\n # filter out relevant times\n filtered = filter(\n lambda obj: filter_func(obj) and obj['start_exact'] < time,\n occ_times)\n\n if not len(filtered):\n return None\n\n # return last filtered time since they are in order\n return filtered[-1]['start_exact']", "def from_unix_sec(self):\n try:\n self.in_unix_sec = dt.utcfromtimestamp(float(unix)).strftime('%Y-%m-%d %H:%M:%S.%f')\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.in_unix_sec = False\n return self.in_unix_sec", "def cacheFindEntry(cache, cameraID, desiredTime):\n if not cameraID in cache:\n return None\n cameraTimes = cache[cameraID]\n closestEntry = min(cameraTimes, key=lambda x: abs(x['time'] - desiredTime))\n if abs(closestEntry['time'] - desiredTime) < 30:\n # logging.warning('close: %s', str(closestEntry))\n return os.path.join(cache['readDir'], closestEntry['fileName'])\n else:\n # logging.warning('far: %s, %s', str(desiredTime), str(closestEntry))\n return None", "def object_at(self, time):\n for event in self._timeline: \n if time >= event.start_time and time <= event.end_time:\n return event.obj\n return self._timeline[-1].obj", "def subset_by_time(prediction_dict, desired_times_unix_sec):\n\n error_checking.assert_is_numpy_array(\n desired_times_unix_sec, num_dimensions=1\n )\n error_checking.assert_is_integer_numpy_array(desired_times_unix_sec)\n\n desired_indices = numpy.array([\n numpy.where(prediction_dict[VALID_TIMES_KEY] == t)[0][0]\n for t in desired_times_unix_sec\n ], dtype=int)\n\n prediction_dict = subset_by_index(\n prediction_dict=prediction_dict, desired_indices=desired_indices\n )\n\n return prediction_dict, desired_indices", "def find_below(self, time, level):\n\n if self.get(time) <= level:\n return time\n ix = self._trace.bisect_right(time)\n for t, lvl in self._trace.items()[ix:]:\n if lvl <= level:\n return t\n return None", "def lookup_time_spent():\n while True:\n search_query = input('Show entries in which time spent '\n '(in minutes) is: ')\n if validate_lookup_time_spent_format(search_query):\n break\n print('** Please enter positive integer **')\n return Entry.select().where(Entry.time_spent == search_query)", "def test_4_data_fetching_unix_time_and_insertion(self):\n d1 = date.today()\n dt1 = datetime(d1.year, d1.month, d1.day) + timedelta(hours=8)\n result, success = self.fitness.get_columns_given_range(dt1, dt1+timedelta(days=1))\n self.assertTrue(success)\n self.assertEqual(result[0]['Datetime'],self.unix_time)", "def GetPriorUniquePoint(lap: gps_pb2.Lap,\n point_c: gps_pb2.Point) -> gps_pb2.Point:\n index = -1\n point = lap.points[-1]\n while point.time.ToNanoseconds() == 
point_c.time.ToNanoseconds():\n index -= 1\n point = lap.points[index]\n return point", "def get_closest_minute(t):\n ts = dt.datetime.utcfromtimestamp(t/1000)\n s = ts.second\n if s < 30:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute)\n else:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute) + dt.timedelta(minutes=1)", "def validity_by_time(self):\n conn = psycopg2.connect(self.conn)\n permissable_maximum_age_secs = 600 # 600s = 10mins\n query = \"SELECT time FROM steve_sense_sensor_logs ORDER BY time DESC LIMIT 1\"\n cur = conn.cursor()\n cur.execute(query)\n queryResult = cur.fetchall()\n age_seconds = (datetime.datetime.now(\n timezone.utc) - queryResult[0][0]).seconds\n cur.close()\n conn.close()\n if age_seconds > permissable_maximum_age_secs:\n print(\"Check Sensor, last sample is \"+str(age_seconds)+\" old\")\n return False\n else:\n return True", "def fetch_entry(unique_id, time_stamp):\n print('Fetching items with unique_id: {}'.format(unique_id))\n entry_exists = False\n item = None\n try:\n resp = TIME_TABLE.get_item(Key={'uniqueId': unique_id, 'timeStamp': time_stamp})\n print(resp)\n item = resp.get('Item')\n print(item)\n if item:\n entry_exists = True\n except Exception as e:\n print('Unique Item does not exists: {0}. Error: {1}'.format(unique_id, e))\n\n return entry_exists, item", "def findFirstHigh(thisStFile):\n with open(thisStFile) as f:\n reader = csv.DictReader(f, delimiter='\\t')\n for row in reader:\n return datetime.datetime.strptime(row['time'], fmt)", "def closest_point_in_cloud(point, cloud):\n data = sort_points(point, cloud)\n return data[0]", "def subset_by_time(example_dict, first_time_unix_sec, last_time_unix_sec):\n\n error_checking.assert_is_integer(first_time_unix_sec)\n error_checking.assert_is_integer(last_time_unix_sec)\n error_checking.assert_is_geq(last_time_unix_sec, first_time_unix_sec)\n\n good_indices = numpy.where(numpy.logical_and(\n example_dict[VALID_TIMES_KEY] >= first_time_unix_sec,\n example_dict[VALID_TIMES_KEY] <= last_time_unix_sec\n ))[0]\n\n for this_key in ONE_PER_EXAMPLE_KEYS:\n if isinstance(example_dict[this_key], list):\n example_dict[this_key] = [\n example_dict[this_key][k] for k in good_indices\n ]\n else:\n example_dict[this_key] = (\n example_dict[this_key][good_indices, ...]\n )\n\n return example_dict, good_indices", "def check_time_since_last_data(device_origin):\n actual_time = time.time()\n sec_since_last_data = actual_time - mon_item.read_device_status_values(device_origin)[1]\n min_since_last_data = sec_since_last_data / 60\n min_since_last_data = int(min_since_last_data)\n latest_data_hr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(latest_data))\n return min_since_last_data", "def test_get_between_datetime_same_microseconds(self):\n now = datetime.datetime.utcnow()\n start_dt = testdata.get_past_datetime(now)\n stop_dt = testdata.get_between_datetime(start_dt, now)\n self.assertGreater(stop_dt, start_dt)", "def get(self, target_time=None):\n if target_time is None:\n target_time = Servo.ctime()\n size = len(self.history)\n idx = -1\n with History.lock:\n while idx >= -size:\n data = self.history[idx]\n timestamp, position = data[0], data[1:]\n if timestamp <= target_time:\n return data\n elif idx - 1 > -size:\n prev_timestamp = self.history[idx - 1]\n if prev_timestamp <= target_time:\n return self.history[idx - 1] # It's better to interpolate\n else:\n idx -= 1\n continue\n else:\n return self.history[-size]", "def get_time_ceiling(time, data):\n if time >= 
data.index.max():\n return data.iloc[-1]\n elif time <= data.index.min():\n return data.iloc[0]\n return data[str(time):].iloc[0]", "def get_unixtime(self):\n if not self.Complete():\n raise DateTimeError(\"get_unixtime requires complete timepoint\")\n zoffset = self.time.GetZoneOffset()\n if zoffset is None:\n raise DateTimeError(\"get_unixtime requires timezone\")\n elif zoffset == 0:\n zt = self\n else:\n zt = self.ShiftZone(zDirection=0)\n days = zt.date.GetAbsoluteDay() - EPOCH.date.GetAbsoluteDay()\n seconds = zt.time.GetTotalSeconds() - EPOCH.time.GetTotalSeconds()\n return 86400 * days + seconds", "def get_index_before_time(messages, time):\n # Getting the timestamp that specifies the cutoff.\n timestamp = messages[0].timestamp + time\n\n for index, message in enumerate(messages):\n if message.timestamp > timestamp:\n return index - 1", "def find_nearest_stop(self, gtfs, trip_id, lat, long):\n stop_ids = [stop_times['stop_id'] for stop_times in gtfs[\"stop_times\"][trip_id]]\n min_distance = 10000000\n closest_stop = stop_ids[0]\n for stop_id in stop_ids:\n stop = gtfs[\"stops\"][stop_id]\n distance = geopy.distance.distance((lat, long), (stop['stop_lat'], stop['stop_lon'])).km\n if distance < min_distance:\n closest_stop_id = stop_id\n min_distance = distance\n return closest_stop_id", "def _FindNearestAnat(self, acqtime):\n tdiff_min = 1e6\n for anat in self.entry_map['anat']:\n if self.info[anat]['type'] == 'T1High' and \\\n self.info[anat]['InversionTime'] > 0.:\n tdiff = abs(acqtime - self.info[anat]['acqtime'])\n if tdiff < tdiff_min:\n tdiff_min = tdiff\n anat_min = anat\n return anat_min", "def takeclosest(takecloselist, takecloseint):\n pos = bisect_left(takecloselist, takecloseint)\n if pos == 0:\n return takecloselist[0]\n if pos == len(takecloselist):\n return takecloselist[-1]\n before = takecloselist[pos - 1]\n after = takecloselist[pos]\n if after - takecloseint < takecloseint - before:\n return after\n else:\n return before", "def _matchTime(self, time: float):\n return self._comparator['Time'] < time", "def get_booking_at(self, datetime):\n for booking in self.booking_set.all():\n if booking.schedule_start <= datetime < booking.schedule_end and not booking.is_cancelled():\n return booking\n return None", "def at_time(self, time):\n return self._collection.at_time(time)", "def test_searchSince(self):\n self.assertTrue(\n self.server.search_SINCE(self.earlierQuery, self.seq, self.msg))\n self.assertTrue(\n self.server.search_SINCE(self.sameDateQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_SINCE(self.laterQuery, self.seq, self.msg))", "def unix_to_timestamp(unix):\n return int(round(unix * 1e6))", "def FromUnixTime(cls, unixTime):\n utcTuple = pytime.gmtime(0)\n t, overflow = Time.FromStructTime(utcTuple).Offset(seconds=unixTime)\n d = Date.FromStructTime(utcTuple).Offset(days=overflow)\n return cls(date=d, time=t.WithZone(zDirection=0))", "def get_best_time():\n\n # Always remember about db.session.rollback() when debugging\n\n max_timing = db.session.query(func.min(Game.timing)).filter(Game.status == \"won\").first()\n time_user = db.session.query(Game.timing, User.username).join(User).filter(Game.timing == max_timing, Game.status == \"won\").first()\n\n return time_user", "def getValueAt(self, time):\n for tvp in self.timeValuePairs:\n if time <= tvp[0]:\n return tvp[1]\n return self.defaultValue", "def _parse_timestamp(self, api_time):\n return (\n pendulum.parse(api_time)\n if api_time is not None\n else pendulum.from_timestamp(-1)\n )", 
"def findguidingstop(starttime, event_list):\n for r in event_list:\n if r[0]==6 and r[1]+datetime.timedelta(seconds=0)>starttime: return r[1]\n return None", "def olderThan(record, value=Constants.older_than):\n # Check first if the record is valid\n if isRecordNameValid(record):\n # Get the record values and split them - year:month:day:hour:minute\n splitted_record = record.split(':')\n # Get the current date and time from the system and split them\n splitted_date = getTime().split(':')\n # Check if the year, month and day are the same\n if splitted_record[0] == splitted_date[0] \\\n and splitted_record[1] == splitted_date[1] \\\n and splitted_record[2] == splitted_date[2]:\n # Change the record hour in minutes and add the record minutes - how many minutes since the record stored\n record_minutes = int(splitted_record[3]) * 60 + int(splitted_record[4])\n # Change the current hour in minutes and add the current minutes - how many minutes passed today\n minutes = int(splitted_date[3]) * 60 + int(splitted_date[4])\n # Check if the difference is bigger than the constant - the record is too old and must be deleted\n if int(minutes - record_minutes) >= value:\n return True\n else:\n return False\n else:\n # The record is way too old - at least 1 day\n return True\n # The record is not valid and must be deleted\n return True", "def test_larger_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = datetime(2012, 9, 20, 2, 59)\n rhs = datetime(2012, 9, 20, 3, 00)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)", "def findguidingstart(starttime, event_list):\n for r in event_list:\n if r[0]==5 and r[1]>starttime: return r[1]\n return None", "def get_next_available_open_timeset(\n a_timestamp: str, list_of_timesets: list, debug_mode: bool = False\n) -> dict:\n\n results = {\"next_free_timeset\": None, \"reached_end_of_list\": True}\n\n sorted_list_of_timesets = sorted(list_of_timesets, key=lambda k: k[0])\n\n filtered_list_of_timesets = []\n for timeset in sorted_list_of_timesets:\n if datetime.fromisoformat(a_timestamp) <= datetime.fromisoformat(timeset[1]):\n filtered_list_of_timesets.append(timeset)\n\n # get rid of timesets that end before timestamp\n if filtered_list_of_timesets != sorted_list_of_timesets:\n print_time_data(\n \"Next available_timeset: filtering effect from:\",\n sorted_list_of_timesets,\n debug_mode,\n )\n print_time_data(\n \"Next available_timeset: filtering effect to:\",\n filtered_list_of_timesets,\n debug_mode,\n )\n\n # the last timeset triggers some actions. However if the last is also the first\n # i.e. 
list of 1 timeset, then its too early to set off the trigger\n index_of_last_timeset = (len(filtered_list_of_timesets) - 1) or 1\n\n temp_timestamp = a_timestamp\n\n for timeset_index, timeset in enumerate(filtered_list_of_timesets):\n if datetime.fromisoformat(timeset[0]) > datetime.fromisoformat(temp_timestamp):\n\n results[\"next_free_timeset\"] = [temp_timestamp, timeset[0]]\n if timeset_index != index_of_last_timeset:\n results[\"reached_end_of_list\"] = False\n\n print_time_data(\n \"Next available_timeset: Going to break: current timeset\",\n timeset,\n debug_mode,\n )\n print_time_data(\n \"Next available_timeset: Going to break: timestamp\",\n temp_timestamp,\n debug_mode,\n )\n print_time_data(\n \"Next available_timeset: Going to break: results\", results, debug_mode\n )\n break\n\n temp_timestamp = timeset[1]\n\n # Check if the found timeset has a startTime\n # inside another timeset\n if results[\"next_free_timeset\"]:\n temp_timeset = validate_update_timestamp(\n results[\"next_free_timeset\"], filtered_list_of_timesets, debug_mode\n )\n results[\"next_free_timeset\"] = temp_timeset\n\n print_time_data(\"Next available_timeset: Final results\", results, debug_mode)\n\n return results", "def iso_from_unix_time(unix_time: float, precision: int = 9) -> ISOTimestamp:\n frac_part, int_part = math.modf(unix_time)\n\n seconds = int(int_part)\n\n if frac_part < 0:\n seconds -= 1\n frac_part += 1\n\n decimals = f\"{{0:.{precision}f}}\".format(frac_part)[1:].rstrip('0.') # noqa\n\n return _from_unix_time(seconds, decimals)", "def time(self,orid_time,window=5):\n #{{{ Function to get possible matches of events for some epoch time.\n\n results = {}\n\n #\n # If running in simple mode we don't have access to the tables we need\n #\n if config.simple:\n return results\n\n orid_time = _isNumber(orid_time)\n\n if not orid_time:\n print \"Not a valid number in function call: %s\" % orid_time\n return\n \n start = float(orid_time)-float(window)\n end = float(orid_time)+float(window)\n\n dbname = self.dbcentral(orid_time)\n\n if not db:\n print \"No match for orid_time in dbcentral object: (%s,%s)\" % (orid_time,self.dbcentral(orid_time))\n return\n\n try: \n db = datascope.dbopen( dbname , 'r' )\n db.lookup( table='origin')\n db.query(datascope.dbTABLE_PRESENT) \n except Exception,e:\n print \"Exception on Events() time(%s): Error on db pointer %s [%s]\" % (orid_time,db,e)\n return\n\n db.subset( 'time >= %f' % start )\n db.subset( 'time <= %f' % end )\n\n try:\n db = datascope.dbopen( dbname , 'r' )\n db.lookup( table='wfdisc' )\n records = db.query(datascope.dbRECORD_COUNT)\n\n except:\n records = 0\n\n if records:\n\n for i in range(records):\n\n db.record = i\n\n (orid,time) = db.getv('orid','time')\n\n orid = _isNumber(orid)\n time = _isNumber(time)\n results[orid] = time\n\n return results", "def closest_approach_to_camera(scene, speaker_object) -> (float, int):\n max_dist = sys.float_info.max\n at_time = scene.frame_start\n for frame in range(scene.frame_start, scene.frame_end + 1):\n scene.frame_set(frame)\n rel = speaker_object.matrix_world.to_translation() - scene.camera.matrix_world.to_translation()\n dist = norm(rel)\n\n if dist < max_dist:\n max_dist = dist\n at_time = frame\n\n return max_dist, at_time", "def get(self, column, key, time):\n # Check connection\n self._checkInit()\n \n # Split the time string\n (year,month,day,hour) = self._splitTime(time)\n\n # Construct the query\n query = \"SELECT {} FROM {} WHERE {} ORDER BY TIMESTAMP DESC LIMIT 1\".format(\n column,\n 
self.table,\n self._exactMatchClause(key,year,month,day,hour)) \n\n #logging.debug(\"query: \\\"{}\\\"\".format(query))\n\n # Get Connection\n cnx = self.getConnection()\n cur = cnx.cursor()\n cur.execute(query)\n retval = None\n for fields in cur:\n retval = fields[0]\n break\n cur.close()\n cnx.close() \n return retval", "def closest(self, x):\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the negative\n\n x = arg.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam) # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)", "def find_closest_trajectory(cls, **kwargs):\n # if we can find an approximation that works to two\n # decimal places, just return that\n ideal_min_pitch = kwargs[\"pitch\"] - \\\n kwargs.get(\"ideal_min_pitch_differential\", cls.IDEAL_DIFFERENTIAL)\n ideal_max_pitch = kwargs[\"pitch\"] + \\\n kwargs.get(\"ideal_min_pitch_differential\", cls.IDEAL_DIFFERENTIAL)\n\n ideal_min_roll = kwargs[\"roll\"] - \\\n kwargs.get(\"ideal_min_roll_differential\", cls.IDEAL_DIFFERENTIAL)\n ideal_max_roll = kwargs[\"roll\"] + \\\n kwargs.get(\"ideal_min_roll_differential\", cls.IDEAL_DIFFERENTIAL)\n\n # find trajectories that we are good with even if they aren't the absolute\n # best\n ideal_trajectory = SolvedTrajectory.objects.filter(\n pitch__gt=ideal_min_pitch,\n roll__gt=ideal_min_roll\n ).filter(\n pitch__lt=ideal_max_pitch,\n roll__lt=ideal_max_roll)\n ideal_trajectory = ideal_trajectory.first()\n\n # if we found something in the ideal trajectory, just return that!\n if ideal_trajectory:\n best_trajectory = ideal_trajectory\n best_match_score = cls.get_match_score(\n best_trajectory, kwargs[\"pitch\"], kwargs[\"roll\"])\n\n # otherwise, we expand our filter and include more results\n else:\n\n # determine bounds on the pitch and the roll\n # of the trajectory we will return\n min_pitch = kwargs[\"pitch\"] - kwargs[\"min_pitch_differential\"]\n max_pitch = kwargs[\"pitch\"] + kwargs[\"min_pitch_differential\"]\n\n min_roll = kwargs[\"roll\"] - kwargs[\"min_roll_differential\"]\n max_roll = kwargs[\"roll\"] + kwargs[\"min_roll_differential\"]\n\n # determine the candidate trajectories\n candidate_trajectories = SolvedTrajectory.objects.filter(\n pitch__gt=min_pitch,\n roll__gt=min_roll\n ).filter(\n pitch__lt=max_pitch,\n roll__lt=max_roll\n )\n\n # determine the best match from what we have available\n best_trajectory = None\n best_match_score = float(\"inf\")\n\n for trajectory in candidate_trajectories:\n match_score = cls.get_match_score(\n trajectory, kwargs[\"pitch\"], kwargs[\"roll\"])\n\n if match_score < best_match_score:\n best_trajectory = trajectory\n best_match_score = match_score\n\n # calculate the norm of the deviation\n deviation = math.sqrt(best_match_score)\n return best_trajectory.file_name, deviation", "def get_data_at_time(self, time=None, tolerance=0.0):\n if time is None:\n # If time is not specified, assume we want the entire time\n # set. Skip all the overhead, don't create a new object, and\n # return self.\n return self\n is_iterable = _is_iterable(time)\n time_iter = iter(time) if is_iterable else (time,)\n indices = []\n # Allocate indices list dynamically to support a general iterator\n # for time. 
Not sure if this will ever matter...\n for t in time_iter:\n if t in self._time_idx_map:\n idx = self._time_idx_map[t]\n else:\n idx = find_nearest_index(self._time, t, tolerance=tolerance)\n if idx is None:\n raise RuntimeError(\n \"Time point %s is invalid within tolerance %s\" % (t, tolerance)\n )\n indices.append(idx)\n if not is_iterable:\n indices = indices[0]\n return self.get_data_at_time_indices(indices)", "def exists_at_time(e, t):\n t0 = 0\n for l, x in e:\n t1 = t0 + l\n if t > t0 and t < t1:\n return x\n t0 = t1\n return x", "def since(self, ts):\n spec = {'ts': {'$gt': ts}}\n cursor = self.query(spec)\n while True:\n # todo: trap InvalidDocument errors:\n # except bson.errors.InvalidDocument as e:\n # logging.info(repr(e))\n for doc in cursor:\n yield doc\n if not cursor.alive:\n break\n time.sleep(1)", "def test_larger_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = datetime(2012, 9, 20, 3, 45)\n rhs = datetime(2012, 9, 20, 2, 45)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def test_blind_delete_with_datetime(self):\r\n uid = uuid4()\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n TestTimestampModel.get(id=uid).should.be.ok\r\n\r\n plus_five_seconds = datetime.now() + timedelta(seconds=5)\r\n\r\n TestTimestampModel.objects(id=uid).timestamp(plus_five_seconds).delete()\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n TestTimestampModel.get(id=uid)\r\n\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n TestTimestampModel.get(id=uid)", "def _get_closest_eth1_voting_period_start_block(\n self, timestamp: Timestamp\n ) -> BlockNumber:\n # Compare with the largest recoreded block timestamp first before querying\n # for the latest block.\n # If timestamp larger than largest block timestamp, request block from eth1 provider.\n if (\n self._largest_block_timestamp is None\n or timestamp > self._largest_block_timestamp\n ):\n try:\n block = self._eth1_data_provider.get_block(\"latest\")\n except BlockNotFound:\n raise Eth1MonitorValidationError(\"Fail to get latest block\")\n if block.timestamp <= timestamp:\n return block.number\n else:\n block_number = block.number\n # Try the latest `self._num_blocks_confirmed` blocks until we give up\n for i in range(1, self._num_blocks_confirmed + 1):\n lookback_number = block_number - i\n if lookback_number < 0:\n break\n else:\n shifted_block = BlockNumber(lookback_number)\n block = self._eth1_data_provider.get_block(shifted_block)\n if block.timestamp <= timestamp:\n return block.number\n raise Eth1BlockNotFound(\n \"Can not find block with timestamp closest\"\n \"to voting period start timestamp: %s\",\n timestamp,\n )\n else:\n # NOTE: It can be done by binary search with web3 queries.\n # Regarding the current block number is around `9000000`, not sure if it is worthwhile\n # to do it through web3 with `log(9000000, 2)` ~= 24 `getBlock` queries.\n # It's quite expensive compared to calculating it by the cached data\n # which involves 0 query.\n\n # Binary search for the right-most timestamp smaller than `timestamp`.\n all_timestamps = tuple(self._block_timestamp_to_number.keys())\n target_timestamp_index = bisect.bisect_right(all_timestamps, timestamp)\n # Though `index < 0` should never happen, check it for safety.\n if target_timestamp_index <= 0:\n raise Eth1BlockNotFound(\n \"Failed to find the closest eth1 voting period start block to \"\n f\"timestamp {timestamp}\"\n )\n 
else:\n # `bisect.bisect_right` returns the index we should insert `timestamp` into\n # `all_timestamps`, to make `all_timestamps` still in order. The element we are\n # looking for is actually `index - 1`\n index = target_timestamp_index - 1\n target_key = all_timestamps[index]\n return self._block_timestamp_to_number[target_key]", "def unix_timestamp_date(unix=1459141485):\n return datetime.datetime.fromtimestamp(int(unix))", "def get_closest_rt_match(self, name_rt_dict):\n abs_dic = {}\n for index, name_rt in name_rt_dict.items():\n dup_name = name_rt[0]\n dup_rt = name_rt[1]\n for name, rt in self.std_temp_dict.items():\n if name == dup_name:\n abs_value = abs(dup_rt - rt)\n abs_dic[index] = abs_value\n\n print(abs_dic)\n keep_index = min(abs_dic, key=lambda x: abs_dic.get(x))\n print(\"The keep_index is\", keep_index) # Return this and then use it as below.\n\n return keep_index", "def _validate_timestamp(timestamp):\n dts = datetime.datetime.utcnow()\n current_time = round(time.mktime(dts.timetuple()) + dts.microsecond/1e6)\n if (timestamp - current_time) > SYNC_TOLERANCE:\n raise InvalidTransaction(\n 'Timestamp must be less than local time.'\n ' Expected {0} in ({1}-{2}, {1}+{2})'.format(\n timestamp, current_time, SYNC_TOLERANCE))", "def filter_lower_datetime(time, list_time):\n return [t for t in list_time if t <= time]", "def _closest_opponent_to_object(self, raw_obs, o):\n min_d = None\n closest = None\n for p in raw_obs['right_team']:\n d = self._object_distance(o, p)\n if min_d is None or d < min_d:\n min_d = d\n closest = p\n assert closest is not None\n return closest", "def get_time_from_db():\n if not (date := select(date for date in LastestCheck).order_by(lambda date: desc(date.id)).first()):\n date = LastestCheck(timestamp=int(datetime.timestamp(datetime.now())))\n commit()\n return date.timestamp", "def to_python_datetime(unix_timestamp):\n return datetime.datetime.fromtimestamp(int(unix_timestamp),\n pytz.timezone(settings.TIME_ZONE))", "def get_specific_nc_timeindex(fname,\n time_value,\n time_varname='time'):\n\n assert isinstance(time_value, datetime.date)\n\n nc_fid = netCDF4.Dataset(fname, 'r')\n time_values = netCDF4.num2date(nc_fid.variables[time_varname][:],\n nc_fid.variables[time_varname].units,\n nc_fid.variables[time_varname].calendar)\n nearest_index = 0\n nearest_value = time_values[nearest_index]\n nearest_diff = time_difference(nearest_value, time_value)\n for idx, tvalue in enumerate(time_values):\n this_diff = time_difference(tvalue, time_value)\n if this_diff < nearest_diff:\n nearest_index = idx\n nearest_value = time_values[idx]\n nearest_diff = this_diff\n\n return nearest_index", "def _xid_pick_only_closest(self, xid):\n\t\tif len(xid) == 1:\n\t\t\tself._xid_sanity_check(xid)\n\n\t\telif len(xid) > 1: \n\t\t\tprint(\"[hscObj] multiple objects found, picking the closest\")\n\t\t\txid = self._resolve_multiple_sources(xid)\n\t\t\tself._xid_sanity_check(xid)\n\n\t\telif len(xid) < 1:\n\t\t\tprint(\"[hscObj] no object found\")\n\t\t\txid = None\n\n\t\treturn xid", "def testTimestamps(self):\n predicate = \"metadata:predicate\"\n subject = \"aff4:/metadata:8\"\n\n # Extend the range of valid timestamps returned from the table to account\n # for potential clock skew.\n start = long(time.time() - 60) * 1e6\n data_store.DB.Set(subject, predicate, \"1\", token=self.token)\n\n (stored, ts) = data_store.DB.Resolve(subject, predicate, token=self.token)\n\n # Check the time is reasonable\n end = long(time.time() + 60) * 1e6\n\n self.assert_(ts >= start and ts 
<= end)\n self.assertEqual(stored, \"1\")", "def get_closest_bus_stop(due_time, stop_src, stop_ids, route_id):\n\n # Get index of where the SRC stop is in the tupple to serve as the high-bound, and store that position in original. Also, store the original due time, as it will be needed\n high_bound = 0\n original = 0\n original_due_time = due_time\n for i in range(0, len(stop_ids)):\n if str(stop_ids[i]) == stop_src:\n high_bound = i\n original = i\n break\n\n # Innitialize pointer to be halfway between the lowbound (set to 0 index) and the highbound (the SRC stop).\n pointer = original//4\n low_bound = 0\n\n # Optimally we want to find the stop where our bus is just 1 minute away, for better accuracy. But sometimes that is not possible, so we will\n # need to look for a bus further away. This variable, arrival_within_minutes, starts with 1 minutes, and will be increased as necessary.\n arrival_within_minutes = 1\n\n # Search until we find where the bus is\n while True:\n last_due_time = 0\n # Search while our due time is not 'Due' or within the specified minutes\n while due_time != 'Due' or int(due_time) > arrival_within_minutes:\n # Once more, get the buses for the stop we are currently looking at\n first_3_buses = get_due_time(str(stop_ids[pointer]), route_id)\n\n # Get just the first bus, since we already have the 3 buses from our SRC stop (this one is just looking for where one of those 3 buses is)\n possible_stop = filter_buses(first_3_buses)\n # Store the new due time, from the bus stop our binary algorithm selected\n new_due_time_due = possible_stop['duetime']\n\n # If the new due_time is the same as the last_due_time it means the algorithm got stuck without finding a better value, and we need to break, and change our\n # arrival_within_minutes for a longer time\n if new_due_time_due == last_due_time:\n break\n\n # If we found a 'Due' or within the arrival_within_minutes, return that index. 
That is the index of the stop where our bus is at/close to.\n if possible_stop['duetime'] == 'Due' or int(possible_stop['duetime']) <= arrival_within_minutes:\n # ('Found the bus with', new_due_time_due, 'minutes due time.')\n # This for loop is to check if the previous bus stop(s) have the same due time, and find a closer more accurae stop\n # print('Original pointer:', pointer)\n for i in range(pointer - 1, 0, -1):\n if new_due_time_due == (filter_buses(get_due_time(str(stop_ids[i]), route_id))['duetime']):\n pointer = i\n # print('New pointer:', pointer)\n else:\n break\n # Return the pointer, the index of the stop\n return pointer\n else:\n # If the due time at the possible stop is less than the one at SRC, we're on the right path, and need to look for a stop farther from the SRC\n if int(possible_stop['duetime']) < int(due_time):\n # Store the new, better due time\n due_time = possible_stop['duetime']\n # Change the highbound to the pointer and reduce our pointer again to halfway between lowbound and highbound\n high_bound = pointer\n pointer -= ((high_bound - low_bound)//4)\n else:\n # If the due time at the possible stop is bigger than the one at SRC, we've gone too far, and need to look for a stop closer to the SRC\n # The lowbound becomes the pointer and we move the pointer, again, to halfway between the lowbound and the highbound\n low_bound = pointer\n pointer += ((high_bound - low_bound)//4)\n # If we found a better (shortter) due time, we store this one for the next iteration and keep looking for an even better one\n last_due_time = new_due_time_due\n\n # If the algorithm comes to this part, it means we didn't find a stop where our bus was due wihin 1 (or more) minutes. So we need to increase the\n # arrival_within minutes to keep searching.\n arrival_within_minutes += 1\n\n # Reset our lowbound, highbound and pointer to restart the search\n low_bound = 0\n high_bound = original\n pointer = original // 4\n\n # If we start looking for a stop, previous to the SRC, were our bus has MORE duetime, we've gonne too far. Possibly, there are two buses running very close to one another,\n # and they may be due to our SRC stop at the same time (seen before too many times with the 17). 
In this case, we need to increase the original bound to take the stop where\n # we found the previous bus.\n if arrival_within_minutes > int(original_due_time):\n high_bound += 1\n return high_bound\n\n # Just a token return\n return 0", "def test_parse_time_unix_timestamp(self):\n self.assertEqual(\n parse_time(\"1422748800\", None), datetime(2015, 2, 1, 0, 0, 0))\n self.assertEqual(parse_time(\"0\", None), datetime(1970, 1, 1, 0, 0, 0))\n # The following are treated as unix timestamps, not YYYYMMDD strings.\n self.assertEqual(\n parse_time(\"19000101\", None), datetime(1970, 8, 8, 21, 48, 21))\n self.assertEqual(\n parse_time(\"20150132\", None), datetime(1970, 8, 22, 5, 15, 32))\n self.assertEqual(\n parse_time(\"20151301\", None), datetime(1970, 8, 22, 5, 35, 1))", "def test_subset_by_time(self):\n\n this_satellite_dict = satellite_io.subset_by_time(\n satellite_dict=copy.deepcopy(SATELLITE_DICT_ALL_EXAMPLES),\n desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC\n )[0]\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_SUBSET_BY_TIME\n ))", "def find_nearest_wav(self, wavelength):\n\n idx = np.searchsorted(self.wavelengths, wavelength, side=\"left\")\n if idx > 0 and (idx == len(self.wavelengths) or math.fabs(wavelength - self.wavelengths[idx-1]) < math.fabs(wavelength - self.wavelengths[idx])):\n return self.wavelengths[idx-1]\n else:\n return self.wavelengths[idx]", "def closest_element(self, where, cartesian=False, threshold=None, vincenty=False):\n if not vincenty:\n if cartesian:\n x, y = self.grid.xc, self.grid.yc\n else:\n x, y = self.grid.lonc, self.grid.latc\n dist = np.sqrt((x - where[0])**2 + (y - where[1])**2)\n else:\n grid_pts = np.asarray([self.grid.lonc, self.grid.latc]).T\n where_pt_rep = np.tile(np.asarray(where), (len(self.grid.lonc),1))\n dist = np.asarray([vincenty_distance(pt_1, pt_2) for pt_1, pt_2 in zip(grid_pts, where_pt_rep)])*1000\n\n index = np.argmin(dist)\n if threshold:\n if dist.min() < threshold:\n index = np.argmin(dist)\n else:\n index = None\n\n return index", "def find_nearest_spot(this_coord, coord_list, scale_z, scale_xy):\n closest_sed = np.inf\n closest_spot = 0\n for test_data in coord_list:\n test_spot_id = test_data[0]\n test_coords = (test_data[1:4])\n sed = sq_euc_distance(test_coords, this_coord, scale_z, scale_xy)\n if (sed < closest_sed):\n closest_sed = sed\n closest_spot = test_spot_id\n closest_spot_coords = test_coords\n return closest_spot, np.sqrt(closest_sed), closest_spot_coords", "def _get_offset(self, seconds, default):\n now_nanosec = time.time_ns()\n target_nanosec = now_nanosec - utils.seconds_in_nano(seconds)\n\n query_values = {\n \"target_nanosec\": target_nanosec\n }\n\n self._cursor.execute(f\"\"\"\n SELECT rowid\n FROM {self._table_name}\n WHERE timestamp >= :target_nanosec\n ORDER BY timestamp\n LIMIT 1;\"\"\", query_values)\n\n offset_row = self._cursor.fetchone()\n rowid = offset_row[0] if offset_row else default\n\n return rowid - 1", "def _mktime(self):\n epoch = datetime(1970, 1, 1)\n max_fold_seconds = 24 * 3600\n t = (self - epoch) // timedelta(0, 1)\n\n def local(u):\n y, m, d, hh, mm, ss = _time.localtime(u)[:6]\n return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1)\n\n # Our goal is to solve t = local(u) for u.\n a = local(t) - t\n u1 = t - a\n t1 = local(u1)\n if t1 == t:\n # We found one solution, but it may not be the one we need.\n # Look for an earlier solution (if `fold` is 0), or a\n # later one (if `fold` is 1).\n u2 = u1 + (-max_fold_seconds, 
max_fold_seconds)[self.fold]\n b = local(u2) - u2\n if a == b:\n return u1\n else:\n b = t1 - u1\n assert a != b\n u2 = t - b\n t2 = local(u2)\n if t2 == t:\n return u2\n if t1 == t:\n return u1\n # We have found both offsets a and b, but neither t - a nor t - b is\n # a solution. This means t is in the gap.\n return (max, min)[self.fold](u1, u2)", "def from_unix(cls, seconds, milliseconds=0):\n return datetime.datetime.fromtimestamp(seconds + milliseconds * .001)", "def __find_nearest_weathers(\n self, timestamp: datetime, weather_list: list,\n ) -> dict:\n if (\n timestamp.tzinfo is None\n ): # If timestamp is naive (tzinfo = None),\n # make it so that it is the same as weather_list timestamp.\n timestamp = timestamp.replace(tzinfo=weather_list[0].date.tzinfo)\n\n beforeWeathers = list(\n filter(\n lambda x: timestamp >= x.date - timedelta(minutes=1),\n weather_list,\n ),\n )\n afterWeathers = list(\n filter(lambda x: timestamp < x.date, weather_list),\n )\n before = None\n beforeSeconds = 999999999999999999999999999\n after = None\n afterSeconds = 999999999999999999999999999\n\n for bw in beforeWeathers:\n if timestamp > bw.date:\n t = timestamp - bw.date\n else:\n t = bw.date - timestamp\n if beforeSeconds > t.seconds:\n before = bw\n beforeSeconds = t.seconds\n for aw in afterWeathers:\n if timestamp > aw.date:\n t = timestamp - aw.date\n else:\n t = aw.date - timestamp\n if afterSeconds > t.seconds:\n after = aw\n afterSeconds = t.seconds\n return {\n 'before': {'weather': before, 'seconds': beforeSeconds},\n 'after': {'weather': after, 'seconds': afterSeconds},\n }", "def closest_distance(self, time, other_object, other_time):\n ti = np.where(self.times == time)[0][0]\n oti = np.where(other_object.times == other_time)[0][0]\n xs = self.x[ti].ravel()[self.masks[ti].ravel() == 1]\n xs = xs.reshape(xs.size, 1)\n ys = self.y[ti].ravel()[self.masks[ti].ravel() == 1]\n ys = ys.reshape(ys.size, 1)\n o_xs = other_object.x[oti].ravel()[other_object.masks[oti].ravel() == 1]\n o_xs = o_xs.reshape(1, o_xs.size)\n o_ys = other_object.y[oti].ravel()[other_object.masks[oti].ravel() == 1]\n o_ys = o_ys.reshape(1, o_ys.size)\n distances = (xs - o_xs) ** 2 + (ys - o_ys) ** 2\n return np.sqrt(distances.min())", "def find_last_value(self, value, closest=False):\n value = pd.to_datetime(value)\n value = column.as_column(value).as_numerical[0]\n return self.as_numerical.find_last_value(value, closest=closest)", "def obtain_time_duration(collection, new_document):\r\n\r\n # Obtain the previously existing two document for the incoming bizLocation\r\n # Sort them in descending order\r\n # The first in the list is the newly inserted document detected by Change Streams\r\n # the second document is of interest\r\n prev_documents = collection.find({'epcList.epc': new_document['epcList'][0]['epc']}).limit(2).sort([(\"eventTime\", DESCENDING)])\r\n\r\n if prev_documents is not None:\r\n # if there is a previous set of documents\r\n prev_doc_list = list(prev_documents)\r\n # print(prev_doc_list)\r\n if len(prev_doc_list) == 1:\r\n logger.info('Only Single entry exists for Product.. 
It implies it is the a new product with no previous events.')\r\n return None\r\n else:\r\n logger.debug('Previous BizLocation of Product: {}, Present BizLocation of Product: {}'.format(\r\n prev_doc_list[1]['bizLocation']['id'], new_document['bizLocation']['id']))\r\n logger.debug('Time Duration: From {} to {}'.format(prev_doc_list[1]['eventTime'], new_document['eventTime']))\r\n\r\n # make the dictionary to return\r\n duration = {\r\n 'bizLocation': {\r\n 'prev': prev_doc_list[1]['bizLocation']['id'],\r\n 'present': new_document['bizLocation']['id']\r\n },\r\n 'from_time': prev_doc_list[1]['eventTime'].isoformat(timespec='milliseconds') + 'Z',\r\n 'to_time': new_document['eventTime'].isoformat(timespec='milliseconds') + 'Z'\r\n }\r\n # print(duration)\r\n return duration\r\n else:\r\n logger.info('No Previous Information of Event Found')\r\n return None", "def filter_times(timestamps, time_difference):\n timestamps = sorted(set(timestamps))\n\n filtered_timestamps = []\n for current_timestamp in timestamps:\n if not filtered_timestamps or current_timestamp - filtered_timestamps[-1] > time_difference:\n filtered_timestamps.append(current_timestamp)\n\n return filtered_timestamps", "def _closest_date(target_dt, date_list, before_target=None) -> datetime.date | None:\n\n def time_before(d):\n return target_dt - d if d <= target_dt else datetime.timedelta.max\n\n def time_after(d):\n return d - target_dt if d >= target_dt else datetime.timedelta.max\n\n def any_time(d):\n return target_dt - d if d < target_dt else d - target_dt\n\n if before_target is None:\n return min(date_list, key=any_time).date()\n if before_target:\n return min(date_list, key=time_before).date()\n else:\n return min(date_list, key=time_after).date()", "def check_time_borders(self, sam_ev, ):\n mask = np.logical_and(sam_ev['timeMJD'] > self.DataStart,\n sam_ev['timeMJD'] < self.DataEnd)\n return sam_ev[mask]", "def findspan(self, u):\n #if u >= self.kv[-self.p-1]:\n # return self.kv.size - self.p - 2 # last interval\n #else:\n # return self.kv.searchsorted(u, side='right') - 1\n return pyx_findspan(self.kv, self.p, u)", "def get_last_entry_time():\r\n try:\r\n last_entry_time = list(mongo_coll_tweets.find().sort(\r\n [(\"_id\", -1)]).limit(1))[0][\"_id\"].generation_time\r\n except:\r\n last_entry_time = 0\r\n\r\n return last_entry_time", "def get_scan_by_time(self, time):\n scan_ids = tuple(self.index)\n lo = 0\n hi = len(scan_ids)\n while hi != lo:\n mid = (hi + lo) // 2\n sid = scan_ids[mid]\n sid = sid.decode('utf-8')\n scan = self.get_scan_by_id(sid)\n if not self._validate(scan):\n sid = scan_ids[mid - 1]\n scan = self.get_scan_by_id(sid)\n if not self._validate(scan):\n sid = scan_ids[mid - 2]\n scan = self.get_scan_by_id(sid)\n\n scan_time = scan.scan_time\n if scan_time == time:\n return scan\n elif (hi - lo) == 1:\n return scan\n elif scan_time > time:\n hi = mid\n else:\n lo = mid\n if hi == 0 and not self._use_index:\n raise TypeError(\"This method requires the index. 
Please pass `use_index=True` during initialization\")", "def observation_for_closest(\n lat: float, lon: float, lang: str = _DEFAULT_LANG, num_stations_to_try: int = 3\n) -> Tuple[Dict, Dict]:\n assert lang in _SUPPORTED_LANGS\n\n stations = closest_stations(lat, lon, limit=num_stations_to_try)\n for s in stations:\n o = observation_for_station(s[\"id\"], lang=lang)\n if o[\"results\"] and not o[\"results\"][0].get(\"err\") and o[\"results\"][0][\"valid\"]:\n return o, s\n return observation_for_station(stations[0][\"id\"], lang=lang), stations[0]", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def binary_search_time(values: list[any], low: int, high: int, target: int) -> any:\n if low <= high:\n middle_index = (low+high) // 2\n middle_value = values[middle_index]['time']\n if middle_value < target:\n return binary_search_time(values, middle_index+1, high, target)\n elif middle_value > target:\n return binary_search_time(values, low, middle_index-1, target)\n else:\n return values[middle_index]\n return values[high]", "def find_conflict(self, time):\n node = self.root\n wait_time = self.wait_time\n\n while node is not None: \n lower_limit = node.key - wait_time\n upper_limit = node.key + wait_time\n \n if time > lower_limit and time < upper_limit: \n return node\n if time < node.key: \n node = node.left\n else: \n node = node.right\n return None" ]
[ "0.6383025", "0.57725835", "0.5726713", "0.5603155", "0.550198", "0.5465888", "0.51996434", "0.51091826", "0.49394408", "0.49366295", "0.49245772", "0.48886275", "0.48809275", "0.48312008", "0.48274845", "0.4817998", "0.4760067", "0.47593623", "0.47202304", "0.47175002", "0.4663312", "0.4646897", "0.46306196", "0.4605473", "0.46016923", "0.45864683", "0.45778677", "0.45742598", "0.45709598", "0.4556572", "0.44782287", "0.44638187", "0.44625032", "0.44500652", "0.44321182", "0.44237286", "0.44229278", "0.4411701", "0.43894556", "0.43883225", "0.43815115", "0.4377511", "0.43738", "0.43644208", "0.4358279", "0.43360013", "0.43313226", "0.43262392", "0.43261746", "0.43214053", "0.43187287", "0.42907608", "0.42834088", "0.42813873", "0.42751548", "0.42689177", "0.4260629", "0.4252465", "0.42509535", "0.424769", "0.4241173", "0.42321485", "0.42314976", "0.4228018", "0.42275235", "0.4227106", "0.422132", "0.41999498", "0.4195137", "0.41850537", "0.4179975", "0.41785333", "0.41784263", "0.41742566", "0.415735", "0.41515374", "0.4150833", "0.41500276", "0.41482443", "0.4146893", "0.41410995", "0.41370693", "0.4132076", "0.41258848", "0.4104681", "0.41036394", "0.40837675", "0.40771002", "0.40754423", "0.4069814", "0.4066251", "0.4049001", "0.40455782", "0.4032667", "0.4029973", "0.4029847", "0.40284482", "0.4027219", "0.40258557", "0.40145198" ]
0.84365386
0
Pulls in the records from other into self, but since the timestamps won't match up perfectly, the output will only have one record per $period seconds.
def merge_with(self, other, period=60):
    new_list = []
    last_timestamp = 0
    for r in self.records:
        if abs(r.unix_time - last_timestamp) > period:
            # Accept this record
            last_timestamp = r.unix_time
            other_r = other.get_record(r.unix_time, period/2)
            r.merge_with(other_r)
            new_list.append(r)
    self.records = new_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result", "def merge_new(dfc, pairs, span=None):\n global last_update\n t1 = Timer()\n columns = ['open', 'close', 'trades', 'volume', 'buy_ratio']\n exclude = ['_id','high','low','quote_vol','sell_vol', 'close_time']\n projection = dict(zip(exclude, [False]*len(exclude)))\n idx, data = [], []\n db = app.get_db()\n\n if span is None and last_update:\n # If no span, query/merge db records inserted since last update.\n oid = ObjectId.from_datetime(last_update)\n last_update = now()\n _filter = {'_id':{'$gte':oid}}\n else:\n # Else query/merge all since timespan.\n span = span if span else timedelta(days=7)\n last_update = now()\n _filter = {'pair':{'$in':pairs}, 'close_time':{'$gte':now()-span}}\n\n batches = db.candles.find_raw_batches(_filter, projection)\n\n if batches.count() < 1:\n return dfc\n\n try:\n ndarray = bsonnumpy.sequence_to_ndarray(\n batches,\n dtype,\n db.candles.count()\n )\n except Exception as e:\n log.error(str(e))\n return dfc\n #raise\n\n df = pd.DataFrame(ndarray)\n df['open_time'] = pd.to_datetime(df['open_time'], unit='ms')\n df['freq'] = df['freq'].str.decode('utf-8')\n df['pair'] = df['pair'].str.decode('utf-8')\n\n df['freq'] = df['freq'].replace('1m',60)\n df['freq'] = df['freq'].replace('5m',300)\n df['freq'] = df['freq'].replace('1h',3600)\n df['freq'] = df['freq'].replace('1d',86400)\n df = df.sort_values(by=['pair','freq','open_time'])\n\n df2 = pd.DataFrame(df[columns].values,\n index = pd.MultiIndex.from_arrays(\n [df['pair'], df['freq'], df['open_time']],\n names = ['pair','freq','open_time']),\n columns = columns\n ).sort_index()\n\n df3 = pd.concat([dfc, df2]).drop_duplicates().sort_index()\n\n 
log.debug(\"{:,} records loaded into numpy. [{:,.1f} ms]\".format(\n len(df3), t1))\n #print(\"Done in %s ms\" % t1)\n return df3", "def _fill_results(self,spec,measurements,period,duration):\r\n logging.info(\"Fill measurements for spec {0}\".format(spec))\r\n \r\n if self._verb==mplane.model.VERB_QUERY:\r\n \"\"\"\r\n Query according to the time specified in the specification\r\n \"\"\"\r\n (first_time,last_time) = spec.when().datetimes()\r\n first_time=int(first_time.replace(tzinfo=datetime.timezone.utc).timestamp())\r\n last_time=int(last_time.replace(tzinfo=datetime.timezone.utc).timestamp())\r\n sleep_time = 0\r\n else:\r\n \"\"\"\r\n Query from NOW\r\n \"\"\"\r\n first_time = int(time.time())\r\n if (len(measurements[1])>0 or len(measurements[2])>0) and period<=self._pvsr_default_conf_check_cycle:\r\n #there are newly created or modified measurements\r\n first_time = first_time + self._pvsr_default_conf_check_cycle\r\n if first_time % period > 0:\r\n first_time = first_time - (first_time % period)\r\n last_time = first_time + int(duration / period) * period\r\n sleep_time = duration\r\n\r\n logging.debug(\"From: {0}, To: {1}\".format(datetime.datetime.fromtimestamp(first_time),datetime.datetime.fromtimestamp(last_time)))\r\n \r\n meas_data = {}\r\n\r\n while True:\r\n logging.info(\"Wait {0} seconds\".format(sleep_time))\r\n time.sleep(sleep_time)\r\n sleep_time = 30\r\n \r\n loaded_until=self._pvsr.getLastLoadedDataTimestamp(period)\r\n if int(loaded_until.timestamp())>=last_time or time.time()>last_time+period+300:\r\n for i in (0,1,2):\r\n for j in range(len(measurements[i])):\r\n self._fill_meas_result(measurements[i][j],first_time,last_time,meas_data)\r\n break\r\n else:\r\n logging.debug(\"last loaded is still {0}\".format(loaded_until))\r\n \r\n res = mplane.model.Result(specification=spec)\r\n res.set_when(mplane.model.When(a = datetime.datetime.utcfromtimestamp(first_time+period), b = datetime.datetime.utcfromtimestamp(last_time)))\r\n \r\n tmp_time=first_time+period\r\n row_index=0\r\n while tmp_time<=last_time:\r\n tmp_time2 = datetime.datetime.fromtimestamp(tmp_time)\r\n tmp_time3 = datetime.datetime.utcfromtimestamp(tmp_time)\r\n res.set_result_value(\"time\", tmp_time3, row_index)\r\n if tmp_time2 in meas_data:\r\n for mplane_name in meas_data[tmp_time2]:\r\n value = str(meas_data[tmp_time2][mplane_name])\r\n res.set_result_value(mplane_name, value, row_index)\r\n row_index+=1\r\n tmp_time+=period\r\n \r\n return res", "def filter_time_match(file1, file2):\n freq1 = int(file1.split(\".\")[1].split(\"_\")[1].replace(\"M\", \"\"))\n freq2 = int(file2.split(\".\")[1].split(\"_\")[1].replace(\"M\", \"\"))\n df1, df2 = filter_overlapping_files_dfs(file1, file2)\n\n dt1 = pandas.to_datetime(df1[\"date\"] + \" \" + df1[\"hour\"])\n dt2 = pandas.to_datetime(df2[\"date\"] + \" \" + df2[\"hour\"])\n\n dt_delta = datetime.timedelta(minutes=freq2 - freq1)\n time_match_df1 = dt1.copy()\n time_match_df2 = dt2.copy()\n for idx, dt in dt2.items():\n match = dt1[(dt1 >= dt) & (dt1 <= dt + dt_delta)]\n time_match_df1[match.index] = idx\n time_match_df2[idx] = 0\n time_match_df2[idx] = tuple(match.index)\n\n time_match_df2[time_match_df2.apply(len) != 10]\n return time_match_df1, time_match_df2", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = 
int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def test_find_df_period(self):\n test_search_df = pd.read_csv(DF_PATH)\n result_1 = find_df_period(test_search_df, 'pickup_datetime', 6)\n p_time_periods_1 = result_1['time_period'].tolist()\n p_intervals_1 = [2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4]\n\n result_2 = find_df_period(test_search_df, 'pickup_datetime', 4)\n p_time_periods_2 = result_2['time_period'].tolist()\n p_intervals_2 = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2]\n\n self.assertTrue(p_time_periods_1 == p_intervals_1)\n self.assertTrue(p_time_periods_2 == p_intervals_2)", "def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n\n all_uniques = [] # storing a list with all the unique date_times \n which_k_in_dt = {} # list of avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n \"\"\" Loop over all the datasets \n k: name of the dataset\n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ]\"\"\"\n\n for k,v in self.datasets.items() :\n self.unique_dates[k] = {}\n for F in v: \n self.unique_dates[k][F] = {}\n \n self.unique_dates[k][F]['indices'] = {} \n self.unique_dates[k][F]['index_offset_next'] = 0 # to be replaced later when slicing \n self.unique_dates[k][F]['index_offset'] = 0 # to be replaced later when slicing \n\n unique_dt = list(data[k][F]['recordtimestamp'])\n \n indices = list(data[k][F]['recordindex'])\n all_uniques += unique_dt # adding to the total unique date_times \n\n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n\n if dt not in which_k_in_dt.keys():\n which_k_in_dt[dt] = {}\n if k not in which_k_in_dt[dt].keys():\n which_k_in_dt[dt][k] = [] \n if F not in which_k_in_dt[dt][k]:\n which_k_in_dt[dt][k].append(F)\n # at this point I have e.g. 
which_k_in_dt= {1990-01-01-12-00: {era5_1:[file1,file2] , ncar:[file3] } }\n\n self.unique_dates[k][F]['indices'][dt] = {}\n self.unique_dates[k][F]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n index_up = max(indices)+1000000 # dummy large number \n\n self.unique_dates[k][F]['indices'][dt]['up'] = index_up\n self.unique_dates[k][F]['up_to_dt_slice'] = data[k][F]['min_date'] \n \n\n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of *ALL* distinct dt values of all datasets and all files \n logging.debug('*** make_all_datetime finished ')", "def Merge(self, other):\n\n # Logging just in case\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: before\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: deleted\", %s);'\n %(other.persistant['id'], \n sql.FormatSqlValue('details',\n repr(other.persistant))))\n\n # Fields which can be summed\n for f in ['plays', 'skips']:\n self.persistant[f] = (self.persistant.get(f, 0) +\n other.persistant.get(f, 0))\n\n # Date fields where we take the newest\n for f in ['last_played', 'last_skipped', 'last_action']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a > b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Date fields where we take the oldest\n for f in ['creation_time']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a < b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Fields where we only clobber ours if we don't have a value\n for f in ['artist', 'album', 'song']:\n if not self.persistant.has_key(f) or not self.persistant[f]:\n self.persistant[f] = other.persistant.get(f, None)\n\n # Sometimes the number is a placeholder\n if self.persistant.has_key('number') and self.persistant['number'] == -1:\n self.persistant['number'] = other.persistant.get('number', -1)\n if not self.persistant.has_key('number'):\n self.persistant['number'] = other.persistant.get('number', -1)\n\n # Update the id in the tags table\n tags = self.db.GetRows('select tag from tags where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: tags: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(tags))))\n\n try:\n self.db.ExecuteSql('update tags set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n except:\n # This can happen if the is already a matching tag for the first track\n pass\n\n # Update the id in the paths table\n paths = self.db.GetRows('select path from paths where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: paths: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(paths))))\n \n self.db.ExecuteSql('update paths set track_id=%d where track_id=%d;'\n %(self.persistant['id'], 
other.persistant['id']))\n self.db.ExecuteSql('commit;')\n\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: after\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('commit;')", "def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. \"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n 
cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container dictionary. At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n 
#combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates \"\"\"\n #dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n 
            except Exception:
                print("FAILED feedback variable ", k)

        del all_combined_era5fb
        print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000, cend)

        #### Writing combined header_table dic
        for k in all_combined_head[0].keys():
            print('head variable is', k)
            if (k == 'comments' or k == 'history'):
                continue
            try:
                tab = np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])
                self.write_merged(content='header_table', table={k: tab})  # { key: np.array([]) }
                logging.info('*** Written header table %s: ', k)
            except Exception:
                print('FFF FAILED variable in header table', k)

        del all_combined_head
        print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000, cend)

        self.write_merged(content='recordindex', table=di)
        self.write_merged(content='cdm_tables', table='')

        source_conf = xr.Dataset()
        source_files = np.array(source_files).astype(dtype='|S70')
        source_conf['source_file'] = ({'source_file': source_files.shape}, source_files)
        self.write_merged(content='source_configuration', table=source_conf)

        print(0)

        """ Concatenation of station_configurations """
        station_conf = pd.concat(station_configurations)
        for k in station_conf.columns:
            try:
                a = np.array(station_conf[k])
                self.write_merged(content='station_configuration', table={k: a})
                logging.debug('*** Written station_configuration %s: ', k)
            except Exception:
                print(" Failed station_configuration ", k)

        return 0


def pull(self, period):
    # Compile the regex expressions we'll use to parse the title text
    self.identity_regex = re.compile(r"(\d{4} \d{3} \d{3})\s{2,}(\S+)\s{2,}(\d{3} \d{3} \d{3} *\S*)")
    self.ats_regex = re.compile(r"ATS REFERENCE: (\S*)")
    self.municipality_regex = re.compile(r"MUNICIPALITY: (.*)")
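    # --- Editor's illustrative sketch (not original code): how one of the
    # --- patterns compiled above is typically applied; the sample text is
    # --- invented for demonstration only.
    #
    #   sample = "MUNICIPALITY: CALGARY\n"
    #   match = self.municipality_regex.search(sample)
    #   if match:                                  # search() returns None on no match
    #       municipality = match.group(1).strip()  # -> "CALGARY"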
self.reference_regex = re.compile(r\"REFERENCE NUMBER: (.*?)\\-{80}\", re.DOTALL)\n self.payday_regex = re.compile(r\"(\\-{80}).*(\\-{80})(.*)\", re.DOTALL)\n\n # Filter the dataframe by date and retrieve each title\n df = self.journal\n df = df[df['Registration Date'] >= period]\n\n df.to_pickle('run/{}.journal.pkl'.format(self.runtime))\n\n click.echo('Journal constructed and saved with timestamp {}'.format(self.runtime))\n\n # Set up structure for target DataFrame\n self.dataframe = pd.DataFrame(\n columns=[\n 'linc',\n 'short_legal',\n 'title_number',\n 'ats_reference',\n 'municipality',\n 'registration',\n 'registration_date',\n 'document_type',\n 'sworn_value',\n 'consideration',\n 'condo'\n ], index=df.index\n )\n\n with click.progressbar(df.iterrows(), label='Pulling basic title data', length=len(df)) as d:\n for index, row in d:\n try:\n payload = self.retrieve_title(index)\n self.dataframe.loc[index, 'linc'] = payload['linc']\n self.dataframe.loc[index, 'short_legal'] = payload['short_legal']\n self.dataframe.loc[index, 'title_number'] = payload['title_number']\n self.dataframe.loc[index, 'ats_reference'] = payload['ats_reference']\n self.dataframe.loc[index, 'municipality'] = payload['municipality']\n self.dataframe.loc[index, 'registration'] = payload['registration']\n self.dataframe.loc[index, 'registration_date'] = payload['date']\n self.dataframe.loc[index, 'document_type'] = payload['document_type']\n self.dataframe.loc[index, 'sworn_value'] = payload['value']\n self.dataframe.loc[index, 'consideration'] = payload['consideration']\n self.dataframe.loc[index, 'condo'] = payload['condo']\n except TypeError:\n pass\n\n self.dataframe['registration_date'] = pd.to_datetime(self.dataframe['registration_date'])\n self.dataframe['sworn_value'] = self.dataframe['sworn_value'].astype(float)\n self.dataframe['consideration'] = self.dataframe['consideration'].astype(float)\n self.dataframe['condo'] = self.dataframe['condo'].fillna(False).astype(bool)\n\n self.dataframe.to_pickle('run/{}.dataframe.pkl'.format(self.runtime))\n click.echo('Dataframe constructed and saved with timestamp {}'.format(self.runtime))\n\n return self.dataframe", "def merge_logfiles(log1, log2):\n first_in_2 = log2['time'][0]\n keep_from_1 = log1['time'] < first_in_2\n for key in log1.keys():\n log1[key] = log1[key][keep_from_1]\n log1.timeseries_append(log2)\n return log1", "def __init__(self, numQueues, rate, start_hour, end_hour, appt_low, appt_high):\n\n self.rate = rate\n self.numQueues = numQueues\n self.start = datetime.datetime.combine(datetime.date.today(), datetime.time(start_hour,0,0))\n self.end = datetime.datetime.combine(datetime.date.today(), datetime.time(end_hour,0,0))\n self.appt_low = appt_low\n self.appt_high = appt_high\n minutes_for_new_items = (end_hour-start_hour)*60 #new patients seen between 9AM and 4PM\n time_between_items = rate #exponential dist. time parameter\n self.expected_count = int(np.ceil(stats.poisson.ppf(.9999, minutes_for_new_items/time_between_items)))\n self.ques = [datetime.datetime.combine(datetime.datetime.today(), datetime.time(start_hour,0,0)) for i in range(0, self.numQueues)]\n cols = ['simulation', 'num_items', 'wait_count', 'avg_wait_time', 'close_time']\n self.results = pd.DataFrame(columns = cols)\n return", "def join_domain_time_span(domain_tables, span):\n joined_domain_tables = []\n \n for domain_table in domain_tables:\n #extract the domain concept_id from the table fields. E.g. 
condition_concept_id from condition_occurrence\n #extract the domain start_date column\n #extract the name of the table\n concept_id_field, date_field, table_domain_field = get_key_fields(domain_table) \n\n domain_table = domain_table.withColumn(\"date\", unix_timestamp(to_date(col(date_field)), \"yyyy-MM-dd\")) \\\n .withColumn(\"lower_bound\", unix_timestamp(date_add(col(date_field), -span), \"yyyy-MM-dd\"))\\\n .withColumn(\"upper_bound\", unix_timestamp(date_add(col(date_field), span), \"yyyy-MM-dd\"))\\\n .withColumn(\"time_window\", lit(1))\n \n #standardize the output columns\n joined_domain_tables.append(\n domain_table \\\n .select(domain_table[\"person_id\"], \n domain_table[concept_id_field].alias(\"standard_concept_id\"),\n domain_table[\"date\"],\n domain_table[\"lower_bound\"],\n domain_table[\"upper_bound\"],\n lit(table_domain_field).alias(\"domain\"))\n )\n \n return joined_domain_tables", "def fake_record_data():\n\n user_ids = [4, 4, 4, 4, 5,\n 5, 2, 6, 1, 2,\n 5, 7, 5, 1, 3,\n 3, 1, 4, 2, 3,\n 6, 4, 2, 7, 3,\n 3, 3, 6, 7, 6,\n 6, 7, 1, 7, 1,\n 8, 7, 1, 8, 4]\n\n days = [1519200000, 1519200000, 1519200000, 1519200000, 1519113600,\n 1519113600, 1519113600, 1519027200, 1519027200, 1519027200,\n 1518940800, 1518940800, 1518854400, 1518854400, 1518768000,\n 1518681600, 1518681600, 1518681600, 1518681600, 1518681600,\n 1518595200, 1518595200, 1518595200, 1518595200, 1518508800,\n 1518422400, 1518422400, 1518422400, 1518422400, 1518336000,\n 1518336000, 1518336000, 1518336000, 1518249600, 1518249600,\n 1518163200, 1518163200, 1518076800, 1517990400, 1517904000]\n\n for i, user_id in enumerate(user_ids):\n act_qty = random.randint(5, 13)\n selected_activities = set()\n\n for _ in range(0, act_qty):\n act_id = random.randint(1, 13)\n selected_activities.add(act_id)\n\n day = days[-(i + 1)]\n start = day + 33000\n total_time = 0\n\n for act_id in selected_activities:\n act_time = random.randint(120, 1000)\n\n start_t = start + total_time\n end_t = datetime.fromtimestamp(start_t + act_time)\n start_t = datetime.fromtimestamp(start_t)\n\n total_time += act_time\n\n print (str(user_id) + '|' + str(i + 1) + '|' + str(act_id) + '|' +\n str(start_t) + '|' + str(end_t))", "def update_period(self, period):\n\n # Update attribute\n self._period = period\n\n # Create new data and time\n shape = int(round(self._drate) * self._period + 1)\n new_data = OrderedDict([(ch, np.zeros(shape=shape)) for i, ch in enumerate(self.channels)])\n new_time = np.zeros(shape=shape)\n\n # Check whether new time and data hold more or less indices\n decreased = True if self._time.shape[0] >= shape else False\n\n if decreased:\n # Cut time axis\n new_time = self._time[:shape]\n\n # If filled before, go to 0, else go to 0 if currnt index is bigger than new shape\n if self._filled:\n self._idx = 0\n else:\n self._idx = 0 if self._idx >= shape else self._idx\n\n # Set wheter the array is now filled\n self._filled = True if self._idx == 0 else False\n\n else:\n # Extend time axis\n new_time[:self._time.shape[0]] = self._time\n\n # If array was filled before, go to last time, set it as offset and start from last timestamp\n if self._filled:\n self._idx = self._time.shape[0]\n self._start = self._timestamp\n self._offset = self._time[-1]\n\n self._filled = False\n\n # Set new time and data\n for ch in self.channels:\n if decreased:\n new_data[ch] = self._data[ch][:shape]\n else:\n new_data[ch][:self._data[ch].shape[0]] = self._data[ch]\n\n # Update\n self._time = new_time\n self._data = new_data", "def 
copy_many_from_temp(self,\r\n count):\r\n\r\n for counter in range(count):\r\n print(PERIOD,end=EMPTYCHAR)\r\n self.copy_from_temp(self.tempobject)\r\n self.constitute_key_freq_dict()\r\n print()", "def _write_to_dataset(parser1, parser2, dset, rundate):\n\n data_all1 = parser1.as_dict()\n data_all2 = parser2.as_dict()\n if parser1.file_path == parser2.file_path:\n collection = [data_all1]\n else:\n collection = [data_all1, data_all2]\n\n # Meta information\n dset.meta[\"tech\"] = \"slr\"\n dset.meta.add(\"file\", parser1.file_path.stem, section=\"input\")\n dset.meta.add(\"file\", parser2.file_path.stem, section=\"input\")\n dset.meta.add(\"type\", config.tech.obs_format.str.upper(), section=\"input\")\n\n # Make new dict \"obs_data\" containing only data in relevant time interval:\n arc_length = config.tech.arc_length.float\n rundate_datetime = datetime(rundate.year, rundate.month, rundate.day)\n obs_data = dict()\n for data_all in collection:\n for i, x in enumerate(data_all[\"meta\"][\"obs_time\"]):\n if rundate_datetime <= x < rundate_datetime + timedelta(days=arc_length):\n for key in (\"meta\", \"obs\", \"obs_str\"):\n for field, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(field, list()).append(val[i])\n\n data_all.pop(\"meta\")\n data_all.pop(\"obs\")\n data_all.pop(\"obs_str\")\n\n for key in data_all.keys():\n if key.startswith(\"met_\"):\n for key2, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(key2, list()).append(val)\n elif key.startswith(\"satellite_\"):\n # TODO: Use this information in the future?\n continue\n elif key.startswith(\"station_\"):\n # TODO: Use this information in the future?\n continue\n else:\n log.fatal(f\"Unknown data type{key}\")\n\n obs_date = obs_data[\"meta\"][\"obs_date\"]\n time = [obs_date[i] + timedelta(seconds=obs_data[\"meta\"][\"obs_sec\"][i]) for i in range(0, len(obs_date))]\n dset.num_obs = len(obs_data[\"meta\"][\"obs_time\"])\n dset.add_time(\"time\", val=time, scale=\"utc\", fmt=\"datetime\")\n dset.add_text(val=obs_data[\"meta\"][\"station\"], name=\"station\")\n dset.add_text(val=obs_data[\"meta\"][\"satellite\"], name=\"satellite\")\n dset.add_float(val=obs_data[\"meta\"][\"bin_rms\"], unit=\"picoseconds\", name=\"bin_rms\")\n # Positions\n trf = apriori.get(\"trf\", time=dset.time)\n for station in dset.unique(\"station\"):\n trf_site = trf[station]\n station_pos = trf_site.pos.trs.val\n log.debug(f\"Station position for {station} ({trf_site.name}) is (x,y,z) = {station_pos.mean(axis=0)}\")\n domes = trf_site.meta[\"domes\"]\n obs_data[\"pos_\" + station] = station_pos\n obs_data[\"station-other_\" + station] = dict(domes=domes, cdp=station, site_id=station)\n dset.add_position(\n \"site_pos\",\n time=dset.time,\n system=\"trs\",\n val=np.array([obs_data[\"pos_\" + s][idx] for idx, s in enumerate(dset.station)]),\n )\n # Station data\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station_\")])\n for field in sta_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"station_\" + s][field]) for s in dset.station]))\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n for field in sta_fields:\n dset.add_text(field, val=[obs_data[\"station-other_\" + s][field] for s in dset.station])\n\n # Station meta\n station_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n pos_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"pos_\")])\n\n 
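    # --- Editor's illustrative sketch (not original code): sorting both prefixed
    # --- key lists keeps each "station-other_*" entry aligned with its "pos_*"
    # --- counterpart in the zip() below, because both suffixes are station names.
    # --- The same pairing idea on invented data:
    #
    #   d = {'pos_7090': 1, 'station-other_7090': 2, 'pos_7840': 3, 'station-other_7840': 4}
    #   pos = sorted(k for k in d if k.startswith('pos_'))
    #   sta = sorted(k for k in d if k.startswith('station-other_'))
    #   pairs = list(zip(sta, pos))   # [('station-other_7090', 'pos_7090'), ('station-other_7840', 'pos_7840')]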
for sta_key, pos_key in zip(station_keys, pos_keys):\n sta_name = sta_key.replace(\"station-other_\", \"\")\n cdp = obs_data[sta_key][\"cdp\"]\n dset.meta.add(sta_name, \"site_id\", cdp)\n longitude, latitude, height, _ = sofa.iau_gc2gd(2, obs_data[pos_key][0, :]) # TODO: Reference ellipsoid\n dset.meta[\"station\"].setdefault(sta_name, {})[\"cdp\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"site_id\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"domes\"] = obs_data[sta_key][\"domes\"]\n dset.meta[\"station\"].setdefault(sta_name, {})[\"marker\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"description\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"longitude\"] = longitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"latitude\"] = latitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"height\"] = height\n\n # Satellite data\n sat_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"satellite_\")])\n for field in sat_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"satellite_\" + s][field]) for s in dset.satellite]))\n\n # Observations\n # In the dataset, obs_time is seconds since rundate:\n v = [\n (obs_data[\"meta\"][\"obs_date\"][i] - rundate_datetime).total_seconds() + obs_data[\"meta\"][\"obs_sec\"][i]\n for i in range(0, dset.num_obs)\n ]\n\n obs_data[\"obs\"].pop(\"obs_time\")\n dset.add_float(\"obs_time\", val=v)\n for field, values in obs_data[\"obs\"].items():\n dset.add_float(field, val=np.array(values))\n\n for field, values in obs_data[\"obs_str\"].items():\n dset.add_text(field, val=values)\n\n return obs_data", "def chunk_periods(start, end):\n\n logging.debug(f'chunking {start} to {end}')\n # convert the strings to datetime objects\n #start = dt.datetime.strptime(''.join(start.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S-%z')\n start = dt.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S-%z')\n logging.debug(f'start: {start}')\n periods = []\n\n # if the year and month of the period are the same, just return the dates as we got them\n\n\n\n return periods", "def stack_ps(ps1, ps2, keep_unique = False, fill_time = False, message = True):\n # create deepcopies to avoid changing original instances\n \n ps1 = copy.deepcopy(ps1)\n ps2 = copy.deepcopy(ps2)\n \n # create datetime information in PS instances\n \n try:\n _ = getattr(ps1, \"datetime\")\n except AttributeError:\n ps1.createTimeDate()\n \n try: \n _ = getattr(ps2, \"datetime\")\n except AttributeError:\n ps2.createTimeDate()\n \n # check time resolutions\n res1 = (dt.datetime.strptime(ps1.datetime['data'][1], ps1.datetime['units']) - dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units'])).seconds\n res2 = (dt.datetime.strptime(ps2.datetime['data'][1], ps2.datetime['units']) - dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units'])).seconds\n \n if abs(res1-res2) > 60:\n if message:\n print( (\"warning: resolutions differ %d seconds\")%(abs(res1-res2)) )\n \n # check if ps1 is \"older\" than ps2\n \n reversed_order = False\n cut = None\n \n if dt.datetime.strptime(ps1.datetime['data'][-1], ps1.datetime['units']) < dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units']):\n # ps2 starts after ps1 ends\n timediff = (dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units']) - dt.datetime.strptime(ps1.datetime['data'][-1], ps1.datetime['units'])).total_seconds()\n elif dt.datetime.strptime(ps2.datetime['data'][-1], ps2.datetime['units']) < 
dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units']):\n # ps1 starts after ps2 ends (user has inadvertently switched the order of the instances)\n reversed_order = True\n timediff = (dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units']) - dt.datetime.strptime(ps2.datetime['data'][-1], ps2.datetime['units'])).total_seconds()\n else:\n # yikes! The particle sizer instances have overlapping data\n # it is assumed that ps2 data replaces ps1 data starting \n # from the overlapping time\n cut, cutdate = tt.findNearestDate(ps1.datetime['data'], ps2.datetime['data'][0]) \n fill_time = False\n \n #print(timediff, 1.5*res1)\n # check if filling is required\n if fill_time is True:\n # check time difference\n if reversed_order:\n # ps1 starts after ps2 ends\n if timediff > 1.5*res2:\n # the time gap between two instances has to be\n # larger than twice the normal resolution\n numdates = int(np.ceil(timediff/res2))\n base = dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units'])\n date_list = [base - dt.timedelta(seconds=res2*x) for x in range(numdates)]\n date_list = list(reversed(date_list[1:]))# because numdates starts at 0, first date on date_list is the same as the startdate from the second instance\n datetimelist = [dt.datetime.strftime(dl, ps2.datetime['units']) for dl in date_list]\n ps2.datetime['data'] = np.append(ps2.datetime['data'], datetimelist)\n timelist = [dt.datetime.strftime(dl, ps2.time['units']) for dl in date_list]\n ps2.time['data'] = np.append(ps2.time['data'], timelist)\n datelist = [dt.datetime.strftime(dl, ps2.date['units']) for dl in date_list]\n ps2.date['data'] = np.append(ps2.date['data'], datelist)\n else:\n fill_time = False\n else:\n if timediff > 1.5*res1:\n # the time gap between two instances has to be\n # larger than twice the normal resolution\n numdates = int(np.ceil(timediff/res1))\n base = dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units'])\n date_list = [base - dt.timedelta(seconds=res1*x) for x in range(numdates)]\n date_list = list(reversed(date_list[1:])) # because numdates starts at 0, first date on date_list is the same as the startdate from the second instance\n datetimelist = [dt.datetime.strftime(dl, ps1.datetime['units']) for dl in date_list]\n ps1.datetime['data'] = np.append(ps1.datetime['data'], datetimelist)\n timelist = [dt.datetime.strftime(dl, ps1.time['units']) for dl in date_list]\n ps1.time['data'] = np.append(ps1.time['data'], timelist)\n datelist = [dt.datetime.strftime(dl, ps1.date['units']) for dl in date_list]\n ps1.date['data'] = np.append(ps1.date['data'], datelist)\n else:\n fill_time = False\n \n if message:\n print(\"reversed order:\", reversed_order)\n # check which attributes are similar in both instances\n if reversed_order:\n # ps1 starts after ps2 ends\n new_ps = copy.deepcopy(ps2)\n for attribute in ps1.__dict__.keys():\n if attribute in ps2.__dict__.keys():\n afield = getattr(new_ps, attribute)\n if attribute == 'diameter':\n st11, st12, st21, st22, diamlist = check_diameters(ps1.diameter['data'], ps2.diameter['data'], ps1.instrument_type)\n \n for var in new_ps.data['variables']:\n if fill_time is True:\n add = np.ma.zeros((ps2.data[var]['data'].shape[0],len(date_list))) \n add[:] = np.nan\n newdata = np.append(ps2.data[var]['data'],add,axis=1)\n ps2.data[var]['data'] = newdata\n \n sh1 = ps1.data[var]['data'].shape\n sh2 = ps2.data[var]['data'].shape\n newfields = (len(diamlist) ,sh1[1] + sh2[1])\n new_field = np.ma.zeros(newfields)\n new_field[:] = 
np.ma.masked\n \n new_field[st21:st22, 0:ps2.data[var]['data'][:,:cut].shape[1]] = ps2.data[var]['data'][:,:cut]\n new_field[st11:st12, ps2.data[var]['data'][:,:cut].shape[1]:] = ps1.data[var]['data']\n \n new_ps.data[var]['data'] = new_field\n \n afield['data'] = diamlist\n \n elif attribute == 'data':\n # data has been appended with diameters\n pass\n else:\n try:\n field_ps2 = getattr(ps2, attribute)\n field_ps1 = getattr(ps1, attribute)\n except TypeError:\n if attribute == 'header':\n pass\n else:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n try:\n data_ps2 = field_ps2['data']\n data_ps1 = field_ps1['data']\n if attribute in ['date', 'datetime', 'time']: # these have already been extended with the correct data\n afield['data'] = np.append(data_ps2[:cut], data_ps1)\n elif fill_time:\n add = np.ma.zeros(len(date_list))\n add[:] = np.nan\n afield['data'] = np.append(np.append(data_ps2[:cut],add), data_ps1)\n else:\n afield['data'] = np.append(data_ps2[:cut], data_ps1)\n except:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n \n else:\n if keep_unique:\n newattribute = getattr(ps1,attribute)\n newattribute['time'] = ps1['datetime']['data']\n setattr(new_ps, attribute, newattribute)\n else:\n pass\n if keep_unique is False:\n # get rid of attributes which were in ps2 but not in ps1\n for attribute in ps2.__dict__.keys():\n if attribute in ps1.__dict__.keys():\n pass\n else:\n delattr(new_ps, attribute)\n \n \n else:\n # ps2 starts after ps1 ends\n new_ps = copy.deepcopy(ps1)\n for attribute in ps2.__dict__.keys():\n if attribute in ps1.__dict__.keys():\n afield = getattr(new_ps, attribute)\n if attribute == 'diameter':\n st11, st12, st21, st22, diamlist = check_diameters(ps1.diameter['data'], ps2.diameter['data'], ps1.instrument_type)\n for var in new_ps.data['variables']:\n if fill_time is True:\n add = np.ma.zeros((ps1.data[var]['data'].shape[0],len(date_list))) \n add[:] = np.nan\n newdata = np.append(ps1.data[var]['data'],add,axis=1)\n ps1.data[var]['data'] = newdata\n \n sh1 = ps1.data[var]['data'].shape\n sh2 = ps2.data[var]['data'].shape\n newfields = (len(diamlist) ,sh1[1] + sh2[1])\n new_field = np.ma.zeros(newfields)\n new_field[:] = np.ma.masked\n \n new_field[st11:st12, 0:ps1.data[var]['data'][:,:cut].shape[1]] = ps1.data[var]['data'][:,:cut]\n new_field[st21:st22, ps1.data[var]['data'][:,:cut].shape[1]:] = ps2.data[var]['data']\n \n new_ps.data[var]['data'] = new_field\n \n afield['data'] = diamlist\n \n elif attribute == 'data':\n # data has been appended with diameters\n pass\n else:\n try:\n field_ps2 = getattr(ps2, attribute)\n field_ps1 = getattr(ps1, attribute)\n except TypeError:\n if attribute == 'header':\n pass\n else:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n try:\n data_ps2 = field_ps2['data']\n data_ps1 = field_ps1['data']\n if attribute in ['date', 'datetime', 'time']: # these have already been extended with the correct data\n afield['data'] = np.append(data_ps1[:cut], data_ps2)\n elif fill_time:\n add = np.ma.zeros(len(date_list))\n add[:] = np.nan\n afield['data'] = np.append(np.append(data_ps1[:cut],add), data_ps2)\n else:\n afield['data'] = np.append(data_ps1[:cut], data_ps2)\n except:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n \n else:\n if keep_unique:\n newattribute = getattr(ps2,attribute)\n newattribute['time'] = ps2['datetime']['data']\n setattr(new_ps, attribute,newattribute)\n else:\n pass\n if keep_unique is False:\n # get rid of 
attributes which were in ps2 but not in ps1\n for attribute in ps1.__dict__.keys():\n if attribute in ps2.__dict__.keys():\n pass\n else:\n delattr(new_ps, attribute)\n \n new_ps.sample['data'] = np.arange(1.0, len(new_ps.datetime['data'])+1)\n new_ps.instrument_type = ps1.instrument_type.split('_')[0] + '_concatenated'\n \n if message:\n print('filltime: ', fill_time)\n \n return new_ps", "def _periodically_create_records(self):\n # WINNERS holds the members that have 'won' this cycle\n winners = set()\n\n while True:\n now = time()\n start_climb = int(now / CYCLE_SIZE) * CYCLE_SIZE\n start_create = start_climb + CYCLE_SIZE * 0.5\n start_idle = start_climb + CYCLE_SIZE * 0.9\n start_next = start_climb + CYCLE_SIZE\n\n if start_climb <= now < start_create:\n yield start_create - now\n\n elif start_create <= now < start_idle and len(winners) < self._signature_count:\n logger.debug(\"c%d record creation phase. wait %.2f seconds until record creation\", int(now / CYCLE_SIZE), CYCLE_SIZE * 0.4 / self._signature_count)\n yield (CYCLE_SIZE * 0.4 / self._signature_count) * pythonrandlib.random()\n\n # find the best candidate for this cycle\n score = 0\n winner = None\n for member in self._slope.iterkeys():\n book = self.get_book(member)\n if book.score > score and not member in winners:\n winner = member\n\n if winner:\n logger.debug(\"c%d attempt record creation %s\", int(now / CYCLE_SIZE), winner.mid.encode(\"HEX\"))\n record_candidate = self._slope[winner]\n\n # prevent this winner to 'win' again in this cycle\n winners.add(winner)\n\n # # TODO: this may be and invalid assumption\n # # assume that the peer is online\n # record_candidate.history.set(now)\n\n self._dispersy.callback.unregister(record_candidate.callback_id)\n self.create_barter_record(record_candidate.candidate, winner)\n\n else:\n logger.debug(\"c%d no peers available for record creation (%d peers on slope)\", int(now / CYCLE_SIZE), len(self._slope))\n\n else:\n logger.debug(\"c%d second climbing phase. 
wait %d seconds until the next phase\", int(now / CYCLE_SIZE), start_next - now)\n assert now >= start_idle or len(winners) >= self._signature_count\n for record_candidate in self._slope.itervalues():\n self._dispersy.callback.unregister(record_candidate.callback_id)\n self._slope = {}\n winners = set()\n yield start_next - now", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog", "def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating 
the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0", "def _fill_day_dicts(self):\n today = datetime.date.today()\n for i, record in enumerate(self._dataset):\n if (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=30)).timetuple()):\n self._add_record(self._all30_dict, record, key=i)\n\n elif (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=60)).timetuple()):\n self._add_record(self._all60_dict, record, key=i)\n\n else:\n self._add_record(self._all90_dict, record, key=i)", "def merge(self, other, gap_method=\"slinear\", new_sample_rate=None):\n if new_sample_rate is not None:\n merge_sample_rate = new_sample_rate\n combine_list = [self.decimate(new_sample_rate).dataset]\n else:\n merge_sample_rate = self.sample_rate\n combine_list = [self.dataset]\n\n ts_filters = self.filters\n if isinstance(other, (list, tuple)):\n for run in other:\n if not isinstance(run, RunTS):\n raise TypeError(f\"Cannot combine {type(run)} with RunTS.\")\n\n if new_sample_rate is not None:\n run = run.decimate(new_sample_rate)\n combine_list.append(run.dataset)\n ts_filters.update(run.filters)\n else:\n if not isinstance(other, RunTS):\n raise TypeError(f\"Cannot combine {type(other)} with RunTS.\")\n\n if new_sample_rate is not None:\n other = other.decimate(new_sample_rate)\n combine_list.append(other.dataset)\n ts_filters.update(other.filters)\n\n # combine into a data set use override to keep attrs from original\n\n combined_ds = xr.combine_by_coords(\n combine_list, combine_attrs=\"override\"\n )\n\n n_samples = (\n merge_sample_rate\n * float(\n combined_ds.time.max().values - combined_ds.time.min().values\n )\n / 1e9\n ) + 1\n\n new_dt_index = make_dt_coordinates(\n combined_ds.time.min().values,\n merge_sample_rate,\n n_samples,\n self.logger,\n )\n\n run_metadata = self.run_metadata.copy()\n run_metadata.sample_rate = merge_sample_rate\n\n new_run = RunTS(\n run_metadata=self.run_metadata,\n station_metadata=self.station_metadata,\n survey_metadata=self.survey_metadata,\n )\n\n ## tried reindex then interpolate_na, but that has issues if the\n ## intial time index does not exactly match up with the new time index\n ## and then get a bunch of Nan, unless use nearest or pad, but then\n ## gaps are not filled correctly, just do a interp seems easier.\n new_run.dataset = combined_ds.interp(\n time=new_dt_index, method=gap_method\n )\n\n # update channel attributes\n for ch in new_run.channels:\n new_run.dataset[ch].attrs[\"time_period.start\"] = new_run.start\n new_run.dataset[ch].attrs[\"time_period.end\"] = new_run.end\n\n new_run.run_metadata.update_time_period()\n new_run.station_metadata.update_time_period()\n new_run.survey_metadata.update_time_period()\n new_run.filters = ts_filters\n\n return new_run", "def get_data(self):\n# epoch_from = 1301641200\n# epoch_to = epoch_from+60*60*24\n \"\"\"\n letting runs finish for 2 more hours\n ideally, want to make this a function of time from schedule plus some\n variation, like 1 hour just in case\n \"\"\" \n# epoch_to_adjusted = epoch_to + 7200\n conn = self.connect_to_mongo()\n db = conn.muni\n \n# print \"==== Collecting starting runs from %s to %s ====\"\\\n# % (str(time.ctime(epoch_from)), str(time.ctime(epoch_to)))\n \"\"\"\n > db.location.find({loc:{$within:{$center:[[37.80241, -122.4364],\n 
0.01]}}})\n > db.location.find({loc:{$within:{$center:[[37.76048, -122.38895],\n 0.002]}}})\n \"\"\"\n bus_ids = db.location.find({'route':self.route_name}).distinct(\"bus_id\")\n for bus_id in bus_ids:\n c_start = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.start_lat, self.start_lon],\n self.start_prec]}}\n }).sort(\"cur_time\", DESCENDING)\n self.massage_start_data(c_start)\n \"\"\"\n TODO: the end point seems to be too nice to Muni, need to tighten\n the circle a little\n \"\"\"\n c_end = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.end_lat, self.end_lon],\n self.end_prec]}}\n }).sort(\"cur_time\", ASCENDING)\n self.massage_end_data(c_end)\n if self.to_log:\n print self.start_bus_ids_to_times\n print self.end_bus_ids_to_times\n \n return self.start_bus_ids_to_times, self.end_bus_ids_to_times", "def generate_record(self, data_dictionaries, group_by):\n result = {}\n\n for one_measurement in data_dictionaries:\n time = one_measurement['datetime']\n\n if isinstance(time, str):\n if self.timezone:\n time = arrow.get(time).shift(hours=6) # TODO: fix utc conversion\n else:\n time = arrow.get(time)\n\n record = Record(self.name, self.lat, self.lon, self.height, time)\n\n del one_measurement['datetime']\n\n one_measurement = {k: float(v) for k, v in one_measurement.items()}\n\n record.merge(one_measurement)\n\n key = group_by(time)\n \n if key == '2016-04-01_00':\n break\n\n record_string = record.little_r_report()\n\n try:\n result[key].append(record_string)\n except KeyError:\n result[key] = [record_string]\n\n return result", "def extract_tt_by_periods(ttri, periods, start_time, end_time, filters):\n logger = getLogger(__name__)\n # sess = conn.get_session()\n das = {}\n all_wz_features = {}\n all_wz_laneconfigs = {}\n\n # collecting daily data\n for prd in periods:\n logger.debug('>>>> retrieving data for %s' % prd.get_date_string())\n year = prd.start_date.year\n sdate = prd.start_date\n edate = prd.end_date\n if year not in das:\n da_tt = tt.TravelTimeDataAccess(year)\n da_tt_wz = tt_workzone.TTWorkZoneDataAccess(year)\n da_tt_wz_feature = wz_feature.WZFeatureDataAccess()\n da_tt_wz_lncfg = wz_laneconfig.WZLaneConfigDataAccess()\n da_tt_weather = tt_weather.TTWeatherDataAccess(year)\n da_tt_snowmgmt = tt_snowmgmt.TTSnowManagementDataAccess(year)\n da_tt_incident = tt_incident.TTIncidentDataAccess(year)\n da_tt_specialevent = tt_specialevent.TTSpecialeventDataAccess(year)\n das[year] = (\n da_tt, da_tt_wz, da_tt_wz_feature, da_tt_wz_lncfg, da_tt_weather, da_tt_snowmgmt, da_tt_incident,\n da_tt_specialevent)\n\n (da_tt, da_tt_wz, da_tt_wz_feature, da_tt_wz_lncfg, da_tt_weather, da_tt_snowmgmt, da_tt_incident,\n da_tt_specialevent) = das[year]\n\n # traveltimes = da_tt.list_by_period(ttri.id, self.prd)\n weathers = da_tt_weather.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.WeatherInfo] \"\"\"\n workzones = da_tt_wz.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.WorkZoneInfo] \"\"\"\n incidents = da_tt_incident.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.IncidentInfo] \"\"\"\n snowmgmts = da_tt_snowmgmt.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.SnowManagementInfo] \"\"\"\n specialevents = da_tt_specialevent.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.SpecialEventInfo] \"\"\"\n traveltimes = 
da_tt.list_by_period(ttri.id, prd)\n \"\"\":type: list[pyticas_tetres.ttrms_types.TravelTimeInfo] \"\"\"\n\n if not any(weathers):\n logger.debug('>>>> end of retrieving data for %s (no weather data)' % prd.get_date_string())\n continue\n\n extras = {\n 'weathers': {_tt.id: [] for _tt in traveltimes},\n 'workzones': {_tt.id: [] for _tt in traveltimes},\n 'incidents': {_tt.id: [] for _tt in traveltimes},\n 'specialevents': {_tt.id: [] for _tt in traveltimes},\n 'snowmgmts': {_tt.id: [] for _tt in traveltimes},\n }\n \"\"\":type: dict[str, dict[int, list]]\"\"\"\n\n _put_to_bucket(ttri, weathers, extras['weathers'], da_tt_weather, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, workzones, extras['workzones'], da_tt_wz, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, incidents, extras['incidents'], da_tt_incident, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, snowmgmts, extras['snowmgmts'], da_tt_snowmgmt, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, specialevents, extras['specialevents'], da_tt_specialevent, year, all_wz_features, all_wz_laneconfigs, das)\n\n for tti in traveltimes:\n _tt_weathers = extras['weathers'][tti.id]\n extdata = ExtData(tti,\n _tt_weathers[0] if _tt_weathers else None,\n extras['incidents'][tti.id],\n extras['workzones'][tti.id],\n extras['specialevents'][tti.id],\n extras['snowmgmts'][tti.id])\n\n if start_time <= tti.str2datetime(tti.time).time() <= end_time:\n for ef in filters:\n try:\n ef.check(extdata)\n except Exception as ex:\n tb.traceback(ex)\n logger.debug('>>>> end of retrieving data for %s (error occured 1)' % prd.get_date_string())\n continue\n else:\n for ef in filters:\n try:\n ef.check_outofrange(extdata)\n except Exception as ex:\n tb.traceback(ex)\n logger.debug('>>>> end of retrieving data for %s (error occured 2)' % prd.get_date_string())\n continue\n\n del extras\n logger.debug('>>>> end of retrieving data for %s' % prd.get_date_string())\n\n # sess.close()", "async def fetch_all_periods_raw(self):\n self._logger.info(\"Fetching current period data\")\n await self._client.select_customer(self.account_id, self.customer_id)\n\n params = {'idContrat': '0' + self.contract_id}\n res = await self._client.http_request(CONTRACT_CURRENT_URL_3, \"get\", params=params)\n text_res = await res.text()\n\n headers = {\"Content-Type\": \"application/json\"}\n res = await self._client.http_request(CONTRACT_CURRENT_URL_2, \"get\", headers=headers)\n text_res = await res.text()\n # We can not use res.json() because the response header are not application/json\n json_res = json.loads(text_res)['results']\n\n self._all_periods_raw = json_res", "def aggregate_local_and_hist_klines(self, symbol, intervals): \n result = {}\n\n for interval in intervals:\n filename = PATH_HIST_KLINES[interval]\n file_klines = pd.DataFrame()\n\n request_begin = strat_begin = self.start_ts - 300000\n write_mode = 'w'\n\n try:\n file_klines = pd.read_csv(filename, index_col=0, names = [\"Open\",\"High\",\"Low\",\"Close\",\"Volume\",\"TurnOver\",\"Date\"])\n file_klines.loc[:,'Date'] = [datetime.fromtimestamp(i).strftime('%Y-%m-%d %H:%M:%S.%d')[:-3] for i in file_klines.index]\n newest = file_klines.tail(1).index[0]\n oldest = file_klines.head(1).index[0]\n if oldest - strat_begin > interval_bybit_notation(interval) * 60:\n request_begin = strat_begin\n write_mode = 'w'\n file_klines = pd.DataFrame()\n else:\n request_begin = int(newest) + interval_bybit_notation(interval) * 60\n write_mode = 
'a'\n except FileNotFoundError as err:\n pass\n\n bybit_klines = pd.DataFrame()\n if request_begin < int(datetime.now().timestamp()):\n output_data = self.bybit.get_hist_klines(symbol, interval_bybit_notation(interval), str(request_begin))\n column_data = [i[1:] for i in output_data]\n index = [int(i[0]) for i in output_data]\n # convert to data frame\n bybit_klines = pd.DataFrame(column_data, index = index, columns=['Open', 'High', 'Low', 'Close', 'Volume', 'TurnOver'])\n bybit_klines.loc[:,'Date'] = [datetime.fromtimestamp(i).strftime('%Y-%m-%d %H:%M:%S.%d')[:-3] for i in bybit_klines.index]\n\n bybit_klines.to_csv(filename, header = False, mode=write_mode)\n\n result[interval] = pd.concat([file_klines, bybit_klines]) if not len(bybit_klines.index) == 0 else file_klines\n\n return result", "def records(self, current=False):\n now = datetime.now()\n format = '%Y/%m/%d %H:%M:%S'\n for key in self.keys:\n contents = key.get_contents_as_string()\n (headers, contents) = self._parse_contents(contents)\n # Ignore the first line and any row that is not of type\n # `PayerLineItem`.\n for content in contents[1:]:\n content = dict(itertools.izip(headers, content))\n if content.get('RecordType') != 'PayerLineItem':\n continue\n start_date = datetime.strptime(\n content['BillingPeriodStartDate'], format\n )\n end_date = datetime.strptime(\n content['BillingPeriodEndDate'], format\n )\n if current:\n if start_date <= now <= end_date:\n yield content\n else:\n if start_date <= end_date <= now:\n yield content", "def get_all_periods(self, df):\n df_append = pd.DataFrame()\n for index, element in enumerate(self.periods):\n df_temp = self.get_period(df, element)\n df_append = df_append.append(df_temp)\n return(df_append.sort_index())", "def get_data(self, date_time):\n id_columns = ','.join([col for col in self.table_primary_keys if col not in ['EFFECTIVEDATE', 'VERSIONNO']])\n return_columns = ','.join(self.table_columns)\n with self.con:\n cur = self.con.cursor()\n cur.execute(\"DROP TABLE IF EXISTS temp;\")\n cur.execute(\"DROP TABLE IF EXISTS temp2;\")\n cur.execute(\"DROP TABLE IF EXISTS temp3;\")\n cur.execute(\"DROP TABLE IF EXISTS temp4;\")\n # Store just the unique sets of ids that came into effect before the the datetime in a temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp AS \n SELECT * \n FROM {table} \n WHERE EFFECTIVEDATE <= '{datetime}';\"\"\"\n cur.execute(query.format(table=self.table_name, datetime=date_time))\n # For each unique set of ids and effective dates get the latest versionno and sore in temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp2 AS\n SELECT {id}, EFFECTIVEDATE, MAX(VERSIONNO) AS VERSIONNO\n FROM temp\n GROUP BY {id}, EFFECTIVEDATE;\"\"\"\n cur.execute(query.format(id=id_columns))\n # For each unique set of ids get the record with the most recent effective date.\n query = \"\"\"CREATE TEMPORARY TABLE temp3 as\n SELECT {id}, VERSIONNO, max(EFFECTIVEDATE) as EFFECTIVEDATE\n FROM temp2\n GROUP BY {id};\"\"\"\n cur.execute(query.format(id=id_columns))\n # Inner join the original table to the set of most recent effective dates and version no.\n query = \"\"\"CREATE TEMPORARY TABLE temp4 AS\n SELECT * \n FROM {table} \n INNER JOIN temp3 \n USING ({id}, VERSIONNO, EFFECTIVEDATE);\"\"\"\n cur.execute(query.format(table=self.table_name, id=id_columns))\n # Inner join the most recent data with the interconnectors used in the actual interval of interest.\n query = \"\"\"SELECT {cols} FROM temp4 ;\"\"\"\n query = query.format(cols=return_columns)\n data = 
pd.read_sql_query(query, con=self.con)\n return data", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def extract(self, extract_from, extract_to):\n # Some API calls do not expect a TZ, so we have to remove the timezone\n # from the dates. We assume that all dates coming from upstream are\n # in UTC TZ.\n extract_from = extract_from.replace(tzinfo=None)\n extract_to = extract_to.replace(tzinfo=None)\n\n # Our records\n self.records = {}\n self.acc_records = {}\n\n # We cannot use just 'changes-since' in the servers.list() API query,\n # as it will only include servers that have changed its status after\n # that date. However we cannot just get all the usages and then query\n # server by server, as deleted servers are not returned by the usages\n # call. Moreover, Nova resets the start_time after performing some\n # actions on the server (rebuild, resize, rescue). If we use that time,\n # we may get a drop in the wall time, as a server that has been resized\n # in the middle of its lifetime will suddenly change its start_time\n #\n # Therefore, what we do is the following (hackish approach)\n #\n # 1.- List all the servers that changed its status after the start time\n # for the reporting period\n # 2.- Build the records for the period [start, end] using those servers\n # 3.- Get all the usages, being aware that the start time may be wrong\n # 4.- Iter over the usages and:\n # 4.1.- get information for servers that are not returned by the query\n # in (1), for instance servers that have not changed it status.\n # We build then the records for those severs\n # 4.2.- For all the servers, adjust the CPU, memory and disk resources\n # as the flavor may not exist, but we can get those resources\n # from the usages API.\n\n # Lets start\n\n # 1.- List all the deleted servers from that period.\n servers = self._get_servers(extract_from)\n # 2.- Build the records for the period. 
Drop servers outside the period\n # (we do this manually as we cannot limit the query to a period, only\n # changes after start date).\n self._process_servers_for_period(servers, extract_from, extract_to)\n\n # 3.- Get all the usages for the period\n usages = self._get_usages(extract_from, extract_to)\n # 4.- Iter over the results and\n # This one will also generate accelerator records if GPU flavors\n # are found.\n self._process_usages_for_period(usages, extract_from, extract_to)\n\n return list(self.records.values()) + list(self.acc_records.values())", "def compare_results_data(result1, result2):\n def average(r1, r2, delta):\n return (r2 - r1) / delta\n\n if (result1 and result2) and (len(result1) and len(result2)):\n results = result1.copy()\n\n time_delta = round(float(result2['timestamp']) - float(result1['timestamp']))\n results['time_delta'] = time_delta\n\n for key in result1['data'].keys():\n if key in result2['data']:\n results['data'][key]['value'] = round(\n average(float(result1['data'][key]['value']), float(result2['data'][key]['value']), time_delta),\n 2)\n\n results['timestamp'] = time.time()\n\n return results\n \n return {}", "def since(self, ts):\n while True:\n items = super(TailingOplog, self).since(ts)\n for doc in items:\n yield doc\n ts = doc['ts']", "def calculate_new_rating_period(start_datetime, end_datetime):\n # Create the rating period\n rating_period = models.RatingPeriod.objects.create(\n start_datetime=start_datetime, end_datetime=end_datetime\n )\n\n # Grab all games that will be in this rating period\n games = models.Game.objects.filter(\n datetime_played__gte=start_datetime, datetime_played__lte=end_datetime\n )\n\n # Mark all of the above games as belonging in this rating period\n for game in games:\n game.rating_period = rating_period\n game.save()\n\n # For each player, find all their matches, their scores in those\n # matches; then calculate their ratings. 
The new_ratings dictionary\n # contains players as keys, and dictionaries containing their new\n # rating parameters as the dictionary values.\n new_ratings = {}\n\n for player in models.Player.objects.all():\n # Don't calculate anything if they player's first game is prior\n # to this rating period\n first_game_played = player.get_first_game_played()\n\n if (\n first_game_played is None\n or first_game_played.datetime_played > end_datetime\n ):\n continue\n\n # Get the players rating parameters\n player_rating = player.rating\n player_rating_deviation = player.rating_deviation\n player_rating_volatility = player.rating_volatility\n player_inactivity = player.inactivity\n\n # Build up the per-game rating parameters of opponents\n opponent_ratings = []\n opponent_rating_deviations = []\n scores = []\n\n for won_game in games.filter(winner=player):\n opponent_ratings.append(won_game.loser.rating)\n opponent_rating_deviations.append(won_game.loser.rating_deviation)\n scores.append(1)\n\n for lost_game in games.filter(loser=player):\n opponent_ratings.append(lost_game.winner.rating)\n opponent_rating_deviations.append(\n lost_game.winner.rating_deviation\n )\n scores.append(0)\n\n # Convert empty lists (meaning no matches) to None types\n if not opponent_ratings:\n opponent_ratings = None\n opponent_rating_deviations = None\n scores = None\n\n new_player_rating, new_player_rating_deviation, new_player_rating_volatility = calculate_player_rating(\n r=player_rating,\n RD=player_rating_deviation,\n sigma=player_rating_volatility,\n opponent_rs=opponent_ratings,\n opponent_RDs=opponent_rating_deviations,\n scores=scores,\n )\n\n # Calculate new inactivity\n if opponent_ratings is None:\n new_player_inactivity = player_inactivity + 1\n else:\n new_player_inactivity = 0\n\n # Determine if the player is labelled as active\n new_player_is_active = bool(\n new_player_inactivity\n < settings.NUMBER_OF_RATING_PERIODS_MISSED_TO_BE_INACTIVE\n )\n\n new_ratings[player] = {\n \"player_ranking\": None,\n \"player_ranking_delta\": None,\n \"player_rating\": new_player_rating,\n \"player_rating_deviation\": new_player_rating_deviation,\n \"player_rating_volatility\": new_player_rating_volatility,\n \"player_inactivity\": new_player_inactivity,\n \"player_is_active\": new_player_is_active,\n }\n\n # Filter all active players and sort by rating\n new_active_player_ratings = [\n (player, new_rating[\"player_rating\"])\n for player, new_rating in new_ratings.items()\n if new_rating[\"player_is_active\"]\n ]\n new_active_player_ratings.sort(key=lambda x: x[1], reverse=True)\n\n # Form a tuple of active players where the order is their ranking\n new_active_player_rankings = [\n player for player, _ in new_active_player_ratings\n ]\n\n # Process new rankings and ranking changes\n num_active_players = len(new_active_player_rankings)\n\n for ranking, player in enumerate(new_active_player_rankings, 1):\n # Ranking\n new_ratings[player][\"player_ranking\"] = ranking\n\n # Ranking delta\n if player.ranking is None:\n new_ratings[player][\"player_ranking_delta\"] = (\n num_active_players - ranking + 1\n )\n else:\n new_ratings[player][\"player_ranking_delta\"] = (\n player.ranking - ranking\n )\n\n # Now save all ratings\n for player, ratings_dict in new_ratings.items():\n models.PlayerRatingNode.objects.create(\n player=player,\n rating_period=rating_period,\n ranking=ratings_dict[\"player_ranking\"],\n ranking_delta=ratings_dict[\"player_ranking_delta\"],\n rating=ratings_dict[\"player_rating\"],\n 
rating_deviation=ratings_dict[\"player_rating_deviation\"],\n rating_volatility=ratings_dict[\"player_rating_volatility\"],\n inactivity=ratings_dict[\"player_inactivity\"],\n is_active=ratings_dict[\"player_is_active\"],\n )", "def example_staypoints_merge():\n p1 = Point(8.5067847, 47.4)\n\n t1 = pd.Timestamp(\"1971-01-01 00:00:00\", tz=\"utc\")\n t2 = pd.Timestamp(\"1971-01-02 05:00:00\", tz=\"utc\")\n t3 = pd.Timestamp(\"1971-01-02 06:45:00\", tz=\"utc\")\n t4 = pd.Timestamp(\"1971-01-02 08:55:00\", tz=\"utc\")\n t45 = pd.Timestamp(\"1971-01-02 08:57:00\", tz=\"utc\")\n t5 = pd.Timestamp(\"1971-01-02 09:00:00\", tz=\"utc\")\n t6 = pd.Timestamp(\"1971-01-02 09:20:00\", tz=\"utc\")\n\n list_dict = [\n {\"id\": 1, \"user_id\": 0, \"started_at\": t1, \"finished_at\": t2, \"geom\": p1, \"location_id\": 1},\n {\"id\": 5, \"user_id\": 0, \"started_at\": t2, \"finished_at\": t2, \"geom\": p1, \"location_id\": 2},\n {\"id\": 2, \"user_id\": 0, \"started_at\": t3, \"finished_at\": t4, \"geom\": p1, \"location_id\": 2},\n {\"id\": 6, \"user_id\": 0, \"started_at\": t4, \"finished_at\": t45, \"geom\": p1, \"location_id\": 2},\n {\"id\": 15, \"user_id\": 0, \"started_at\": t5, \"finished_at\": t6, \"geom\": p1, \"location_id\": 2},\n {\"id\": 7, \"user_id\": 1, \"started_at\": t3, \"finished_at\": t4, \"geom\": p1, \"location_id\": 2},\n {\"id\": 80, \"user_id\": 1, \"started_at\": t45, \"finished_at\": t5, \"geom\": p1, \"location_id\": 2},\n {\"id\": 3, \"user_id\": 1, \"started_at\": t5, \"finished_at\": t6, \"geom\": p1, \"location_id\": 4},\n ]\n sp = gpd.GeoDataFrame(data=list_dict, geometry=\"geom\", crs=\"EPSG:4326\")\n sp = sp.set_index(\"id\")\n sp.as_staypoints\n\n # generate empty triplegs for the merge function\n tpls = pd.DataFrame([], columns=[\"user_id\", \"started_at\", \"finished_at\"])\n return sp, tpls", "def newChunk(self, data, sample_period):\n added = False\n found = False\n for r in self._receiving:\n if r[0] == data.timestamp:\n r[1][data.sensor - 1] = data\n found = True\n break\n if found is False:\n self._receiving.append((data.timestamp, [None] * self._sensors))\n self._receiving[-1][1][data.sensor - 1] = data\n self._receiving.sort(key=lambda x: x[0])\n\n # all data for given timestamp received\n while len(self._receiving) > 0:\n r = self._receiving[0]\n if r[1].count(None) > 0:\n break\n dl = len(data.accelerationX)\n timestamps = np.arange(r[0], r[0] + sample_period * dl, sample_period)\n\n def copy_data(start, l):\n self.data[\"timestamp\"][\n self.current_index : self.current_index + l\n ] = timestamps[start : start + l]\n for s in range(1, self._sensors + 1):\n self.data[f\"{s} X\"][\n self.current_index : self.current_index + l\n ] = r[1][s - 1].accelerationX[start : start + l]\n self.data[f\"{s} Y\"][\n self.current_index : self.current_index + l\n ] = r[1][s - 1].accelerationY[start : start + l]\n self.data[f\"{s} Z\"][\n self.current_index : self.current_index + l\n ] = r[1][s - 1].accelerationZ[start : start + l]\n\n l = min(dl, self._size - self.current_index)\n if l > 0:\n copy_data(0, l)\n self.current_index += l\n dl -= l\n\n if dl > 0:\n self.current_index = 0\n self.filled = True\n copy_data(l, dl)\n self.current_index = dl\n\n self._receiving.remove(r)\n added = True\n\n chunk_removed = False\n if len(self._receiving) > self._window:\n self._receiving = self._receiving[1:]\n chunk_removed = True\n return (added, chunk_removed)", "def merging_time_periods(time_periods, min_time_between_periods):\n n_periods = len(time_periods)\n merged_time_periods = 
[]\n index = 0\n while index < n_periods:\n time_period = time_periods[index]\n if len(merged_time_periods) == 0:\n merged_time_periods.append([time_period[0], time_period[1]])\n index += 1\n continue\n # we check if the time between both is superior at min_time_between_periods\n last_time_period = time_periods[index - 1]\n beg_time = last_time_period[1]\n end_time = time_period[0]\n if (end_time - beg_time) <= min_time_between_periods:\n # then we merge them\n merged_time_periods[-1][1] = time_period[1]\n index += 1\n continue\n else:\n merged_time_periods.append([time_period[0], time_period[1]])\n index += 1\n return merged_time_periods", "def experiment_periods():\n # Temperature readings start March 22 2004, loads start Feb 01 2004.\n # period1_start = pd.Period(\"2004-02-01 00:00\", \"H\")\n # period1_end = pd.Period(\"2005-07-01 00:00\", \"H\")\n # period2_start = pd.Period(\"2005-10-01 00:00\", \"H\")\n # period2_end = pd.Period(\"2006-10-01 00:00\", \"H\")\n period1_start = datetime.datetime.strptime(\"2004-02-01 00:00\", \"%Y-%m-%d %H:%M\")\n period1_end = datetime.datetime.strptime(\"2005-07-01 00:00\", \"%Y-%m-%d %H:%M\")\n period2_start = datetime.datetime.strptime(\"2005-10-01 00:00\", \"%Y-%m-%d %H:%M\")\n period2_end = datetime.datetime.strptime(\"2006-10-01 00:00\", \"%Y-%m-%d %H:%M\")\n return ((period1_start, period1_end), (period2_start, period2_end))", "def get_timeseries_data(self, table, datetime_start, datetime_end, timechunk=datetime.timedelta(hours=1)):\n table_schema = LMTDB_TABLES.get(table.upper())\n if table_schema is None:\n raise KeyError(\"Table '%s' is not valid\" % table)\n else:\n result_columns = ['TIMESTAMP'] + table_schema['columns']\n format_dict = {\n 'schema': ', '.join(result_columns).replace(\"TS_ID,\", \"TIMESTAMP_INFO.TS_ID,\"),\n 'table': table,\n }\n\n index0 = len(self.saved_results.get(table, {'rows': []})['rows'])\n chunk_start = datetime_start\n while chunk_start < datetime_end:\n if timechunk is None:\n chunk_end = datetime_end\n else:\n chunk_end = chunk_start + timechunk\n if chunk_end > datetime_end:\n chunk_end = datetime_end\n start_stamp = chunk_start.strftime(\"%Y-%m-%d %H:%M:%S\")\n end_stamp = chunk_end.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n query_str = \"\"\"SELECT\n %(schema)s\n FROM\n %(table)s\n INNER JOIN TIMESTAMP_INFO ON TIMESTAMP_INFO.TS_ID = %(table)s.TS_ID\n WHERE\n TIMESTAMP_INFO.TIMESTAMP >= %%(ps)s\n AND TIMESTAMP_INFO.TIMESTAMP < %%(ps)s\n \"\"\" % format_dict\n self.query(query_str, (start_stamp, end_stamp), table=table, table_schema=table_schema)\n if timechunk is not None:\n chunk_start += timechunk\n\n return self.saved_results[table]['rows'][index0:], result_columns", "def get_list_block_time_periods(filename_time_periods):\n\n if not hasattr(get_list_block_time_periods, \"cached\"):\n get_list_block_time_periods.cached = {}\n\n if filename_time_periods not in get_list_block_time_periods.cached:\n with open(filename_time_periods, 'r') as file:\n contents = csv.reader(file)\n next(contents)\n\n time_periods = []\n for row in contents:\n time_beginning = datetime.datetime.strptime(row[0], \"%Y-%m-%d %H:%M:%S\")\n time_ending = datetime.datetime.strptime(row[1], \"%Y-%m-%d %H:%M:%S\")\n\n time_periods.append((time_beginning, time_ending, row[2]))\n #print(\"List of time periods: \", time_periods)\n\n get_list_block_time_periods.cached[filename_time_periods] = time_periods\n\n return get_list_block_time_periods.cached[filename_time_periods]", "def _join_on_millisec(dfs: list):\n # Resample to milliseconds befor 
joining\n for idx, df in enumerate(dfs):\n df[\"sys_time_dt\"] = pd.to_datetime(df[\"sys_time\"], unit=\"ms\")\n df = df.set_index(\"sys_time_dt\")\n df = df.drop(columns=[\"sys_time\"])\n df = df[~df.index.duplicated(keep=\"last\")] # Remove index dups\n dfs[idx] = df.resample(\"10ms\").interpolate(method=\"time\")\n\n # Join resampled sensor data, drop NaNs, that might be generated for\n # start or end of session, because not all sensors start/end at same time\n df_joined = pd.concat(dfs, axis=1).dropna()\n\n # Add datetimeindex as ms\n df_joined[\"sys_time\"] = df_joined.index.astype(\"int64\") // 10 ** 6\n\n # Reset index to save memory\n df_joined = df_joined.reset_index(drop=True)\n\n return df_joined", "def _add(self, other):\n if isinstance(other, SeqPer):\n per1, lper1 = self.periodical, self.period\n per2, lper2 = other.periodical, other.period\n\n per_length = lcm(lper1, lper2)\n\n new_per = []\n for x in range(per_length):\n ele1 = per1[x % lper1]\n ele2 = per2[x % lper2]\n new_per.append(ele1 + ele2)\n\n start, stop = self._intersect_interval(other)\n return SeqPer(new_per, (self.variables[0], start, stop))", "def _get_unit_records(self, start_time):\r\n\r\n if self.optMTTF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT DISTINCT MIN(fld_unit, fld_request_date), \\\r\n fld_incident_id, fld_request_date, \\\r\n fld_unit, fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBBD.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT fld_incident_id, fld_request_date, fld_unit, \\\r\n fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit, fld_request_date \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n GROUP BY t2.fld_unit, t1.fld_age_at_incident \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN rtk_incident AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n (_results, _error_code, __) = self._dao.execute(_query, commit=False)\r\n\r\n return(_results, _error_code)", "def load_obstab_feedback_sliced(self, 
dataset='' , file ='' , datetime='' ):\n k = dataset \n F = file \n dt = datetime\n \n if dt != self.unique_dates[k][F]['up_to_dt_slice']:\n print(\"Error! the dit does not correspond to the dt I calculated in the previous loading! \")\n return 0\n \n logging.debug(\" === (Re)Load data for %s file %s counter %s\" , dataset, file, data[k][F][\"counter\"])\n print(blue + 'Memory used before reading data: ', process.memory_info().rss/1000000000 , cend)\n \n slice_size = self.slice_size\n \n file = data[k][F]['h5py_file']\n rts, ri = data[k][F][\"recordtimestamp\"][:] , data[k][F][\"recordindex\"][:]\n\n index_min = self.unique_dates[k][F]['indices'][dt]['low'] # here no offset since I am reading the original data \n ind = np.where(rts==dt)[0][0] # index of specific dt , I need the extremes indices of the next date_time after slicing \n \n try: \n up_to_dt_slice = rts[ind + slice_size ] # \n index_max = self.unique_dates[k][F]['indices'][up_to_dt_slice]['low'] # maximum index in the array of date_time to slice on\n update_index = True\n except:\n \"\"\" If the dt is too large, I take the whole array \"\"\"\n index_max = 1000000000000000\n update_index = False \n \n \n ####################\n # OBSERVATIONS TABLE\n #################### \n logging.debug ('*** Loading observations_table' )\n obs_tab = file['observations_table'] \n\n #print('CHECKING THE INDICES:::: ' , k , ' index_min ', index_min , ' index_max ', index_max )\n obs_dic= {} \n for ov in self.observations_table_vars:\n v = copy.deepcopy( obs_tab[ov][index_min:index_max ] )\n obs_dic[ov] = v \n data[k][F]['observations_table']= obs_dic \n\n ###########\n # ERA5FB\n ###########\n if k == 'era5_1' or k == 'era5_2':\n logging.debug('*** Loading era5fb ' )\n era5fb_tab = file['era5fb']\n fb_dic = {} \n for ov in self.era5fb_columns:\n try:\n v = copy.deepcopy( era5fb_tab[ov][index_min:index_max ] )\n fb_dic[ov] = v \n except:\n continue\n #print(\"CANNOT FIND \", ov ) \n \n data[k][F]['era5fb_tab']= fb_dic\n \n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n \n \"\"\" Updating the indices \"\"\" \n self.unique_dates[k][F]['index_offset'] = copy.deepcopy( self.unique_dates[k][F]['index_offset_next'] ) \n \n if update_index: \n self.unique_dates[k][F]['index_offset_next'] = index_max \n self.unique_dates[k][F]['up_to_dt_slice'] = up_to_dt_slice\n\n return 0", "def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i = 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if 
i > 35000:\n # s.save()\n # reduce_data()", "def reload_period_from_analytics(cls, period):\n counts = googleanalytics.pageviews_by_document(*period_dates(period))\n if counts:\n # Delete and remake the rows:\n # Horribly inefficient until\n # http://code.djangoproject.com/ticket/9519 is fixed.\n cls.objects.filter(period=period).delete()\n for doc_id, visits in counts.iteritems():\n cls.objects.create(document=Document(pk=doc_id), visits=visits,\n period=period)\n else:\n # Don't erase interesting data if there's nothing to replace it:\n log.warning('Google Analytics returned no interesting data,'\n ' so I kept what I had.')", "def compound(self, period):\n\t\tif self.calendar:\n\t\t\tperiod = CalendarRangePeriod(period, self.calendar)\n\t\t\n\t\tt = self.daycount.timefreq(period, self.frequency)\n\t\treturn self.compounding(self.rate, t)", "def update(start_date, end_date, binning):\n global mean_source1,mean_source,median_source1,median_source, difference_source1,difference_source, difference_source2 , difference_source3, mean_source2, median_source2\n\n # if type of start/end date is date, turn it into a datetime,\n # set time of start/end date time to 12:00\n\n def convert_time(t):\n if type(t) == datetime.date:\n return datetime.datetime(t.year, t.month, t.day, 12, 0, 0, 0)\n else:\n return t.replace(hour=12, minute=0, second=0, microsecond=0)\n\n start_date = convert_time(start_date)\n end_date = convert_time(end_date)\n if binning is None:\n binning = ''\n\n first_timestamp = start_date.timestamp() + time_offset\n second_timestamp = end_date.timestamp() + time_offset\n\n # query data in mysql database\n sql1 = 'SELECT str_to_date(datetime,\"%%Y-%%m-%%d %%H:%%i:%%s\") AS datetime, seeing from seeing ' \\\n ' where datetime >= str_to_date(\"{start_date_}\",\"%%Y-%%m-%%d %%H:%%i:%%s\")' \\\n ' and datetime <= str_to_date(\"{end_date_}\",\"%%Y-%%m-%%d %%H:%%i:%%s\") ' \\\n .format(start_date_=str(start_date), end_date_=str(end_date))\n\n sql2 = 'select _timestamp_,ee50,fwhm,timestamp from tpc_guidance_status__timestamp where timestamp >= {start_date_}' \\\n ' and timestamp<= {end_date_} and guidance_available=\"T\" ' \\\n ' order by _timestamp_' \\\n .format(start_date_=str(first_timestamp), end_date_=str(second_timestamp))\n\n df2 = pd.read_sql(sql2, db.get_engine(app=current_app, bind='els'))\n df1 = pd.read_sql(sql1, db.get_engine(app=current_app, bind='suthweather'))\n\n # setting index time for calculating mean and average\n df2.index = df2[\"_timestamp_\"]\n df1.index = df1['datetime']\n\n # It seems that Pandas doesn't change the index type if the data frame is empty, which means that resampling\n # would fail for an empty data frame. 
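A one-line sketch of that guard, for orientation (here `df` stands in for any of the df1/df2 frames handled below and `binning` for the bin size):\n    #     resampled = df.resample(str(binning) + 'T').mean() if not df.empty else df.copy(deep=True)\n    # 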
As there will be no row for median or mean, it is safe to just use the\n    # original data frame to avoid this problem.\n\n    # for external seeing, calculate median and mean\n    if not df1.empty:\n        mean1_all = df1.resample(str(binning) + 'T').mean()\n    else:\n        mean1_all = df1.copy(deep=True)\n    source1 = ColumnDataSource(mean1_all)\n    mean_source1.data = source1.data\n\n    if not df1.empty:\n        median1_all = df1.resample(str(binning) + 'T').median()\n    else:\n        median1_all = df1.copy(deep=True)\n    source = ColumnDataSource(median1_all)\n    median_source1.data = source.data\n\n    # calculate mean and median for ee50\n    if not df2.empty:\n        mean_all = df2.resample(str(binning) + 'T').mean()\n    else:\n        mean_all = df2.copy(deep=True)\n    source3 = ColumnDataSource(mean_all)\n    mean_source.data = source3.data\n\n    if not df2.empty:\n        median_all = df2.resample(str(binning) + 'T').median()\n    else:\n        median_all = df2.copy(deep=True)\n    source4 = ColumnDataSource(median_all)\n    median_source.data = source4.data\n\n    # calculate mean and median for fwhm\n    if not df2.empty:\n        mean_all1 = df2.resample(str(binning) + 'T').mean()\n    else:\n        mean_all1 = df2.copy(deep=True)\n    source4 = ColumnDataSource(mean_all1)\n    mean_source2.data = source4.data\n\n    if not df2.empty:\n        median_all = df2.resample(str(binning) + 'T').median()\n    else:\n        median_all = df2.copy(deep=True)\n    source5 = ColumnDataSource(median_all)\n    median_source2.data = source5.data\n\n    # calculate difference for external seeing against fwhm and ee50\n    dataframes = [mean1_all, mean_all]\n    add_dataframes = pd.concat(dataframes, axis=1)\n    add_dataframes.index.name = '_timestamp_'\n    add_dataframes['difference'] = add_dataframes['seeing'] - add_dataframes['ee50']\n    datasource2 = ColumnDataSource(add_dataframes)\n    difference_source.data = datasource2.data\n\n    dataframes = [mean1_all, mean_all1]\n    add_dataframes = pd.concat(dataframes, axis=1)\n    add_dataframes.index.name = '_timestamp_'\n    add_dataframes['difference1'] = add_dataframes['seeing'] - add_dataframes['fwhm']\n    datasource1 = ColumnDataSource(add_dataframes)\n    difference_source1.data = datasource1.data\n\n    # #difference using the median\n    # dataframes2 = [median_all, median1_all]\n    # add_dataframes2 = pd.concat(dataframes2, axis=1)\n    # add_dataframes2.index.name = '_timestamp_'\n    # add_dataframes2['difference2'] = add_dataframes2['seeing'] - add_dataframes2['ee50']\n    # datasource2 = ColumnDataSource(add_dataframes2)\n    # difference_source2.data = datasource2.data\n    #\n    # dataframes3 = [median_all, median1_all]\n    # add_dataframes3 = pd.concat(dataframes3, axis=1)\n    # add_dataframes3.index.name = '_timestamp_'\n    # add_dataframes3['difference3'] = add_dataframes3['seeing'] - add_dataframes3['fwhm']\n    # datasource3 = ColumnDataSource(add_dataframes3)\n    # difference_source3.data = datasource3.data\n\n    # plot labels\n    p = figure(title=\"external vs internal seeing ({binning} minute bins)\".format(binning=binning), x_axis_type='datetime'\n               , x_axis_label='datetime', y_axis_label='seeing',plot_width=1000, plot_height=500,tools=TOOLS)\n    dif = figure(title='difference between average internal and external seeing ({binning} minute bins)'.format(binning=binning), x_axis_type='datetime',\n               x_axis_label='datetime', y_axis_label='seeing',plot_width=1000, plot_height=500,tools=TOOLS)\n\n    # plots\n    # plots for external seeing\n    p.circle(source=mean_source1, x='datetime',y='seeing', legend=\"external average\" ,fill_color=\"white\",color='green')\n    p.line(source=median_source1, x='datetime',y='seeing', legend=\"external median\" 
,color='blue')\n\n #plots showing median and mean for ee50 and fwhm\n p.circle(source=mean_source, x='_timestamp_', y='ee50', legend='ee50 average')\n p.circle(source=mean_source, x='_timestamp_', y='fwhm', legend='fwhm average', color='red', fill_color='white')\n\n p.line(source=median_source, x='_timestamp_', y='ee50', legend='ee50 median', color='green')\n p.line(source=median_source, x='_timestamp_', y='fwhm', legend='fwhm median', color='orange')\n\n #for difference\n dif.circle(source=difference_source, x='_timestamp_', y='difference', legend='ee50_mean difference', color='red')\n dif.circle(source=difference_source1, x='_timestamp_', y='difference1', legend='fwhm_mean difference', fill_color='green')\n\n #\n # dif.circle(source=difference_source2, x='_timestamp_', y='difference2', legend='ee50_median difference', fill_color='blue')\n # dif.circle(source=difference_source3, x='_timestamp_', y='difference3', legend='fwhm_median difference', color='orange')\n\n p.xaxis.formatter = date_formatter\n p.legend.location = \"top_left\"\n p.legend.click_policy=\"hide\"\n\n dif.xaxis.formatter = date_formatter\n dif.legend.click_policy=\"hide\"\n\n script, div = components(p)\n content1 = '<div>{script}{div}</div>'.format(script=script, div=div)\n\n script, div = components(dif)\n content2 = '<div>{script}{div}</div>'.format(script=script, div=div)\n\n return '{cont} {cont2}'.format(cont=content1,cont2=content2)", "def by_time_period(cls, user, time_periods):\n return [cls.by_user(user, p.start, p.end) for p in time_periods]", "def history_clones(file, ht_df):\n if os.path.isfile(file):\n # if the file exists, we merge\n print(file + ' found, merging')\n df_file = pd.read_csv(file)\n\n ht_df['timestamp'] = pd.to_datetime(ht_df['timestamp']).dt.date\n\n df_file = pd.concat([df_file, ht_df])\n df_file['timestamp'] = df_file['timestamp'].astype(str)\n\n df_file.sort_values('timestamp', inplace=True)\n print(df_file.to_string())\n # we can't just drop the first instance: for the first day, we'll loose data.\n # so keep max value per date\n\n #df_file.drop_duplicates(subset=['timestamp'], keep='last', inplace=True)\n df_file = df_file.groupby('timestamp')[['uniques', 'count']].agg(['max']).reset_index()\n\n df_file.columns = df_file.columns.droplevel(level=1)\n #print(df_file.to_string())\n #print(df_file.columns)\n df_file.to_csv(file, index=False)\n\n else:\n # otherwise, just dump the df\n print('There is no file to merge, dumping df to ' + file)\n ht_df.to_csv(file, index=False)", "def create_time_s(df, medidor, freq='15T'):\n dates_complete = pd.date_range('1/18/2013', '02/09/2014', freq='15T')\n # this dates take them from the file\n my_complete_series = pd.Series(dates_complete)\n frame1 = my_complete_series.to_frame()\n frame1.columns = ['key']\n merged = pd.merge(frame1, df, on='key', how='outer')\n merged = merged.sort('key')\n # fill the merged file with the number of the meter\n merged['medidor'].fillna(medidor, inplace=True)\n\n return merged", "def testQueryWithTimestamp(self):\n for i in range(5):\n row_name = \"aff4:/row:query_with_ts\"\n data_store.DB.Set(row_name, \"metadata:5\", \"test\", timestamp=i + 10,\n replace=False, token=self.token)\n data_store.DB.Set(row_name, \"aff4:type\", \"test\", timestamp=i + 10,\n replace=False, token=self.token)\n\n # Read all timestamps.\n rows = [row for row in data_store.DB.Query(\n [], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:query_with_ts\",\n timestamp=data_store.DB.ALL_TIMESTAMPS, 
token=self.token)]\n attributes = rows[0]\n self.assertEqual(attributes[\"subject\"][0][0], \"aff4:/row:query_with_ts\")\n self.assertEqual(len(attributes[\"aff4:type\"]), 5)\n\n # Read latest timestamp.\n rows = [row for row in data_store.DB.Query(\n [], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:query_with_ts\",\n timestamp=data_store.DB.NEWEST_TIMESTAMP, token=self.token)]\n\n attributes = rows[0]\n self.assertEqual(attributes[\"subject\"][0][0], \"aff4:/row:query_with_ts\")\n self.assertEqual(len(attributes[\"aff4:type\"]), 1)\n self.assertEqual(attributes[\"aff4:type\"][0][0], \"test\")\n\n # Newest timestamp is 4.\n self.assertEqual(attributes[\"aff4:type\"][0][1], 14)\n\n # Now query for a timestamp range.\n rows = [row for row in data_store.DB.Query(\n [], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:query_with_ts\",\n timestamp=(11, 13), token=self.token)]\n\n attributes = rows[0]\n self.assertEqual(attributes[\"subject\"][0][0], \"aff4:/row:query_with_ts\")\n # Now we should have three timestamps.\n self.assertEqual(len(attributes[\"aff4:type\"]), 3)\n\n timestamps = [attribute[1] for attribute in attributes[\"aff4:type\"]]\n self.assertListEqual(sorted(timestamps), [11, 12, 13])", "def _tail_profile(self, db, interval):\r\n latest_doc = None\r\n while latest_doc is None:\r\n time.sleep(interval)\r\n latest_doc = db['system.profile'].find_one()\r\n\r\n current_time = latest_doc['ts']\r\n\r\n while True:\r\n time.sleep(interval)\r\n cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)\r\n for doc in cursor:\r\n current_time = doc['ts']\r\n yield doc", "def merge_record(self, dt, container = ''): \n record_dataset_legth ={} \n \n \n \"\"\" Combining the ncar_t and ncar_w files.\n If both are present, select the ncar_t data and rename it as 'ncar'. \n If only one is present, simply rename it as 'ncar'. 
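As a sketch of the rule (df_t and df_w are illustrative names): a container holding both, {'ncar_t': df_t, 'ncar_w': df_w}, comes out with the single entry {'ncar': df_t}.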
\n \"\"\" \n if ('ncar_t' in list(container.keys()) ):\n container['ncar'] = {} \n container['ncar']['df'] = container['ncar_t']['df'] \n \n elif ( 'ncar_w' in list(container.keys()) and 'ncar_t' not in list(container.keys()) ) :\n container['ncar'] = {} \n container['ncar']['df'] = container['ncar_w']['df'] \n\n \n for k in container.keys():\n if k == 'ncar_t' or k == 'ncar_w': \n continue \n record_dataset_legth[k] = len(container[k]['df'] )\n \n \n \"\"\" For now, choosing the dataset with more records of all or igra2>ncar>rest data if available and with same number of records \"\"\"\n best_ds, all_ds , best_datasets, all_ds_reports = 'dummy' , [] , [], [] # total number of records, name of the chosen dataset , list of other possible dataset with available data \n \n most_records = max( [ v for v in record_dataset_legth.values() ] ) # maximum number of records per date_time \n \n for k, v in record_dataset_legth.items(): \n if v == 0:\n continue\n if v == most_records:\n best_datasets.append(k) \n if v > 0:\n all_ds.append(k) # all other datasets with smaller number of records than the maximum found\n try: \n all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + container[k]['df']['report_id'].values[0] ) # converting the original report id using the same convention as for observation_id\n except:\n all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + int( (container[k]['df']['report_id'].values[0]).tostring() ) ) # converting the original report id using the same convention as for observation_id\n \n \n #all_ds_reports.append(np.nan)\n #print ( type(container[k]['df']['report_id'].values) )\n #all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + float(container[k]['df']['report_id'].values[0].decode('latin1') ))\n \n if len(best_datasets) ==0:\n print('wrong??? please check')\n return 0,0,0,0 \n \n if 'igra2' in best_datasets:\n best_ds = 'igra2'\n elif 'ncar' in best_datasets:\n best_ds = 'ncar'\n elif 'era5_1' in best_datasets:\n best_ds = 'era5_1' \n else:\n best_ds = best_datasets[0]\n \n \"\"\" Extract container \"\"\" \n selected_df = container[best_ds]['df'].copy(deep = True) # might take extra time, dont know how to get rid of this \n\n try:\n merged_report = self.observation_ids_merged[best_ds] * 1000000000 + int( selected_df['report_id'].values[0].tostring() ) \n except:\n merged_report = np.nan \n\n \"\"\" Calculate new unique observation id \"\"\"\n try: \n obs_ids_merged = [ self.observation_ids_merged[best_ds] * 1000000000 + int( i.tostring() ) for i in selected_df['observation_id'] ]\n except:\n obs_ids_merged = [ np.nan for i in selected_df['observation_id'] ]\n \n \n selected_df['observation_id'] = obs_ids_merged\n \n \"\"\" Calculate new unique report id \"\"\" \n selected_df['report_id'] = merged_report\n\n \"\"\" Returning a string with the alternative available datasets data \"\"\"\n if len(all_ds_reports) > 1: \n duplicates = \",\".join( [ str(i) for i in all_ds_reports] )\n else:\n duplicates = str(all_ds_reports[0])\n \n \n \"\"\" Extracting the merged header_table.\n Again, must consider the special case where best_ds == ncar. 
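In that case the header has to be read from whichever of ncar_t / ncar_w is actually present in the container, which is what the if/elif chain below does.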
\n Note that the header table *should* be identical for ncar_w or ncar_t \"\"\" \n if best_ds != 'ncar':\n header = self.get_header_table(dt, ds= best_ds, all_ds = duplicates , length= len(selected_df) )\n \n elif ( best_ds == 'ncar' and 'ncar_t' in list(container.keys()) ) :\n header = self.get_header_table(dt, ds = 'ncar_t', all_ds = duplicates, length= len(selected_df))\n \n elif ( best_ds == 'ncar' and 'ncar_t' not in list(container.keys()) ) :\n header = self.get_header_table(dt, ds = 'ncar_w', all_ds = duplicates, length= len(selected_df) ) \n \n logging.debug('I use %s record since it has more entries: %s but other available datasets are : %s' , best_ds , str(most_records) , all_ds ) \n \n #print ('duplicates are: ', duplicates)\n return selected_df, best_ds , duplicates, header", "def compare_displacements(ds1,ds2):\n # Obteniendo los datos para BP\n t1 = ds1['t']\n t1 = t1[:n_im-1]\n t1 = mplt.dates.date2num(t1)\n d1 = ds1['d_t']\n # Obteniendo los datos para RMA\n t2 = ds2['t']\n t2 = t2[:n_im-1]\n t2 = mplt.dates.date2num(t2)\n d2 = ds2['d_t']\n\n # Graficando las 2 curvas juntas\n formatter = DateFormatter(\"%d/%m - %H:%M\")\n for i in range(len(d1)):\n # Hallando el valor promedio final x zona\n mean_bp = d1[i].mean()\n mean_rma = d2[i].mean()\n print(\"Valor promedio BP_zona\"+str(i)+\": \",mean_bp)\n print(\"Valor promedio RMA_zona\"+str(i)+\": \",mean_rma)\n print(\"\")\n # Graficando\n direction = 'desplazamientosPromedios_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'_zona'+str(i)\n\n fig, ax= plt.subplots(figsize=(10,7))\n ax.plot_date(t1,d1[i],'b',marker='',markerfacecolor='b',markeredgecolor='b',label='Back Projection')\n ax.plot_date(t2,d2[i],'r',marker='',markerfacecolor='r',markeredgecolor='r',label='RMA')\n ax.set(xlabel='Tiempo',ylabel='Desplazamiento(mm)',title=\"Desplazamientos promedios\\n(Zona \"+str(i)+')')\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=20)\n #ax.set_xlim([R.min(),R.max()])\n ax.set_ylim([-c*1000*4/(4*fc),c*1000*4/(4*fc)])\n ax.grid(linestyle='dashed')\n ax.legend()\n plt.show()\n fig.savefig(os.getcwd()+\"/Results/Desplazamientos/\"+direction,orientation='landscape')\n\n return 'Ok'", "def __iter__(self):\n if len(self) == 0:\n return\n current = self.first_timestamp\n delta = datetime.timedelta(0, self.interval)\n while current <= self.last_timestamp:\n yield current\n current += delta", "def create_person_offer(transcript,portfolio,profile,person_transaction): \n tqdm.pandas()\n to_be_appended = None\n\n # This will not include transaction, so we need another new table for those.\n for (person_id, offer_index), transcript_grouped in tqdm(transcript.dropna(subset=['offer_index']).groupby(['person','offer_index'])):\n this_offer = portfolio.loc[offer_index]\n this_person = profile.loc[person_id]\n to_be_appended = append_one_person_offer(to_be_appended, this_offer, person_id, offer_index, transcript_grouped, this_person)\n \n person_offer_df = pd.DataFrame(to_be_appended)\n\n # TODO, the stuff above and the stuff below was originally made at completly different times and\n # was different files and functions, now I put it into one, however, there's still probably alot\n # that can be done together instead of looping multiple times on the same stuff... 
but not really needed, as it works\n    # but could probably make it way faster...\n    person_offer_df['before_start'] = 0\n    person_offer_df['same_day_start'] = 0\n    person_offer_df['after_start'] = 0\n    person_offer_df['before_view'] = 0\n    person_offer_df['same_day_view'] = 0\n    person_offer_df['after_view'] = 0\n    person_offer_df['before_complete'] = 0\n    person_offer_df['same_day_complete'] = 0\n    person_offer_df['after_complete'] = 0\n    person_offer_df['w_before'] = 0\n    person_offer_df['sum_during'] = 0\n    person_offer_df['mean_during'] = 0\n    person_offer_df['w_after'] = 0\n    person_offer_df = person_offer_df.progress_apply(get_before_after_mean, person_transaction=person_transaction, axis=1) \n    \n    \n    person_offer_df['viewed_reltime'] = np.nan\n    person_offer_df['completed_reltime'] = np.nan\n\n    def absolute2relative_time(x):\n        \"\"\"Converts absolute time (hours since start of simulation)\n        to hours since offer received (start)\n        \"\"\" \n        if x.viewed:\n            x.viewed_reltime = x.viewed_time - x.start\n        \n        if x.completed:\n            x.completed_reltime = x.completed_time - x.start\n        \n        return x\n\n    person_offer_df = person_offer_df.progress_apply(absolute2relative_time, axis=1)\n    \n    # makes it easier to access these combinations\n    person_offer_df['complete_viewed'] = (person_offer_df['completed'] & person_offer_df['viewed']).astype(int)\n    person_offer_df['complete_not_viewed'] = (person_offer_df['completed'] & ~person_offer_df['viewed']).astype(int)\n    person_offer_df['not_complete_not_viewed'] = (~person_offer_df['completed'] & ~person_offer_df['viewed']).astype(int)\n    person_offer_df['not_complete_viewed'] = (~person_offer_df['completed'] & person_offer_df['viewed']).astype(int)\n    person_offer_df['completed'] = person_offer_df['completed'].astype(int)\n    person_offer_df['viewed'] = person_offer_df['viewed'].astype(int)\n    \n    # calculates diff in sales before and after an event\n    for x in ['start','view','complete']:\n        person_offer_df[f'diff_{x}'] = person_offer_df[f'after_{x}'] - person_offer_df[f'before_{x}'] \n    \n    person_offer_df[f'diff_offer'] = person_offer_df[f'w_after'] - person_offer_df[f'w_before']\n    \n    # recalculates became_member_on to member_since instead (where the newest member is 0) in days\n    person_offer_df['became_member_on'] = pd.to_datetime(person_offer_df['became_member_on'], format='%Y-%m-%d')\n    person_offer_df['member_since_days'] = (person_offer_df['became_member_on'].max() - person_offer_df['became_member_on']).dt.days\n    \n    # remove these wrong ages and turn them into NaN\n    person_offer_df['age'] = person_offer_df['age'].apply(lambda x: np.nan if x == 118 else x)\n    \n    return person_offer_df", "def _run_records_sporadic(self, run_idxs, run_record_key):\n\n        # we loop over the run_idxs in the contig and get the fields\n        # and cycle idxs for the whole contig\n        fields = None\n        cycle_idxs = np.array([], dtype=int)\n        # keep a cumulative total of the runs cycle idxs\n        prev_run_cycle_total = 0\n        for run_idx in run_idxs:\n\n            # get all the value columns from the datasets, and convert\n            # them to something amenable to a table\n            run_fields = self._convert_record_fields_to_table_columns(run_idx, run_record_key)\n\n            # we need to concatenate each field to the end of the\n            # field in the master dictionary, first we need to\n            # initialize it if it isn't already made\n            if fields is None:\n                # if it isn't initialized we just set it as this first\n                # run fields dictionary\n                fields = run_fields\n            else:\n                # if it is already initialized we need to go through\n                # each field and concatenate\n                for field_name, field_data in run_fields.items():\n                    # just 
add it to the list of fields that will be concatenated later\n fields[field_name].extend(field_data)\n\n # get the cycle idxs for this run\n rec_grp = self.records_grp(run_idx, run_record_key)\n run_cycle_idxs = rec_grp[CYCLE_IDXS][:]\n\n # add the total number of cycles that came before this run\n # to each of the cycle idxs to get the cycle_idxs in terms\n # of the full contig\n run_contig_cycle_idxs = run_cycle_idxs + prev_run_cycle_total\n\n # add these cycle indices to the records for the whole contig\n cycle_idxs = np.hstack( (cycle_idxs, run_contig_cycle_idxs) )\n\n # add the total number of cycle_idxs from this run to the\n # running total\n prev_run_cycle_total += self.num_run_cycles(run_idx)\n\n # then make the records from the fields\n records = self._make_records(run_record_key, cycle_idxs, fields)\n\n return records", "def pulsEphem(self):\n\n hduMain = fits.open(self.ft1)\n\n # --------------------------------------------------------------------------------------------- #\n # Split the FT1 file every 4000 events\n noEv = 0\n deltEv = 5000\n count = 0\n wfil = open(os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'), 'w')\n while noEv <= self.nevents:\n hduCols = []\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:noEv+deltEv], format=form, unit=uni) )\n # Updte the tstart and tstop in the header in order for tempo2 to work...\n hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header) \n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n noEv += deltEv\n count += 1\n if noEv != self.nevents:\n hduCols = []\n noEv -= deltEv\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:self.nevents], format=form, unit=uni) )\n hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header)\n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n wfil.close()\n\n hduMain.close()\n\n # --------------------------------------------------------------------------------------------- #\n # Run tempo2 for each piece of the FT1\n rfil = open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r')\n percent = 0\n nbFiles = sum(1 for line in open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r'))\n count = 0\n for tmpFil in rfil:\n # Print a progression bar every 5%\n if ( count / np.floor(nbFiles) * 100 ) >= percent:\n self._progressBar(percent, printEvery=5)\n percent += 5\n with open(os.devnull, 'wb') as devnull:\n 
subprocess.check_call(['/dsm/fermi/fermifast/glast/tempo2-2013.9.1/tempo2',\n '-gr', 'fermi', '-ft1', tmpFil[:-1], '-ft2', self.ft2, '-f', self.ephem,\n '-phase'], stdout=devnull, stderr=subprocess.STDOUT)\n count += 1\n # Replace the old ft1 by the new one with the PULSE_PHASE column\n #os.remove()\n self._gtSelect(data = os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'))\n\n\n\n\n #self.nevents\n #J2032+4127_54683_57791_chol_pos.par\n #os.popen(\"tempo2 -gr fermi -ft1 {} -ft2 {} -f {} -phase\".format(self.ft1, self.ft2, self.ephem))", "def _copy(source, track, filter_f=lambda x: True, coef=1000):\n for msg in source:\n if filter_f(msg):\n track.append(msg.copy(time=int(msg.time*coef)))", "def _mergeKeys(self, other):\n for id in set(other.clock.keys()).difference(set(self.clock.keys())):\n self.clock[id] = 0\n for id in set(self.clock.keys()).difference(set(other.clock.keys())):\n other.clock[id] = 0", "def test_get_time_period_search(self):\n obj = CalculateSearchTimes(True, TIME_PERIODS,\n search_path=SEARCH_PATH, df_path=DF_PATH)\n dt1 = dt.datetime(2017, 8, 9, 14, 56, 40, 796658)\n dt2 = dt.datetime(2017, 8, 9, 17, 56, 40, 796658)\n dt3 = dt.datetime(2017, 8, 9, 10, 56, 40, 796658)\n result1 = obj.get_time_period_search(dt1)\n result2 = obj.get_time_period_search(dt2)\n result3 = obj.get_time_period_search(dt3)\n\n self.assertTrue(np.mean(list(result1.values())) == 1666.6666666666667)\n self.assertTrue(np.mean(list(result2.values())) != 1666.6666666666667)\n self.assertTrue(np.mean(list(result3.values())) != 1666.6666666666667)", "def run(self):\n # pylint: disable=too-many-locals\n loop_start = time.clock()\n partitions = []\n for item in self._results:\n partitions.append(\n Partition(item.name, item.dataframe, self.PARTITION_SIZE)\n )\n if item.name == self._primary_dataset:\n primary_dataset = partitions[-1]\n\n index = 0\n while index < self.PARTITION_SIZE:\n merge_sets = []\n combination = ['\\033[1m{0}\\033[0m'.format(self._mappings[self._primary_dataset])]\n for partition in partitions:\n if partition.name != self._primary_dataset:\n merge_sets.append({'name': partition.name, 'data': partition.next()})\n combination.append(self._mappings[partition.name])\n\n Logger().info(\n 'Running data extraction. 
Index {0} of {1} partitions, (combination: {2}, queries: {3})'.format(\n index,\n self.PARTITION_SIZE,\n ''.join(combination),\n len(self._queries)\n )\n )\n\n for size in range(self.PARTITION_SIZE):\n self._running = []\n q_start = time.clock()\n merge_table = self.merge(\n merge_sets + [{'name': primary_dataset.name, 'data': primary_dataset.next()}]\n )\n m_end = '{0:.2f}'.format(float(time.clock() - q_start))\n Logger().debug(\n 'Combination {0} merge table completed in {1} seconds ({2} rows)'.format(\n ''.join(combination),\n m_end,\n len(merge_table.index)\n )\n )\n for query in self._queries:\n self._running.append(\n DataThreader(merge_table, query, self._unique_columns)\n )\n\n self.monitor()\n times = [item.duration for item in self._running]\n message = '{4}\\n Combination {0}, Index {5}/{1}'\n message += ', merge_table size: {2}.\\n Average time per query {3}'\n Logger().debug(\n message.format(\n ''.join(combination),\n index,\n len(merge_table.index),\n times,\n [len(query.results.index) for query in self._queries],\n size\n )\n )\n del times\n del merge_table\n del self._running\n\n end_time = math.floor(time.clock() - loop_start)\n Logger().info('=========================================================================================')\n Logger().info(\n 'Completed partition {0} in {1} seconds. {2} queries, combination: {3}'.format(\n index,\n end_time,\n len(self._queries),\n ''.join(combination)\n )\n )\n Logger().info('=========================================================================================')\n primary_dataset.reset()\n index += 1\n\n Logger().info('Completed data extraction')\n self._complete = True", "def getContinuousBookingTimeSeries(span=28):\n data = []\n x = []\n y = []\n users = []\n now = datetime.datetime.now(pytz.utc)\n delta = datetime.timedelta(days=span)\n end = now - delta\n bookings = Booking.objects.filter(start__lte=now, end__gte=end).prefetch_related(\"collaborators\")\n for booking in bookings: # collect data from each booking\n user_list = [u.pk for u in booking.collaborators.all()]\n user_list.append(booking.owner.pk)\n data.append((booking.start, 1, user_list))\n data.append((booking.end, -1, user_list))\n\n # sort based on time\n data.sort(key=lambda i: i[0])\n\n # collect data\n count = 0\n active_users = {}\n for datum in data:\n x.append(str(datum[0])) # time\n count += datum[1] # booking count\n y.append(count)\n for pk in datum[2]: # maintain count of each user's active bookings\n active_users[pk] = active_users.setdefault(pk, 0) + datum[1]\n if active_users[pk] == 0:\n del active_users[pk]\n users.append(len([x for x in active_users.values() if x > 0]))\n\n return {\"booking\": [x, y], \"user\": [x, users]}", "def main():\n\n f = open(eventsfile, 'r')\n lines = f.readlines()\n numcounter = 0\n counter = 0\n fullcounter = 0\n movielist = []\n movielists =[]\n timestamp_list = []\n filteredlist = [] \n startdate = \"2020-02-26\"\n \n for line in lines:\n TAPES = line.split('\\t')\n if int(TAPES[2]) == 1 or int(TAPES[2]) == 2:\n filteredlist.append(line)\n \n for newline in filteredlist:\n TAPES = newline.split('\\t')\n fullcounter +=1\n if int(TAPES[2]) == 2:\n timestamp_list.append(0)\n continue\n startdate2 = startdate.split(\"-\")[1] + \"/\" + startdate.split(\"-\")[2] + \"/\" + startdate.split(\"-\")[0]\n dateplustime = startdate2 + TAPES[0][0:len(TAPES[0])]\n thistime = faststrptime(dateplustime)\n unixtimestamp = datetime.datetime.timestamp(thistime)\n timestamp_list.append(int(unixtimestamp))\n\n i = 0 \n for element in 
timestamp_list:\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+(counter-i)]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n \n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue \n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+1]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n\n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue\n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n counter += 1\n numcounter += 1\n if element != 0:\n movielist.append(counter)\n i += 1\n \n if numcounter == 30:\n numcounter = 0\n movielists.append(movielist)\n movielist = []\n \n if i > (len(timestamp_list)-1):\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n \n numendlists = counter - fullcounter\n first = len(movielists)-numendlists\n last = len(movielists)\n del movielists[first:last]\n \n for x in movielists:\n for y in x:\n if int(filenumber) == y:\n movielist = x\n\n modename = str(movielist[0]) + \"to\" + str(movielist[len(movielist)-1])\n modefilename = \"mode_\" + modename + \".png\"\n try:\n imread(modefilename)\n except:\n imageMode(modename,movielist)\n\n e = loadmodeImage(modefilename)\n \n roimask = np.zeros((ydim,xdim))\n f = open(roisfile, 'r')\n lines = f.readlines()\n i = 1\n i2 = 0\n for line in lines:\n try:\n print(int(line.split(' ')[0]))\n except ValueError:\n i2 += 1\n continue\n minx = int(line.split(' ')[0])\n miny = int(line.split(' ')[1])\n maxx = int(line.split(' ')[2])\n maxy = int(line.split(' ')[3])\n roimask[int(miny):int(maxy),int(minx):int(maxx)] = i\n i += 1\n numberofwells = i-1\n numberofcols = int(i2/2)\n numberofrows = int(numberofwells/numberofcols)\n roimaskweights = convertMaskToWeights(roimask)\n\n cap = cv2.VideoCapture(videoStream)\n\n cap.set(3,roimask.shape[1])\n cap.set(4,roimask.shape[0])\n \n ret,frame = cap.read()\n storedImage = np.array(e * 255, dtype = np.uint8)\n storedMode = Blur(storedImage)\n storedFrame = grayBlur(frame)\n cenData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))*2 -2])\n pixData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))])\n i = 0;\n totalFrames = 0\n while(cap.isOpened()):\n ret,frame = cap.read()\n if ret == False:\n break\n currentFrame = grayBlur(frame)\n diffpix = diffImage(storedFrame,currentFrame,pixThreshold)\n diff = trackdiffImage(storedMode,currentFrame,pixThreshold)\n diff.dtype = np.uint8\n contours,hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n MIN_THRESH = 20.0\n MIN_THRESH_P = 20.0\n roi_dict = {}\n for r in range(0,numberofwells):\n roi_dict[r+1] = []\n for cs in range(0,len(contours)):\n if cv2.contourArea(contours[cs]) < 1.0:\n continue\n if cv2.arcLength(contours[cs],True) < 1.0:\n continue\n if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs],True) > MIN_THRESH_P:\n M = cv2.moments(contours[cs])\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n area = cv2.contourArea(contours[cs])\n perim = cv2.arcLength(contours[cs],True)\n if int(roimask[cY,cX]) == 0:\n continue\n if not roi_dict[int(roimask[cY,cX])]:\n roi_dict[int(roimask[cY,cX])].append((area*perim,cX,cY))\n 
else:\n if roi_dict[int(roimask[cY,cX])][0][0] < area*perim:\n roi_dict[int(roimask[cY,cX])][0] = (area*perim,cX,cY)\n\n pixcounts = []\n pixcounts = np.bincount(roimaskweights, weights=diffpix.ravel())\n pixData[i,:] = np.hstack((pixcounts))\n counts = []\n keys = roi_dict.keys()\n keys = sorted(keys)\n for k in keys:\n x = -10000\n y = -10000\n if roi_dict[k]:\n x = roi_dict[k][0][1]\n y = roi_dict[k][0][2]\n counts.append(x)\n counts.append(y)\n cv2.line(storedImage,(x,y),(x,y),(255,255,255),2)\n if i == 284:\n cv2.imwrite(videoStream + '_trackedimagewithlines_' + str(i) + \".png\", storedImage)\n cenData[i,:] = np.asarray(counts)\n totalFrames += 1\n storedFrame = currentFrame\n i += 1\n\n file = open(videoStream + \".centroid2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells*2):\n file.write(str(int(cenData[x,:][y])) + '\\n')\n pixData = pixData[:i,:]\n pixData = pixData[:,1:] \n file = open(videoStream + \".motion2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells):\n file.write(str(int(pixData[x,:][y])) + '\\n')\n\n cap.release()\n cv2.destroyAllWindows()\n \n try:\n image = Image.open('lastframe.png')\n except:\n makenumROIsimage()", "def combine_record(self, dt, container = ''):\n \n record_dataset_legth ={} \n other_ds = []\n\n ''' I fill the dic e.g. record_dataset_legth{100:['era5_1','ncar'], 80:['bufr','igra2'] }\n i.e. the keys are the lengths, the entries are the lists of datasets '''\n\n duplicates = []\n\n for k in container.keys(): # loop over the dataset\n if k not in other_ds:\n other_ds.append(k)\n for f in container[k]: # loop over the file per dataset\n num_rec = len(container[k][f]['obs_tab'][\"date_time\"])\n \n \"\"\" Storing all the reports id with the proper prefix (for each different dataset) \"\"\"\n rep_id = b''.join(container[k][f][\"obs_tab\"]['report_id'][0]) \n rep_id = self.observation_ids_merged[k] + rep_id \n duplicates.append( rep_id ) \n \n if num_rec not in record_dataset_legth.keys():\n record_dataset_legth[num_rec] = {}\n record_dataset_legth[num_rec]['best_ds'] = []\n record_dataset_legth[num_rec]['file'] = []\n\n record_dataset_legth[num_rec]['best_ds'].append(k)\n record_dataset_legth[num_rec]['file'].append(f)\n\n max_entries = max(record_dataset_legth.keys())\n \n ''' best_ds is the list of longest datasets, best_datasets the list of all the datasets available including best_ds '''\n best_datasets = record_dataset_legth[max_entries]\n\n \"\"\" Choosing the priority of the datasets:\n - if era5_1 or era5_2 are present, pick them (they cant be both present for the same date_time)\n - else, if igra2 is present, pick it\n - else, one of the remaining ones \"\"\"\n\n if 'era5_2' in best_datasets and 'era5_1' not in best_datasets: # era5_1 and era5_2 should never be both present anyway...\n best_ds = 'era5_2' \n elif 'era5_1' in best_datasets and 'era5_2' not in best_datasets:\n best_ds = 'era5_1'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' in best_datasets:\n best_ds = 'igra2'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' not in best_datasets:\n best_ds = record_dataset_legth[max_entries]['best_ds'][0] # pick the first of the list \n\n best_file = record_dataset_legth[max_entries]['file'][0]\n\n ''' If more file are available for the same best_ds, pick the first one from the list '''\n selected_obstab, selected_era5fb = container[best_ds][best_file]['obs_tab'] , container[best_ds][best_file]['era5fb_tab']\n\n ''' Creating the 
correct observations and record ids. \n All the bytes variable are shrunk to a long |S1 byte variable type, otherwise \n writing in h5py will not work. '''\n \n for var in ['observation_id']:\n if type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.bytes_:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var] ] )\n elif type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.ndarray:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var][:] ] )\n\n for var in ['report_id']:\n val = selected_obstab[var][0]\n if type (selected_obstab[var] ) == np.ndarray and type (val) == np.bytes_:\n value = self.observation_ids_merged[best_ds] + b''.join(val) # it is the same for each row in the table\n elif type (selected_obstab[var] ) == np.ndarray and type (val) == np.ndarray:\n value = self.observation_ids_merged[best_ds] + b''.join(val) \n arr = np.full( (1, len( selected_obstab['date_time']) ) , value )[0] # np.full returns a list of lists\n\n selected_obstab[var] = arr\n\n\n for var in selected_era5fb.keys():\n if type (selected_era5fb[var]) == np.ndarray and type (selected_era5fb[var][0] ) == np.ndarray:\n try:\n selected_era5fb[var] = np.array( [b''.join(l) for l in selected_era5fb[var][:] ] )\n #print('MANAGED FFF', var)\n except:\n value = [b''.join(l) for l in selected_era5fb[var][0] ][0]\n #print('VALUE IS FFF', value)\n selected_era5fb[var] = np.array( (1, len( selected_obstab[var]) ) ).fill(value)\n\n \"\"\" Extracting the header \"\"\"\n selected_head = self.get_header_table(dt, ds = best_ds, File = best_file )\n for var in selected_head.keys():\n if type (selected_head[var] ) == np.ndarray and type (selected_head[var][0] ) == np.bytes_:\n selected_head[var] = np.array( [b''.join(l) for l in selected_head[var][:] ] )\n\n if 'best_ds' == 'era5_1' or best_ds == 'era5_2' :\n selected_obstab['advanced_assimilation_feedback'] = np.array([1]*len(selected_obstab['date_time']) )\n else:\n selected_obstab['advanced_assimilation_feedback'] = np.array([0]*len(selected_obstab['date_time']) )\n\n #best_ds_byte = np.bytes_(best_ds, ndtype = '|S10') # converting to bytes object\n best_ds_byte = np.bytes_(best_ds) # converting to bytes object \n arr = np.full( (1, len( selected_obstab['date_time']) ) , best_ds_byte )[0]\n selected_obstab['source_id'] = arr\n\n duplicate = b','.join(duplicates)\n #selected_head['duplicates'] = np.array(duplicate)\n\n duplicate = np.array(duplicate).astype(dtype='|S70')\n selected_head['duplicates'] = np.array([duplicate])\n selected_head['report_id'] = np.array([selected_obstab['report_id'][0]])\n selected_head['source_id'] = np.array([selected_obstab['source_id'][0]])\n selected_head['record_timestamp'] = np.array([selected_obstab['date_time'][0]])\n\n selected_file = np.bytes_(best_file.split('/')[-1])\n \n return best_ds, selected_obstab, selected_era5fb, selected_head, selected_file, best_file", "def append_past_returns(raw, periods):\n data = raw.dropna(subset=['ret']).copy()\n wide = data.pivot(index='time_idx', columns='permno', values='ret')\n wide = wide + 1\n\n rets = []\n for period in periods:\n tmp = wide.rolling(window=period).apply(np.prod, raw=True) - 1\n tmp = tmp.reset_index().melt(id_vars='time_idx', var_name='permno', value_name='prev_ret_' + str(period)).dropna()\n tmp.permno = tmp.permno.astype('int')\n tmp.time_idx = pd.to_datetime(tmp.time_idx)\n rets.append(tmp)\n\n 
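# The rolling-product trick used to build each `tmp` above, in isolation (a minimal sketch; `r` as a wide frame of simple returns and `k` as the lookback are illustrative names):\n    #     compounded = (r + 1).rolling(window=k).apply(np.prod, raw=True) - 1\n    # Each cell then holds the k-period cumulative return ending at that row.\n    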
for ret in rets:\n data = pd.merge(data, ret, on=['time_idx', 'permno'], how='left')\n return data # don't drop because we may need to work on other fundamental data too", "def gen_records(self, count=None):\n if not count:\n count = self.num_rec\n tt = time.localtime(time.time())\n addr = None\n for i in range(count):\n logdbg(\"reading record %d of %d\" % (i+1, count))\n addr, record = self.get_record(addr, tt.tm_year, tt.tm_mon)\n yield addr, record", "def merge_working_sets(self, other):\n\n for dist in other.by_key.values(): self.add(dist)\n return self", "def merge(self, otr):\n self._duration = otr.get_start() - self.get_start()\n self._duration += otr.get_duration()\n self._line[3] = self._duration", "def extend(self,\n period_data: List[Candle]) -> None:\n self.point_1_moment = period_data[0].moment\n self.point_1_price = period_data[0].open\n self.point_2_moment = period_data[1].moment\n self.point_2_price = period_data[1].open", "def make_data_pipeline(self, from_, to_):\n\n # Get the volume over time data.\n r_volume = self.get_data_from_endpoint(from_, to_, 'volume')\n print('There are approximately {} documents.'.format(r_volume.json()['numberOfDocuments']))\n\n # Carve up time into buckets of volume <10k.\n l_dates = self.get_dates_from_timespan(r_volume)\n\n data = []\n for i in range(0, len(l_dates)):\n from_, to_ = l_dates[i]\n\n # Pull posts.\n r_posts = self.get_data_from_endpoint(from_, to_, 'posts')\n if r_posts.ok and (r_posts.json()['status'] != 'error'):\n j_result = json.loads(r_posts.content.decode('utf8'))\n data.extend(j_result['posts'])\n return data", "def get_data(self, date_time):\n id_columns = ','.join([col for col in self.table_primary_keys if col not in ['EFFECTIVEDATE', 'VERSIONNO']])\n return_columns = ','.join(self.table_columns)\n with self.con:\n cur = self.con.cursor()\n cur.execute(\"DROP TABLE IF EXISTS temp;\")\n cur.execute(\"DROP TABLE IF EXISTS temp2;\")\n cur.execute(\"DROP TABLE IF EXISTS temp3;\")\n cur.execute(\"DROP TABLE IF EXISTS temp4;\")\n # Store just the unique sets of ids that came into effect before the the datetime in a temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp AS \n SELECT * \n FROM {table} \n WHERE EFFECTIVEDATE <= '{datetime}';\"\"\"\n cur.execute(query.format(table=self.table_name, datetime=date_time))\n # For each unique set of ids and effective dates get the latest versionno and sore in temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp2 AS\n SELECT {id}, EFFECTIVEDATE, MAX(VERSIONNO) AS VERSIONNO\n FROM temp\n GROUP BY {id}, EFFECTIVEDATE;\"\"\"\n cur.execute(query.format(id=id_columns))\n # For each unique set of ids get the record with the most recent effective date.\n query = \"\"\"CREATE TEMPORARY TABLE temp3 as\n SELECT {id}, VERSIONNO, max(EFFECTIVEDATE) as EFFECTIVEDATE\n FROM temp2\n GROUP BY {id};\"\"\"\n cur.execute(query.format(id=id_columns))\n # Inner join the original table to the set of most recent effective dates and version no.\n query = \"\"\"CREATE TEMPORARY TABLE temp4 AS\n SELECT * \n FROM {table} \n INNER JOIN temp3 \n USING ({id}, VERSIONNO, EFFECTIVEDATE);\"\"\"\n cur.execute(query.format(table=self.table_name, id=id_columns))\n # Inner join the most recent data with the interconnectors used in the actual interval of interest.\n query = \"\"\"SELECT {cols} \n FROM temp4 \n INNER JOIN (SELECT * \n FROM DISPATCHINTERCONNECTORRES \n WHERE SETTLEMENTDATE == '{datetime}') \n USING (INTERCONNECTORID);\"\"\"\n query = query.format(datetime=date_time, id=id_columns, 
cols=return_columns)\n data = pd.read_sql_query(query, con=self.con)\n return data", "def _link_data_and_events(self, ev, data, limit=None, timezone=None):\n if limit is not None:\n data = data.head(limit)\n\n date, time = ev[\"Date\"], ev[\"Time\"]\n\n for dfmt, tfmt in DATE_AND_TIME_FORMATS:\n try:\n ev[\"DateAndTime\"] = [pd.to_datetime(d + \" \" + t, format=dfmt)\n for d, t in zip(date, time)]\n data[\"DateAndTime\"] = [pd.to_datetime(ts, format=tfmt)\n for ts in data[\"Timestamp\"]]\n except ValueError:\n continue\n date_time_format = dfmt\n break\n\n if timezone is not None:\n ev.DateAndTime += timedelta(hours=timezone)\n\n self.log(\"[.] Data filtering...\")\n\n output = list()\n # group events with proper data\n for event in ev.values:\n ts = event[-1]\n one_min_before = ts - timedelta(minutes=1)\n five_min_ahead = ts + timedelta(minutes=5)\n data_in_range = data[(data.DateAndTime >= one_min_before) &\n (data.DateAndTime <= five_min_ahead)]\n\n for d in data_in_range.values:\n output.append(np.hstack((event[:-1], d)))\n\n if not output:\n return pd.DataFrame()\n\n cols = list(ev.columns[:-1]) + list(data.columns)\n dataframe = pd.DataFrame(output, columns=cols)\n\n self.log(\"[.] Timezones synchronization...\")\n\n # synchronize original Date and Time columns with DateAndTime\n if timezone is not None:\n dates = pd.Series(datetime.strptime(d + \" \" + t, date_time_format)\n + timedelta(hours=timezone)\n for d, t in zip(dataframe.Date, dataframe.Time))\n dataframe[\"DateUTC\"] = dates.map(methodcaller('date'))\n dataframe[\"TimeUTC\"] = dates.map(methodcaller('time'))\n\n return dataframe", "def data_comparison(observations, records, record):\n for observation in observations:\n if observation != \"_id\":\n try:\n if re.search(observation, f\"{records[record]}\"):\n if not re.search(\n observations[observation], f\"{records[record]}\"\n ):\n records[record] = (\n f\"{records[record]}\"\n + \" --> \"\n + observations[observation]\n )\n except Exception as ex:\n Common.logger.warning(f\"Exception happened in data comparison {ex}\")\n return records", "def join_daily_cweeds_wy2_and_wy3(wy2_df, wy3_df):\n assert wy2_df['CWEEDS Format'] == 'WY2'\n assert wy3_df['CWEEDS Format'] == 'WY3'\n assert wy2_df['Time Format'] == wy3_df['Time Format']\n\n time_wy23 = np.hstack([wy2_df['Time'], wy3_df['Time']])\n time_wy23 = np.unique(time_wy23)\n time_wy23 = np.sort(time_wy23)\n\n wy23_df = {}\n wy23_df['Time Format'] = wy3_df['Time Format']\n wy23_df['CWEEDS Format'] = 'WY2+WY3'\n\n # Copy the header info from WY3 dataset :\n\n for key in ['HORZ version', 'Location', 'Province', 'Country',\n 'Station ID', 'Latitude', 'Longitude', 'Time Zone',\n 'Elevation']:\n wy23_df[key] = wy3_df[key]\n\n # Merge the two datasets :\n\n wy23_df['Time'] = time_wy23\n wy23_df['Years'] = np.empty(len(time_wy23)).astype(int)\n wy23_df['Months'] = np.empty(len(time_wy23)).astype(int)\n wy23_df['Days'] = np.empty(len(time_wy23)).astype(int)\n wy23_df['Hours'] = np.empty(len(time_wy23)).astype(int)\n wy23_df['Irradiance'] = np.empty(len(time_wy23)).astype('float64')\n\n for dataset in [wy2_df, wy3_df]:\n indexes = np.digitize(dataset['Time'], time_wy23, right=True)\n for key in ['Years', 'Months', 'Days', 'Hours', 'Irradiance']:\n wy23_df[key][indexes] = dataset[key]\n\n return wy23_df", "def run(self):\n\t\tdf_iter = self.file_to_df(50000)\n\t\tdf_airport = self.airport_file_to_df()\n\t\tfor df in df_iter: # type: pd.DataFrame\n\t\t\tdf.drop_duplicates(inplace=True)\n\t\t\tdf = self.transform(df, 
df_airport)\n\n\t\t\tdf_result = self.get_only_new_records(\n\t\t\t\tdf=df,\n\t\t\t\tdf_columns=self.join_columns,\n\t\t\t\ttable_columns=self.join_columns\n\t\t\t)\n\n\t\t\tif len(df_result) > 0:\n\t\t\t\t# df_result.drop(self.table_columns, axis=1)\n\n\t\t\t\tself.save(\n\t\t\t\t\tdf=df_result,\n\t\t\t\t\ttable_name=\"travel_dimension\",\n\t\t\t\t\tdf_columns=self.table_columns,\n\t\t\t\t\ttable_colums=self.table_columns\n\t\t\t\t)", "def concat_and_sort(self):\n for link in self.to_concat:\n \n to_concat = self.to_concat[link]\n df = pd.concat(to_concat,axis=0)\n df=df.sort_values(by=['day','actualtime_arr_from'])\n for d in df['day'].unique():\n self.data[d][link] = {}\n temp = df[df['day']==d]\n \n for r in temp['routeid'].unique(): \n self.data[d][link][r] = temp[temp['routeid']==r][['actualtime_arr_from','actualtime_arr_to','routeid']].values \n del(temp)\n del(df)\n del(self.to_concat)", "def period_limit_time_series(self, length, period, use_smalls=False):\n filtered = self._components[:]\n if use_smalls:\n filtered = filter(lambda c: c.period <= period, filtered)\n else:\n filtered = filter(lambda c: c.period >= period, filtered)\n \n maker = r.Recomposer(filtered, self.bias)\n return maker.time_series(length)", "def create_query_df(self):\n\n # display output message for timeframe\n print(\n f'{Fore.GREEN}\\nQuerying database for tags between the timeframe: '\n f'{Fore.LIGHTGREEN_EX}{str(self._start)}{Fore.GREEN} and {Fore.LIGHTGREEN_EX}{str(self._end)}'\n f'{Style.RESET_ALL}')\n print(\n f'{Fore.GREEN}\\nTIMESPAN: '\n f'{Fore.LIGHTGREEN_EX}{self.time_span} hours'\n f'{Style.RESET_ALL}')\n\n engine = get_db_engine()\n offset = 0\n chunk_size = 100000\n\n dfs = []\n while True:\n sa_select = sa.select(\n [self.data_table],\n whereclause=sa.and_(\n self.data_table.c._TIMESTAMP > '{}'.format(self._start),\n self.data_table.c._TIMESTAMP <= '{}'.format(self._end)),\n limit=chunk_size,\n offset=offset,\n order_by=self.data_table.c._NUMERICID\n )\n dfs.append(pd.read_sql(sa_select, engine))\n offset += chunk_size\n if len(dfs[-1]) < chunk_size:\n break\n\n self.query_df = pd.concat(dfs)", "def collect(self, revisions):\n nr_revisions = len(revisions)\n estimate = TimeEstimator(nr_revisions)\n for index, revision_number in enumerate(revisions):\n last_measurement = self.__get_last_measurement(revision_number)\n self.__write_measurement(last_measurement)\n self.__last_revision.set(revision_number)\n logging.info('Revision: %s, %s/%s, measurement date: %s, time remaining: %s', revision_number, index + 1,\n nr_revisions, self.__get_date(last_measurement), estimate.time_remaining(index))", "def overlap(self, other):\r\n self.set_datetime()\r\n other.set_datetime()\r\n return (self.dt_1 - other.dt_0).total_seconds()", "def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n 
current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)", "def read_daily_qualified_report(self):\n from itertools import repeat\n\n self.ID_TOTAL_CANDIDATES = kpi_from_db_config.ID_TOTAL_CANDIDATES\n self.ID_TOTAL_PROCESSED = kpi_from_db_config.ID_TOTAL_PROCESSED\n self.ID_TOTAL_EXPORTED = kpi_from_db_config.ID_TOTAL_EXPORTED\n self.ID_TOTAL_CLASSIFIED = kpi_from_db_config.ID_TOTAL_CLASSIFIED\n self.ID_TOTAL_QUALIFIED = kpi_from_db_config.ID_TOTAL_QUALIFIED\n self.ID_TOTAL_DISQUALIFIED = kpi_from_db_config.ID_TOTAL_DISQUALIFIED\n\n list_id = [self.ID_TOTAL_CANDIDATES, \n self.ID_TOTAL_PROCESSED, \n self.ID_TOTAL_EXPORTED, \n self.ID_TOTAL_CLASSIFIED, \n self.ID_TOTAL_QUALIFIED, \n self.ID_TOTAL_DISQUALIFIED]\n list_result = [[] for i in repeat(None,len(list_id))]\n\n for i in range(len(list_id)):\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT 2\n ''', [list_id[i]])\n\n rows_count = self.cursor.rowcount\n if (rows_count == 2):\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n elif (rows_count == 1):\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n list_result[i] = list_result[i] + [0]\n else:\n list_result[i] = [0] * 2 \n\n# print \"TESTING .... {}\".format(list_result)\n return list_result", "def return_trade_history(\n self,\n start: Timestamp,\n end: Timestamp,\n ) -> list[dict[str, Any]]:\n limit = 100\n data: list[dict[str, Any]] = []\n start_ms = start * 1000\n end_ms = end * 1000\n while True:\n new_data = self.api_query_list('/trades', {\n 'startTime': start_ms,\n 'endTime': end_ms,\n 'limit': limit,\n })\n results_length = len(new_data)\n if data == [] and results_length < limit:\n return new_data # simple case - only one query needed\n\n latest_ts_ms = start_ms\n # add results to data and prepare for next query\n existing_ids = {x['id'] for x in data}\n for trade in new_data:\n try:\n timestamp_ms = trade['createTime']\n latest_ts_ms = max(latest_ts_ms, timestamp_ms)\n # since we query again from last ts seen make sure no duplicates make it in\n if trade['id'] not in existing_ids:\n data.append(trade)\n except (DeserializationError, KeyError) as e:\n msg = str(e)\n if isinstance(e, KeyError):\n msg = f'Missing key entry for {msg}.'\n self.msg_aggregator.add_warning(\n 'Error deserializing a poloniex trade. Check the logs for details',\n )\n log.error(\n 'Error deserializing poloniex trade',\n trade=trade,\n error=msg,\n )\n continue\n\n if results_length < limit:\n break # last query has less than limit. 
We are done.\n\n # otherwise we query again from the last ts seen in the last result\n start_ms = latest_ts_ms\n continue\n\n return data", "def fill_price_gaps(\n from_date=dt.datetime(1970,1,1),\n to_date=dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n ):\n #Create a collection of years\n years = []\n cur_year = from_date.year\n while cur_year <= to_date.year:\n years.append(cur_year)\n cur_year += 1\n #Loop each year\n all_year_dates = pd.DataFrame([])\n for year in tqdm(years, total=len(years), desc=\"Loop through years to find dates\"):\n #establish bounding dates\n year_from_date = None if year != from_date.year else from_date\n year_to_date = None if year != to_date.year else to_date\n #Get filtered year dates\n year_dates = create_filtered_year_dates(year, from_date=year_from_date, to_date=year_to_date, )\n #Add to the full list\n all_year_dates = pd.concat([all_year_dates, year_dates])\n #Order the dates (just in case)\n all_year_dates = all_year_dates.sort_values([\"date\"]) \\\n .reset_index(drop=True)\n #Fetch all the tickers\n tickers = sqlaq_to_df(ticker.fetch())\n #Loop through tickers\n errors = []\n run_time = ProcessTime()\n for _,r in tqdm(tickers[[\"id\",\"ticker\"]].iterrows(), total=tickers.shape[0], desc=\"Filling in gaps\"):\n logger.info(f\"Filling gaps in {r.id} -> {r.ticker}\")\n try:\n #Fetch all prices\n dp = sqlaq_to_df(daily_price.fetch(ticker_ids=[r.id]))\n dp[\"date\"] = dp.date.astype(\"datetime64[ns]\")\n #Identify missing dates\n missing_dates = pd.merge(all_year_dates, dp[[\"date\",\"id\"]], on=[\"date\"], how=\"left\")\n #Identify the start date and remove all missing date before that\n start_date = missing_dates[~missing_dates.id.isnull()].date.min()\n missing_dates = missing_dates[missing_dates.date > start_date]\n #Remove all other items which have dates\n missing_dates = missing_dates[missing_dates.id.isnull()]\n #Order remaining dates\n missing_dates = missing_dates.sort_values(\"date\")\n #Create groupings no larger than max_days (in config)\n st_d = None\n date_groups = []\n missing_dates = missing_dates.date.to_list()\n if len(missing_dates):\n for i,d in enumerate(missing_dates):\n if not st_d:\n st_d = d\n else:\n #Append when group gets too big\n if (d - st_d).days > WEB_SCRAPE_MAX_DAYS:\n date_groups.append([st_d, missing_dates[i-1]])\n #Update the start date\n st_d = d\n #Append the last item\n date_groups.append([st_d, d])\n #Scrape the missing prices\n logger.info('Number of webscrapes to perform -> {}'.format(len(date_groups)))\n #For each time frame perform a scrape\n try: #Try loop so as not to miss all following date groups\n for i,dates in enumerate(date_groups):\n logger.info(f\"Running dates {i} -> {dt.datetime.strptime(str(dates[0])[:10], '%Y-%m-%d')} - {dt.datetime.strptime(str(dates[1])[:10], '%Y-%m-%d')}\")\n process_daily_prices(\n r.ticker,\n r.id,\n st_date=dates[0],\n en_date=dates[1],\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e, \"st_date\":dates[0], \"en_dates\":dates[1]})\n #Run an update on th weekly prices\n process_weekly_prices(\n r.id,\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e})\n #Lap\n logger.info(run_time.lap())\n logger.info(run_time.show_latest_lap_time(show_time=True))\n logger.info(f\"GAP FILL RUN TIME - {run_time.end()}\")\n\n logger.info(f'\\nGAP FILL ERROR COUNT -> {len(errors)}')\n if len(errors) > 0:\n logger.info('GAP FILL ERRORS 
->')\n for e in errors:\n logger.error(e)", "def set_period_starters(self, missing_period_starters=MISSING_PERIOD_STARTERS):\n for period in self.Periods:\n period.Starters = {self.HomeTeamId: [], self.VisitorTeamId: []}\n subbed_in_players = {self.HomeTeamId: [], self.VisitorTeamId: []}\n for pbp_event in period.Events:\n player_id = pbp_event.player_id\n if player_id in self.Players[self.HomeTeamId]:\n team_id = self.HomeTeamId\n elif player_id in self.Players[self.VisitorTeamId]:\n team_id = self.VisitorTeamId\n else:\n team_id = None\n\n if team_id is not None and team_id != '0' and player_id != '0':\n player2_id = pbp_event.player2_id\n player3_id = pbp_event.player3_id\n if pbp_event.is_substitution():\n # player_id is player going out, player2_id is playing coming in\n if player2_id not in period.Starters[team_id] and player2_id not in subbed_in_players[team_id]:\n subbed_in_players[team_id].append(player2_id)\n if player_id not in period.Starters[team_id] and player_id not in subbed_in_players[team_id]:\n if player_id in self.Players[self.HomeTeamId] or player_id in self.Players[self.VisitorTeamId]:\n period.Starters[team_id].append(player_id)\n if player_id != '0':\n # player_id 0 is team\n if player_id not in period.Starters[team_id] and player_id not in subbed_in_players[team_id]:\n if player_id in self.Players[self.HomeTeamId] or player_id in self.Players[self.VisitorTeamId]:\n if not (\n pbp_event.is_technical_foul() or\n pbp_event.is_double_technical_foul() or\n pbp_event.is_ejection() or\n (pbp_event.is_technical_ft() and pbp_event.clock_time == '12:00') # ignore technical fts at start of period\n ):\n # ignore all techs because a player could get a technical foul when they aren't in the game\n period.Starters[team_id].append(player_id)\n # need player2_id for players who play full period and never appear in an event as player_id - ex assists\n if (player2_id in self.Players[self.HomeTeamId] or player2_id in self.Players[self.VisitorTeamId]) and not pbp_event.is_substitution():\n if not (\n pbp_event.is_technical_foul() or\n pbp_event.is_double_technical_foul() or\n pbp_event.is_ejection()\n ):\n # ignore all techs because a player could get a technical foul when they aren't in the game\n if player2_id in self.Players[self.HomeTeamId]:\n player2_team_id = self.HomeTeamId\n if player2_id in self.Players[self.VisitorTeamId]:\n player2_team_id = self.VisitorTeamId\n if player2_id not in period.Starters[player2_team_id] and player2_id not in subbed_in_players[player2_team_id]:\n period.Starters[player2_team_id].append(player2_id)\n if (player3_id in self.Players[self.HomeTeamId] or player3_id in self.Players[self.VisitorTeamId]) and not pbp_event.is_substitution():\n if not (\n pbp_event.is_technical_foul() or\n pbp_event.is_double_technical_foul() or\n pbp_event.is_ejection()\n ):\n # ignore all techs because a player could get a technical foul when they aren't in the game\n if player3_id in self.Players[self.HomeTeamId]:\n player3_team_id = self.HomeTeamId\n if player3_id in self.Players[self.VisitorTeamId]:\n player3_team_id = self.VisitorTeamId\n if player3_id not in period.Starters[player3_team_id] and player3_id not in subbed_in_players[player3_team_id]:\n period.Starters[player3_team_id].append(player3_id)\n\n if self.GameId in missing_period_starters.keys() and str(period.Number) in missing_period_starters[self.GameId].keys():\n for team_id in missing_period_starters[self.GameId][str(period.Number)].keys():\n period.Starters[team_id] = 
missing_period_starters[self.GameId][str(period.Number)][team_id]\n\n for team_id in period.Starters.keys():\n if len(period.Starters[team_id]) != 5:\n raise InvalidNumberOfStartersException(f\"GameId: {self.GameId}, Period: {period}, TeamId: {team_id}, Players: {period.Starters[team_id]}\")", "def _get_time_periods(current_time_period=None):\n sorted_time_periods = list(SORTED_TIME_PERIODS)\n if current_time_period and current_time_period in sorted_time_periods:\n # make the current time period the first in the list so that it shows up\n # as selected in project choice drop down\n for time_period in sorted_time_periods:\n if time_period == current_time_period:\n sorted_time_periods.remove(time_period)\n sorted_time_periods.insert(0, time_period)\n break\n\n return sorted_time_periods", "def _summarize_period(self):\n print(\"entering _summarize_period()\")\n for i in range(TIME_BETWEEN_FEEDBACK // SONG_OVER_CHECK_TIME): ### Wait 30 seconds\n self._check_completion()\n if self._song_over is True:\n break\n self._set_last_30_sec()\n return", "def spending_over_time_test_data():\n for i in range(30):\n # Define some values that are calculated and used multiple times\n transaction_id = i\n award_id = i + 1000\n awarding_agency_id = i + 2000\n toptier_awarding_agency_id = i + 3000\n subtier_awarding_agency_id = i + 4000\n funding_agency_id = i + 5000\n toptier_funding_agency_id = i + 6000\n subtier_funding_agency_id = i + 7000\n federal_action_obligation = i + 8000\n total_obligation = i + 9000\n federal_account_id = i + 10000\n treasury_account_id = i + 11000\n\n action_date = f\"20{i % 10 + 10}-{i % 9 + 1}-{i % 28 + 1}\"\n action_date_obj = datetime.datetime.strptime(action_date, \"%Y-%m-%d\")\n fiscal_month = generate_fiscal_month(action_date_obj)\n fiscal_year = generate_fiscal_year(action_date_obj)\n fiscal_action_date = f\"{fiscal_year}-{fiscal_month}-{i % 28 + 1}\"\n contract_award_type = [\"A\", \"B\", \"C\", \"D\"][i % 4]\n grant_award_type = [\"02\", \"03\", \"04\", \"05\"][i % 4]\n is_fpds = i % 2 == 0\n\n # Award\n baker.make(\n \"search.AwardSearch\",\n award_id=award_id,\n fain=f\"fain_{transaction_id}\" if not is_fpds else None,\n is_fpds=is_fpds,\n latest_transaction_id=transaction_id,\n piid=f\"piid_{transaction_id}\" if is_fpds else None,\n total_obligation=total_obligation,\n type=contract_award_type if is_fpds else grant_award_type,\n action_date=\"2020-01-01\",\n )\n\n # Federal, Treasury, and Financial Accounts\n baker.make(\n \"accounts.FederalAccount\",\n id=federal_account_id,\n parent_toptier_agency_id=toptier_awarding_agency_id,\n account_title=f\"federal_account_title_{transaction_id}\",\n federal_account_code=f\"federal_account_code_{transaction_id}\",\n )\n baker.make(\n \"accounts.TreasuryAppropriationAccount\",\n agency_id=f\"taa_aid_{transaction_id}\",\n allocation_transfer_agency_id=f\"taa_ata_{transaction_id}\",\n availability_type_code=f\"taa_a_{transaction_id}\",\n beginning_period_of_availability=f\"taa_bpoa_{transaction_id}\",\n ending_period_of_availability=f\"taa_epoa_{transaction_id}\",\n federal_account_id=federal_account_id,\n main_account_code=f\"taa_main_{transaction_id}\",\n sub_account_code=f\"taa_sub_{transaction_id}\",\n treasury_account_identifier=treasury_account_id,\n )\n tas_components = [\n f\"aid=taa_aid_{transaction_id}\"\n f\"main=taa_main_{transaction_id}\"\n f\"ata=taa_ata_{transaction_id}\"\n f\"sub=taa_sub_{transaction_id}\"\n f\"bpoa=taa_bpoa_{transaction_id}\"\n f\"epoa=taa_epoa_{transaction_id}\"\n f\"a=taa_a_{transaction_id}\"\n 
]\n baker.make(\"awards.FinancialAccountsByAwards\", award_id=award_id, treasury_account_id=treasury_account_id)\n\n # Awarding Agency\n baker.make(\n \"references.Agency\",\n id=awarding_agency_id,\n subtier_agency_id=subtier_awarding_agency_id,\n toptier_agency_id=toptier_awarding_agency_id,\n )\n baker.make(\n \"references.ToptierAgency\",\n abbreviation=f\"toptier_awarding_agency_abbreviation_{transaction_id}\",\n name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n toptier_agency_id=toptier_awarding_agency_id,\n toptier_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n )\n baker.make(\n \"references.SubtierAgency\",\n abbreviation=f\"subtier_awarding_agency_abbreviation_{transaction_id}\",\n name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n subtier_agency_id=subtier_awarding_agency_id,\n subtier_code=f\"subtier_awarding_agency_code_{transaction_id}\",\n )\n\n # Funding Agency\n baker.make(\n \"references.Agency\",\n id=funding_agency_id,\n subtier_agency_id=subtier_funding_agency_id,\n toptier_agency_id=toptier_funding_agency_id,\n )\n baker.make(\n \"references.ToptierAgency\",\n abbreviation=f\"toptier_funding_agency_abbreviation_{transaction_id}\",\n name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n toptier_agency_id=toptier_funding_agency_id,\n toptier_code=f\"toptier_funding_agency_code_{transaction_id}\",\n )\n baker.make(\n \"references.SubtierAgency\",\n abbreviation=f\"subtier_funding_agency_abbreviation_{transaction_id}\",\n name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n subtier_agency_id=subtier_funding_agency_id,\n subtier_code=f\"subtier_funding_agency_code_{transaction_id}\",\n )\n\n # Ref Country Code\n baker.make(\"references.RefCountryCode\", country_code=\"USA\", country_name=\"UNITED STATES\")\n\n # FPDS / FABS\n if is_fpds:\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=transaction_id,\n is_fpds=is_fpds,\n action_date=action_date,\n fiscal_year=fiscal_year,\n fiscal_action_date=fiscal_action_date,\n award_id=award_id,\n awarding_agency_id=awarding_agency_id,\n business_categories=[f\"business_category_1_{transaction_id}\", f\"business_category_2_{transaction_id}\"],\n transaction_description=f\"This is a test description {transaction_id}\"\n if transaction_id % 2 == 0\n else None,\n federal_action_obligation=federal_action_obligation,\n generated_pragmatic_obligation=federal_action_obligation,\n award_amount=total_obligation,\n funding_agency_id=funding_agency_id,\n type=contract_award_type if is_fpds else grant_award_type,\n awarding_agency_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n awarding_toptier_agency_name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n awarding_toptier_agency_abbreviation=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n funding_agency_code=f\"toptier_funding_agency_code_{transaction_id}\",\n funding_toptier_agency_name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n funding_toptier_agency_abbreviation=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n awarding_sub_tier_agency_c=f\"subtier_awarding_agency_code_{transaction_id}\",\n awarding_subtier_agency_name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n funding_sub_tier_agency_co=f\"subtier_funding_agency_code_{transaction_id}\",\n funding_subtier_agency_name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n funding_subtier_agency_abbreviation=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n 
recipient_name=f\"recipient_name_{transaction_id}\",\n recipient_unique_id=f\"{transaction_id:09d}\",\n recipient_hash=\"c687823d-10af-701b-1bad-650c6e680190\" if transaction_id == 21 else None,\n recipient_levels=[\"R\"] if i == 21 else [],\n extent_competed=f\"extent_competed_{transaction_id}\",\n recipient_location_country_code=\"USA\",\n recipient_location_country_name=\"USA\",\n recipient_location_state_code=f\"LE_STATE_CODE_{transaction_id}\",\n recipient_location_county_code=f\"{transaction_id:03d}\",\n recipient_location_county_name=f\"LE_COUNTY_NAME_{transaction_id}\",\n recipient_location_congressional_code=f\"{transaction_id:02d}\",\n recipient_location_zip5=f\"LE_ZIP5_{transaction_id}\",\n recipient_location_city_name=f\"LE_CITY_NAME_{transaction_id}\",\n naics_code=f\"{transaction_id}{transaction_id}\",\n naics_description=f\"naics_description_{transaction_id}\",\n piid=f\"piid_{transaction_id}\",\n pop_country_code=\"USA\",\n pop_country_name=\"UNITED STATES\",\n pop_state_code=f\"POP_STATE_CODE_{transaction_id}\",\n pop_county_code=f\"{transaction_id:03d}\",\n pop_county_name=f\"POP_COUNTY_NAME_{transaction_id}\",\n pop_zip5=f\"POP_ZIP5_{transaction_id}\",\n pop_congressional_code=f\"{transaction_id:02d}\",\n pop_city_name=f\"POP_CITY_NAME_{transaction_id}\",\n product_or_service_code=str(transaction_id).zfill(4),\n product_or_service_description=f\"psc_description_{transaction_id}\",\n type_of_contract_pricing=f\"type_of_contract_pricing_{transaction_id}\",\n type_set_aside=f\"type_set_aside_{transaction_id}\",\n tas_components=tas_components,\n )\n baker.make(\n \"references.NAICS\",\n code=f\"{transaction_id}\",\n description=f\"naics_description_{transaction_id}\",\n )\n baker.make(\n \"references.PSC\", code=str(transaction_id).zfill(4), description=f\"psc_description_{transaction_id}\"\n )\n else:\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=transaction_id,\n is_fpds=is_fpds,\n action_date=action_date,\n fiscal_year=fiscal_year,\n fiscal_action_date=fiscal_action_date,\n award_id=award_id,\n awarding_agency_id=awarding_agency_id,\n business_categories=[f\"business_category_1_{transaction_id}\", f\"business_category_2_{transaction_id}\"],\n transaction_description=f\"This is a test description {transaction_id}\"\n if transaction_id % 2 == 0\n else None,\n federal_action_obligation=federal_action_obligation,\n generated_pragmatic_obligation=federal_action_obligation,\n award_amount=total_obligation,\n funding_agency_id=funding_agency_id,\n type=contract_award_type if is_fpds else grant_award_type,\n awarding_agency_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n awarding_toptier_agency_name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n awarding_toptier_agency_abbreviation=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n funding_agency_code=f\"toptier_funding_agency_code_{transaction_id}\",\n funding_toptier_agency_name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n funding_toptier_agency_abbreviation=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n awarding_sub_tier_agency_c=f\"subtier_awarding_agency_code_{transaction_id}\",\n awarding_subtier_agency_name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n funding_sub_tier_agency_co=f\"subtier_funding_agency_code_{transaction_id}\",\n funding_subtier_agency_name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n funding_subtier_agency_abbreviation=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n 
recipient_name=f\"recipient_name_{transaction_id}\",\n recipient_unique_id=f\"{transaction_id:09d}\",\n recipient_hash=\"c687823d-10af-701b-1bad-650c6e680190\" if transaction_id == 21 else None,\n recipient_levels=[\"R\"] if i == 21 else [],\n cfda_number=f\"cfda_number_{transaction_id}\",\n fain=f\"fain_{transaction_id}\",\n recipient_location_country_code=\"USA\",\n recipient_location_country_name=\"USA\",\n recipient_location_state_code=f\"LE_STATE_CODE_{transaction_id}\",\n recipient_location_county_code=f\"{transaction_id:03d}\",\n recipient_location_county_name=f\"LE_COUNTY_NAME_{transaction_id}\",\n recipient_location_congressional_code=f\"{transaction_id:02d}\",\n recipient_location_zip5=f\"LE_ZIP5_{transaction_id}\",\n recipient_location_city_name=f\"LE_CITY_NAME_{transaction_id}\",\n pop_country_code=\"USA\",\n pop_country_name=\"UNITED STATES\",\n pop_state_code=f\"POP_STATE_CODE_{transaction_id}\",\n pop_county_code=f\"{transaction_id:03d}\",\n pop_county_name=f\"POP_COUNTY_NAME_{transaction_id}\",\n pop_zip5=f\"POP_ZIP5_{transaction_id}\",\n pop_congressional_code=f\"{transaction_id:02d}\",\n pop_city_name=f\"POP_CITY_NAME{transaction_id}\",\n tas_components=tas_components,\n )", "def print_alerts_stats(now, stats_period_seconds, alerts_df):\n\n if not alerts_df.empty:\n active_alerts = alerts_df.loc[alerts_df[\"date_end\"].isnull()]\n archived_alerts = alerts_df.loc[alerts_df[\"date_end\"].notnull()]\n new_alerts = active_alerts.loc[active_alerts[\"date_start\"] > now - stats_period_seconds]\n recovered_alerts = archived_alerts.loc[archived_alerts[\"date_end\"] > now - stats_period_seconds ]\n\n if len(new_alerts) > 0:\n print(f'### New alerts during the last {stats_period_seconds} seconds###')\n for index, row in new_alerts.iterrows():\n date_time_start = datetime.datetime.fromtimestamp(row['date_start']).strftime('%Y-%m-%d %H:%M:%S')\n print(f\" - {row['type']} generated an alert - {row['value_start']}, triggered at {date_time_start}\")\n\n if len(recovered_alerts) > 0:\n print(f'### Recovered alerts during the last {stats_period_seconds} seconds###')\n for index, row in recovered_alerts.iterrows():\n date_time_start = datetime.datetime.fromtimestamp(row['date_start']).strftime('%Y-%m-%d %H:%M:%S')\n date_time_end = datetime.datetime.fromtimestamp(row['date_end']).strftime('%Y-%m-%d %H:%M:%S')\n print(f\" - {row['type']} triggered at {date_time_start} recovered at {date_time_end} - was {row['value_start']}, now {row['value_end']}\")\n\n if len(active_alerts) > 0:\n print(f'### Active alerts ###')\n for index, row in active_alerts.iterrows():\n date_time = datetime.datetime.fromtimestamp(row['date_start']).strftime('%Y-%m-%d %H:%M:%S')\n print(f\" - Since {date_time} : {row['type']}\")\n\n if len(archived_alerts) > 0:\n print(f'### Past alerts ###')\n for index, row in archived_alerts.iterrows():\n date_time_start = datetime.datetime.fromtimestamp(row['date_start']).strftime('%Y-%m-%d %H:%M:%S')\n date_time_end = datetime.datetime.fromtimestamp(row['date_start']).strftime('%Y-%m-%d %H:%M:%S')\n print(f\" - {date_time_start} to {date_time_end} : {row['type']}\")\n else:\n print(f'### No alert ###')", "def get_aggregate_periods(all_periods, all_samples, write_to_csv=True):\n null_idx = all_periods['activation_date'].isna()\n all_periods['activation_date'][null_idx] = all_periods['date_from'][null_idx]\n assert (all_periods['date_to'] >= all_periods['date_from']).all()\n # we can do this below since they're all in 2017 and this way is faster\n all_periods['days_to_publish'] = 
all_periods['date_from'].dt.dayofyear - \\\n all_periods['activation_date'].dt.dayofyear\n all_periods['days_online'] = all_periods['date_to'].dt.dayofyear - \\\n all_periods['date_from'].dt.dayofyear\n for col in ['activation_date', 'date_from', 'date_to']:\n all_periods = featurize_date_col(all_periods, col)\n\n grouped = all_periods.groupby('item_id')\n base = grouped[['item_id']].count().rename(columns={'item_id': 'nlisted'})\n base['sum_days_online'] = grouped[['days_online']].sum()\n base['mean_days_online'] = grouped[['days_online']].mean()\n base['last_days_online'] = grouped[['days_online']].last()\n base['sum_days_to_publish'] = grouped[['days_to_publish']].sum()\n base['mean_days_to_publish'] = grouped[['days_to_publish']].mean()\n base['median_date_to_isholiday'] = grouped[['date_to_isholiday']].median()\n base['median_date_to_wday'] = grouped[['date_to_wday']].median()\n base['median_date_to_yday'] = grouped[['date_to_yday']].median()\n\n base['start_date'] = grouped[['date_from']].min()\n base['end_date'] = grouped[['date_to']].max()\n for col in ['start_date', 'end_date']:\n base = featurize_date_col(base, col, remove_when_done=True)\n if 'item_id' not in all_periods:\n all_periods = all_periods.reset_index()\n if 'item_id' not in base:\n base = base.reset_index()\n all_periods = all_periods.drop_duplicates(['item_id'])\n all_periods = all_periods.merge(base, on='item_id', how='left')\n all_periods = all_periods.merge(all_samples, on='item_id', how='left')\n avg_per_user_periods = all_periods.drop(\n ['item_id', 'activation_date', 'date_from', 'date_to'], axis=1\n ).groupby('user_id').mean()\n avg_per_user_periods['nitems'] = all_periods[\n ['user_id', 'item_id']].groupby('user_id').count().reset_index()['item_id']\n if write_to_csv:\n avg_per_user_periods.to_csv('data/periods_aggregate_features.csv')\n return avg_per_user_periods", "def _fill_moment_results(self):\n toprocess = [('stock_tom', self.c_stock, 2),\n ('stock_woody', self.c_stock, 3),\n ('stock_non_woody', self.c_stock, 4),\n ('stock_acid', self.c_stock, 5),\n ('stock_water', self.c_stock, 6),\n ('stock_ethanol', self.c_stock, 7),\n ('stock_non_soluble', self.c_stock, 8),\n ('stock_humus', self.c_stock, 9),\n ('change_tom', self.c_change, 2),\n ('change_woody', self.c_change, 3),\n ('change_non_woody', self.c_change, 4),\n ('change_acid', self.c_change, 5),\n ('change_water', self.c_change, 6),\n ('change_ethanol', self.c_change, 7),\n ('change_non_soluble', self.c_change, 8),\n ('change_humus', self.c_change, 9),\n ('co2', self.co2_yield, 2)]\n for (resto, dataarr, dataind) in toprocess:\n # filter time steps\n ts = numpy.unique(dataarr[:,1])\n # extract data for the timestep\n for timestep in ts:\n ind = numpy.where(dataarr[:,1]==timestep)\n mean = stats.mean(dataarr[ind[0], dataind])\n mode_res = stats.mode(dataarr[ind[0], dataind])\n mode = mode_res[0]\n var = stats.var(dataarr[ind[0], dataind])\n skew = stats.skew(dataarr[ind[0], dataind])\n kurtosis = stats.kurtosis(dataarr[ind[0], dataind])\n if var>0.0:\n sd2 = 2 * math.sqrt(var)\n else:\n sd2 = var\n res = [[timestep, mean, mode[0], var, skew, kurtosis,\n mean - sd2, mean + sd2]]\n if resto=='stock_tom':\n self.md.stock_tom = numpy.append(self.md.stock_tom,\n res, axis=0)\n elif resto=='stock_woody':\n self.md.stock_woody = numpy.append(self.md.stock_woody,\n res, axis=0)\n elif resto=='stock_non_woody':\n self.md.stock_non_woody = numpy.append(\\\n self.md.stock_non_woody, res, axis=0)\n elif resto=='stock_acid':\n self.md.stock_acid = 
numpy.append(self.md.stock_acid,\n res, axis=0)\n elif resto=='stock_water':\n self.md.stock_water = numpy.append(self.md.stock_water,\n res, axis=0)\n elif resto=='stock_ethanol':\n self.md.stock_ethanol = numpy.append(self.md.stock_ethanol,\n res, axis=0)\n elif resto=='stock_non_soluble':\n self.md.stock_non_soluble= numpy.append(\n self.md.stock_non_soluble, res, axis=0)\n elif resto=='stock_humus':\n self.md.stock_humus = numpy.append(self.md.stock_humus,\n res, axis=0)\n elif resto=='change_tom':\n self.md.change_tom = numpy.append(self.md.change_tom,\n res, axis=0)\n elif resto=='change_woody':\n self.md.change_woody = numpy.append(self.md.change_woody,\n res, axis=0)\n elif resto=='change_non_woody':\n self.md.change_non_woody = numpy.append(\\\n self.md.change_non_woody, res, axis=0)\n elif resto=='change_acid':\n self.md.change_acid = numpy.append(self.md.change_acid,\n res, axis=0)\n elif resto=='change_water':\n self.md.change_water = numpy.append(self.md.change_water,\n res, axis=0)\n elif resto=='change_ethanol':\n self.md.change_ethanol = numpy.append(\n self.md.change_ethanol, res, axis=0)\n elif resto=='change_non_soluble':\n self.md.change_non_soluble=numpy.append(\n self.md.change_non_soluble, res, axis=0)\n elif resto=='change_humus':\n self.md.change_humus = numpy.append(self.md.change_humus,\n res, axis=0)\n elif resto=='co2':\n self.md.co2 = numpy.append(self.md.co2, res, axis=0)", "def pair_records():\r\n\r\n study_list = retrieve_ref('study_list')\r\n sensor_list = retrieve_ref('sensor_list')\r\n\r\n # check each study\r\n for study in study_list:\r\n\r\n df_meta = retrieve_meta(study)\r\n recordNames = list(df_meta['recordName'])\r\n\r\n # create column to list wearableName and coregister records\r\n df_meta = add_wearableName(df_meta)\r\n df_meta['coregisterRecords'] = recordNames\r\n\r\n # look for paired records using the unix time stamp for when the record begins\r\n for recordA in recordNames:\r\n\r\n i = df_meta[ df_meta['recordName']== recordA].index.values[0]\r\n recordBeginA = df_meta.loc[i, 'recordBegin' ]\r\n wearableA = df_meta.loc[i, 'wearableName' ]\r\n\r\n for recordB in recordNames:\r\n\r\n j = df_meta[ df_meta['recordName']== recordB].index.values[0]\r\n recordBeginB = df_meta.loc[j, 'recordBegin' ]\r\n wearableB = df_meta.loc[j, 'wearableName' ]\r\n\r\n if abs(recordBeginA - recordBeginB) < 300:\r\n\r\n if recordA != recordB:\r\n\r\n if wearableA != wearableB:\r\n\r\n print('coregister record found for ' + recordA + ' + ' + recordB)\r\n coregisterList = str(recordA + ' ' + recordB)\r\n df_meta.loc[i, 'coregisterRecords' ] = coregisterList\r\n\r\n save_meta(study, df_meta)" ]
[ "0.60452324", "0.5793913", "0.5528706", "0.5526547", "0.5517215", "0.5493142", "0.54775965", "0.54737484", "0.5462078", "0.54354554", "0.5395026", "0.5321515", "0.529761", "0.5283373", "0.5275335", "0.5271934", "0.52359205", "0.51437724", "0.51378435", "0.513382", "0.51214904", "0.5115958", "0.51156235", "0.50994545", "0.5098985", "0.50937", "0.5085141", "0.5084859", "0.5083243", "0.50631934", "0.5050032", "0.50406504", "0.5030587", "0.5024467", "0.5010877", "0.5004063", "0.4992973", "0.49842623", "0.4964456", "0.4963334", "0.49429554", "0.49367234", "0.49336576", "0.4925689", "0.49231765", "0.49179956", "0.4915248", "0.49092072", "0.48959056", "0.48920822", "0.48910353", "0.48871446", "0.4884426", "0.4883723", "0.48716518", "0.486742", "0.4847713", "0.48465183", "0.48282254", "0.48252833", "0.48232928", "0.48164704", "0.48076624", "0.48031577", "0.4798058", "0.47851086", "0.47810793", "0.4776493", "0.47729877", "0.4770081", "0.47691718", "0.476548", "0.47651705", "0.47644314", "0.4754531", "0.47516888", "0.4750576", "0.4739991", "0.4735852", "0.4735222", "0.4735202", "0.47345805", "0.4731123", "0.47231656", "0.4719897", "0.47168493", "0.47109717", "0.4709279", "0.47080532", "0.47059748", "0.4697536", "0.46955082", "0.4690138", "0.46901008", "0.46873394", "0.4684192", "0.46790767", "0.4678922", "0.46744505", "0.46708104" ]
0.7975035
0
Given a UC480 camera object (from the instrumental module) and a number indicating the number of trap objects, iteratively analyzes camera images and adjusts individual trap magnitudes in order to achieve a nearly homogeneous intensity profile across traps.
import numpy as np
from math import sqrt

def stabilize_intensity(which_cam, cam, verbose=False):
    L = 0.5  # correction rate: gain applied to each feedback step
    mags = np.ones(12)  # per-trap magnitudes; trap count is hard-coded to 12 here
    ntraps = len(mags)
    iteration = 0
    while iteration < 5:
        iteration += 1
        print("Iteration ", iteration)

        im = cam.latest_frame()
        try:
            trap_powers = analyze_image(which_cam, im, ntraps, iteration, verbose)
        except (AttributeError, ValueError) as e:
            print("No Bueno, error occurred during image analysis:\n", e)
            break

        mean_power = trap_powers.mean()
        rel_dif = 100 * trap_powers.std() / mean_power
        print(f'Relative Power Difference: {rel_dif:.2f} %')
        if rel_dif < 0.8:  # converged: relative spread across traps below 0.8 %
            print("WOW")
            break

        # Nudge each trap toward the mean power; the step scales with
        # sqrt(|error|) and is damped by the correction rate L.
        deltaP = [mean_power - P for P in trap_powers]
        dmags = [np.sign(dP) * sqrt(abs(dP)) * L for dP in deltaP]  # np.sign gives the step direction (0 at the mean)
        mags = np.add(mags, dmags)
        print("Magnitudes: ", mags)
        # self._update_magnitudes(mags)  # hardware write-back, disabled in this standalone version
        break  # stop after one pass while the write-back above stays disabled

    _ = analyze_image(which_cam, im, ntraps, verbose=verbose)
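For context, a minimal sketch of how this routine might be driven. The camera handle is assumed to come from instrumental's `instrument()` lookup; the alias string `'uc480'`, the frame rate, and `which_cam=0` are illustrative assumptions, not taken from the source — only `cam.latest_frame()` is actually relied on by `stabilize_intensity` above.

```python
# Usage sketch under the assumptions stated above.
from instrumental import instrument

cam = instrument('uc480')                # open the UC480 camera by saved alias (assumed)
cam.start_live_video(framerate='10Hz')   # assumed live-video API so latest_frame() has data
try:
    stabilize_intensity(which_cam=0, cam=cam, verbose=True)
finally:
    cam.close()                          # assumed cleanup call
```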
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):\n threshes = [0.5, 0.6]\n margin = 10\n threshold = np.max(image) * threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n ## Fitting ##\n if verbose:\n print(\"Fitting...\")\n xdata = np.arange(x_len)\n popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),\n xdata, peak_vals, p0=params0)\n if verbose:\n print(\"Fit!\")\n plt.figure()\n plt.plot(xdata, peak_vals) # Data\n if iteration:\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit\n plt.title(\"Iteration: %d\" % iteration)\n else:\n plt.title(\"Final Product\")\n\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)\n print(\"Fig_Newton\")\n trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])\n return trap_powers", "def enumerate_detector(det, thresholds, shot_ok=None, tiles=None, nimages=np.inf, startimg=0, stopimg=np.inf, correction=False, progress=True):\n global terminated\n Ncorrect = 64\n correctionphotonthres = 3000\n if not isinstance(det, h5py.Group):\n raise TypeError('det should be a h5 group')\n if tiles is None:\n tiles = [k for k in det.keys() if 'tile' in k]\n else:\n newtiles = []\n for t in tiles:\n if t in det:\n newtiles.append(t)\n elif f'tile{t}' in det:\n newtiles.append(f'tile{t}')\n else:\n raise KeyError(f'tile {t} not found')\n tiles = newtiles\n datanames = [(f'{det.name}/{t}/data') for t in tiles]\n filename = det.file.filename\n\n nshots = det[f'{tiles[0]}/data'].shape[0]\n startimg = int(np.clip(startimg, 0, nshots))\n stopimg = int(np.clip(stopimg, startimg, nshots))\n tileshape = det[f'{tiles[0]}/data'].shape[1:]\n correctmask = [correctionmask(det[t]['absfft0/mean'], Ncorrect) for t in tiles]\n if shot_ok is None:\n shot_ok = np.ones(nshots, np.bool)\n ind_filtered = 0\n data = np.zeros((len(tiles), *tileshape))\n willread = np.copy(shot_ok)\n willread[:startimg] = False\n willread[stopimg:] = False\n with datasetreader(datanames, filename, sizecache=10, willread=willread) as reader:\n\n for ind_orig in range(startimg, stopimg):\n if not shot_ok[ind_orig]:\n continue\n if ind_filtered >= nimages or terminated != 0:\n return\n if progress and ind_filtered % 10 == 0:\n print(ind_filtered, end=' ', flush=True)\n\n for it, t in enumerate(tiles):\n cdat = np.array(reader[ind_orig, it], dtype=np.float, order='C')\n if correction:\n correct(cdat, correctionphotonthres, Ncorrect, 
correctmask[it])\n data[it, ...] = cdat\n ev, number, scatter = getstats(data, thresholds)\n\n yield (ind_filtered, ind_orig, data, ev, number, scatter)\n\n ind_filtered += 1", "def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = []\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)", "def simImpacts(blankimage):\n\n # - Initialize variables - #\n count = 0\n unique = .001\n uniquelist = []\n cratersatstep = []\n cratermap = blankimage\n\n # -- Loop until saturation -- #\n while True:\n # - Wait until we have at least 10000 iterations before checking if we\n # have reached saturation - #\n if len(cratersatstep) > 10000:\n # - We calculate average by 
comparing the average of the last 1000\n # to the average of the last 100 - #\n smallAvg = np.average(cratersatstep[-100:])\n bigAvg = np.average(cratersatstep[-1000:])\n # - If we have reached saturation we can leave the loop - #\n if abs( smallAvg - bigAvg ) < (bigAvg * (1 - 0.99)):\n return cratermap, count, uniquelist, cratersatstep\n\n # - Every 1000 impacts we should save an image so we can compare - #\n if count%1000 == 0:\n pl.imshow(image)\n pl.title('Uniform Craters after '+str(int(count))+' Impactors')\n pl.savefig('../images/Uniform'+str(int(count/1000))+'.png')\n pl.clf()\n\n # --- BEGIN SIMULATION CODE --- #\n # - Increment our impactor count - #\n count += 1\n\n # - Generate the location for the center of the crater - #\n x = int(np.random.rand()*500.)\n y = int(np.random.rand()*500.)\n\n # - All of our impactors are the same size since this is our uniform sim - #\n impactsize = 10\n\n # - Pass our image array, the impact size (divided by 2 for radius)\n # origin of the impact, and a unique color value to drawCircle function - #\n cratermap = drawCircle(cratermap, int(impactsize / 2.), [x,y], unique)\n # - Get all of the unique color values still in cratermap - #\n uniquelist = np.unique(cratermap[:,:,0])\n # - Keep track of how many craters we can see at each step of the loop - #\n cratersatstep.append(len(uniquelist))\n\n # - Add to our unique value to keep it unique! - #\n unique += .001\n \n return cratermap, count , uniquelist, cratersvisible", "def test_nirspec_aperture_transforms(verbose=False, siaf=None):\n if siaf is None:\n siaf = Siaf(instrument)\n else:\n siaf = copy.deepcopy(siaf)\n\n labels = ['X', 'Y']\n threshold = 0.2\n\n from_frame = 'sci'\n to_frames = 'det gwa idl tel'.split()\n\n x_sci = np.linspace(-10, 10, 3)\n y_sci = np.linspace(10, -10, 3)\n\n for include_tilt in [False, True]:\n\n for aper_name in siaf.apertures.keys():\n skip = False\n\n # aperture\n aperture = siaf[aper_name]\n # offset slightly from default tilt values\n\n if (aperture.AperType in ['COMPOUND', 'TRANSFORM', 'SLIT']) or ('_FULL' not in aper_name):\n skip = True\n\n if skip is False:\n if(include_tilt is True):\n # set tilt to a representative off nominal value\n gwa_aperture = getattr(aperture, '_CLEAR_GWA_OTE')\n rx0 = getattr(gwa_aperture, 'XSciRef')\n ry0 = getattr(gwa_aperture, 'YSciRef')\n aperture.tilt = (ry0 - 0.002, rx0 - 0.01)\n \n # test transformations\n if verbose:\n print('testing {} {} Tilt={}'.format(siaf.instrument, aper_name, aperture.tilt))\n\n for to_frame in to_frames:\n forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))\n backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))\n\n x_out, y_out = backward_transform(*forward_transform(x_sci, y_sci))\n x_mean_error = np.mean(np.abs(x_sci - x_out))\n y_mean_error = np.mean(np.abs(y_sci - y_out))\n for i, error in enumerate([x_mean_error, y_mean_error]):\n if verbose:\n print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(\n siaf.instrument, aper_name, from_frame, to_frame, labels[i], error))\n assert error < threshold", "def enumerate_detector(det, thresholds, shot_ok=None, tiles=None, nimages=np.inf, stats=True, correction=False, progress=True, photonfunction=None):\n Ncorrect = 64\n correctionphotonthres = 3000\n if not isinstance(det, h5py.Group):\n raise TypeError('det should be a h5 group')\n if tiles is None:\n tiles = [k for k in det.keys() if 'tile' in k]\n else:\n newtiles = []\n for t in tiles:\n if t in det:\n newtiles.append(t)\n elif 
f'tile{t}' in det:\n newtiles.append(f'tile{t}')\n else:\n raise KeyError(f'tile {t} not found')\n tiles = newtiles\n multitiles = not (len(tiles) == 1 and 'data' in det[tiles[0]])\n mincorners = []\n maxcorners = []\n rots = []\n datanames = []\n filename = det.file.filename\n nshots = det[f'{tiles[0]}/data'].shape[0]\n correctmask = []\n for t in tiles:\n d = det[t]\n offset = np.rint(d.attrs['detector_tile_position_in_pixels'])\n rot = int(d.attrs['detector_rotation_steps'][0])\n rots.append(rot)\n n, a, b = d['data'].shape\n if n != nshots:\n raise ValueError('tiles should have same number of shots')\n shape = ((a, b), (-b, a), (-a, -b), (b, -a))[rot % 4]\n corners = (offset, (shape + offset))\n mincorners.append(np.min(corners, axis=0))\n maxcorners.append(np.max(corners, axis=0))\n datanames.append(f'{d.name}/data')\n if correction:\n correctmask.append(correctionmask(det[t]['absfft0/mean'], Ncorrect))\n\n globaloffset = np.floor(np.min(mincorners, axis=0)).astype(int)\n extent = [fastlen(x) for x in (np.ceil(np.max(maxcorners, axis=0)) - globaloffset)]\n startx, starty = [list(s) for s in (np.floor(mincorners - globaloffset).astype(int)).T]\n\n if shot_ok is None:\n shot_ok = np.ones(nshots, np.bool)\n assembled = np.zeros(extent, np.float64)\n global terminated\n ind_filtered = 0\n with datasetreader(datanames, filename, willread=shot_ok) if multitiles else arrayreader(det[tiles[0]]['data']) as reader:\n for ind_orig in range(nshots):\n if not shot_ok[ind_orig]:\n continue\n if ind_filtered >= nimages or terminated != 0:\n return\n if progress and ind_filtered % 100 == 0:\n print(ind_filtered, end=' ', flush=True)\n\n for t in range(len(tiles)):\n if multitiles:\n tile = np.asarray(reader[ind_orig, t], order='C', dtype=np.float64)\n if correction:\n correct(tile, correctionphotonthres, Ncorrect, correctmask[t], rots[t], assembled, startx[t], starty[t])\n else:\n place(tile, rots[t], assembled, startx[t], starty[t])\n else:\n if correction:\n tile = np.asarray(reader[ind_orig], order='C', dtype=np.float64)\n correct(tile, correctionphotonthres, Ncorrect, correctmask[t], rots[t], assembled, startx[t], starty[t])\n else:\n assembled = np.asarray(np.rot90(reader[ind_orig], rots[t]), order='C', dtype=np.float64)\n \n\n \n numberfromfunc = photonfunction(assembled) if photonfunction is not None else None\n if thresholds is not None:\n if stats:\n ev, number, scatter = getstats(assembled, thresholds)\n yield (ind_filtered, ind_orig, np.copy(assembled), ev, number, scatter, numberfromfunc)\n else:\n number = getphotons(assembled, thresholds)\n yield (ind_filtered, ind_orig, np.copy(assembled), None, number, None, numberfromfunc)\n else: \n yield (ind_filtered, ind_orig, np.copy(assembled), None, None, None, numberfromfunc)\n\n \n ind_filtered += 1", "def eye_timings(self, nr_dummy_scans = 6, mystery_threshold = 0.05,saccade_duration_threshold = 10):\n\n\t\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\t# shell()\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr = round(niiFile.rtime*1)/1000.0\n\t\t\twith open (self.runFile(stage = 'processed/eye', run = r, extension = '.msg')) as inputFileHandle:\n\t\t\t\tmsg_file = inputFileHandle.read()\n\n\n\t\t\tsacc_re = 'ESACC\\t(\\S+)[\\s\\t]+(-?\\d*\\.?\\d*)\\t(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+.?\\d+)'\n\t\t\tfix_re = 
'EFIX\\t(\\S+)\\s+(-?\\d*\\.?\\d*)\\t(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)?\\s+(-?\\d+\\.?\\d*)?\\s+(-?\\d+\\.?\\d*)?\\s+(-?\\d+\\.?\\d*)?'\n\t\t\tblink_re = 'EBLINK\\t(\\S+)\\s+(-?\\d*\\.?\\d*)\\t(-?\\d+\\.?\\d*)\\s+(-?\\d?.?\\d*)?'\n\t\t\tstart_eye = 'START\\t(-?\\d+\\.?\\d*)'\n\n\t\t\t# self.logger.info('reading eyelink events from %s', os.path.split(self.message_file)[-1])\n\t\t\tsaccade_strings = re.findall(re.compile(sacc_re), msg_file)\n\t\t\tfix_strings = re.findall(re.compile(fix_re), msg_file)\n\t\t\tblink_strings = re.findall(re.compile(blink_re), msg_file)\n\t\t\tstart_time_scan = float(re.findall(re.compile(start_eye),msg_file)[0])\n\t\t\t\n\t\t\tif len(saccade_strings) > 0:\n\t\t\t\tself.saccades_from_message_file = [{'eye':e[0],'start_timestamp':float(e[1]),'end_timestamp':float(e[2]),'duration':float(e[3]),'start_x':float(e[4]),'start_y':float(e[5]),'end_x':float(e[6]),'end_y':float(e[7]), 'mystery_measure':float(e[8]),'peak_velocity':float(e[9])} for e in saccade_strings]\n\t\t\t\tself.fixations_from_message_file = [{'eye':e[0],'start_timestamp':float(e[1]),'end_timestamp':float(e[2]),'duration':float(e[3]),'x':float(e[4]),'y':float(e[5]),'pupil_size':float(e[6])} for e in fix_strings]\n\t\t\t\tself.blinks_from_message_file = [{'eye':e[0],'start_timestamp':float(e[1]),'end_timestamp':float(e[2]),'duration':float(e[3])} for e in blink_strings]\n\t\t\t\n\t\t\t\tself.saccade_type_dictionary = np.dtype([(s , np.array(self.saccades_from_message_file[0][s]).dtype) for s in self.saccades_from_message_file[0].keys()])\n\t\t\t\tself.fixation_type_dictionary = np.dtype([(s , np.array(self.fixations_from_message_file[0][s]).dtype) for s in self.fixations_from_message_file[0].keys()])\n\t\t\t\tif len(self.blinks_from_message_file) > 0:\n\t\t\t\t\tself.blink_type_dictionary = np.dtype([(s , np.array(self.blinks_from_message_file[0][s]).dtype) for s in self.blinks_from_message_file[0].keys()])\n\t\t\t\n\t\t\teye_blinks = [[((self.blinks_from_message_file[i]['start_timestamp']- start_time_scan)/1000) - nr_dummy_scans*tr, self.blinks_from_message_file[i]['duration']/1000,1] for i in range(len(self.blinks_from_message_file)) if (self.blinks_from_message_file[i]['start_timestamp']- start_time_scan) > (nr_dummy_scans*tr*1000)]\n\t\t\t\n\t\t\t\n\t\t\tsaccades = [[((self.saccades_from_message_file[i]['start_timestamp']- start_time_scan)/1000) - nr_dummy_scans*tr, self.saccades_from_message_file[i]['duration']/1000,1] for i in range(len(self.saccades_from_message_file)) if np.all([(self.saccades_from_message_file[i]['start_timestamp']- start_time_scan) > (nr_dummy_scans*tr*1000), (self.saccades_from_message_file[i]['duration'] > saccade_duration_threshold)]) ]\n\t\t\tsaccades_thresholded = [[((self.saccades_from_message_file[i]['start_timestamp']- start_time_scan)/1000) - nr_dummy_scans*tr, self.saccades_from_message_file[i]['duration']/1000,1] for i in range(len(self.saccades_from_message_file)) if np.all([(self.saccades_from_message_file[i]['start_timestamp']- start_time_scan) > (nr_dummy_scans*tr*1000), (self.saccades_from_message_file[i]['mystery_measure'] > mystery_threshold), (self.saccades_from_message_file[i]['duration'] > saccade_duration_threshold)]) ]\n\t\t\n\t\t\tnp.savetxt(self.runFile(stage = 'processed/eye', run = r, extension = '.txt', postFix = ['eye_blinks']), np.array(eye_blinks), fmt = '%3.2f', delimiter = '\\t')\n\t\t\tnp.savetxt(self.runFile(stage = 'processed/eye', run = r, extension = '.txt', postFix = ['saccades']), np.array(saccades), fmt = '%3.2f', delimiter = 
'\\t')\n\t\t\tnp.savetxt(self.runFile(stage = 'processed/eye', run = r, extension = '.txt', postFix = ['saccades_thresholded']), np.array(saccades_thresholded), fmt = '%3.2f', delimiter = '\\t')", "def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = 
np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)", "def update_for_in_trap(self, t, traps): #******\n sources = traps.param['source_locations'] #Of format [(0,0),]\n for trap_num, trap_loc in enumerate(sources):\n dist_vals = distance((self.x_position, self.y_position),trap_loc)\n mask_trapped = dist_vals < traps.param['trap_radius']\n self.mode[mask_trapped] = self.Mode_Trapped\n self.trap_num[mask_trapped] = trap_num\n self.x_trap_loc[mask_trapped] = trap_loc[0]\n self.y_trap_loc[mask_trapped] = trap_loc[1]\n self.x_velocity[mask_trapped] = 0.0\n self.y_velocity[mask_trapped] = 0.0\n\n # Get time stamp for newly trapped flies\n mask_newly_trapped = mask_trapped & (self.t_in_trap == scipy.inf)\n self.t_in_trap[mask_newly_trapped] = t", "def run(self):\n openShutter = True\n actuateXed = False\n image_type = \"PPUMP\"\n\n wl = float(self.eo_config.get(\"PPUMP_WL\", 550))\n meas_flux = self.measured_flux(wl)\n seqno = 0\n for tokens in self.instructions:\n exptime = float(tokens[1])\n nframes = int(tokens[2])\n shifts = int(tokens[3])\n for iframe in range(nframes):\n self.image_clears()\n self.bias_image(seqno)\n self.take_image(seqno, exptime, openShutter, actuateXed,\n image_type)\n seqno += 1", "def analyze(self):\n try:\n self.options[self.multi_image][1]()\n except:\n raise Exception(\"Multi Image Option not defined.\")\n\n self.image = self.data / self.exposure\n\n background = self.min_val = np.min(self.image[:511,:511])\n self.max_val = np.max(self.image[:511,:511])\n # stats.mode returns modal value = value that occours most often\n #background = stats.mode(im[:50,:50].ravel())[0][0]\n\n intensity = self.image.sum() - background*np.size(self.image)\n\n #results.append((self.index, intensity, background))\n self.index =+ 1", "def main():\n\n with its.device.ItsSession() as cam:\n\n props = cam.get_camera_properties()\n its.caps.skip_unless(its.caps.raw16(props) and\n its.caps.manual_sensor(props) and\n its.caps.read_3a(props) and\n its.caps.per_frame_control(props) and\n not its.caps.mono_camera(props))\n debug = its.caps.debug_mode()\n\n # Expose for the scene with min sensitivity\n exp_min, exp_max = props[\"android.sensor.info.exposureTimeRange\"]\n sens_min, _ = props[\"android.sensor.info.sensitivityRange\"]\n # Digital gains might not be visible on RAW data\n sens_max = props[\"android.sensor.maxAnalogSensitivity\"]\n sens_step = (sens_max - sens_min) / NUM_ISO_STEPS\n white_level = float(props[\"android.sensor.info.whiteLevel\"])\n black_levels = [its.image.get_black_level(i,props) for i in range(4)]\n # Get the active array width and height.\n aax = props[\"android.sensor.info.activeArraySize\"][\"left\"]\n aay = props[\"android.sensor.info.activeArraySize\"][\"top\"]\n aaw = props[\"android.sensor.info.activeArraySize\"][\"right\"]-aax\n aah = props[\"android.sensor.info.activeArraySize\"][\"bottom\"]-aay\n raw_stat_fmt = 
{\"format\": \"rawStats\",\n \"gridWidth\": aaw/IMG_STATS_GRID,\n \"gridHeight\": aah/IMG_STATS_GRID}\n\n e_test = []\n mult = 1.0\n while exp_min*mult < exp_max:\n e_test.append(int(exp_min*mult))\n mult *= EXP_MULT\n if e_test[-1] < exp_max * INCREASING_THR:\n e_test.append(int(exp_max))\n e_test_ms = [e / 1000000.0 for e in e_test]\n\n for s in range(sens_min, sens_max, sens_step):\n means = []\n means.append(black_levels)\n reqs = [its.objects.manual_capture_request(s, e, 0) for e in e_test]\n # Capture raw in debug mode, rawStats otherwise\n caps = []\n for i in range(len(reqs) / SLICE_LEN):\n if debug:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], raw_stat_fmt)\n last_n = len(reqs) % SLICE_LEN\n if last_n == 1:\n if debug:\n caps += [cam.do_capture(reqs[-last_n:], cam.CAP_RAW)]\n else:\n caps += [cam.do_capture(reqs[-last_n:], raw_stat_fmt)]\n elif last_n > 0:\n if debug:\n caps += cam.do_capture(reqs[-last_n:], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[-last_n:], raw_stat_fmt)\n\n # Measure the mean of each channel.\n # Each shot should be brighter (except underexposed/overexposed scene)\n for i,cap in enumerate(caps):\n if debug:\n planes = its.image.convert_capture_to_planes(cap, props)\n tiles = [its.image.get_image_patch(p, 0.445, 0.445, 0.11, 0.11) for p in planes]\n mean = [m * white_level for tile in tiles\n for m in its.image.compute_image_means(tile)]\n img = its.image.convert_capture_to_rgb_image(cap, props=props)\n its.image.write_image(img, \"%s_s=%d_e=%05d.jpg\" % (NAME, s, e_test))\n else:\n mean_image, _ = its.image.unpack_rawstats_capture(cap)\n mean = mean_image[IMG_STATS_GRID/2, IMG_STATS_GRID/2]\n\n print \"ISO=%d, exposure time=%.3fms, mean=%s\" % (\n s, e_test[i] / 1000000.0, str(mean))\n means.append(mean)\n\n\n # means[0] is black level value\n r = [m[0] for m in means[1:]]\n gr = [m[1] for m in means[1:]]\n gb = [m[2] for m in means[1:]]\n b = [m[3] for m in means[1:]]\n\n pylab.plot(e_test_ms, r, \"r.-\")\n pylab.plot(e_test_ms, b, \"b.-\")\n pylab.plot(e_test_ms, gr, \"g.-\")\n pylab.plot(e_test_ms, gb, \"k.-\")\n pylab.xscale('log')\n pylab.yscale('log')\n pylab.title(\"%s ISO=%d\" % (NAME, s))\n pylab.xlabel(\"Exposure time (ms)\")\n pylab.ylabel(\"Center patch pixel mean\")\n matplotlib.pyplot.savefig(\"%s_s=%d.png\" % (NAME, s))\n pylab.clf()\n\n allow_under_saturated = True\n for i in xrange(1, len(means)):\n prev_mean = means[i-1]\n mean = means[i]\n\n if np.isclose(max(mean), white_level, rtol=SATURATION_TOL):\n print \"Saturated: white_level %f, max_mean %f\"% (white_level, max(mean))\n break;\n\n if allow_under_saturated and np.allclose(mean, black_levels, rtol=BLK_LVL_TOL):\n # All channel means are close to black level\n continue\n\n allow_under_saturated = False\n # Check pixel means are increasing (with small tolerance)\n channels = [\"Red\", \"Gr\", \"Gb\", \"Blue\"]\n for chan in range(4):\n err_msg = \"ISO=%d, %s, exptime %3fms mean: %.2f, %s mean: %.2f, TOL=%.f%%\" % (\n s, channels[chan],\n e_test_ms[i-1], mean[chan],\n \"black level\" if i == 1 else \"exptime %3fms\"%e_test_ms[i-2],\n prev_mean[chan],\n INCREASING_THR*100)\n assert mean[chan] > prev_mean[chan] * INCREASING_THR, err_msg", "def trapfilt_taps(N, phil, alfa):\n\n\n\n tt = arange(-N/2,N/2 + 1) # Time axis for h(t) \n # ***** Generate impulse response ht here *****\n ht = zeros(len(tt))\n ix = where(tt != 0)[0]\n if alfa != 0:\n ht[ix] = 
((sin(2*pi*phil*tt[ix]))/(pi*tt[ix]))*((sin(2*pi*alfa*phil*tt[ix]))/(2*pi*alfa*phil*tt[ix]))\n    else:\n        ht[ix] = (sin(2*pi*phil*tt[ix]))/(pi*tt[ix])\n    ix0 = where(tt == 0)[0]\n    ht[ix0] = 2*phil\n    ht = ht/sum(power(ht,2))\n\n    return ht", "def guess_image(which_cam, image, ntraps):\n    threshes = [0.5, 0.65]\n    ## Image Conditioning ##\n    margin = 10\n    threshold = np.max(image)*threshes[which_cam]\n    im = image.transpose()\n\n    x_len = len(im)\n    peak_locs = np.zeros(x_len)\n    peak_vals = np.zeros(x_len)\n\n    ## Trap Peak Detection ##\n    for i in range(x_len):\n        if i < margin or x_len - i < margin:\n            peak_locs[i] = 0\n            peak_vals[i] = 0\n        else:\n            peak_locs[i] = np.argmax(im[i])\n            peak_vals[i] = max(im[i])\n\n    ## Trap Range Detection ##\n    first = True\n    pos_first, pos_last = 0, 0\n    left_pos = 0\n    for i, p in enumerate(peak_vals):\n        if p > threshold:\n            left_pos = i\n        elif p < threshold and left_pos != 0:\n            if first:\n                pos_first = (left_pos + i) // 2\n                first = False\n            pos_last = (left_pos + i) // 2\n            left_pos = 0\n\n    ## Separation Value ##\n    separation = (pos_last - pos_first) / ntraps  # In Pixels\n\n    ## Initial Guesses ##\n    means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n    waists0 = (separation * np.ones(ntraps) / 2).tolist()\n    ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n    _params0 = [means0, waists0, ampls0, [0.06]]\n    params0 = [item for sublist in _params0 for item in sublist]\n\n    xdata = np.arange(x_len)\n    plt.figure()\n    plt.plot(xdata, peak_vals)\n    plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r')  # Initial Guess\n    plt.xlim((pos_first - margin, pos_last + margin))\n    plt.legend([\"Data\", \"Guess\", \"Fit\"])\n    plt.show(block=False)", "def mri_dixon_analysis(data_objects, working_dir, settings):\n\n    logger.info(\"Running Dixon analysis Calculation\")\n    logger.info(\"Using settings: %s\", settings)\n\n    output_objects = []\n\n    fat_obj = None\n    water_obj = None\n    for data_obj in data_objects:\n\n        if data_obj.meta_data[\"image_type\"] == \"fat\":\n            fat_obj = data_obj\n\n        if data_obj.meta_data[\"image_type\"] == \"water\":\n            water_obj = data_obj\n\n    if fat_obj is None or water_obj is None:\n        logger.error(\"Both Fat and Water Images are required\")\n        return []\n\n    # Read the image series\n    fat_load_path = fat_obj.path\n    if fat_obj.type == \"DICOM\":\n        fat_load_path = sitk.ImageSeriesReader().GetGDCMSeriesFileNames(fat_obj.path)\n    fat_img = sitk.ReadImage(fat_load_path)\n\n    water_load_path = water_obj.path\n    if water_obj.type == \"DICOM\":\n        water_load_path = sitk.ImageSeriesReader().GetGDCMSeriesFileNames(water_obj.path)\n    water_img = sitk.ReadImage(water_load_path)\n\n    # Cast to float for calculation\n    fat_img = sitk.Cast(fat_img, sitk.sitkFloat32)\n    water_img = sitk.Cast(water_img, sitk.sitkFloat32)\n\n    # Let's do the calculation using NumPy\n    fat_arr = sitk.GetArrayFromImage(fat_img)\n    water_arr = sitk.GetArrayFromImage(water_img)\n\n    # Do the calculation\n    divisor = water_arr + fat_arr\n    fat_fraction_arr = (fat_arr * 100) / divisor\n    fat_fraction_arr[divisor == 0] = 0  # Sets those voxels which were divided by zero to 0\n    water_fraction_arr = (water_arr * 100) / divisor\n    water_fraction_arr[divisor == 0] = 0  # Sets those voxels which were divided by zero to 0\n\n    fat_fraction_img = sitk.GetImageFromArray(fat_fraction_arr)\n    water_fraction_img = sitk.GetImageFromArray(water_fraction_arr)\n\n    fat_fraction_img.CopyInformation(fat_img)\n    water_fraction_img.CopyInformation(water_img)\n\n    # Create the output Data Objects and add them to output_objects\n    fat_fraction_file = 
os.path.join(working_dir, \"fat.nii.gz\")\n sitk.WriteImage(fat_fraction_img, fat_fraction_file)\n water_fraction_file = os.path.join(working_dir, \"water.nii.gz\")\n sitk.WriteImage(water_fraction_img, water_fraction_file)\n\n fat_data_object = DataObject(type=\"FILE\", path=fat_fraction_file, parent=fat_obj)\n output_objects.append(fat_data_object)\n\n water_data_object = DataObject(type=\"FILE\", path=water_fraction_file, parent=water_obj)\n output_objects.append(water_data_object)\n\n return output_objects", "def imagetest(thetainput,doubleopponencyinput):\n theta = thetainput\n rgcMode = doubleopponencyinput\n\n\n C = retina.sample(img,x,y,coeff[i],loc[i],rgb=True) # CENTRE\n S = retina.sample(img,x,y,dcoeff[i],dloc[i],rgb=True) # SURROUND\n \n if rgcMode == 0:\n \tpV,nV = rgc.opponency(C,S,theta)\n else:\n \tpV,nV = rgc.doubleopponency(C,S,theta)\n cv2.namedWindow(\"Input\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Input\", img)\n rIntensity,cIntensity = showNonOpponency(C,theta)\n cv2.namedWindow(\"Intensity Responses\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Intensity Responses\", rIntensity)\n cv2.namedWindow(\"Intensity Responses Cortex\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Intensity Responses Cortex\", cIntensity)\n cv2.waitKey(0)\n #Generate backprojected images\n if showInverse:\n rOpponent = showBPImg(pV,nV)\n cv2.namedWindow(\"Backprojected Opponent Cells Output\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Backprojected Opponent Cells Output\", rOpponent)\n cv2.waitKey(0)\n # Cortex\n if showCortex:\n cOpponent = showCortexImg(pV,nV)\n cv2.namedWindow(\"Cortex Opponent Cells Output\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Cortex Opponent Cells Output\", cOpponent)\n cv2.waitKey(0)", "def calculate_uip(vpx, raster, weight, neuron, tau):\n\n m = 1\n\n vpx[0] = weight[neuron, raster[0][\"id\"]]\n\n for k, evt in enumerate(raster[1:], 1):\n\n dt = evt[\"time\"] - raster[k - 1][\"time\"]\n\n if not dt:\n m -= 1\n else:\n vpx[m] = vpx[m - 1] * np.exp(-dt / tau)\n\n vpx[m] += weight[neuron, evt[\"id\"]]\n\n m += 1", "def stack_tir(scene_urls,cloud_mask_bits,aoi,aoi_crs,\n subtract_median_lst=True,subtract_air_temp=False):\n if subtract_air_temp:\n ceda_password = get_ceda_password()\n at = met_climate.access_ukcp09(cf.ceda_username,ceda_password)\n\n \n # with rasterio.open(scene_bqa) as bqa:\n # with rasterio.open(scene_tir) as tir:\n\n # bqa_data,bqa_trans = ru.read_in_aoi(bqa,**aoi_kwargs)\n # tir_data,tir_trans = ru.read_in_aoi(tir,**aoi_kwargs)\n \n # bqa_data = bqa_data[0,:,:]\n # tir_data = tir_data[0,:,:]\n # tir_data = ma.array(tir_data,dtype=float,\n # mask=ru.mask_qa(bqa_data,bitmask=0b1))\n\n # (ymin,ymax) = (0, tir_data.shape[0])\n # (xmin,xmax) = (0, tir_data.shape[1])\n \n counter=-1\n for scene_url in scene_urls:\n counter+=1\n scene_tir = scene_url\n scene_bqa = scene_url.replace('B'+tirband,'B'+qaband)\n scene_red = scene_url.replace('B'+tirband,'B'+rband)\n scene_nir = scene_url.replace('B'+tirband,'B'+nband)\n scene_metadata = scene_url.replace('B'+tirband+'.TIF','MTL.txt')\n\n print('Reading scene {}'.format(counter+1))\n try:\n with rasterio.open(scene_bqa) as bqa:\n #print(scene_bqa)\n bqa_data,bqa_trans = ru.read_in_aoi(bqa,aoi=aoi,aoi_crs=aoi_crs)\n\n with rasterio.open(scene_tir) as tir:\n #print(scene_tir)\n tir_data,tir_trans = ru.read_in_aoi(tir,aoi=aoi,aoi_crs=aoi_crs)\n tir_crs = tir.crs\n tir_profile = tir.profile\n\n with rasterio.open(scene_red) as red:\n #print(scene_red)\n red_data,red_trans = ru.read_in_aoi(red,aoi=aoi,aoi_crs=aoi_crs)\n red_crs = red.crs\n\n with 
rasterio.open(scene_nir) as nir:\n #print(scene_nir)\n nir_data,nir_trans = ru.read_in_aoi(nir,aoi=aoi,aoi_crs=aoi_crs)\n \n except OSError as e:\n print('ERROR',e)\n print('skipping scene')\n counter = counter-1\n continue\n \n # Determine size of stack allowing for AoI to extend outside of scene\n if counter == 0:\n aoi_box = rasterio.warp.transform_bounds(aoi_crs,tir_crs,*aoi.values())\n aoi_left, aoi_bottom, aoi_right, aoi_top = aoi_box\n aoi_box = dict(zip(('minx','miny','maxx','maxy'),aoi_box))\n # rowmin,colmin = (bqa.index(aoi_left,aoi_top)) #,op=round))\n # rowmax,colmax = (bqa.index(aoi_right,aoi_bottom)) #,op=round))\n # The above two lines are fine but the following does not \n # require the rasterio dataset to be kept open\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,aoi_left,aoi_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,aoi_right,aoi_bottom)\n stack_height,stack_width = (rowmax-rowmin,colmax-colmin)\n lst_stack = (ma.zeros((len(scene_urls),stack_height,stack_width),\n dtype=np.float,fill_value=np.nan\n )+np.nan) \n \n # Determine size of intersect in THIS scene\n intersect = ru.aoi_scene_intersection(aoi_box,bqa)\n ins_left, ins_bottom, ins_right, ins_top = intersect.bounds\n #rowmin,colmin = (bqa.index(ins_left,ins_top,op=round))\n #rowmax,colmax = (bqa.index(ins_right,ins_bottom,op=round))\n # The above two lines are incorrect now that we read a window:\n # We need to transform the coordinates into the row,col of \n # the window, not the original file.\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,ins_left,ins_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,ins_right,ins_bottom)\n\n try:\n # Subset data \n bqa_data = ma.array(bqa_data[0,rowmin:rowmax,colmin:colmax])\n tir_data = ma.array(tir_data[0,rowmin:rowmax,colmin:colmax])\n red_data = ma.array(red_data[0,rowmin:rowmax,colmin:colmax])\n nir_data = ma.array(nir_data[0,rowmin:rowmax,colmin:colmax])\n assert tir_data.shape == lst_stack.shape[1:]\n except (IndexError,AssertionError) as e:\n print('ERROR:',e)\n print('loop count',counter)\n print(tir_data.shape, lst_stack.shape)\n print(rowmin,rowmax,colmin,colmax)\n import pdb; pdb.set_trace()\n\n lst_data = lst.calculate_land_surface_temperature_NB(\n red_data, nir_data, tir_data,\n red_trans, tir_trans, \n red_crs, tir_crs, scene_metadata\n )\n \n # Masks\n smw = 11\n mask_all = filters.maximum_filter(\n ru.mask_qa(bqa_data,bits=cloud_mask_bits),size=smw\n )\n\n lst_data_mask_all = ma.array(lst_data,\n mask=mask_all,\n dtype=np.float,\n fill_value=np.nan) #.filled()\n\n # After masking, reproject\n # not necessary if they share a CRS\n if counter > 0:\n assert tir_crs == prev_crs\n prev_crs = tir_crs\n\n # Now do some normalisation\n if subtract_air_temp:\n filename = scene_tir.split('/')[-1]\n datestring = filename.split('_')[3]\n\n atscene = met_climate.dummy_scene( \n tir_crs, tir_trans, aoi_box,(stack_height,stack_width))\n\n # import pdb; pdb.set_trace()\n # If the following fails, it may mean there was a problem setting up the session\n atdata = at.grid_temp_over_scene(\n atscene, datestring, interpolation='linear')\n atdata = atdata[rowmin:rowmax,colmin:colmax]\n assert lst_data_mask_all.shape == atdata.shape\n lst_data_mask_all = ma.array(\n lst_data_mask_all - atdata,\n mask=mask_all,\n fill_value=np.nan)\n \n if subtract_median_lst:\n # ALSO subtract median xLST\n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n elif 
subtract_median_lst:\n # Subtract median LST from scene (within QA mask) \n \n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n # Then add to stack\n lst_stack[counter,:,:] = lst_data_mask_all\n\n # Make profile for file output\n N_layers = counter+1\n tir_profile.update(\n dtype=rasterio.float64,\n width=stack_width,\n height=stack_height,\n transform=tir_trans,\n count=N_layers,\n compress='lzw'\n )\n\n\n return lst_stack, tir_profile", "def tail_cts_per_shot(datapath, lower, TPQI_starts, bin_size = 0.256, normalize = False, correct_for_bg = True, save = 1, pulses_in_sequence = 300):\n\n print 'analyzing tail counts per shot...' \n current_dir = os.getcwd()\n plt.close('all')\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n\n ch1_counts = data['hist_ch1']\n ch0_counts = data['hist_ch0']\n\n time = bin_size*arange(len(ch1_counts))\n \n if correct_for_bg:\n bg_level_ch1 = ch1_counts[int(0.75*len(ch1_counts)):int(0.90*len(ch1_counts))].mean()\n ch1_counts = ch1_counts - bg_level_ch1*ones(len(ch1_counts))\n bg_level_ch0 = ch0_counts[int(0.75*len(ch0_counts)):int(0.90*len(ch0_counts))].mean()\n ch0_counts = ch0_counts - bg_level_ch0*ones(len(ch0_counts))\n\n #print 'measured background level for [ch0,ch1] = ['+num2str(bg_level_ch0,1)+','+num2str(bg_level_ch1,1)+']'\n\n if normalize:\n ch1_counts_normalized = ch1_counts/ch1_counts.max()\n ch0_counts_normalized = ch0_counts/ch0_counts.max()\n \n upper = lower + 40.0\n\n tail_area_time = time[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch1 = ch1_counts[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch0 = ch0_counts[int(lower/bin_size):int(upper/bin_size)]\n\n tail_counts_per_shot = (tail_area_ch1.sum()+tail_area_ch0.sum())/float(TPQI_starts*pulses_in_sequence)\n\n figure1 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(211)\n if not normalize:\n plt.semilogy(time, ch1_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch1_counts_normalized, '-r')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch1')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n\n plt.subplot(212)\n if not normalize:\n plt.semilogy(time, ch0_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch0_counts_normalized, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch0')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n if save:\n figure1.savefig('tail_cts_per_shot.pdf')\n\n try:\n data.close()\n except:\n pass\n\n print 'tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4'\n\n return tail_counts_per_shot", "def transform(self, 
dataset, number_of_thresholds=1,\n enable_valley_emphasis=False):\n\n # Initial progress\n self.progress.value = 0\n self.progress.maximum = 100\n\n # Approximate percentage of work completed after each step in the\n # transform\n STEP_PCT = [10, 20, 70, 90, 100]\n\n try:\n import itk\n import itkExtras\n import itkTypes\n from tomviz import itkutils\n except Exception as exc:\n print(\"Could not import necessary module(s)\")\n raise exc\n\n # Return values\n returnValues = None\n\n # Add a try/except around the ITK portion. ITK exceptions are\n # passed up to the Python layer, so we can at least report what\n # went wrong with the script, e.g,, unsupported image type.\n try:\n self.progress.value = STEP_PCT[0]\n self.progress.message = \"Converting data to ITK image\"\n\n # Get the ITK image\n itk_image = itkutils.dataset_to_itk_image(dataset)\n itk_input_image_type = type(itk_image)\n\n # OtsuMultipleThresholdsImageFilter's wrapping requires that the\n # input and output image types be the same.\n itk_threshold_image_type = itk_input_image_type\n\n # Otsu multiple threshold filter\n otsu_filter = itk.OtsuMultipleThresholdsImageFilter[\n itk_input_image_type, itk_threshold_image_type].New()\n otsu_filter.SetNumberOfThresholds(number_of_thresholds)\n otsu_filter.SetValleyEmphasis(enable_valley_emphasis)\n otsu_filter.SetInput(itk_image)\n itkutils.observe_filter_progress(self, otsu_filter,\n STEP_PCT[1], STEP_PCT[2])\n\n try:\n otsu_filter.Update()\n except RuntimeError:\n return\n\n print(\"Otsu threshold(s): %s\" % (otsu_filter.GetThresholds(),))\n\n itk_image_data = otsu_filter.GetOutput()\n\n # Cast threshold output to an integral type if needed.\n py_buffer_type = itk_threshold_image_type\n voxel_type = itkExtras.template(itk_threshold_image_type)[1][0]\n if voxel_type is itkTypes.F or voxel_type is itkTypes.D:\n self.progress.message = \"Casting output to integral type\"\n\n # Unsigned char supports 256 labels, or 255 threshold levels.\n # This should be sufficient for all but the most unusual use\n # cases.\n py_buffer_type = itk.Image.UC3\n caster = itk.CastImageFilter[itk_threshold_image_type,\n py_buffer_type].New()\n caster.SetInput(itk_image_data)\n itkutils.observe_filter_progress(self, caster,\n STEP_PCT[2], STEP_PCT[3])\n\n try:\n caster.Update()\n except RuntimeError:\n return\n\n itk_image_data = caster.GetOutput()\n\n self.progress.value = STEP_PCT[3]\n self.progress.message = \"Saving results\"\n\n label_map_dataset = dataset.create_child_dataset()\n itkutils.set_itk_image_on_dataset(itk_image_data, label_map_dataset,\n dtype=py_buffer_type)\n\n self.progress.value = STEP_PCT[4]\n\n # Set up dictionary to return operator results\n returnValues = {}\n returnValues[\"label_map\"] = label_map_dataset\n\n except Exception as exc:\n print(\"Problem encountered while running %s\" %\n self.__class__.__name__)\n raise exc\n\n return returnValues", "def process_observation(self, observation):\n #print(\"start_process_obs\")\n processed_observation = np.zeros((NB_AGENTS, OBSERVATION_SIZE))\n\n goliath_type = getattr(env, 'Terran_Goliath')\n battlecruiser_type = getattr(env, 'Terran_Battlecruiser')\n '''\n goliath and battlecruiser type:\n hp_max: 125\n armor: 1\n cooldown_max: 22\n acceleration: 1\n top_speed: 4.57\n damage_amount: 12\n damage_factor: 1\n weapon_range: 192\n sight_range: 256\n seek_range: 160\n\n hp_max: 500\n energy_max: 200\n armor: 3\n cooldown_max: 30\n acceleration: 27\n top_speed: 2.5\n damage_amount: 25\n damage_factor: 1\n weapon_range: 192\n sight_range: 
352\n '''\n #print(\"goliath and battlecruiser type:\")\n #print(goliath_type)\n #print(battlecruiser_type)\n\n for i, agent in enumerate(observation.my_unit):\n if agent.hp <= 0:\n continue\n my_x = agent.pos_x\n my_y = agent.pos_y\n my_type_str = agent.unit_type\n my_type = goliath_type if my_type_str == 'Terran_Goliath' else print(\"error in the my_type\")\n t1 = [agent.hp + agent.shield, agent.cooldown, math.atan2(agent.velocity_y, agent.velocity_x),\n math.sqrt((agent.velocity_x) ** 2 + (agent.velocity_y) ** 2), agent.angle,\n 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame]\n t2 = [self.last_action[i] / (env.action_space[1] - 1)]\n t3 = [i.nearest_obstacle_dist for i in agent.pos_info]\n t4 = []\n t5 = []\n t4_max = []\n t5_max = []\n for idx, enemy in enumerate(observation.en_unit):\n en_type_str = enemy.unit_type\n if en_type_str == 'Terran_Battlecruiser':\n en_type = battlecruiser_type\n else:\n continue \n if enemy.hp <= 0:\n t4.extend([0,0,0,0,0,0,0,0,0,0])\n else:\n t4.extend([math.atan2(enemy.pos_y - my_y, enemy.pos_x - my_x), math.sqrt((enemy.pos_x - my_x) ** 2 + (enemy.pos_y - my_y) ** 2),\n math.atan2(enemy.velocity_y, enemy.velocity_x), math.sqrt((enemy.velocity_x) ** 2 + (enemy.velocity_y) ** 2),\n enemy.cooldown, enemy.hp + enemy.shield, enemy.angle, 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame])\n t4_max.extend([math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1])\n for idx, ally in enumerate(observation.my_unit):\n if i == idx:\n continue\n if ally.hp <= 0:\n t5.extend([0,0,0,0,0])\n else:\n t5.extend([math.atan2(ally.pos_y - my_y, ally.pos_x - my_x), math.sqrt((ally.pos_x - my_x) ** 2 + (ally.pos_y - my_y) ** 2),\n math.atan2(ally.velocity_y, ally.velocity_x), math.sqrt((ally.velocity_x) ** 2 + (ally.velocity_y) ** 2), ally.hp + ally.shield])\n ally_type = goliath_type\n t5_max.extend([math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max])\n if my_type_str == 'Terran_Goliath':\n t1_max = [my_type.hp_max + my_type.shield_max, 1, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n else:\n t1_max = [my_type.hp_max + my_type.shield_max, my_type.cooldown_max, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n #t4_max = [math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1]\n #t5_max = [math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max]\n\n #t5_max = [32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + type.shield_max, math.pi]\n\n t1 = np.divide(t1, t1_max) # runtime warning\n t2 = np.array(t2) / 320\n t3 = np.array(t3) / 320\n t4 = np.divide(t4, t4_max)\n t5 = np.divide(t5, t5_max)\n\n processed_observation[i] = np.concatenate([t1, t2, t3, t4, t5])\n\n self.last_my_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.my_unit]) > 0))\n self.last_enemy_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.en_unit]) > 0))\n self.last_enemy_unit_hp.append(sum([u.hp + u.shield for u in observation.en_unit]))\n self.accumulated_observation.append(processed_observation)\n\n\n return processed_observation", "def 
single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def photometry(userinputs, image, catalog, outputname, apertures, annulus='', dannulus='', recenter=False):\n logging.info('Running photometry function on {}'.format(image))\n logging.info('Using {}px apertures'.format(apertures))\n\n #set directory\n target_dir = userinputs['OUTDIR']\n\n #Update passed names to be full paths if they are not\n\n if len(image.split('/'))==1:\n logging.info('Looking for {} in {}.'.format(image,userinputs['DATA']))\n image = glob.glob(userinputs['DATA'] + '/' + image)\n if len(image)==0:\n logging.critical('No {} image 
found'.format(image))\n filemanagement.shutdown('Selected image does not exist',userinputs)\n else:\n image = image[0]\n logging.debug('Using image: {}'.format(image))\n\n if len(catalog.split('/'))==1:\n catalog = target_dir + '/init/' + catalog\n logging.debug('Input catalog: {}'.format(catalog))\n\n if len(outputname.split('/'))==1:\n output = target_dir + '/photometry/' + outputname\n logging.debug('Output name: {}'.format(output))\n else:\n output = outputname\n outputname = outputname.split('/')[-1]\n logging.debug('Output name: {}'.format(output))\n\n\n #Load zeropoints\n inst_zp, filter_zp, zp_zp = np.loadtxt(target_dir + '/init/Hi-PEEC_zeropoints.tab', unpack=True, dtype='str')\n # print inst_zp, filter_zp, zp_zp\n # Get filter from header\n filter = get_filter(image)\n\n\n # Set the necessary variables for photometry on the reference image\n exptime = fits.getheader(image)['EXPTIME']\n logging.debug('Exposure time from header: {}'.format(exptime))\n inst = fits.getheader(image)['INSTRUME']\n logging.debug('Intrument from header: {}'.format(inst))\n inst = inst.lower()\n\n\n match = (inst_zp == inst) & (filter_zp == filter.lower())\n zp = zp_zp[match]\n\n # zp is a string within an array, so need to turn into a float\n try:\n zp = float(zp[0])\n #If that cannot be done there was no match.\n except IndexError:\n if inst == 'acs':\n logging.debug('Zeropoint not found in file, passing to ACS calculation')\n zp = ACS_zeropoint(image)\n elif inst == 'wfc3':\n logging.debug('Zeropoint not found in file, passing to WFC3 calculation')\n zp = WFC3_zeropoint(image)\n else:\n logging.critical('No matching zeropoint found. Quitting.')\n logging.debug('No zeropoint match found for filter {} with instrument {}'\\\n .format(filter,inst))\n logging.debug('Available filters in zeropoint file : {} for instrument {}'\\\n .format(filter_zp, inst_zp))\n filemanagement.shutdown('No zeropoint was found for filter: {}'.format(filter),userinputs)\n\n logging.debug('Zeropoint from file: {}'.format(zp))\n # Remove output file if it already exists\n filemanagement.remove_if_exists(output)\n\n\n # Run photometry\n #--------------------------------------------------------------------------\n # Set up IRAF params:\n iraf.datapars.epadu = exptime\n\n # !!!!!!!!!!!!!!!!!\n # Only center on reference frame\n if recenter:\n iraf.centerpars.calgorithm = 'centroid'\n else:\n iraf.centerpars.calgorithm = 'none'\n # !!!!!!!!!!!!!!!\n # CHANGE BACKGROUND ESTIMATE IN ANNULUS TO MODE\n\n # Select the annulus depending on whether it is overwritten in the function call or not\n if annulus == '':\n iraf.fitskypars.annulus = userinputs['ANNULUS']\n logging.debug('Using annulus from inputfile ({}px)'.format(userinputs['ANNULUS']))\n else:\n iraf.fitskypars.annulus = annulus\n logging.debug('Using user specified annulus ({}px)'.format(annulus))\n if dannulus == '':\n iraf.fitskypars.dannulus = userinputs['D_ANNULUS']\n logging.debug('Using annulus width from inputfile ({}px)'.format(userinputs['D_ANNULUS']))\n else:\n iraf.fitskypars.dannulus = dannulus\n logging.debug('Using user specified annulus width ({}px)'.format(dannulus))\n\n iraf.photpars.apertures = apertures\n logging.debug('Using aperture(s) of {}px'.format(apertures))\n iraf.photpars.zmag = zp\n logging.debug('Setting zeropoint to {}'.format(zp))\n\n # Do phot\n iraf.phot(image+'[SCI]', catalog, output)\n #--------------------------------------------------------------------------\n\n\n #Depending on the number of apertures used, different methods of saving the\n # 
results are required\n #--------------------------------------------------------------------------\n\n naper = len(apertures.split(','))\n logging.debug('Number of apertures used {}'.format(naper))\n\n #final output filename\n fullcat_mag_short = target_dir + '/photometry/short_' + outputname\n\n if naper > 1:\n # Removes all outputlines that do not contain the character '*'\n # ensures only phot results are kept\n cmd = 'grep \"*\" ' + output + ' > ' + fullcat_mag_short\n os.system(cmd)\n\n # Replace INDEFS:\n cmd = 'sed -i.bak \"s/INDEF/99.999/g\" ' + fullcat_mag_short\n os.system(cmd)\n\n # Remove .bak files to prevent confusion\n bak_fullcat = fullcat_mag_short + '.bak'\n os.remove(bak_fullcat)\n\n\n else:\n #Dump results into a temp file\n temp = target_dir + '/photometry/phot_dump.mag'\n filemanagement.remove_if_exists(temp)\n iraf.txdump(output, 'XCENTER,YCENTER,FLUX,MAG,MERR,MSKY,ID', 'yes', Stdout = temp)\n\n # Set placeholders for sources outside of FOV and undetected sources\n # For outside of FOV, use 66.666 instead of INDEF\n # For undetected sources, use 99.999 instead of INDEF\n\n # Sources outside of FOV have exactly zero flux\n x, y, flux, mag, merr, msky, id = np.loadtxt(temp, unpack = True,\n dtype = str)\n\n flux = flux.astype(float)\n\n out_fov = (flux == 0.)\n logging.debug('Number of sources outside FOV: {}'.format(len(out_fov)))\n\n mag[out_fov] = 66.666\n merr[out_fov] = 66.666\n msky[out_fov] = 66.666\n\n # Undetected sources, those with negative flux or fluxes so small that mag err\n # is INDEF\n neg_flux = (flux < 0.)\n tiny_flux = (flux > 0.) & (merr == 'INDEF')\n\n mag[neg_flux] = 99.999\n merr[neg_flux] = 99.999\n msky[neg_flux] = 99.999\n\n merr[tiny_flux] = 99.999\n msky[tiny_flux] = 99.999\n\n logging.debug('Nr of undetected sources: {}'.format(len(tiny_flux)+len(neg_flux)))\n # Save results to new file\n x = x.astype(float)\n y = y.astype(float)\n mag = mag.astype(float)\n merr = merr.astype(float)\n msky = msky.astype(float)\n id = id.astype(int)\n\n zip_phot = zip(x, y, mag, merr, msky, id)\n\n np.savetxt(fullcat_mag_short, zip_phot,\n fmt = '%.3f %.3f %.3f %.3f %.9f %i')\n\n #--------------------------------------------------------------------------\n\n return fullcat_mag_short", "def main():\n camera = picamera.PiCamera()\n camera.resolution = (RESOLUTIONX, RESOLUTIONY)\n camera.iso = 800\n time.sleep(2)\n while True:\n camera.capture('current-image.jpg')\n adapt_steering(navigation.get_xposition('current-image.jpg'))\n time.sleep(0.4)", "def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 
83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic", "def calculate_thresholds(self):\n \n for group in self.roi_groups:\n for roi in group.rois:\n for image in range(len(roi.counts)):\n # print(roi.autothreshs)\n # print('image',image)\n if roi.autothreshs[image]:\n values = np.fromiter(roi.counts[image].values(), dtype=float)\n roi.thresholds[image] = self.calculate_threshold(values)\n\n for image, im_copy in enumerate(self.copy_im_threshs): # copy values from a different image and set to manual thresh if needed\n if im_copy is not None:\n for group in self.roi_groups:\n for roi in group.rois:\n roi.autothreshs[image] = False\n roi.thresholds[image] = roi.thresholds[im_copy]", "def calculate_dark_current(image, i, int_time):\n dark_data_dir = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Integration_Sweep\\Dark'\n data_path_name_split = image.split('_')\n #print(data_path_name_split)\n all_int_files = [each for each in os.listdir(dark_data_dir) \\\n if each.endswith('_'+data_path_name_split[-1])] \n print(all_int_files)\n \n dark_data_file = os.path.join(dark_data_dir, all_int_files[0])\n IDL_variable = readsav(dark_data_file) \n all_full_frame = IDL_variable.q \n quad = all_full_frame[:, i, :, :]\n active_quad = np.mean(quad[:, 4:1028, 10:1034], axis=0) \n tsoc = np.mean(quad[:, 4:1028, 1034:1056], axis=0)\n bias_subtracted_quad = perform_bias_subtraction_ave(active_quad, tsoc)\n smear_subtracted_quad, smear_signal = perform_smear_subtraction(bias_subtracted_quad[10:1000, :], int_time)\n return smear_subtracted_quad", "def process_sample(self, ch, method, properties, body):\n\n # data inside a dictionary\n # {'amplitude':[1.3,4.4,5...],\n # 'angle': [0.04,0.1,...]}\n sample_dict = 
self.deserialize_vtt_60_processed(body)\n\n        with open('sample_test_run.txt', 'a') as f:\n            x_arrstr = np.char.mod('%d', sample_dict['amplitude'])\n\n            # x_arrstr -> should be 2d array \"frame\".\n            raw_data = \",\".join(x_arrstr.flatten())\n            f.write(raw_data)\n            f.write('\\n')\n\n        arr = np.array(sample_dict['amplitude'])\n\n        frame = np.reshape(arr, (180,110))\n        frame_transposed_flipped = np.flip(np.transpose(frame))\n        details_removed = frame_transposed_flipped\n        details_removed = np.clip(frame_transposed_flipped, a_min=frame_transposed_flipped.max() - 15, a_max=None)\n        self.i = self.i + 1\n        if self.i % 10 == 0:\n            b = sum(pd.DataFrame(frame).max().diff() / pd.DataFrame(frame).max() > 0.13)\n            self.report_people(b)\n\n\n        if body is None or sample_dict is None:\n            print('Stream stopped.')\n            return\n        if self.samples_in_total % self.fps == 0:\n            endTime = time.time()\n            print('FPS: {:.1f}'.format(self.fps/(endTime - self.startTime)))\n            self.startTime = endTime\n        \n        # your code here\n        amplitude = sample_dict['amplitude']\n        amplitude = np.reshape(amplitude,(180,110))\n        amplitude = np.where(amplitude>130,130,amplitude)\n        amplitude = np.uint8(255*amplitude/130)\n        out = cv2.cvtColor(amplitude,cv2.COLOR_GRAY2BGR)\n        out = cv2.applyColorMap(out,cv2.COLORMAP_MAGMA)\n        cv2.imshow('junction',out)\n        cv2.waitKey(10)", "def optimize_trap(dg):\n    f_peak = './temp_peak.lh5' # lh5\n    f_results = './temp_results.h5' # pandas\n    grp_data, grp_grid = '/optimize_data', '/optimize_grid'\n    \n    # epar, elo, ehi, epb = 'energy', 0, 1e7, 10000 # full range\n    epar, elo, ehi, epb = 'energy', 3.88e6, 3.92e6, 500 # K40 peak\n    \n    show_movie = True\n    write_output = True\n    n_rows = None # default None\n    \n    with open('opt_trap.json') as f:\n        dsp_config = json.load(f, object_pairs_hook=OrderedDict)\n    \n    # files to consider. fixme: right now only works with one file\n    sto = lh5.Store()\n    lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n    raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n    f_raw = raw_list.values[0] \n    tb_raw = 'ORSIS3302DecoderForEnergy/raw/'\n\n    # quick check of the energy range\n    # ene_raw = sto.read_object(tb_raw+'/'+epar, f_raw).nda\n    # hist, bins, var = pgh.get_hist(ene_raw, range=(elo, ehi), dx=epb)\n    # plt.plot(bins[1:], hist, ds='steps')\n    # plt.show()\n    # exit()\n    \n    # set grid parameters\n    # TODO: jason's suggestions, knowing the expected shape of the noise curve\n    # e_rises = np.linspace(-1, 0, sqrt(sqrt(3))\n    # e_rises # make another list which is 10^pwr of this list\n    # np.linspace(log_tau_min, log_tau_max) # try this too\n    e_rises = np.arange(1, 12, 1)\n    e_flats = np.arange(1, 6, 1)\n    # rc_consts = np.arange(54, 154, 10) # changing this here messes up DCR\n    \n    # -- create the grid search file the first time -- \n    # NOTE: this makes a linear grid, and is editable by the arrays above.\n    # jason also proposed a more active gradient-descent style search\n    # like with Brent's method. 
(https://en.wikipedia.org/wiki/Brent%27s_method)\n \n if True:\n # if not os.path.exists(f_peak):\n print('Recreating grid search file')\n \n # create the grid file\n # NOTE: save it as an lh5 Table just as an example of writing/reading one\n lists = [e_rises, e_flats]#, rc_consts]\n prod = list(itertools.product(*lists)) # clint <3 stackoverflow\n df_grid = pd.DataFrame(prod, columns=['rise', 'flat'])#,'rc']) \n lh5_grid = {}\n for i, dfcol in df_grid.iteritems():\n lh5_grid[dfcol.name] = lh5.Array(dfcol.values)\n tb_grid = lh5.Table(col_dict=lh5_grid)\n sto.write_object(tb_grid, grp_grid, f_peak)\n \n # filter events by onboard energy\n ene_raw = sto.read_object(tb_raw+'/'+epar, f_raw).nda\n # hist, bins, var = pgh.get_hist(ene_raw, range=(elo, ehi), dx=epb)\n # plt.plot(bins[1:], hist, ds='steps')\n # plt.show()\n if n_rows is not None:\n ene_raw = ene_raw[:n_rows]\n idx = np.where((ene_raw > elo) & (ene_raw < ehi))\n\n # create a filtered table with correct waveform and attrs\n # TODO: move this into a function in lh5.py which takes idx as an input\n tb_data, wf_tb_data = lh5.Table(), lh5.Table()\n\n # read non-wf cols (lh5 Arrays)\n data_raw = sto.read_object(tb_raw, f_raw, n_rows=n_rows)\n for col in data_raw.keys():\n if col=='waveform': continue\n newcol = lh5.Array(data_raw[col].nda[idx], attrs=data_raw[col].attrs)\n tb_data.add_field(col, newcol)\n \n # handle waveform column (lh5 Table)\n data_wfs = sto.read_object(tb_raw+'/waveform', f_raw, n_rows=n_rows)\n for col in data_wfs.keys():\n attrs = data_wfs[col].attrs\n if isinstance(data_wfs[col], lh5.ArrayOfEqualSizedArrays):\n # idk why i can't put the filtered array into the constructor\n aoesa = lh5.ArrayOfEqualSizedArrays(attrs=attrs, dims=[1,1])\n aoesa.nda = data_wfs[col].nda[idx]\n newcol = aoesa\n else:\n newcol = lh5.Array(data_wfs[col].nda[idx], attrs=attrs)\n wf_tb_data.add_field(col, newcol)\n tb_data.add_field('waveform', wf_tb_data)\n tb_data.attrs = data_raw.attrs\n sto.write_object(tb_data, grp_data, f_peak)\n\n else:\n print('Loading peak file. 
groups:', sto.ls(f_peak))\n tb_grid = sto.read_object(grp_grid, f_peak)\n tb_data = sto.read_object(grp_data, f_peak) # filtered file\n # tb_data = sto.read_object(tb_raw, f_raw) # orig file\n df_grid = tb_grid.get_dataframe()\n \n # check shape of input table\n print('input table attributes:')\n for key in tb_data.keys():\n obj = tb_data[key]\n if isinstance(obj, lh5.Table):\n for key2 in obj.keys():\n obj2 = obj[key2]\n print(' ', key, key2, obj2.nda.shape, obj2.attrs)\n else:\n print(' ', key, obj.nda.shape, obj.attrs)\n\n # clear new colums if they exist\n new_cols = ['e_fit', 'fwhm_fit', 'rchisq', 'xF_err', 'fwhm_ovr_mean']\n for col in new_cols:\n if col in df_grid.columns:\n df_grid.drop(col, axis=1, inplace=True)\n\n t_start = time.time()\n def run_dsp(dfrow):\n \"\"\"\n run dsp on the test file, editing the processor list\n alternate idea: generate a long list of processors with different names\n \"\"\"\n # adjust dsp config dictionary\n rise, flat = dfrow\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = f'{tau}*us'\n dsp_config['processors']['wf_trap']['args'][1] = f'{rise}*us'\n dsp_config['processors']['wf_trap']['args'][2] = f'{flat}*us'\n # pprint(dsp_config)\n \n # run dsp\n pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=0)\n pc.execute()\n \n # analyze peak\n e_peak = 1460.\n etype = 'trapEmax'\n elo, ehi, epb = 4000, 4500, 3 # the peak moves around a bunch\n energy = tb_out[etype].nda\n \n # get histogram\n hE, bins, vE = pgh.get_hist(energy, range=(elo, ehi), dx=epb)\n xE = bins[1:]\n \n # should I center the max at 1460?\n\n # simple numerical width\n i_max = np.argmax(hE)\n h_max = hE[i_max]\n upr_half = xE[(xE > xE[i_max]) & (hE <= h_max/2)][0]\n bot_half = xE[(xE < xE[i_max]) & (hE >= h_max/2)][0]\n fwhm = upr_half - bot_half\n sig = fwhm / 2.355\n \n # fit to gaussian: amp, mu, sig, bkg\n fit_func = pgf.gauss_bkg\n amp = h_max * fwhm\n bg0 = np.mean(hE[:20])\n x0 = [amp, xE[i_max], sig, bg0]\n xF, xF_cov = pgf.fit_hist(fit_func, hE, bins, var=vE, guess=x0)\n\n # collect results\n e_fit = xF[0]\n xF_err = np.sqrt(np.diag(xF_cov))\n e_err = xF\n fwhm_fit = xF[1] * 2.355 * 1460. / e_fit\n \n fwhm_err = xF_err[2] * 2.355 * 1460. / e_fit\n \n chisq = []\n for i, h in enumerate(hE):\n model = fit_func(xE[i], *xF)\n diff = (model - h)**2 / model\n chisq.append(abs(diff))\n rchisq = sum(np.array(chisq) / len(hE))\n fwhm_ovr_mean = fwhm_fit / e_fit\n\n if show_movie:\n \n plt.plot(xE, hE, ds='steps', c='b', lw=2, label=f'{etype} {rise}--{flat}')\n\n # peak shape\n plt.plot(xE, fit_func(xE, *x0), '-', c='orange', alpha=0.5,\n label='init. 
guess')\n plt.plot(xE, fit_func(xE, *xF), '-r', alpha=0.8, label='peakshape fit')\n plt.plot(np.nan, np.nan, '-w', label=f'mu={e_fit:.1f}, fwhm={fwhm_fit:.2f}')\n\n plt.xlabel(etype, ha='right', x=1)\n plt.ylabel('Counts', ha='right', y=1)\n plt.legend(loc=2)\n\n # show a little movie\n plt.show(block=False)\n plt.pause(0.01)\n plt.cla()\n\n # return results\n return pd.Series({'e_fit':e_fit, 'fwhm_fit':fwhm_fit, 'rchisq':rchisq,\n 'fwhm_err':xF_err[0], 'fwhm_ovr_mean': fwhm_ovr_mean})\n \n # df_grid=df_grid[:10]\n df_tmp = df_grid.progress_apply(run_dsp, axis=1)\n df_grid[new_cols] = df_tmp\n # print(df_grid)\n \n if show_movie:\n plt.close()\n \n print('elapsed:', time.time() - t_start)\n if write_output:\n df_grid.to_hdf(f_results, key=grp_grid)\n print(f\"Wrote output file: {f_results}\")", "def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve.\n suffix = '+orig'\n epifile = '%s%s' % (info['imgfile'], suffix)\n prefix = info['imgfile_m']\n base_entry = info['base_entry']\n if info['base'] == 'start':\n# Use the first frame specified in template file. Defaults\n# to zero.\n base = info['motion_ref_frame']\n else:\n# Use the last frame.\n base = self.info[base_entry]['tdim'] - info['skip']-1\n base = ('%d' % base).replace(' ','')\n\n# Correct for slice-timing.\n self.SliceTimeCorrect(info, epifile)\n\n plane = info['plane']\n anat_tgt = info['anat_tgt']\n# anat_entry = self.anat_entry[plane]\n\n if info['catmats']:\n# Include additonal transformation in motion correction such\n# that final image is in register with the fieldmap, which has\n# been registered to the structural image that will be used for\n# spatial normalization.\n self.MotcorCatenate(info, base, anat_tgt)\n else:\n# Assume fieldmap is in register with the structural.\n self.Motcor(info, base)\n\n if info.get('fmapname', None) is None:\n# No fieldmap correction.\n if self.fsl_flip:\n# Flip the way fslview likes it.\n self.FSLFlip(info['imgfile_m'], info['imgfile_final'])\n elif info['suffix'] == '.nii':\n# Copy motion-corrected images from /tmp to output directory\n outfile = info['imgfile_final'] + info['suffix']\n cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile)\n self.CheckExec(cmd, [outfile], force=True)\n cmd = '/bin/rm %s+orig*' % info['imgfile_m']\n self.CheckExec(cmd, [], force=True)", "def test_rolling_before_analysis(self):\n cheese = TomoCheese.from_demo_images()\n cheese.analyze()\n original_roi_1 = copy.copy(cheese.module.rois[\"1\"].pixel_value)\n for img in cheese.dicom_stack:\n img.roll(direction=\"x\", amount=20)\n cheese.analyze()\n new_roi_1 = cheese.module.rois[\"1\"].pixel_value\n assert math.isclose(original_roi_1, new_roi_1, abs_tol=3)", "def atmos_worker(srcs, window, ij, args):\n src = srcs[0]\n rgb = src.read(window=window)\n rgb = to_math_type(rgb)\n\n atmos = simple_atmo(rgb, args[\"atmo\"], args[\"contrast\"], args[\"bias\"])\n\n # should be scaled 0 to 1, scale to outtype\n return scale_dtype(atmos, args[\"out_dtype\"])", "def make_sim(args):\n flux, nread, iexp = args\n satlevel=65535\n\n #outdir = \"sim_data_flux\" + str(flux)\n #outdir = \"sim_data_flux\" + str(flux) + \"_nonl\"\n outdir = \"sim_\" + \"exposure\" + str(iexp) + \"_flux\" + str(flux)\n pathlib.Path(outdir).mkdir(parents=True, exist_ok=True) \n \n for i in range(1, nread+1):\n logger.info(\"Looping through UTR {}\".format(i))\n 
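# Note on the ramp below: frame i of the up-the-ramp sequence holds\n        # flux * TREAD * i counts, so successive reads brighten linearly until\n        # nonlinearity, pedestal, read noise and the saturation clip are applied.\n        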
#outfileprefix = outdir + \"/exposure\" + str(iexp) + \"_utr\" + str(i)\n        outfileprefix = outdir + \"/utr\" + str(i)\n        refoutfileprefix = outdir + \"/utr\" + str(i) + \"_ref\"\n\n        primary_hdu = fits.PrimaryHDU()\n        outhdus = fits.HDUList([primary_hdu])\n        outhdus_ref = fits.HDUList([primary_hdu])\n\n        ahdu = fits.HDUList([primary_hdu]) #for a coefficients\n        bhdu = fits.HDUList([primary_hdu]) #for b coefficients\n\n        #loop all 16 detectors\n        ndet = 16\n        logger.info(\"Looping through all 16 detectors\")\n        for idet in range(ndet):\n            detid = detids[idet]\n            logger.info(\"Looping through the detector {}, {}\".format(idet+1, detid)) \n\n            flux_actual = np.zeros((2048, 2048))\n            #start with the measured flux, constant spatially but varying temporally\n            flux_actual[4:2044,4:2044] = flux * TREAD * i #fill data region with measured flux, (ADU/f) * s = ADU ???\n            flux_actual *= Gain #e-??\n            logger.info(\"Filled with measured flux\")\n\n            #calculate nonlinearity from calibration data, varying spatially but not temporally, unit e-??\n            flux_nonlinear = make_nonlinear(flux_actual, outfileprefix, ahdu, bhdu, detid) \n            #flux_nonlinear = flux_actual #no non-linearity\n            logger.info(\"Made non-linearized flux\")\n\n            #add pedestal, constant spatially but varying temporally\n            pedestal = random.randint(0, 1000) #set the random pedestal (between 0 - 1000) for this exposure, unit e-??\n            flux_nonlinear[:, :] += pedestal #add the pedestal to all of the data including the reference pixels.\n            logger.info(\"Added pedestal\")\n\n            #add noise (e-)\n            readnoise_array = np.random.normal(size=[2048,2048]) * ReadNoise #generate the read noise, the reference pixels should also have noise. \n            flux_nonlinear += readnoise_array #add the read noise to the data array\n            logger.info(\"Added read noise\")\n\n            # set any pixel above the saturation limit to the saturation limit.\n            # This can be set to 65535 for all pixels for now, but should be able to \n            # take a map since this will vary pixel to pixel. 
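With a\n            # calibration-derived per-pixel map (hypothetical sat_map), the clip below\n            # would become np.minimum(flux_nonlinear, sat_map) instead of a scalar.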
\n            ind = np.where(flux_nonlinear > satlevel)\n            flux_nonlinear[ind] = satlevel\n            logger.info(\"Checked saturation level\")\n\n            #add this detector to the final simulated data\n            hdu = fits.ImageHDU(data=flux_nonlinear, name=detid)\n            outhdus.append(hdu)\n\n            #do reference pixel correction\n            updownCorr(flux_nonlinear)\n            leftrightCorr(flux_nonlinear)\n\n            #add this detector to another output\n            hdu1 = fits.ImageHDU(data=flux_nonlinear, name=detid)\n            outhdus_ref.append(hdu1)\n        \n\n        #write the sim data for this UTR\n        outfile = outfileprefix + \".fits\"\n        #logger.info(\"Writing sim data to: {}\".format(outfile))\n        #outhdus.writeto(outfile)\n\n        refoutfile = refoutfileprefix + \".fits\"\n        logger.info(\"Writing reference pixel corrected sim data to: {}\".format(refoutfile))\n        outhdus_ref.writeto(refoutfile)\n\n        #ahdu.writeto(outfileprefix + \"_a_coef.fits\")\n        #bhdu.writeto(outfileprefix + \"_b_coef.fits\")", "def calibrate(cap, location):\n\n    #Position and size of sensor\n    [x, y, h, w] = location\n\n    #show square to user and wait for key\n    print(\"please, step away to clear the blue square displayed on screen and press q to continue\")\n    while True:\n        ret, frame = cap.read()\n        cv2.namedWindow('Calibrate',cv2.WINDOW_NORMAL)\n        show = cv2.rectangle(frame, (x,y), (x+w,y+h), (255, 0, 0) , 5)\n        cv2.imshow('Calibrate', show)\n        key = cv2.waitKey(1)\n        if key == ord('q'):\n            break\n\n    #get first image, process and define window previous for iteration\n    ret, frame = cap.read()\n    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    frame = cv2.GaussianBlur(frame, (7,7), 0)\n    previous = frame[y:y+w,x:x+h]\n\n    #set parameters for mean value of sensor, kernel of erode function,\n    sampleNbMean = 50\n    xi = np.empty((0, sampleNbMean))\n    kernel = np.ones((5,5), np.uint8)\n\n    #iterate over each frame until sample number\n    for iteration in range(sampleNbMean):\n\n        # Capture frame, draw the window and display to the user\n        ret, frame = cap.read()\n        # Image operation\n        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        frame = cv2.GaussianBlur(frame, (7,7), 0)\n\n        #get present window\n        present = frame[y:y+w,x:x+h]\n\n        #add sample for mean, add difference of window with previous\n        xi = np.append(xi,\n                np.sum(\n                cv2.erode(\n                cv2.bitwise_xor(present,previous), kernel, iterations=1)))\n\n        #present image becomes previous before stepping into next image\n        previous = present\n\n    #mean\n    mean = np.sum(xi)/len(xi)\n\n    #sigma\n    sum = 0\n    for sample in xi:\n        sum += np.power(sample - mean, 2)\n    sigma = np.sqrt(sum/len(xi))\n\n    #close window\n    cv2.destroyWindow('Calibrate')\n\n    return mean, sigma", "def __call__(self, frame_num):\n        quant_sys = self.quant_sys\n        quant_sys.propagate(10)\n\n        # propagate the wigner function\n        self.img_clasical_rho.set_array(\n            (quant_sys.D22 + quant_sys.D11).real\n            #quant_sys.get_classical_rho()\n        )\n\n        self.img_Upsilon2.set_array(\n            quant_sys.quantum_rho.real\n        )\n\n        return self.img_clasical_rho, self.img_Upsilon2", "def main ():\n\n    # interval ends\n    a,b = -1.0, +1.0\n\n    # List of trapezium counts\n    NVals = numpy.logspace (1, 8, base=10.0, num=6, dtype=numpy.int)\n\n    # List of implementations\n    intFuncList = [ \n                 (integral.trapintPython, \"python.txt\")\n#\t\t (integral.trapintNumPy, \"numpy.txt\"),\n#\t\t (integral.trapintNumba, \"numba.txt\"),\n#\t\t (cpintegral.trapint,\t \"cython.txt\")\n                  ]\n\n    for intFunc, file in intFuncList:\n\n\t# Open results file...\n\n\tfh = open( file, \"w\" ) \n\n \t# compute Pi \n\tfor N in NVals:\n \tt1 = time.time ()\n \t \tv1 = intFunc (a, b, N)\n\t \te1 = math.fabs (v1 - math.pi)\n 
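\t \t# turn the start stamp into elapsed wall-clock seconds for this N\n 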
\t \tt1 = time.time () - t1\n\t \tfh.write(\"{:d} {:6.4g}\\n\".format(N, t1))\n\tfh.close()", "def process_image( self, image ):\n \n # 1. detect cars in image at different scales\n \n # Modify x/y start stop according to scale, cars appear smaller near horizon\n scales = config.scales\n \n box_list = []\n for scale_item in scales:\n scale = scale_item[\"scale\"]\n detects_image, boxes = hog_subsample.find_cars(image, \n scale_item[\"y_start_stop\"][0], scale_item[\"y_start_stop\"][1], \n scale, \n config.settings[\"svc\"], \n config.settings[\"scaler\"], \n config.settings[\"orient\"], \n config.settings[\"pix_per_cell\"], config.settings[\"cell_per_block\"], \n config.settings[\"spatial_size\"], config.settings[\"hist_bins\"],\n scale_item[\"x_start_stop\"][0], scale_item[\"x_start_stop\"][1])\n box_list.extend(boxes)\n \n # Update history\n self.bbox_list_history.append( box_list )\n bbox_list_history_list = sum(self.bbox_list_history.copy(), []) # single list of bbox lists in history\n \n # 2. heat map and threshold\n \n # Make zeros shaped like image\n heat = np.zeros_like(image[:,:,0]).astype(np.float)\n\n # Add heat for each box in box list history\n heat = heatmap_threshold_detection.add_heat(heat, bbox_list_history_list)\n\n # Apply threshold to help remove false positives\n heat_threshold = config.heatmap_threshold\n heat = heatmap_threshold_detection.apply_threshold(heat, heat_threshold)\n\n # Find final boxes from heatmap using label function\n heatmap = np.clip(heat, 0, 255) # only need to clip if there is more than 255 boxes around a point?\n labels = label(heatmap)\n boxed_image = heatmap_threshold_detection.draw_labeled_bboxes(np.copy(image), labels)\n \n # frame image annotation\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(boxed_image,\"Frame:{}\".format(config.count), (10,100), font, 1, (255,255,255), 2 ,cv2.LINE_AA )\n \n return boxed_image", "def process_plot_mri_with_damaged(paths, params):\n\n\n\t# hdf5 file that contains the original images\n\thdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])\n\t\n\t# get all patient names from original MRI group\n\tpatients = get_datasets_from_group(group_name = params['group_original_mri'], hdf5_file = hdf5_file)\n\n\t# get list of patients without state\n\tpatients = set([re.search('(.*) (fersk|Tint)', x).group(1) for x in patients])\n\t\n\t# loop over each patient, read data, perform inference\n\tfor i, patient in enumerate(patients):\n\n\t\tlogging.info(f'Processing patient: {patient} {i + 1}/{len(patients)}')\n\n\t\t# parse out treatment, sample, and state from patient name\n\t\ttreatment, _, _ = parse_patientname(patient_name = f'{patient} fersk')\n\n\t\t\"\"\"\n\t\tGet fresh state\n\t\t\"\"\"\n\t\t# read original images\n\t\tfresh_original_images = read_dataset_from_group(dataset = f'{patient} fersk', group_name = params['group_original_mri'], hdf5_file = hdf5_file)\n\t\t# read reconstructed images\n\t\tfresh_reconstructed_images = read_dataset_from_group(dataset = f'{patient} fersk', group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)\n\t\t# only take damaged tissue and set connected tissue\n\t\tfresh_reconstructed_damaged_images = (process_connected_tissue(images = fresh_reconstructed_images.copy(), params = params) == 1)\n\n\t\t\"\"\"\n\t\tGet frozen/thawed\n\t\t\"\"\"\n\t\t# read original images\n\t\tfrozen_original_images = read_dataset_from_group(dataset = f'{patient} Tint', group_name = params['group_original_mri'], hdf5_file = hdf5_file)\n\t\t# read reconstructed 
images\n\t\tfrozen_reconstructed_images = read_dataset_from_group(dataset = f'{patient} Tint', group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)\n\t\t# only take damaged tissue and set connected tissue\n\t\tfrozen_reconstructed_damaged_images = (process_connected_tissue(images = frozen_reconstructed_images.copy(), params = params) == 1)\n\n\t\t# get total number of slices to process\n\t\ttotal_num_slices = fresh_original_images.shape[0]\n\t\t# loop over each slice\n\t\tfor mri_slice in range(total_num_slices):\n\n\t\t\t# check slice validity of fresh patient\n\t\t\tif check_mri_slice_validity(patient = f'{patient} fersk', mri_slice = mri_slice, total_num_slices = total_num_slices):\n\n\t\t\t\tif check_mri_slice_validity(patient = f'{patient} Tint', mri_slice = mri_slice, total_num_slices = total_num_slices):\n\t\n\t\t\t\t\t# setting up the plot environment\n\t\t\t\t\tfig, axs = plt.subplots(2, 2, figsize = (8, 8))\n\t\t\t\t\taxs = axs.ravel()\n\n\t\t\t\t\t# define the colors we want\n\t\t\t\t\tplot_colors = ['#250463', '#e34a33']\n\t\t\t\t\t# create a custom listed colormap (so we can overwrite the colors of predefined cmaps)\n\t\t\t\t\tcmap = colors.ListedColormap(plot_colors)\n\t\t\t\t\t# subfigure label for example, a, b, c, d etc\n\t\t\t\t\tsf = cycle(['a', 'b', 'c', 'd', 'e', 'f', 'g'])\n\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tPlot fresh state\n\t\t\t\t\t\"\"\"\n\t\t\t\t\t# obtain vmax score so image grayscales are normalized better\n\t\t\t\t\tvmax_percentile = 99.9\n\t\t\t\t\tvmax = np.percentile(fresh_original_images[mri_slice], vmax_percentile)\n\t\t\t\t\t\n\t\t\t\t\t# plot fresh original MRI image\n\t\t\t\t\taxs[0].imshow(fresh_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)\n\t\t\t\t\taxs[0].set_title(rf'$\\bf({next(sf)})$ Fresh - Original MRI')\n\t\t\t\t\t\n\t\t\t\t\t# plot fresh reconstucted image overlayed on top of the original image\n\t\t\t\t\t# axs[1].imshow(fresh_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)\n\t\t\t\t\t# im = axs[1].imshow(fresh_reconstructed_images[mri_slice],alpha = 0.7, interpolation = 'none')\n\t\t\t\t\t# axs[1].set_title(rf'$\\bf({next(sf)})$ Fresh - Reconstructed')\n\t\t\t\t\t\n\n\t\t\t\t\t# plot fresh reconstucted image overlayed on top of the original image\n\t\t\t\t\taxs[1].imshow(fresh_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)\n\t\t\t\t\taxs[1].imshow(fresh_reconstructed_damaged_images[mri_slice], cmap = cmap, alpha = .5, interpolation = 'none')\n\t\t\t\t\taxs[1].set_title(rf'$\\bf({next(sf)})$ Fresh - Reconstructed')\n\t\t\t\t\t\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tPlot frozen/thawed state\n\t\t\t\t\t\"\"\"\n\t\t\t\t\t# plot frozen/thawed original MRI image\n\t\t\t\t\t# obtain vmax score so image grayscales are normalized better\n\t\t\t\t\tvmax = np.percentile(frozen_original_images[mri_slice], vmax_percentile)\n\t\t\t\t\taxs[2].imshow(frozen_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)\n\t\t\t\t\taxs[2].set_title(rf'$\\bf({next(sf)})$ {treatment_to_title(treatment)} - Original MRI')\n\n\t\t\t\t\t# plot frozen reconstucted all classes\n\t\t\t\t\t# axs[4].imshow(frozen_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)\n\t\t\t\t\t# im = axs[4].imshow(frozen_reconstructed_images[mri_slice], alpha = 0.7, interpolation = 'none')\n\t\t\t\t\t# axs[4].set_title(rf'$\\bf({next(sf)})$ {treatment_to_title(treatment)} - Reconstructed')\n\t\t\t\t\t\n\t\t\t\t\t# # plot frozen/thawed reconstucted image overlayed on top of the original 
image\n\t\t\t\t\taxs[3].imshow(frozen_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)\n\t\t\t\t\taxs[3].imshow(frozen_reconstructed_damaged_images[mri_slice], cmap = cmap, alpha = .5, interpolation = 'none')\n\t\t\t\t\taxs[3].set_title(rf'$\\bf({next(sf)})$ {treatment_to_title(treatment)} - Reconstructed')\n\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tCreate custom legend\n\t\t\t\t\t\"\"\"\n\t\t\t\t\t# add custom legend\t\t\t\t\n\t\t\t\t\tclass_labels = {0 : 'background', 1 : 'damaged tissue'}\n\t\t\t\t\tclass_values = list(class_labels.keys())\n\t\t\t\t\t# create a patch \n\t\t\t\t\tpatches = [ mpatches.Patch(color = plot_colors[i], label= class_labels[i]) for i in range(len(class_values)) ]\n\t\t\t\t\taxs[1].legend(handles = patches)#, bbox_to_anchor=(1.05, 1), loc = 2, borderaxespad=0. )\n\t\t\t\t\n\t\t\t\t\t# legend for fully reconstructed image\n\t\t\t\t\t# get class labels\n\t\t\t\t\t# class_labels = params['class_labels']\n\t\t\t\t\t# # get class indexes from dictionary\n\t\t\t\t\t# values = class_labels.keys()\n\t\t\t\t\t# # get the colors of the values, according to the \n\t\t\t\t\t# # colormap used by imshow\n\t\t\t\t\t# plt_colors = [ im.cmap(im.norm(value)) for value in values]\n\t\t\t\t\t# # create a patch (proxy artist) for every color \n\t\t\t\t\t# patches = [ mpatches.Patch(color = plt_colors[i], label= class_labels[i]) for i in range(len(values)) ]\n\t\t\t\t\t# # put those patched as legend-handles into the legend\n\t\t\t\t\t# axs[1].legend(handles = patches)#, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )\n\t\t\t\t\t\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tAdjust figures\n\t\t\t\t\t\"\"\"\n\t\t\t\t\t# remove axis of all subplots\n\t\t\t\t\t[ax.axis('off') for ax in axs]\n\t\t\t\t\t# define plot subfolder\n\t\t\t\t\tsubfolder = os.path.join(paths['paper_plot_folder'], 'original_vs_reconstructed', patient)\n\t\t\t\t\t# create subfolder\n\t\t\t\t\tcreate_directory(subfolder)\n\t\t\t\t\t# crop white space\n\t\t\t\t\tfig.set_tight_layout(True)\n\t\t\t\t\t# save the figure\n\t\t\t\t\tfig.savefig(os.path.join(subfolder, f'slice_{mri_slice}.pdf'))\n\t\t\t\t\t\n\t\t\t\t\t# close the figure environment\n\t\t\t\t\tplt.close()", "def main(input_data_path, output_data_path, window):\n # open data info dataframe\n info_df = pd.read_csv(input_data_path + 'hemorrhage_diagnosis_raw_ct.csv')\n # replace No-Hemorrhage to hemorrange\n info_df['Hemorrhage'] = 1 - info_df.No_Hemorrhage\n info_df.drop(columns='No_Hemorrhage', inplace=True)\n # open patient info dataframe\n patient_df = pd.read_csv(input_data_path + 'Patient_demographics.csv', header=1, skipfooter=2, engine='python') \\\n .rename(columns={'Unnamed: 0':'PatientNumber', 'Unnamed: 1':'Age',\n 'Unnamed: 2':'Gender', 'Unnamed: 8':'Fracture', 'Unnamed: 9':'Note'})\n patient_df[patient_df.columns[3:9]] = patient_df[patient_df.columns[3:9]].fillna(0).astype(int)\n # add columns Hemorrgae (any ICH)\n patient_df['Hemorrhage'] = patient_df[patient_df.columns[3:8]].max(axis=1)\n\n # make patient directory\n if not os.path.exists(output_data_path): os.mkdir(output_data_path)\n if not os.path.exists(output_data_path + 'Patient_CT/'): os.mkdir(output_data_path + 'Patient_CT/')\n # iterate over volume to extract data\n output_info = []\n for n, id in enumerate(info_df.PatientNumber.unique()):\n # read nii volume\n ct_nii = nib.load(input_data_path + f'ct_scans/{id:03}.nii')\n mask_nii = nib.load(input_data_path + f'masks/{id:03}.nii')\n # get np.array\n ct_vol = ct_nii.get_fdata()\n mask_vol = skimage.img_as_bool(mask_nii.get_fdata())\n # rotate 
90° counter clockwise for head pointing upward\n ct_vol = np.rot90(ct_vol, axes=(0,1))\n mask_vol = np.rot90(mask_vol, axes=(0,1))\n # window the ct volume to get better contrast of soft tissues\n if window is not None:\n ct_vol = window_ct(ct_vol, win_center=window[0], win_width=window[1], out_range=(0,1))\n\n if mask_vol.shape != ct_vol.shape:\n print(f'>>> Warning! The ct volume of patient {id} does not have '\n f'the same dimension as the ground truth. CT ({ct_vol.shape}) vs Mask ({mask_vol.shape})')\n # make patient directory\n if not os.path.exists(output_data_path + f'Patient_CT/{id:03}/'): os.mkdir(output_data_path + f'Patient_CT/{id:03}/')\n # iterate over slices to save slices\n for i, slice in enumerate(range(ct_vol.shape[2])):\n ct_slice_fn =f'Patient_CT/{id:03}/{slice+1}.tif'\n # save CT slice\n skimage.io.imsave(output_data_path + ct_slice_fn, ct_vol[:,:,slice], check_contrast=False)\n is_low = True if skimage.exposure.is_low_contrast(ct_vol[:,:,slice]) else False\n # save mask if some positive ICH\n if np.any(mask_vol[:,:,slice]):\n mask_slice_fn = f'Patient_CT/{id:03}/{slice+1}_ICH_Seg.bmp'\n skimage.io.imsave(output_data_path + mask_slice_fn, skimage.img_as_ubyte(mask_vol[:,:,slice]), check_contrast=False)\n else:\n mask_slice_fn = 'None'\n # add info to output list\n output_info.append({'PatientNumber':id, 'SliceNumber':slice+1, 'CT_fn':ct_slice_fn, 'mask_fn':mask_slice_fn, 'low_contrast_CT':is_low})\n\n print_progessbar(i, ct_vol.shape[2], Name=f'Patient {id:03} {n+1:03}/{len(info_df.PatientNumber.unique()):03}',\n Size=20, erase=False)\n\n # Make dataframe of outputs\n output_info_df = pd.DataFrame(output_info)\n # Merge with input info\n info_df = pd.merge(info_df, output_info_df, how='inner', on=['PatientNumber', 'SliceNumber'])\n # save df\n info_df.to_csv(output_data_path + 'ct_info.csv')\n print('>>> Data informations saved at ' + output_data_path + 'ct_info.csv')\n # save patient df\n patient_df.to_csv(output_data_path + 'patient_info.csv')\n print('>>> Patient informations saved at ' + output_data_path + 'patient_info.csv')", "def ring_int(self, tissue):\n print(\"controller - ring_int!\")\n img_cv2_mask = self.pressure_img.ring_ext\n self.pressure_img.roi_crop(img_cv2_mask, tissue, 2)", "def test_imsim():\n import yaml\n import astropy.units as u\n import matplotlib.pyplot as plt\n from tqdm import tqdm\n # Need these for `eval` below\n from numpy import array\n import coord\n\n with open(DATA_DIR / \"wcs_466749.yaml\", 'r') as f:\n wcss = yaml.safe_load(f)\n\n cmds = {}\n with open(DATA_DIR / \"phosim_cat_466749.txt\", 'r') as f:\n for line in f:\n k, v = line.split()\n try:\n v = int(v)\n except ValueError:\n try:\n v = float(v)\n except ValueError:\n pass\n cmds[k] = v\n\n # Values below (and others) from phosim_cat_466749.txt\n rc = cmds['rightascension']\n dc = cmds['declination']\n boresight = galsim.CelestialCoord(\n rc*galsim.degrees,\n dc*galsim.degrees\n )\n obstime = Time(cmds['mjd'], format='mjd', scale='tai')\n obstime -= 15*u.s\n band = \"ugrizy\"[cmds['filter']]\n wavelength_dict = dict(\n u=365.49,\n g=480.03,\n r=622.20,\n i=754.06,\n z=868.21,\n y=991.66\n )\n wavelength = wavelength_dict[band]\n camera = imsim.get_camera()\n\n rotTelPos = cmds['rottelpos'] * galsim.degrees\n telescope = imsim.load_telescope(f\"LSST_{band}.yaml\", rotTelPos=rotTelPos)\n # Ambient conditions\n # These are a guess.\n temperature = 293.\n pressure = 69.0\n H2O_pressure = 1.0\n\n # Start by constructing a refractionless factory, which we can use to\n # cross-check 
some of the other values in the phosim cmd file.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=0.0,\n H2O_pressure=H2O_pressure\n )\n\n aob, zob, hob, dob, rob, eo = factory._ICRF_to_observed(\n boresight.ra.rad, boresight.dec.rad, all=True\n )\n np.testing.assert_allclose(\n np.rad2deg(aob)*3600, cmds['azimuth']*3600,\n rtol=0, atol=2.0\n )\n np.testing.assert_allclose(\n (90-np.rad2deg(zob))*3600, cmds['altitude']*3600,\n rtol=0, atol=6.0,\n )\n q = factory.q * galsim.radians\n rotSkyPos = rotTelPos - q\n # Hmmm.. Seems like we ought to be able to do better than 30 arcsec on the\n # rotator? Maybe this is defined at a different point in time? Doesn't seem\n # to affect the final WCS much though.\n np.testing.assert_allclose(\n rotSkyPos.deg*3600, cmds['rotskypos']*3600,\n rtol=0, atol=30.0,\n )\n\n # We accidentally simulated DC2 with the camera rotated 180 degrees too far.\n # That includes the regression test data here. So to fix the WCS code, but\n # still use the same regression data, we need to add 180 degrees here. Just\n # rotate the camera by another 180 degrees\n telescope = telescope.withLocallyRotatedOptic(\n \"LSSTCamera\", batoid.RotZ(np.deg2rad(180))\n )\n\n # For actual WCS check, we use a factory that _does_ know about refraction.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=pressure,\n H2O_pressure=H2O_pressure\n )\n\n do_plot = False\n my_centers = []\n imsim_centers = []\n if do_plot:\n _, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 12))\n i = 0\n r1 = []\n d1 = []\n r2 = []\n d2 = []\n rng = np.random.default_rng(1234)\n for k, v in tqdm(wcss.items()):\n name = k[18:25].replace('-', '_')\n det = camera[name]\n cpix = det.getCenter(cameraGeom.PIXELS)\n\n wcs = factory.getWCS(det, order=2)\n wcs1 = eval(v)\n # Need to adjust ab parameters to new GalSim convention\n wcs1.ab[0,1,0] = 1.0\n wcs1.ab[1,0,1] = 1.0\n\n my_centers.append(wcs.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n imsim_centers.append(wcs1.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n\n corners = det.getCorners(cameraGeom.PIXELS)\n xs = np.array([corner.x for corner in corners])\n ys = np.array([corner.y for corner in corners])\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n if i == 0:\n labels = ['batoid', 'PhoSim']\n else:\n labels = [None]*2\n if do_plot:\n ax.plot(ra1, dec1, c='r', label=labels[0])\n ax.plot(ra2, dec2, c='b', label=labels[1])\n\n # add corners to ra/dec check lists\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n # Add some random points as well\n xs = rng.uniform(0, 4000, 100)\n ys = rng.uniform(0, 4000, 100)\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n i += 1\n\n if do_plot:\n ax.legend()\n xlim = ax.get_xlim()\n ax.set_xlim(xlim[1], xlim[0])\n plt.show()\n\n dist = sphere_dist(r1, d1, r2, d2)\n print(\"sphere dist mean, max, std\")\n print(\n np.rad2deg(np.mean(dist))*3600,\n np.rad2deg(np.max(dist))*3600,\n np.rad2deg(np.std(dist))*3600,\n )\n np.testing.assert_array_less(\n np.rad2deg(np.mean(dist))*3600,\n 5.0\n )\n if do_plot:\n plt.hist(np.rad2deg(dist)*3600, bins=100)\n plt.show()\n\n if do_plot:\n r1 = np.array([c.ra.rad for c in my_centers])\n d1 = np.array([c.dec.rad for c in 
my_centers])\n r2 = np.array([c.ra.rad for c in imsim_centers])\n d2 = np.array([c.dec.rad for c in imsim_centers])\n cd = np.cos(np.deg2rad(cmds['declination']))\n q = plt.quiver(r1, d1, np.rad2deg(r1-r2)*3600*cd, np.rad2deg(d1-d2)*3600)\n plt.quiverkey(q, 0.5, 1.1, 5.0, \"5 arcsec\", labelpos='E')\n plt.show()", "def calc_tau(z_array_reion_allmodels, cosmology_allmodels, helium_allmodels,\n mass_frac_allmodels):\n\n def integrand(z, h, OM):\n H = av.Hubble_Param(z, h, OM) / (av.pc_to_m * 1.0e6 / 1.0e3)\n return (((1 + z)**2) / H)\n\n tau = []\n for model_number in range(len(mass_frac_allmodels)):\n\n # Set up some things for the model cosmology etc.\n model_mass_frac = mass_frac_allmodels[model_number]\n model_helium = helium_allmodels[model_number]\n model_h = cosmology_allmodels[model_number].H(0).value/100.0\n model_OM = cosmology_allmodels[model_number].Om0\n model_OB = cosmology_allmodels[model_number].Ob0\n model_z = z_array_reion_allmodels[model_number]\n\n model_tau = np.zeros(len(model_mass_frac))\n\n # First determine optical depth for redshift 0 to 4.\n tau_04 = integrate.quad(integrand, 0, 4, args=(model_h, model_OM,))[0] \n tau_04 *= (1 + 2*model_helium/(4 * (1-model_helium)))\n\n # Then determine optical depth from z = 4 to lowest z of model.\n tau_46 = integrate.quad(integrand, 4, model_z[-1], args=(model_h, model_OM,))[0]\n tau_46 *= (1 + model_helium/(4* (1-model_helium)))\n\n tau_06 = tau_04 + tau_46\n\n model_tau[-1] = tau_06\n\n # Then loop down through snapshots (low z to high z) and calculate tau.\n for snapnum in np.arange(len(model_mass_frac) - 2, -1, -1):\n\n this_z = model_z[snapnum]\n prev_z = model_z[snapnum + 1]\n\n # Hubble Parameter in Mpc/s/Mpc.\n H = av.Hubble_Param(this_z, model_h, model_OM) / (av.pc_to_m * 1.0e6 / 1.0e3)\n numerator = ((1 + this_z) **2) * (1.0 - model_mass_frac[snapnum])\n \n model_tau[snapnum] = model_tau[snapnum+1] + (( numerator / H) * (this_z - prev_z) * (1 + model_helium/(4 * (1-model_helium)))) \n\n model_tau *= av.n_HI(0, model_h, model_OB, model_helium) * av.c_in_ms * av.Sigmat\n\n tau.append(model_tau)\n\n return tau", "def trap_knobs():\n print('Executing trap_knobs...')\n #0) Define parameters and heck to see what scripts have been run\n from project_parameters import save,debug,trapFile,elMap,electrodes,multipoles,name,simulationDirectory,reg,trapType\n #from all_functions import nullspace,plotN\n import pickle\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n if trap.configuration.expand_field!=True:\n return 'You must run expand_field first!'\n if trap.configuration.trap_knobs and not debug.trap_knobs:\n return 'Already executed trap_knobs.'\n #1) redefine parameters with shorthand and run sanity checks\n totE = len(electrodes) # numTotalElectrodes\n totM = len(multipoles) # numTotalMultipoles\n assert totE == trap.numElectrodes # Make sure that the total number of electrodes includes the RF.\n tc = trap.configuration\n mc = tc.multipoleCoefficients # this is the original, maximum-length multipole coefficients matrix (multipoles,electrodes)\n for row in range(totM):\n #row+=1\n if abs(np.sum(mc[row,:])) < 10**-50: # arbitrarily small\n return 'trap_knobs: row {} is all 0, can not solve least square, stopping trap knobs'.format(row)\n #2) Apply electrode mapping by clearing some electrodes and adding them to the new map\n # mapping one to an unused electrode should turn it off as well\n for index in range(totE):\n if index != elMap[index]:\n mc[:,elMap[index]] += mc[:,index] # combine the electrode to its mapping\n 
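# NOTE (editor, hedged): the statement below pairs with the column sum above:
# after mc[:, elMap[index]] += mc[:, index] folds electrode `index` into its
# mapping, clearing electrodes[index] marks the source electrode unused so the
# reduced matrix built in part 3 does not count the merged pair twice.
# Example (assumed shapes): with elMap = [0, 0, 2], electrode 1's column is
# added onto electrode 0 and electrodes becomes [1, 0, 1].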
electrodes[index] = 0 # clear the mapped electrode, implemented in part 3\n    useE = int(np.sum(electrodes)) # numUsedElectrodes\n    useM = int(np.sum(multipoles)) # numUsedMultipoles\n    eo = np.sqrt(useM)-1 # expansionOrder\n    #3) build a reduced array of multipole coefficients to invert\n    MC = np.zeros((useM,useE)) # reduced matrix to build up and invert\n    ML = 0\n    for ml in range(totM):\n        if multipoles[ml]:\n            EL = 0 # clear the electrode indexing before looping through electrodes again for new multipole\n            for el in range(totE):\n                if electrodes[el]:\n                    MC[ML,EL] = mc[ml,el]\n                    EL += 1\n            ML += 1\n    print('trap_knobs: with electrode and multipole constraints, the coefficient matrix size is ({0},{1}).'.format(MC.shape[0],MC.shape[1]))\n    #4) numerically invert the multipole coefficients to get the multipole controls, one multipole at a time\n    # solve the equation MC*A = B, where the matrix made from all A vectors is C\n    C = np.zeros((useE,useM)) # multipole controls (electrodes,multipoles) will be the inverse of multipole coefficients \n    for mult in range(useM):\n        B = np.zeros(useM)\n        B[mult] = 1\n        A = np.linalg.lstsq(MC,B)[0]\n        C[:,mult] = A\n    #5) calculate the nullspace and regularize if the coefficients matrix is sufficiently overdetermined\n    if useM < useE:\n        K = nullspace(MC)\n    else:\n        print('There is no nullspace because the coefficient matrix is rank deficient.\\nThere can be no regularization.')\n        K = None\n        reg = False\n    if reg:\n        for mult in range(useM):\n            Cv = C[:,mult].T\n            Lambda = np.linalg.lstsq(K,Cv)[0]\n            test=np.dot(K,Lambda)\n            C[:,mult] = C[:,mult]-test \n    #6) expand the matrix back out again, with zero at all unused electrodes and multipoles; same as #3 with everything flipped\n    c = np.zeros((totE,totM)) \n    EL = 0\n    for el in range(totE):\n        if electrodes[el]:\n            ML = 0 # clear the multipole indexing before looping through multipoles again for new electrode\n            for ml in range(totM):\n                if multipoles[ml]:\n                    c[el,ml] = C[EL,ML]\n                    ML += 1\n            EL += 1\n    #7) plot the multipole controls in the trap geometry\n    if debug.trap_knobs:\n        for ml in range(totM):\n            if multipoles[ml]:\n                plot = c[:,ml]\n                plotN(plot,trap,'Multipole {}'.format(ml)) \n    #8) update instance configuration with multipole controls to be used by dc_voltages in post_process_trap\n    tc.multipoleKernel = K\n    tc.multipoleControl = c\n    tc.trap_knobs = True\n    trap.configuration=tc\n    #8.5) change the order of the columns of c for labrad\n    # originally (after constant) Ez,Ey,Ex,U3,U4,U2,U5,U1,...,Y4-4,...,Y40,...,Y44\n    # we want it to be Ex,Ey,Ez,U2,U1,U3,U5,U4,...,Y40 and then end before any of the other 4th order terms\n#    cc = c.copy()\n#    cc[:,1] = c[:,3] # Ex\n#    cc[:,2] = c[:,1] # Ey\n#    cc[:,3] = c[:,2] # Ez\n#    cc[:,4] = c[:,6] # U2\n#    cc[:,5] = c[:,8]\n#    cc[:,6] = 0 # U3\n#    cc[:,16]= c[:,20]# Y40\n#    cc[:,20]= 0\n    cc = c\n    #9) save trap and save c as a text file (called the \"C\" file for Labrad)\n    if save: \n        print('Saving '+name+' as a data structure...')\n        with open(trapFile,'wb') as f:\n            pickle.dump(trap,f)\n        #ct = cc[1:totE,1:17] #eliminate the RF electrode and constant multipole; eliminate everything past Y40\n        ct = cc[1:totE,1:25] # only eliminate the constant\n        text = np.zeros((ct.shape[0])*(ct.shape[1]))\n        for j in range(ct.shape[1]):\n            for i in range(ct.shape[0]):\n                text[j*ct.shape[0]+i] = ct[i,j]\n        np.savetxt(simulationDirectory+name+'.txt',text,delimiter=',')\n    return 'Completed trap_knobs.'

def rayShooting():\r\n    \r\n    \r\n    if nbRay==1:\r\n        maxi=1\r\n        mini=1\r\n        peaceofAngle=angleMax\r\n        #to trace one ray at angleMax\r\n    
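# NOTE (editor, hedged): the else branch below spaces nbRay rays uniformly over
# [-angleMax, +angleMax]: peaceofAngle = 2*angleMax/(nbRay-1) and the loop index
# runs from -(nbRay-1)/2 to +(nbRay-1)/2. For example, nbRay = 5 shoots rays at
# -angleMax, -angleMax/2, 0, +angleMax/2 and +angleMax.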
else:\r\n        maxi=(nbRay-1)/2\r\n        mini=-maxi\r\n        peaceofAngle=2*angleMax/(nbRay-1)\r\n        #to trace rays at regular intervals between [-angleMax;angleMax] \r\n\r\n    tot=0 #to count the number of pieces of ray\r\n    indice=0 #to browse raysIndex\r\n\r\n    raysMatrix=np.empty(shape=(0,5),dtype=np.float64)#will contain all the rays in a row\r\n    raysIndex=np.empty(shape=(nbRay,),dtype=np.int16)#indexation of the rays in raysMatrix\r\n    \r\n    for i in np.arange(mini,maxi+1,1):#put maxi+1 to include maxi in the loop\r\n        \r\n        rayon=Rayon(source.position,angleToVector(peaceofAngle*i))#rayon is\r\n        #the ray we will trace\r\n        ray,compt=traceRay(rayon)\r\n        tot+=(compt+1)\r\n\r\n        \r\n        raysIndex[indice]=tot #the rays index contains the index just above\r\n        #of the end of the i-th ray\r\n\r\n        raysMatrix=np.vstack((raysMatrix,ray))\r\n        #the form of the ray matrix is a stack of pieces of ray described by\r\n        #a,b,c,x1,reflection. the polynomial of the piece of ray being ax^2+bx+c and the\r\n        #abscissa of the limiting point being x1, reflection indicating if a reflection happened\r\n        #when we meet a 5-tuple with coefficient b or c infinite it means\r\n        #a new ray begins\r\n        \r\n        indice+=1\r\n        print(\"ray at index\",i,\"and at angle\",peaceofAngle*i/np.pi*180,'degree(s)')\r\n        \r\n    print(\"the total number of pieces of ray is:\", tot)\r\n\r\n    return(raysMatrix,raysIndex)

def decode_patches_from_roi(self, nr_patches = 100, roi = 'V1', stim_stat_threshold = 2.0, prf_stat_threshold = 4.0, prf_ecc_threshold = 0.6, run_type = 'WMM', smoothing_for_PRF = 5, scaling = True, sum_TRs = True,summed_TRs = 3):\n\t\t\n\t\t# first determine individual run duration (to make sure that stimulus timings of all runs are correct)\n\t\trun_duration = []\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr, nr_trs = round(niiFile.rtime*1)/1000.0, niiFile.timepoints\n\t\t\trun_duration.append(tr * nr_trs)\n\t\trun_duration = np.r_[0,np.cumsum(np.array(run_duration))]\n\n\t\t# timing information stimuli\n\t\tpatches_all_trials = []\n\t\trun = 0\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tstim_events = np.loadtxt(self.runFile(stage = 'processed/behavior', run = r, extension = '.txt', postFix = ['stim' ,'all','task']))\n\t\t\tpatches_run= np.array([[stim_events[i][2],np.ceil((stim_events[i][0]+ run_duration[run])/tr),np.ceil((stim_events[i][1] +run_duration[run])/tr),stim_events[i][3]] for i in range(stim_events.shape[0])])\n\t\t\tpatches_all_trials.append(patches_run)\n\t\t\trun += 1\n\n\t\tpatches_all_trials = np.vstack(patches_all_trials) # array (patches, timing_memory, timing test)\n\t\tunique_patches = np.unique(patches_all_trials[:,0]) # array (unique_patches)\n\n\t\t# numpy array of all patches from stimulus pool (also rotated by 90 degrees to check orientation space of PRF)\n\t\tpatches = np.array(self.rescale_images())\n\t\tpatches_rotated = [patches]\n\t\tfor i in range(3):\n\t\t\tpatches_rotated.append(np.array([np.rot90(p) for p in patches_rotated[-1]]))\n\t\tpatches_rotated = np.array(patches_rotated) \n\t\t\n\t\t# brain data\n\t\tself.hdf5_filename = os.path.join(self.conditionFolder(stage = 'processed/mri', run = self.runList[self.conditionDict[run_type][0]]), run_type + '.hdf5')\n\t\tself.logger.info('data from table file ' + self.hdf5_filename)\n\t\th5file = open_file(self.hdf5_filename, mode = 'r')\t\t\n\n\t\tstim_stats = self.roi_data_from_hdf(h5file, roi, data_type = 'zstat1', run = 
self.runList[self.conditionDict[run_type][0]], postFix = ['mcf','sgtf'], combined = False, prf = False)\n\t\tprfs = self.roi_data_from_hdf(h5file, roi, data_type = 'all_coefs', run = self.runList[self.conditionDict[run_type][0]], postFix = ['mcf','sgtf'], combined = False, prf = True)\n\t\tprf_stats = self.roi_data_from_hdf(h5file, roi, data_type = 'all_corrs', run = self.runList[self.conditionDict[run_type][0]], postFix = ['mcf','sgtf'], combined = False, prf = True)\n\t\t\n\t\tprf_ecc = self.roi_data_from_hdf(h5file, roi, data_type = 'all_results', run = self.runList[self.conditionDict[run_type][0]], postFix = ['mcf','sgtf'], combined = False, prf = True)[:,1]\n\n\t\ttimeseries = []\n\t\tfor i in range(len(self.conditionDict[run_type])):\n\t\t\ttimeseries.append(self.roi_data_from_hdf(h5file, roi, data_type = 'tf_psc_data', run = self.runList[self.conditionDict[run_type][i]], postFix = ['mcf','sgtf'], combined = False, prf = False))\n\t\ttimeseries = np.concatenate(timeseries, axis = 1) \n\n\t\t# select voxels based on prf and stimulus contrasts\n\t\tprfs = prfs[((stim_stats.squeeze() > stim_stat_threshold) * (prf_stats[:,1] > prf_stat_threshold)*(prf_ecc < prf_ecc_threshold)).squeeze(),:] \n\t\ttimeseries = timeseries[((stim_stats.squeeze() > stim_stat_threshold) * (prf_stats[:,1] > prf_stat_threshold)*(prf_ecc < prf_ecc_threshold)).squeeze(),:] \n\t\tprf_stats = prf_stats[((stim_stats.squeeze() > stim_stat_threshold) * (prf_stats[:,1] > prf_stat_threshold)*(prf_ecc < prf_ecc_threshold)).squeeze(),1]\n\n\t\t#create stim mask (based on eccentricity mask and atend centre mask)\n\t\tcentre_x, centre_y = patches.shape[1]/2,patches.shape[1]/2\n\n\t\ty,x = np.ogrid[-centre_x:patches.shape[1]-centre_x, -centre_y:patches.shape[1]-centre_y]\n\t\tmask_ecc = x*x + y*y <= (prf_ecc_threshold*patches.shape[1]/2)**2\n\n\t\ty,x = np.ogrid[-centre_x:patches.shape[1]-centre_x, -centre_y:patches.shape[1]-centre_y]\n\t\tmask_centre = x*x + y*y >= 4**2\n\n\t\tmask_stim = (mask_ecc.ravel()*mask_centre.ravel())\t\n\n\t\t# create scaling matrix (identity matrix) of nr_voxels by nr_voxels\n\t\tif scaling:\n\t\t\tscaling_matrix = np.eye(prfs.shape[0])\n\t\t\tfor i in range(len(prf_stats)):\n\t\t\t\tscaling_matrix[i,i] = prf_stats[i]\n\n\n\t\t# based on prfs and scaling factor create brain patches (either for single TRs or for summed TRS)\t\t\n\t\tbrain_patches = np.mat(timeseries.T) * np.mat(scaling_matrix) * np.mat(prfs) \n\t\tif sum_TRs == True:\n\t\t\ttimeseries_summed = np.array([np.sum(timeseries.T[i:i+summed_TRs,:],axis = 0) for i in range(timeseries.shape[1]) if i < timeseries.shape[1] - summed_TRs])\n\t\t\tbrain_patches= np.mat(timeseries_summed) * np.mat(scaling_matrix) * np.mat(prfs)\n \n\t\t# per TR (either summed or individual) calculate correlations between brain patch and stimulus input\n\t\tpatch_correlations = np.zeros((4, brain_patches.shape[0], patches.shape[0]))\n\n\t\tfor k in range(4): # 4 orientations\n\t\t\tfor i, t in enumerate(np.array(brain_patches)): # all brain_patches\n\t\t\t\tpatch_correlations[k, i, :] = np.array([pearsonr(t[mask_stim], patch.ravel()[mask_stim])[0] for patch in patches_rotated[k]])\n\n\t\t# for analysis only look at correlations for all patches that were presented in this subjects session\n\t\tpatch_index_unique = np.array([j for i in patches_all_trials[:,0] for j in range(len( np.unique(patches_all_trials[:,0]))) if i == np.unique(patches_all_trials[:,0])[j]])\t\n\t\tpatch_correlations_unique = patch_correlations[:,:,np.array(unique_patches, dtype = int)]\n\n\t\t# 
show correlation matrix of patches for all rotations (dots represent stimulus that was present)\n\t\tfig, axes = plt.subplots(2,2)\n\t\tfor i in range(patches_rotated.shape[0]):\n\t\t\tax = axes.flat[i]\n\t\t\tax.imshow(patch_correlations_unique[i,:,:].T,vmin = np.min(patch_correlations_unique), vmax = np.max(patch_correlations_unique))\n\t\t\tax.scatter(patches_all_trials[:,1],patch_index_unique)\n\t\t\tax.set_title(\"rotation_\" + str(i*90))\n\n\t\t# show decoding evidence across runs for all unique patches\t\n\t\tfig, axes = plt.subplots(2,2)\t\n\t\tfor i in range(patches_rotated.shape[0]):\n\t\t\tax = axes.flat[i]\n\t\t\tax.hist(np.argmax(patch_correlations_unique[i,:,:], axis = 1), bins = patch_correlations_unique.shape[2] + 1)\n\t\t\tax.set_ylim([0,600])\n\t\t\tax.set_title(\"rotation_\" + str(i*90))\n\t\t\n\t\t# show brain_patches and stimulus input\t\t\n\t\tmask_stim = np.array([not i for i in mask_stim]).reshape(patches.shape[1],patches.shape[1]) # flip stim mask for graphing purposes\n\n\t\tindex_att_patch = patches_all_trials[:,3] == 0\n\t\tindex_att_center = patches_all_trials[:,3] == 1 \n\n\n\t\t# calculate decoding performance\n\t\tcorrelations_unique_TR_patch = np.array([patch_correlations[:,np.array(patches_all_trials[:,1],dtype = int)[index_att_patch]+ TR][:,:,np.array(unique_patches, dtype = int)] for TR in range(2,5)])\n\t\tcorrelations_unique_TR_center = np.array([patch_correlations[:,np.array(patches_all_trials[:,1],dtype = int)[index_att_center]+ TR][:,:,np.array(unique_patches, dtype = int)] for TR in range(2,5)])\n\n\t\t# check relative position of presented patch compared to other unique patches\n\t\tpatch_dict = {}\n\t\tcenter_dict = {}\n\t\tfor condition in ['patch' ,'center']:\n\t\t\tfor TR in range(correlations_unique_TR_patch.shape[0]):\n\t\t\t\tfor rotation in range(correlations_unique_TR_patch.shape[1]):\n\t\t\t\t\tif condition == 'patch':\n\t\t\t\t\t\tprint 'update'\n\t\t\t\t\t\tdecode = np.array([int(np.where(np.argsort(correlations_unique_TR_patch[TR,rotation,i])==patch_index_unique[index_att_patch][i])[0]) for i in range(correlations_unique_TR_patch.shape[2])])\n\t\t\t\t\t\tpatch_dict.update({'RT_' + str(TR) + '_Rotate_' + str(rotation):[decode, np.mean(decode),np.where(decode == np.max(decode))]})\n\t\t\t\t\telif condition == 'center':\t\n\t\t\t\t\t\tdecode = np.array([int(np.where(np.argsort(correlations_unique_TR_center[TR,rotation,i])==patch_index_unique[index_att_patch][i])[0]) for i in range(correlations_unique_TR_patch.shape[2])])\n\t\t\t\t\t\tcenter_dict.update({'RT_' + str(TR) + '_Rotate_' + str(rotation):[decode, np.mean(decode),np.where(decode == np.max(decode))]})\n\n\t\tshell()\n\t\t# show decoding performance\n\t\tfor TR in [2]:\n\t\t\tcorrelations_unique_TR_patch = patch_correlations[:,np.array(patches_all_trials[:,1],dtype = int)[index_att_patch]+ TR][:,:,np.array(unique_patches, dtype = int)]\n\t\t\tcorrelations_unique_TR_cent = patch_correlations[:,np.array(patches_all_trials[:,1],dtype = int)[index_att_cent]+ TR][:,:,np.array(unique_patches, dtype = int)]\n\n\t\t\tfor cond in ['patch','center']:\n\t\t\t\tfig, axes = plt.subplots(2,2)\n\t\t\t\tfor k in range(4): # 4 rotations\n\t\t\t\t\tax = axes.flat[k]\n\t\t\t\t\tax.set_title(\"rotation_\" + str(k*90))\n\t\t\t\t\tif cond == 'patch':\n\t\t\t\t\t\tax.hist([np.argsort(correlations_unique_TR_patch[k][i])[patch_index_unique[i]] for i in range(len(patch_index_unique)/2)], alpha = 0.3, color = ['r','g','b','k'][k], bins = 44, cumulative = True, histtype = 'step')\n\t\t\t\t\telif cond == 
'center':\n\t\t\t\t\t\tax.hist([np.argsort(correlations_unique_TR_cent[k][i])[patch_index_unique[i]] for i in range(len(patch_index_unique)/2)], alpha = 0.3, color = ['r','g','b','k'][k], bins = 44, cumulative = True, histtype = 'step')\n\n\t\tif sum_TRs == True:\n\t\t\tbpr = np.array(brain_patches).reshape((timeseries.shape[1] - summed_TRs,patches.shape[1],patches.shape[1])) # brain patch reshaped to PRF space, length is nr of TRS\n\t\telse:\n\t\t\tbpr = np.array(brain_patches).reshape((timeseries.shape[1],patches.shape[1],patches.shape[1]))\n\t\t\t\n\t\t\n\t\ttr_bpr_patch = np.array([bpr[np.array(patches_all_trials[:,1], dtype = int)[index_att_patch]+i] for i in range(2,5)]) #brain patches for different timepoints \n\t\ttr_bpr_cent = np.array([bpr[np.array(patches_all_trials[:,1], dtype = int)[index_att_cent]+i] for i in range(2,5)])\n\n\t\t\n\t\t# show decoding attend patches condition\n\t\n\n\n\t\tpatch = 0\n\t\ttest = [i for i in range(index_att_patch.shape[0]) if index_att_patch[i]]\n\t\tfor i in test:\n\t\t\tprint i, patch\n\t\t\tf = pl.figure()\n\t\t\tpatch += 1\n\t\t\t#f = pl.figure()\n\t\t\t#s = f.add_subplot(241, aspect = 'equal')\n\t\t\t\n\t\t\t#tr_bpr_patch[0,patch][mask_stim] = 0\n\t\t\t#pl.imshow(tr_bpr_patch[0,patch],vmin = np.min(tr_bpr), vmax = np.max(tr_bpr))\n\t\t\t#s = f.add_subplot(242, aspect = 'equal')\n\t\t\t#pl.imshow(tr_bpr_patch[0,patch],vmin = np.min(tr_bpr), vmax = np.max(tr_bpr))\n\t\t\t#s = f.add_subplot(243, aspect = 'equal')\n\t\t\t#pl.imshow(tr_bpr_patch[0,patch],vmin = np.min(tr_bpr), vmax = np.max(tr_bpr))\n\t\t\t#s = f.add_subplot(244, aspect = 'equal')\n\t\t\t#pl.imshow(tr_bpr_patch[0,patch],vmin = np.min(tr_bpr), vmax = np.max(tr_bpr))\n\t\t\t#s = f.add_subplot(245, aspect = 'equal')\n\t\t\t\n\t\t\t#this_patch = patches_rotated[0,int(patches_all_trials[i][0])]\n\t\t\t#this_patch[mask_stim] = 0\n\t\t\t#pl.imshow(this_patch)\n\t\t\t#s = f.add_subplot(246, aspect = 'equal')\n\t\t\t#this_patch = patches_rotated[1,int(patches_all_trials[i][0])]\n\t\t\t#this_patch[mask_stim] = 0\n\t\t\t#pl.imshow(this_patch)\n\t\t\t#s = f.add_subplot(247, aspect = 'equal')\n\t\t\t#this_patch = patches_rotated[2,int(patches_all_trials[i][0])]\n\t\t\t#this_patch[mask_stim] = 0\n\t\t\t#pl.imshow(this_patch)\n\t\t\t#s = f.add_subplot(248, aspect = 'equal')\n\t\t\t#this_patch = patches_rotated[3,int(patches_all_trials[i][0])]\n\t\t\t#this_patch[mask_stim] = 0\n\t\t\t#pl.imshow(this_patch)\n\n\n\n\n\n\n\n\t\ttr_bpr = np.array([bpr[np.array(patches_all_trials[:,1], dtype = int)+i] for i in range(2,5)]) #brain patches for different timepoints \n\t\t\n\t\tfor i in []:\n\t\t\tf = pl.figure()\n\t\t\ts = f.add_subplot(241, aspect = 'equal')\n\t\t\ttr_bpr[0,i][mask_stim] = 0\n\t\t\tpl.imshow(tr_bpr[0,i],vmin = np.min(tr_bpr), vmax = np.max(tr_bpr))\n\t\t\ts = f.add_subplot(242, aspect = 'equal')\n\t\t\ttr_bpr[0,i][mask_stim] = 0\n\t\t\tpl.imshow(tr_bpr[0,i],vmin = np.min(tr_bpr), vmax = np.max(tr_bpr))\n\t\t\ts = f.add_subplot(243, aspect = 'equal')\n\t\t\ttr_bpr[0,i][mask_stim] = 0\n\t\t\tpl.imshow(tr_bpr[0,i],vmin = np.min(tr_bpr), vmax = np.max(tr_bpr))\n\t\t\ts = f.add_subplot(244, aspect = 'equal')\n\t\t\ttr_bpr[0,i][mask_stim] = 0\n\t\t\tpl.imshow(tr_bpr[0,i],vmin = np.min(tr_bpr), vmax = np.max(tr_bpr))\n\t\t\ts = f.add_subplot(245, aspect = 'equal')\n\t\t\tpatch = patches_rotated[0,np.array(patches_all_trials[:,0], dtype = int)][i]\n\t\t\tpatch[mask_stim] = 0\n\t\t\tpl.imshow(patch)\n\t\t\ts = f.add_subplot(246, aspect = 'equal')\n\t\t\tpatch = 
patches_rotated[1,np.array(patches_all_trials[:,0], dtype = int)][i]\n\t\t\tpatch[mask_stim] = 0\n\t\t\tpl.imshow(patch)\n\t\t\ts = f.add_subplot(247, aspect = 'equal')\n\t\t\tpatch = patches_rotated[2,np.array(patches_all_trials[:,0], dtype = int)][i]\n\t\t\tpatch[mask_stim] = 0\n\t\t\tpl.imshow(patch)\n\t\t\ts = f.add_subplot(248, aspect = 'equal')\n\t\t\tpatch = patches_rotated[3,np.array(patches_all_trials[:,0], dtype = int)][i]\n\t\t\tpatch[mask_stim] = 0\n\t\t\tpl.imshow(patch)\n\n\t\tpl.show()", "def behaviour03(prior_propia): \r\n count = 0\r\n global stop\r\n while not stop:\r\n results = process_image(cameraData,prior_propia)\r\n if count > 3:\r\n count = 0\r\n print(\"hay estos pixeles verdes\")\r\n print(results)\r\n else:\r\n count = count + 1", "def process(self, im, include=True):\n full_im = im - self.bias # remove the bias offset, it's arbitrary\n try:\n self.im_vals = full_im * self.mask # get the ROI\n not_roi = full_im * (1-self.mask)\n except ValueError as e:\n s0 = np.shape(self.mask)\n self.im_vals = np.zeros(s0)\n not_roi = np.zeros(s0)\n include = False\n s1 = np.shape(im)\n error(\"Received image was wrong shape (%s,%s) for analyser's ROI (%s,%s)\"%(\n s1[0],s1[1],s0[0],s0[1]))\n # background statistics: mean count and standard deviation across image\n N = np.sum(1-self.mask)\n self.stats['Mean bg count'].append(np.sum(not_roi) / N)\n self.stats['Bg s.d.'].append(\n np.sqrt(np.sum((not_roi - self.stats['Mean bg count'][-1])**2) / (N - 1)))\n # sum of counts in the ROI of the image gives the signal\n self.stats['Counts'].append(np.sum(self.im_vals)) \n # file ID number should be updated externally before each event\n self.stats['File ID'].append(self.fid)\n # the pixel value at the centre of the ROI\n try:\n self.stats['ROI centre count'].append(full_im[self.xc, self.yc])\n except IndexError as e:\n error('ROI centre (%s, %s) outside of image size (%s, %s)'%(\n self.xc, self.yc, self.pic_width, self.pic_height))\n self.stats['ROI centre count'].append(0)\n # position of the (first) max intensity pixel\n xmax, ymax = np.unravel_index(np.argmax(full_im), full_im.shape)\n self.stats['Max xpos'].append(xmax)\n self.stats['Max ypos'].append(ymax)\n self.stats['Include'].append(include)\n if np.size(self.bins):\n index = np.where(self.bins > self.stats['Counts'][-1])\n try:\n self.occs[index[0]] += 1\n except IndexError:\n self.occs[-1] += 1\n self.ind += 1", "def freespaceImageAnalysis( fids, guesses = None, fit=True, bgInput=None, bgPcInput=None, shapes=[None], zeroCorrection=0, zeroCorrectionPC=0,\n keys=None, fitModule=bump, extraPicDictionaries=None, newAnnotation=False, onlyThisPic=None, pltVSize=5, \n plotSigmas=False, plotCounts=False, manualColorRange=None, calcTemperature=False, clearOutput=True, \n dataRange=None, guessTemp=10e-6, trackFitCenter=False, picsPerRep=1, startPic=0, binningParams=None, \n win=pw.PictureWindow(), transferAnalysisOpts=None, tferBinningParams=None, tferWin= pw.PictureWindow(),\n extraTferAnalysisArgs={}, emGainSetting=300, lastConditionIsBackGround=True, showTferAnalysisPlots=True,\n show2dFitsAndResiduals=True, plotFitAmps=False, indvColorRanges=False, fitF2D=gaussian_2d.f_notheta, \n rmHighCounts=True, useBase=True, weightBackgroundByLoading=True, returnPics=False, forceNoAnnotation=False):\n fids = [fids] if type(fids) == int else fids\n keys = [None for _ in fids] if keys is None else keys\n sortedStackedPics = {}\n initThresholds = [None]\n picsForBg = []\n bgWeights = []\n isAnnotatedList = []\n for filenum, fid in 
enumerate(fids):\n if transferAnalysisOpts is not None:\n res = ta.stage1TransferAnalysis( fid, transferAnalysisOpts, useBase=useBase, **extraTferAnalysisArgs )\n (initAtoms, tferAtoms, initAtomsPs, tferAtomsPs, key, keyName, initPicCounts, tferPicCounts, repetitions, initThresholds,\n avgPics, tferThresholds, initAtomImages, tferAtomImages, basicInfoStr, ensembleHits, groupedPostSelectedPics, isAnnotated) = res\n isAnnotatedList.append(isAnnotated)\n # assumes that you only want to look at the first condition. \n for varPics in groupedPostSelectedPics: # don't remember why 0 works if false...\n picsForBg.append(varPics[-1 if lastConditionIsBackGround else 0])\n bgWeights.append(len(varPics[0]))\n allFSIPics = [ varpics[0][startPic::picsPerRep] for varpics in groupedPostSelectedPics]\n if showTferAnalysisPlots:\n fig, axs = plt.subplots(1,2)\n mp.makeAvgPlts( axs[0], axs[1], avgPics, transferAnalysisOpts, ['r','g','b'] ) \n allFSIPics = [win.window( np.array(pics) ) for pics in allFSIPics]\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n elif type(fid) == int:\n ### For looking at either PGC imgs or FSI imgs \n with exp.ExpFile(fid) as file:\n # I think this only makes sense if there is a specific bg pic in the rotation\n picsForBg.append(list(file.get_pics()))\n allFSIPics = file.get_pics()[startPic::picsPerRep]\n _, key = file.get_key()\n if len(np.array(key).shape) == 2:\n key = key[:,0]\n file.get_basic_info()\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n else:\n ### Assumes given pics have the same start pic and increment (picsPerRep).\n # doesn't combine well w/ transfer analysis\n picsForBg.append(fid)\n allFSIPics = fid[startPic::picsPerRep]\n print(\"Assuming input is list of all pics, then splices to get FSI pics. 
Old code assumed the given were FSI pics.\")\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n # ##############\n if keys[filenum] is not None:\n key = keys[filenum]\n for i, keyV in enumerate(key):\n keyV = misc.round_sig_str(keyV)\n sortedStackedPics[keyV] = np.append(sortedStackedPics[keyV], allFSIPics[i],axis=0) if (keyV in sortedStackedPics) else allFSIPics[i] \n if lastConditionIsBackGround:\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = startPic, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights, \n weightBackgrounds=weightBackgroundByLoading)\n elif bgInput == 'lastPic':\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = picsPerRep-1, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights,\n weightBackgrounds=weightBackgroundByLoading )\n if bgInput is not None: # was broken and not working if not given bg\n bgInput = win.window(bgInput)\n bgInput = ah.softwareBinning(binningParams, bgInput)\n if bgPcInput is not None:\n bgPcInput = win.window(bgPcInput)\n bgPcInput = ah.softwareBinning(binningParams, bgPcInput) \n \n if extraPicDictionaries is not None:\n if type(extraPicDictionaries) == dict:\n extraPicDictionaries = [extraPicDictionaries]\n for dictionary in extraPicDictionaries:\n for keyV, pics in dictionary.items():\n sortedStackedPics[keyV] = (np.append(sortedStackedPics[keyV], pics,axis=0) if keyV in sortedStackedPics else pics) \n sortedStackedKeyFl = [float(keyStr) for keyStr in sortedStackedPics.keys()]\n sortedKey, sortedStackedPics = ah.applyDataRange(dataRange, sortedStackedPics, list(sorted(sortedStackedKeyFl)))\n numVars = len(sortedStackedPics.items())\n if len(np.array(shapes).shape) == 1:\n shapes = [shapes for _ in range(numVars)] \n if guesses is None:\n guesses = [[None for _ in range(4)] for _ in range(numVars)]\n if len(np.array(bgInput).shape) == 2 or bgInput == None:\n bgInput = [bgInput for _ in range(numVars)]\n if len(np.array(bgPcInput).shape) == 2 or bgPcInput == None:\n bgPcInput = [bgPcInput for _ in range(numVars)]\n \n datalen, avgFitSigmas, images, hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D = [{} for _ in range(9)]\n titles = ['Bare', 'Photon-Count', 'Bare-mbg', 'Photon-Count-mbg']\n assert(len(sortedKey)>0)\n for vari, keyV in enumerate(sortedKey):\n keyV=misc.round_sig_str(keyV)\n if vari==0:\n initKeyv = keyV\n varPics = sortedStackedPics[keyV]\n # 0 is init atom pics for post-selection on atom number... 
if we wanted to.\n expansionPics = rmHighCountPics(varPics,7000) if rmHighCounts else varPics\n datalen[keyV] = len(expansionPics)\n expPhotonCountImage = photonCounting(expansionPics, 120)[0] / len(expansionPics)\n bgPhotonCountImage = np.zeros(expansionPics[0].shape) if bgPcInput[vari] is None else bgPcInput[vari]\n expAvg = np.mean(expansionPics, 0)\n bgAvg = np.zeros(expansionPics[0].shape) if (bgInput[vari] is None or len(bgInput[vari]) == 1) else bgInput[vari]\n \n if bgPhotonCountImage is None:\n print('no bg photon', expAvg.shape)\n bgPhotonCount = np.zeros(photonCountImage.shape)\n avg_mbg = expAvg - bgAvg\n avg_mbgpc = expPhotonCountImage - bgPhotonCountImage\n images[keyV] = [expAvg, expPhotonCountImage, avg_mbg, avg_mbgpc]\n hFitParams[keyV], hFitErrs[keyV], vFitParams[keyV], vFitErrs[keyV], fitParams2D[keyV], fitErrs2D[keyV] = [[] for _ in range(6)]\n for imnum, (im, guess) in enumerate(zip(images[keyV], guesses[vari])):\n if fit:\n # fancy guess_x and guess_y values use the initial fitted value, typically short time, as a guess.\n _, pictureFitParams2d, pictureFitErrors2d, v_params, v_errs, h_params, h_errs = ah.fitPic(\n im, guessSigma_x=5, guessSigma_y=5, showFit=False, \n guess_x=None if vari==0 else fitParams2D[initKeyv][imnum][1], guess_y=None if vari==0 else fitParams2D[initKeyv][imnum][2],\n fitF=fitF2D)\n fitParams2D[keyV].append(pictureFitParams2d)\n fitErrs2D[keyV].append(pictureFitErrors2d)\n hFitParams[keyV].append(h_params)\n hFitErrs[keyV].append(h_errs)\n vFitParams[keyV].append(v_params)\n vFitErrs[keyV].append(v_errs)\n # conversion from the num of pixels on the camera to microns at the focus of the tweezers\n cf = 16e-6/64\n mins, maxes = [[], []]\n imgs_ = np.array(list(images.values()))\n for imgInc in range(4):\n if indvColorRanges:\n mins.append(None)\n maxes.append(None)\n elif manualColorRange is None:\n mins.append(min(imgs_[:,imgInc].flatten()))\n maxes.append(max(imgs_[:,imgInc].flatten()))\n else:\n mins.append(manualColorRange[0])\n maxes.append(manualColorRange[1])\n numVariations = len(images)\n if onlyThisPic is None:\n fig, axs = plt.subplots(numVariations, 4, figsize=(20, pltVSize*numVariations))\n if numVariations == 1:\n axs = np.array([axs])\n bgFig, bgAxs = plt.subplots(1, 2, figsize=(20, pltVSize))\n else:\n numRows = int(np.ceil((numVariations+3)/4))\n fig, axs = plt.subplots(numRows, 4 if numVariations>1 else 3, figsize=(20, pltVSize*numRows))\n avgPicAx = axs.flatten()[-3]\n avgPicFig = fig\n bgAxs = [axs.flatten()[-1], axs.flatten()[-2]]\n bgFig = fig\n if show2dFitsAndResiduals:\n fig2d, axs2d = plt.subplots(*((2,numVariations) if numVariations>1 else (1,2)))\n keyPlt = np.zeros(len(images))\n (totalSignal, hfitCenter, hFitCenterErrs, hSigmas, hSigmaErrs, h_amp, hAmpErrs, vfitCenter, vFitCenterErrs, vSigmas, vSigmaErrs, v_amp, \n vAmpErrs, hSigma2D, hSigma2dErr, vSigma2D, vSigma2dErr) = [np.zeros((len(images), 4)) for _ in range(17)]\n \n for vari, ((keyV,ims), hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D) in enumerate(zip(\n images.items(), *[dic.values() for dic in [hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D]])):\n for which in range(4):\n if onlyThisPic is None:\n (im, ax, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, axs[vari], titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n else:\n which = onlyThisPic\n ax = axs.flatten()[vari]\n (im, title, min_, max_, hparams, hErrs, vparams, vErrs, 
param2d, err2d\n ) = [obj[which] for obj in (ims, titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n h_amp[vari][which], hfitCenter[vari][which], hSigmas[vari][which] = hparams[0], hparams[1], hparams[2]*cf*1e6\n hAmpErrs[vari][which], hFitCenterErrs[vari][which], hSigmaErrs[vari][which] = hErrs[0], hErrs[1], hErrs[2]*cf*1e6\n v_amp[vari][which], vfitCenter[vari][which], vSigmas[vari][which] = vparams[0], vparams[1], vparams[2]*cf*1e6\n vAmpErrs[vari][which], vFitCenterErrs[vari][which], vSigmaErrs[vari][which] = vErrs[0], vErrs[1], vErrs[2]*cf*1e6\n hSigma2D[vari][which], hSigma2dErr[vari][which], vSigma2D[vari][which], vSigma2dErr[vari][which] = [\n val*cf*1e6 for val in [param2d[-3], err2d[-3], param2d[-2], err2d[-2]]]\n \n totalSignal[vari][which] = np.sum(im.flatten())\n keyPlt[vari] = keyV\n res = mp.fancyImshow(fig, ax, im, imageArgs={'cmap':dark_viridis_cmap, 'vmin':min_, 'vmax':max_}, \n hFitParams=hparams, vFitParams=vparams, fitModule=fitModule, flipVAx = True, fitParams2D=param2d)\n ax.set_title(keyV + ': ' + str(datalen[keyV]) + ';\\n' + title + ': ' + misc.errString(hSigmas[vari][which],hSigmaErrs[vari][which]) \n + r'$\\mu m$ sigma, ' + misc.round_sig_str(totalSignal[vari][which],5), fontsize=12) \n if show2dFitsAndResiduals:\n X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))\n data_fitted = fitF2D((X,Y), *param2d)\n fitProper = data_fitted.reshape(im.shape[0],im.shape[1])\n ax1 = axs2d[0] if numVariations == 1 else axs2d[0,vari]\n ax2 = axs2d[1] if numVariations == 1 else axs2d[1,vari]\n imr = ax1.imshow(fitProper, vmin=min_, vmax=max_)\n mp.addAxColorbar(fig2d, ax1, imr)\n ax1.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n imr = ax2.imshow(fitProper-im)\n mp.addAxColorbar(fig2d, ax2, imr)\n ax2.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n if onlyThisPic is not None:\n break\n \n mp.fancyImshow(avgPicFig, avgPicAx, np.mean([img[onlyThisPic] for img in images.values()],axis=0), imageArgs={'cmap':dark_viridis_cmap},flipVAx = True)\n avgPicAx.set_title('Average Over Variations')\n ### Plotting background and photon counted background\n mp.fancyImshow(bgFig, bgAxs[0], bgAvg, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[0].set_title('Background image (' + str(len(picsForBg)/picsPerRep) + ')')\n mp.fancyImshow(bgFig, bgAxs[1], bgPhotonCountImage, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[1].set_title('Photon counted background image (' + str(len(picsForBg)/picsPerRep) + ')')\n fig.subplots_adjust(left=0,right=1,bottom=0.1, hspace=0.2, **({'top': 0.7, 'wspace': 0.4} if (onlyThisPic is None) else {'top': 0.9, 'wspace': 0.3}))\n \n disp.display(fig)\n temps, tempErrs, tempFitVs, = [],[],[]\n if calcTemperature: \n for sigmas, sigmaerrs in zip([hSigmas, vSigmas, hSigma2D, vSigma2D],[hSigmaErrs, vSigmaErrs, hSigma2dErr, vSigma2dErr]):\n mbgSigmas = np.array([elt[2] for elt in sigmas])\n mbgSigmaErrs = np.array([elt[2] for elt in sigmaerrs])\n myGuess = [0.0, min((mbgSigmas)*1e-6), guessTemp]\n temp, fitV, cov = ah.calcBallisticTemperature(keyPlt*1e-3, (mbgSigmas)*1e-6, guess = myGuess, sizeErrors = mbgSigmaErrs)\n error = np.sqrt(np.diag(cov))\n temps.append(temp)\n tempErrs.append(error[2])\n tempFitVs.append(fitV)\n numAxisCol = int(plotSigmas) + int(plotCounts) + int(trackFitCenter)\n if numAxisCol != 0:\n fig2, axs = plt.subplots(1, numAxisCol, figsize = (15, 5)) \n fig2.subplots_adjust(top=0.75, wspace = 
0.4)\n colors = ['b','k','c','purple']\n if plotSigmas:\n ax = (axs if numAxisCol == 1 else axs[0]) \n stdStyle = dict(marker='o',linestyle='',capsize=3)\n if onlyThisPic is not None:\n ax.errorbar(keyPlt, hSigmas[:,onlyThisPic], hSigmaErrs[:,onlyThisPic], color=colors[0], label='h '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, hSigma2D[:,onlyThisPic], hSigma2dErr[:,onlyThisPic], color=colors[1], label='2dh '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,onlyThisPic], vSigmaErrs[:,onlyThisPic], color=colors[2], label='v '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigma2D[:,onlyThisPic], vSigma2dErr[:,onlyThisPic], color=colors[3], label='2dv '+titles[onlyThisPic], **stdStyle);\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hSigmas[:,whichPic], hSigmaErrs[:,whichPic], color='b', label='h '+titles[whichPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,whichPic], vSigmaErrs[:,whichPic], color='c', label='v '+titles[whichPic], **stdStyle);\n ax.set_ylim(max(0,ax.get_ylim()[0]),min([ax.get_ylim()[1],5]))\n ax.set_ylabel(r'Fit Sigma ($\\mu m$)')\n \n if calcTemperature:\n # converting time to s, hSigmas in um \n xPoints = np.linspace(min(keyPlt), max(keyPlt))*1e-3\n for num, fitV in enumerate(tempFitVs):\n #ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *myGuess)*1e6, label = 'guess')\n ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *fitV)*1e6, color=colors[num])\n ax.legend()\n\n if plotFitAmps: \n ax = (axs if numAxisCol == 1 else axs[0])\n ampAx = ax.twinx()\n\n if onlyThisPic is not None:\n ampAx.errorbar(keyPlt, h_amp[:,onlyThisPic], hAmpErrs[:,onlyThisPic], label='h '+titles[onlyThisPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,onlyThisPic], vAmpErrs[:,onlyThisPic], label='v '+titles[onlyThisPic], color = 'r', **stdStyle);\n else:\n for whichPic in range(4):\n ampAx.errorbar(keyPlt, h_amp[:,whichPic], hAmpErrs[:,whichPic], label='h '+titles[whichPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,whichPic], vAmpErrs[:,whichPic], label='v '+titles[whichPic], color = 'r', **stdStyle);\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklabels()]\n ampAx.set_ylabel(r'Fit h_amps', color = 'r')\n \n hTotalPhotons, vTotalPhotons = None, None\n if plotCounts:\n # numAxCol = 1: ax = axs\n # numAxCol = 2: plotSigmas + plotCounts -- ax = axs[1]\n # numAxCol = 2: plotCounts + trackFitCenter -- ax = axs[0]\n # numAxCol = 3: ax = axs[1]\n if numAxisCol == 1:\n ax = axs\n elif numAxisCol == 2:\n ax = axs[1 if plotSigmas else 0]\n else:\n ax = axs[1]\n # Create axis to plot photon counts\n ax.set_ylabel(r'Integrated signal')\n photon_axis = ax.twinx()\n # This is not currently doing any correct for e.g. the loading rate.\n countToCameraPhotonEM = 0.018577 / (emGainSetting/200) # the float is is EM200. 
\n        countToScatteredPhotonEM = 0.018577 / 0.07 / (emGainSetting/200)\n\n        if onlyThisPic is not None:\n            # calculate number of photons\n            hamp = h_amp[:,onlyThisPic]*len(expansionPics[0][0]) # Horizontal \"un\"normalization for number of columns being averaged.\n            vamp = v_amp[:,onlyThisPic]*len(expansionPics[0]) \n            hsigpx = hSigmas[:,onlyThisPic]/(16/64) # Convert from um back to pixels.\n            vsigpx = vSigmas[:,onlyThisPic]/(16/64)\n            htotalCountsPerPic = bump.area_under(hamp, hsigpx)\n            vtotalCountsPerPic = bump.area_under(vamp, vsigpx)\n            hTotalPhotons = countToScatteredPhotonEM*htotalCountsPerPic\n            vTotalPhotons = countToScatteredPhotonEM*vtotalCountsPerPic\n            ax.plot(keyPlt, totalSignal[:,onlyThisPic], marker='o', linestyle='', label=titles[onlyThisPic]);\n            photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = 'r', label='Horizontal')\n            photon_axis.plot(keyPlt, vTotalPhotons, marker='o', linestyle='', color = 'orange', label='Vertical')\n        else:\n            for whichPic in range(4):\n                # See above comments\n                amp = h_amp[:,whichPic]*len(expansionPics[0][0]) \n                sig = hSigmas[:,whichPic]/(16/64) \n                totalCountsPerPic = bump.area_under(amp, sig)\n                hTotalPhotons = countToScatteredPhotonEM*totalCountsPerPic\n                ax.plot(keyPlt, totalSignal[:,whichPic], marker='o', linestyle='', label=titles[whichPic]);\n                photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = ['red', 'orange', 'yellow', 'pink'][whichPic]) \n        ax.legend()\n        photon_axis.legend()\n        [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklines()]\n        [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklabels()]\n        photon_axis.set_ylabel(r'Fit-Based Avg Scattered Photon/Img', color = 'r')\n    if trackFitCenter:\n        #numaxcol = 1: ax = axs\n        #numaxcol = 2: trackfitcenter + plotSigmas: ax = axs[1]\n        #numaxcol = 2: trackfitcenter + plotCounts: ax = axs[1]\n        #numaxcol = 3: ax = axs[2]\n        ax = (axs if numAxisCol == 1 else axs[-1])\n        if onlyThisPic is not None:\n            #ax.errorbar(keyPlt, hfitCenter[:,onlyThisPic], hFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n            ax.errorbar(keyPlt, vfitCenter[:,onlyThisPic], vFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n            #def accel(t, x0, a):\n            #    return x0 + 0.5*a*t**2\n            #accelFit, AccelCov = opt.curve_fit(accel, keyPlt*1e-3, hfitCenter[:,onlyThisPic], sigma = hFitCenterErrs[:,onlyThisPic])\n            #fitx = np.linspace(keyPlt[0], keyPlt[-1])*1e-3\n            #fity = accel(fitx, *accelFit)\n            #ax.plot(fitx*1e3, fity)\n        else:\n            for whichPic in range(4):\n                ax.errorbar(keyPlt, hfitCenter[:,whichPic], hFitCenterErrs[:,whichPic], marker='o', linestyle='', capsize=3, label=titles[whichPic]);\n            #accelErr = np.sqrt(np.diag(AccelCov))\n        fig2.legend()\n        ax.set_ylabel(r'Fit Centers (pix)')\n        ax.set_xlabel('time (ms)')\n    \n    if numAxisCol != 0:\n        disp.display(fig2) \n    \n    if not forceNoAnnotation:\n        for fid, isAnnotated in zip(fids, isAnnotatedList):\n            if not isAnnotated:\n                if type(fid) == int or type(fid) == type(''):\n                    if newAnnotation or not exp.checkAnnotation(fid, force=False, quiet=True, useBase=useBase):\n                        exp.annotate(fid, useBase=useBase)\n    if clearOutput:\n        disp.clear_output()\n    if calcTemperature: \n        for temp, err, label in zip(temps, tempErrs, ['Hor', 'Vert', 'Hor2D', 'Vert2D']): \n            print(label + ' temperature = ' + misc.errString(temp*1e6, err*1e6) + 'uk')\n\n    for fid in fids:\n        if type(fid) == int:\n            expTitle, _, lev = exp.getAnnotation(fid)\n            expTitle = ''.join('#' for _ in range(lev)) + ' File ' + 
str(fid) + ': ' + expTitle\n disp.display(disp.Markdown(expTitle))\n with exp.ExpFile(fid) as file:\n file.get_basic_info()\n if trackFitCenter:\n pass\n #print('Acceleration in Mpix/s^2 = ' + misc.errString(accelFit[1], accelErr[1]))\n if transferAnalysisOpts is not None and showTferAnalysisPlots:\n colors, colors2 = misc.getColors(len(transferAnalysisOpts.initLocs()) + 2)#, cmStr=dataColor)\n pltShape = (transferAnalysisOpts.initLocsIn[-1], transferAnalysisOpts.initLocsIn[-2])\n # mp.plotThresholdHists([initThresholds[0][0],initThresholds[1][0]], colors, shape=pltShape)\n mp.plotThresholdHists([initThresholds[0][0], initThresholds[0][0]], colors, shape=[1,2])\n returnDictionary = {'images':images, 'fits':hFitParams, 'errs':hFitErrs, 'hSigmas':hSigmas, 'sigmaErrors':hSigmaErrs, 'dataKey':keyPlt, \n 'hTotalPhotons':hTotalPhotons, 'tempCalc':temps, 'tempCalcErr':tempErrs, 'initThresholds':initThresholds[0], \n '2DFit':fitParams2D, '2DErr':fitErrs2D, 'bgPics':picsForBg, 'dataLength':datalen}\n if returnPics: \n returnDictionary['pics'] = sortedStackedPics\n return returnDictionary", "def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi", "def apphot(im, yx, rap, subsample=4, **kwargs):\n n, f = anphot(im, yx, rap, subsample=subsample, **kwargs)\n if np.size(rap) > 1:\n return n.cumsum(-1), f.cumsum(-1)\n else:\n return n, f", "def run(self, verbose=False):\n if verbose:\n from sage.combinat.rigged_configurations.tensor_product_kr_tableaux_element \\\n import TensorProductOfKirillovReshetikhinTableauxElement\n\n for cur_crystal in reversed(self.tp_krt):\n r = cur_crystal.parent().r()\n\n # Check if 
it is a spinor\n if r == self.n:\n # Perform the spinor bijection by converting to type A_{2n-1}^{(2)}\n # doing the bijection there and pulling back\n from sage.combinat.rigged_configurations.bij_type_A2_odd import KRTToRCBijectionTypeA2Odd\n from sage.combinat.rigged_configurations.tensor_product_kr_tableaux import TensorProductOfKirillovReshetikhinTableaux\n from sage.combinat.rigged_configurations.rigged_partition import RiggedPartition\n\n if verbose:\n print(\"====================\")\n if len(self.cur_path) == 0:\n print(repr([])) # Special case for displaying when the rightmost factor is a spinor\n else:\n print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), self.cur_path)))\n print(\"--------------------\")\n print(repr(self.ret_rig_con))\n print(\"--------------------\\n\")\n print(\"Applying doubling map\")\n\n # Convert to a type A_{2n-1}^{(2)} RC\n dims = self.cur_dims[:]\n dims.insert(0, [r, cur_crystal.parent().s()])\n KRT = TensorProductOfKirillovReshetikhinTableaux(['A', 2*self.n-1, 2], dims)\n # Convert the n-th partition into a regular rigged partition\n self.ret_rig_con[-1] = RiggedPartition(self.ret_rig_con[-1]._list,\n self.ret_rig_con[-1].rigging,\n self.ret_rig_con[-1].vacancy_numbers)\n bij = KRTToRCBijectionTypeA2Odd(KRT.module_generators[0]) # Placeholder element\n bij.ret_rig_con = KRT.rigged_configurations()(*self.ret_rig_con, use_vacancy_numbers=True)\n bij.cur_path = self.cur_path\n bij.cur_dims = self.cur_dims\n for i in range(len(self.cur_dims)):\n if bij.cur_dims[i][0] != self.n:\n bij.cur_dims[i][1] *= 2\n for i in range(self.n-1):\n for j in range(len(bij.ret_rig_con[i])):\n bij.ret_rig_con[i]._list[j] *= 2\n bij.ret_rig_con[i].rigging[j] *= 2\n bij.ret_rig_con[i].vacancy_numbers[j] *= 2\n\n # Perform the type A_{2n-1}^{(2)} bijection\n r = cur_crystal.parent().r()\n # Iterate through the columns\n for col_number, cur_column in enumerate(reversed(cur_crystal.to_array(False))):\n bij.cur_path.insert(0, []) # Prepend an empty list\n bij.cur_dims.insert(0, [0, 1])\n\n # Note that we do not need to worry about iterating over columns\n # (see previous note about the data structure).\n for letter in reversed(cur_column):\n bij.cur_dims[0][0] += 1\n val = letter.value # Convert from a CrystalOfLetter to an Integer\n\n if verbose:\n print(\"====================\")\n print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), bij.cur_path)))\n print(\"--------------------\")\n print(repr(bij.ret_rig_con))\n print(\"--------------------\\n\")\n\n # Build the next state\n bij.cur_path[0].insert(0, [letter]) # Prepend the value\n bij.next_state(val)\n\n # If we've split off a column, we need to merge the current column\n # to the current crystal tableau\n if col_number > 0:\n for i, letter_singleton in enumerate(self.cur_path[0]):\n bij.cur_path[1][i].insert(0, letter_singleton[0])\n bij.cur_dims[1][1] += 1\n bij.cur_path.pop(0)\n bij.cur_dims.pop(0)\n\n # And perform the inverse column splitting map on the RC\n for a in range(self.n):\n bij._update_vacancy_nums(a)\n\n if verbose:\n print(\"====================\")\n print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), bij.cur_path)))\n print(\"--------------------\")\n print(repr(bij.ret_rig_con))\n print(\"--------------------\\n\")\n print(\"Applying halving map\")\n\n # Convert back to a type B_n^{(1)}\n for i in range(len(self.cur_dims)):\n if bij.cur_dims[i][0] != self.n:\n bij.cur_dims[i][1] //= 2\n for i in range(self.n-1):\n 
for j in range(len(bij.ret_rig_con[i])):\n bij.ret_rig_con[i]._list[j] //= 2\n bij.ret_rig_con[i].rigging[j] //= 2\n bij.ret_rig_con[i].vacancy_numbers[j] //= 2\n self.ret_rig_con = self.tp_krt.parent().rigged_configurations()(*bij.ret_rig_con, use_vacancy_numbers=True)\n # Make it mutable so we don't have to keep making copies, at the\n # end of the bijection, we will make it immutable again\n self.ret_rig_con._set_mutable()\n else:\n # Perform the regular type B_n^{(1)} bijection\n # Iterate through the columns\n for col_number, cur_column in enumerate(reversed(cur_crystal.to_array(False))):\n self.cur_path.insert(0, []) # Prepend an empty list\n self.cur_dims.insert(0, [0, 1])\n\n # Note that we do not need to worry about iterating over columns\n # (see previous note about the data structure).\n for letter in reversed(cur_column):\n self.cur_dims[0][0] += 1\n val = letter.value # Convert from a CrystalOfLetter to an Integer\n\n if verbose:\n print(\"====================\")\n print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), self.cur_path)))\n print(\"--------------------\")\n print(repr(self.ret_rig_con))\n print(\"--------------------\\n\")\n\n # Build the next state\n self.cur_path[0].insert(0, [letter]) # Prepend the value\n self.next_state(val)\n\n # If we've split off a column, we need to merge the current column\n # to the current crystal tableau\n if col_number > 0:\n if verbose:\n print(\"====================\")\n print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), self.cur_path)))\n print(\"--------------------\")\n print(repr(self.ret_rig_con))\n print(\"--------------------\\n\")\n print(\"Applying column merge\")\n\n for i, letter_singleton in enumerate(self.cur_path[0]):\n self.cur_path[1][i].insert(0, letter_singleton[0])\n self.cur_dims[1][1] += 1\n self.cur_path.pop(0)\n self.cur_dims.pop(0)\n\n # And perform the inverse column splitting map on the RC\n for a in range(self.n):\n self._update_vacancy_nums(a)\n self.ret_rig_con.set_immutable() # Return it to immutable\n return self.ret_rig_con", "def __getitem__(self, idx):\n R = self.R\n \n image_path = os.path.join(self.image_dir, self.data['ImageId'][idx] + '.jpg')\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = torch.from_numpy(image)\n H, W, N = image.size() # H, W, 3\n image = image.permute(2, 0, 1).float()\n image = 2*(image/255)-1\n\n car_id, euler_angle, world_coords = from_prediction_string_to_world_coords(self.data['PredictionString'][idx])\n image_coords = from_camera_coords_to_image_coords(world_coords, self.camera_matrix)\n image_coords = torch.from_numpy(image_coords).float()\n \n target_pointmap = torch.zeros(H//R, W//R, dtype=torch.float)\n target_heatmap = None\n target_local_offset = torch.zeros(2, H//R, W//R, dtype=torch.float)\n target_depth = torch.zeros(H//R, W//R, dtype=torch.float)\n target_yaw = torch.zeros(H//R, W//R, dtype=torch.float)\n target_pitch = torch.zeros(H//R, W//R, dtype=torch.float)\n target_roll = torch.zeros(H//R, W//R, dtype=torch.float)\n h = torch.arange(0, H//R, 1, dtype=torch.float).unsqueeze(dim=1).expand(-1, W//R)\n w = torch.arange(0, W//R, 1, dtype=torch.float).unsqueeze(dim=0).expand(H//R, -1)\n p_h = image_coords[:, 1]\n p_w = image_coords[:, 0]\n \n num_object = torch.Tensor([len(car_id)])\n \n for object_id in range(int(num_object.item())):\n point = ((p_h[object_id]/R).int(), (p_w[object_id]/R).int())\n \n if point[0] < 0 or H//R - 1 < point[0] or point[1] < 0 
or W//R - 1 < point[1] :\n continue\n target_pointmap[point[0], point[1]] = 1\n target_local_offset[0][point[0], point[1]] = p_h[object_id]/R - point[0].float()\n target_local_offset[1][point[0], point[1]] = p_w[object_id]/R - point[1].float()\n target_depth[point[0], point[1]] = image_coords[object_id, 2]\n target_yaw[point[0], point[1]] = torch.Tensor([euler_angle[object_id][0]])\n target_pitch[point[0], point[1]] = torch.Tensor([euler_angle[object_id][1]])\n target_roll[point[0], point[1]] = torch.Tensor([euler_angle[object_id][2]])\n \n sigma = self.coeff_sigma / target_depth[point[0], point[1]]\n exponent = - ((h-p_h[object_id]/R)**2 + (w-p_w[object_id]/R)**2)/(2 * torch.pow(sigma, 2))\n heatmap = torch.exp(exponent)\n \n if target_heatmap is None:\n target_heatmap = heatmap.unsqueeze(dim=0)\n else:\n target_heatmap = torch.cat((target_heatmap, heatmap.unsqueeze(dim=0)), dim=0)\n\n target_heatmap, _ = torch.max(target_heatmap, dim=0)\n \n if torch.cuda.is_available():\n image = image.cuda().contiguous()\n target = {'num_object': num_object.cuda(), 'pointmap': target_pointmap.cuda(), 'heatmap': target_heatmap.cuda(), 'local_offset': target_local_offset.cuda(), 'depth': target_depth.cuda(), 'yaw': target_yaw.cuda(), 'pitch': target_pitch.cuda(), 'roll': target_roll.cuda()}\n else:\n image = image.contiguous()\n target = {'num_object': num_object, 'pointmap': target_pointmap, 'heatmap': target_heatmap, 'local_offset': target_local_offset, 'depth': target_depth, 'yaw': target_yaw, 'pitch': target_pitch, 'roll': target_roll}\n \n return image, target", "def NAME():\n\n # Location of data\n base_dir = \"(Location)\" #Location of align tif --> Should be the location of the experiment's align tiff folder, ex: \"C/desktop/work/image_processing/YYYYMMDD/align_tiffs\"\n resolution = {'res_xy_nm': 100, 'res_z_nm': 70} #Resolution of a pixel (do not alter)\n thresh = 0.9 #What qualifies for final probability map (do not alter)\n number_of_datasets = 20 #Number of wells in the experiemnts, \"20\" is an example where there are 16 samples and 4 controls\n\n #Rb Antibody\n conjugate_fn_str = 'GAD2' #String segment to search in a filename\n #conjugate_fn_str should be the term used in the name of the control align tiff for a well (usually \"PSD\", \"GAD2\", or \"SYNAPSIN\")\n target_fn_str = 'L106'\n #Ms Antibody project name, no parent or subclone number needed\n #target_fn_str should be the project number, for instance if this was testing L109 samples, this would be \"L109\"\n #Takes base directory string and gives you an array of all the files within\n filenames = aa.getListOfFolders(base_dir) #Do not change\n conjugate_filenames = [] #Do not change\n target_filenames = [] #Do not change\n query_list = [] #Do not change\n folder_names = [] #Do not change\n\n for n in range(1, 17):\n #Use if dataset missing\n #This is where you put in the rangee of wells used as your test samples\n #Since we have 16 samples that are test samples for L106, the range is equal to 1 through n+1, or 1 through 17\n #If your test samples do not begin at well 1, then adjust the beginning of the range accordingly (3 through 17 if the first test sample is in well 3) \n #continue\n\n print('Well: ', str(n)) #Do not change\n folder_names.append('Test-' + str(n)) # Collate 'dataset' names for excel sheet #Do not change\n conjugate_str = str(n) + '-' + conjugate_fn_str #creates filename to search for #Creates n-conjugatename #Do not change\n target_str = str(n) + '-' + target_fn_str #Do not change\n\n # Search for file associated 
with the specific dataset number\n        indices = [i for i, s in enumerate(filenames) if conjugate_str == s[0:len(conjugate_str)]] #Do not change\n        conjugate_name = filenames[indices[0]] #Do not change\n        print(conjugate_name) #Do not change\n        indices = [i for i, s in enumerate(filenames) if target_str == s[0:len(target_str)]] #Do not change\n        target_name = filenames[indices[0]] #Do not change\n        print(target_name) #Do not change\n        \n        conjugate_filenames.append(conjugate_name) #Do not change\n        target_filenames.append(target_name) #Do not change\n\n        # Create query\n        #\n        query = {'preIF': [conjugate_name], 'preIF_z': [2],\n                 'postIF': [target_name], 'postIF_z': [1],\n                 'punctumSize': 2}\n        #preIF = items that are presynaptic targets go here, because GAD2, our conjugate, is presynaptic I put the conjugate_name in this box\n        #preIF_z = how many tiffs a puncta must be in to be registered, conjugate sample number is 2 so 2 goes in this box\n        #postIF = items that are postsynaptic targets go here, L106 is postsynaptic so I put target_name here\n        #postIF_z = how many tiffs a puncta must be in to be registered, target sample number is 1 (for now unless changed later) \n        #punctumSize = size of punctum the algorithm is looking for, do not change unless directed to\n\n        \"\"\"Example of a presynaptic target and presynaptic conjugate\n        query = {'preIF': [target_name,conjugate_name], 'preIF_z': [1,2],\n                 'postIF': [], 'postIF_z': [],\n                 'punctumSize': 2}\"\"\"\n\n        \"\"\"Example of a postsynaptic target and presynaptic conjugate\n        query = {'preIF': [conjugate_name], 'preIF_z': [2],\n                 'postIF': [target_name], 'postIF_z': [1],\n                 'punctumSize': 2}\"\"\"\n\n        \"\"\"Example of a postsynaptic target and postsynaptic conjugate\n        query = {'preIF': [], 'preIF_z': [],\n                 'postIF': [target_name,conjugate_name], 'postIF_z': [1,2],\n                 'punctumSize': 2}\"\"\"\n\n        \"\"\"Example of a presynaptic target and postsynaptic conjugate\n        query = {'preIF': [target_name], 'preIF_z': [1],\n                 'postIF': [conjugate_name], 'postIF_z': [2],\n                 'punctumSize': 2}\"\"\"\n\n\n        query_list.append(query)\n\n\n    #The following n samples are controls - you can add as many of these as you want by copying the block of code and pasting it after the last one\n    #The notes in the following block of code apply to all of the controls\n    n = 17 #well number of control sample\n    folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet #Do not change\n    reference_fn_str = 'GAD2' #String segment to search in a filename #reference_fn_str is the project number/name of RB control\n    target_fn_str = 'L106' #target_fn_str is the project number of the Ms control you are using\n    conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n) #Do not alter\n    conjugate_filenames.append(conjugate_name) #Do not alter\n    target_filenames.append(target_name) #Do not alter\n    query = {'preIF': [conjugate_name], 'preIF_z': [2], 'postIF': [target_name], 'postIF_z': [1], 'punctumSize': 2} #See the examples and explanations above about \"query\"\n    query_list.append(query) #Do not change\n\n    n = 18\n    folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet\n    reference_fn_str = 'GAD2' #String segment to search in a filename\n    target_fn_str = 'SP2'\n    conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n)\n    conjugate_filenames.append(conjugate_name)\n    target_filenames.append(target_name)\n    query = {'preIF': [target_name,conjugate_name], 'preIF_z': [1,2], 'postIF': [], 'postIF_z': [], 
'punctumSize': 2}\n query_list.append(query)\n\n n = 19\n folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet\n reference_fn_str = 'NP-RB' #String segment to search in a filename\n target_fn_str = 'NP-MS'\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n)\n conjugate_filenames.append(conjugate_name)\n target_filenames.append(target_name)\n query = {'preIF': [], 'preIF_z': [], 'postIF': [target_name,conjugate_name], 'postIF_z': [1,2], 'punctumSize': 2}\n query_list.append(query)\n\n n = 20\n folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet\n reference_fn_str = 'NPNS-RB' #String segment to search in a filename\n target_fn_str = 'NPNS-MS'\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n)\n conjugate_filenames.append(conjugate_name)\n target_filenames.append(target_name)\n query = {'preIF': [], 'preIF_z': [], 'postIF': [target_name,conjugate_name], 'postIF_z': [1,2], 'punctumSize': 2}\n query_list.append(query)\n\n\n \n measure_list = aa.calculate_measure_lists(query_list, None, base_dir,\n thresh, resolution, target_filenames) # Run all the queries\n\n df = aa.create_df(measure_list, folder_names, target_filenames, conjugate_filenames) #Do not change\n print(df) #Do not change\n\n return df #Do not change", "def simanalyze(project='sim', image=True, imagename='default', skymodel='', vis='default', modelimage='', imsize=[0, 0], imdirection='', cell='', interactive=False, niter=0, threshold='0.1mJy', weighting='natural', mask=[], outertaper=[''], pbcor=True, stokes='I', featherimage='', analyze=False, showuv=True, showpsf=True, showmodel=True, showconvolved=False, showclean=True, showresidual=False, showdifference=True, showfidelity=True, graphics='both', verbose=False, overwrite=True, dryrun=False, logfile=''):\n if type(imsize)==int: imsize=[imsize]\n if type(outertaper)==str: outertaper=[outertaper]\n\n#\n# The following is work around to avoid a bug with current python translation\n#\n mytmp = {}\n\n mytmp['project'] = project\n mytmp['image'] = image\n mytmp['imagename'] = imagename\n mytmp['skymodel'] = skymodel\n mytmp['vis'] = vis\n mytmp['modelimage'] = modelimage\n mytmp['imsize'] = imsize\n mytmp['imdirection'] = imdirection\n mytmp['cell'] = cell\n mytmp['interactive'] = interactive\n mytmp['niter'] = niter\n mytmp['threshold'] = threshold\n mytmp['weighting'] = weighting\n mytmp['mask'] = mask\n mytmp['outertaper'] = outertaper\n mytmp['pbcor'] = pbcor\n mytmp['stokes'] = stokes\n mytmp['featherimage'] = featherimage\n mytmp['analyze'] = analyze\n mytmp['showuv'] = showuv\n mytmp['showpsf'] = showpsf\n mytmp['showmodel'] = showmodel\n mytmp['showconvolved'] = showconvolved\n mytmp['showclean'] = showclean\n mytmp['showresidual'] = showresidual\n mytmp['showdifference'] = showdifference\n mytmp['showfidelity'] = showfidelity\n mytmp['graphics'] = graphics\n mytmp['verbose'] = verbose\n mytmp['overwrite'] = overwrite\n mytmp['dryrun'] = dryrun\n mytmp['logfile'] = logfile\n pathname='file://' + xmlpath( ) + '/'\n trec = casac.utils().torecord(pathname+'simanalyze.xml')\n\n casalog.origin('simanalyze')\n if trec.has_key('simanalyze') and casac.utils().verify(mytmp, trec['simanalyze']) :\n result = task_simanalyze.simanalyze(project, image, imagename, skymodel, vis, modelimage, imsize, imdirection, cell, interactive, niter, threshold, weighting, mask, outertaper, pbcor, stokes, featherimage, analyze, showuv, showpsf, showmodel, 
showconvolved, showclean, showresidual, showdifference, showfidelity, graphics, verbose, overwrite, dryrun, logfile)\n\n else :\n result = False\n return result", "def create_azi_to_rad_sequence():\n num_tot = 30\n for i in range(2*num_tot + 1):\n angle_arr = azi_to_rad_transformation(512, i, 30)\n phase_arr = create_flat_phase(512, 0)\n delta_1_arr = create_delta_1(phase_arr, angle_arr)\n delta_2_arr = create_delta_2(angle_arr)\n cv2.imwrite('frame' + str(i) +'.tiff', delta_2_arr)\n print(\"Frame \" + str(i))", "def update_header(arr_imgs,obj,filter_i):\n \n for img in arr_imgs:\n warnings.simplefilter('ignore', category=AstropyUserWarning)\n try:\n hdulist = fits.open(img,ignore_missing_end=True)\n #if there is only a primary header get the data from it\n if len(hdulist) == 1:\n data = getdata(img, 0, header=False)\n #if there is more than one header get data from the 'SCI' extension\n else:\n data = getdata(img, 1, header=False)\n #Get value of EXPTIME and PHOTZPT keyword from primary header and \n #set CCDGAIN to a default value of 1\n EXPTIME = hdulist[0].header['EXPTIME']\n PHOTFLAM = hdulist[1].header['PHOTFLAM']\n PHOTZPT = hdulist[1].header['PHOTZPT']\n CCDGAIN = 1.0\n #First pass locating value for gain\n for i in range(2):\n if len(hdulist) == 1:\n break\n #Go through primary and secondary header and ignore the \n #BinTable formatted header\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['CCDGAIN']\n break\n if 'GAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['GAIN']\n break\n if 'ATODGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['ATODGAIN']\n break\n \n #Locating units of image\n print('Doing BUNIT check')\n for i in range(2):\n #If there is only one header then this is the only place to \n #check\n if len(hdulist) == 1:\n bunit = hdulist[0].header['D001OUUN']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'counts':\n ### Rescaling zeropoint\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n hdulist[0].header.set('BUNIT','COUNTS/S')\n hdulist[0].header.set('MAGZPT',ZPT_NEW)\n print('BUNIT is {0}'.format(hdulist[0].\\\n header['BUNIT']))\n \n #If there are multiple headers then they all have to be checked\n else:\n if 'BUNIT' in hdulist[i].header:\n bunit = hdulist[i].header['BUNIT']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'COUNTS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n if bunit == 'ELECTRONS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN*EXPTIME) \\\n + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/(CCDGAIN*EXPTIME))*pixmod\n if bunit == 'ELECTRONS/S':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n if bunit == 'ELECTRONS/SEC':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n hdulist[i].header['BUNIT'] = 'COUNTS/S'\n hdulist[i].header['MAGZPT'] = ZPT_NEW\n ###\n print('BUNIT is {0}'.format(hdulist[i].\\\n header['BUNIT']))\n print('PHOTZPT is {0}'.format(hdulist[i].\\\n header['MAGZPT']))\n print('Done changing BUNIT')\n \n #Second pass to assign gain and exptime to headers\n for i in range(2):\n if len(hdulist) == 1:\n break\n if 
not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' not in hdulist[i].header:\n hdulist[i].header.set('CCDGAIN',CCDGAIN)\n if 'EXPTIME' not in hdulist[i].header:\n hdulist[i].header.set('EXPTIME',EXPTIME)\n \n #Make new versions of images in interim/obj1 folder\n os.chdir(path_to_interim + obj)\n #Remove .fits extension\n img = os.path.splitext(img)[0]\n #If there was only one header write that header's data to new\n #version of fits image\n if len(hdulist) == 1:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[0].\\\n header,output_verify='ignore')\n #Else write the 'SCI' header's data to new version of fits image\n else:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[1].\\\n header,output_verify='ignore')\n hdulist.close()\n os.chdir(path_to_raw + obj)\n \n #This is to catch 'empty or corrupt FITS file' or any other IOError\n #and write it to a text file along with the object name and the \n #filter name\n except IOError as e:\n os.chdir('..')\n dir_path = os.getcwd()\n if os.path.basename(dir_path) == 'raw':\n os.chdir(path_to_interim)\n with open('Error_swarp.txt','a') as newfile: \n newfile.write('Object {0} and image {1} raises {2}'.\\\n format(obj,img,e))\n newfile.write('\\n')\n newfile.close()\n os.chdir(path_to_raw + obj)\n \n os.chdir(path_to_interim + obj)\n #For this object and filter combination grab all the new versions made\n arr = glob('*test_'+filter_i+'.fits')\n print(len(arr))\n if len(arr) >= 1: #avoid empty cases where files have been removed earlier\n #or don't exist at all since the dictionary also contains\n #pairs of objects and filters that didn't meet the swarp\n #requirements (didn't pass preliminary exptime or filter\n #checks so those folders/images don't exist)\n \n #If new versions exist then write their names to a text file \n with open(filter_i+'_img_list_testfil.txt','wb') as newfile2:\n for obj in arr:\n newfile2.write(obj)\n newfile2.write('\\n')\n newfile2.close()\n #If text file exists return the name\n return filter_i+'_img_list_testfil.txt'\n #If text file doesn't exist return this string\n return 'error'", "def simulator_from_instrument(instrument):\r\n\r\n grid = grid_from_instrument(instrument=instrument)\r\n psf = psf_from_instrument(instrument=instrument)\r\n\r\n if instrument in \"vro\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=100.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"euclid\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2260.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst_up\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif 
instrument in \"ao\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=1000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n else:\r\n raise ValueError(\"An invalid instrument was entered - \", instrument)", "def FluorescenceAnalysis(self, folder, round_num, save_mask = True):\r\n RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(folder, file_keyword = 'Zmax')\r\n # RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(folder, file_keyword = 'Zfocus')\r\n \r\n if not os.path.exists(os.path.join(folder, 'MLimages_{}'.format(round_num))):\r\n # If the folder is not there, create the folder\r\n os.mkdir(os.path.join(folder, 'MLimages_{}'.format(round_num))) \r\n \r\n for EachRound in RoundNumberList:\r\n \r\n cells_counted_in_round = 0\r\n \r\n if EachRound == round_num:\r\n \r\n # Start numbering cells at each round\r\n self.cell_counted_inRound = 0 \r\n \r\n for EachCoord in CoordinatesList:\r\n \r\n # =============================================================================\r\n # For tag fluorescence:\r\n # ============================================================================= \r\n print(EachCoord)\r\n #-------------- readin image---------------\r\n for Eachfilename in enumerate(fileNameList):\r\n if EachCoord in Eachfilename[1] and EachRound in Eachfilename[1]:\r\n if '0Zmax' in Eachfilename[1]:\r\n ImgNameInfor = Eachfilename[1][0:len(Eachfilename[1])-14] # get rid of '_PMT_0Zmax.tif' in the name.\r\n elif '0Zfocus' in Eachfilename[1]:\r\n ImgNameInfor = Eachfilename[1][0:len(Eachfilename[1])-16] # get rid of '_PMT_0Zfocus.tif' in the name.\r\n _imagefilename = os.path.join(folder, Eachfilename[1])\r\n #------------------------------------------\r\n \r\n # =========================================================================\r\n # USING MASKRCNN...\r\n # =========================================================================\r\n # Imagepath = self.Detector._fixPathName(_imagefilename)\r\n Rawimage = imread(_imagefilename)\r\n \r\n# if ClearImgBef == True:\r\n# # Clear out junk parts to make it esaier for ML detection.\r\n# RawimageCleared = self.preProcessMLimg(Rawimage, smallest_size=300, lowest_region_intensity=0.16)\r\n# else:\r\n# RawimageCleared = Rawimage.copy()\r\n \r\n image = ProcessImage.convert_for_MaskRCNN(Rawimage)\r\n \r\n # Run the detection on input image.\r\n results = self.Detector.detect([image])\r\n \r\n MLresults = results[0]\r\n \r\n if save_mask == True:\r\n fig, ax = plt.subplots()\r\n # Set class_names = [None,None,None,None] to mute class name display.\r\n visualize.display_instances(image, MLresults['rois'], MLresults['masks'], MLresults['class_ids'],\r\n class_names = [None,None,None,None], ax=ax,\r\n centre_coors = MLresults['Centre_coor'], Centre_coor_radius = 2, \r\n WhiteSpace = (0, 0))#MLresults['class_ids'],MLresults['scores'], \r\n # ax.imshow(fig)\r\n fig.tight_layout()\r\n # Save the detection image\r\n fig_name = os.path.join(folder, 'MLimages_{}\\{}.tif'.format(round_num, ImgNameInfor))\r\n plt.savefig(fname = fig_name, dpi=200, pad_inches=0.0, bbox_inches='tight')\r\n \r\n # segmentationImg = Image.fromarray(fig) #generate an image object\r\n # segmentationImg.save(os.path.join(folder, 'MLimages_{}\\{}.tif'.format(round_num, ImgNameInfor)))#save as tif\r\n \r\n if self.cell_counted_inRound == 0:\r\n 
cell_Data, self.cell_counted_inRound, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, str(ImgNameInfor), self.cell_counted_inRound)\r\n else: \r\n Cell_Data_new, self.cell_counted_inRound, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, str(ImgNameInfor), self.cell_counted_inRound)\r\n if len(Cell_Data_new) > 0:\r\n cell_Data = cell_Data.append(Cell_Data_new)\r\n \r\n # Count in total how many flat and round cells are identified.\r\n cells_counted_in_round += total_cells_counted_in_coord\r\n \r\n print(\"Number of round/flat cells in this round: {}\".format(cells_counted_in_round))\r\n \r\n # Save to excel\r\n cell_Data.to_excel(os.path.join(os.path.join(folder, round_num + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_CellsProperties.xlsx')))\r\n \r\n return cell_Data", "def CalcAtmTransmissionForImage(img, header='', chanInfo='', airmass=1.5,pwv=-1, \n spectralaxis=-1, \n value='transmission', P=-1, H=-1, \n T=-1, altitude=-1):\n if (header == ''):\n print \"imhead\", # the comma prevents the newline so that ...10...20 will be on same line\n header = imhead(img,mode='list')\n if (type(header) != dict):\n # Input was a spectrum rather than an image\n if (chanInfo[1] < 60e9):\n telescopeName = 'ALMA'\n else:\n telescopeName = 'VLA'\n else:\n telescopeName = header['telescope']\n # this will not match up with the plot, which uses numberOfChannelsInCube\n# freqs = getFreqsForImage(img, header, spectralaxis)\n freqs = np.linspace(chanInfo[1]*1e-9,chanInfo[2]*1e-9,chanInfo[0])\n# print \"freqs: %f-%f\" % (freqs[0], freqs[-1])\n numchan = len(freqs)\n lsrkwidth = (chanInfo[2] - chanInfo[1])/(numchan-1)\n result = cubeLSRKToTopo(img, nchan=numchan, f0=chanInfo[1], f1=chanInfo[2], chanwidth=lsrkwidth)\n if (result is None):\n topofreqs = freqs\n else:\n topoWidth = (result[1]-result[0])/(numchan-1)\n topofreqs = np.linspace(result[0], result[1], chanInfo[0]) * 1e-9\n casalogPost(\"Converted LSRK range (%f-%f) to TOPO (%f-%f) over %d channels\" % (chanInfo[1]*1e-9, chanInfo[2]*1e-9,topofreqs[0],topofreqs[-1],numchan))\n P0 = 1000.0 # mbar\n H0 = 20.0 # percent\n T0 = 273.0 # Kelvin\n if (telescopeName.find('ALMA') >= 0 or telescopeName.find('ACA') >= 0):\n pwv0 = 1.0 \n P0 = 563.0\n H0 = 20.0\n T0 = 273.0\n altitude0 = 5059\n elif (telescopeName.find('VLA') >= 0):\n P0 = 786.0\n pwv0 = 5.0 \n altitude0 = 2124\n else:\n pwv0 = 10.0 \n altitude0 = 0\n if (pwv < 0):\n pwv = pwv0\n if (T < 0):\n T = T0\n if (H < 0):\n H = H0\n if (P < 0):\n P = P0\n if (altitude < 0):\n altitude = altitude0\n tropical = 1\n midLatitudeSummer = 2\n midLatitudeWinter = 3\n# print \"image bandwidth = %f GHz\" % (np.max(freqs)-np.min(freqs))\n reffreq = np.mean(topofreqs)\n numchanModel = numchan*1\n chansepModel = (topofreqs[-1]-topofreqs[0])/(numchanModel-1)\n# print \"regridded bandwidth=%f GHz, chansep=%f, reffreq=%f\" % (np.max(topofreqs)-np.min(topofreqs), chansepModel, reffreq)\n nbands = 1\n myqa = createCasaTool(qatool)\n fCenter = create_casa_quantity(myqa, reffreq, 'GHz')\n fResolution = create_casa_quantity(myqa, chansepModel, 'GHz')\n fWidth = create_casa_quantity(myqa, numchanModel*chansepModel, 'GHz')\n myat = casac.atmosphere()\n myat.initAtmProfile(humidity=H, temperature=create_casa_quantity(myqa,T,\"K\"),\n altitude=create_casa_quantity(myqa,altitude,\"m\"),\n pressure=create_casa_quantity(myqa,P,'mbar'),atmType=midLatitudeWinter)\n myat.initSpectralWindow(nbands, fCenter, fWidth, fResolution)\n 
myat.setUserWH2O(create_casa_quantity(myqa, pwv, 'mm'))\n# myat.setAirMass() # This does not affect the opacity, but it does effect TebbSky, so do it manually.\n myqa.done()\n\n dry = np.array(myat.getDryOpacitySpec(0)[1])\n wet = np.array(myat.getWetOpacitySpec(0)[1]['value'])\n TebbSky = myat.getTebbSkySpec(spwid=0)[1]['value']\n # readback the values to be sure they got set\n \n rf = myat.getRefFreq()['value']\n cs = myat.getChanSep()['value']\n if (myat.getRefFreq()['unit'] != 'GHz'):\n casalogPost(\"There is a unit mismatch for refFreq in the atm code.\")\n if (myat.getChanSep()['unit'] != 'MHz'):\n casalogPost(\"There is a unit mismatch for chanSep in the atm code.\")\n numchanModel = myat.getNumChan()\n freq0 = myat.getChanFreq(0)['value']\n freq1 = myat.getChanFreq(numchanModel-1)['value']\n# print \"atm returned bandwidth = %f GHz = %f to %f \" % (freq1-freq0, freq0, freq1)\n newfreqs = np.linspace(freqs[0], freqs[-1], numchanModel) # fix for SCOPS-4815\n# print \"freqs: %f-%f newfreqs: %f-%f\" % (freqs[0], freqs[-1], newfreqs[0], newfreqs[-1])\n transmission = np.exp(-airmass*(wet+dry))\n TebbSky *= (1-np.exp(-airmass*(wet+dry)))/(1-np.exp(-wet-dry))\n if value=='transmission':\n values = transmission\n else:\n values = TebbSky\n del myat\n return(newfreqs, values)", "def analyze(self, options, target):\r\n\r\n target = 0\r\n\r\n upf = None\r\n\r\n dwnf = None\r\n\r\n if options.upfile is not None:\r\n\r\n upf = basepath + options.upfile + '.ma'\r\n\r\n if options.downfile is not None:\r\n\r\n dwnf = basepath + options.downfile + '.ma'\r\n\r\n\r\n\r\n for filename in (upf, dwnf):\r\n\r\n # if options.upfile is not None and options.downfile is not None:\r\n\r\n if filename is None:\r\n\r\n break\r\n\r\n im=[]\r\n\r\n self.imageData = []\r\n\r\n print (\"Loading data from %s\" % filename)\r\n\r\n try:\r\n\r\n im = MetaArray(file = filename, subset=(slice(0,2), slice(64,128), slice(64,128)))\r\n\r\n except:\r\n\r\n print(' Error loading upfile: %s' % filename)\r\n\r\n return\r\n\r\n print(' Data loaded')\r\n\r\n target = target + 1\r\n\r\n self.times = im.axisValues('Time').astype('float32')\r\n\r\n self.imageData = im.view(np.ndarray).astype('float32')\r\n\r\n im=[]\r\n\r\n self.analysis_fourier_map(period=self.period, target=target, bins=binsize,)\r\n\r\n if target > 0:\r\n\r\n self.plot_maps(mode = 1, target = target, gfilter = self.gfilter)", "def addAllTasselCapIndices(self,img):\n\t\t\n\t\tdef getTasseledCap(img):\n\t\t\t\t\"\"\"Function to compute the Tasseled Cap transformation and return an image\"\"\"\n\t\t\t\t\n\t\t\tcoefficients = ee.Array([\n\t\t\t\t[0.3037, 0.2793, 0.4743, 0.5585, 0.5082, 0.1863],\n\t\t\t\t[-0.2848, -0.2435, -0.5436, 0.7243, 0.0840, -0.1800],\n\t\t\t\t[0.1509, 0.1973, 0.3279, 0.3406, -0.7112, -0.4572],\n\t\t\t\t[-0.8242, 0.0849, 0.4392, -0.0580, 0.2012, -0.2768],\n\t\t\t\t[-0.3280, 0.0549, 0.1075, 0.1855, -0.4357, 0.8085],\n\t\t\t\t[0.1084, -0.9022, 0.4120, 0.0573, -0.0251, 0.0238]\n\t\t\t]);\n\t\t\t\n\t\t\tbands=ee.List(['blue','green','red','nir','swir1','swir2'])\n\t\t\t\t\n\t\t\t# Make an Array Image, with a 1-D Array per pixel.\n\t\t\tarrayImage1D = img.select(bands).toArray()\n\t\t\t\n\t\t\t# Make an Array Image with a 2-D Array per pixel, 6x1.\n\t\t\tarrayImage2D = arrayImage1D.toArray(1)\n\t\t\t\n\t\t\tcomponentsImage = ee.Image(coefficients).matrixMultiply(arrayImage2D).arrayProject([0]).arrayFlatten([['brightness', 'greenness', 'wetness', 'fourth', 'fifth', 'sixth']]).float();\n\t\t \n\t\t\t# Get a multi-band image with TC-named 
bands.\n\t\t\treturn img.addBands(componentsImage);\t\n\t\t\t\t\n\t\t\t\t\n\t\tdef addTCAngles(img):\n\n\t\t\t\"\"\" Function to add Tasseled Cap angles and distances to an image. Assumes image has bands: 'brightness', 'greenness', and 'wetness'.\"\"\"\n\t\t\t\t\t\n\t\t\t# Select brightness, greenness, and wetness bands\t\n\t\t\tbrightness = img.select('brightness');\n\t\t\tgreenness = img.select('greenness');\n\t\t\twetness = img.select('wetness');\n\t \n\t\t\t# Calculate Tasseled Cap angles and distances\n\t\t\ttcAngleBG = brightness.atan2(greenness).divide(math.pi).rename(['tcAngleBG']);\n\t\t\ttcAngleGW = greenness.atan2(wetness).divide(math.pi).rename(['tcAngleGW']);\n\t\t\ttcAngleBW = brightness.atan2(wetness).divide(math.pi).rename(['tcAngleBW']);\n\t\t\ttcDistBG = brightness.hypot(greenness).rename(['tcDistBG']);\n\t\t\ttcDistGW = greenness.hypot(wetness).rename(['tcDistGW']);\n\t\t\ttcDistBW = brightness.hypot(wetness).rename(['tcDistBW']);\n\t\t\timg = img.addBands(tcAngleBG).addBands(tcAngleGW).addBands(tcAngleBW).addBands(tcDistBG).addBands(tcDistGW).addBands(tcDistBW);\n\t\t\t\t\n\t\t\treturn img;\n\t\t\n\t\t\n\t\timg = getTasseledCap(img)\n\t\timg = addTCAngles(img)\n\t\treturn img", "def regrid_in_miriad(taskid, image_name, hdu_image, b, c):\n\n\t# Change the reference pixel of beam model to reference pixel of image to correct\n\tcb_model = beam_lookup.model_lookup2(taskid, b)\n\thdulist_cb = pyfits.open(cb_model)\n\thdulist_cb[0].header['CRVAL1'] = hdu_image[0].header['CRVAL1']\n\thdulist_cb[0].header['CRVAL2'] = hdu_image[0].header['CRVAL2']\n\n\t# Rescale to appropriate frequency. This should work for either drift scans or Gaussian regression (only tested on latter):\n\tavg_cube_freq = (hdu_image[0].header['CRVAL3'] + hdu_image[0].header['CDELT3'] * hdu_image[0].data.shape[0]) * u.Hz\n\thdulist_cb[0].header['CDELT1'] = (hdulist_cb[0].header['CDELT1'] * get_cb_model_freq().to(u.Hz) / avg_cube_freq).value\n\thdulist_cb[0].header['CDELT2'] = (hdulist_cb[0].header['CDELT2'] * get_cb_model_freq().to(u.Hz) / avg_cube_freq).value\n\n\tcb2d_name = 'temp_b{}_c{}_cb-2d.fits'.format(b, c)\n\thdulist_cb.writeto(cb2d_name)\n\thdulist_cb.close()\n\n\tprint('\\tRegridding in miriad using model {}'.format(cb_model))\n\n\tfits = lib.miriad('fits')\n\tregrid = lib.miriad('regrid')\n\n\t# Convert files to miriad:\n\tfits.in_ = image_name\n\tfits.out = '{}.mir'.format(image_name[:-5])\n\tfits.op = 'xyin'\n\tfits.go()\n\n\tfits.in_ = cb2d_name\n\tfits.out = '{}.mir'.format(cb2d_name[:-5])\n\tfits.op = 'xyin'\n\tfits.go()\n\n\t# Regrid beam image\n\tregrid.in_ = '{}.mir'.format(cb2d_name[:-5])\n\tregrid.out = '{}_rgrid.mir'.format(cb2d_name[:-5])\n\tregrid.tin = '{}.mir'.format(image_name[:-5])\n\tregrid.axes = '1,2'\n\tregrid.go()\n\n\t# Convert regrided beam image to fits\n\tfits.in_ = '{}_rgrid.mir'.format(cb2d_name[:-5])\n\tfits.out = '{}_rgrid.fits'.format(cb2d_name[:-5])\n\tfits.op = 'xyout'\n\tfits.go()\n\n\t# Make cb 3D and save as FITS:\n\thdu_cb = pyfits.open('{}_rgrid.fits'.format(cb2d_name[:-5]))\n\td_new = np.ones((hdu_image[0].header['NAXIS3'], hdu_cb[0].header['NAXIS2'], hdu_cb[0].header['NAXIS2']))\n\td_beam_cube = d_new * hdu_cb[0].data\n\thdu_cb[0].data = np.float32(d_beam_cube)\n\n\tprint('\\tWriting compound beam cube.')\n\thdu_cb.writeto('{}_cb.fits'.format(image_name[:-5]))\n\n\thdu_cb.close()\n\n\t# Clean up the extra Miriad & 2D cb files\n\tos.system('rm -rf {}*.mir'.format(image_name[:-5]))\n\tos.system('rm -rf {}*'.format(cb2d_name[:-5]))", "def ptc_acquisition(self, 
explow=0.1, exphigh=2, expdelta=0.1, laserchannel = 2, lasercurrent=45.0):\n\n #\n self.laser.select(laserchannel)\n self.laser.setCurrent(laserchannel, lasercurrent)\n self.laser.enable()\n\n #self.powerup_CCD()\n self.reb.set_testtype('PTC')\n\n #self.DKD.setup_current_measurements(DKD_range)\n self.PhD.setup_current_measurements(2e-8)\n\n # Create the logging summary file\n summaryfile = os.path.join(eodir, 'summary.log')\n f = open(summaryfile, 'a')\n\n print >>f, \"# power\\t exposure time\\t file name\"\n\n effpow = self.laser.getPower(laserchannel)\n # First take bias frames\n self.log(\"Taking bias\")\n m = self.execute_reb_sequence('ClearBias', 0, 20, True )\n #to have only useful channels:\n fname = \"%s_ptc_bias_%s.fits\" % (serno, self.reb.reb.imgtag)\n i = self.conv_to_fits(channels=validamps)\n # to save FITS HDU with headers\n self.save_to_fits(i, m, fitsname=os.path.join(eodir, fname))\n\n print >>f, effpow, 0, fname\n\n for t in np.arange(explow, exphigh+expdelta, expdelta):\n # pair of flats\n for numpair in [1, 2]:\n effpow = self.laser.getPower(laserchannel)\n m = self.execute_reb_sequence('Acquisition', t)\n #to have only useful channels:\n fname = \"%s_ptc_flat%d_%05d_%s.fits\" % (serno, numpair, int(t*100), self.reb.reb.imgtag)\n i = self.conv_to_fits(channels=validamps)\n # to save FITS HDU with headers\n self.save_to_fits(i, m, fitsname=os.path.join(eodir, fname))\n\n print >>f, effpow, t, fname\n\n f.close()\n\n # Shutting down (not the lamp by default)\n self.laser.disable()\n #self.shutdown_CCD()\n # p = self.reb.start_waiting_sequence()", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def process_event(self, evt):\n det_data = {}\n for det, thisDetDict in zip(self.dets, self.targetVarsXtc):\n try:\n det.getData(evt)\n det.processFuncs()\n thisDetDataDict = getUserData(det)\n img = None\n for key in thisDetDataDict.keys():\n if key=='full_area':\n img = thisDetDataDict[key].astype(float) # needed for detectors whose data are uint16 (Rayonix)\n 
elif key.find('ROI')>=0:\n img = thisDetDataDict[key].astype(float)\n if img is None:\n print('Problem with getting detector area data.')\n continue\n if 'thresADU' in thisDetDict:\n img[img<thisDetDict['thresADU']] = 0\n elif 'thresRms' in thisDetDict:\n img[img<thisDetDict['thresRms']*det.rms] = 0\n\n det_data[det._name] = img # can onky handle full area ROIFunc for now\n \n # calculate variance (see ... for ref)\n \n# if not (key=='full_area' or key.find('ROI')>=0 or key.find('photon_img')>=0):\n# continue\n# if (key=='full_area' or key.find('ROI')>=0):\n# if 'thresADU' in thisDetDict:\n# thisDetDataDict[key][thisDetDataDict[key]<thisDetDict['thresADU']]=0\n# elif 'thresRms' in thisDetDict:\n# thisDetDataDict[key][thisDetDataDict[key]<thisDetDict['thresRms']*det.rms]=0\n# dArray[ib%bins_per_job]=dArray[ib%bins_per_job]+thisDetDataDict[key]\n# else: #if key.find('photon_img')\n# dIArray[ib%bins_per_job]=dIArray[ib%bins_per_job]+thisDetDataDict[key]\n\n# x = thisDetDataDict[key]\n# oldM = dMArray\n# dMArray = dMArray + (x-dMArray)/(ievt+1)\n# dSArray = dSArray + (x-dMArray)*(x-oldM)\n except Exception as e:\n print('Failed to get data for this event for det {}.\\n{}'.format(det._name, e))\n det_data[det._name] = None\n return det_data", "def addAllTasselCapIndices(self,img):\n\t\t\n\t\tdef getTasseledCap(img):\n\t\t\t\"\"\"Function to compute the Tasseled Cap transformation and return an image\"\"\"\n\t\t\t\n\t\t\tcoefficients = ee.Array([\n\t\t\t\t[0.3037, 0.2793, 0.4743, 0.5585, 0.5082, 0.1863],\n\t\t\t\t[-0.2848, -0.2435, -0.5436, 0.7243, 0.0840, -0.1800],\n\t\t\t\t[0.1509, 0.1973, 0.3279, 0.3406, -0.7112, -0.4572],\n\t\t\t\t[-0.8242, 0.0849, 0.4392, -0.0580, 0.2012, -0.2768],\n\t\t\t\t[-0.3280, 0.0549, 0.1075, 0.1855, -0.4357, 0.8085],\n\t\t\t\t[0.1084, -0.9022, 0.4120, 0.0573, -0.0251, 0.0238]\n\t\t\t]);\n\t\t\n\t\t\tbands=ee.List(['blue','green','red','nir','swir1','swir2'])\n\t\t\t\n\t\t\t# Make an Array Image, with a 1-D Array per pixel.\n\t\t\tarrayImage1D = img.select(bands).toArray()\n\t\t\n\t\t\t# Make an Array Image with a 2-D Array per pixel, 6x1.\n\t\t\tarrayImage2D = arrayImage1D.toArray(1)\n\t\t\n\t\t\tcomponentsImage = ee.Image(coefficients).matrixMultiply(arrayImage2D).arrayProject([0]).arrayFlatten([['brightness', 'greenness', 'wetness', 'fourth', 'fifth', 'sixth']]).float();\n\t \n\t\t\t# Get a multi-band image with TC-named bands.\n\t\t\treturn img.addBands(componentsImage);\t\n\t\t\t\n\t\t\t\n\t\tdef addTCAngles(img):\n\n\t\t\t\"\"\" Function to add Tasseled Cap angles and distances to an image. 
Assumes image has bands: 'brightness', 'greenness', and 'wetness'.\"\"\"\n\t\t\t\n\t\t\t# Select brightness, greenness, and wetness bands\t\n\t\t\tbrightness = img.select('brightness');\n\t\t\tgreenness = img.select('greenness');\n\t\t\twetness = img.select('wetness');\n\t \n\t\t\t# Calculate Tasseled Cap angles and distances\n\t\t\ttcAngleBG = brightness.atan2(greenness).divide(math.pi).rename(['tcAngleBG']);\n\t\t\ttcAngleGW = greenness.atan2(wetness).divide(math.pi).rename(['tcAngleGW']);\n\t\t\ttcAngleBW = brightness.atan2(wetness).divide(math.pi).rename(['tcAngleBW']);\n\t\t\ttcDistBG = brightness.hypot(greenness).rename(['tcDistBG']);\n\t\t\ttcDistGW = greenness.hypot(wetness).rename(['tcDistGW']);\n\t\t\ttcDistBW = brightness.hypot(wetness).rename(['tcDistBW']);\n\t\t\timg = img.addBands(tcAngleBG).addBands(tcAngleGW).addBands(tcAngleBW).addBands(tcDistBG).addBands(tcDistGW).addBands(tcDistBW);\n\t\t\t\n\t\t\treturn img;\n\t\n\t\timg = getTasseledCap(img)\n\t\timg = addTCAngles(img)\n\t\treturn img", "def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" #####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()", "def PETImageProcess(PET_Scan):\n PET_Scan = normalise(PET_Scan)\n 
return PET_Scan", "def step(self):\n\n for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.medium.step():\n\n Jc = np.atleast_1d(self._f_Jc(z))\n Ji = np.atleast_1d(self._f_Ji(z))\n Jlw = np.atleast_1d(self._f_Jlw(z))\n Ja = Jc + Ji\n\n # Compute spin temperature\n n_H = self.medium.parcel_igm.grid.cosm.nH(z)\n Ts = self.medium.parcel_igm.grid.hydr.Ts(z,\n data_igm['Tk'], Ja, data_igm['h_2'], data_igm['e'] * n_H)\n\n if self.pf['floor_Ts'] is not None:\n Ts = max(Ts, self.medium.parcel_igm.grid.hydr.Ts_floor(z=z))\n\n # Compute volume-averaged ionized fraction\n if self.pf['include_cgm']:\n xavg = data_cgm['h_2'] + (1. - data_cgm['h_2']) * data_igm['h_2']\n else:\n xavg = data_igm['h_2']\n\n # Derive brightness temperature\n dTb = self.medium.parcel_igm.grid.hydr.get_21cm_dTb(z, Ts, xavg)\n dTb_b = self.medium.parcel_igm.grid.hydr.get_21cm_dTb(z, Ts)\n\n # Add derived fields to data\n data_igm.update({'Ts': Ts, 'dTb': dTb, #'dTb_bulk': dTb_b,\n 'Jc': Jc, 'Ji': Ji, 'Ja': Ja, 'Jlw': Jlw})\n\n # Yield!\n yield t, z, data_igm, data_cgm, RC_igm, RC_cgm", "def manipulations(path):\r\n\r\n print (\"\\n Working on %s\\n\" %(path))\r\n\r\n # Creates a folder with the results for the current image\r\n if not os.path.exists(\"Results\\\\%s\" %(path)):\r\n os.makedirs(\"Results\\\\%s\" %(path))\r\n\r\n # The variations made of the image\r\n func.pixelImage(path, 10, 10)\r\n func.animate(path)\r\n func.colorScale(path, 0)\r\n func.colorScale(path, 1)\r\n func.colorScale(path, 2)\r\n func.scan(path, 280)\r\n func.greyImage(path)\r\n func.colorSteps(path, 1)\r\n func.inverted(path)", "def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)", "def analyze(self):\n # turn off all indicator lights\n self._stop_all()\n \n # run, but catch exceptions and abort if necessary\n try:\n # setup\n self.analysis_led[1].blink\n ims_left = self.num_images\n fluid_left = True\n \n data_session 
= Data(self.data_path)\n \n # run motor & imaging\n while self.power.update() and ims_left > 0:\n # run pump\n self.motor.run(self.pump_runtime)\n \n if not self.power.update():\n break\n \n # image\n time.sleep(self.rest_time)\n self.cam_led.on\n self.camera.capture()\n data_session.fetch_data()\n self.cam_led.off\n \n # subtract from remaining images every cycle\n # if the fluid sensor turns off, set remaining\n # images to the maximum possible remaining\n ims_left -= 1\n if fluid_left and \\\n not self.fluid.update() and \\\n ims_left > self.samps_after_sensor_off:\n fluid_left = False\n ims_left = self.samps_after_sensor_off\n \n # change indicator lights, given complete or power off\n if ims_left == 0:\n # set analysis to green\n self.analysis_led[1].off\n self.analysis_led[0].on\n else:\n # set analysis to solid red\n self.analysis_led[1].on\n \n # transmit data whether or not power switched off\n self.data_led.blink\n data = data_session.prepare_broadcast()\n broadcast_session = Broadcast(self.peer_ip)\n broadcast_session.broadcast_data(data)\n self.data_led.off\n \n except:\n # turn on error indicator and turn off all else\n # do not transmit data\n self._stop_all()\n self.error.on", "def track_iou(detections, sigma_l, sigma_h, sigma_iou, t_min, ttl, mom_alpha=0.95, exp_zoom=1.05, min_area=0):\n\n tracks_active = []\n\n for frame_num, detections_frame in enumerate(detections, start=1):\n # apply low threshold to detections\n dets = [det for det in detections_frame if det['score'] >= sigma_l]\n\n updated_tracks = []\n for track in tracks_active:\n f_dets = list(filter(lambda det: area(det['bbox']) > min_area, dets))\n if len(f_dets) > 0:\n # get det with highest iou and similarity score\n best_match = max(f_dets, key=lambda x: iou(predict_bbox(exp_zoom, track), x['bbox']))\n if iou(track['bboxes'][-1], best_match['bbox']) >= sigma_iou:\n track['bboxes'].append(best_match['bbox'])\n track['max_score'] = max(track['max_score'], best_match['score'])\n if track['inactive'] > 0:\n track['inactive'] = 0\n elif len(track['bboxes']) > 1:\n mom = track['momentum']\n prev_bbox = np.array(track['bboxes'][-2]).reshape(2, 2)\n bbox = np.array(track['bboxes'][-1]).reshape(2, 2)\n\n track['momentum'] = mom * mom_alpha + (bbox - prev_bbox) * (1 - mom_alpha)\n\n updated_tracks.append(track)\n\n # remove from best matching detection from detections\n del dets[dets.index(best_match)]\n\n # if track was not updated, use momentum for 'ttl' frames\n if len(updated_tracks) == 0 or track is not updated_tracks[-1]:\n # if the track's ttl is over\n if track['inactive'] > ttl:\n # finish track when the conditions are met\n if track['max_score'] >= sigma_h and len(track['bboxes']) - track['inactive'] >= t_min:\n if track['inactive'] > 0:\n del track['bboxes'][-track['inactive']:]\n yield track\n else:\n # else use momentum\n # move the bbox and zoom it\n moved_bbox = predict_bbox(exp_zoom, track)\n\n track['bboxes'].append(moved_bbox)\n track['inactive'] += 1\n\n updated_tracks.append(track)\n\n # create new tracks if box large enough\n new_tracks = [\n {\n 'bboxes': [det['bbox']],\n 'max_score': det['score'],\n 'start_frame': frame_num,\n 'momentum': np.zeros((2, 2)),\n 'inactive': 0,\n }\n for det in dets if area(det['bbox']) > min_area\n ]\n tracks_active = updated_tracks + new_tracks\n\n # finish all remaining active tracks\n for track in tracks_active:\n if track['max_score'] >= sigma_h and len(track['bboxes']) >= t_min:\n yield track", "def system_6(in_dir, out_dir, threshold, num_frames, 
num_prev_frames, blend_coef, blur=(3,3), as_numeric=True, stretched=True):\n pass", "def trajectory_error_correcter(trajectories):\r\n\r\n n_birds, n_paramaters, n_time_steps = np.shape(trajectories)\r\n\r\n for i in range(n_birds):\r\n if squared_distance_calculator(trajectories[i, :, 1],\r\n trajectories[i, :, 0]) > 1.5 * min(squared_distance_calculator(\r\n trajectories[i, :, 1], trajectories[i, :, 2]), squared_distance_calculator(\r\n trajectories[i, :, 2], trajectories[i, :, 3]), squared_distance_calculator(\r\n trajectories[i, :, 3], trajectories[i, :, 4])):\r\n for l in range(n_birds):\r\n if squared_distance_calculator(trajectories[i, :, 0],\r\n trajectories[l, :, 1]) < 1.5 * min(squared_distance_calculator(\r\n trajectories[i, :, 1], trajectories[i, :, 2]), squared_distance_calculator(\r\n trajectories[i, :, 2], trajectories[i, :, 3]), squared_distance_calculator(\r\n trajectories[i, :, 3], trajectories[i, :, 4])):\r\n trajectories[i, :, :], trajectories[l, :, :] = trajectory_switcher(trajectories[i, :, :],\r\n trajectories[l, :, :], 1)\r\n break\r\n for j in range(2, n_time_steps):\r\n if squared_distance_calculator(trajectories[i, :, j - 1],\r\n trajectories[i, :, j]) > 1.5 * squared_distance_calculator(\r\n trajectories[i, :, j - 1], trajectories[i, :, j - 2]):\r\n for l in range(n_birds):\r\n if squared_distance_calculator(trajectories[i, :, j - 1],\r\n trajectories[l, :, j]) < 2 * squared_distance_calculator(\r\n trajectories[i, :, j - 1], trajectories[i, :, j - 2]):\r\n trajectories[i, :, :], trajectories[l, :, :] = trajectory_switcher(trajectories[i, :, :],\r\n trajectories[l, :, :], j)\r\n break\r\n return trajectories", "def build_r_map(input_file: str, output_file: str, threshold: float):\n\n DataSiPM = db.DataSiPMsim_only('petalo', 0) # full body PET\n DataSiPM_idx = DataSiPM.set_index('SensorID')\n\n try:\n sns_response = pd.read_hdf(input_file, 'MC/sns_response')\n except ValueError:\n print(f'File {input_file} not found')\n exit()\n except OSError:\n print(f'File {input_file} not found')\n exit()\n except KeyError:\n print(f'No object named MC/sns_response in file {input_file}')\n exit()\n print(f'Analyzing file {input_file}')\n\n sel_df = rf.find_SiPMs_over_threshold(sns_response, threshold)\n\n particles = pd.read_hdf(input_file, 'MC/particles')\n hits = pd.read_hdf(input_file, 'MC/hits')\n events = particles.event_id.unique()\n\n true_r1, true_r2 = [], []\n var_phi1, var_phi2 = [], []\n var_z1, var_z2 = [], []\n\n touched_sipms1, touched_sipms2 = [], []\n\n for evt in events:\n\n ### Select photoelectric events only\n evt_parts = particles[particles.event_id == evt]\n evt_hits = hits [hits .event_id == evt]\n select, true_pos = mcf.select_photoelectric(evt_parts, evt_hits)\n if not select: continue\n\n sns_resp = sel_df[sel_df.event_id == evt]\n if len(sns_resp) == 0: continue\n\n _, _, pos1, pos2, q1, q2 = rf.assign_sipms_to_gammas(sns_resp, true_pos, DataSiPM_idx)\n\n if len(pos1) > 0:\n pos_phi = rf.from_cartesian_to_cyl(np.array(pos1))[:,1]\n _, var_phi = rf.phi_mean_var(pos_phi, q1)\n\n pos_z = np.array(pos1)[:,2]\n mean_z = np.average(pos_z, weights=q1)\n var_z = np.average((pos_z-mean_z)**2, weights=q1)\n r = np.sqrt(true_pos[0][0]**2 + true_pos[0][1]**2)\n\n var_phi1 .append(var_phi)\n var_z1 .append(var_z)\n touched_sipms1.append(len(pos1))\n true_r1 .append(r)\n\n else:\n var_phi1 .append(1.e9)\n var_z1 .append(1.e9)\n touched_sipms1.append(1.e9)\n true_r1 .append(1.e9)\n\n if len(pos2) > 0:\n pos_phi = rf.from_cartesian_to_cyl(np.array(pos2))[:,1]\n _, 
var_phi = rf.phi_mean_var(pos_phi, q2)\n\n pos_z = np.array(pos2)[:,2]\n mean_z = np.average(pos_z, weights=q2)\n var_z = np.average((pos_z-mean_z)**2, weights=q2)\n r = np.sqrt(true_pos[1][0]**2 + true_pos[1][1]**2)\n\n var_phi2 .append(var_phi)\n var_z2 .append(var_z)\n touched_sipms2.append(len(pos2))\n true_r2 .append(r)\n\n else:\n var_phi2 .append(1.e9)\n var_z2 .append(1.e9)\n touched_sipms2.append(1.e9)\n true_r2 .append(1.e9)\n\n a_true_r1 = np.array(true_r1)\n a_true_r2 = np.array(true_r2)\n a_var_phi1 = np.array(var_phi1)\n a_var_phi2 = np.array(var_phi2)\n a_var_z1 = np.array(var_z1)\n a_var_z2 = np.array(var_z2)\n\n a_touched_sipms1 = np.array(touched_sipms1)\n a_touched_sipms2 = np.array(touched_sipms2)\n\n\n np.savez(output_file, a_true_r1=a_true_r1, a_true_r2=a_true_r2, a_var_phi1=a_var_phi1, a_var_phi2=a_var_phi2, a_var_z1=a_var_z1, a_var_z2=a_var_z2, a_touched_sipms1=a_touched_sipms1, a_touched_sipms2=a_touched_sipms2)", "def mic_of_simulation(trajectories):\n\n avpvipsol = trajectories[:, 1:(160+1)]\n navsol = trajectories[:, (160+1):]\n\n per2 = np.hstack([avpvipsol[:, ::4], navsol[:, ::3]])\n numcells = per2.shape[1]\n\n # set up mic calculator\n mic = mp.MINE(alpha=0.6, c=15, est='mic_approx')\n mic_values = []\n for combo in combinations(range(numcells), 2):\n mic.compute_score(per2[:, combo[0]], per2[:, combo[1]])\n mic_values.append(mic.mic())\n\n return mic_values", "def post_process_trap(): \n #################### 0) assign internal values #################### \n from project_parameters import trapType,debug,trapFile,name,driveAmplitude,driveFrequency,Omega,dcplot,weightElectrodes,coefs,ax,az,phi,save,scale\n #from all_functions import find_saddle,plot_potential,dc_potential,set_voltages,exact_saddle,spher_harm_bas,spher_harm_exp,pfit,plotN\n import pickle\n\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n\n qe = trap.configuration.charge\n mass = trap.configuration.mass\n Zval = trap.configuration.position\n r0 = trap.configuration.r0\n RFampl = driveAmplitude \n V0 = mass*(2*np.pi*Omega)**2*(r0*10**-3)**2/qe\n X,Y,Z=trap.instance.X,trap.instance.Y,trap.instance.Z \n data = trap.configuration\n dcVoltages = set_voltages()\n ne = len(weightElectrodes)\n E = trap.instance.E\n out = trap.configuration\n if debug.post_process_trap:\n print(dcVoltages, np.max(dcVoltages)) #np.sum(abs(dcVoltages))\n plotN(dcVoltages,trap,'set DC voltages') \n Vdc = dc_potential(trap,dcVoltages,E)\n #[IDC,JDC,KDC] = find_saddle(Vdc,X,Y,Z,3,Zval) \n #[XDC,YDC,ZDC] = exact_saddle(Vdc,X,Y,Z,3,Zval)\n #XDC,YDC,ZDC = X[IDC],150/scale,Z[KDC]\n #print XDC,YDC,ZDC,IDC,JDC,KDC\n #dcbasis,dcscale= spher_harm_bas(XDC,YDC,ZDC,X,Y,Z,4)\n #QQ = spher_harm_exp(Vdc,dcbasis,dcscale) \n #print QQ[0:9].T\n #1) RF Analysis\n print('RF Analysis') \n Vrf = RFampl*data.EL_RF\n [Irf,Jrf,Krf] = find_saddle(Vrf,X,Y,Z,2,Zval)\n if debug.post_process_trap:\n plot_potential(Vrf,X,Y,Z,dcplot,'weighted RF potential','V_{rf} (eV)',[Irf,Jrf,Krf])\n #2) DC Analysis\n print('DC Analysis')\n trap = dc_potential(trap,dcVoltages,E,update=None)\n Vdc = trap.instance.DC\n [Idc,Jdc,Kdc] = find_saddle(Vdc,X,Y,Z,3,Zval) # only used to calculate error at end\n if debug.post_process_trap:\n plot_potential(Vdc,X,Y,Z,'1D plots','full DC potential')\n #3) determine the exact saddles of the RF and DC\n trap = dc_potential(trap,dcVoltages,E)\n Vdc = trap.instance.DC\n print('Determining exact RF saddle...')\n [Xrf,Yrf,Zrf] = exact_saddle(Vrf,X,Y,Z,2,Zval) \n print('Determining exact DC saddle...')\n [Xdc,Ydc,Zdc] =
exact_saddle(Vdc,X,Y,Z,3,Zval)\n #4) determine stray field (beginning of justAnalyzeTrap)\n print('Determining compensation due to E field...')\n nx,ny,nz=X.shape[0],Y.shape[0],Z.shape[0]\n x,y,z = np.zeros((nx,ny,nz)),np.zeros((nx,ny,nz)),np.zeros((nx,ny,nz))\n for i in range(nx):\n for j in range(ny):\n for k in range(nz):\n x[i,j,k] = X[i]\n y[i,j,k] = Y[j]\n z[i,j,k] = Z[k]\n VlessE = Vdc-E[0]*x-E[1]*y-E[2]*z\n [Xdc,Ydc,Zdc] = exact_saddle(VlessE,X,Y,Z,3) \n dist = np.sqrt((Xrf-Xdc)**2+(Yrf-Ydc)**2+(Zrf-Zdc)**2) \n #5) call pfit to build the total field and determine the trap characteristics\n [fx,fy,fz,theta,Depth,Xe,Ye,Ze] = pfit(Vrf,Vdc,X,Y,Z,Irf,Jrf,Krf)#pfit(trap,E,Freq,RFampl)\n print('Stray field is ({0},{1},{2}) V/m.'.format(scale*E[0],scale*E[1],scale*E[2]))\n print('With this field, the compensation is optimized to {} micron.'.format(scale*dist))\n print('RF saddle: ({0},{1},{2})\\nDC saddle ({3},{4},{5}).'.format(Xrf,Yrf,Zrf,Xdc,Ydc,Zdc)) \n if debug.trap_depth:\n print('The trap escape position is at ({0},{1},{2}) microns, for a trap depth of {3} mV'.format(Xe*scale,Ye*scale,Ze*scale,Depth*scale))\n print('The trap frequencies are fx = {0} MHz, fy = {1} MHz, and fz = {2} MHz'.format(fx*10**-6,fy*10**-6,fz*10**-6))\n #6) Sanity testing; quality check no longer used\n if debug.post_process_trap:\n rfbasis,rfscale= spher_harm_bas(Xrf,Yrf,Zrf,X,Y,Z,2)\n Qrf = spher_harm_exp(Vrf,rfbasis,rfscale) \n if np.sqrt((Xrf-Xdc)**2+(Yrf-Ydc)**2+(Zrf-Zdc)**2)>0.008: \n print('Expanding DC with RF for sanity checking.')\n Qdc = spher_harm_exp(Vdc,rfbasis,rfscale) \n else:\n print('Expanding DC without RF for sanity checking.')\n dcbasis,dcscale= spher_harm_bas(Xdc,Ydc,Zdc,X,Y,Z,2)\n Qdc = spher_harm_exp(Vdc,dcbasis,dcscale) \n Arf = 2*np.sqrt( (3*Qrf[7])**2+(3*Qrf[8])**2 )\n Thetarf = 45*(Qrf[8]/abs(Qrf[8]))-90*np.arctan((3*Qrf[7])/(3*Qrf[8]))/np.pi\n Adc = 2*np.sqrt( (3*Qdc[7])**2+(3*Qdc[8])**2 )\n Thetadc = 45*(Qrf[8]/abs(Qrf[8]))-90*np.arctan((3*Qdc[7])/(3*Qdc[8]))/np.pi\n out.E = E\n out.miscompensation = dist\n out.ionpos = [Xrf,Yrf,Zdc]\n out.ionposIndex = [Irf,Jrf,Krf]\n out.frequency = [fx,fy,fz]\n out.theta = theta\n out.trap_depth = Depth/qe \n out.escapepos = [Xe,Ye,Ze]\n out.Quadrf = 2*np.array([Qrf[7]*3,Qrf[4]/2,Qrf[8]*6,-Qrf[6]*3,-Qrf[5]*3])\n out.Quaddc = 2*np.array([Qdc[7]*3,Qdc[4]/2,Qdc[8]*6,-Qdc[6]*3,-Qdc[5]*3])\n out.Arf = Arf\n out.Thetarf = Thetarf\n out.Adc = Adc\n out.Thetadc = Thetadc\n T = np.array([[2,-2,0,0,0],[-2,-2,0,0,0],[0,4,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0, 0,0,0,1]])\n Qdrf = out.Quadrf.T\n Qddc = out.Quaddc.T\n out.q = (1/V0)*T*Qdrf\n out.alpha = (2/V0)*T*Qddc\n out.Error = [X[Idc]-Xdc,Y[Jdc]-Ydc,Z[Kdc]-Zdc]\n #7) update the trapping field data structure with instance attributes\n trap.configuration=out\n trap.instance.driveAmplitude = driveAmplitude\n trap.instance.driveFrequency = driveFrequency\n trap.instance.coefs = coefs\n trap.instance.ax = ax\n trap.instance.az = az\n trap.instance.phi = phi\n trap.instance.ppt = True\n trap.instance.out = out\n if save==True:\n print('Saving '+trapFile+' as a data structure...')\n with open(trapFile,'wb') as f:\n pickle.dump(trap,f)\n return 'post_process_trap complete' #out # no output needed really", "def qc_illumina(args):\n clarity_epp.qc.illumina.set_avg_q30(lims, args.process_id)", "def filter_and_correct_expression_and_image_features(tissue, model, aggregation, patch_size, M, k, pc_correction=False, tf_correction=False):\n\n\n\n\n # Filter expression\n Y, X, dIDs, tIDs, tfs, ths, t_idx =
extract_final_layer_data(tissue, model, aggregation, patch_size)\n filt_X, filt_tIDs, final_exp_idx = filter_expression(X, tIDs, M, k)\n\n\n\n if pc_correction:\n print ('Correcting with {} expression PCs'.format(pc_correction))\n pca = PCA(n_components=pc_correction)\n\n\n pca_predictors = pca.fit_transform(filt_X)\n\n # Correct Y\n lr = LinearRegression()\n lr.fit(pca_predictors, Y)\n predicted_Y = lr.predict(pca_predictors)\n corrected_Y = Y - predicted_Y\n\n # Correct X\n projected_filt_X = np.dot(pca_predictors,pca.components_)\n corrected_filt_X = filt_X - projected_filt_X\n\n # Set as return variables\n final_X = corrected_filt_X\n final_Y = corrected_Y\n\n elif tf_correction:\n print('Correcting with all technical factors')\n tf_Y = Y[t_idx,:]\n tf_filt_X = filt_X[t_idx,:]\n\n tfs[list(ths).index('SMTSISCH')] = np.log2(tfs[list(ths).index('SMTSISCH')] + 1)\n tf_predictors = tfs\n\n #Correct Y\n lr_Y = LinearRegression()\n lr_Y.fit(tf_predictors, tf_Y)\n tf_Y_predicted = lr_Y.predict(tf_predictors)\n corrected_tf_Y = tf_Y - tf_Y_predicted\n\n #Correct X\n lr_X = LinearRegression()\n lr_X.fit(tf_predictors, tf_filt_X)\n tf_filt_X_predicted = lr_X.predict(tf_predictors)\n corrected_tf_filt_X = tf_filt_X - tf_filt_X_predicted\n\n # Set as return variables\n final_X = corrected_tf_filt_X\n final_Y = corrected_tf_Y\n else:\n # Set unmodified values as return variables\n final_X = filt_X\n final_Y = Y\n\n return final_Y, final_X, dIDs, filt_tIDs, tfs, ths, t_idx", "def main():\n print(\"Program version: 1.5\")\n StartTime = datetime.now()\n args = parseArguments()\n\n verbose = args.verbose\n images = args.images\n ignore_warnings = args.ignore_warnings\n if(args.silent):\n verbose = False\n images = False\n ignore_warnings = True\n\n if(args.images):\n plt.ioff()\n\n if(args.ignore_warnings):\n warnings.simplefilter('ignore', UserWarning)\n\n #sample header keywords\n # OBJECT = 'P016+03_P1_JKdeep' / Original target\n # RA = ' 01:06:37.759' / 01:06:37.7 RA (J2000) pointing\n # DEC = ' 03:32:36.096' / 03:32:36.0 DEC (J2000) pointing\n # EQUINOX = 2000. / Standard FK5 (years)\n # RADECSYS= 'FK5 ' / Coordinate reference frame\n # CRVAL1 = 16.65733 / 01:06:37.7, RA at ref pixel\n # CRVAL2 = 3.54336 / 03:32:36.0, DEC at ref pixel\n # CRPIX1 = 447. /Ref pixel in X\n # CRPIX2 = 452. / Ref pixel in Y\n # CDELT1 = -8.0000000000000E-5 / SS arcsec per pixel in RA\n # CDELT2 = 8.00000000000003E-5 / SS arcsec per pixel in DEC\n # CTYPE1 = 'RA---TAN' / pixel coordinate system\n # CTYPE2 = 'DEC--TAN' / pixel coordinate system\n # PC1_1 = 0.000000 / Translation matrix element\n # PC1_2 = 1.000000 / Translation matrix element\n # PC2_1 = -1.000000 / Translation matrix element\n # PC2_2 = 0.000000 / Translation matrix element\n\n fits_image_filenames = args.input\n\n #if directory given search for appropriate fits files\n\n if(os.path.isdir(fits_image_filenames[0])):\n print(\"detected a directory. 
Will search for fits files in it\")\n path = fits_image_filenames[0]\n fits_image_filenames = []\n for file in os.listdir(path):\n if file.endswith(\".fits\") and \"_astro\" not in file:\n fits_image_filenames.append(path+\"/\"+file)\n print(fits_image_filenames)\n\n multiple = False\n if(len(fits_image_filenames)>1):\n multiple = True\n not_converged = []\n converged_counter = 0\n for fits_image_filename in fits_image_filenames:\n\n result,_ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=0, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent =args.silent, sigma_threshold_for_source_detection= args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if((not result) and args.rotation_scaling):\n print(\"Did not converge. Will try again with full rotation and scaling\")\n result, _ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=args.rotation_scaling, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent=args.silent, sigma_threshold_for_source_detection=args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if(result):\n print(\"Astrometry was determined to be good.\")\n converged_counter = converged_counter+1\n else:\n print(\"Astrometry was determined to be bad.\")\n not_converged.append(fits_image_filename)\n if(args.save_bad_result):\n print(\"Result was saved anyway\")\n else:\n print(\"Result was not saved.\")\n # print(\"\")\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\"> Astrometry for {} \".format(fits_image_filename))\n #\n # with fits.open(fits_image_filename) as hdul:\n # #print(hdul.info())\n # if(args.verbose):\n # print(\"if image is not at first position in the fits file the program will break later on\")\n # #print(hdul[0].header)\n #\n # hdu = hdul[0]\n # #hdu.verify('fix')\n # hdr = hdu.header\n #\n #\n # image_or = hdul[0].data.astype(float)\n # median = np.nanmedian(image_or)\n # image_or[np.isnan(image_or)]=median\n # image = image_or - median\n #\n # observation = find_sources(image, args.vignette)\n # #print(observation)\n #\n # positions = (observation['xcenter'], observation['ycenter'])\n # apertures = CircularAperture(positions, r=4.)\n #\n #\n # #world coordinates\n # print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n # print(WCS(hdr))\n #\n # hdr[\"NAXIS1\"] = image.shape[0]\n # hdr[\"NAXIS2\"] = image.shape[1]\n #\n # #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to 
make problems with pc conversios, so i wwitched to the form below\n # wcsprm = WCS(hdr).wcs\n # wcsprm_original = WCS(hdr).wcs\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n # wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, args.ra, args.dec, args.projection_ra, args.projection_dec)\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n #\n # #print(wcsprm)\n # #wcsprm.pc = [[2, 0],[0,1]]\n #\n #\n # #Possibly usefull examples of how to use wcsprm:\n # #print(wcsprm.set())\n # #print(wcsprm.get_pc())\n # #pc = wcsprm.get_pc()\n # #print(np.linalg.det(pc))\n # #print(wcsprm.get_cdelt())\n # #wcs.fix()\n # #print(wcsprm.print_contents())\n # #print(repr(hdr.update(wcsprm.to_header().encode('utf-8')))) #not working\n #\n # #hdu.verify(\"fix\")\n # #print(repr(hdr))\n # #wcs.wcs_pix2world(pixcrd, 1)\n # #wcs.wcs_world2pix(world, 1)\n # #wcs.wcs.crpix = [-234.75, 8.3393]\n # # wcs.wcs.cdelt = np.array([-0.066667, 0.066667])\n # # wcs.wcs.crval = [0, -90]\n # # wcs.wcs.ctype = [\"RA---AIR\", \"DEC--AIR\"]\n # # wcs.wcs.set_pv([(2, 1, 45.0)])\n # # For historical compatibility, three alternate specifications of the linear transformations\n # # are available in wcslib. The canonical PCi_ja with CDELTia, CDi_ja, and the deprecated CROTAia\n # # keywords. Although the latter may not formally co-exist with PCi_ja,\n # # the approach here is simply to ignore them if given in conjunction with PCi_ja.\n # # has_pc, has_cd and has_crota can be used to determine which of these alternatives are present in the header.\n # # These alternate specifications of the linear transformation matrix are translated immediately to PCi_ja by set\n # # and are nowhere visible to the lower-level routines. In particular, set resets cdelt to unity if CDi_ja is present\n # # (and no PCi_ja). If no CROTAia is associated with the latitude axis, set reverts to a unity PCi_ja matrix.\n #\n #\n #\n #\n #\n # #get rough coordinates\n # #print(hdr[\"RA\"])\n # #coord = SkyCoord(hdr[\"RA\"], hdr[\"DEC\"], unit=(u.hourangle, u.deg), frame=\"icrs\")\n # coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # if(not PIXSCALE_UNCLEAR):\n # if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n # print(\"central value outside of the image, moving it to the center\")\n # coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n # coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # #print(wcsprm)\n #\n #\n #\n # #better: put in nice wrapper! 
with repeated tries and maybe try synchron!\n # print(\">Dowloading catalog data\")\n # radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n # catalog_data = query.get_data(coord, radius, args.catalog)\n # #reference = reference.query(\"mag <20\")\n # max_sources = 500\n # if(INCREASE_FOV_FLAG):\n # max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n # if(catalog_data.shape[0]>max_sources):\n # catalog_data = catalog_data.nsmallest(400, \"mag\")\n #\n # if(args.catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n # print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n # catalog_data2 = query.get_data(coord, radius, \"PS\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n # elif(args.catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n # print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n # catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n #\n # #remove duplicates in catalog?\n #\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n #\n # #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Input for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n #\n # plt.xlim(-200,image.shape[0]+200)\n # plt.ylim(-200,image.shape[1]+200)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_before.pdf\")\n #\n # ###tranforming to match the sources\n # print(\"---------------------------------\")\n # print(\">Finding the transformation\")\n # if(args.rotation_scaling):\n # print(\"Finding scaling and rotation\")\n # wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=args.verbose)\n # if(args.xy_transformation):\n # print(\"Finding offset\")\n # wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= args.verbose)\n #\n # #correct subpixel error\n # obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=3)\n # rms = np.sqrt(np.mean(np.square(distances)))\n # best_score = len(obs_x)/(rms+10) #start with current best score\n # fine_transformation = False\n # if(args.fine_transformation):\n # for i in [2,3,5,8,10,6,4, 20,2,1,0.5]:\n # wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i)\n # if(score> best_score):\n # wcsprm 
= wcsprm_new\n # best_score = score\n # fine_transformation = True\n # if not fine_transformation:\n # print(\"Fine transformation did not improve result so will be discarded.\")\n # else:\n # print(\"Fine transformation applied to improve result\")\n # #register.calculate_rms(observation, catalog_data,wcs)\n #\n # #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n # wcs =WCS(wcsprm.to_header())\n # if(args.verbose):\n # print(wcs)\n #\n # from astropy.wcs import utils\n # scales = utils.proj_plane_pixel_scales(wcs)\n # print(scales)\n # cdelt = wcsprm.get_cdelt()\n # print(cdelt)\n # scale_ratio = scales/cdelt\n # #print(scale_ratio)\n # pc = np.array(wcsprm.get_pc())\n # pc[0,0] = pc[0,0]/scale_ratio[0]\n # pc[1,0] = pc[1,0]/scale_ratio[1]\n # pc[0,1] = pc[0,1]/scale_ratio[0]\n # pc[1,1] = pc[1,1]/scale_ratio[1]\n # wcsprm.pc = pc\n # wcsprm.cdelt = scales\n # if(args.verbose):\n # print(\"moved scaling info to CDelt\")\n # print(WCS(wcsprm.to_header()))\n #\n # #WCS difference before and after\n # print(\"> Compared to the input the Wcs was changed by: \")\n # scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n # print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n # #sources:\n # #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n # #https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n # def unit_vector(vector):\n # \"\"\" Returns the unit vector of the vector. \"\"\"\n # return vector / max(np.linalg.norm(vector), 1e-10)\n # def matrix_angle( B, A ):\n # \"\"\" comment cos between vectors or matrices \"\"\"\n # Aflat = A.reshape(-1)\n # Aflat = unit_vector(Aflat)\n # Bflat = B.reshape(-1)\n # Bflat = unit_vector(Bflat)\n # #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n # return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n # #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n # rotation_angle = matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360\n # if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n # text = \"counterclockwise\"\n # else:\n # text = \"clockwise\"\n # print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n # old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n # print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n #\n #\n # #check final figure\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Result for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_after.pdf\")\n #\n # print(\"--- Evaluate how good the transformation is ----\")\n # 
register.calculate_rms(observation, catalog_data,wcsprm)\n #\n #\n # #updating file\n # write_wcs_to_hdr(fits_image_filename, wcsprm)\n #\n #\n # print(\"overall time taken\")\n # print(datetime.now()-StartTime)\n # if(args.images):\n # plt.show()\n if(multiple):\n print(\">> Final report:\")\n print(\"Processed {} files, {} of them did converge. The following files failed:\".format(len(fits_image_filenames), converged_counter))\n print(not_converged)\n print(\"-- finished --\")", "def analyze(ctx, filename, trigger, threshold, eyecandy, ignore_extra=False,\n fix_missing=False, window_height=None, window_width=None, output=None, notebook=None,\n calibration=None, distance=None, verbose=False, debug=False,processes=None,\n by_channel=False, integrity_filter=0.0): \n #### FILEPATHS\n if not os.path.isfile(filename):\n filename = match_filename(filename)\n data_directory, data_name = os.path.split(filename)\n name, extension = os.path.splitext(data_name)\n analog_file = os.path.join(data_directory, name +'.analog')\n stimulus_file = os.path.join(data_directory, name + \".stimulus\")\n ctx.obj = {\"filename\": os.path.join(data_directory,name)}\n\n if not notebook:\n notebook = find_notebook(data_directory)\n\n #### LOGGING CONFIGURATION\n fh = logging.FileHandler(os.path.join(data_directory,name + '.log'))\n fh.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n if verbose:\n ch.setLevel(logging.INFO)\n # tracemalloc.start()\n elif debug:\n ch.setLevel(logging.DEBUG)\n\n else:\n ch.setLevel(logging.WARNING)\n if processes!=None:\n config.processes = processes\n formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s', '%H:%M:%S')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n logger.info(\"Verbose logging on\")\n\n lab_notebook = glia.open_lab_notebook(notebook)\n experiment_protocol = glia.get_experiment_protocol(lab_notebook, name)\n flicker_version = experiment_protocol[\"flickerVersion\"]\n\n\n #### LOAD STIMULUS\n try:\n ctx.obj[\"stimulus_list\"] = glia.load_stimulus(stimulus_file)\n except OSError:\n print(\"No .stimulus file found. Attempting to create from .analog file.\".format(trigger))\n if flicker_version==0.3:\n ctx.obj[\"stimulus_list\"] = glia.create_stimulus_list(\n analog_file, stimulus_file, notebook, name, eyecandy, ignore_extra,\n calibration, distance, threshold)\n print('finished creating stimulus list')\n elif trigger == \"ttl\":\n raise ValueError('not implemented')\n else:\n raise ValueError(\"invalid trigger: {}\".format(trigger))\n\n #### LOAD SPIKES\n spyking_regex = re.compile('.*\\.result.hdf5$')\n eye = experiment_protocol['eye']\n experiment_n = experiment_protocol['experimentNumber']\n\n date = experiment_protocol['date'].date().strftime(\"%y%m%d\")\n\n retina_id = date+'_R'+eye+'_E'+experiment_n\n if extension == \".txt\":\n ctx.obj[\"units\"] = glia.read_plexon_txt_file(filename,retina_id, channel_map)\n elif re.match(spyking_regex, filename):\n ctx.obj[\"units\"] = glia.read_spyking_results(filename)\n else:\n raise ValueError('could not read {}. 
Is it a plexon or spyking circus file?'.format(filename))\n\n #### DATA MUNGING OPTIONS\n if integrity_filter>0.0:\n good_units = solid.filter_units_by_accuracy(\n ctx.obj[\"units\"], ctx.obj['stimulus_list'], integrity_filter)\n filter_good_units = glia.f_filter(lambda u,v: u in good_units)\n ctx.obj[\"units\"] = filter_good_units(ctx.obj[\"units\"])\n\n if by_channel:\n ctx.obj[\"units\"] = glia.combine_units_by_channel(ctx.obj[\"units\"])\n\n\n # prepare_output\n plot_directory = os.path.join(data_directory, name+\"-plots\")\n config.plot_directory = plot_directory\n\n os.makedirs(plot_directory, exist_ok=True)\n os.chmod(plot_directory, 0o777)\n\n if output == \"pdf\":\n logger.debug(\"Outputting pdf\")\n ctx.obj[\"retina_pdf\"] = PdfPages(glia.plot_pdf_path(plot_directory, \"retina\"))\n ctx.obj[\"unit_pdfs\"] = glia.open_pdfs(plot_directory, list(ctx.obj[\"units\"].keys()), Unit.name_lookup())\n # c connotes 'continuation'\n ctx.obj[\"c_unit_fig\"] = partial(glia.add_to_unit_pdfs,\n unit_pdfs=ctx.obj[\"unit_pdfs\"])\n ctx.obj[\"c_retina_fig\"] = lambda x: ctx.obj[\"retina_pdf\"].savefig(x)\n \n elif output == \"png\":\n logger.debug(\"Outputting png\")\n ctx.obj[\"c_unit_fig\"] = glia.save_unit_fig\n ctx.obj[\"c_retina_fig\"] = glia.save_retina_fig\n os.makedirs(os.path.join(plot_directory,\"00-all\"), exist_ok=True)\n\n for unit_id in ctx.obj[\"units\"].keys():\n name = unit_id\n os.makedirs(os.path.join(plot_directory,name), exist_ok=True)", "def system_5(in_dir, out_dir, threshold, num_frames=150, num_prev_frames=10, blur=(3,3), as_numeric=True, stretched=True):\n filenames = _prepare_filenames(in_dir, num_frames=150)\n initial_background_model = np.array([cv2.imread(f) for f in filenames[0:num_prev_frames]])\n seed_img = mode(initial_background_model)\n previous_frames = deque(initial_background_model, maxlen=num_prev_frames)\n\n for i, f in tqdm(enumerate(filenames[num_prev_frames:])):\n img = lm(cv2.imread(f))", "def check_cti(image, CTI, verbose=0):\n\n#\n# Initialize ctiDict\n#\n ctiDict = {'isCTI': False}\n ctiDict['expnum'] = image['EXPNUM']\n\n # Also create the BAND and NITE keywords if they are not present\n try:\n image['BAND']\n except:\n image['BAND'] = decaminfo.get_band(image['FILTER'])\n try:\n image['NITE']\n except:\n image['NITE'] = decaminfo.get_nite(image['DATE-OBS'])\n\n band = image['BAND'].strip()\n sec = section2slice(image['DATASEC' + CTI['amp']])\n#\n# This could become useful if it is necessary to start examining the opposite amplifier in\n# conjunction with the amplifier that is having a problem\n#\n# if (CTI['amp']==\"A\"):\n# osec = section2slice(image['DATASEC'+'B'])\n# else:\n# osec = section2slice(image['DATASEC'+'A'])\n\n maxiter = 10\n converge_num = 0.0001\n clipsig = 3.0\n\n clip_avg, clip_med, clip_std = lb.medclip(image.data[sec], clipsig, maxiter, converge_num, verbose=0)\n logger.info(' CTI: Global(clipped): median = {:.3f}, stddev = {:.3f} '.format(clip_med, clip_std))\n ctiDict['cmed'] = float(clip_med)\n ctiDict['cstd'] = float(clip_std)\n clow = clip_med - (3.0 * clip_std)\n ctiDict['clow'] = float(clow)\n\n# oclip_avg,oclip_med,oclip_std=medclip(image.data[osec],clipsig,maxiter,converge_num,verbose)\n# print(\" Global(oclipped): median = {:.3f}, stddev = {:.3f} \".format(oclip_med,oclip_std))\n# oclow=oclip_med-(3.0*oclip_std)\n\n#\n# Obtain row-by-row median to look for horizontal striping (also needed to check/reject edgebleeds)\n#\n row_med = np.median(image.data[sec], axis=1)\n wsm = np.where(row_med < clow)\n nrow_low = row_med[wsm].size\n#\n#
Hacky attempt to check for edge-bleed\n#\n iedge = [4, 4091]\n while row_med[iedge[0]] < clow:\n iedge[0] = iedge[0] + 1\n while row_med[iedge[1]] < clow:\n iedge[1] = iedge[1] - 1\n if iedge[0] == 4:\n iedge[0] = 0\n if iedge[1] == 4091:\n iedge[1] = 4095\n nrow_edge = 4096 - (iedge[1] - iedge[0] + 1)\n logger.info(' CTI: Number of low rows: {:d} (nrow_edge={:d}) '.format(nrow_low, nrow_edge))\n\n#\n# Blank out pixels that are below the 3-sigma level with respect to median\n# This removes power from vertical stripes\n#\n wsm = np.where(image.data[sec] < clow)\n npix_low = image.data[sec][wsm].size\n logger.info(' CTI: Number of low pixels: {:d} '.format(npix_low))\n u = image.data[sec] - clip_med\n u[wsm] = 0.0\n#\n# Harder cut currently not needed. If used this would get rid of all pixels below the median level\n# (effectively this reduces the amount that noise suppresses contrast of the auto-correlation signal from CTI)\n#\n# wsm=np.where(u<0.)\n# npix_zero=u[wsm].size\n# logger.info(' CTI: Number of sub-zero pixels: {:d} '.format(npix_zero))\n# u[wsm]=0.0\n\n#\n# Calculate a set of auto-correlations by sampling lags in the x-direction and\n# then two diagonal sets at PA=+/-45 degrees\n# Note: y-direction lags would be susceptible to both bad columns and bleeds.\n# These are normalized by the auto-correlation with lag 0 (defined as 'a' below).\n# Take a maximum lag that will be calculated and use that to trim the image.\n# Note: This both gets rid of most edge-effects automatically but also removes the need to calculate an effective normalization for higher lags\n#\n maxlag = 100\n lagList = [0, 1, 3, 5, 7, 11, 15, 19, 23, 31, 37, 45]\n\n a = np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag:-maxlag, maxlag:-maxlag])\n# b=np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag:-maxlag,maxlag:-maxlag])\n x = [1.0]\n d1 = [1.0]\n d2 = [1.0]\n# vx=[1.0]\n# vd1=[1.0]\n# vd2=[1.0]\n#\n# More lags than those sampled are needed because the diagonal (PA=+/-45) measures will need to be interpolated\n# for comparison to lags in the x-direction.\n#\n\n for lag in lagList:\n if lag != 0:\n x.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag:-maxlag, maxlag - lag:-maxlag - lag]) / a)\n d1.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag-lag:-maxlag - lag, maxlag - lag:-maxlag - lag]) / a)\n d2.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag-lag:-maxlag - lag, maxlag + lag:-maxlag + lag]) / a)\n# vx.append(np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag:-maxlag,maxlag-lag:-maxlag-lag])/b)\n# vd1.append(np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag-lag:-maxlag-lag,maxlag-lag:-maxlag-lag])/b)\n# vd2.append(np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag-lag:-maxlag-lag,maxlag+lag:-maxlag+lag])/b)\n\n data = {'lag': np.array(lagList),\n 'x': np.array(x),\n 'd1': np.array(d1),\n 'd2': np.array(d2)\n# 'vx':np.array(vx),\n# 'vd1':np.array(vd1),\n# 'vd2':np.array(vd2)\n }\n\n r2 = np.sqrt(2.0)\n l1 = data['lag']\n l2 = data['lag'] * r2\n x1 = data['x']\n d1i = np.interp(data['lag'], l2, data['d1'])\n d2i = np.interp(data['lag'], l2, data['d2'])\n rd1 = data['x'] / d1i\n rd2 = data['x'] / d2i\n\n# vx1=data['vx']\n# vd1i=np.interp(data['lag'],l2,data['vd1'])\n# vd2i=np.interp(data['lag'],l2,data['vd2'])\n# vrd1=data['vx']/vd1i\n# vrd2=data['vx']/vd2i\n## vdx=data['x']/data['vx']\n# vdx=(rd1+rd2)/(vrd1+vrd2)\n\n logger.info(' CTI: lags {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(l1[3], l1[4], l1[6], l1[8], l1[10]))\n logger.info(' CTI: lx {:8.2f} {:8.2f} {:8.2f} {:8.2f}
{:8.2f} '.format(x1[3], x1[4], x1[6], x1[8], x1[10]))\n logger.info(' CTI: d1i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d1i[3], d1i[4], d1i[6], d1i[8], d1i[10]))\n logger.info(' CTI: d2i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d2i[3], d2i[4], d2i[6], d2i[8], d2i[10]))\n logger.info(' CTI: ld1 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd1[3], rd1[4], rd1[6], rd1[8], rd1[10]))\n logger.info(' CTI: ld2 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd2[3], rd2[4], rd2[6], rd2[8], rd2[10]))\n# logger.info(' CTI: lvx {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vx1[3],vx1[4],vx1[6],vx1[8],vx1[10]))\n# logger.info(' CTI:vd1i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd1i[3],vd1i[4],vd1i[6],vd1i[8],vd1i[10]))\n# logger.info(' CTI:vd2i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd2i[3],vd2i[4],vd2i[6],vd2i[8],vd2i[10]))\n# logger.info(' CTI:vld1 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd1[3],vrd1[4],vrd1[6],vrd1[8],vrd1[10]))\n# logger.info(' CTI:vld2 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd2[3],vrd2[4],vrd2[6],vrd2[8],vrd2[10]))\n# logger.info(' CTI:vdx0 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vdx[3],vdx[4],vdx[6],vdx[8],vdx[10]))\n\n#\n# Set band dependent thresholds...\n# Note the criteria used are based on an empirical study of the one example we currently have (CCD=41, Y6)\n#\n nrow_lim = 5\n if band != \"Y\":\n cclim = 0.9\n else:\n cclim = 1.15\n#\n# Now check and set flag based on empirical criteria.\n# First are the horizontal streaks that can appear...\n# Second are the comparison of the auto-correlation in the x and average of the diagonal directions\n#\n flag_cti = False\n if nrow_low - nrow_edge >= nrow_lim:\n flag_cti = True\n\n avg_rd = (rd1 + rd2) / 2.0\n if avg_rd[3] > cclim and avg_rd[4] > cclim and avg_rd[6] > cclim:\n flag_cti = True\n\n if flag_cti:\n ctiDict['isCTI'] = True\n\n return ctiDict", "def integrateObservation(self, img,action):\n self.States.append(img)\n self.Actions.append(action) \n #self.printLevelScene()", "def motionCorrection(baseName, outDir, rawDataFile, granularity ='plane',\n maxDisplacement=[40, 40],\n nCpu=(multiprocessing.cpu_count() - 1), \n exportFrames=True):\n try:\n # instead of os.makedirs use os.mkdir\n # the former also creates intermediate dirs\n os.mkdir(outDir + '/' + baseName)\n except OSError:\n print('Either directory exists or \\\n the intermediate directories do not exist!')\n raise\n print(\"--Motion correction started with %s...\" % baseName)\n # Checking if there are two cycles of images. Important to know otherwise \n # you get merged motion corrected images.\n allImages = (glob.glob(rawDataFile))\n tseries_names = [os.path.basename(imageName).split('_')[0] for imageName in allImages]\n uniqueTSeries = Counter(tseries_names).keys()\n tseries_lengths = Counter(tseries_names).values()\n \n if len(uniqueTSeries) > 1:\n print('---More than 1 T-Series found. Aligning them together...')\n\n match = [int(re.search(r'Cycle(.*?)_',imageName).group(1)) for imageName in allImages]\n uniqueCycles = numpy.unique(numpy.asarray(match))\n \n if len(uniqueCycles) > 1:\n warnstr='More than 1 image cycle detected. 
Aborting alignment.'\n warnings.warn(warnstr)\n return \n sequence = [sima.Sequence.create('TIFFs', [[rawDataFile]])]\n print(\"Creating sima dataset of non-aligned images\")\n nonAlignedDatasetDir = outDir + '/' + baseName + '/' + 'TIFFs.sima'\n sima.ImagingDataset(sequence, nonAlignedDatasetDir)\n\n print(\"Running motion correction.\")\n \n mc_approach = sima.motion.HiddenMarkov2D(granularity=granularity,\n max_displacement=maxDisplacement,\n verbose=True,\n n_processes=nCpu)\n print(\"Creating sima dataset of aligned images\")\n motCorrDir = outDir + '/' + baseName + '/' + 'motCorr.sima'\n dataset = mc_approach.correct(sequence, motCorrDir)\n\n if exportFrames:\n print(\"Exporting motion-corrected movies.\")\n \n for iTSeries, curr_T_series in enumerate(uniqueTSeries):\n \n start_frame = tseries_names.index(curr_T_series)\n end_frame = start_frame+tseries_lengths[iTSeries]\n dataset[0,start_frame:end_frame].export_frames([[[os.path.join(motCorrDir,\n '{t}_motCorr.tif'.format(t = curr_T_series))]]],\n fill_gaps=True)\n \n\n print(\"--Motion correction done with %s...\" % baseName)\n\n return uniqueTSeries", "def trajectory_error_correcter_improved(trajectories):\r\n\r\n n_birds, n_paramaters, n_time_steps = np.shape(trajectories)\r\n\r\n conditional_squared_distance = 3 * min(squared_distance_calculator(\r\n trajectories[0, :, 1], trajectories[0, :, 2]), squared_distance_calculator(\r\n trajectories[0, :, 2], trajectories[0, :, 3]), squared_distance_calculator(\r\n trajectories[0, :, 3], trajectories[0, :, 4]))\r\n\r\n difference_array = trajectories[:, :, 1:] - trajectories[:, :, :-1]\r\n squared_distance_array = np.sum(difference_array ** 2, axis=1) # creates array with shape (n_birds, n_time_steps-1)\r\n splits_array = squared_distance_array > conditional_squared_distance # Creates boolean array with True at location of splits\r\n splits_indices = np.array(np.nonzero(splits_array)) # Returns array with shape (n_axes, n_splits)\r\n\r\n counter = 0\r\n limit = 510000\r\n while len(splits_indices[0, :]) != 0 and counter < limit:\r\n counter += 1\r\n indices_of_birds_with_same_split = list(np.nonzero(splits_indices[1, :] == splits_indices[1, 0]))[0]\r\n position_of_first_bird = trajectories[splits_indices[0, 0], :, splits_indices[1, 0]]\r\n for count, i in enumerate(indices_of_birds_with_same_split):\r\n position_of_second_bird = trajectories[splits_indices[0, i], :, splits_indices[1, i] + 1]\r\n if squared_distance_calculator(position_of_first_bird,\r\n position_of_second_bird) < conditional_squared_distance:\r\n trajectories[splits_indices[0, 0], :, :], trajectories[splits_indices[0, i], :,\r\n :] = trajectory_switcher(\r\n trajectories[splits_indices[0, 0], :, :],\r\n trajectories[splits_indices[0, i], :, :], splits_indices[1, i] + 1)\r\n splits_array[splits_indices[0, 0], :], splits_array[splits_indices[0, i], :] = splits_array_switcher(\r\n splits_array[splits_indices[0, 0], :],\r\n splits_array[splits_indices[0, i], :], splits_indices[1, i])\r\n\r\n splits_array[splits_indices[0, 0], splits_indices[\r\n 1, 0]] = False # CHANGE SPLITS_ARRAY AT LOCATION OF SPLIT MANUALLY.\r\n # splits_array[splits_indices[0, i], splits_indices[1, i]] = False\r\n splits_indices = np.array(np.nonzero(splits_array))\r\n break\r\n if counter%4000 == 0:\r\n print(f\"{counter} - Corrections left: {len(splits_indices[0, :])}\")\r\n if counter == limit:\r\n print(\"The trajectory correction failed\")\r\n return trajectories, False\r\n # print(f\"The number of corrections left is {len(splits_indices[0, 
:])}\")\r\n # trajectory_plotter(trajectories)\r\n return trajectories, True", "def trap_depth(V,X,Y,Z,Im,Jm,Km): \n from project_parameters import debug,scale,position\n #from all_functions import sum_of_e_field,spher_harm_bas,spher_harm_exp,spher_harm_cmp,find_saddle\n def a(a,N):\n \"\"\"Shortcut function to convert array a into a row vector.\"\"\" \n a=np.ravel(a, order='F') # Same order\n return a\n N1,N2,N3=V.shape\n N=N1*N2*N3\n [Ex,Ey,Ez]=np.gradient(V,abs(X[1]-X[0])/scale,abs(Y[1]-Y[0])/scale,abs(Z[1]-Z[0])/scale)\n E=np.sqrt(Ex**2+Ey**2+Ez**2)\n # identify the escape position and height by checking each point\n minElectricField=np.max(E) # initialize as maximum E field magnitude\n distance=0\n escapeHeight=1\n escapePosition=[0,0,0]\n [Im,Jm,Km] = find_saddle(V,X,Y,Z,3)\n Vm = V[Im,Jm,Km]\n for i in range(N1):\n for j in range(N2):\n for k in range(N3):\n if E[i,j,k]<minElectricField:\n distance=abs(np.sqrt((Im-i)**2+(Jm-j)**2+(Km-k)**2)) \n if distance > 6:\n minElectricField=E[i,j,k]\n escapeHeight=V[i,j,k]\n escapePosition=[i,j,k]\n if debug.trap_depth: \n print(E[i,j,k], V[i,j,k], [i,j,k], distance)\n check=1 \n if debug.trap_depth: \n print(minElectricField, escapeHeight, escapePosition, distance) \n if distance<check:\n print('trap_depth.py: Escape point too close to trap minimum. Improve grid resolution or extend grid.')\n if escapeHeight>0.2:\n print('trap_depth.py: Escape point parameter too high. Improve grid resolution or extend grid.')\n D=escapeHeight-Vm\n [Ie,Je,Ke]=escapePosition\n [Xe,Ye,Ze]=[X[Ie],Y[Je],Z[Ke]] \n return [D,Xe,Ye,Ze]", "def pan_corr(file):\n\n # # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n #\n # # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_1D_py.tif'\n # in_ref = in_path + in_pan_ref_file\n #\n # inreffil = gdal.Open(in_ref)\n # image_ref = inreffil.ReadAsArray()\n # # size_ref = image_ref.shape\n # # pix_count = size_ref[0]*size_ref[1]\n #\n # image_ref = image_ref[800:930, 1420:1640]\n # size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n #\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # raz_g1 = 1\n # raz_g2 = g1_avg/g2_avg\n # raz_r1 = g1_avg/r1_avg\n # raz_b1 = g1_avg/b1_avg\n #\n # avg = (g1+g2+r1+b1)/pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n raz_g1 = 1\n raz_g2 = 1.0245196396115988\n raz_r1 = 1.0131841989689434\n raz_b1 = 1.0517113199247086\n\n print('ratio:', raz_g1, raz_g2, raz_r1, raz_b1)\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_4D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # in_pan_ref_file
= 'NHDPflat_3D_py.tif'\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Slo_PAN\\_26_30\\\\'\n # in_pan_ref_file = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"tif\")]\n\n \n\n \n\n # print('image', i)\n in_ref=file\n inreffil = gdal.Open(in_ref)\n image_ref = inreffil.ReadAsArray()\n size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n # pix_count = np.count_nonzero(image_ref)\n # pix_count = 3664*650\n\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # avg = (g1 + g2 + r1 + b1) / pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n # popravek\n im_p_pop = np.zeros((size_ref[0], size_ref[1]), np.uint16)\n\n\n for i in range(size_ref[0]):\n for j in range(size_ref[1]):\n if (i % 2) == 0 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g1\n if (i % 2) == 1 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g2\n if (i % 2) == 0 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_r1\n if (i % 2) == 1 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_b1\n \n _,_,_,_,P=return_flatfield_set_path(2)\n P_flat=gdal_array.LoadFile(P)\n \n # im_p_pop=simple_flatfield_corr(P_flat, im_p_pop, 2, 1) \n \n # outout\n \n im_p_pop=BLUE_simple_flatfield_corr(P_flat, im_p_pop)\n \n out=os.path.abspath(file)+\"/corr/\"+os.path.basename(file)[:-4] + \"_pop_flat_corr.tif\"\n\n \n # out = in_ref[:-4] + \"_pop_flat_corr.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n # outRaster = driver.Create(out, size[1], size[0], 3, gdal.GDT_UInt16)\n outRaster = driver.Create(out, size_ref[1], size_ref[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(im_p_pop)\n outband.FlushCache()", "def light_measure_rn_weit(data, weit_data, pix_size, cx, cy, z0, R_low, R_up):\n\tDa0 = Test_model.angular_diameter_distance(z0).value\n\tR_pix_low = (R_low * 1e-3 * rad2arcsec / Da0) / pix_size\n\tR_pix_up = (R_up * 1e-3 * rad2arcsec / Da0) / pix_size\n\n\tNx = data.shape[1]\n\tNy = data.shape[0]\n\tx0 = np.linspace(0, Nx-1, Nx)\n\ty0 = np.linspace(0, Ny-1, Ny)\n\tpix_id = np.array(np.meshgrid(x0,y0))\n\n\t#..center pixel point\n\tdev_05_x = cx - np.int( cx )\n\tdev_05_y = cy - np.int( cy )\n\n\tif dev_05_x > 0.5:\n\t\txn = np.int( cx ) + 1\n\telse:\n\t\txn = np.int( cx )\n\n\tif dev_05_y > 0.5:\n\t\tyn = np.int( cy ) + 1\n\telse:\n\t\tyn = np.int( cy )\n\n\tdr = np.sqrt(((2*pix_id[0] + 1) / 2 - (2*xn + 1) / 2)**2 + ((2*pix_id[1] + 1) / 2 - (2*yn + 1) / 2)**2)\n\tidu = (dr >= R_pix_low) & (dr <= R_pix_up)\n\n\ttheta = np.arctan2((pix_id[1,:] - yn), (pix_id[0,:] - xn))\n\tchi = theta * 180 / np.pi\n\n\tsamp_chi = chi[idu]\n\tsamp_flux = data[idu]\n\tweit_arr = weit_data[idu]\n\tIntns = np.nansum( samp_flux * weit_arr ) / np.nansum( weit_arr )\n\n\tid_nn = np.isnan(samp_flux)\n\tN_pix = np.sum( id_nn == False )\n\tnsum_ratio = np.nansum(weit_arr) / np.sum( id_nn == False )\n\n\tcdr = R_up - R_low\n\td_phi = ( 
cdr / (0.5 * (R_low + R_up) ) ) * 180 / np.pi\n\tN_phi = np.int(360 / d_phi) + 1\n\tphi = np.linspace(0, 360, N_phi)\n\tphi = phi - 180.\n\n\ttmpf = []\n\tfor tt in range(len(phi) - 1):\n\t\tidv = (samp_chi >= phi[tt]) & (samp_chi <= phi[tt + 1])\n\n\t\tset_samp = samp_flux[idv]\n\t\tset_weit = weit_arr[idv]\n\n\t\tttf = np.nansum(set_samp * set_weit) / np.nansum( set_weit )\n\t\ttmpf.append(ttf)\n\n\t# rms of flux\n\ttmpf = np.array(tmpf)\n\tid_inf = np.isnan(tmpf)\n\ttmpf[id_inf] = np.nan\n\tid_zero = tmpf == 0\n\ttmpf[id_zero] = np.nan\n\n\tid_nan = np.isnan(tmpf)\n\tid_fals = id_nan == False\n\tTmpf = tmpf[id_fals]\n\n\tRMS = np.std(Tmpf)\n\tif len(Tmpf) > 1:\n\t\tIntns_err = RMS / np.sqrt(len(Tmpf) - 1)\n\telse:\n\t\tIntns_err = RMS\n\n\t#Intns_r = (0.5 * (R_low + R_up) )\n\tcen_r = np.nansum(dr[idu] * weit_arr) / np.nansum( weit_arr ) * pix_size\n\tIntns_r = cen_r * Da0 * 1e3 / rad2arcsec\n\n\tIntns, Intns_err = Intns / pix_size**2, Intns_err / pix_size**2\n\n\treturn Intns, Intns_r, Intns_err, N_pix, nsum_ratio", "def preprocess_RSofia(img):\n for i in range(480):\n for j in range(200):\n img[i,j]=0\n for i in range(150):\n for j in range(500):\n img[i,j]=0 \n for i in range(475):\n for j in range(90):\n img[i,img.shape[1]-1-j]=0\n for i in range(90):\n for j in range(465):\n img[i,img.shape[1]-1-j]=0\n return img", "def continue_with_exposure(self):\r\n # Allocate space to give to scan_until_abort, and name the two\r\n # rows appropriately.\r\n self.data_pair = self.cam.get_new_array(n_images=2)\r\n self.pump_probe_data = self.data_pair[0]\r\n self.probe_only_data = self.data_pair[1]\r\n # Keep track of which image will be updated next\r\n self.next_data_has_pump = True\r\n\r\n # Tell self.thread what to do when the camera has new images\r\n self.cam.new_images.connect(self.send_new_images)\r\n\r\n # Get the current array of wavelengths from cam\r\n self.wavelen_arr = self.cam.get_wavelen_array()\r\n\r\n # Queue a call to cam.scan_until_abort\r\n self.startAcq.emit(self.data_pair)\r\n\r\n # Tell listeners (plotting widgets) to start displaying data too\r\n self.startDisplay.emit()", "def main():\n\n rules = parse_input(get_input())\n for part in [5, 18]:\n image = np.array(START_PATTERN).astype(bool)\n for i in range(part):\n image = enlarge(image, rules)\n count = sum(sum(ch for ch in row) for row in image)\n\n print(\"Number of # in the final matrix after {} iterations is {}.\".format(part, count))\n return", "def image_processing(eye_frame, threshold):\n kernel = np.ones((3, 3), np.uint8)\n new_frame = cv2.bilateralFilter(eye_frame, 10, 15, 15)\n new_frame = cv2.erode(new_frame, kernel, iterations=3)\n new_frame = cv2.threshold(new_frame, threshold, 255, cv2.THRESH_BINARY)[1]\n\n return new_frame", "def process_image_for_heuristic_af(self, tile_key):\n\n # image provided as numpy array from dictionary,\n # already cropped to 512x512\n img = self.img[tile_key]\n mean = int(np.mean(img))\n # recast as int16 before mean subtraction\n img = img.astype(np.int16)\n img -= mean\n # Autocorrelation:\n norm = np.sum(img ** 2)\n autocorr = fftconvolve(img, img[::-1, ::-1])/norm\n height, width = autocorr.shape[0], autocorr.shape[1]\n # Crop to 64-pixel central region:\n autocorr = autocorr[int(height/2 - 32):int(height/2 + 32),\n int(width/2 - 32):int(width/2 + 32)]\n # Calculate coefficients:\n fi = self.muliply_with_mask(autocorr, self.fi_mask)\n fo = self.muliply_with_mask(autocorr, self.fo_mask)\n apx = self.muliply_with_mask(autocorr, self.apx_mask)\n amx = 
self.muliply_with_mask(autocorr, self.amx_mask)\n apy = self.muliply_with_mask(autocorr, self.apy_mask)\n amy = self.muliply_with_mask(autocorr, self.amy_mask)\n # Check if tile_key already in dictionary:\n if not (tile_key in self.foc_est):\n self.foc_est[tile_key] = []\n if not (tile_key in self.astgx_est):\n self.astgx_est[tile_key] = []\n if not (tile_key in self.astgy_est):\n self.astgy_est[tile_key] = []\n # Calculate single-image estimators for current tile key:\n if len(self.foc_est[tile_key]) > 1:\n self.foc_est[tile_key].pop(0)\n self.foc_est[tile_key].append(\n (fi - fo) / (fi + fo))\n if len(self.astgx_est[tile_key]) > 1:\n self.astgx_est[tile_key].pop(0)\n self.astgx_est[tile_key].append(\n (apx - amx) / (apx + amx))\n if len(self.astgy_est[tile_key]) > 1:\n self.astgy_est[tile_key].pop(0)\n self.astgy_est[tile_key].append(\n (apy - amy) / (apy + amy))", "def imageProcessing(filepath):\n imagedata = []\n for img in glob.glob(filepath):\n edit_image = cv2.imread(img, cv2.IMREAD_GRAYSCALE)\n edit_image = cv2.resize(255-edit_image, (28, 28))\n\n cv2.imwrite(img, edit_image)\n (thresh, edit_image) = cv2.threshold(edit_image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n while np.sum(edit_image[0]) == 0:\n edit_image = edit_image[1:]\n\n while np.sum(edit_image[:,0]) == 0:\n edit_image = np.delete(edit_image,0,1)\n\n while np.sum(edit_image[-1]) == 0:\n edit_image = edit_image[:-1]\n\n while np.sum(edit_image[:,-1]) == 0:\n edit_image = np.delete(edit_image,-1,1)\n\n rows,cols = edit_image.shape\n if rows > cols:\n factor = 20.0/rows\n rows = 20\n cols = int(round(cols*factor))\n edit_image = cv2.resize(edit_image, (cols,rows))\n else:\n factor = 20.0/cols\n cols = 20\n rows = int(round(rows*factor))\n edit_image = cv2.resize(edit_image, (cols, rows))\n\n colsPadding = (int(math.ceil((28-cols)/2.0)),int(math.floor((28-cols)/2.0)))\n rowsPadding = (int(math.ceil((28-rows)/2.0)),int(math.floor((28-rows)/2.0)))\n edit_image = np.lib.pad(edit_image,(rowsPadding,colsPadding),'constant')\n shiftx,shifty = getBestShift(edit_image)\n shifted = shift(edit_image,shiftx,shifty)\n edit_image = shifted\n edit_image = edit_image.flatten()\n imagedata.append(edit_image)\n return imagedata", "def system_7(seed_img_file, in_dir, out_dir, threshold, num_frames, num_prev_frames, blend_coef, blur=(3,3), as_numeric=True, stretched=True):\n pass", "def autoAnalyze(self):\n print(\"Perfoming full automatic analysis...\")\n t1=time.perf_counter()\n self.cleanUp()\n self.figure_rois()\n self.figure_roi_inspect_all()\n self.figure_dGoR_roi(showEach=False,saveAs=self.folderSave+\"/avg.png\")\n self.figure_dGoR_roi(showEach=True,saveAs=self.folderSave+\"/each.png\")\n self.index()\n print(\"analysis completed in %.02f sec\"%(time.perf_counter()-t1))", "def recompute_exit_pupil(self):\r\n\r\n rearZ = self.rear_z()\r\n if rearZ <= 0.0:\r\n print('Not focus')\r\n rearRadius = self.rear_aperture()\r\n samples = 1024 * 1024\r\n half = 2.0 * rearRadius\r\n proj_bmin, proj_bmax = ti.Vector([-half, -half]), ti.Vector([half, half])\r\n for i in range(pupil_interval_count):\r\n r0 = ti.cast(i, ti.f32) / pupil_interval_count * self.film_diagnal / 2.0\r\n r1 = ti.cast(i + 1, ti.f32) / pupil_interval_count * self.film_diagnal / 2.0\r\n bmin, bmax = make_bound2()\r\n count = 0\r\n for j in range(samples):\r\n u, v= ti.random(), ti.random()\r\n film_pos = ti.Vector([lerp(ti.cast(j, ti.f32)/samples, r0, r1), 0.0, 0.0])\r\n x, y = lerp(u, -half, half), lerp(v, -half, half)\r\n lens_pos = ti.Vector([x, y, rearZ])\r\n if 
inside_aabb(bmin, bmax, ti.Vector([x, y])):\r\n ti.atomic_add(count, 1)\r\n else:\r\n ok, _, _ = self.gen_ray_from_film(film_pos, (lens_pos - film_pos).normalized())\r\n if ok:\r\n bmin, bmax = bound_union_with(bmin,bmax, ti.Vector([x, y]))\r\n ti.atomic_add(count, 1)\r\n\r\n if count == 0:\r\n bmin, bmax = proj_bmin, proj_bmax\r\n\r\n # extents pupil bound\r\n delta = 2 * (proj_bmax - proj_bmin).norm() / ti.sqrt(samples)\r\n bmin -= delta\r\n bmax += delta\r\n\r\n self.exitPupilBoundMin[i] = bmin\r\n self.exitPupilBoundMax[i] = bmax", "def step_run(cls, image, config):\n logger.info('Gain correction will be applied to %s', image)\n\n ret_code = cls.__call__(image)\n return ret_code" ]
[ "0.5652331", "0.5335536", "0.52658623", "0.5259198", "0.5201728", "0.51909906", "0.515129", "0.5142113", "0.5076965", "0.5063984", "0.5055916", "0.49459288", "0.4884683", "0.48668435", "0.4824002", "0.482143", "0.47895026", "0.47749937", "0.47587273", "0.4741739", "0.4710989", "0.46998623", "0.46991298", "0.46979156", "0.4692717", "0.4690254", "0.46848482", "0.46795285", "0.46666843", "0.46552742", "0.46539572", "0.46506596", "0.46461558", "0.46368298", "0.46339366", "0.46324965", "0.46317604", "0.46298322", "0.46297243", "0.46284553", "0.4627619", "0.46249053", "0.4621805", "0.46157008", "0.4607761", "0.46051344", "0.46003705", "0.45951575", "0.45918143", "0.45905232", "0.45862117", "0.4585815", "0.4582318", "0.4579147", "0.45787397", "0.45777598", "0.45760685", "0.45703727", "0.4568256", "0.45583394", "0.45569658", "0.45555323", "0.45550525", "0.45546493", "0.45470822", "0.4547025", "0.45453024", "0.4544413", "0.4542224", "0.4541217", "0.4540933", "0.45391735", "0.453897", "0.453802", "0.45259672", "0.4519855", "0.45186555", "0.4518203", "0.4516783", "0.4515055", "0.4507584", "0.45060053", "0.45056754", "0.45049143", "0.4497676", "0.4496176", "0.44961554", "0.44916448", "0.44911525", "0.4489983", "0.44887206", "0.44873115", "0.44793153", "0.44784293", "0.4476427", "0.4475186", "0.44735175", "0.4470664", "0.44679096", "0.44641295" ]
0.5542998
1
Returns intensity profile of 1d gaussian beam
def gaussian1d(x, x0, w0, A, offset):
    if w0 == 0:
        return 0
    return A * np.exp(-2 * (x - x0) ** 2 / (w0 ** 2)) + offset
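A minimal usage sketch for the gaussian1d document above (illustration only; the sample grid and the center/waist/amplitude/offset values are assumed, not taken from the dataset row):

import numpy as np

x = np.linspace(-5, 5, 11)  # assumed 1D sample grid, includes x = 0
profile = gaussian1d(x, x0=0.0, w0=1.5, A=1.0, offset=0.05)
print(profile.max())  # 1.05 at the beam center, A + offset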
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pvalue_gaussian(self):\n \n pv = 2 * stats.norm.sf(abs(self.TS_prime_obs), loc=0, scale=1)\n return(pv)", "def gaussian(amp, fwhm, mean, x):\n return amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)", "def estimate_uni_gaussian(X):\n mu = mean(X, axis=0)\n sigma2 = var(X, axis=0)\n return mu, sigma2", "def parzen_windowing_gaussian(self, img: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n img = torch.clamp(img, 0, 1)\n img = img.reshape(img.shape[0], -1, 1) # (batch, num_sample, 1)\n weight = torch.exp(\n -self.preterm.to(img) * (img - self.bin_centers.to(img)) ** 2\n ) # (batch, num_sample, num_bin)\n weight = weight / torch.sum(weight, dim=-1, keepdim=True) # (batch, num_sample, num_bin)\n probability = torch.mean(weight, dim=-2, keepdim=True) # (batch, 1, num_bin)\n return weight, probability", "def intensity(self):\r\n return np.power(prb.amplitude, 2)", "def intensity(self) -> int:", "def gaussian(amp, fwhm, mean):\n return lambda x: amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)", "def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def intensityLogGauss(sample,bins,beta):\r\n\r\n width=1/bins\r\n data,edges=np.histogram(sample,bins=bins,range=(0,1))\r\n distMat=width*np.array([[np.abs(i-j) for i in range(bins)] for j in range(bins)])\r\n\r\n model=pm.Model()\r\n\r\n print('building model')\r\n with model:\r\n\r\n beta=beta\r\n sigmaSq=pm.HalfNormal('sigmaSq',5)\r\n\r\n chol=np.sqrt(sigmaSq)*cholesky(pm.math.exp(-1.*distMat**2/(2.*beta**2))+1e-6*np.eye(bins))\r\n y=pm.Normal('gaussfield',mu=0,sigma=1,shape=bins)\r\n\r\n lam=pm.Deterministic('intensity',width*pm.math.exp(pm.math.dot(chol,y)))\r\n k=pm.Poisson('points',mu=lam,observed=data)\r\n\r\n print('model built, start sampling')\r\n trace=pm.sample(draws=1000, tune=500,chains=1)\r\n\r\n #pm.traceplot(trace,varnames=['intensity'])\r\n #plt.show()\r\n\r\n return trace", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def gaussian(centre, k, intensity, xpos):\r\n\treturn intensity * np.exp(- np.power(k * (xpos - centre), 2))", "def _FWHMGauss(sigma, pixel=12):\n return sigma*2*np.sqrt(2*np.log(2))*pixel", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def calc_psf_fwhm_inpix_gaussian(arr):\n\tmodel = fit_gaussian(arr)\n\n\tsigma = max(model.y_stddev, model.x_stddev)\n\tfwhm = 2.355 * sigma\n\n\treturn fwhm", "def gaussian_highpass(image):\n lowpass = ndimage.gaussian_filter(image, 2)\n highpass = image - lowpass\n return highpass", "def fake_gaussian(img, vertical_horizontal_sigma, iter=3):\n sigma_vertical, sigma_horizontal = vertical_horizontal_sigma\n h_blured = box_filter1d(img, sigma_horizontal, horizontal=True, iter=iter)\n blured = box_filter1d(h_blured, sigma_vertical, horizontal=False, iter=iter)\n return blured", "def gaussian_hp(image, sigma):\n row, col = image.shape\n H = np.zeros((row, col))\n for y in range(row):\n for x in range(col):\n D = np.sqrt((y-int(row/2))**2 + 
(x-int(col/2))**2)\n H[y,x] = np.exp(-D**2/(2*sigma**2))\n H_hp = (1-H)\n X = np.fft.fftshift(np.fft.fft2(image))\n Y = np.fft.fftshift((1+H_hp)*X)\n y = np.fft.ifft2(Y)\n return np.abs(y)", "def smooth_gauss(image, variance=2, kernel_size=(9, 9)):\n return cv2.GaussianBlur(image, kernel_size, variance)", "def EstimateIlluminationProfile(image, kernel_sigma): \n \n illumination_profile = Denoising(image, kernel_sigma);\n \n map_corr = np.ones(image.shape);\n map_corr = Denoising(map_corr, kernel_sigma);\n \n illumination_profile = illumination_profile / map_corr;\n \n return illumination_profile;", "def gaus(x, A, mu, sigma):\n return A * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2))", "def gauss(x, gamma):\n return 1 / np.sqrt(2*np.pi) / gamma * np.exp(-(x/gamma)**2 / 2)", "def gauss(x, *p):\n mu, sigma = p\n return (1 / (sigma * np.sqrt(2 * np.pi)) *\n np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)))", "def get_intensity_transformer(aug):\n\n def gamma_tansform(img):\n gamma_range = aug['aug']['gamma_range']\n if isinstance(gamma_range, tuple):\n gamma = np.random.rand() * (gamma_range[1] - gamma_range[0]) + gamma_range[0]\n cmin = img.min()\n irange = (img.max() - cmin + 1e-5)\n\n img = img - cmin + 1e-5\n img = irange * np.power(img * 1.0 / irange, gamma)\n img = img + cmin\n\n elif gamma_range == False:\n pass\n else:\n raise ValueError(\"Cannot identify gamma transform range {}\".format(gamma_range))\n return img\n\n return gamma_tansform", "def gauss(self, X, xm, amp, w):\n return amp * np.exp(-((X - xm) / w) ** 2)", "def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background", "def differenceOfGausssians(image,sigma0, sigma1,window_size, roi, out = None):\n return (vigra.filters.gaussianSmoothing(image,sigma0,window_size=window_size,roi = roi)-vigra.filters.gaussianSmoothing(image,sigma1,window_size=window_size,roi = roi))", "def gaussian(p, x):\n #2008-09-11 15:11 IJC: Created for LINEPROFILE\n # 2011-05-18 11:46 IJC: Moved to analysis.\n # 2013-04-11 12:03 IJMC: Tried to speed things up slightly via copy=False\n # 2013-05-06 21:42 IJMC: Tried to speed things up a little more.\n\n if not isinstance(x, np.ndarray):\n x = array(x, dtype=float, copy=False)\n\n if len(p)==3:\n p = array(p, copy=True)\n p = concatenate((p, [0]))\n #elif len(p)==4:\n # p = array(p, copy=False)\n\n return p[3] + p[0]/(p[1]*sqrt(2*pi)) * exp(-(x-p[2])**2 / (2*p[1]**2))", "def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))", "def gauss_seeing(npix = None,fwhm=None,e1=None,e2=None,scale=scale):\n fwhm = fwhm/scale\n M20 = 2.*(fwhm/2.35482)**2\n row,col = np.mgrid[-npix/2:npix/2,-npix/2:npix/2]\n rowc = row.mean()\n colc = col.mean()\n Mcc = 0.5*M20*(1+e1)\n Mrc = 0.5*e2*M20\n Mrr = 0.5*M20*(1-e1)\n rho = Mrc/np.sqrt(Mcc*Mrr)\n img = np.exp(-0.5/(1-rho**2)*(row**2/Mrr + col**2/Mcc - 2*rho*row*col/np.sqrt(Mrr*Mcc)))\n res = img/img.sum()\n return res", "def gaussian_white(z, mu: 'normal' = 0, sigma: (0.4, 1) = 0.7):\n return 1 - gaussian_black(z, mu, sigma)", "def gauss(x, *p):\n A, mu, sigma = p\n\n return A*np.exp(-(x-mu)**2/(2.*sigma**2))", "def gaussian(x, amp, wid, cen):\n return amp*np.exp(-(x-cen)**2/(2*wid**2))", "def gaussed_value(self):\n from random import gauss\n return sorted([0, int(gauss(self.value, self.sigma)), \\\n (self.size*8)-1])[1]", "def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = 
self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma", "def beam(xb,yb,zb,wx,wy,wavelen):\n\n zRx = np.pi * wx**2 / wavelen\n zRy = np.pi * wy**2 / wavelen \n \n sqrtX = np.sqrt( 1 + np.power(zb/zRx,2) ) \n sqrtY = np.sqrt( 1 + np.power(zb/zRy,2) ) \n intensity = np.exp( -2.*( np.power(xb/(wx*sqrtX ),2) \\\n + np.power(yb/(wy*sqrtY),2) )) / sqrtX / sqrtY\n return intensity", "def Gausian_response(img,sigma=1):\n \n # Gausian response\n img_sigma = zeros(img.shape)\n filters.gaussian_filter(img, (sigma,sigma), (0,0), img_sigma)\n \n return img_sigma", "def __measurement_prob(angle, measurement, noise):\n return ParticleFilter.Gaussian(angle, noise, measurement)", "def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def estimateGaussian(X):\n\tmu = np.mean(X, axis=0)\n\tsigma2 = np.std(X, axis=0) ** 2\n\treturn mu, sigma2", "def _prior_gaussian(self, x_start):\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.gaussian_q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)", "def __call__( self, X, Y, Z):\n xb,yb,zb = self.transform( X,Y,Z)\n \n gauss = beam( xb,yb,zb, self.w[0], self.w[1], self.l)\n intensity = (2/np.pi)* self.mW/1000. 
/self.w[0]/self.w[1] *gauss # W um^-2\n \n return uL(self.l)*intensity", "def gaussian(x, amp, cen, wid):\n return amp * exp (-(x-cen)**2/(2*wid**2))", "def gauss(x,p):\n return np.exp((-(x - p[0])**2) / (2 * p[1]**2))", "def get_pca_mean_beam(self):\n return self.get_pca_images()[0]", "def gaussianPSF(shape, sigma):\n psf = dg.drawGaussiansXY(shape,\n numpy.array([0.5*shape[0]]),\n numpy.array([0.5*shape[1]]),\n sigma = sigma)\n return psf/numpy.sum(psf)", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def test_estimate_parameters():\n # a red image\n image = numpy.zeros((3, 11, 11))\n image[0, :, :] = 255\n skin_filter.estimate_gaussian_parameters(image)\n assert (skin_filter.mean == [1.0, 0.0]).all(), \"mean for a red image is not OK\"\n assert (skin_filter.covariance == [[0.0, 0.0], [0.0, 0.0]]).all(), \"covariance for red image is not OK\"\n\n # a green image\n image = numpy.zeros((3, 11, 11))\n image[1, :, :] = 255\n skin_filter.estimate_gaussian_parameters(image)\n assert (skin_filter.mean == [0.0, 1.0]).all(), \"mean for a green image is not OK\"\n assert (skin_filter.covariance == [[0.0, 0.0], [0.0, 0.0]]).all(), \"covariance for green image is not OK\"", "def pdf(x):\n return - np.exp(self.ks_gaussian.score_samples(x.reshape(1, -1)))", "def gaussian_blur(self,img):\n return cv2.GaussianBlur(img, (self.kernel_size, self.kernel_size), 0)", "def pseudo_flatfield(img_plane, sigma=5):\n filtered_img = gaussian_filter(img_plane, sigma)\n return img_plane / (filtered_img + 1)", "def MyBaseMoments(p,q,img,gauss_sigma,gauss_centroid=None, gauss_g1=0., gauss_g2=0.):\n weight = galsim.Image(np.zeros_like(img.array))\n gauss = galsim.Gaussian(sigma=gauss_sigma*pixel_scale).shear(g1=gauss_g1,g2=gauss_g2)\n if gauss_centroid is None:\n gauss_centroid = img.true_center\n weight = gauss.drawImage(image=weight, scale=pixel_scale, method='no_pixel', use_true_center=True, offset=(gauss_centroid-img.true_center)*(1))\n x = np.linspace(img.xmin-img.center.x*0-gauss_centroid.x*1, img.xmax-img.center.x*0-gauss_centroid.x*1, img.xmax-img.xmin+1)+0.*0.5\n y = np.linspace(img.ymin-img.center.y*0-gauss_centroid.y*1, img.ymax-img.center.y*0-gauss_centroid.y*1, img.ymax-img.ymin+1)+0.*0.5\n X, Y = np.meshgrid(x,y)\n\n Q00 = np.sum(weight.array*img.array)\n Q10 = gauss_centroid.x + np.sum(X*weight.array*img.array)/Q00\n Q01 = gauss_centroid.y + np.sum(Y*weight.array*img.array)/Q00\n Q20 = np.sum((X**2)*weight.array*img.array)\n Q02 = np.sum((Y**2)*weight.array*img.array)\n\n monomial = 1.\n for pp in xrange(p):\n monomial *= X\n for qq in xrange(q):\n monomial *= Y\n Qpq = np.sum(monomial*weight.array*img.array) #/Q00\n\n return Qpq", "def gaussian_kernel(training_ex, landmark, sigma=0.1):\n return np.exp(-(np.linalg.norm(training_ex - landmark) ** 2 / (2 * (sigma ** 2))))", "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def taper_visibility_gaussian(vis: Visibility, beam=None) -> Visibility:\n assert isinstance(vis, Visibility), vis\n \n if beam is None:\n raise ValueError(\"Beam size not specified for Gaussian taper\")\n uvdistsq = vis.u ** 2 + vis.v ** 2\n # See http://mathworld.wolfram.com/FourierTransformGaussian.html\n scale_factor = numpy.pi ** 2 * beam ** 2 / (4.0 * numpy.log(2.0))\n prior = vis.flagged_imaging_weight[:, :]\n wt = numpy.exp(-scale_factor * uvdistsq)\n vis.data['imaging_weight'][:, :] = vis.flagged_imaging_weight[:, :] * wt[:, numpy.newaxis]\n \n return vis", "def gaussian_black(z, mu: 'normal' = 0, sigma: 
(0.4,1) = 0.7):\n return 1/(np.sqrt(2*np.pi)*sigma)*np.exp(-np.power((z - mu)/sigma, 2)/2)", "def get_input_vector(img):\n parts = partition_matrix(blurmap(img), 10)\n return numpy.array([numpy.mean(part) for part in parts],\n dtype=numpy.float32)", "def gauss_pert(N,a):\n x = np.arange(0,N,1,float)\n y = x[:,np.newaxis]\n\n # Choose the location of the peak\n x0 = y0 = int(0.4*N)\n\n # Choose the fwhm, 'width' of the perturbation\n fwhm = N/15\n\n return a*np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def gaussian_filter(x):\n return _gaussian_filter(x, 3)", "def gaussian_blur(self, img):\n kernel_size = self.gaussian_blur_params[\"kernel_size\"]\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_proba_map(img):\n method = 'cv2.TM_CCOEFF_NORMED'\n sigmas = [41,31,21,11]\n out = np.zeros(img.shape)\n for sigma in sigmas:\n size=3*sigma\n template = gaussian(size,sigma)\n template/=template.max()\n template*=255\n template = template.astype(np.uint8)\n \n img2 = img.copy()\n meth = eval(method)\n # Apply template Matching\n res = cv2.matchTemplate(img2,template,meth)\n res = np.pad(res,size/2,mode='constant')\n to_replace = res>out\n out[to_replace] = res[to_replace]\n return out", "def _gauss_pix(x, mean=0.0, sigma=1.0):\n x = (np.asarray(x, dtype=float) - mean) / (sigma*np.sqrt(2))\n dx = x[1]-x[0]\n if not np.allclose(np.diff(x), dx):\n raise ValueError('all pixels must have the same size')\n\n edges = np.concatenate([x-dx/2, x[-1:]+dx/2])\n assert len(edges) == len(x)+1\n\n y = scipy.special.erf(edges)\n return (y[1:] - y[:-1])/2", "def sphere_l_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, i])\n\n return np.mean(pixels)", "def estimateGaussian(X):\n mu = X.mean(0, keepdims=True).T\n sigma2 = X.var(0, keepdims=True).T\n return mu, sigma2", "def gauss(x, x0, gamma):\n sigma = gamma / sqrt(2.0)\n \n A = 1/ (sigma * sqrt(2*pi))\n return (A * exp (-0.5 * (x-x0)**2/sigma**2))", "def gaussint(x, mean=0.0, sigma=1.0):\n z = (x - mean) / (math.sqrt(2) * sigma)\n return (erf(z) + 1.0) / 2.0", "def test_gaussian_filter():\n\n def rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\n img = rgb2gray(np.array(Image.open('data/graf.png')))\n gx, x = gauss_module.gauss(4)\n gx = gx.reshape(1, gx.shape[0])\n gy = gx.reshape(gx.shape[1], gx.shape[0])\n smooth_img = conv2(img, gx * np.array(gy))\n\n test_smooth_img = gauss_module.gaussianfilter(img, 4)\n\n assert np.all(smooth_img.round(5) == test_smooth_img.round(5))", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def psfVal(ix, iy, x, y, sigma1, sigma2, b):\n return (math.exp (-0.5*((ix - x)**2 + (iy - y)**2)/sigma1**2) +\n b*math.exp (-0.5*((ix - x)**2 + (iy - y)**2)/sigma2**2))/(1 + b)", "def Gauss_filter(data, sigma=(0,2,2), mode='wrap'): \n import scipy.ndimage.filters as flt\n return flt.gaussian_filter(data, sigma=sigma, mode=mode)", "def gaussian(self, amp_step, sigma_step):\n l = len(self.overlaid_x_axis)\n x = np.linspace(0, l, l) - l/2 # centre of data\n\n # This is new code to 'guess' the size of the Gaussian from the\n # existing data rather than from hard-coded numbers.\n # TODO: test this! 
Possibly link up to the get_windowed_data function\n # as it uses a lot of the same functionality\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n amplitude = max(trace) + amp_step\n diff = np.diff(trigger)\n stepvalue = 0.5\n if min(diff) > -1 * stepvalue or max(diff) < stepvalue:\n raise RangeError\n else:\n maxtrig = next(x for x in diff if x > stepvalue)\n mintrig = next(x for x in diff if x < -1 * stepvalue)\n edges = [np.where(diff == maxtrig)[0][0],\n np.where(diff == mintrig)[0][0]]\n half_trigger_length = (edges[1]-edges[0])\n sigma = half_trigger_length/4 + sigma_step\n\n gauss = self.ax2.plot(amplitude * np.exp(-x**2 / (2 * sigma**2)), 'r')\n self.overlaid_lines.append(gauss)\n self.draw()", "def enhance(self, image):\n\n if self.random.uniform(0, 1) < self.p:\n return self.intensity_enhance(image)\n else:\n return image", "def getIntensityS(self):\n return self._Esigma.intensity()", "def features_sigma(img,\n sigma,\n intensity=True,\n edges=True,\n texture=True):\n\n features = []\n\n gx,gy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))\n # print(gx.shape)\n #features.append(gx)\n gx = filters.gaussian(gx, sigma)\n gy = filters.gaussian(gy, sigma)\n\n features.append(np.sqrt(gx**2 + gy**2)) #use polar radius of pixel locations as cartesian coordinates\n\n del gx, gy\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Location features extracted using sigma= %f' % (sigma))\n\n img_blur = filters.gaussian(img, sigma)\n\n if intensity:\n features.append(img_blur)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Intensity features extracted using sigma= %f' % (sigma))\n\n if edges:\n features.append(filters.sobel(img_blur))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Edge features extracted using sigma= %f' % (sigma))\n\n if texture:\n H_elems = [\n np.gradient(np.gradient(img_blur)[ax0], axis=ax1)\n for ax0, ax1 in itertools.combinations_with_replacement(range(img.ndim), 2)\n ]\n\n eigvals = feature.hessian_matrix_eigvals(H_elems)\n del H_elems\n\n for eigval_mat in eigvals:\n features.append(eigval_mat)\n del eigval_mat\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Texture features extracted using sigma= %f' % (sigma))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Image features extracted using sigma= %f' % (sigma))\n\n return features", "def get_intensity_normalization(self):\n return self.intensity_normalize_image", "def gaussian(mu, sigma, start, end):\r\n \r\n val = np.linspace(start, end, 100)\r\n a = 1/(sigma*np.pi)\r\n b = - 0.5 * np.power((mu - val)/sigma, 2)\r\n return a*np.exp(b)", "def gausspix(x, mean=0.0, sigma=1.0):\n edges = np.concatenate((x-0.5, x[-1:]+0.5))\n integrals = gaussint(edges, mean=mean, sigma=sigma)\n return integrals[1:] - integrals[0:-1]", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n return g.reshape(-1)", "def phi_gauss(self,x,i):\n s = 0.1\n return np.exp(-(x-self.mu[i])**2/(2*s))", "def area_of_gaussian(amp, fwhm):\n return amp * fwhm / 0.93943727869965132", "def create_gaussian_array(self):\n\n # Fill array of size l x w with Gaussian Noise.\n terrain_length = int(ceil(self.length/self.resolution))\n terrain_width = 
int(ceil(self.width/self.resolution))\n gaussian_array = np.random.normal(self.mu, self.sigma, (terrain_length,terrain_width))\n\n # Filter the array to smoothen the variation of the noise\n gaussian_array = gaussian_filter(gaussian_array, self.sigma_filter)\n\n return gaussian_array", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n\n return g.unsqueeze(0).unsqueeze(0)", "def gaussian(window_size, sigma):\n gauss = torch.Tensor([math.exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])\n return gauss/gauss.sum()", "def gauss(x, mu, A, sigma):\n mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)\n val = (A / (sigma * np.sqrt(np.pi * 2)) *\n np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2)))\n return val.sum(axis=-1)", "def cf_profile(self):\n x = np.abs(self.gen_profile() / self.sam_sys_inputs['system_capacity'])\n return x", "def gaussian_likelihood(input_, mu_, log_std):\n pre_sum = -0.5 * (((input_ - mu_) / (\n tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(\n 2 * np.pi))\n return tf.reduce_sum(pre_sum, axis=1)", "def prob(x):\n\treturn 1. * bivariate_normal(x, (0., 1.2), (1., 1.), .8) + \\\n\t 1.05 * bivariate_normal(x, (.6, -1.), (1.3, .7), -.6)", "def gaussian_filter(stddev, array):\n\n return astropy.convolution.convolve(\n array, astropy.convolution.Gaussian2DKernel(stddev))", "def getIntensity(self):\n return self.getIntensityS() + self.getIntensityP()", "def flat_top_gaussian(a1_val, a2_val, sigma1, sigma2, w1_val, w2_val, w_val):\n gauss1 = a1_val * np.exp(-(w_val - w1_val)**4/(2 * sigma1**2))\n gauss2 = a2_val * np.exp(-(w_val - w2_val)**4/(2 * sigma2**4))\n sum_gauss = gauss1 + gauss2\n return sum_gauss", "def prewitt(img):\n kernel1 = numpy.array([1.0, 0.0, -1.0])\n kernel2 = numpy.array([1.0, 1.0, 1.0])\n Gx1 = convRows(img, kernel1)\n Gx = convCols(Gx1, kernel2)\n Gy1 = convCols(img, kernel1)\n Gy = convRows(Gy1, kernel2)\n \n G = numpy.sqrt(Gx*Gx + Gy*Gy)\n \n return G", "def Gaussian(x, mu=0, sigma=26.4, A=1, y0=0):\r\n #width = sigma*(2*np.sqrt(2*np.log(2)))\r\n b = 1/(sigma*np.sqrt(2*np.pi))\r\n f = b*np.power(np.e, -(((x-mu)**2)/(2*sigma**2)))\r\n return A*f + y0", "def profile(c_mat, mat_rows):\n\n profile_mat = c_mat / mat_rows\n\n return profile_mat", "def get_map_gaussian_fit(self, map_type, i_seq):\n map_gaussians = self._map_gaussian_fits.get(map_type, None)\n if (map_gaussians is not None):\n return map_gaussians[i_seq]\n return None", "def param_gauss(xdata_, *params_):\n scale_, mean_, cov_ = params_to_scale_mean_cov(params_)\n return scale_ * gaussian(xdata_, mean=mean_, cov=cov_)", "def sphere_r_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, img.shape[1] - i])\n\n return np.mean(pixels)", "def get_kernel_values(self, i, j, abscissa_array):\n x = np.zeros_like(abscissa_array)\n for m in range(self.n_gaussians):\n x += self.amplitudes[i, j, m] * \\\n norm.pdf((abscissa_array - self.means_gaussians[m]) \\\n / self.std_gaussian) / self.std_gaussian\n return x", "def getIntensityP(self):\n return self._Epi.intensity()", "def GaussianKernel(radius, std):\n size = 2 * radius + 1\n weight = torch.ones(size, size)\n weight.requires_grad = False\n for i in range(-radius, radius+1):\n for j in range(-radius, radius+1):\n dis = (i * i) + (j * j)\n weight[i+radius][j+radius] = np.exp(-dis / (2 * std * std))\n weight = 
weight / weight.sum()\n return weight", "def gauss_sample(mean, covariance):\n\n return None" ]
[ "0.6619526", "0.63317794", "0.62996733", "0.627856", "0.62739635", "0.61623496", "0.6124203", "0.610553", "0.60432166", "0.60368556", "0.60006434", "0.59681493", "0.5952793", "0.59485036", "0.5927189", "0.5926995", "0.5875386", "0.5873252", "0.5861136", "0.58427656", "0.5829406", "0.58169806", "0.5813704", "0.58073467", "0.5805789", "0.57963747", "0.5794637", "0.57767224", "0.5776591", "0.57571816", "0.57551694", "0.5740491", "0.57103753", "0.5709734", "0.5705603", "0.5700343", "0.5700127", "0.5698982", "0.56879747", "0.5684806", "0.5680259", "0.56728595", "0.565731", "0.56439245", "0.5642952", "0.5639772", "0.56325006", "0.5625263", "0.56243724", "0.56167006", "0.56114584", "0.5610747", "0.56047875", "0.559979", "0.55844843", "0.5560884", "0.55590236", "0.5557906", "0.554965", "0.55335665", "0.5529015", "0.5525451", "0.55235046", "0.55230856", "0.55230325", "0.552173", "0.5521207", "0.55200744", "0.5508664", "0.55042887", "0.5497192", "0.5490256", "0.5464166", "0.54619056", "0.5459805", "0.54595727", "0.54559493", "0.5446888", "0.54463387", "0.544419", "0.54405904", "0.5419804", "0.5419641", "0.54192597", "0.54179", "0.54170334", "0.5415881", "0.5414582", "0.540571", "0.5403416", "0.53972316", "0.5391768", "0.53890115", "0.5388656", "0.5388268", "0.53880835", "0.53854966", "0.5379961", "0.53759253", "0.53732103", "0.53727823" ]
0.0
-1
Returns intensity profile of trap array
def gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset, ntraps):
    array = np.zeros(np.shape(x))
    for k in range(ntraps):
        array = array + gaussian1d(x, x0_vec[k], wx_vec[k], A_vec[k], 0)
    return array + offset
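A minimal usage sketch for the gaussianarray1d document above (illustration only; the trap positions, waists, and depths are assumed). It relies on the gaussian1d helper from the earlier row:

import numpy as np

x = np.linspace(-4, 4, 801)          # assumed 1D sample grid
x0_vec = np.array([-2.0, 0.0, 2.0])  # assumed trap centers
wx_vec = np.array([0.6, 0.6, 0.6])   # assumed 1/e^2 waists
A_vec = np.array([1.0, 1.0, 1.0])    # assumed trap depths
profile = gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset=0.0, ntraps=3)
print(profile.shape, round(profile.max(), 3))  # (801,) 1.0 -- traps barely overlap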
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intensity(self) -> int:", "def intensity(self):\r\n return np.power(prb.amplitude, 2)", "def getIntensity(self):\n return self.getIntensityS() + self.getIntensityP()", "def estimate_skin_tone(face_roi):\n return [int(face_roi[:, :, i].mean()) for i in range(face_roi.shape[-1])]", "def getIntensity(self):\n return self.__intensity", "def turbulence_intensity(self):\n return self.flow_field.turbulence_intensity", "def getIntensityP(self):\n return self._Epi.intensity()", "def detector_intensity(val_addr):\n \n return(np.concatenate((np.unique(val_addr, return_counts=True)[1][:13],[0],np.unique(val_addr, return_counts=True)[1][13:]),axis=0))", "def getstats(img, thresholds):\n number = np.zeros(img.shape, np.float64)\n ev = np.zeros(img.shape, np.float64)\n scatter = np.zeros(img.shape, np.float64)\n for n, s, low, high, evs in thresholds:\n for i in numba.prange(img.shape[0]):\n for j in numba.prange(img.shape[1]):\n if (low < img[i, j]) and (img[i, j] < high):\n scatter[i, j] = s\n number[i, j] = n\n ev[i, j] = img[i, j] - evs\n return ev, number, scatter", "def extract_intensity(seg, fluo_im):\n\n # Get the region props of the fluorescence image using the segmentation\n # mask.\n props = skimage.measure.regionprops(seg, intensity_image=fluo_im)\n cell_ints = []\n for prop in props:\n cell_ints.append(prop.mean_intensity)\n\n # Convert the cell_ints to an array and return.\n return np.array(cell_ints)", "def intensity(self, value: int, /) -> None:", "def apphot(im, yx, rap, subsample=4, **kwargs):\n n, f = anphot(im, yx, rap, subsample=subsample, **kwargs)\n if np.size(rap) > 1:\n return n.cumsum(-1), f.cumsum(-1)\n else:\n return n, f", "def OF1_CalculateRawHistogram(image):\n h = np.zeros(256, np.float_)\n for i in np.nditer(image):\n h[i - 1] = h[i - 1] + 1\n\n return h", "def compute_intensity(self, data):\n # Compute RMS intensity (as float to prevent overflow).\n data = self.audio_resample(data.astype(np.float32)**2)**0.5\n\n # Apply dynamic range compression.\n return data**self._exponent", "def confint(arr):\n res=[[],[],[]]\n #r=hpd(arr)\n r=(sap(arr,2.5),sap(arr,97.5))\n res[0]=r[0]\n res[1]=arr.mean(0)\n res[2]=r[1]\n return np.array(res)", "def EstimateIlluminationProfile(image, kernel_sigma): \n \n illumination_profile = Denoising(image, kernel_sigma);\n \n map_corr = np.ones(image.shape);\n map_corr = Denoising(map_corr, kernel_sigma);\n \n illumination_profile = illumination_profile / map_corr;\n \n return illumination_profile;", "def profile_from_xvalues(self, xvalues):\r\n\r\n transformed_xvalues = xvalues - self.centre\r\n\r\n return np.multiply(\r\n np.divide(self.intensity, self.sigma * np.sqrt(2.0 * np.pi)),\r\n np.exp(-0.5 * np.square(np.divide(transformed_xvalues, self.sigma))),\r\n )", "def getIntensityS(self):\n return self._Esigma.intensity()", "def measure_amplitude(self, ipx, ipy):\r\n\r\n intensity = np.mean(np.mean(self.imageData[:, ipx-1:ipx+1, ipy-1:ipy+1], axis=2), axis=1)\r\n\r\n # fold over repetitions\r\n\r\n remapped = intensity.reshape( self.nrepetitions, intensity.shape[0]/self.nrepetitions).mean(axis=0)\r\n\r\n return remapped", "def get_fthreshold(img,factor=1.):\n import noiselevel\n # sigma=Table.read('noiselevel.csv',format='csv')['sigma'][0]\n sigma=noiselevel.getnoiselevel(img,ranges=(-30,30),toplot=False)\n \n thres= sigma*factor\n return thres,sigma", "def detectable_intensity(self):\n return self.disease.detectable_intensity()", "def tempProfile_hocuk(self,dist,draine=1):\n radius = np.linspace(0,self.size * self.dist * au2cm,100)\n 
density = self.denProfile(radius)\n dr = radius[1] - radius[0]\n tdust = np.zeros(len(dist))\n for i in range(len(dist)):\n av = self.compute_av(dist[i],radius,density,dr)\n tdust[i] =(11 + 5.7*np.tanh(0.61 - np.log10(av)))*draine**(1/5.9)\n return tdust", "def addNoise_amp(array,counts):\r\n if array.dtype == 'complex' :\r\n arrayout = addNoise(np.real(array),counts) + 1.0J * addNoise(np.imag(array),counts)\r\n else :\r\n if np.float64(counts) == 0.0e0 :\r\n arrayout = np.copy(array)\r\n elif np.float64(counts) < 0.0e0 :\r\n print 'bg.addNoise : warning counts < 0'\r\n else :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = np.square(normalise(array))\r\n arrayout = np.random.poisson(arrayout*np.float64(counts))/np.float64(counts)\r\n arrayout = np.sqrt(arrayout)\r\n tot = np.sum(np.abs(array)**2)\r\n arrayout = normalise(arrayout,tot)\r\n return arrayout", "def get_brightness(arr):\n\tR,G,B = arr[:,:,0], arr[:,:,1], arr[:,:,2]\n\tY = 0.299*R + 0.587*G + 0.144*B\n\treturn Y.mean()", "def observation(self, img):\r\n img = img[25:200]\r\n img = cv2.resize(img, self.img_size[1:])\r\n if not self.color:\r\n img = img.mean(-1, keepdims=True)\r\n\r\n return img.transpose([2, 0, 1]) / 255", "def infectious_intensity(self):\n return InfectiousIntensity(self)", "def extract_intensity(mask, yfp_image):\n # Get the region properties for the image.\n props = skimage.measure.regionprops(mask, intensity_image=yfp_image)\n\n # Make a vector to store the mean intensities\n mean_int = []\n for prop in props:\n intensity = prop.mean_intensity\n mean_int.append(intensity)\n\n return mean_int", "def gen_profile(self):\n return np.array(self['gen'], dtype=np.float32)", "def gen_profile(self):\n return np.array(self['gen'], dtype=np.float32)", "def calculate_absorbance(intensity):\n blanco = intensity[0]\n intensity_np_arr = np.array(intensity)\n transmittance = intensity_np_arr / blanco\n absorbance = - np.log10(transmittance)\n return absorbance", "def cf_profile(self):\n x = np.abs(self.gen_profile() / self.sam_sys_inputs['system_capacity'])\n return x", "def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d", "def profile(c_mat, mat_rows):\n\n profile_mat = c_mat / mat_rows\n\n return profile_mat", "def row_uncertainty(a):\n try:\n return sum(safe_p_log_p(a), 1)\n except ValueError:\n raise ValueError(\"Array has to be two-dimensional\")", "def trapfilt_taps(N, phil, alfa):\n\n\n\n tt = arange(-N/2,N/2 + 1) # Time axis for h(t) \n # ***** Generate impulse response ht here *****\n ht = zeros(len(tt))\n ix = where(tt != 0)[0]\n if alfa != 0:\n ht[ix] = ((sin(2*pi*phil*tt[ix]))/(pi*tt[ix]))*((sin(2*pi*alfa*phil*tt[ix]))/(2*pi*alfa*phil*tt[ix]))\n else:\n ht[ix] = (sin(2*pi*phil*tt[ix]))/(pi*tt[ix])\n ix0 = where(tt == 0)[0]\n ht[ix0] = 2*phil\n ht = ht/sum(power(ht,2))\n\n return ht", "def get_uthreshold(img):\n import noiselevel\n # sigma=Table.read('noiselevel.csv',format='csv')['sigma'][0]\n sigma = noiselevel.getnoiselevel(img,ranges=(-30,30),toplot=False)\n \n thres = sigma*np.sqrt(2*np.log(img.size))\n return thres, sigma", "def get_intensity(self, step_index):\n\n if step_index < 4 or step_index >= 
self.intensity_step_count - 4:\n raise ValueError('step index {0} is out of bounds [4, {1})'.format(\n step_index, self.intensity_step_count - 4))\n\n # Checking if we already computed required intensity value.\n\n if self.intensity_list[step_index] != None:\n return self.intensity_list[step_index]\n\n # No, we haven't, so we are going to compute it.\n\n window_list, window_sum = get_kaiser_window(self.intensity_half_window_size)\n\n sample_array = self.intensity_sound.get_array_of_samples()\n sample_sum = 0.0\n\n channel_count = self.intensity_sound.channels\n amplitude_limit = self.intensity_sound.max_possible_amplitude\n\n # We sum squared normalized amplitudes of all samples of all channels in the window.\n\n sample_from = (step_index - 4) * self.intensity_step_size * channel_count\n\n for i in range(self.intensity_window_size):\n for j in range(channel_count):\n sample = sample_array[sample_from + i * channel_count + j] / amplitude_limit\n sample_sum += sample ** 2 * window_list[i]\n\n # Multiplication by 2.5e9 is taken directly from Praat source code, where it is performed via\n # division by 4e-10.\n\n intensity_ratio = sample_sum / (channel_count * window_sum) * 2.5e9\n intensity = -300 if intensity_ratio < 1e-30 else 10 * math.log10(intensity_ratio)\n\n # Saving computed intensity value for reuse, and returning it.\n\n self.intensity_list[step_index] = intensity\n return intensity", "def wav_to_intensity(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n int_db = intensity.get_value(time)\n if np.isnan(int_db):\n int_db = 0\n\n features.append([int_db])\n\n array_feats = np.array(features).T\n\n print(\"SHAPE OF THE FEATURES:\", array_feats.shape)\n assert(not np.any(np.isnan(array_feats)))\n\n return array_feats, max_time", "def profile(self):\n return NumericStatsMixin.profile(self)", "def plot_intensity_prop(stack, wavelengths_arr, colors_arr):\n\n # for the electric fields profile\n for i, wl in enumerate(wavelengths_arr):\n electric_tot_te, electric_tot_tm, reflectivity_te, reflectivity_tm, transmission_te, transmission_tm, index_tot, L_tot, theta_tot = transfer_matrix_method(\n stack, 1, 0, wl, 0)\n intensity = np.abs(electric_tot_te[::-1]) ** 2\n plt.plot(L_tot * 1e6, intensity / max(intensity) * 2, color=colors_arr[i])\n # for the indexes profile\n ax.plot(L_tot * 1e6, index_tot[::-1], color='black')\n ax.fill_between(L_tot * 1e6, index_tot[::-1], color='azure')", "def intensity(self):\n LP = 1/np.sin(self.theta)**2/np.cos(self.theta)\n P = 1 + np.cos(2*self.theta)**2\n I = (np.abs(self.F))**2*LP*P\n self.xrd_intensity = I\n self.theta2 = 2*self.theta\n rank = np.argsort(self.theta2)\n self.theta2 = self.theta2[rank]\n self.hkl_list = self.hkl_list[rank]\n self.d_hkl = self.d_hkl[rank]\n self.xrd_intensity = self.xrd_intensity[rank]", "def metric_iaf(self, x):\n data = np.asarray(x['data'])\n iaf = [10.0] * data.shape[0]\n for ch, ch_data in enumerate(data):\n pxx, freqs = mlab.psd(ch_data, Fs=128.0, NFFT=256)\n alpha_mask = np.abs(freqs - 10) <= 2.0\n alpha_pxx = 10*np.log10(pxx[alpha_mask])\n alpha_pxx = scipy.signal.detrend(alpha_pxx)\n # iaf[ch] = alpha_pxx.shape\n iaf[ch] = freqs[alpha_mask][np.argmax(alpha_pxx)]\n return iaf", "def depth2intensity(depth, interval=300):\n return depth * 3600 / interval", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + 
str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def get_intensity_normalization(self):\n return self.intensity_normalize_image", "def sphere_r_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, img.shape[1] - i])\n\n return np.mean(pixels)", "def statistics_from_array(x: numpy.ndarray):\n try:\n return x.mean(), x.std(), x.max(), x.min()\n except AttributeError:\n return numpy.nan, numpy.nan, numpy.nan, numpy.nan", "def analyze(self):\n try:\n self.options[self.multi_image][1]()\n except:\n raise Exception(\"Multi Image Option not defined.\")\n\n self.image = self.data / self.exposure\n\n background = self.min_val = np.min(self.image[:511,:511])\n self.max_val = np.max(self.image[:511,:511])\n # stats.mode returns modal value = value that occours most often\n #background = stats.mode(im[:50,:50].ravel())[0][0]\n\n intensity = self.image.sum() - background*np.size(self.image)\n\n #results.append((self.index, intensity, background))\n self.index =+ 1", "def extract_temp_integral_2_to_100(batch,index):\n from scipy import integrate\n integral = []\n for ind in index:\n cell_no = list(batch.keys())[ind]\n integrate_ = integrate.simps(batch[cell_no]['summary']['Tavg'][1:100])\n # integral.append(integrate_)\n integral.append(log(abs(integrate_),10))\n integral = np.reshape(integral,(-1,1))\n return integral\n pass", "def make_lineprofile(npix,rstar,xc,vgrid,A,veq,linewidth):\n vc=(np.arange(npix)-xc)/rstar*veq\n vs=vgrid[np.newaxis,:]-vc[:,np.newaxis]\n profile=1.-A*np.exp( -(vs*vs)/2./linewidth**2)\n return profile", "def createdog(self,imagearr):\n re = [0,1,2,3]\n re[0] = self.diff(self.gs_blur(self.sigma,imagearr))\n for i in range(1,4):\n base = self.sampling(re[i-1][2])\n re[i] = self.diff(self.gs_blur(self.sigma, base))\n return re", "def get_threshold_data(self):\n return [roi.get_threshold_data() for roi in self.rois]", "def fiber_profile(x, y, r0, blur=0.1):\n r = np.sqrt(x ** 2 + y ** 2)\n return 0.5 + 0.5 * scipy.special.erf((r0 - r) / (np.sqrt(2) * blur))", "def getNoiseVar(img,fraction=0.95):\n last_val = np.percentile(img,fraction)\n #si(img<last_val,title=\"Pixel values considered as noise\")\n return np.var(img[img<last_val])", "def profile(x):\n return x", "def intensityPSF_Fire(N=1000):\n col_seq = [ ( 0/255., 0/255., 0/255.),\n ( 0/255., 0/255., 22/255.),\n ( 0/255., 0/255., 45/255.),\n ( 0/255., 0/255., 65/255.),\n ( 0/255., 0/255., 78/255.),\n ( 0/255., 0/255., 91/255.),\n ( 7/255., 0/255., 104/255.),\n ( 16/255., 0/255., 117/255.),\n ( 25/255., 0/255., 130/255.),\n ( 34/255., 0/255., 143/255.),\n ( 43/255., 0/255., 156/255.),\n ( 52/255., 0/255., 168/255.),\n ( 55/255., 0/255., 171/255.),\n ( 58/255., 0/255., 175/255.),\n ( 61/255., 0/255., 178/255.),\n ( 64/255., 0/255., 181/255.),\n ( 67/255., 0/255., 185/255.),\n ( 70/255., 0/255., 188/255.),\n ( 73/255., 0/255., 192/255.),\n ( 76/255., 0/255., 195/255.),\n ( 79/255., 0/255., 199/255.),\n ( 82/255., 0/255., 202/255.),\n ( 85/255., 0/255., 206/255.),\n ( 88/255., 0/255., 209/255.),\n ( 91/255., 0/255., 213/255.),\n ( 94/255., 0/255., 216/255.),\n ( 98/255., 0/255., 220/255.),\n (101/255., 0/255., 220/255.),\n (104/255., 0/255., 221/255.),\n (107/255., 0/255., 222/255.),\n (110/255., 0/255., 223/255.),\n (113/255., 0/255., 224/255.),\n (116/255., 0/255., 225/255.),\n 
(119/255., 0/255., 226/255.),\n (122/255., 0/255., 227/255.),\n (125/255., 0/255., 224/255.),\n (128/255., 0/255., 222/255.),\n (131/255., 0/255., 220/255.),\n (134/255., 0/255., 218/255.),\n (137/255., 0/255., 216/255.),\n (140/255., 0/255., 214/255.),\n (143/255., 0/255., 212/255.),\n (146/255., 0/255., 210/255.),\n (148/255., 0/255., 206/255.),\n (150/255., 0/255., 202/255.),\n (152/255., 0/255., 199/255.),\n (154/255., 0/255., 195/255.),\n (156/255., 0/255., 191/255.),\n (158/255., 0/255., 188/255.),\n (160/255., 0/255., 184/255.),\n (162/255., 0/255., 181/255.),\n (163/255., 0/255., 177/255.),\n (164/255., 0/255., 173/255.),\n (166/255., 0/255., 169/255.),\n (167/255., 0/255., 166/255.),\n (168/255., 0/255., 162/255.),\n (170/255., 0/255., 158/255.),\n (171/255., 0/255., 154/255.),\n (173/255., 0/255., 151/255.),\n (174/255., 0/255., 147/255.),\n (175/255., 0/255., 143/255.),\n (177/255., 0/255., 140/255.),\n (178/255., 0/255., 136/255.),\n (179/255., 0/255., 132/255.),\n (181/255., 0/255., 129/255.),\n (182/255., 0/255., 125/255.),\n (184/255., 0/255., 122/255.),\n (185/255., 0/255., 118/255.),\n (186/255., 0/255., 114/255.),\n (188/255., 0/255., 111/255.),\n (189/255., 0/255., 107/255.),\n (190/255., 0/255., 103/255.),\n (192/255., 0/255., 100/255.),\n (193/255., 0/255., 96/255.),\n (195/255., 0/255., 93/255.),\n (196/255., 1/255., 89/255.),\n (198/255., 3/255., 85/255.),\n (199/255., 5/255., 82/255.),\n (201/255., 7/255., 78/255.),\n (202/255., 8/255., 74/255.),\n (204/255., 10/255., 71/255.),\n (205/255., 12/255., 67/255.),\n (207/255., 14/255., 64/255.),\n (208/255., 16/255., 60/255.),\n (209/255., 19/255., 56/255.),\n (210/255., 21/255., 53/255.),\n (211/255., 22/255., 51/255.),\n (212/255., 24/255., 49/255.),\n (213/255., 27/255., 45/255.),\n (214/255., 29/255., 42/255.),\n (215/255., 32/255., 38/255.),\n (217/255., 35/255., 35/255.),\n (218/255., 37/255., 31/255.),\n (219/255., 39/255., 29/255.),\n (220/255., 40/255., 27/255.),\n (220/255., 41/255., 25/255.),\n (221/255., 43/255., 23/255.),\n (222/255., 44/255., 21/255.),\n (223/255., 46/255., 20/255.),\n (224/255., 48/255., 16/255.),\n (225/255., 49/255., 14/255.),\n (226/255., 51/255., 12/255.),\n (227/255., 54/255., 8/255.),\n (228/255., 55/255., 6/255.),\n (229/255., 57/255., 5/255.),\n (230/255., 59/255., 4/255.),\n (231/255., 62/255., 3/255.),\n (233/255., 65/255., 3/255.),\n (234/255., 68/255., 2/255.),\n (235/255., 70/255., 1/255.),\n (237/255., 73/255., 1/255.),\n (238/255., 76/255., 0/255.),\n (240/255., 79/255., 0/255.),\n (241/255., 81/255., 0/255.),\n (243/255., 84/255., 0/255.),\n (244/255., 87/255., 0/255.),\n (246/255., 90/255., 0/255.),\n (247/255., 92/255., 0/255.),\n (249/255., 95/255., 0/255.),\n (250/255., 98/255., 0/255.),\n (252/255., 101/255., 0/255.),\n (252/255., 103/255., 0/255.),\n (252/255., 105/255., 0/255.),\n (253/255., 107/255., 0/255.),\n (253/255., 109/255., 0/255.),\n (253/255., 111/255., 0/255.),\n (254/255., 113/255., 0/255.),\n (254/255., 115/255., 0/255.),\n (255/255., 117/255., 0/255.),\n (255/255., 119/255., 0/255.),\n (255/255., 121/255., 0/255.),\n (255/255., 123/255., 0/255.),\n (255/255., 125/255., 0/255.),\n (255/255., 127/255., 0/255.),\n (255/255., 129/255., 0/255.),\n (255/255., 131/255., 0/255.),\n (255/255., 133/255., 0/255.),\n (255/255., 134/255., 0/255.),\n (255/255., 136/255., 0/255.),\n (255/255., 138/255., 0/255.),\n (255/255., 140/255., 0/255.),\n (255/255., 141/255., 0/255.),\n (255/255., 143/255., 0/255.),\n (255/255., 145/255., 0/255.),\n (255/255., 147/255., 
0/255.),\n (255/255., 148/255., 0/255.),\n (255/255., 150/255., 0/255.),\n (255/255., 152/255., 0/255.),\n (255/255., 154/255., 0/255.),\n (255/255., 155/255., 0/255.),\n (255/255., 157/255., 0/255.),\n (255/255., 159/255., 0/255.),\n (255/255., 161/255., 0/255.),\n (255/255., 162/255., 0/255.),\n (255/255., 164/255., 0/255.),\n (255/255., 166/255., 0/255.),\n (255/255., 168/255., 0/255.),\n (255/255., 169/255., 0/255.),\n (255/255., 171/255., 0/255.),\n (255/255., 173/255., 0/255.),\n (255/255., 175/255., 0/255.),\n (255/255., 176/255., 0/255.),\n (255/255., 178/255., 0/255.),\n (255/255., 180/255., 0/255.),\n (255/255., 182/255., 0/255.),\n (255/255., 184/255., 0/255.),\n (255/255., 186/255., 0/255.),\n (255/255., 188/255., 0/255.),\n (255/255., 190/255., 0/255.),\n (255/255., 191/255., 0/255.),\n (255/255., 193/255., 0/255.),\n (255/255., 195/255., 0/255.),\n (255/255., 197/255., 0/255.),\n (255/255., 199/255., 0/255.),\n (255/255., 201/255., 0/255.),\n (255/255., 203/255., 0/255.),\n (255/255., 205/255., 0/255.),\n (255/255., 206/255., 0/255.),\n (255/255., 208/255., 0/255.),\n (255/255., 210/255., 0/255.),\n (255/255., 212/255., 0/255.),\n (255/255., 213/255., 0/255.),\n (255/255., 215/255., 0/255.),\n (255/255., 217/255., 0/255.),\n (255/255., 219/255., 0/255.),\n (255/255., 220/255., 0/255.),\n (255/255., 222/255., 0/255.),\n (255/255., 224/255., 0/255.),\n (255/255., 226/255., 0/255.),\n (255/255., 228/255., 0/255.),\n (255/255., 230/255., 0/255.),\n (255/255., 232/255., 0/255.),\n (255/255., 234/255., 0/255.),\n (255/255., 235/255., 4/255.),\n (255/255., 237/255., 8/255.),\n (255/255., 239/255., 13/255.),\n (255/255., 241/255., 17/255.),\n (255/255., 242/255., 21/255.),\n (255/255., 244/255., 26/255.),\n (255/255., 245/255., 28/255.),\n (255/255., 246/255., 30/255.),\n (255/255., 247/255., 33/255.),\n (255/255., 248/255., 35/255.),\n (255/255., 248/255., 42/255.),\n (255/255., 249/255., 50/255.),\n (255/255., 250/255., 58/255.),\n (255/255., 251/255., 66/255.),\n (255/255., 252/255., 74/255.),\n (255/255., 253/255., 82/255.),\n (255/255., 254/255., 90/255.),\n (255/255., 254/255., 95/255.),\n (255/255., 255/255., 98/255.),\n (255/255., 255/255., 101/255.),\n (255/255., 255/255., 103/255.),\n (255/255., 255/255., 105/255.),\n (255/255., 255/255., 108/255.),\n (255/255., 255/255., 113/255.),\n (255/255., 255/255., 117/255.),\n (255/255., 255/255., 121/255.),\n (255/255., 255/255., 129/255.),\n (255/255., 255/255., 132/255.),\n (255/255., 255/255., 136/255.),\n (255/255., 255/255., 144/255.),\n (255/255., 255/255., 149/255.),\n (255/255., 255/255., 152/255.),\n (255/255., 255/255., 160/255.),\n (255/255., 255/255., 163/255.),\n (255/255., 255/255., 167/255.),\n (255/255., 255/255., 170/255.),\n (255/255., 255/255., 175/255.),\n (255/255., 255/255., 179/255.),\n (255/255., 255/255., 183/255.),\n (255/255., 255/255., 187/255.),\n (255/255., 255/255., 191/255.),\n (255/255., 255/255., 194/255.),\n (255/255., 255/255., 199/255.),\n (255/255., 255/255., 203/255.),\n (255/255., 255/255., 207/255.),\n (255/255., 255/255., 211/255.),\n (255/255., 255/255., 215/255.),\n (255/255., 255/255., 223/255.),\n (255/255., 255/255., 227/255.),\n (255/255., 255/255., 231/255.),\n (255/255., 255/255., 235/255.),\n (255/255., 255/255., 239/255.),\n (255/255., 255/255., 243/255.),\n (255/255., 255/255., 245/255.),\n (255/255., 255/255., 246/255.),\n (255/255., 255/255., 247/255.),\n (255/255., 255/255., 251/255.),\n (255/255., 255/255., 255/255.),\n (255/255., 255/255., 255/255.),\n (255/255., 255/255., 
255/255.),\n (255/255., 255/255., 255/255.),\n (255/255., 255/255., 255/255.),\n (255/255., 255/255., 255/255.) ]\n\n seqLen = len(col_seq)\n delta = 1.0/(seqLen - 1)\n r_tuple = ((i*delta, col_seq[i][0], col_seq[i][0]) for i in range(seqLen))\n g_tuple = ((i*delta, col_seq[i][1], col_seq[i][1]) for i in range(seqLen))\n b_tuple = ((i*delta, col_seq[i][2], col_seq[i][2]) for i in range(seqLen))\n cdict = {'red': tuple(r_tuple),\n 'green': tuple(g_tuple),\n 'blue': tuple(b_tuple)}\n firecm = _mplb.colors.LinearSegmentedColormap('psffire', cdict, N)\n return firecm", "def get_temp(self) -> float:\n return np.round(np.mean(self.temp_data), 1)", "def profile(self):\n\n def _flatten(f):\n return [coeffifient for value in f.values()\\\n for coeffifient in value.coefficients()]\n\n elements = _flatten(self.domain().j) +\\\n _flatten(self.codomain().j) +\\\n _flatten(self)\n\n\n profile = enveloping_profile_elements(elements)\n\n # Avoid returning the zero profile because it triggers a corner case\n # in FP_Module_class.resolution().\n # \n # XXX: Fix FP_Module_class.resolution().\n #\n return (1,) if profile == (0,) else profile", "def s2profile(r,r0,A,B):\n x = r/r0\n res = A*4./(np.exp(x)+np.exp(-x))**2 + B\n return res", "def addNoise(array,counts):\r\n if array.dtype == 'complex' :\r\n arrayout = addNoise(np.real(array),counts) + 1.0J * addNoise(np.imag(array),counts)\r\n else :\r\n if np.float64(counts) == 0.0e0 :\r\n arrayout = np.zeros(array.shape, dtype=arrayout.dtype)\r\n elif np.float64(counts) < 0.0e0 :\r\n print 'bg.addNoise : warning counts < 0'\r\n elif np.float64(counts) > 1.0e9 :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = normaliseInt(array)\r\n arrayout = np.random.normal(arrayout*np.float64(counts),np.sqrt(arrayout*np.float64(counts)))/np.float64(counts)\r\n tot = np.sum(array)\r\n arrayout = normaliseInt(arrayout,tot)\r\n else :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = normaliseInt(array)\r\n arrayout = np.random.poisson(arrayout*np.float64(counts))/np.float64(counts)\r\n tot = np.sum(array)\r\n arrayout = normaliseInt(arrayout,tot)\r\n return arrayout", "def calcthresh(self, img):\n mu = np.mean(img.ravel())\n sd = np.std(img.ravel())\n return mu + 3 * sd", "def observation(self, img):\r\n img = img.transpose(1, 2, 0)\r\n return img", "def scipyTheilSen(array):\n\ttry:\n\t\tslope, intercept, _, _ = stats.mstats.theilslopes(array)\n\t\treturn slope #, intercept\n\texcept Exception as e:\n\t \tprint(e) \n\t \tipdb.set_trace()\n\t \treturn np.NAN", "def getIntensitySpectum(self,wavelengths,intensities = 1.0):\n angles,peaks,widths = self.getSpectrum(wavelengths,intensities)\n\n # Get arnge of angles to calulate over\n minField = np.min(angles) - 10*np.max(widths)\n maxField = np.max(angles) + 10*np.max(widths)\n # Sample finely enough to make peak widths visible, but least 400points\n npoints = max(int((maxField - minField)/np.min(widths)),400)\n\n # Make the two two array for the output data\n fieldAngle = np.linspace(minField,maxField,npoints)\n spectralOutput = np.zeros(fieldAngle.size)\n\n # Add each peak in turn\n for a,p,w in zip(angles,peaks,widths):\n\n for i,af in enumerate(fieldAngle):\n s = self.lineShape(a,w,af) # Add the spectrometer lineshape\n spectralOutput[i] += p*s\n\n # Return the two numpy arrays as a list\n return fieldAngle,spectralOutput", "def autothreshold(gray_im, method=\"otsu\"):\n if method == \"otsu\":\n t = otsu(gray_im)\n elif method == \"kmeans\":\n t = ave(kmeans(list(gray_im.getdata())))\n return gray_im.point(lambda x: 0 if 
x < t else 255) # < or <= ?", "def get_statistical_information(mat, percentile=(0, 100), denoise=False):\n if denoise is True:\n mat = ndi.gaussian_filter(mat, 2)\n gmin = np.min(mat)\n gmax = np.max(mat)\n min_percent = np.percentile(mat, percentile[0])\n max_percent = np.percentile(mat, percentile[-1])\n median = np.median(mat)\n mean = np.mean(mat)\n variance = np.var(mat)\n return gmin, gmax, min_percent, max_percent, mean, median, variance", "def get_rain_values(self):\n return float(self.data[2]) / 10, float(self.data[4]) / 10", "def weights(self):\n return np.array(self.intensity[self.idx])", "def avg_temps(self):\r\n average_temp = 0\r\n for j in range(len(self.trip)):\r\n average_temp += self.trip[j].get_temperature(j)\r\n average_temp /= len(self.trip)\r\n return average_temp", "def switchy_score(array):\n array = np.array(array)\n variance = 1 - np.std(np.sin(array[~np.isnan(array)] * np.pi))\n mean_value = -np.mean(np.cos(array[~np.isnan(array)] * np.pi))\n return variance * mean_value", "def calculate_incoherent_scattered_intensity(element, q):\n return current_calculator.get_incoherent_intensity(element, q)", "def har_mean(array):\n return ((sum([1/x for x in array]))**(-1))*len(array)", "def intensity_histogram_measures(regionmask, intensity):\n feat = Intensity_Histogram_Measures(\n [\n np.percentile(intensity[regionmask], 0),\n np.percentile(intensity[regionmask], 25),\n np.percentile(intensity[regionmask], 50),\n np.percentile(intensity[regionmask], 75),\n np.percentile(intensity[regionmask], 100),\n np.mean(intensity[regionmask]),\n stats.mode(intensity[regionmask], axis=None)[0][0],\n np.std(intensity[regionmask]),\n ]\n )\n return feat", "def magerr2Ivar(flux, magErr):\n fluxErr = flux * ((10.0 ** (magErr/2.5)) - 1.0)\n\n return 1.0 / (fluxErr ** 2.0)", "def calc_profile(self, phases):\n self._profile = self._generator(phases)\n self._Amax = self.Amax if hasattr(self, '_Amax') else np.max(self.profile)\n return self.profile / self.Amax", "def iou_stats(pred, target, num_classes=6, background=5):\n # Set redundant classes to background.\n locs = np.logical_and(target > -1, target < num_classes)\n\n # true positive + false negative\n tp_fn, _ = np.histogram(target[locs],\n bins=np.arange(num_classes+1))\n # true positive + false positive\n tp_fp, _ = np.histogram(pred[locs],\n bins=np.arange(num_classes+1))\n # true positive\n tp_locs = np.logical_and(locs, pred == target)\n tp, _ = np.histogram(target[tp_locs],\n bins=np.arange(num_classes+1))\n\n return tp_fn, tp_fp, tp", "def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # 
Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)", "def get_specific_heat() -> float:\n return 1006.0", "def t_measure_estimate(self):\n ho = self.humidity_oversampling\n to = self.temperature_oversampling\n po = self.pressure_oversampling\n typ = 1. + 2.*to + (2.*po + 0.5)*bool(po) + (2.*ho +0.5)*bool(ho)\n mx = 1.25 + 2.3*to + (2.3*po + 0.575)*bool(po) + (2.3*ho +0.575)*bool(ho)\n return typ, mx", "def tsz_profile_highacc(self, nu=None):\n bb = np.linspace(0.0, self.bmax, 100) # Range of impact parameters\n rr = bb * self.r500\n ysz = map(lambda b: scipy.integrate.quad(self._ig_tsz, b+0.0001,\n self.bmaxc, args=(b,))[0], bb)\n if nu == None:\n g_nu = 1. # Factor-out spectral dependence\n else:\n g_nu = self.tsz_spectrum(nu)\n fac_ysz = (2. * 2. * 2.051 / 511.) * self.r500\n ysz = g_nu * fac_ysz * np.array(ysz)\n interp = scipy.interpolate.interp1d( rr, ysz, kind='linear', \n bounds_error=False, fill_value=0.0 )\n return interp", "def impute_cumulative_array(array):\n array = np.array(array).copy()\n array = convert_non_monotonic_to_nan(array)\n array = log_interpolate(array)\n return array", "def intensity( rgb ):\n return int( (rgb[0] + rgb[1] + rgb[2])/3 )", "def GetGrayArray(self, p_int):\n ...", "def profile(self) -> Optional[Any]:\n\n def get_profile_attribute(numpy_data, attr_name):\n if isinstance(numpy_data, dict):\n return {key: getattr(array, attr_name) for key, array in numpy_data.items()}\n else:\n return getattr(numpy_data, attr_name)\n\n profile = {\n \"features_shape\": get_profile_attribute(self._features, \"shape\"),\n \"features_size\": get_profile_attribute(self._features, \"size\"),\n \"features_nbytes\": get_profile_attribute(self._features, \"nbytes\"),\n }\n if self._targets is not None:\n profile.update(\n {\n \"targets_shape\": get_profile_attribute(self._targets, \"shape\"),\n \"targets_size\": get_profile_attribute(self._targets, \"size\"),\n \"targets_nbytes\": get_profile_attribute(self._targets, \"nbytes\"),\n }\n )\n\n return profile", "def get_incoherent_intensity(self, element: str, q):\n fs_coherent = self.get_coherent_scattering_factor(element, q)\n intensity_coherent = fs_coherent ** 2\n s = q / (4 * np.pi)\n Z = float(self.incoherent_param['Z'][element])\n M = float(self.incoherent_param['M'][element])\n K = float(self.incoherent_param['K'][element])\n L = float(self.incoherent_param['L'][element])\n intensity_incoherent = (Z - intensity_coherent / Z) * (1 - M * (np.exp(-K * s) - np.exp(-L * s)))\n return intensity_incoherent", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def sphere_l_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, i])\n\n return np.mean(pixels)", "def geo_mean(array):\n logsum = sum([np.log(each) for each in array])/len(array)\n return np.exp(logsum)", "def getIntensity(self, pos):\n #Camera doesnt have position so im just using the position of the followed object (of 1st camera)\n camPos = glad.renderer.cameraList[0].objectFollowed.getPos()\n\n r=(pos-camPos)#separation 
vector\n if r.isNullVector(): #if the vector is null, sound will be max anyways\n sin = 1\n cos = 1\n else:\n #calculate angles to determine where sound is coming from\n cos = dotProduct(r.getNormalized(),Vector(-1,0))\n sin = dotProduct(r.getNormalized(), Vector(0,1))\n #Calculate intensity for left and right channels\n #when sound is directly to the side have 80 percent come from that side speaker\n #hopefully this will give some directional sounds\n k = 130000 #arbitrary constant to calculate sound intensity\n if r.isNullVector():\n intensity = k #removes division by zero error\n else:\n intensity = k/r.getMagnitude()**2\n #major is the percent of the sound intensity from the side with the greater intensity\n a=0.68 #max percent of the intensity coming from one side\n major = (a*0.5)/((0.5*cos)**2+(a*sin)**2)**0.5 #equation for an ellipse\n if r[0] <= 0:\n right = major\n left = 1-major\n else:\n left = major\n right = 1-major\n right *= intensity\n left *= intensity\n if right > 1: right = 1\n if left > 1: left = 1\n return left,right", "def _find_saturated_profiles(self) -> np.ndarray:\n n_gates, var_lim, _, _ = self.noise_params\n var = np.var(self.data['backscatter'][:, -n_gates:], axis=1)\n return var < var_lim", "def get_metrics(self) -> np.ndarray:\n dice = np.mean(self.dice_scores)\n iou = np.mean(self.iou_scores)\n sens = np.mean(self.sens_scores)\n spec = np.mean(self.spec_scores)\n accu = np.mean(self.accu_scores)\n return dice, iou, sens, spec, accu", "def normalize_profile(ph):\n return ph/np.abs(ph[1])", "def areaFraction(nb_probes, I):\n P = np.random.randint(I.shape[0], size=(nb_probes, 2))\n\n # count the number of probes in phase\n count = np.sum(I[P[:, 0], P[:, 1]])\n\n# fig=plt.figure();\n# plt.imshow(I);\n# plt.plot(P[:,0], P[:,1], '+');\n# plt.show();\n# fig.savefig('areaFrac.pdf', bbox_inches='tight');\n return float(count) / nb_probes", "def _calc_npixels_fired(self, r0, z, pwm):\n N0 = 500 * self.npix * _pwm_multiplier(pwm)\n Nraw = N0 * z / (r0**2 + z**2)**1.5\n return self.npix * (1 - np.exp(-Nraw / self.npix))", "def find_backstats(f_arr, sigma, niter):\n ave = f_arr.mean()\n std = f_arr.std()\n for i in range(niter):\n mask = (abs(f_arr - ave) < sigma * std)\n ave = f_arr[mask].mean()\n std = f_arr[mask].std()\n return ave, std", "def MeanPixArr(PixArr, binary=True):\n \n import numpy as np\n \n F, R, C = PixArr.shape\n \n # Initialise MeanPixArr:\n MeanPixArr = np.zeros((1, R, C), dtype='uint')\n \n \n result = np.mean(PixArr, axis=0)\n \n if binary:\n MeanPixArr[0] = (result >= 0.5) * result\n else:\n MeanPixArr[0] = result\n \n return MeanPixArr", "def result_array(self) -> np.ndarray:\n return np.array([r[\"time\"] for r in self.profile_result])", "def getScalarFlux(self):\n totScalarFlux = []\n for cell in self.cells:\n totScalarFlux.append(cell.getTotScalarFlux())\n totScalarFlux = np.array(totScalarFlux)\n #return totScalarFlux / np.sum(totScalarFlux) # norm flux to 1.\n return totScalarFlux", "def average_profile_in_bins(Redges, R, prof):\n if np.min(Redges) < np.min(R):\n raise Exception(\"Minimum edge must be >= minimum R\")\n if np.max(Redges) > np.max(R):\n raise Exception(\"Maximum edge must be <= maximum R\")\n ave_prof = np.zeros(len(Redges)-1)\n cluster_toolkit._lib.average_profile_in_bins(_dcast(Redges), len(Redges), _dcast(R), len(R), _dcast(prof), _dcast(ave_prof))\n return ave_prof", "def findRMpeaks(self, pix, threshold):\n\t\tsigma = np.std(self.getz(pix))\n\t\tdetections = []\n\t\tfor i, phi in enumerate(self.getz(pix)):\n \t\t \tif 
phi > threshold*sigma: detections.append(i)\n \t \treturn detections", "def skystats(stamp):\n\t\n\tif isinstance(stamp, galsim.Image):\n\t\ta = stamp.array\n\t\t# Normally there should be a .transpose() here, to get the orientation right.\n\t\t# But in the present case it doesn't change anything, and we can skip it.\n\telse:\n\t\ta = stamp # Then we assume that it's simply a numpy array.\n\t\n\tedgepixels = np.concatenate([\n\t\t\ta[0,1:], # left\n\t\t\ta[-1,1:], # right\n\t\t\ta[:,0], # bottom\n\t\t\ta[1:-1,-1] # top\n\t\t\t])\n\tassert len(edgepixels) == 2*(a.shape[0]-1) + 2*(a.shape[0]-1)\n\n\t# And we convert the mad into an estimate of the Gaussian std:\n\treturn {\n\t\t\"std\":np.std(edgepixels), \"mad\": 1.4826 * mad(edgepixels),\n\t\t\"mean\":np.mean(edgepixels), \"med\":np.median(edgepixels),\n\t\t\"stampsum\":np.sum(a)\n\t\t}" ]
[ "0.62673634", "0.593113", "0.5869846", "0.58618975", "0.5649231", "0.5581468", "0.5578077", "0.5490693", "0.5435854", "0.5350697", "0.5301182", "0.5300452", "0.52878165", "0.5265786", "0.5254433", "0.52346134", "0.5218291", "0.5211029", "0.5208299", "0.5205285", "0.5173795", "0.51410526", "0.51351696", "0.5130163", "0.5101332", "0.50918484", "0.50865495", "0.5083497", "0.5083497", "0.5079967", "0.5077461", "0.50650233", "0.5060322", "0.50473046", "0.5047266", "0.5039288", "0.5033676", "0.50262785", "0.50054467", "0.4974781", "0.49624556", "0.49617594", "0.49397102", "0.49305668", "0.49176168", "0.49158955", "0.49019435", "0.4897411", "0.48893467", "0.4883825", "0.48665455", "0.48657554", "0.4865579", "0.48634455", "0.4855761", "0.48501727", "0.48474345", "0.48458728", "0.4844118", "0.4835661", "0.48253068", "0.4820242", "0.48197436", "0.4815872", "0.48111862", "0.48078427", "0.48047692", "0.48041874", "0.48010805", "0.4788903", "0.47836968", "0.47653654", "0.47526765", "0.47477773", "0.47471777", "0.47461048", "0.47434095", "0.47428262", "0.47420502", "0.47388285", "0.47343117", "0.47324792", "0.4725646", "0.47255906", "0.47159833", "0.4707207", "0.47069904", "0.47069877", "0.4704628", "0.47021878", "0.47016653", "0.47015992", "0.46995717", "0.46992505", "0.46919", "0.46912", "0.46909282", "0.46896383", "0.46836764", "0.46828902", "0.46812937" ]
0.0
-1
Juggles parameters in order to be able to fit a list of parameters
def wrapper_fit_func(x, ntraps, *args):
    a, b, c = list(args[0][:ntraps]), list(args[0][ntraps:2 * ntraps]), list(args[0][2 * ntraps:3 * ntraps])
    offset = args[0][-1]
    return gaussianarray1d(x, a, b, c, offset, ntraps)
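A minimal usage sketch for the document above: the flat parameter vector lets scipy.optimize.curve_fit drive a model whose parameter count depends on ntraps. The gaussianarray1d helper below is an assumption (a sum of ntraps Gaussians plus a constant offset, matching the unpacking order amplitudes, centers, widths, offset); it and every name in the sketch are illustrative, not part of the source record, and wrapper_fit_func is assumed to be defined in the same module.

import numpy as np
from scipy.optimize import curve_fit

# Hypothetical helper (not in the source): sum of ntraps Gaussians plus offset.
def gaussianarray1d(x, a, b, c, offset, ntraps):
    y = np.full_like(x, offset, dtype=float)
    for amp, center, width in zip(a, b, c):
        y += amp * np.exp(-((x - center) ** 2) / (2 * width ** 2))
    return y

ntraps = 3
x = np.linspace(0, 10, 200)
# Flat vector in the layout wrapper_fit_func unpacks:
# [amplitudes..., centers..., widths..., offset]
p0 = np.concatenate([np.ones(ntraps),
                     np.linspace(2, 8, ntraps),
                     0.5 * np.ones(ntraps),
                     [0.0]])
y = gaussianarray1d(x, p0[:ntraps], p0[ntraps:2 * ntraps],
                    p0[2 * ntraps:3 * ntraps], p0[-1], ntraps)

# curve_fit cannot introspect the variadic lambda, so p0 fixes the
# number of fitted parameters; the lambda repacks them as one tuple
# so that args[0] inside wrapper_fit_func is the full parameter list.
popt, _ = curve_fit(lambda x, *p: wrapper_fit_func(x, ntraps, p), x, y, p0=p0)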
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n 
self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n 
self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')", "def parameter_tuning(D, param_grid):\n grid = ParameterGrid(param_grid)\n\n for params in grid:\n model_file = 'Theshpairs1_Ind_5' + '_emb_' + str(params['embedding_size']) + '_nr_' + str(\n params['negative_ratio']) + \\\n '_batch_' + str(params['batch_size']) + '_epochs_' \\\n + str(params['nb_epochs']) + '_classification_' + str(params['classification'])\n\n print(model_file)\n\n # Train Model\n Prio = NNEmbeddings(D, embedding_size=params['embedding_size'], negative_ratio=params['negative_ratio'],\n nb_epochs=params['nb_epochs'], batch_size=params['batch_size'],\n classification=params['classification'], save=True,\n model_file='Models/' + model_file + '.h5')\n\n # New Predicitons\n df_metrics = Prio.predict(pickle_file=None)\n plot_single(df_metrics)\n plot_metric(df_metrics, name='Plot_Metrics/' + model_file + '.png')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 
'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Dilu_ratio[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"0.5*(1.+@0*@1)\",Afb, Dilu_ratio)')\n self.modelBuilder.factory_('expr::Rmn(\"0.5*(1.-@0*@1)\",Afb, Dilu_ratio)')", "def get_parameter_estimation_parameters(self, friendly=True):\n #Get the sensitivities task:\n fitTask=self._getTask('parameterFitting')\n fitProblem = fitTask.find(xmlns + 'Problem')\n optimizationItems = fitProblem.find(xmlns + 'ParameterGroup')\n parameters = []\n for subGroup in optimizationItems:\n name = None\n lowerBound = None\n upperBound = None\n startValue = None\n \n for item in subGroup:\n if item.attrib['name'] == 'ObjectCN':\n name = item.attrib['value']\n elif item.attrib['name'] == 'UpperBound':\n upperBound = item.attrib['value']\n elif item.attrib['name'] == 'LowerBound':\n lowerBound = item.attrib['value']\n elif item.attrib['name'] == 'StartValue':\n startValue = item.attrib['value']\n assert name !=None\n assert lowerBound != None\n assert upperBound != None\n assert startValue != None\n \n if friendly:\n #Construct a user-friendly name for the parameter name using regexs\n #Look for a match for global parameters: Vector=Values[Test parameter],\n global_string = r'.*Vector=Values\\[(?P<name>.*)\\].*'\n global_string_re = re.compile(global_string)\n global_match = re.match(global_string_re, name)\n \n if global_match:\n name = global_match.group('name')\n \n #else check for a local match.\n #Vector=Reactions[Reaction] Parameter=k1\n local_string = r'.*Vector=Reactions\\[(?P<reaction>.*)\\].*Parameter=(?P<parameter>.*),Reference=Value.*'\n local_string_re = re.compile(local_string)\n local_match = re.match(local_string_re, name)\n \n if local_match:\n reaction = local_match.group('reaction')\n parameter = local_match.group('parameter')\n name = '(%s).%s'%(reaction, parameter)\n\n parameters.append((name, lowerBound, upperBound, startValue))\n\n return parameters", "def test_joint_parameter(self):\n assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])\n assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def suggest_parameters(trial, config):\n\n # Get parameters from config\n parameters = config['params_' + config['model_name']]\n # Init parameters for optuna\n optuna_parameters = dict()\n for key in parameters.keys():\n if parameters[key][0] == 'int':\n optuna_parameters[key] = trial.suggest_int(key, parameters[key][1], parameters[key][2])\n elif parameters[key][0] == 'uniform':\n optuna_parameters[key] = trial.suggest_uniform(key, parameters[key][1], parameters[key][2])\n elif parameters[key][0] == 'categorical':\n optuna_parameters[key] = trial.suggest_categorical(key, parameters[key][1])\n elif parameters[key][0] == 'loguniform':\n optuna_parameters[key] = trial.suggest_loguniform(key, parameters[key][1], parameters[key][2])\n return optuna_parameters", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n 
self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n 
norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'curv_contour_length_14',\n # 'curv_baseline',\n ]\n }\n exp['data_augmentations'] = [\n [\n 'grayscale',\n 'left_right',\n 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'resize',\n # 'per_image_standardization',\n 'zero_one'\n ]]\n exp['val_augmentations'] = exp['data_augmentations']\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 16\n exp['exp_name'] = 'hgru_bn_pathfinder_14'\n exp['model_name'] = 'hgru'\n # exp['clip_gradients'] = 7.\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 50\n exp['shuffle_val'] = True # Shuffle val data.\n exp['shuffle_train'] = True\n return exp", "def generative_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in dir(self.generative_model):\n params.extend(list(self.generative_model.parameters()))\n params.extend(list(self.latent.generative_parameters()))\n return params", "def obtain_training_parameters(para, x, y, alg = 'LR'):\n \n \n global omega\n \n # Iterate to find the optimal parameters\n if alg == 'LR': # logistic regression\n omega = np.zeros((3, 1))\n alpha = para.step_size # step size\n for i in range(para.iteration):\n grad = np.zeros((3, 1))\n for i in range(len(x[:, 0])):\n grad += np.reshape(x[i, :], (3, 1)) * (-y[i] + 1 / (1 + np.exp(-np.dot(x[i, :], omega))))\n omega -= alpha * grad \n \n elif alg == 'GNB': # Gaussian Naive Bayes\n # get counts for each class\n itszero = 0\n itsone = 0\n for i in range(len(y)):\n if y[i] == 1:\n itsone += 1\n else:\n itszero += 1\n \n # probability of see y\n theta0 = itszero / len(y)\n theta1 = 1 - theta0\n \n # mean of omega\n mew00 = 0\n mew01 = 0\n mew02 = 0\n mew10 = 0\n mew11 = 0\n mew12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n mew00 += x[i, 0] / itszero\n mew01 += x[i, 1] / itszero\n mew02 += x[i, 2] / itszero\n else:\n mew10 += x[i, 0] / itsone\n mew11 += x[i, 1] / itsone\n mew12 += x[i, 2] / itsone\n \n # variance of omega \n sigma00 = 0\n sigma01 = 0\n sigma02 = 0\n sigma10 = 0\n sigma11 = 0\n sigma12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n sigma00 += (x[i, 0] - mew00)**2 / itszero\n sigma01 += (x[i, 1] - mew01)**2 / itszero\n sigma02 += (x[i, 2] - mew02)**2 / itszero\n else:\n sigma10 += (x[i, 0] - mew10)**2 / itsone\n sigma11 += (x[i, 1] - mew11)**2 / itsone\n sigma12 += (x[i, 2] - mew12)**2 / itsone\n \n # store these parameters into the name \"omage\"\n omega = [theta0, theta1, mew00, mew01, mew02, mew10, mew11, mew12,\n sigma00, sigma01, sigma02, sigma10, sigma11, sigma12] \n \n else: # Gaussian Mixture\n pass\n \n return omega", "def config_params0(data,parameter):\n model = []\n #Range of value of p\n acf = sm.graphics.tsa.acf(data.diff().dropna())\n for i in range(len(acf)):\n acf[i] = abs(acf[i]*10)\n if (ceil(acf[i])) <= 2:\n p = range(ceil(acf[i])-1,ceil(acf[i])+2)\n break\n\n #range of value of q\n pacf = sm.graphics.tsa.pacf(data.diff().dropna())\n for i in range(len(pacf)):\n pacf[i] = 
abs(pacf[i]*10)\n if (ceil(pacf[i])) <= 2:\n q = range(ceil(pacf[i])-1,ceil(pacf[i])+2)\n break\n\n\t# define config lists\n p_params = p\n d_params = parameter['d']\n q_params = q\n m_params = parameter['m']\n #P_params = p\n #D_params = [0, 1]\n #Q_params = q\n \n pdq_m = list(itertools.product(p_params, d_params, q_params,m_params)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def get_optimal_param(data_desc, ml_model_desc):\n if ml_model_desc == 'ANN': \n # return [<num_layers>, <momentum>, <learn rate>]\n if data_desc == 'young_students_ti_courses':\n return [100, 0.5, 0.001]\n elif data_desc == 'young_students_lic_courses':\n return [36, 0.9, 1.0]\n elif data_desc == 'young_students_comp_courses':\n return [36, 0.6, 0.001]\n elif data_desc == 'old_students':\n return [24, 0.5, 0.7]\n else:\n exit('can not get optimal parameters for the combination passed!')\n elif ml_model_desc == 'naive_bayes':\n if data_desc == 'young_students_ti_courses':\n return [GaussianNB()]\n elif data_desc == 'young_students_lic_courses':\n return [BernoulliNB()]\n elif data_desc == 'young_students_comp_courses':\n return [MultinomialNB()]\n elif data_desc == 'old_students':\n return [GaussianNB()]\n else:\n exit('can not get optimal parameters for the combination passed!')\n elif ml_model_desc == 'SVR': \n if data_desc == 'young_students_ti_courses':\n return ['linear', 1.0]\n elif data_desc == 'young_students_lic_courses':\n return ['linear', 1.0]\n elif data_desc == 'young_students_comp_courses':\n return ['rbf', 1.0]\n elif data_desc == 'old_students':\n return ['linear', 1.0]\n else:\n exit('can not get optimal parameters for the combination passed!')\n else: \n exit('can not get optimal parameters for the combination passed!')", "def test_michaelis_menten_fit(self):\n res = michaelis_menten_fit([22])\n self.assertFloatEqual(res,1.0,eps=.01)\n res = michaelis_menten_fit([42])\n self.assertFloatEqual(res,1.0,eps=.01)\n res = michaelis_menten_fit([34],num_repeats=3,params_guess=[13,13])\n self.assertFloatEqual(res,1.0,eps=.01)\n res = michaelis_menten_fit([70,70],num_repeats=5)\n self.assertFloatEqual(res,2.0,eps=.01)", "def update_parameters(parameters, grads, learning_rate):\n pass", "def __init__(self):\n self.param_names = []\n self.param_values = []\n self.param_settings = []\n self.result = []\n self.best_params = None\n self.best_score = None\n self.max_reps = 5\n self.num_values = False\n self.algorithm_done = False", "def update_parameters(parameters, grads, learning_rate):\n L = len(parameters) // 2\n\n for i in range(L):\n parameters[\"W\"+str(i+1)] = parameters[\"W\"+str(i+1)] - learning_rate * grads[\"dW\"+str(i+1)]\n parameters[\"b\"+str(i+1)] = parameters[\"b\"+str(i+1)] - learning_rate * grads[\"db\"+str(i+1)]\n\n return parameters", "def get_optimization_parameters(self, friendly=True):\n #Get the sensitivities task:\n sensTask=self._getTask('optimization')\n sensProblem = sensTask.find(xmlns + 'Problem')\n optimizationItems = sensProblem.find(xmlns + 'ParameterGroup')\n parameters = []\n for subGroup in optimizationItems:\n name = None\n lowerBound = None\n upperBound = None\n startValue = None\n \n for item in subGroup:\n if item.attrib['name'] == 'ObjectCN':\n name = item.attrib['value']\n elif item.attrib['name'] == 'UpperBound':\n upperBound = item.attrib['value']\n elif item.attrib['name'] == 'LowerBound':\n lowerBound = item.attrib['value']\n elif item.attrib['name'] == 
'StartValue':\n startValue = item.attrib['value']\n assert name !=None\n assert lowerBound != None\n assert upperBound != None\n assert startValue != None\n \n if friendly:\n #Construct a user-friendly name for the parameter name using regexs\n #Look for a match for global parameters: Vector=Values[Test parameter],\n values_string = r'.*Vector=Values\\[(?P<name>.*)\\].*'\n values_string_re = re.compile(values_string)\n values_match = re.match(values_string_re, name)\n \n if values_match:\n name = 'Values[' + values_match.group('name') + ']'\n \n else:\n #else check for a parameter match.\n #Vector=Reactions[Reaction] Parameter=k1\n parameter_string = r'.*Vector=Reactions\\[(?P<reaction>.*)\\].*Parameter=(?P<parameter>.*),Reference=Value.*'\n parameter_string_re = re.compile(parameter_string)\n parameter_match = re.match(parameter_string_re, name)\n \n if parameter_match:\n reaction = parameter_match.group('reaction')\n parameter = parameter_match.group('parameter')\n name = '(%s).%s'%(reaction, parameter)\n \n else:\n #Try again, this time looking for a string like: Vector=Metabolites[blah]\n metabolites_string = r'.*Vector=Metabolites\\[(?P<name>.*)\\].*'\n metabolites_string_re = re.compile(metabolites_string)\n metabolites_match = re.match(metabolites_string_re, name)\n if metabolites_match:\n name = 'Metabolites[' + metabolites_match.group('name') + ']'\n\n parameters.append((name, lowerBound, upperBound, startValue))\n\n return parameters", "def trainable_params(model, feature_extract):\n params_to_update = model.parameters()\n print(\"Params to learn:\")\n if feature_extract:\n params_to_update = []\n for name, param in model.named_parameters():\n if param.requires_grad == True:\n params_to_update.append(param)\n print(\"\\t\", name)\n else:\n for name, param in model.named_parameters():\n if param.requires_grad == True:\n print(\"\\t\", name)\n return params_to_update", "def parameters(self):", "def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)", "def get_param_scenario3():\n nb_carre_x = 42\n nb_carre_y = 45\n largeur_x = 0.825\n largeur_y = 0.645\n z1 = np.array([[11], [32], [17.26]])\n z2 = np.array([[12], [32], [18.43]])\n z3 = np.array([[13], [32], [23.2]])\n z4 = np.array([[15], [31], [23.4]])\n z5 = np.array([[16], [30], [23.29]])\n z6 = np.array([[18], [29], [22.28]])\n z7 = np.array([[19], [28], [35.79]])\n z8 = np.array([[20], [27], [36.87]])\n z9 = np.array([[21], [26], [33.92]])\n z10 = np.array([[23], [24], [38.11]])\n z11 = np.array([[24], [23], [37.76]])\n z12 = np.array([[25], [22], [45.6]])\n z13 = np.array([[26], [20], [56.4]])\n z14 = np.array([[27], [18], [55.2]])\n z15 = np.array([[28], [16], [57.53]])\n z16 = np.array([[29], [14], [58.64]])\n z17 = np.array([[30], [13], [66.04]])\n z18 = np.array([[30], [11], [70.02]])\n z19 = np.array([[31], [9], [69.64]])\n z20 = np.array([[31], [6], [68.82]])\n\n meas = [z1, z2, z3, z4, z5, z6, z7, z8, z9, z10, z11, z12, z13, z14, z15, z16, z17, z18, z19, z20]\n\n z = [np.array([[x[0][0] * largeur_x / nb_carre_x], [x[1][0] * largeur_y / nb_carre_y], [-x[2][0] * np.pi / 180]])\n for x in meas]\n\n # We take a measure every second\n thymio_speed_to_mms = 0.4753\n Ts = 1\n\n Thymio_speed_left = [73, 92, 94, 102, 100, 89, 92, 103, 97, 100, 100, 104, 95, 97, 103, 101, 100, 101, 92, 98]\n Thymio_speed_right = [67, 96, 102, 89, 105, -45, 102, 101, 104, 71, 100, 93, 94, 105, 96, -45, 95, 103, 88, 94]\n delta_sr_test = [x * Ts / thymio_speed_to_mms / 1000 for x in 
Thymio_speed_right]\n delta_sl_test = [x * Ts / thymio_speed_to_mms / 1000 for x in Thymio_speed_left]\n\n return delta_sr_test, delta_sl_test, z", "def def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf", "def fit():\n pass", "def create_parameters(X, parameters):\n\n parameters['Car_Constant'] = X[:, 0].tolist()\n parameters['Walk_Constant'] = X[:, 1].tolist()\n parameters['PT_Constant'] = X[:, 2].tolist()\n\n return parameters", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'cluttered_nist_ix1',\n # 'curv_baseline',\n ]\n }\n exp['data_augmentations'] = [\n [\n 'grayscale',\n 'center_crop',\n # 'left_right',\n # 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'zero_one'\n ]]\n exp['val_augmentations'] = [\n [\n 'grayscale',\n 'center_crop',\n # 'left_right',\n # 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'zero_one'\n ]]\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 4\n exp['model_name'] = 'unet'\n exp['exp_name'] = exp['model_name'] + '_' + exp['dataset'][0]\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 200\n exp['shuffle_val'] = True # Shuffle val data.\n exp['shuffle_train'] = True\n return exp", "def set_parameters(api_name='',\r\n targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='large',\r\n loss_type='triplet',\r\n dataset_type='vgg',\r\n target_model='large',\r\n target_loss='center',\r\n target_dataset='VGG',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=20,\r\n binary_steps=5,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=15.0,\r\n amplification=6.0,\r\n granularity='normal',\r\n whitebox_target=False,\r\n pair_flag='false'):\r\n \r\n params = {}\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['target_model'] = target_model\r\n params['target_loss'] = target_loss\r\n params['target_dataset'] = target_dataset\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['test_dir'] = TEST_DIR\r\n params['full_dir'] = FULL_DIR\r\n params['whitebox_target'] = whitebox_target\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['pair_flag'] = string_to_bool(pair_flag)\r\n params['api_name'] = api_name\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if dataset_type == 'vggsmall' and not whitebox_target:\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must 
be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n ValueError('ValueError: Super interpolation not yet implemented.')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if granularity == 'fine':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 20.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.2)\r\n elif granularity == 'normal':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 10.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.5)\r\n elif granularity == 'coarse':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 5.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 1.0)\r\n elif granularity == 'coarser':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.2)\r\n elif granularity == 'coarsest':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 1.0)\r\n elif granularity == 'single':\r\n params['margin_list'] = np.array([margin])\r\n params['amp_list'] = np.array([amplification])\r\n elif granularity == 'fine-tuned':\r\n params['margin_list'] = np.arange(10.0, margin, 1.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.2)\r\n elif granularity == 'coarse-single':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.array([1.0])\r\n elif granularity == 'api-eval':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.8)\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [fine, normal, coarse, coarser, single].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['target_model_name'] = '{}_{}_{}'.format(target_model, target_loss, target_dataset)\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n params['directory_path'] = os.path.join(ROOT,\r\n OUT_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/full'.format(params['attack_loss']))\r\n params['directory_path_crop'] = os.path.join(ROOT,\r\n OUT_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/crop'.format(params['attack_loss']))\r\n params['directory_path_npz'] = os.path.join(ROOT,\r\n OUT_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/npz'.format(params['attack_loss']))\r\n params['api_path'] = os.path.join(ROOT,\r\n API_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n 
'{}_loss/npz'.format(params['attack_loss']))\r\n if params['mean_loss'] == 'embedding':\r\n params['directory_path'] += '_mean'\r\n params['directory_path_crop'] += '_mean'\r\n params['directory_path_npz'] += '_mean'\r\n params['api_path'] += '_mean'\r\n\r\n return params", "def fit(self, inputs: list) -> 'BasePreprocessor':", "def set_parameters(pars):\n y0=[]\n fun=None \n state_evol=None\n if pars[\"state_law\"]==0:\n state_evol=state_evol_d\n elif pars[\"state_law\"]==1:\n state_evol=state_evol_r\n elif pars[\"state_law\"]==2:\n state_evol=state_evol_p\n elif pars[\"state_law\"]==3:\n state_evol=state_evol_n\n \n if pars[\"model\"]==0:\n y0 = [pars[\"Vpl\"]*0.9,0.1,pars[\"sigma1\"]]\n fun = fun_qds\n damping = pars[\"nu\"]\n \n if pars[\"model\"]==1:\n y0 = [pars[\"Vpl\"]*0.9, 0.1,pars[\"sigma1\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fds\n damping = pars[\"m\"]\n\n if pars[\"model\"]==2:\n y0 = [pars[\"Vpl\"]*0.99,pars[\"Vpl\"], pars[\"Vpl\"],0.1,pars[\"sigma1\"],pars[\"sigma2\"]]\n fun= fun_qdc\n damping = pars[\"nu\"]\n\n if pars[\"model\"]==3:\n y0 = [pars[\"Vpl\"]*1.1,pars[\"Vpl\"], pars[\"Vpl\"],0.0,pars[\"sigma1\"],pars[\"sigma2\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fdc\n damping = pars[\"m\"]\n\n return (np.array(y0), state_evol, fun, damping)", "def fit(self, X):", "def expand_params(params):\n cv_params = []\n param_pool = unpack_cv_parameters(params)\n\n for i in list(itertools.product(*param_pool)):\n d = copy.deepcopy(params)\n name = d['name']\n for j in i:\n dict_set_nested(d, j[0].split(\".\"), j[1])\n name += \"_\" + j[0] + \"_\" + str(j[1])\n d['name'] = name.replace('.args.', \"_\")\n d = convert_tuples_2_list(d)\n cv_params.append(d)\n if not cv_params:\n return [params] * params['num_runs']\n\n gs_params = []\n for p in cv_params:\n gs_params += [p] * p['num_runs']\n return gs_params", "def get_1x_lr_params(model):\n b = [model.xception_features]\n for i in range(len(b)):\n for k in b[i].parameters():\n if k.requires_grad:\n yield k", "def optimiseParameters(path_to_aug, species, path_to_training):\n\n\ttraining = {}\n\tcmd = \"{}/scripts/optimize_augustus.pl --species={} --metapars={}/config/species/{}/{}_metapars.cfg --aug_exec_dir={}/bin --AUGUSTUS_CONFIG_PATH={}/config {}/output/trainingSet.gb\" \\\n\t.format(path_to_aug, species, path_to_aug, species, species, \\\n\tpath_to_aug, path_to_aug, path_to_training)\n\te = subprocess.check_call(cmd, shell=True)", "def config_params1(parameter):\n\n p = parameter['p']\n q = parameter['q']\n d = parameter['d']\n m = parameter['m']\n pdq_m = list(itertools.product(p, d, q,m)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def _get_fitted_params(self):\n return {}", "def model(data_x, parameters):\n return data_x @ parameters", "def initialize_parameters():\n\n W1 = tf.get_variable('W1', [3,3,3,64], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W2 = tf.get_variable('W2', [3,3,64,128], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W3 = tf.get_variable('W3', [3,3,128,256], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W4 = tf.get_variable('W4', [3,3,256,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W5 = tf.get_variable('W5', [3,3,512,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"W2\": W2,\n \"W3\": W3,\n \"W4\": W4,\n \"W5\": W5\n }\n\n return parameters", 
"def test_param(self):\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedLinearDML(model_y=automl_model_reg(),\n model_t=GradientBoostingClassifier(),\n featurizer=None,\n discrete_treatment=True)\n est.fit(Y, T, X=X)\n _ = est.effect(X)", "def BestFit(self,initialParameterValues=None, method=None , fixedParams=None):\n\n if fixedParams:\n if not isinstance(fixedParams, list):\n fixedParams=[fixedParams]\n #Check now if the name is correct\n l_index=[]\n for index, par in enumerate(fixedParams):\n pName, pValue = par\n if pName not in self.theory.parameterNameList0:\n print \"%s is not a valid name. Ignored\" %pName\n l_index.append(index)\n if l_index:\n for i in l_index:\n fixedParams.pop(i)\n self.SetFixedParams(fixedParams)\n\n if initialParameterValues is None:\n initialParameterValues = self.theory.initialParameterValues\n #d = numpy.ones(len(initialParameterValues))\n start_time = time.time()\n if method is None or method == 'lm':\n out = minpack.leastsq(self.Residual,initialParameterValues,full_output=1, ftol=1.e-16)\n elif method == 'boldAccel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = BoldAccel.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n elif method == 'bold':\n initialParameterValues = numpy.array(initialParameterValues)\n out = Bold.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n #out = minpack.leastsq(self.Residual,self.AnalyJac,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,Cgoal=4.e04)\n elif method == 'lm_accel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = numrec.leastsq(self.Residual,self.AnalyJac,initialParameterValues,full_output=1,verbose=True, flags=[],maxfev=500)\n else:\n print \"fitting method is not included\"\n end_time = time.time()\n print \"fitting took time (mins): \", (end_time-start_time)/60.\n print \"number of function_calls:\", f_counter\n \n if fixedParams:\n outputParameterValues = self.MergeFixedAndVariableParams(fixedParams,out[0])\n else:\n outputParameterValues = out[0]\n\n return outputParameterValues, out", "def build_parameters(pobj):\n ViscosityWilke.build_parameters(pobj)", "def _get_trainable_params(model):\n trainable_params = []\n for x in model.parameters():\n if x.requires_grad:\n trainable_params.append(x)\n return trainable_params", "def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])", "def get_1x_lr_params(model):\r\n\r\n for name, param in model.named_parameters():\r\n if 'weight' in name and param.requires_grad:\r\n # print (name, param.data)\r\n\r\n yield param", "def one_experiment():\n\n # set the name of the experiment\n now = datetime.datetime.now()\n 
experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'overfit_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n\n # define the changing parameter and its value\n changing_param_name = 'class_weights'\n changing_param_value = [{0: 15, 1: 85}]\n # {0:15, 1:85}]#, {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n features_to_use = ['user', 'countries', 'session', 'format', 'token']\n # set constant parameters\n set_params(use_word_emb=1)\n set_params(epochs=40)\n set_params(features_to_use=features_to_use)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**3)), \"KB\")\n\n # update the parameter value\n set_params(class_weights_1=value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name,\n new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()", "def get_optimization_parameters(self):\n pass", "def tune_parameters(self, model, param_set, train, predictor_var, target_var):\n \n grid_search = GridSearchCV(estimator = model, param_grid = param_set,n_jobs=-1, cv=5)\n grid_search.fit(train[predictor_var],train[target_var])\n \n print(grid_search.best_params_, grid_search.best_score_)\n \n return grid_search.best_params_", "def BestFit(self,initialParameterValues = None, method = None, fixedParams=None):\n\n if fixedParams:\n if not isinstance(fixedParams, list):\n fixedParams=[fixedParams]\n #Check now if the name is correct\n l_index=[]\n for index, par in enumerate(fixedParams):\n pName, pValue = par\n if pName not in self.theory.parameterNameList0:\n print \"%s is not a valid name. 
Ignored\" %pName\n l_index.append(index)\n if l_index:\n for i in l_index:\n fixedParams.pop(i)\n\n self.theory.SetFixedParams(fixedParams)\n\n if initialParameterValues is None:\n initialParameterValues = self.theory.initialParameterValues\n #d = numpy.ones(len(initialParamaeterValues))\n start_time = time.time()\n if method is None or method == 'lm':\n out = scipy.optimize.minpack.leastsq(self.Residual,initialParameterValues,full_output=1, ftol=1.e-16)\n elif method == 'boldAccel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = BoldAccel.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n elif method == 'bold':\n initialParameterValues = numpy.array(initialParameterValues)\n out = Bold.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n #out = minpack.leastsq(self.Residual,self.AnalyJac,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,Cgoal=4.e04)\n elif method == 'lm_accel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = numrec.leastsq(self.Residual,self.AnalyJac,initialParameterValues,full_output=1,verbose=True, flags=[],maxfev=500)\n else:\n print \"fitting method is not included\"\n out = None\n end_time = time.time()\n print \"fitting took (mins)\", (end_time-start_time)/60.\n print \"number of function evals:\", f_counter\n \n if fixedParams:\n outputParameterValues = self.MergeFixedAndVariableParams(fixedParams,out[0])\n self.theory.SetFixedParams()\n else:\n outputParameterValues = out[0]\n\n\n return outputParameterValues, out", "def define_parameters(self):", "def params_refactoring(_params):\n _params['wavelength'] = 1e-9 * 299792458 / _params['ms_nu']\n\n return _params", "def potential_parameters(cls):\n raise NotImplementedError()", "def __init__(self, parameters: ParametersList, algorithm: ClassVar, algorithm_data: AlgorithmData):\n super(GreedyTrain, self).__init__(parameters, algorithm, algorithm_data)", "def _get_fitted_param_names(self):\n return self._fitted_param_names", "def params():\n raise NotImplementedError", "def stimulus_params(**kwargs):\n pars = {'rate' : 1.,\n 'twin' : [0.,10.],\n 'stimulus' : 'poisson',\n 'spikes_pre': None,\n 'Nsyn' : 1,\n 'Nvar' : 2,\n # GRE parameters\n 'rate_gtr' : None,\n 'twin_gtr' : None,\n 'stimulus_gtr' : None,\n 'pre_gtr' : None\n }\n\n ## User-defined parameters\n pars = gu.varargin(pars,**kwargs)\n # Parameters must be floats\n for k,item in pars.iteritems():\n if k in ['Nsyn','Nvar']:\n pars[k] = int(item)\n elif k=='rate':\n if np.isscalar(item):\n pars[k] = float(item)\n else:\n pars[k] = np.array(item,dtype=float)\n elif k=='twin':\n if not np.isscalar(pars['rate']):\n assert len(pars['rate'])==len(item), \"twin must be of the same size of rate\"\n pars[k] = np.array(item, dtype=float)\n elif (k=='rate_gtr') and (item!=None):\n if np.isscalar(item):\n pars[k] = float(item)\n else:\n pars[k] = np.array(item,dtype=float)\n elif (k=='twin_gtr') and (item!=None):\n if (not pars['rate_gtr']) and (not np.isscalar(pars['rate_gtr'])):\n assert len(pars['rate_gtr'])==len(item), \"twin_gtr must be of the same size of rate_gtr\"\n pars[k] = np.array(item, dtype=float)\n return pars", "def set_parameters(self):\n params = {}\n if self.modelname == 'SI':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after splot\n # Ts: Time from split to present, in 2*Na generation units\n names = ['N1', 'N2', 'Ts']\n values = [1, 1, 1]\n 
upper_bounds = [20, 20, 10]\n lower_bounds = [0.01, 0.01, 0]\n elif self.modelname == 'IM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Ts']\n values = [1, 1, 1, 1, 1]\n upper_bounds = [20, 20, 20, 20, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0]\n elif self.modelname == 'AM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Tam', 'Ts']\n values = [1, 1, 1, 1, 0.1, 1]\n upper_bounds = [20, 20, 20, 20, 2, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'SC':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n names = ['N1', 'N2', 'm21', 'm12', 'Ts', 'Tsc']\n values = [1, 1, 1, 1, 1, 0.1]\n upper_bounds = [20, 20, 20, 20, 10, 2]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'IM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'AM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Tam', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 0.1, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 2, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'SC2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'Tsc', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 2, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n params['Names'] = names\n params['Values'] = values\n params['Upper'] = upper_bounds\n params['Lower'] = lower_bounds\n return params", "def select_params( model, 
contains=[]):\n names = []\n for name, param in model.named_parameters():\n param.requires_grad = False\n if any(name.find(c) != -1 for c in contains):\n param.requires_grad = True\n names.append([name,param.numel()])\n\n total = sum(p.numel() for p in model.parameters()) \n p_str = \"\\nParameters: \\n\\nTotal: {} \\n\\nTrained: {} \\n\"\n print(p_str.format(total,sum([p for n,p in names])))\n for n,t in names: print(n,t)\n print(\"\\n\")", "def add_params(traj):\n\n # We set the BrianParameter to be the standard parameter\n traj.v_standard_parameter=Brian2Parameter\n traj.v_fast_access=True\n\n # Add parameters we need for our network\n traj.f_add_parameter('Net.C',281*pF)\n traj.f_add_parameter('Net.gL',30*nS)\n traj.f_add_parameter('Net.EL',-70.6*mV)\n traj.f_add_parameter('Net.VT',-50.4*mV)\n traj.f_add_parameter('Net.DeltaT',2*mV)\n traj.f_add_parameter('Net.tauw',40*ms)\n traj.f_add_parameter('Net.a',4*nS)\n traj.f_add_parameter('Net.b',0.08*nA)\n traj.f_add_parameter('Net.I',.8*nA)\n traj.f_add_parameter('Net.Vcut','vm > 0*mV') # practical threshold condition\n traj.f_add_parameter('Net.N',50)\n\n eqs='''\n dvm/dt=(gL*(EL-vm)+gL*DeltaT*exp((vm-VT)/DeltaT)+I-w)/C : volt\n dw/dt=(a*(vm-EL)-w)/tauw : amp\n Vr:volt\n '''\n traj.f_add_parameter('Net.eqs', eqs)\n traj.f_add_parameter('reset', 'vm=Vr;w+=b')", "def _define_SDSS_fit_params(self):\n\t\tself.a = 1.4335\n\t\tself.b = 0.3150 \n\t\tself.c = -8.8979\n\t\tself.intrinsic_scatter = 0.0578\n\t\t#self.delta_a = 0.02\n\t\t#self.delta_b = 0.01", "def learn_params(self, measurements, true_ranges):\n z_hit,z_short,z_max,z_rand,var_hit,lambda_short= self.params\n pre_params=[z_hit,z_short,z_max,z_rand,var_hit,lambda_short]\n updated_params=[-1,-1,-1,-1,-1,-1]\n while np.max(np.abs(np.array(updated_params) - np.array(pre_params))) > 1e-6:\n\n e_hit, e_short, e_max, e_rand = [], [], [], []\n for i in range(len(measurements)):\n true_range, measurement = true_ranges[i], measurements[i]\n p_hit = self.PHit(true_range, measurement,var_hit)\n p_short = self.PShort(true_range, measurement,lambda_short)\n p_max = self.PMax(measurement)\n p_rand = self.PRand(measurement)\n normalizer = 1.0 / (p_hit + p_short + p_max + p_rand)\n e_hit.append(normalizer * p_hit)\n e_short.append(normalizer * p_short)\n e_max.append(normalizer * p_max)\n e_rand.append(normalizer * p_rand)\n e_hit, e_short, e_max, e_rand = np.array(e_hit), np.array(e_short), np.array(e_max), np.array(e_rand)\n\n # perform M step\n pre_params = [z_hit, z_short, z_max, z_rand, var_hit,lambda_short]\n z_hit = sum(e_hit) / len(measurements)\n z_short = sum(e_short) / len(measurements)\n z_max = sum(e_max)/ len(measurements)\n z_rand = sum(e_rand) / len(measurements)\n var_hit = np.sqrt(1.0 / np.sum(e_hit) * np.sum(e_hit * (np.array(measurements)-np.array(true_ranges))**2)).item()\n lambda_short = (np.sum(e_short) / np.sum(e_short * np.array(measurements))).item()\n updated_params = [z_hit, z_short, z_max, z_rand, var_hit, lambda_short]\n print('origin',self.params)\n print('updated',updated_params)\n return updated_params", "def model_2_parameters(num_features, num_classes):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n \n return parameters", "def check_parameters():\r\n for par in PARAM:\r\n if isinstance(par, ExperimentFrame):\r\n EXP.change_variable(**par())\r\n else:\r\n EXP.change_variable(**par)", "def mes_fabolas_helper(X, parameters, output_file):\n\n fidelity_feature = parameters['Fidelity_Feature']\n chosen_fidelity = 
X[:, -1]\n\n if fidelity_feature == 'Population':\n parameters['Population_Fraction'] = chosen_fidelity.tolist()\n else:\n parameters['Iterations'] = chosen_fidelity.tolist()\n\n parameters = create_parameters(X, parameters)\n\n return parameters", "def put_trainable_parameters(net,X):\n trainable=filter(lambda p: p.requires_grad, net.parameters())\n paramlist=list(trainable)\n offset=0\n for params in paramlist:\n numel=params.numel()\n with torch.no_grad():\n params.data.copy_(X[offset:offset+numel].data.view_as(params.data))\n offset+=numel", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def generate_parameter_list(self) -> None:\n\n # simulation parameters from model\n model_parameter_ids = np.array(self.amici_model.getParameterIds())\n write_string_array(self.f, \"/parameters/modelParameterNames\",\n model_parameter_ids)\n print(Fore.CYAN + \"Number of model parameters:\",\n len(model_parameter_ids))\n\n print(Fore.CYAN + \"Number of optimization parameters:\",\n len(self.parameter_df))\n write_string_array(self.f, \"/parameters/parameterNames\",\n self.parameter_df.index.values[\n (self.parameter_df.estimate == 1)\n & ~self.parameter_df.index.isin(\n self.amici_model.getFixedParameterIds())])\n\n self.generate_simulation_to_optimization_parameter_mapping()\n\n self.f.flush()", "def _training_params(self):\n if isinstance(\n self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem) and self.search_controller:\n # For a feedback system, we train both the Lyapunov network\n # parameters and the controller network parameters.\n training_params = list(\n self.lyapunov_hybrid_system.lyapunov_relu.parameters(\n )) + self.lyapunov_hybrid_system.system.controller_variables(\n ) + self.R_options.variables()\n else:\n training_params = \\\n list(self.lyapunov_hybrid_system.lyapunov_relu.parameters()) +\\\n self.R_options.variables()\n return training_params", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def parameter_list(self):\n return [\n [encut, kpoint_mesh]\n for encut, kpoint_mesh in zip(\n self._job.iteration_frame.ENCUT, self._job.iteration_frame.KPOINT_MESH\n )\n ]", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def update_model_parameters(parameters, grads, learning_rate):\n L = len(parameters) /2 # number of layers in the neural network\n\n for l in range(int(L)):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n return parameters\n # raise NotImplementedError", "def __init__(self, population=25, initSampling='lhc', fracMutation=0.2, fracElite=0.2, fracLevy=1.0, alpha=0.5, gamma=1, n=1, scalingFactor=10.0, penalty=0.0, maxGens=20000, maxFevals=200000, convTol=1e-06, stallLimit=10000, optConvTol=0.01, **kwargs):\n ProblemParameters_multi.__init__(self, **kwargs)\n self.population = population\n self.initSampling = initSampling\n self.fracMutation = fracMutation\n assert self.fracMutation >= 0 and self.fracMutation <= 1, 'The probability of discovery must exist on (0,1]'\n self.fracElite = fracElite\n assert self.fracElite >= 0 and self.fracElite <= 1, 'The elitism fraction must exist on (0,1]'\n self.fracLevy = fracLevy\n assert self.fracLevy >= 0 and self.fracLevy <= 1, 'The probability that a Levy flight is performed must exist on (0,1]'\n self.alpha = alpha\n self.gamma = gamma\n 
self.n = n\n self.scalingFactor = scalingFactor\n self.penalty = penalty\n self.maxGens = maxGens\n self.maxFevals = maxFevals\n self.convTol = convTol\n self.stallLimit = stallLimit\n self.optConvTol = optConvTol", "def source_individual_params(targ_ind):\n # # Note - Table S6 lists beta1, beta2, pi1, pi2 rather than beta_T, beta_S, pi_T, pi_S.\n # # This seems to be a typo since I can reproduce their curves.\n # all_params = [[21.45e-6, 0.86, 3.68, 0.17e-7, 2.2, 10.89, 14.7, 8.21, 0.06, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [1.31e-6, 1.82, 15.53, 0.8e-7, 2.18, 2.46, 15, 8.44, 0.18, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [13.35e-6, 1.16, 11.61, 2.63e-7, 4.17, 1.67, 6.5, 7.92, 0, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [2.4e-6, 3.55, 11.53, 1.35e-7, 1.6, 1.7, 15.7, 10.99, 2.4, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [1.41e-6, 1.42, 12.47, 1.06e-7, 2.17, 1.08, 22, 8.21, 0, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [6.94e-6, 0.76, 5.89, 0.17e-7, 3.33, 10.34, 17.3, 8.79, 0.15, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [18.21e-6, 0.38, 8.74, 9.19e-7, 0.41, 0.15, 17.85, 9, 0.22, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [5.12e-6, 3.53, 4.5, 4.9e-7, 2.04, 1.64, 8.3, 6.89, 1.89, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [1.53e-6, 4.06, 9.65, 0.29e-7, 3.96, 8.15, 17.11, 9.47, 0.66, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # ]\n\n # I've modified it so that I10 is instead VT0, the initial (swabbable) virions from the URT.\n all_params = [[21.45e-6, 0.86, 3.68, 0.17e-7, 2.2, 10.89, 14.7, 8.21, 0.06, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [1.31e-6, 1.82, 15.53, 0.8e-7, 2.18, 2.46, 15, 8.44, 0.18, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [13.35e-6, 1.16, 11.61, 2.63e-7, 4.17, 1.67, 6.5, 7.92, 0, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [2.4e-6, 3.55, 11.53, 1.35e-7, 1.6, 1.7, 15.7, 10.99, 2.4, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [1.41e-6, 1.42, 12.47, 1.06e-7, 2.17, 1.08, 22, 8.21, 0, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [6.94e-6, 0.76, 5.89, 0.17e-7, 3.33, 10.34, 17.3, 8.79, 0.15, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [18.21e-6, 0.38, 8.74, 9.19e-7, 0.41, 0.15, 17.85, 9, 0.22, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [5.12e-6, 3.53, 4.5, 4.9e-7, 2.04, 1.64, 8.3, 6.89, 1.89, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [1.53e-6, 4.06, 9.65, 0.29e-7, 3.96, 8.15, 17.11, 9.47, 0.66, 4e6, 4.8e8, 1, 10, 4, 0.001],\n ]\n currparam = all_params[targ_ind]\n return currparam", "def fit(train_data, train_target):\r\n for name in models.keys():\r\n est = models[name]\r\n est_params = params2[name]\r\n gscv = GridSearchCV(estimator=est, param_grid=est_params, cv=5)\r\n gscv.fit(train_data, train_target)\r\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\r\n print(\"Where we selected the parameters: {}\" .format(gscv.cv_results_['params'][gscv.best_index_]))\r\n print(\"with mean cross-validated score: {}\" .format(gscv.best_score_))", "def generate_grid_search(model: KerasClassifier, model_params: dict,\n fit_params: dict) -> Tuple[List[Model], List[dict]]:\n\n models = []\n fit_parameters = []\n keys = []\n values = []\n is_for_fit = []\n\n # fill keys' and values' lists with model parameters\n for key_model, value_model in model_params.items():\n keys.append(key_model)\n values.append(value_model)\n is_for_fit.append(False)\n\n # fill keys' and values' lists with fit parameters\n for key_fit, value_fit in fit_params.items():\n keys.append(key_fit)\n values.append(value_fit)\n is_for_fit.append(True)\n\n # generate all possible combinations of parameters\n for values_list in product(*values):\n\n learner = copy.deepcopy(model)\n\n # uniquely define model structure\n model_params_values = [values_list[i] for i in 
range(0, len(values_list)) if is_for_fit[i] is False]\n model_params_keys = [keys[i] for i in range(0, len(keys)) if is_for_fit[i] is False]\n model_params_dict = dict(zip(model_params_keys, model_params_values))\n learner = learner.build_fn(**model_params_dict)\n\n # uniquely define fit function\n fit_params_values = [values_list[i] for i in range(0, len(values_list)) if is_for_fit[i] is True]\n fit_params_keys = [keys[i] for i in range(0, len(keys)) if is_for_fit[i] is True]\n fit_params_dict = dict(zip(fit_params_keys, fit_params_values))\n functools.partial(learner.fit, **fit_params_dict)\n\n models.append(learner)\n fit_parameters.append(fit_params_dict)\n\n return models, fit_parameters", "def gen_params(no_cultures):\n # Plate level\n kn = 0.1 # Nutrient diffusion\n ks = 0.1 # Signal diffusion\n b = 0.05 # Signal on cells effect constant\n a = 0.05 # Signal secretion constant\n # Culture level\n # Growth rate constant\n r_mean = 1.0\n r_var = 1.0\n r_params = [max(0.0, gauss(r_mean, r_var)) for i in range(no_cultures)]\n params = np.array([kn, ks, b, a] + r_params)\n return params", "def __init__(self, params: Iterable[nn.Parameter]):\n self.params = params\n self.param_states = [p.requires_grad for p in self.params]", "def parameter_grid_search(y, tx, fit_function, score_function, ff_params={}, seed=1, k_fold=5,\n verbose=False, **ff_fixed_params):\n\n combinations = product(*ff_params.values())\n results = []\n\n for i, params in enumerate(combinations):\n kwargs = {param: value for param, value in zip(ff_params.keys(), params)}\n score = cross_validation(y, tx, k_fold, fit_function, score_function, seed=seed,\n **kwargs, **ff_fixed_params)\n\n results.append({\"params\": kwargs, \"score\": score})\n\n if verbose:\n print(f\"Parameter combination {i}:\")\n print(f\"\\tParams: {kwargs}\")\n print(f\"\\tScore: {score}\")\n\n return sorted(results, key=lambda x: x[\"score\"])", "def model_1_parameters(num_features, num_classes, image_info):\n parameters = {}\n parameters['n1'] = num_features\n parameters['k1'] = int(math.floor(parameters['n1'] / 9))\n parameters['n2'] = parameters['n1'] - parameters['k1'] + 1\n if image_info['key'][:5] == \"pavia\":\n parameters['n3'] = 30\n parameters['k2'] = 3\n else:\n parameters['n3'] = 40\n parameters['k2'] = 5\n parameters['n4'] = 100\n parameters['n5'] = num_classes\n \n return parameters", "def step6_set_gan_params(params):\n global GAN_PARAMS\n GAN_PARAMS = {**GAN_PARAMS, **params}", "def fit(\n self, parameters: NDArrays, config: Dict[str, Scalar]\n ) -> Tuple[NDArrays, int, Dict[str, Scalar]]:\n _ = (self, parameters, config)\n return [], 0, {}", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def _define_SLACS_fit_params(self):\n\t\t# Fit params from R_eff\n\t\tself.a = -0.41\n\t\tself.b = 0.39\n\t\t#self.delta_a = 0.12\n\t\t#self.delta_b = 0.10\n\t\tself.intrinsic_scatter = 0.14\n\t\t# Fit params from vel_disp\n\t\tself.a_v = 0.07\n\t\tself.b_v = -0.12\n\t\tself.int_v = 0.17", "def __init__(self, params, fitness, population_size=20, generations=20, temperature_factor=0.9):\n self.params = params\n self.fitness = fitness\n self.population_size = population_size\n self.generations = generations\n self.temperature_factor = temperature_factor\n\n self.population = []", "def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 
2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)]-learning_rate* grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)]-learning_rate* grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters", "def hyper_parameter_tuning(X, y, classifier, models, sntypes_map, feature_names, fig_dir='.', remove_models=(), name=''):\n\n # Hyperparameter grid\n n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\n max_features = ['auto', 'sqrt']\n max_depth = [int(x) for x in np.linspace(10, 110, num=11)]\n max_depth.append(None)\n min_samples_split = [2, 5, 10]\n min_samples_leaf = [1, 2, 4]\n bootstrap = [True, False]\n random_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n # Get data\n num_features = X.shape[1]\n model_names = [sntypes_map[model] for model in models]\n X, y, models, remove_models = remove_redundant_classes(X, y, models, remove_models)\n\n # Get best features\n n = 50\n num_features, feature_names, X = get_n_best_features(n, X, y, classifier, feature_names, num_features, fig_dir, name, models, model_names)\n\n # Randomised Search\n clf_random = RandomizedSearchCV(estimator=classifier, param_distributions=random_grid, n_iter=7, cv=3, verbose=2,\n random_state=42, n_jobs=2)\n clf_random.fit(X, y)\n print(clf_random.best_params_)\n\n def evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('Model Performance')\n print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n return accuracy\n\n best_random = clf_random.best_estimator_\n # random_accuracy = evaluate(best_random, test_features, test_labels)", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def model_3_parameters(num_features, num_classes, image_info):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n parameters['n_estimators'] = image_info['n_estimators']\n min_child_samples = image_info['min_child_samples']\n parameters['min_child_samples'] = min_child_samples\n \n # Parameters message\n with open(OUTPUT_FILE, 'a') as f:\n f.write(\"min_child_samples: 
{}\\n\\n\".format(min_child_samples))\n \n return parameters" ]
[ "0.6610045", "0.65775573", "0.6563652", "0.6521646", "0.646521", "0.6455579", "0.63229144", "0.63210595", "0.63088965", "0.629318", "0.6284352", "0.6191725", "0.61914194", "0.60860085", "0.6085613", "0.60815185", "0.60616034", "0.6052334", "0.6052334", "0.6052334", "0.6023257", "0.5992335", "0.5957211", "0.5929541", "0.5894867", "0.5884702", "0.58454865", "0.584223", "0.58362436", "0.58165574", "0.58023673", "0.580047", "0.5797893", "0.5795321", "0.5779361", "0.5770755", "0.57674474", "0.5755759", "0.5735632", "0.573522", "0.57270527", "0.5722954", "0.57111424", "0.5705349", "0.56825477", "0.56725425", "0.5666446", "0.56642413", "0.5663388", "0.56615645", "0.56569636", "0.56552285", "0.5651863", "0.56473035", "0.5634126", "0.5633792", "0.5631065", "0.562488", "0.56127614", "0.5604299", "0.560395", "0.5601653", "0.5596862", "0.55955386", "0.5593043", "0.5585504", "0.5578342", "0.5577254", "0.55726296", "0.5556531", "0.5553852", "0.5552027", "0.55493003", "0.5548012", "0.55374944", "0.55313116", "0.55280876", "0.551912", "0.5514505", "0.5512423", "0.5507359", "0.5505671", "0.55036616", "0.5489682", "0.5484106", "0.547881", "0.54785514", "0.5468003", "0.54571664", "0.54556334", "0.54517466", "0.5448485", "0.54473436", "0.54464704", "0.54456705", "0.5445578", "0.54374987", "0.5433507", "0.54256684", "0.5421985", "0.5414501" ]
0.0
-1