Dataset columns (as summarized by the dataset viewer):

Column      Type                 Lengths
query       string               9 to 9.05k characters
document    string               10 to 222k characters
negatives   sequence of string   19 to 20 items
metadata    dict                 n/a
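Each row pairs a natural-language query with the code snippet it describes (document), a list of non-matching snippets (negatives), and a metadata dict describing the training objective. The sketch below shows one way such rows could be loaded and inspected with the Hugging Face datasets library; the repository id is a placeholder, since the actual dataset name is not given here.

# A minimal sketch, assuming the rows follow the schema above and the dataset
# is published on the Hugging Face Hub; "org/code-search-triplets" is a
# placeholder repository id, not the real one.
from datasets import load_dataset

ds = load_dataset("org/code-search-triplets", split="train")

row = ds[0]
print(row["query"])           # natural-language description
print(row["document"])        # the matching code snippet
print(len(row["negatives"]))  # 19 to 20 non-matching snippets
print(row["metadata"])        # training-objective specification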
Returns True if there is a soldier in the army on position (x, y)
def isSoldier(army, x, y):
    return getDirectionByPosition(x, y, army) is not None
[ "def is_solved(self):\n return (khun := self.sorted_pieces()[0]).x() == self.goal[0] and khun.y() == self.goal[1]", "def is_solved(self):\n carName = 'X'\n squares = [(i, j) for i in range(6) for j in range(6) if self.board[i][j] == carName]\n edge = squares[1]\n return edge[1] == 5", "def is_at(self, location):\r\n dx = self.location[0] - location[0]\r\n dy = self.location[1] - location[1]\r\n return dx * dx + dy * dy <= Node.radius_squared", "def is_solved(self):\r\n for x, j in enumerate(self.board):\r\n for y, k in enumerate(j):\r\n if k == -1:\r\n return False\r\n return True", "def canMove(mapObj, army, enemy):\n for soldier in army:\n if soldierCanMove(mapObj, soldier, army + enemy):\n return True\n\n return False", "def isSolved(self):\n\tif not self.isValid():\n\t\treturn False\n\tfor i in range(9):\n\t\tfor j in range(9):\n\t\t\tif self.grid[i][j] == 0:\n\t\t\t\treturn False\n return True", "def __is_in(self, figure):\n try:\n figure.transform_to(self.ground.frame)\n figure.to_cartesian_coords()\n self.ground.to_cartesian_coords()\n x = figure.x - self.ground.x\n y = figure.y - self.ground.y\n z = figure.z - self.ground.z\n return ((x / self.ground.space.a) ** 2 + (y / self.ground.space.b) ** 2 +\n (z / self.ground.space.c) ** 2) <= 1\n except AttributeError:\n raise LocationError(\"The operation 'is in' needs a figure with coordinates \"\n \"and a ground with a spanned space.\")", "def InShip(ships, x, y):\n coord = (x, y)\n for ship in ships:\n if coord in ship: \n return True\n return False", "def isSolveable(self):\n\t\t# if gold is in a pit, then not solvable\n\t\tfor y in range(0, self.size):\n\t\t\tfor x in range(0, self.size):\n\t\t\t\t(pit, wumpus, gold) = (False, False, False)\n\t\t\t\tif (x,y) in self.map:\n\t\t\t\t\t(pit, wumpus, gold) = self.map[(x,y)]\n\t\t\t\tif (pit and gold):\n\t\t\t\t\treturn False\n\n\t\treturn True", "def isLineAt(self, x, y, dx, dy):\n # Your code here\n nextX = x + (3 * dx)\n nextY = y + (3 * dy)\n if nextX >= self.w or nextY >= self.h:\n return False\n else:\n if self.board[y][x] == self.board[y + dy][x + dx] == self.board[y+(2*dy)][x+(2*dx)] == self.board[y+(3*dy)][x+(3*dx)]:\n return True\n else:\n return False", "def tile_exists(self, x, y):\n return (x, y) in self.world", "def is_castle(position_2d):\n return position_2d[0] == 3 and position_2d[1] == 3", "def position_check(pos_object):\n def check_coord(exp_x, exp_y, pos):\n return (exp_x == pos.get_x() and exp_y == pos.get_y())\n\n cur_x = pos_object.get_x()\n cur_y = pos_object.get_y()\n\n pos_object.move(checkersgame.NW)\n print check_coord(cur_x + 1, cur_y + 1, pos_object) #false\n pos_object.move(checkersgame.SE)\n print check_coord(cur_x, cur_y, pos_object) #true\n pos_object.move(checkersgame.NE)\n print check_coord(cur_x - 1, cur_y + 1, pos_object) #false\n pos_object.move(checkersgame.SW)\n print check_coord(cur_x, cur_y, pos_object) #true", "def check_surrounded(self, x, y, dx, dy):\n xa, ya = x + dx, y + dy\n firstval = None\n if self.within_board(xa, ya):\n firstval = self.board[ya][xa].piece\n\n xb, yb = x - dx, y - dy\n secondval = None\n if self.within_board(xb, yb):\n secondval = self.board[yb][xb].piece\n\n # If both adjacent squares have enemies then this piece is surrounded!\n piece = self.allies()\n enemies = self.enemies(piece)\n return (firstval in enemies and secondval in enemies)", "def __is_at(self, figure):\n try:\n figure.transform_to(self.ground.frame)\n if self.ground.repr == \"cartesian\":\n return figure.x == self.ground.x and figure.y == self.ground.y 
and figure.z == self.ground.z\n return figure.lon == self.ground.lon and figure.lat == self.ground.lat\n except AttributeError:\n raise LocationError(\"The operation 'is at' needs a figure and a ground with coordinates\")", "def isSafe(self):\r\n for spots in self.safe_places:\r\n if self.pos == spots:\r\n return True", "def isSolved(self):\n # Make use of the methods to get rows/cols in Square\n for x in (self.rows + self.cols):\n if Square(x).isSolved():\n continue\n else:\n return False\n return True", "def getDirectionByPosition(x, y, army):\n for soldier in army:\n if (x, y) == soldier.getPosition():\n return soldier.direction\n return None", "def inside_farm(tractor):\r\n status = False\r\n x, y = main_canvas.coords(tractor)\r\n global v\r\n v = 0\r\n while v < len(existing_farms):\r\n if x > existing_farms[v][0] and x < existing_farms[v][1] and y > existing_farms[v][2] and y < existing_farms[v][3]:\r\n return True\r\n v += 1\r\n return status" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the (x, y) position on the map is a wall, otherwise return False.
def isWall(mapObj, x, y):
    if x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x]):
        return False # x and y aren't actually on the map.
    elif mapObj[x][y] in ('#', 'x'):
        return True # wall is blocking
    return False
[ "def is_wall(self, x, y):\n\t\treturn self.get_bool(x, y, 'wall')", "def is_wall(self, x, y):\n return self.get_tile(x, y) == Tile.wall", "def check_wall(self, pos):\n\t\tif(str(pos) in self.wall_map and self.wall_map[str(pos)]):\n\t\t\treturn True\n\t\treturn False", "def is_wall(self, row, col):\n \n return self.maze[row][col] == WALL", "def is_wall(grille, x, y):\n\n # les coordonnées de MG doivent impérativement être de type entier :\n x = int(x)\n y = int(y)\n\n if grille[y][x] != 'm':\n return False\n return True", "def hit_a_wall(self) -> bool:\n\n snake = self.snake\n if snake.moving_direction == \"left\" and snake.head.x <= self.boundaries[\"left\"]:\n return True\n if snake.moving_direction == \"right\" and snake.head.x >= self.boundaries[\"right\"]:\n return True\n if snake.moving_direction == \"up\" and snake.head.y <= self.boundaries[\"up\"]:\n return True\n if snake.moving_direction == \"down\" and snake.head.y >= self.boundaries[\"down\"]:\n return True\n\n return False", "def wall_in_walls(self, wall):\n for w in self.walls:\n if wall == w:\n return True\n return False", "def no_wall(self, coord1, coord2):\n d = self.blocked_coords\n if coord1.tuple in d:\n return coord2.tuple not in d[coord1.tuple]\n return True", "def found_wall(self, wall_threshold=0.30):\n return utils.ds_sensor_to_m(self.bot_distance) > utils.ds_sensor_to_m(self.top_distance) - wall_threshold", "def __check_neighbours(self):\n horizontal = False\n vertical = False\n x, y = self._coord.get_coord_tuple()\n if (x, y) in Ghost.neighbours_map.keys():\n return Ghost.neighbours_map.get((x, y))\n else:\n keys = self.__coord_dict.keys()\n if (x - 1, y) not in keys or (x + 1, y) not in keys or not self.__coord_dict.get(\n (x - 1, y)).is_wall() or not self.__coord_dict.get((x + 1, y)).is_wall():\n horizontal = True\n if (x, y + 1) not in keys or (x, y - 1) not in keys or not self.__coord_dict.get(\n (x, y - 1)).is_wall() or not self.__coord_dict.get((x, y + 1)).is_wall():\n vertical = True\n Ghost.neighbours_map[(x, y)] = horizontal and vertical\n return Ghost.neighbours_map.get((x, y))", "def has_wall(self, direction):\n if not isinstance(direction, int): \n raise ValueError('direction must be an integer, not {:s}'.format(type(direction)))\n\n if direction >3 or direction < 0:\n raise ValueError('direction must be 0, 1, 2 or 3, not {:d}'.format(direction))\n\n return self.walls[direction];", "def wall_collide(self, entity, pos):\n # If the tile has a wall to collide with\n if self.boundary or (self.solid and not entity.incorporeal):\n # And the entity collides with the square that is the tile\n if self._wall_geom_collide(entity, pos):\n # Then the entity collides with the wall\n return True\n return False", "def __is_pos_valid(self, x, y, map):\n cell_radius = int((self.ROBOT_RADIUS + 0.1)/map.info.resolution)\n y_index = int((y-map.info.origin.position.y)/map.info.resolution)\n x_index = int((x-map.info.origin.position.x)/map.info.resolution)\n\n for i in range(x_index-cell_radius, x_index+cell_radius, 1):\n for j in range(y_index-cell_radius, y_index+cell_radius, 1):\n index = j * map.info.width + i\n if index >= len(map.data):\n return False\n try:\n val = map.data[index]\n except IndexError:\n print(\"IndexError: index: %d, map_length: %d\"%(index, len(map.data)))\n return False\n if val != 0:\n return False\n return True", "def is_land(self, x, y):\n return self.land.contains(sgeom.Point(x, y))", "def check_walls(self, aim):\n k = 100\n points = []\n dy = (aim.y - self.y) / k\n dx = (aim.x - self.x) / k\n 
for i in range(k):\n points.append([self.x + math.ceil(i * dx), self.y + math.ceil(i * dy)])\n for i in self.cells:\n for j in i:\n for k in points:\n if (k[0] - j[0] > 0) and (k[0] - j[0] < self.cell_size) and (k[1] - j[1] > 0) and (\n k[1] - j[1] < self.cell_size):\n if j[2] == -1:\n return 1\n return 0", "def is_boundary_cell(self,x,y):\r\n\r\n next_to_game_cell = False\r\n\r\n if x+1 < self.cols:\r\n if self.sumo_grid[x + 1][y] != -9:\r\n next_to_game_cell = True\r\n if x-1 >= 0:\r\n if self.sumo_grid[x - 1][y] != -9:\r\n next_to_game_cell = True\r\n if y + 1 < self.rows:\r\n if self.sumo_grid[x][y + 1] != -9:\r\n next_to_game_cell = True\r\n if y - 1 >= 0:\r\n if self.sumo_grid[x][y - 1] != -9:\r\n next_to_game_cell = True\r\n return next_to_game_cell", "def _is_valid_doorway(pos: Position) -> bool:\n return 1 <= pos.col <= Position.SCREEN_W - 1 and 1 <= pos.row <= Position.SCREEN_H - 1", "def run_on_wall(cls, width, height):\n return True", "def create_wall(window, grid, mouse, wall, empty, start, end):\n\n x = int(mouse.x / (window.getWidth() / grid.get_length()))\n y = int(mouse.y / (window.getHeight() / grid.get_height()))\n if (x != start[0] or y != start[1]) and (x != end[0] or y != end[1]):\n if grid.value_at(x, y) == wall:\n print(\"Wall destroyed at (\" + str(x) + \", \" + str(y), end=\").\")\n grid.modify_tile(x, y, empty)\n grid.tile_at(x, y).update_colour()\n grid.tile_at(x, y).draw(window)\n else:\n print(\"Wall placed at (\" + str(x) + \", \" + str(y), end=\"). \")\n grid.modify_tile(x, y, wall)\n grid.tile_at(x, y).update_colour()\n grid.tile_at(x, y).draw(window)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes any values matching oldCharacter on the map object to newCharacter at the (x, y) position, and does the same for the positions to the left, right, down, and up of (x, y), recursively.
def floodFill(mapObj, position, oldCharacter, newCharacter):
    # In this game, the flood fill algorithm creates the inside/outside
    # floor distinction. This is a "recursive" function.
    # For more info on the Flood Fill algorithm, see:
    # http://en.wikipedia.org/wiki/Flood_fill
    x, y = position

    if mapObj[x][y] == oldCharacter:
        mapObj[x][y] = newCharacter

    if x < len(mapObj) - 1 and mapObj[x + 1][y] == oldCharacter:
        floodFill(mapObj, (x + 1, y), oldCharacter, newCharacter) # call right
    if x > 0 and mapObj[x - 1][y] == oldCharacter:
        floodFill(mapObj, (x - 1, y), oldCharacter, newCharacter) # call left
    if y < len(mapObj[x]) - 1 and mapObj[x][y + 1] == oldCharacter:
        floodFill(mapObj, (x, y + 1), oldCharacter, newCharacter) # call down
    if y > 0 and mapObj[x][y - 1] == oldCharacter:
        floodFill(mapObj, (x, y - 1), oldCharacter, newCharacter) # call up
[ "def change_map(self, pos: int, char: str) -> None:\r\n map_list = list(self.map)\r\n map_list[pos] = char\r\n self.map = \"\".join(map_list)", "def updateBoard(board, row, col, character):\n pass", "def characterMap(mapMethod=\"string\", mapping=\"string\", unmapNode=\"string\", mapNode=\"string\", unmapAttr=\"string\", proposedMapping=bool, mapAttr=\"string\"):\n pass", "def move_character(x, y, character, gridboxes):\n\tnew_box = gridboxes[x][y]\n\tcharacter.move(new_box)", "def update_map(self, x, y):\n self._map[self.hero.x_coord][self.hero.y_coord] = '.'\n self._map[x][y] = 'H'\n self.hero.set_coordinates(x, y)", "def move_character(self, character, loc, moves_left):\n #FIXME: check for already existing characters\n del self.map[character.loc.x][character.loc.y]['character']\n self.free_locations[character.loc] = True\n character.set_location(loc)\n self.map[loc.x][loc.y]['character'] = character\n del self.free_locations[character.loc]\n self.invalidate_paths()\n if self.is_character_hero(character):\n gate = self.has_gate(loc)\n if gate and not moves_left:\n self.transition = self.gates[gate]", "def set_character(self, new_character):\n self.character = new_character", "def swap_character(self, new_character, delete_old_character=False):\r\n return self.dbobj.swap_character(new_character, delete_old_character=delete_old_character)", "def change_tile(self, next_tile):\n old_position = next_tile[0][0], next_tile[0][1]\n self.worldmap.remove(old_position)\n new_position = tuple([next_tile[0][0], \".\"])\n self.worldmap.add(new_position)", "def getCharacterMapping(self):", "def test_move(self):\n\n map1 = \"\"\"\\\n OOOOOO\n OODOJO\n OOJJOO\n OOOOOO\"\"\"\n rd.seed(5)\n m = Ma.Map(map1)\n m.populate_map((1, 2), [Fa.Carnivore(\n age=10, weight=50) for _ in range(100)])\n m.populate_map((1, 2), [Fa.Herbivore(\n age=15, weight=30) for _ in range(10)])\n\n new_cell = m.migrate_to((1, 2))\n m.move()\n\n assert m.island[1, 2].total_pop + m.island[\n new_cell].total_pop == 110\n assert m.island[1, 2].total_pop == 62\n assert m.island[new_cell].total_pop == 48", "def change_position(self, cell: Cell, direction):\n \n new_x = 0\n new_y = 0\n\n if direction == 'N':\n new_x = cell.x - 1\n new_y = cell.y\n elif direction == 'E':\n new_x = cell.x \n new_y = cell.y + 1\n elif direction == 'W':\n new_x = cell.x\n new_y = cell.y - 1\n elif direction == 'S':\n new_x = cell.x + 1\n new_y = cell.y\n\n if (new_x < 0 or new_x > 7 or new_y < 0 or new_y > 7):\n raise Exception(\"Knight Drowns\")\n \n return self.board[new_x][new_y]", "def replace_with_char(board, char):\n for row in board:\n for index in range(0, len(row)):\n value = row[index]\n if value != ' Q ':\n row[index] = ' X '\n return board", "def move(self, key):\n \n #Checks if input is left and changes sprite positions\n if key == 'left':\n #Checks level limit\n if self.tile_x > 0:\n #Checks for wall\n if self.level.frame[self.tile_y][self.tile_x - 1] != 'X':\n #check for inventory tiles\n if self.level.frame[self.tile_y][self.tile_x - 1] != 'I':\n #Sets new position on grid \n self.tile_x -= 1\n #Sets new position on screen\n self.x = self.tile_x * TILESIZE\n\n #Checks if input is right and changes sprite positions\n elif key == 'right':\n #Checks level limits\n if self.tile_x < (XTILES - 1):\n #Checks for wall\n if self.level.frame[self.tile_y][self.tile_x + 1] != 'X':\n #Checks for inventory tiles\n if self.level.frame[self.tile_y][self.tile_x + 1] != 'I':\n #set new position on grid\n self.tile_x += 1\n #Sets new position on screen\n 
self.x = self.tile_x * TILESIZE\n \n #Checks if input is up and changes sprite positions\n elif key == 'up':\n #Checks level limits\n if self.tile_y > 0:\n #Checks for wall\n if self.level.frame[self.tile_y-1][self.tile_x] != 'X':\n #Checks for inventory tiles\n if self.level.frame[self.tile_y-1][self.tile_x] != 'I':\n #Sets new position on grid \n self.tile_y -= 1\n #Sets new position on screen\n self.y = self.tile_y * TILESIZE\n\n #Checks if input is down and changes sprite positions\n elif key == 'down':\n #check level limits\n if self.tile_y < (YTILES - 1):\n #check for wall\n if self.level.frame[self.tile_y+1][self.tile_x] != 'X':\n #check for inventory tiles\n if self.level.frame[self.tile_y+1][self.tile_x] != 'I':\n #Sets new position on grid\n self.tile_y += 1\n #Sets new position on screen\n self.y = self.tile_y * TILESIZE", "def recreate_map(self):\n self.create_map()\n for item in self.saved_positions.items():\n print(item[1][-1])\n self.update_position(item[1][-1])\n self.draw_historic_path(device_id=item[1][-1]['device_id'],last=20)\n m.draw_map()", "def change_grid(x: int, y: int, new: str):\r\n grid[y][x] = new", "def __update_character_translation_dict(self, word, found_word):\n for char1, char2 in zip(word, found_word):\n if char1 not in self.__character_translation_dict:\n self.__character_translation_dict[char1] = char2", "def update(self, row, col, change):\n cell = self.grid[row][col]\n\n if isinstance(cell, dict):\n if len(cell.keys()) == 0:\n cell = change\n else:\n (cell).update(change)\n\n self.grid[row][col] = cell", "def __update_direction(self, (previous_x, previous_y)):\n\n # No need to do any checks if next direction is the same\n if self.next_direction != self.current_direction:\n current_cell = self.game.level.cells[self.grid_position_x][self.grid_position_y]\n current_edge = current_cell.get_edge(self.next_direction)\n\n if current_edge.type == level_cell.CellEdgeType.passage:\n if self.current_direction == direction.Direction.right:\n # Set new direction for next frame if character has moved into/past center or is at the center of cell\n if ((self.center_x >= current_cell.center_x and current_cell.center_x > previous_x) or\n (self.center_x == current_cell.center_x and previous_x == current_cell.center_x)):\n # Ensure player is in center before changing direction\n self.center_x = current_cell.center_x\n self.__change_direction()\n\n elif self.current_direction == direction.Direction.left:\n if ((self.center_x <= current_cell.center_x and current_cell.center_x < previous_x) or\n (self.center_x == current_cell.center_x and previous_x == current_cell.center_x)):\n self.center_x = current_cell.center_x\n self.__change_direction()\n\n elif self.current_direction == direction.Direction.up:\n if ((self.center_y >= current_cell.center_y and current_cell.center_y > previous_y) or\n (self.center_y == current_cell.center_y and previous_y == current_cell.center_y)):\n self.center_y = current_cell.center_y\n self.__change_direction()\n\n elif self.current_direction == direction.Direction.down:\n if ((self.center_y <= current_cell.center_y and current_cell.center_y < previous_y) or\n (self.center_y == current_cell.center_y and previous_y == current_cell.center_y)):\n self.center_y = current_cell.center_y\n self.__change_direction()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the direction of soldier on (x, y)
Return None if no soldier in the army is on (x, y)
def getDirectionByPosition(x, y, army):
    for soldier in army:
        if (x, y) == soldier.getPosition():
            return soldier.direction
    return None
[ "def direction(x1, y1, x2, y2):\n\tif x1 == x2 and y2 > y1:\n\t\treturn NORTH\n\telif x1 == x2 and y2 < y1:\n\t\treturn SOUTH\n\telif y1 == y2 and x2> x1:\n\t\treturn EAST\n\telif y1 == y2 and x2 < x1:\n\t\treturn WEST\n\telse:\t\n\t\treturn None", "def isSoldier(army, x, y):\n return getDirectionByPosition(x, y, army) is not None", "def get_direction(x1, y1, x2, y2):\n\n dx = x2 - x1\n dy = y2 - y1\n if dx > 0:\n return 'right'\n elif dx < 0:\n return 'left'\n elif dy > 0:\n return 'up'\n elif dy < 0:\n return 'down'\n else:\n return None", "def get_adjacent(x, y, direction):\n\n if direction == LEFT and x > 0:\n return x - 1, y\n elif direction == RIGHT and x < 6:\n return x + 1, y\n elif direction == DOWN and y > 0:\n return x, y - 1\n elif direction == UP and y < 4:\n return x, y + 1\n else:\n return None, None", "def _compute_direction(self):\n # Get the first helix axis and a point on that axis from the staple bases. \n # If there is no staple then use the scaffold.\n helix1 = self.from_helix\n if len(helix1.staple_bases) != 0:\n helix1_base = helix1.staple_bases[0]\n elif len(helix1.scaffold_bases) != 0:\n helix1_base = helix1.scaffold_bases[0]\n pt1 = helix1_base.coordinates\n axis1 = [helix1.end_frames[0,2,0], helix1.end_frames[1,2,0], helix1.end_frames[2,2,0]]\n\n # Get the second (adjacent) helix axis and a point on that axis.\n helix2 = self.to_helix\n if len(helix2.staple_bases) != 0:\n helix2_base = helix2.staple_bases[0]\n elif len(helix2.scaffold_bases) != 0:\n helix2_base = helix2.scaffold_bases[0]\n pt2 = helix2_base.coordinates\n axis2 = [helix2.end_frames[0,2,0], helix2.end_frames[1,2,0], helix2.end_frames[2,2,0]]\n axis2_length = np.linalg.norm(axis2)\n\n # Compute the unit vector in the direction of the adjacent helix.\n vec = pt1 - pt2\n d = np.dot(axis2,vec) / axis2_length\n a2pt = pt2 + np.dot(axis2,d)\n self.direction = a2pt - pt1\n self.direction = self.direction / np.linalg.norm(self.direction)", "def get_direction_player1(self):\n x = 0\n y = 0\n\n if arcade.key.A in self._keys:\n x = -1\n elif arcade.key.D in self._keys:\n x = 1\n\n if arcade.key.W in self._keys:\n y = 1\n elif arcade.key.S in self._keys:\n y = -1\n\n velocity = Point(x, y)\n return velocity", "def attack_direction(sprite): \n\n\tif sprite.direction == 0:\n\t\treturn vec(-1,0)\n\telif sprite.direction == 1:\n\t\treturn vec(-1,-1)\n\telif sprite.direction == 2:\n\t\treturn vec(0,-1)\n\telif sprite.direction == 3:\n\t\treturn vec(1,-1)\n\telif sprite.direction == 4:\n\t\treturn vec(1,0)\n\telif sprite.direction == 5:\n\t\treturn vec(1,1)\n\telif sprite.direction == 6:\n\t\treturn vec(0,1)\n\telif sprite.direction == 7:\n\t\treturn vec(-1,1)", "def direction_of_move(move):\n if move not in direction_of_move.direction_map:\n return None\n return direction_of_move.direction_map[move]", "def closest_dirt(self):\r\n position = self.bot_pos\r\n dirts = self.get_dirts(position[0],position[1])\r\n if dirts:\r\n i, j = min(dirts,\r\n key=lambda dirt_pos:((position[0]-dirt_pos[0])**2+(position[1]-dirt_pos[1])**2)**0.5\r\n )\r\n return (i,j)", "def which_direction(from_location_id,to_location_id):\n [from_x, from_y] = convert_to_rowcol(from_location_id)\n [to_x, to_y] = convert_to_rowcol(to_location_id)\n if to_y < from_y:\n return 'W'\n if to_y > from_y:\n return 'E'\n if to_x < from_x:\n return 'N'\n if to_x > from_x:\n return 'S'", "def dist_and_dir_to_closest_monster(self):\n # Find all the monsters in the world\n location, count = self.locate_monsters()\n smallest_dist = self.world.height() * 
self.world.width()\n direction = -1\n for monster_loc in location:\n dist_to_mon = self.layer_dist(self.x, self.y, monster_loc[0], monster_loc[1])\n if dist_to_mon < smallest_dist:\n smallest_dist = dist_to_mon\n direction = self.dir_between_cells(self.x, self.y, monster_loc[0], monster_loc[1])\n if smallest_dist > 4:\n smallest_dist = 4 # If the monster is too far away, consider the distance as the character's max vision\n return smallest_dist, direction", "def _get_target_position(self):\n\n if self.chasing:\n player_position = self.game.player.grid_position\n distance_from_player = Vector(player_position).distance(self.grid_position)\n if distance_from_player > self.flee_distance:\n # Target position is player if the player is more than 4 tiles away\n target_position = player_position\n return target_position\n\n # Returns bottom left in scatter mode or if within flee_distance from player\n target_position = -1, -1\n return target_position", "def pathfindTo(self,x,y,Game):\n self.goalx = x\n self.goaly = y\n dist = math.sqrt((y-self.y)**2 + (x-self.x)**2)\n time = dist / self.speed\n if(time != 0):\n self.xvel = (x - self.x) / time\n self.yvel = (y - self.y) / time\n else:\n self.xvel = 0\n self.yvel = 0", "def get_coordinates(self, direction):\n\t\tif direction == \"n\":\n\t\t\treturn (1, 1)\n\t\tif direction == \"s\":\n\t\t\treturn (3, 1)\n\t\tif direction == \"w\":\n\t\t\treturn (2, 0)\n\t\tif direction == \"e\":\n\t\t\treturn (2, 2)\n\t\tif direction == \"c\":\n\t\t\treturn (2, 1)\n\t\tif direction == \"nw\":\n\t\t\treturn (1, 0)\n\t\tif direction == \"ne\":\n\t\t\treturn (1, 2)\n\t\tif direction == \"sw\":\n\t\t\treturn (3, 0)\n\t\tif direction == \"se\":\n\t\t\treturn (3, 2)\n\n\t\t# otherwise return Nones\n\t\treturn (None, None)", "def next_pos(self, direction):\n\t\tif direction == \"up\":\n\t\t\treturn (self.x, self.y - 1)\n\t\telif direction == \"down\":\n\t\t\treturn (self.x, self.y + 1)\n\t\telif direction == \"right\":\n\t\t\treturn (self.x + 1, self.y)\n\t\telif direction == \"left\":\n\t\t\treturn (self.x - 1, self.y)\n\t\treturn None", "def _get_target_position(self):\n\n if self.chasing:\n player_position = self.game.player.grid_position\n player_direction_vector = self.game.player.current_direction.value\n # Could have used Pink's target position, but calculating here reduces confusion\n two_cells_ahead_of_player = Vector(player_position) + (2 * player_direction_vector)\n red_beetle_position = self.game.red_enemy.grid_position\n # Double the vector between 2 cells ahead of the player and the red beetle's position\n target_position = 2 * Vector(two_cells_ahead_of_player) - Vector(red_beetle_position)\n return target_position\n\n else:\n # Bottom right in scatter mode\n target_position = (self.game.level.columns + 1, -1)\n return target_position", "def _optimalDestination(self):\n destX,destY = self.path.pop(0)\n destX=destX%self.worldSize[0]\n destY=destY%self.worldSize[1]\n\n return specialMath.findClosest(self.realCenter, (destX, destY), self.worldSize)", "def search(i, dir_x, dir_y):\n if dir_x == '+':\n new_x = x + i\n elif dir_x == '-':\n new_x = x - i\n \n if dir_y == '+':\n new_y = y + i\n elif dir_y == '-':\n new_y = y - i\n return new_x, new_y", "def get_closest_aim_dir(self, aim):\n all_directions_distances = []\n for x in range(-1, 2):\n for y in range(-1, 2):\n a = (self.rect.centerx\n - (aim.rect.centerx + x*(WIDTH+aim.rect.width)))\n b = (self.rect.centery\n - (aim.rect.centery + y*(HEIGHT+aim.rect.height)))\n\n dist = np.sqrt(a**2 + b**2)\n 
all_directions_distances.append(dist)\n\n best = all_directions_distances.index(min(all_directions_distances))\n\n if best < 3: x = -1\n elif best > 5: x = 1\n else: x = 0\n if (best+1)%3 == 0: y = 1\n elif best in [1, 3, 6]: y = 0\n else: y = -1\n a = aim.rect.centerx + x*(WIDTH + aim.rect.width)\n b = aim.rect.centery + y*(HEIGHT + aim.rect.height)\n aim = GObject(blanc, a, b)\n return self.get_angle_to(aim)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a datashape instance into Aterm annotation

>>> ds = dshape('2, 2, int32')
>>> anno = dshape_anno(ds)
dshape("2, 2, int32")
>>> type(anno)
def annotate_dshape(ds):
    assert isinstance(ds, DataShape)
    return AAppl(ATerm('dshape'), [AString(str(ds))])
[ "def annotate(args):\n from .annotation.annotation import annotate as anno\n anno(args)", "def test_create_from_gds_type(self):\n _b = emdb_sff.biological_annotationType(\n name=self.name,\n description=self.description,\n number_of_instances=self.no,\n external_references=self._external_references\n )\n b = adapter.SFFBiologicalAnnotation.from_gds_type(_b)\n self.assertRegex(\n _str(b),\n r\"\"\"SFFBiologicalAnnotation\\(\"\"\" \\\n r\"\"\"name=\"{}\", description=\"{}\", \"\"\" \\\n r\"\"\"number_of_instances={}, \"\"\" \\\n r\"\"\"external_references=SFFExternalReferenceList\\(\\[.*\\]\\)\\)\"\"\".format(\n self.name,\n self.description,\n self.no\n )\n )\n self.assertEqual(b.name, self.name)\n self.assertEqual(b.description, self.description)\n self.assertEqual(b.number_of_instances, self.no)\n self.assertEqual(b.external_references, self.external_references)", "def _create_annotations(self, args: parser_extensions.Namespace):\n annotations = flags.Get(args, 'annotations')\n return self._dict_to_annotations_message(annotations)", "def _mkannotation(\n pa: typ.Dict[str, typ.Any],\n page: Page\n) -> typ.Optional[Annotation]:\n\n subtype = pa.get('Subtype')\n annot_type = None\n assert isinstance(subtype, PSLiteral)\n try:\n annot_type = ANNOT_SUBTYPES[subtype]\n except KeyError:\n pass\n\n if annot_type is None:\n if subtype not in IGNORED_ANNOT_SUBTYPES:\n logger.warning(\"Unsupported %s annotation ignored on %s\", subtype.name, page)\n return None\n\n contents = pa.get('Contents')\n if contents is not None:\n # decode as string, normalise line endings, replace special characters\n contents = cleanup_text(pdfminer.utils.decode_text(contents))\n\n rgb: typ.Optional[RGB] = None\n color = pa.get('C')\n if color is not None:\n if (isinstance(color, list)\n and len(color) == 3\n and all(isinstance(e, (int, float)) and 0 <= e <= 1 for e in color)):\n rgb = RGB(*color)\n else:\n logger.warning(\"Invalid color %s in annotation on %s\", color, page)\n\n # Rect defines the location of the annotation on the page\n rect = pdftypes.resolve1(pa.get('Rect'))\n\n # QuadPoints are defined only for \"markup\" annotations (Highlight, Underline, StrikeOut,\n # Squiggly), where they specify the quadrilaterals (boxes) covered by the annotation.\n quadpoints = pdftypes.resolve1(pa.get('QuadPoints'))\n\n author = pdftypes.resolve1(pa.get('T'))\n if author is not None:\n author = pdfminer.utils.decode_text(author)\n\n created = None\n dobj = pa.get('CreationDate')\n # some pdf apps set modification date, but not creation date\n dobj = dobj or pa.get('ModDate')\n # poppler-based apps (e.g. 
Okular) use 'M' for some reason\n dobj = dobj or pa.get('M')\n createds = pdftypes.resolve1(dobj)\n if createds is not None:\n createds = pdfminer.utils.decode_text(createds)\n created = decode_datetime(createds)\n\n return Annotation(page, annot_type, quadpoints, rect,\n contents, author=author, created=created, color=rgb)", "def get_annotation(label):\n assert get_xsi_type(label) == 'saltCore:SAnnotation'\n return (label.attrib['name'], label.attrib['valueString'])", "def create_annotations(self) -> None:\n pass", "def create_annotation_for_dataset_by_id(self, datasetid: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetid\": datasetid,\n }\n\n path = Template(\"/catalog/v2alpha2/datasets/${datasetid}/annotations\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = annotation_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Annotation)", "def test__convert_to_annotation_cls_synapse_style():\n status = SubmissionStatus(id=\"5\", etag=\"12\")\n annots = {\n \"id\": \"6\",\n \"etag\": \"123\",\n \"annotations\": {\"foo\": {\"type\": \"STRING\", \"value\": [\"doo\"]}},\n }\n annotation_cls = annotations._convert_to_annotation_cls(status, annots)\n assert isinstance(annotation_cls, annotations.Annotations)\n assert annotation_cls == {\"foo\": [\"doo\"]}\n assert annotation_cls.id == \"6\"\n assert annotation_cls.etag == \"123\"", "def annotate(component, annotation, indicator):\n import abjad\n assert isinstance(annotation, str), repr(annotation)\n wrapper = abjad.Wrapper(\n annotation=annotation,\n component=component,\n indicator=indicator,\n )\n wrapper._bind_to_component(component)", "def _mkannotation(\n pa: typing.Any,\n page: Page\n) -> typing.Optional[Annotation]:\n\n subtype = pa.get('Subtype')\n try:\n annot_type = ANNOT_SUBTYPES[subtype.name]\n except (TypeError, KeyError):\n # subtype is missing (None), or is an unknown/unsupported type\n return None\n\n contents = pa.get('Contents')\n if contents is not None:\n # decode as string, normalise line endings, replace special characters\n contents = cleanup_text(pdfminer.utils.decode_text(contents))\n\n coords = pdftypes.resolve1(pa.get('QuadPoints'))\n rect = pdftypes.resolve1(pa.get('Rect'))\n\n author = pdftypes.resolve1(pa.get('T'))\n if author is not None:\n author = pdfminer.utils.decode_text(author)\n\n created = None\n dobj = pa.get('CreationDate')\n # some pdf apps set modification date, but not creation date\n dobj = dobj or pa.get('ModDate')\n # poppler-based apps (e.g. 
Okular) use 'M' for some reason\n dobj = dobj or pa.get('M')\n createds = pdftypes.resolve1(dobj)\n if createds is not None:\n createds = pdfminer.utils.decode_text(createds)\n created = decode_datetime(createds)\n\n return Annotation(page, annot_type, coords, rect,\n contents, author=author, created=created)", "def test_create_annotations(self):\n segmentation = adapter.SFFSegmentation() # annotation\n segmentation.name = u\"name\"\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=u\"Software\",\n version=u\"1.0.9\",\n processing_details=u\"Processing details\"\n )\n )\n segmentation.details = u\"Details\"\n # global external references\n segmentation.global_external_references = adapter.SFFGlobalExternalReferenceList()\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'one',\n url=u'two',\n accession=u'three'\n )\n )\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'four',\n url=u'five',\n accession=u'six'\n )\n )\n segmentation.segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n biol_ann = adapter.SFFBiologicalAnnotation()\n biol_ann.name = u\"Segment1\"\n biol_ann.description = u\"Some description\"\n # external refs\n biol_ann.external_references = adapter.SFFExternalReferenceList()\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sldjflj\",\n accession=u\"doieaik\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sljd;f\",\n accession=u\"20ijalf\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"lsdjlsd\",\n url=u\"lsjfd;sd\",\n accession=u\"23ijlsdjf\"\n )\n )\n biol_ann.number_of_instances = 30\n segment.biological_annotation = biol_ann\n # colour\n segment.colour = adapter.SFFRGBA(\n red=1,\n green=0,\n blue=1,\n alpha=0\n )\n segmentation.segments.append(segment)\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, u'sff', u'v0.7', u'test_annotated_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.name, u'name')\n self.assertEqual(segmentation.version, segmentation._local.schema_version) # automatically set\n software = segmentation.software_list[0]\n self.assertEqual(software.name, u\"Software\")\n self.assertEqual(software.version, u\"1.0.9\")\n self.assertEqual(software.processing_details, u\"Processing details\")\n self.assertEqual(segmentation.details, u\"Details\")\n # global external references\n self.assertEqual(segmentation.global_external_references[0].resource, u'one')\n self.assertEqual(segmentation.global_external_references[0].url, u'two')\n self.assertEqual(segmentation.global_external_references[0].accession, u'three')\n self.assertEqual(segmentation.global_external_references[1].resource, u'four')\n self.assertEqual(segmentation.global_external_references[1].url, u'five')\n self.assertEqual(segmentation.global_external_references[1].accession, u'six')\n # segment: biological_annotation\n self.assertEqual(segment.biological_annotation.name, u\"Segment1\")\n self.assertEqual(segment.biological_annotation.description, u\"Some description\")\n self.assertEqual(len(segment.biological_annotation.external_references), 3)\n self.assertEqual(segment.biological_annotation.external_references[0].resource, u\"sldjflj\")\n self.assertEqual(segment.biological_annotation.external_references[0].accession, u\"doieaik\")\n 
self.assertEqual(segment.biological_annotation.external_references[1].resource, u\"sljd;f\")\n self.assertEqual(segment.biological_annotation.external_references[1].accession, u\"20ijalf\")\n self.assertEqual(segment.biological_annotation.external_references[2].resource, u\"lsdjlsd\")\n self.assertEqual(segment.biological_annotation.external_references[2].url, u\"lsjfd;sd\")\n self.assertEqual(segment.biological_annotation.external_references[2].accession, u\"23ijlsdjf\")\n self.assertEqual(segment.biological_annotation.number_of_instances, 30)\n # colour\n self.assertEqual(segment.colour.value, (1, 0, 1, 0))", "def simple_attr_annotation(nm, type_ref):\n assert type_ref.type_ in python_primitives\n return '@sprop.%s #%s' % (nm, type_ref.type_)", "def extract_annotation(self, sequence):\n annotation = self._extractor.predict(sequence)\n return annotation.flatten()", "def _build_annotation(arguments: Dict):\n if arguments[\"annotation_format\"] == \"pascal\":\n\n # write a PASCAL VOC file for this image\n # using all bounding boxes in the image's group\n _write_bboxes_as_pascal(\n arguments[\"bboxes\"],\n arguments[\"class_label\"],\n arguments[\"image_id\"],\n arguments[\"images_dir\"],\n arguments[\"annotations_dir\"],\n # arguments[\"include_segmentation_masks\"],\n )\n\n elif arguments[\"annotation_format\"] == \"darknet\":\n\n # write a Darknet annotation file for this image\n # using all bounding boxes in the image's group\n _write_bboxes_as_darknet(\n arguments[\"bboxes\"],\n arguments[\"class_index\"],\n arguments[\"image_id\"],\n arguments[\"images_dir\"],\n arguments[\"annotations_dir\"],\n )\n # elif arguments[\"annotation_format\"] == \"kitti\":\n # # TODO\n # pass\n else:\n raise ValueError(\n f\"Unsupported annotation format: \\\"{arguments['annotation_format']}\\\"\",\n )", "def create_annotation_for_dashboardby_id(self, dashboardid: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"dashboardid\": dashboardid,\n }\n\n path = Template(\"/catalog/v2alpha2/dashboards/${dashboardid}/annotations\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = annotation_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Annotation)", "def to_anno_dict(self):\n d = {\n \"id\": self.id,\n \"image_id\": self.image_id,\n \"category_id\": self.category_id,\n }\n\n if self.bbox is not None:\n d[\"bbox\"] = self.bbox\n\n if self.keypoints is not None:\n d[\"keypoints\"] = self.keypoints\n d[\"num_keypoints\"] = len(self.keypoints) // 3\n\n if self.segmentation is not None:\n d[\"segmentation\"] = self.segmentation\n\n if self.score is not None:\n d[\"score\"] = self.score\n\n if self.area is not None:\n d[\"area\"] = self.area\n\n if self.iscrowd is not None:\n d[\"iscrowd\"] = self.iscrowd\n\n if self.attributes:\n d.update(self.attributes)\n\n return d", "def rough_type(anno):\n if anno.type == 'Segment' or stac.is_edu(anno):\n return 'EDU'\n elif stac.is_relation_instance(anno):\n return 'relation'\n else:\n return anno.type", "def annotate_sparse_output_shape(\n tensor: tf.SparseTensor, shape: Union[Sequence[int], tf.Tensor]):\n if not isinstance(shape, tf.Tensor):\n if (tensor.shape.rank > 1 and tensor.shape.rank - 1 != len(shape)) or (\n tensor.shape.rank == 1 and len(shape) != 1):\n raise ValueError(\n f'Annotated shape {shape} was expected to have rank'\n f' {tensor.shape.rank - 1}')\n 
if not all(a is None or a <= b for a, b in zip(tensor.shape[1:], shape)):\n raise ValueError(\n f'Shape {shape} cannot contain annotated tensor {tensor}')\n shape = tf.convert_to_tensor(shape, dtype=tf.int64)\n elif shape.shape.rank > 1 or (\n shape.shape.rank == 1 and shape.shape[0] != tensor.shape.rank - 1):\n raise ValueError(\n f'Annotation shape has rank {shape.shape.rank} but expected to have'\n f' rank {tensor.shape.rank - 1}')\n if shape.shape.rank < 1:\n shape = tf.expand_dims(shape, -1)\n # There's currently no way to override SparseTensor.dense_shape directly,\n # unless composing and returning a new SparseTensor.\n tensor._dense_shape = tf.concat( # pylint: disable=protected-access\n [tf.expand_dims(tensor.dense_shape[0], -1), tf.cast(shape, tf.int64)],\n axis=0)\n schema_inference.annotate_sparse_output_shape(tensor, shape)", "def load_annotation( anno_file ):\n\t\n\tanno = {}\n\t\n\twith open( anno_file, \"r\" ) as f:\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tparts = line.strip().split('\\t')\n\t\t\tanno.update( { parts[0]: parts[1] } )\n\t\t\tline = f.readline()\n\t\n\treturn anno" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the NCBImetaAnnotate application in concatenate mode for run completion
def test_annotate_concatenate_run():
    # Use the test database
    test_db = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.sqlite")
    test_annotfile = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_annot.txt"
    )
    # If the test_db doesn't already exist, run the test cmd from test_ncbimeta
    if not os.path.exists(test_db):
        test_ncbimeta.test_ncbimeta_run()
    test_table = "BioSample"
    test_cmd = (
        "ncbimeta/NCBImetaAnnotate --database "
        + test_db
        + " --table "
        + test_table
        + " --annotfile "
        + test_annotfile
        + " --concatenate"
    )
    # test NCBImetaAnnotate through a subprocess
    returned_value = subprocess.call(test_cmd, shell=True)
    # If it returns a non-zero value, it failed
    assert returned_value == 0
[ "def test_annotate_replace_run():\n # Use the test database\n test_db = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"test.sqlite\")\n test_annotfile = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"test_annot.txt\"\n )\n # If the test_db doesn't already exist, run the test cmd from test_ncbimeta\n if not os.path.exists(test_db):\n test_ncbimeta.test_ncbimeta_run()\n test_table = \"BioSample\"\n test_cmd = (\n \"ncbimeta/NCBImetaAnnotate --database \"\n + test_db\n + \" --table \"\n + test_table\n + \" --annotfile \"\n + test_annotfile\n )\n # test NCBImetaAnnotate through a subprocess\n returned_value = subprocess.call(test_cmd, shell=True)\n # If it returns a non-zero value, it failed\n assert returned_value == 0", "def cli(raw_args: Optional[list[str]] = None) -> None:\n if not raw_args:\n raw_args = sys.argv[1:]\n\n parser = configure_argument_parser()\n args = parser.parse_args(raw_args)\n VerbosityConfiguration.set(args)\n CLIAnnotationContext.register(args)\n\n context = get_genomic_context()\n pipeline = CLIAnnotationContext.get_pipeline(context)\n grr = CLIAnnotationContext.get_genomic_resources_repository(context)\n\n if args.output:\n output = args.output\n else:\n output = os.path.basename(args.input).split(\".\")[0] + \"_annotated.vcf\"\n\n if not os.path.exists(args.work_dir):\n os.mkdir(args.work_dir)\n\n\n task_graph = TaskGraph()\n\n task_graph.input_files.append(args.input)\n task_graph.input_files.append(args.pipeline)\n if args.reannotate:\n task_graph.input_files.append(args.reannotate)\n\n if not tabix_index_filename(args.input):\n # annotate(args.input, None, pipeline.get_info(),\n # grr.definition, output, args.reannotate)\n assert grr is not None\n task_graph.create_task(\n \"all_variants_annotate\",\n annotate,\n [args.input, None, pipeline.get_info(),\n grr.definition, output, args.reannotate],\n []\n )\n else:\n with closing(TabixFile(args.input)) as pysam_file:\n regions = produce_regions(pysam_file, args.region_size)\n file_paths = produce_partfile_paths(args.input, regions, args.work_dir)\n region_tasks = []\n for index, (region, file_path) in enumerate(zip(regions, file_paths)):\n assert grr is not None\n region_tasks.append(task_graph.create_task(\n f\"part-{index}\",\n annotate,\n [args.input, region,\n pipeline.get_info(), grr.definition,\n file_path, args.reannotate],\n []\n ))\n\n assert grr is not None\n task_graph.create_task(\n \"combine\",\n combine,\n [args.input, pipeline.get_info(),\n grr.definition, file_paths, output],\n region_tasks\n )\n\n args.task_status_dir = os.path.join(args.work_dir, \".tasks-status\")\n args.log_dir = os.path.join(args.work_dir, \".tasks-log\")\n\n TaskGraphCli.process_graph(task_graph, **vars(args))", "def _buildkite_annotate(content, style=\"success\", context=None):\n if context is None:\n context = \"ctx-%s\" % (style,)\n agent = local[\"buildkite-agent\"]\n _ = (\n agent[\"annotate\", content, \"--style\", style, \"--context\", context, \"--append\"]\n & FG\n )", "def take_action(self, parsed_args):\n args = sys.argv[1:]\n self.log.info('Annotation Development')\n self.log.debug('debugging [Annotation]')\n\n url = parsed_args.url\n doc = parsed_args.doc\n self.log.info('Arguments: '+ str(args) + '\\n')\n\n if url:\n req_ob = requests.get(str(url).strip())\n soup = BeautifulSoup(req_ob.content, \"html.parser\")\n\n \n try: \n abstract = soup.find_all(\"p\", {\"id\" : \"p-2\"})[0]\n abs_text = trimlines(abstract.text).encode('ascii','ignore')\n data = {'content' : str(abs_text)}\n\n 
response = requests.get(server_url + '/annotations/entities', params = data)\n\n if response.status_code == 200:\n annotated_data = response.json()\n self.app.stdout.write(str(annotated_data))\n hpo_terms = []\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_annotated_data.txt', 'w')\n fopen.write(str(annotated_data) + '\\n')\n\n fopen.close()\n\n for ob in annotated_data:\n token = ob['token']\n if 'Phenotype' in token['categories']:\n term = str(token['terms'][0])\n if term not in hpo_terms:\n hpo_terms.append(token['terms'][0])\n\n self.app.stdout.write('\\n HPO Terms:\\n')\n for term in hpo_terms:\n self.app.stdout.write(str(term) + '\\n')\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_hpo_terms.txt', 'w' )\n fopen.write('HPO Terms:\\n')\n for term in hpo_terms:\n fopen.write(str(term) + '\\n')\n\n fopen.close()\n else:\n self.app.stdout.write(str(response.status_code))\n except:\n self.app.stdout.write(\"Abstract Not found\\n\")\n \n if doc:\n html_doc = open(str(doc), 'r')\n soup = BeautifulSoup(html_doc, 'html.parser')\n\n try:\n self.app.stdout.write('Title:' + str(soup.title.get_text()) + '\\n')\n except:\n pass\n\n try:\n meta_list = soup.find_all('meta', {'name' : 'dc.Description'})\n content_list= [k.get('content') for k in meta_list]\n content = ' '.join(content_list)\n data = {'content' : str(content)}\n \n response = requests.get(server_url + '/annotations/entities', params = data)\n\n if response.status_code == 200:\n annotated_data = response.json()\n self.app.stdout.write(str(annotated_data))\n hpo_terms = []\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_annotated_data.txt', 'w')\n fopen.write(str(annotated_data) + '\\n')\n\n fopen.close()\n\n for ob in annotated_data:\n token = ob['token']\n if 'Phenotype' in token['categories']:\n term = str(token['terms'][0])\n if term not in hpo_terms:\n hpo_terms.append(token['terms'][0])\n\n self.app.stdout.write('\\n HPO Terms:\\n')\n for term in hpo_terms:\n self.app.stdout.write(str(term) + '\\n')\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_hpo_terms.txt', 'w' )\n fopen.write('HPO Terms:\\n')\n for term in hpo_terms:\n fopen.write(str(term) + '\\n')\n\n fopen.close()\n else:\n self.app.stdout.write(str(response.status_code)+ '\\n')\n\n except:\n self.app.stdout.write('Meta Data not Found\\n')", "def main(argv):\n global ENCODING\n # define command line arguments\n argparser = argparse.ArgumentParser(description = \"\"\"Script for measuring corpus agreement\non RST.\"\"\")\n # optional arguments\n argparser.add_argument(\"--anno-sfx\", help = \"extension of annotation files\", type = str,\n default = \"\")\n argparser.add_argument(\"-d\", \"--output-difference\", help = \"\"\"output difference\"\"\",\n action = \"store_true\")\n argparser.add_argument(\"--file-format\", help = \"format of annotation file\", type = str,\n default = XML_FMT)\n argparser.add_argument(\"--segment-strict\", help = \"\"\"use strict metric\nfor evaluating segment agreement\"\"\", action = \"store_true\")\n argparser.add_argument(\"--src-ptrn\", help = \"shell pattern of source files\", type = str,\n default = \"*\")\n argparser.add_argument(\"--type\", help = \"\"\"type of element (relation) for which\nto measure the agreement\"\"\", choices = [SEGMENTS, MNUCLEARITY, DNUCLEARITY, MRELATIONS, \\\n DRELATIONS, ALL],\n type = str, action = \"append\")\n argparser.add_argument(\"-v\", \"--verbose\", help = \"output agreement statistics for each file\", \\\n action = 
\"store_true\")\n # mandatory arguments\n argparser.add_argument(\"src_dir\", help = \"directory with source files used for annotation\")\n argparser.add_argument(\"anno1_dir\", help = \"directory with annotation files of first annotator\")\n argparser.add_argument(\"anno2_dir\", help = \"directory with annotation files of second annotator\")\n args = argparser.parse_args(argv)\n # set parameters\n chck_flags = 0\n if args.type:\n for itype in set(args.type):\n if itype == SEGMENTS:\n chck_flags |= CHCK_SEGMENTS\n elif itype == MNUCLEARITY:\n chck_flags |= CHCK_MNUCLEARITY\n elif itype == DNUCLEARITY:\n chck_flags |= CHCK_DNUCLEARITY\n elif itype == MRELATIONS:\n chck_flags |= CHCK_MRELATIONS\n elif itype == DRELATIONS:\n chck_flags |= CHCK_DRELATIONS\n elif itype == ALL:\n chck_flags |= CHCK_ALL\n break\n else:\n chck_flags |= CHCK_ALL\n\n # iterate over each source file in `source` directory and find\n # corresponding annotation files\n anno1_fname = \"\"\n anno2_fname = \"\"\n src_fname_base = \"\"\n\n for src_fname in glob.iglob(os.path.join(args.src_dir, args.src_ptrn)):\n if not os.path.isfile(src_fname) or not os.access(src_fname, os.R_OK):\n continue\n # check annotation files corresponding to the given source file\n src_fname_base = os.path.splitext(os.path.basename(src_fname))[0]\n anno1_fname = \"\"\n anno1_fnames = glob.glob(os.path.join(args.anno1_dir, \\\n src_fname_base + args.anno_sfx))\n if anno1_fnames:\n anno1_fname = anno1_fnames[0]\n if not os.path.isfile(anno1_fname) or not os.access(anno1_fname, os.R_OK):\n continue\n\n anno2_fname = \"\"\n anno2_fnames = glob.glob(os.path.join(args.anno2_dir, \\\n src_fname_base + args.anno_sfx))\n if anno2_fnames:\n anno2_fname = anno2_fnames[0]\n if not os.path.isfile(anno2_fname) or not os.access(anno2_fname, os.R_OK):\n continue\n\n # measure agreement for the given annotation files\n update_stat(src_fname, anno1_fname, anno2_fname, chck_flags, args.output_difference, \\\n args.segment_strict, args.file_format, args.verbose)\n output_stat()\n return 0", "def program(self, occam_objs, native_objs, source, output):\n\t\tpass", "def anotation(output):\r\n\r\n vcfs = obtener_nombre_ficheros(output + '/pileup/', 'vcf')\r\n for fichero in vcfs:\r\n os.system(\"awk '{{print $1, $2, $4, $5, $10}}' {0}/pileup/{1} > {0}/annotate/{1}\".format(output, fichero))\r\n os.system(\"sed -i 's/chr//g' {0}/annotate/{1}\".format(output, fichero))\r\n os.system(\"awk '{{print $1{2}$2{2}$2{2}$3{2}$4{2}$5}}' {0}/annotate/{1} > {0}/annotate/{1}_awk.vcf\".format(output, fichero,'\"\\\\t\"'))\r\n os.system(\"grep -v '#' {0}/annotate/{1}_awk.vcf > {0}/annotate/{1}_grep.vcf\".format(output,fichero))\r\n os.system(\"python genotipo.py -i {0}/annotate/{1}_grep.vcf -o {0}/annotate/{1}\".format(output,fichero))\r\n os.system(\"rm {0}/annotate/{1}_awk.vcf\".format(output,fichero))\r\n os.system(\"rm {0}/annotate/{1}_grep.vcf\".format(output,fichero))\r\n os.system(\"perl annovar/table_annovar.pl {0}/annotate/{1} annovar/humandb/ -buildver hg19 -out {0}/annotate/{1} -remove -protocol refGene,cytoBand,gnomad_exome,clinvar_20131105,exac03,avsnp147,dbnsfp30a -operation g,r,f,f,f,f,f -nastring . 
-csvout -polish -xref annovar/example/gene_fullxref.txt\".format(output,fichero))\r\n os.system(\"awk -f filtro_awk {0}/annotate/{1}.{2}_multianno.csv > {0}/annotate/{1}.{2}_multianno_filtrado.csv\".format(output,fichero,\"hg19\")\r\n os.system(\"python multianno_vcf_annot.py -i {0}/annotate/{1}.{2}_multianno_filtrado.csv -o {0}/annotate/{1}.{2}_multianno_filtrado_genot.csv -v {0}/annotate/{1}\".format(output,fichero,\"hg19\"))\r\n \r\ndef main():\r\n \"\"\"\r\n Funcion que ejecuta el programa.\r\n \"\"\"\r\n\r\n ext = \"fastq\"\r\n argum = argumentos()\r\n crear_directorios(argum.output)\r\n ficheros = obtener_nombre_ficheros(argum.input, ext)\r\n calidad_fichero(ficheros, argum.input, argum.output)\r\n trimming(ficheros, argum.input, argum.output, argum.type)\r\n alineamiento(argum.reference, argum.input, argum.output, argum.type, ext, argum.amplicon)\r\n variant_calling(argum.reference, argum.input, argum.output)\r\n anotation(argm.output)", "def run(\n python,\n iAnnotateSV,\n build,\n distance,\n canonicalTranscriptFile,\n uniprotFile,\n cosmicFile,\n cosmicCountsFile,\n repeatregionFile,\n dgvFile,\n inputTabFile,\n outputPrefix,\n outputDir):\n\n start_time = time.time()\n cp.checkDir(outputDir)\n cp.checkFile(iAnnotateSV)\n cp.checkFile(inputTabFile)\n cp.checkFile(python)\n cp.checkInt(distance, \"Distance for extending the promoter region\")\n cp.checkEmpty(build, \"Which human reference file to be used, hg18,hg19 or hg38\")\n cp.checkFile(canonicalTranscriptFile)\n cp.checkFile(uniprotFile)\n cp.checkFile(cosmicFile)\n cp.checkFile(cosmicCountsFile)\n cp.checkFile(repeatregionFile)\n cp.checkFile(dgvFile)\n logger.info(\"Run_iAnnotateSV: All input parameters look good. Lets run the package.\")\n myPid = os.getpid()\n day = date.today()\n today = day.isoformat()\n logger.info(\"Run_iAnnotateSV: ProcessID:%s, Date:%s\", myPid, today)\n outputFile = outputDir + \"/\" + outputPrefix + \"_Annotated.txt\"\n cmd = python + \" \" + iAnnotateSV + \" -r \" + build + \" -i \" + inputTabFile + \" -o \" + outputDir + \" -ofp \" + outputPrefix + \" -d \" + str(\n distance) + \" -c \" + canonicalTranscriptFile + \" -rr \" + repeatregionFile + \" -cc \" + cosmicFile + \" -cct \" + cosmicCountsFile + \" -dgv \" + dgvFile + \" -v -p -u \" + uniprotFile\n args = shlex.split(cmd)\n logger.info(\"Run_iAnnotateSV: Command that will be run: %s\", cmd)\n # Remove if the file exists\n if(os.path.isfile(outputFile)):\n os.remove(outputFile)\n proc = Popen(args)\n proc.wait()\n retcode = proc.returncode\n if(retcode >= 0):\n end_time = time.time()\n totaltime = str(timedelta(seconds=end_time - start_time))\n logger.info(\n \"Run_iAnnotateSV: We have finished running iAnnotateSV for %s using local machine\",\n inputTabFile)\n logger.info(\"Run_iAnnotateSV Duration: %s\", totaltime)\n else:\n logger.info(\n \"Run_iAnnotateSV: iAnnotateSV is either still running on local machine or it errored out with return code %d for %s\",\n retcode,\n inputTabFile)\n sys.exit()\n return(outputFile)", "def test_bookmark_runs(self):\n pass", "def test(filename):\n cb = CorpusBuilder()\n\n # generate a test file for Mallet (file_basename.tst).\n cb.testing(filename)\n\n f = open('data/{}.txt'.format(filename))\n lines = f.readlines()\n corpus = \"\".join(lines)\n spans = WordPunctTokenizer().span_tokenize(corpus)\n\n # java command to run a mallet model.\n p = subprocess.Popen(['java', '-cp', 'lib/mallet.jar:lib/mallet-deps.jar', 'cc.mallet.fst.SimpleTagger',\n '--model-file', 'model/genecrf', '--include-input', 'true', 
'data/{}.tst'.format(filename)]\n , stdout=subprocess.PIPE)\n out = p.stdout\n\n # producing annotations from CRF outputs.\n p_gene_s, p_gene_e = -1, -1\n p_name = {}\n t = 1\n for span in spans:\n s, e = span\n tok = out.readline().decode(\"utf-8\").rstrip('\\n').strip(' ')\n if tok == \"\":\n tok = out.readline().decode(\"utf-8\").rstrip('\\n').strip(' ')\n\n if tok.startswith(\"GENE\"):\n if not s == p_gene_e: # new gene starts.\n p_name = {'name': corpus[s:e], 's': s, 'e': e}\n else: # the same gene continues.\n p_name['name'] += corpus[s:e]\n p_name['e'] = e\n p_gene_s, p_gene_e = s, e\n else: # not a gene\n if p_name:\n print('T{}\\tProtein {} {}\\t{}'.format(t, p_name['s'], p_name['e'], p_name['name']))\n p_name = {}\n t += 1\n out.close()", "def test_bookmark_run(self):\n pass", "def annotate_all(self):\n logger.info(\"Annotating data\")\n self.genomic_df = self.genomic_df.merge(\n self.annotation_df, how=\"left\", on=[\"IDENTIFIER\"]\n )\n self.genomic_df = self._string_split(self.genomic_df, \"GENE\", \",\")\n self.annotate = True", "def test_seqbuster(self):\n from mirtop.libs import logger\n logger.initialize_logger(\"test\", True, True)\n logger = logger.getLogger(__name__)\n from mirtop.mirna import fasta, mapper\n precursors = fasta.read_precursor(\"data/examples/annotate/hairpin.fa\", \"hsa\")\n matures = mapper.read_gtf_to_precursor(\"data/examples/annotate/hsa.gff3\")\n def annotate(fn, precursors, matures):\n from mirtop.importer import seqbuster\n from mirtop.bam import bam\n reads = seqbuster.read_file(fn, precursors)\n ann = bam.annotate(reads, matures, precursors)\n return True\n print \"\\nperfect\\n\"\n annotate(\"data/examples/seqbuster/reads20.mirna\", precursors, matures)\n print \"\\naddition\\n\"\n annotate(\"data/examples/seqbuster/readsAdd.mirna\", precursors, matures)", "def run_humann(command):\n \n command+=[\"--nucleotide-database\",cfg.chocophlan_example_demo_folder,\n \"--protein-database\", cfg.uniref_example_demo_folder]\n run_command(command)", "def main():\n print \"Preprocess a single sample application\"", "def annotateVCF(self):\n cwd = os.getcwd()\n if self.__finalVCF:\n self.__ifVerbose(\"Annotating final VCF.\")\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_annotation.txt'],\n ['java', '-Xmx4g', '-jar', self.__annotator, 'NC_000962', self.__finalVCF])\n self.__annotation = self.fOut + \"/\" + self.name +'_annotation.txt'\n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Final_annotation.txt'],\n ['python', self.__parser, self.__annotation, self.name, self.mutationloci])\n else:\n self.__ifVerbose(\"Use SamTools, GATK, or Freebayes to annotate the final VCF.\")\n self.__CallCommand('rm', ['rm', cwd + \"/snpEff_genes.txt\"])\n self.__CallCommand('rm', ['rm', cwd + \"/snpEff_summary.html\"])", "def main():\n\n # get list of exams in DICOM_DIR (/dicom) that meet criterion\n exams = sorted([d for d in os.listdir(DICOM_DIR) if\n check_dir(op.join(DICOM_DIR,d))])\n if len(exams) == 0: return\n\n # print time, date, and exams to concatenate to log file\n t = datetime.strftime(datetime.fromtimestamp(time.time()),'%D %H:%M:%S')\n d = 'Exams to concatenate: {0}'.format(', '.join(exams))\n print('\\n{}\\n{:^80}\\n{:^80}\\n{}'.format('='*80,t,d,'='*80))\n\n # iterate through exams and call concatenate() (from utils.py)\n for sub in exams:\n t = datetime.strftime(datetime.fromtimestamp(time.time()),'%H:%M:%S')\n print('+ {0} Exam: {1} +'.format(t, sub))\n 
flush_it('++ Concatenating scans')\n\n concatenate(op.join(DICOM_DIR,sub), interactive=False)", "def main():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument('-i', '--input_path', required=True,\n help='Path to folder containing parallel corpora')\n arg_parser.add_argument('-o', '--output_path', required=True,\n help='Path and name for output.')\n arg_parser.add_argument('-info', '--is_info', action='store_true',\n default=False,\n help='Use when concatenating .info files')\n\n args = arg_parser.parse_args()\n\n input_path = args.input_path\n if not os.path.isdir(input_path):\n raise ValueError('Invalid input folder: %s' % input_path)\n output_path = args.output_path\n output_folder, output_name = os.path.split(output_path)\n if not os.path.isdir(output_folder):\n raise ValueError('Invalid output folder: %s' % output_folder)\n if output_name.strip() == '':\n raise ValueError('Empty output name')\n\n if args.is_info:\n concat_info_corpus(input_path, output_path)\n else:\n concat_corpus(input_path, output_path)", "def annotateVCF(self):\n cwd = os.getcwd()\n if self.__finalVCF:\n self.__ifVerbose(\"Annotating final VCF.\")\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_annotation.txt'],\n ['env', '_JAVA_OPTIONS=-Xmx4g', self.__annotator, '-download', 'm_tuberculosis_H37Rv', self.__finalVCF])\n self.__annotation = self.fOut + \"/\" + self.name +'_annotation.txt'\n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Final_annotation.txt'],\n [self.__parser, self.__annotation, self.name, self.mutationloci])\n if os.path.isfile(self.fOut + \"/\" + self.name +'_SamTools_Resistance_filtered.vcf'):\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_Resistance_annotation.txt'],\n ['env', '_JAVA_OPTIONS=-Xmx4g', self.__annotator, '-download', 'm_tuberculosis_H37Rv', self.fOut + \"/\" + self.name +'_SamTools_Resistance_filtered.vcf']) \n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Resistance_Final_annotation.txt'],\n [self.__parser, self.fOut + \"/\" + self.name +'_Resistance_annotation.txt', self.name, self.mutationloci])\n elif os.path.isfile(self.fOut + \"/\" + self.name +'_GATK_Resistance_filtered.vcf'):\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_Resistance_annotation.txt'],\n ['env', '_JAVA_OPTIONS=-Xmx4g', self.__annotator, '-download', 'm_tuberculosis_H37Rv', self.fOut + \"/\" + self.name +'_GATK_Resistance_filtered.vcf']) \n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Resistance_Final_annotation.txt'],\n [self.__parser, self.fOut + \"/\" + self.name +'_Resistance_annotation.txt', self.name, self.mutationloci])\n else:\n self.__ifVerbose(\"Use SamTools, GATK, or Freebayes to annotate the final VCF.\")\n self.__CallCommand('rm', ['rm', cwd + \"/snpEff_genes.txt\"])\n self.__CallCommand('rm', ['rm', cwd + \"/snpEff_summary.html\"])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the NCBImetaAnnotate application for run completion
def test_annotate_replace_run(): # Use the test database test_db = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.sqlite") test_annotfile = os.path.join( os.path.dirname(os.path.abspath(__file__)), "test_annot.txt" ) # If the test_db doesn't already exist, run the test cmd from test_ncbimeta if not os.path.exists(test_db): test_ncbimeta.test_ncbimeta_run() test_table = "BioSample" test_cmd = ( "ncbimeta/NCBImetaAnnotate --database " + test_db + " --table " + test_table + " --annotfile " + test_annotfile ) # test NCBImetaAnnotate through a subprocess returned_value = subprocess.call(test_cmd, shell=True) # If it returns a non-zero value, it failed assert returned_value == 0
[ "def test_annotate_concatenate_run():\n # Use the test database\n test_db = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"test.sqlite\")\n test_annotfile = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"test_annot.txt\"\n )\n # If the test_db doesn't alread exist, run the test cmd from test_ncbimeta\n if not os.path.exists(test_db):\n test_ncbimeta.test_ncbimeta_run()\n test_table = \"BioSample\"\n test_cmd = (\n \"ncbimeta/NCBImetaAnnotate --database \"\n + test_db\n + \" --table \"\n + test_table\n + \" --annotfile \"\n + test_annotfile\n + \" --concatenate\"\n )\n # test NCBImetaAnnotate through a subprocess\n returned_value = subprocess.call(test_cmd, shell=True)\n # If it returns a non-zero value, it failed\n assert returned_value == 0", "def take_action(self, parsed_args):\n args = sys.argv[1:]\n self.log.info('Annotation Development')\n self.log.debug('debugging [Annotation]')\n\n url = parsed_args.url\n doc = parsed_args.doc\n self.log.info('Arguments: '+ str(args) + '\\n')\n\n if url:\n req_ob = requests.get(str(url).strip())\n soup = BeautifulSoup(req_ob.content, \"html.parser\")\n\n \n try: \n abstract = soup.find_all(\"p\", {\"id\" : \"p-2\"})[0]\n abs_text = trimlines(abstract.text).encode('ascii','ignore')\n data = {'content' : str(abs_text)}\n\n response = requests.get(server_url + '/annotations/entities', params = data)\n\n if response.status_code == 200:\n annotated_data = response.json()\n self.app.stdout.write(str(annotated_data))\n hpo_terms = []\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_annotated_data.txt', 'w')\n fopen.write(str(annotated_data) + '\\n')\n\n fopen.close()\n\n for ob in annotated_data:\n token = ob['token']\n if 'Phenotype' in token['categories']:\n term = str(token['terms'][0])\n if term not in hpo_terms:\n hpo_terms.append(token['terms'][0])\n\n self.app.stdout.write('\\n HPO Terms:\\n')\n for term in hpo_terms:\n self.app.stdout.write(str(term) + '\\n')\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_hpo_terms.txt', 'w' )\n fopen.write('HPO Terms:\\n')\n for term in hpo_terms:\n fopen.write(str(term) + '\\n')\n\n fopen.close()\n else:\n self.app.stdout.write(str(response.status_code))\n except:\n self.app.stdout.write(\"Abstract Not found\\n\")\n \n if doc:\n html_doc = open(str(doc), 'r')\n soup = BeautifulSoup(html_doc, 'html.parser')\n\n try:\n self.app.stdout.write('Title:' + str(soup.title.get_text()) + '\\n')\n except:\n pass\n\n try:\n meta_list = soup.find_all('meta', {'name' : 'dc.Description'})\n content_list= [k.get('content') for k in meta_list]\n content = ' '.join(content_list)\n data = {'content' : str(content)}\n \n response = requests.get(server_url + '/annotations/entities', params = data)\n\n if response.status_code == 200:\n annotated_data = response.json()\n self.app.stdout.write(str(annotated_data))\n hpo_terms = []\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_annotated_data.txt', 'w')\n fopen.write(str(annotated_data) + '\\n')\n\n fopen.close()\n\n for ob in annotated_data:\n token = ob['token']\n if 'Phenotype' in token['categories']:\n term = str(token['terms'][0])\n if term not in hpo_terms:\n hpo_terms.append(token['terms'][0])\n\n self.app.stdout.write('\\n HPO Terms:\\n')\n for term in hpo_terms:\n self.app.stdout.write(str(term) + '\\n')\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_hpo_terms.txt', 'w' )\n fopen.write('HPO Terms:\\n')\n for term in hpo_terms:\n fopen.write(str(term) + 
'\\n')\n\n fopen.close()\n else:\n self.app.stdout.write(str(response.status_code)+ '\\n')\n\n except:\n self.app.stdout.write('Meta Data not Found\\n')", "def run(\n python,\n iAnnotateSV,\n build,\n distance,\n canonicalTranscriptFile,\n uniprotFile,\n cosmicFile,\n cosmicCountsFile,\n repeatregionFile,\n dgvFile,\n inputTabFile,\n outputPrefix,\n outputDir):\n\n start_time = time.time()\n cp.checkDir(outputDir)\n cp.checkFile(iAnnotateSV)\n cp.checkFile(inputTabFile)\n cp.checkFile(python)\n cp.checkInt(distance, \"Distance for extending the promoter region\")\n cp.checkEmpty(build, \"Which human reference file to be used, hg18,hg19 or hg38\")\n cp.checkFile(canonicalTranscriptFile)\n cp.checkFile(uniprotFile)\n cp.checkFile(cosmicFile)\n cp.checkFile(cosmicCountsFile)\n cp.checkFile(repeatregionFile)\n cp.checkFile(dgvFile)\n logger.info(\"Run_iAnnotateSV: All input parameters look good. Lets run the package.\")\n myPid = os.getpid()\n day = date.today()\n today = day.isoformat()\n logger.info(\"Run_iAnnotateSV: ProcessID:%s, Date:%s\", myPid, today)\n outputFile = outputDir + \"/\" + outputPrefix + \"_Annotated.txt\"\n cmd = python + \" \" + iAnnotateSV + \" -r \" + build + \" -i \" + inputTabFile + \" -o \" + outputDir + \" -ofp \" + outputPrefix + \" -d \" + str(\n distance) + \" -c \" + canonicalTranscriptFile + \" -rr \" + repeatregionFile + \" -cc \" + cosmicFile + \" -cct \" + cosmicCountsFile + \" -dgv \" + dgvFile + \" -v -p -u \" + uniprotFile\n args = shlex.split(cmd)\n logger.info(\"Run_iAnnotateSV: Command that will be run: %s\", cmd)\n # Remove if the file exists\n if(os.path.isfile(outputFile)):\n os.remove(outputFile)\n proc = Popen(args)\n proc.wait()\n retcode = proc.returncode\n if(retcode >= 0):\n end_time = time.time()\n totaltime = str(timedelta(seconds=end_time - start_time))\n logger.info(\n \"Run_iAnnotateSV: We have finished running iAnnotateSV for %s using local machine\",\n inputTabFile)\n logger.info(\"Run_iAnnotateSV Duration: %s\", totaltime)\n else:\n logger.info(\n \"Run_iAnnotateSV: iAnnotateSV is either still running on local machine or it errored out with return code %d for %s\",\n retcode,\n inputTabFile)\n sys.exit()\n return(outputFile)", "def test_main_completes(self):\n assert app.main() == 0", "def test_bookmark_run(self):\n pass", "def cli(raw_args: Optional[list[str]] = None) -> None:\n if not raw_args:\n raw_args = sys.argv[1:]\n\n parser = configure_argument_parser()\n args = parser.parse_args(raw_args)\n VerbosityConfiguration.set(args)\n CLIAnnotationContext.register(args)\n\n context = get_genomic_context()\n pipeline = CLIAnnotationContext.get_pipeline(context)\n grr = CLIAnnotationContext.get_genomic_resources_repository(context)\n\n if args.output:\n output = args.output\n else:\n output = os.path.basename(args.input).split(\".\")[0] + \"_annotated.vcf\"\n\n if not os.path.exists(args.work_dir):\n os.mkdir(args.work_dir)\n\n\n task_graph = TaskGraph()\n\n task_graph.input_files.append(args.input)\n task_graph.input_files.append(args.pipeline)\n if args.reannotate:\n task_graph.input_files.append(args.reannotate)\n\n if not tabix_index_filename(args.input):\n # annotate(args.input, None, pipeline.get_info(),\n # grr.definition, output, args.reannotate)\n assert grr is not None\n task_graph.create_task(\n \"all_variants_annotate\",\n annotate,\n [args.input, None, pipeline.get_info(),\n grr.definition, output, args.reannotate],\n []\n )\n else:\n with closing(TabixFile(args.input)) as pysam_file:\n regions = produce_regions(pysam_file, 
args.region_size)\n file_paths = produce_partfile_paths(args.input, regions, args.work_dir)\n region_tasks = []\n for index, (region, file_path) in enumerate(zip(regions, file_paths)):\n assert grr is not None\n region_tasks.append(task_graph.create_task(\n f\"part-{index}\",\n annotate,\n [args.input, region,\n pipeline.get_info(), grr.definition,\n file_path, args.reannotate],\n []\n ))\n\n assert grr is not None\n task_graph.create_task(\n \"combine\",\n combine,\n [args.input, pipeline.get_info(),\n grr.definition, file_paths, output],\n region_tasks\n )\n\n args.task_status_dir = os.path.join(args.work_dir, \".tasks-status\")\n args.log_dir = os.path.join(args.work_dir, \".tasks-log\")\n\n TaskGraphCli.process_graph(task_graph, **vars(args))", "def testDetectTextPdf_Success(self):\n input_file = 'gs://fake-bucket/fake-file'\n output_path = 'gs://fake-bucket/'\n self._ExpectAsyncBatchAnnotationRequest(\n input_file=input_file,\n output_path=output_path,\n feature_type=self.messages.Feature.\\\n TypeValueValuesEnum.DOCUMENT_TEXT_DETECTION,\n mime_type='application/pdf',\n entity_field_name='textAnnotations',\n results=['animal', 'cat'],\n model=self.model)\n self.Run('ml vision detect-text-pdf {input_file} {output_path}'\n .format(input_file=input_file, output_path=output_path))\n self.AssertOutputEquals(textwrap.dedent(\"\"\"\\\n {\n \"response\": {\n \"textAnnotations\": [\n {\n \"confidence\": 0.5,\n \"description\": \"animal\"\n },\n {\n \"confidence\": 0.5,\n \"description\": \"cat\"\n }\n ]\n }\n }\n \"\"\"))", "def main():\n print \"Preprocess a single sample application\"", "def main():\n sys.exit(SignificanceTestApplication().run())", "def test_bookmark_runs(self):\n pass", "def run_sample():\n from autumn.projects.covid_19.vaccine_optimisation.sample_code import run_sample_code\n\n run_sample_code()", "def main():\n lbls = imageio.v2.imread(Path(\"sample_data/test_labels.tif\"))\n lbls2 = np.zeros_like(lbls)\n lbls2[:, 3:, 2:] = lbls[:, :-3, :-2]\n lbls2 = lbls2 * 20\n\n labels = np.unique(lbls)[1:]\n labels_2 = np.unique(lbls2)[1:]\n\n viewer = napari.Viewer()\n lbls_layer = viewer.add_labels(lbls)\n lbls_layer2 = viewer.add_labels(lbls2)\n\n lbls_layer.features = make_features(labels, roi_id=\"ROI1\", n_features=6)\n lbls_layer2.features = make_features(labels_2, roi_id=\"ROI2\", n_features=6)\n # classifier_widget = ClassifierWidget(viewer)\n\n # viewer.window.add_dock_widget(classifier_widget)\n viewer.show(block=True)", "def test(filename):\n cb = CorpusBuilder()\n\n # generate a test file for Mallet (file_basename.tst).\n cb.testing(filename)\n\n f = open('data/{}.txt'.format(filename))\n lines = f.readlines()\n corpus = \"\".join(lines)\n spans = WordPunctTokenizer().span_tokenize(corpus)\n\n # java command to run a mallet model.\n p = subprocess.Popen(['java', '-cp', 'lib/mallet.jar:lib/mallet-deps.jar', 'cc.mallet.fst.SimpleTagger',\n '--model-file', 'model/genecrf', '--include-input', 'true', 'data/{}.tst'.format(filename)]\n , stdout=subprocess.PIPE)\n out = p.stdout\n\n # producing annotations from CRF outputs.\n p_gene_s, p_gene_e = -1, -1\n p_name = {}\n t = 1\n for span in spans:\n s, e = span\n tok = out.readline().decode(\"utf-8\").rstrip('\\n').strip(' ')\n if tok == \"\":\n tok = out.readline().decode(\"utf-8\").rstrip('\\n').strip(' ')\n\n if tok.startswith(\"GENE\"):\n if not s == p_gene_e: # new gene starts.\n p_name = {'name': corpus[s:e], 's': s, 'e': e}\n else: # the same gene continues.\n p_name['name'] += corpus[s:e]\n p_name['e'] = e\n p_gene_s, 
p_gene_e = s, e\n else: # not a gene\n if p_name:\n print('T{}\\tProtein {} {}\\t{}'.format(t, p_name['s'], p_name['e'], p_name['name']))\n p_name = {}\n t += 1\n out.close()", "def run_mention_detection_tests(self, mention_test_dir):\n self.logger.info(\"Running mention tests\")\n print \"Running mention tests\"", "def run_post_test(self):\n pass", "def test_demo(self):\n self.cbct.run_demo(show=False)", "def test_generate_sdk_markdown():\n assert gen_sdk.main() is True", "def setUp(self):\n self.CLI = BTCPBNBCommand()", "def test_seqbuster(self):\n from mirtop.libs import logger\n logger.initialize_logger(\"test\", True, True)\n logger = logger.getLogger(__name__)\n from mirtop.mirna import fasta, mapper\n precursors = fasta.read_precursor(\"data/examples/annotate/hairpin.fa\", \"hsa\")\n matures = mapper.read_gtf_to_precursor(\"data/examples/annotate/hsa.gff3\")\n def annotate(fn, precursors, matures):\n from mirtop.importer import seqbuster\n from mirtop.bam import bam\n reads = seqbuster.read_file(fn, precursors)\n ann = bam.annotate(reads, matures, precursors)\n return True\n print \"\\nperfect\\n\"\n annotate(\"data/examples/seqbuster/reads20.mirna\", precursors, matures)\n print \"\\naddition\\n\"\n annotate(\"data/examples/seqbuster/readsAdd.mirna\", precursors, matures)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse table to MADX sequence and return it as a string. Returns: str (the MADX sequence).
def parse_table_to_madx_sequence_string(self) -> str: return parse_table_to_madx_sequence_string(self.name, self.len, self.table)
[ "def parse_table_to_madx_line_string(self) -> str:\n self.add_drifts()\n defstr = _parse_table_to_madx_definitions(self.table)\n linestr = \"{}: LINE=({});\".format(\n self.name,\n \",\\n\\t\\t\".join(\n [\",\".join(c) for c in list(self.chunks(self.table.name.to_list(), 20))]\n ),\n )\n return defstr + \"\\n\\n\" + linestr", "def parse_table_to_madx_sequence_file(self, filename: str) -> None:\n parse_table_to_madx_sequence_file(self.name, self.len, self.table, filename)", "def parse_table_to_madx_install_str(self) -> str:\n return parse_table_to_madx_install_str(self.name, self.table)", "def parse_table_to_madx_remove_str(self) -> str:\n return parse_table_to_madx_remove_str(self.name, self.table)", "def parse_table_to_elegant_string(self) -> str:\n self.add_drifts()\n return parse_table_to_elegant_string(self.name, self.table)", "def parse_table_to_tracy_string(self) -> str:\n return parse_table_to_tracy_string(self.name, self.table)", "def parse_from_madx_sequence_string(string: str) -> (str, float, pd.DataFrame):\n # use lark to parse the string\n tree = MADX_PARSER.parse(string)\n positions, elements, name, length = MADXTransformer().transform(tree)\n\n # read the positions of the elements ('at' in the seq file)\n if positions is not None:\n dfpos = pd.DataFrame.from_records(positions, columns=[\"name\", \"pos\"])\n else:\n dfpos = pd.DataFrame()\n\n # if not bare sequence file\n if elements:\n dfel = pd.DataFrame(elements)\n\n # if positions are available merge the tables\n if positions:\n df = dfpos.merge(dfel, on=\"name\").sort_values(by=\"pos\")\n df.loc[df.L.isna(), \"L\"] = 0\n df[\"at\"] = df[\"pos\"]\n return name, length, df\n else:\n return name, length, dfel\n\n # if seq file is bare print warning and return only\n # pos table as table\n print(\"Warning: bare lattice only positions returned\")\n\n return name, length, dfpos", "def parse_table_to_madx_line_file(self, filename: str):\n save_string(self.parse_table_to_madx_line_string(), filename)", "def matrix2string(matrix):\n\tlines = ''\n\tfor entry in matrix:\n\t\ts = ''\n\t\tfor j, field in enumerate(entry):\n\t\t\tif j > 0:\n\t\t\t\ts += ','\n\t\t\ts += '\"' + str(field) + '\"'\n\t\tlines += s + '\\n'\n\treturn lines", "def get_string_from_table(self, offset):\r\n return parse_cstring_from_stream(self.debug_str_sec.stream, offset)", "def __FASTA_sequencesToMatrix(self):\n MatrixSequences = list()\n sequences = self.__read_FASTA_sequences()\n\n for sequence in sequences:\n # append sequance dna\n MatrixSequences.append(list(sequence[1]))\n\n return MatrixSequences", "def parse_from_madx_sequence_file(filename: str) -> (str, float, pd.DataFrame):\n with open(filename, \"r\") as f:\n string = f.read()\n\n return parse_from_madx_sequence_string(string)", "def convert_to_sequence_unit(symbol, start_time):\n return str(start_time) + ',' + str(start_time + 1) + ',' + str(symbol) + ';'", "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def __latex__(self):\n a = self.MomentMatrix\n lines = str(a).replace('[', '').replace(']', '').splitlines()\n rv = [r'\\begin{bmatrix}']\n rv += [' ' + ' & '.join(l.split()) + r'\\\\' for l in lines]\n rv += [r'\\end{bmatrix}']\n return '\\n'.join(rv)", "def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4] == ch:\n fasta += AA_code(atom[3])\n else:\n seq.append(fasta)\n ch = atom[4]\n fasta = AA_code(atom[3])\n 
seq.append(fasta)\n return seq", "def translate(seq, table):\n result = []\n for i in range(len(seq)):\n result.append(table[seq[i]])\n return result", "def translationMatrix(self, dagPath):\n \n matrix = dagPath.inclusiveMatrix()\n \n matrix = self.checkUpAxis(matrix)\n\n strOut = ( '\\tConcatTransform [%f %f %f %f' % (matrix(0,0), matrix(0,1), matrix(0,2), matrix(0,3)) ) + os.linesep\n strOut += ( '\\t %f %f %f %f' % (matrix(1,0), matrix(1,1), matrix(1,2), matrix(1,3)) ) + os.linesep\n strOut += ( '\\t %f %f %f %f' % (matrix(2,0), matrix(2,1), matrix(2,2), matrix(2,3)) ) + os.linesep\n strOut += ( '\\t %f %f %f %f]' % (matrix(3,0), matrix(3,1), matrix(3,2), matrix(3,3)) )\n \n return strOut", "def protein_to_fasta(conn, outputfile=None, verbose=False):\n\n if verbose:\n sys.stderr.write(f\"{color.GREEN}Creating fasta file{color.ENDC}\\n\")\n\n ex = conn.cursor().execute(\"SELECT protein_md5sum, protein_sequence from protein_sequence\")\n out = open(outputfile, 'w') if outputfile else sys.stdout\n for row in ex.fetchall():\n out.write(f\">{row[0]}\\n{row[1]}\\n\")\n \n if out is not sys.stdout:\n out.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse table to MADX sequence and write it to file.
def parse_table_to_madx_sequence_file(self, filename: str) -> None: parse_table_to_madx_sequence_file(self.name, self.len, self.table, filename)
[ "def parse_table_to_madx_line_file(self, filename: str):\n save_string(self.parse_table_to_madx_line_string(), filename)", "def parse_table_to_madx_sequence_string(self) -> str:\n return parse_table_to_madx_sequence_string(self.name, self.len, self.table)", "def parse_table_to_tracy_file(self, filename: str) -> None:\n parse_table_to_tracy_file(self.name, self.table, filename)", "def convert_file(in_file, out_file):\n sequences = SeqIO.parse(in_file, \"genbank\")\n g = open(out_file, \"w\")\n SeqIO.write(sequences, out_file, \"fasta\")", "def write_table(self):\n o = open(self.out_file, 'w')\n o.write(self.table)", "def parse_table_to_elegant_file(self, filename: str) -> None:\n self.add_drifts()\n\n parse_table_to_elegant_file(self.name, self.table, filename)", "def write_overrepresented_seq_table(self):\n table_path = os.path.join(self.path,\n self._build_combined_filename())\n table_path = re.sub('_qc', '_overrep_seqs', table_path)\n table_data = self._build_overrepresented_seq_table()\n logger.debug(\"writing overrepresented seqs to file '{}'\"\n .format(table_path))\n table_data.to_csv(table_path, index=False)", "def _write_table(self, table, path):\n io.write(table, path)", "def protein_to_fasta(conn, outputfile=None, verbose=False):\n\n if verbose:\n sys.stderr.write(f\"{color.GREEN}Creating fasta file{color.ENDC}\\n\")\n\n ex = conn.cursor().execute(\"SELECT protein_md5sum, protein_sequence from protein_sequence\")\n out = open(outputfile, 'w') if outputfile else sys.stdout\n for row in ex.fetchall():\n out.write(f\">{row[0]}\\n{row[1]}\\n\")\n \n if out is not sys.stdout:\n out.close()", "def write_seqfile(self,dirpath,seqfile_name,seqdb_dict):\n\t\n\t\twith open(os.path.join(dirpath,seqfile_name),\"w\") as seqfile:\n\t\t\tfor name,seq_dict in seqdb_dict.iteritems():\n\t\t\t\tfor seq_id in seq_dict:\t\t\n\t\t\t\t\tseqfile.write(\">{}|{}\\n{}\\n\".format(name,seq_dict[seq_id].id,seq_dict[seq_id].seq))\n\t\n\t\tseqfile.close()\t\t\t\n\t\t\t\n\t\treturn(os.path.join(dirpath,seqfile_name))", "def to_fasta_file(self, f):\n records = []\n\n for fragment in self.__genome.fragments.all():\n fragment = fragment.indexed_fragment()\n seq = Seq(fragment.sequence)\n rec = SeqRecord(seq, id=str(fragment.name), description='')\n records.append(rec)\n\n SeqIO.write(records, f, \"fasta\")", "def parseOutput(self):\n\n # Parse the output\n data_array = str(self.data).split(\"\\n\")\n data_array = [d.split(\"\\t\") for d in data_array]\n column_data = [d for d in data_array\n if not d[0].startswith(\"#\") and d[0] != \"\"]\n\n # Grab the amino acid header\n size = len(column_data[0])\n header = [d for d in data_array\n if d[0].startswith(\"#\") and len(d) == size]\n aa = [a.strip() for a in header[0][1:21]]\n\n # Put each column into an instance of AlignmentColumn\n self.columns = []\n for c in column_data:\n self.columns.append(AlignmentColumn(c,aa))", "def writeTable(RV):", "def archive(record):\n #\n # with open (\"archive50.txt\", \"a\", newline='') as arch:\n\n writer = csv.writer(arch, delimiter='\\t')\n writer.writerow(record)\n return", "def fasta_conversion(input_file, output_file):\n\n\twith open(input_file, \"r\") as input_handle:\n\t\tfor record in SeqIO.parse(input_handle, \"genbank\"):\n\t\t\tfor feature in record.features:\n\n\t\t\t\t#We take the locus tag, the accession and the protein sequence if\n\t\t\t\t# this exists. 
\n\n\t\t\t\tif feature.type == 'CDS':\n\t\t\t\t\ttry:\t\t\t\t\t\t\n\t\t\t\t\t\tif feature.qualifiers['translation'][0] != \" \":\n\t\t\t\t\t\t\tsys.stdout=open(output_file,'a')\n\t\t\t\t\t\t\tprint (\">\"+feature.qualifiers['locus_tag'][0]+\"@\"+ \n\t\t\t\t\t\t\t\trecord.name)\n\t\t\t\t\t\t\tprint(feature.qualifiers['translation'][0])\n\t\t\t\t\t\t\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\n\t\tsys.stdout.close()\n\t\tsys.stdout = open(\"/dev/stdout\", \"w\")", "def save(table, filename):\n\n with open(filename, 'wt', encoding='iso-8859-1') as outfile:\n csv.writer(outfile).writerows(table)", "def mmtformat(df, filename, ident_col_name, ra_col_name, dec_col_name,\n mag_col_name, ra_pm_col_name=None, dec_pm_col_name=None,\n epoch = 'J2000.0'):\n\n ident = df[ident_col_name]\n ra = df[ra_col_name]\n dec = df[dec_col_name]\n\n if ra_pm_col_name is None:\n ra_pm = '0.0'\n else:\n ra_pm = df[ra_pm_col_name]\n if dec_pm_col_name is None:\n dec_pm = '0.0'\n else:\n dec_pm = df[dec_pm_col_name]\n\n mag = df[mag_col_name]\n\n\n f = open(filename+'_MMT.dat', 'w')\n\n for i in df.index:\n\n f.write('{0:16}{1:13}{2:13}{3:4}{4:5}{5:04.1f} 0 {6:9}\\n'.format(ident[i],\n decra2hms(ra[i]),\n decdeg2dms(dec[i]),\n ra_pm,\n dec_pm,\n mag[i],\n epoch))\n print ('{0:15}{1:13}{2:13}{3:4}{4:5}{5:05.2f} {6:9}'.format(ident[i],\n decra2hms(ra[i]),\n decdeg2dms(dec[i]),\n ra_pm,\n dec_pm,\n mag[i],\n epoch))\n\n f.close()", "def write_output(output_file_name, seq_list, read_name, read1, read2, qv_read1, qv_read2):\r\n\r\n with open(output_file_name, 'a') as db_file:\r\n db_file.write(read_name + '\\t' + read1 + '\\t' + qv_read1 + '\\n')\r\n db_file.write(read_name + '\\t' + read2 + '\\t' + qv_read2 + '\\n')\r\n\r\n # For 5 alleles\r\n if len(seq_list) == 7: \r\n for name, sequence in seq_list[5:7]:\r\n db_file.write(name + '\\t' + sequence[0] + '\\n')\r\n \r\n # For 6 alleles\r\n if len(seq_list) == 8: \r\n for name, sequence in seq_list[6:8]:\r\n db_file.write(name + '\\t' + sequence[0] + '\\n')\r\n\r\n for name, sequence in seq_list[:-2]:\r\n db_file.write(name + '\\t' + sequence[0] + '\\n')\r\n db_file.write('$$$\\n')", "def writeTable(self, path):\r\n\r\n for tab in self.tables:\r\n tab.write(path)\r\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse table to Elegant lattice and return it as a string. Returns: str (the Elegant lattice).
def parse_table_to_elegant_string(self) -> str: self.add_drifts() return parse_table_to_elegant_string(self.name, self.table)
[ "def lattice2str(lattice):\n latticeStr = []\n for i in range(len(lattice)):\n latticeStr.append(elem2str(lattice[i]))\n return latticeStr", "def parse_table_to_tracy_string(self) -> str:\n return parse_table_to_tracy_string(self.name, self.table)", "def parse_table_to_madx_line_string(self) -> str:\n self.add_drifts()\n defstr = _parse_table_to_madx_definitions(self.table)\n linestr = \"{}: LINE=({});\".format(\n self.name,\n \",\\n\\t\\t\".join(\n [\",\".join(c) for c in list(self.chunks(self.table.name.to_list(), 20))]\n ),\n )\n return defstr + \"\\n\\n\" + linestr", "def table(self, L, R, n):\n s = ''\n for x in linspace(L, R, n):\n y = self(x)\n s += '%12g %12g\\n' % (x, y)\n return s", "def __str__(self):\n table = []\n\n if self.css_class:\n table.append('<table class=%s>' % self.css_class)\n else:\n table.append('<table>')\n\n if self.header_row:\n table.append(str(self.header_row))\n\n for row in self.rows:\n table.append(str(row))\n\n table.append('</table>')\n\n return '\\n'.join(table)", "def table(self, L, R, n):\r\n s = ''\r\n for x in linspace(L, R, n):\r\n y = self(x)\r\n s += '(%12g, %12g)\\n' %(x, y)\r\n return s", "def __str__(self):\n row = []\n\n row.append('<tr>')\n\n for cell in self.cells:\n row.append(str(cell))\n\n row.append('</tr>')\n\n return '\\n'.join(row)", "def str2lattice(latticeStr):\n lattice = []\n for i in range(len(latticeStr)):\n if latticeStr[i] in ['\\n', '\\r\\n'] or latticeStr[i][0]=='!':\n continue\n elem = str2elem(latticeStr[i])\n if elem : #check if elem is not empty\n lattice.append(elem)\n return lattice", "def convert_lattice(file_in, file_out):\n open_fn = gzip.open if file_in.endswith('.gz') else open\n with open_fn(file_in, 'rt') as lattice, open(file_out, 'w') as dot:\n dot.write(\n \"digraph lattice {\\n\" \\\n \"\\trankdir=LR;\\n\" \\\n \"\\tnode [shape = ellipse; fontname = courier];\\n\" \\\n \"\\tedge [fontname = courier];\\n\\n\")\n while True:\n line = lattice.readline()\n if line.startswith('N='):\n break\n first_line = line.split()\n nodes, links = [int(i.split('=')[1]) for i in first_line]\n for _ in range(nodes):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:3])\n dot.write(\"\\t%s [label = \\\"id=%s\\\\nt=%s\\\\nW=%s\\\"];\\n\" % (\n content[0], content[0], content[1], content[2]))\n dot.write(\"\\n\")\n for _ in range(links):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:5])\n if next_line[5].startswith('n='):\n dot.write(\n \"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\\nn=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3],\n content[4], next_line[5].split('=')[1]))\n else:\n dot.write(\"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3], content[4]))\n dot.write(\"}\")", "def linearize_table(self):\n pass", "def __str__(self):\n as_str = '\\t' + '\\t'.join(self.columns) + '\\n'\n if self.name:\n as_str = 'Plate: ' + self.name + '\\n' + as_str\n\n for i, row in enumerate(self.rows):\n line = row\n for j, column in enumerate(self.columns):\n line += '\\t' + str(self.values[self.cell_name(row, column)])\n as_str += line + '\\n'\n\n return as_str", "def tables():\n print('{:20}{:>5}{:>15}'.format('Tiger Woods', 52, '$1000211.22'))\n print('{:20}{:>5}{:>15}'.format('Elizabeth Smith', 123, '$1009.01'))\n print('{:20}{:>5}{:>15}'.format('Frank Lloyd Wright', 3, '$99.01'))\n print('{:20}{:>5}{:>15}'.format('Justin Timberlake', 39, '$199.01'))", 
"def _tab_print_ ( t , title = '' , prefix = '' , alignment = 'll' , xfmt = '%+.5g' , yfmt = '%+-.5g' ) :\n rows = [ ('Abscissa' , 'Value' ) ] \n for i in range ( t.size() ) :\n x = t.x ( i )\n y = t.y ( i )\n row = xfmt % x, yfmt % y\n rows.append ( row )\n \n if not title : title = 'Interpolation Table' \n import ostap.logger.table as T\n return T.table ( rows , title = title , prefix = prefix , alignment = alignment )", "def __str__(self):\n output = 'LinearHashTable:\\n'\n for i in range(self.n_slots):\n output += 'slot {0:8d} = '.format(i)\n item = self._data[i]\n if item == None:\n output = output + '-'\n else:\n output = output + str(item)\n output += '\\n'\n load_factor = float(self.n_items)/self.n_slots\n output += 'n_slots = {0:d}\\n'.format(self.n_slots)\n output += 'n_items in table = {0:d}\\n'.format(self.n_items)\n output += 'Load factor = {0:6.3f}\\n'.format(load_factor)\n return output", "def vpp_show_lisp_eid_table(node):\n\n vat = VatExecutor()\n vat.execute_script_json_out('lisp/show_lisp_eid_table.vat', node)\n return JsonParser().parse_data(vat.get_script_stdout())", "def test_tables_correctly_plotted():\n table = [ [1,2,3],\n [4,5,6],\n [7,8,9]]\n \n x_labels = [\"a\",\"b\",\"c\"]\n y_labels = [\"d\",\"e\",\"f\"]\n\n show_table(numpy.array(table), x_labels, y_labels)", "def emphasize_stringtable_elements(self, xml):\n regex = re.compile(r'\\[\\[VALUE_0x(?P<number>[0-9A-Fa-f]+)\\]\\]')\n xml = re.sub(regex, Style.BRIGHT + '[[VALUE_0x\\g<number>]]' + Style.RESET_ALL, xml)\n regex = re.compile(r'ST_0x(?P<number>[0-9A-Fa-f]+)\\]\\]')\n xml = re.sub(regex, Style.BRIGHT + 'ST_0x\\g<number>' + Style.RESET_ALL + ']]', xml)\n return xml", "def array_html_block_table(self, arr):\n\n (suppress, suppress_thresh) = self._get_suppress()\n\n st_tab = \"style='border: 2px solid black;'\"\n st_tr = \"style='border: 1px dotted; padding: 2px;'\"\n st_th = \"style='border: 1px dotted; padding: 2px; text-align: center;'\"\n st_tdval = \"style='border: 1px dotted; padding: 2px; text-align: right;'\"\n spc = arr.space\n if len(spc.ket_set):\n ket_indices = list(spc.ket_space().index_iter())\n else:\n ket_indices = [None]\n if len(spc.bra_set):\n bra_indices = list(spc.bra_space().index_iter())\n else:\n bra_indices = [None]\n fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)\n\n ht = ''\n\n if self.use_latex_label_in_html:\n ht += '$'+spc._latex_()+'$'\n else:\n # FIXME - here, and elsewhere, use unicode symbols '&#x27e8;' and '&#x27e9;'\n # for html.\n ht += spc._html_()+'<br>'\n\n ht += \"<table style='margin: 0px 0px;'>\\n\"\n\n if spc.ket_set:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n if len(spc.bra_set):\n colgrp_size = spc.bra_space().shape[-1]\n for i in range(spc.bra_space().dim() // colgrp_size):\n ht += (\"<colgroup span=%d \"+st_tab+\"></colgroup>\\n\") % colgrp_size\n else:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n\n if spc.bra_set:\n ht += \"<tbody \"+st_tab+\">\\n\"\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'> </td>'\n\n for b_idx in bra_indices:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left< '\n # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?\n # ht += r' \\right|}$'\n #else:\n ht += '&#x27e8;'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'\n\n ht += '</nobr></td>'\n\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n\n last_k = None\n for k_idx in ket_indices:\n if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != 
last_k:\n if last_k is not None:\n ht += '</tbody>\\n'\n ht += \"<tbody \"+st_tab+\">\\n\"\n last_k = k_idx[-2]\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left| '\n # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?\n # ht += r' \\right>}$'\n #else:\n ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'&#x27e9;'\n\n ht += '</nobr></td>'\n for b_idx in bra_indices:\n if k_idx is None and b_idx is None:\n assert 0\n elif k_idx is None:\n idx = b_idx\n elif b_idx is None:\n idx = k_idx\n else:\n idx = k_idx + b_idx\n v = arr[idx]\n if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):\n if self.zero_color_html != '':\n vs = \"<font color='\"+self.zero_color_html+\"'>0</font>\"\n else:\n vs = \"0\"\n else:\n vs = \"<nobr><tt>\"+fmt(v)+\"</tt></nobr>\"\n ht += '<td '+st_tdval+'>'+vs+'</td>'\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n ht += '</table>\\n'\n\n return ht", "def parse_table_to_madx_remove_str(self) -> str:\n return parse_table_to_madx_remove_str(self.name, self.table)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse table to Elegant lattice and write it to file.
def parse_table_to_elegant_file(self, filename: str) -> None: self.add_drifts() parse_table_to_elegant_file(self.name, self.table, filename)
[ "def parse_table_to_tracy_file(self, filename: str) -> None:\n parse_table_to_tracy_file(self.name, self.table, filename)", "def parse_table_to_madx_line_file(self, filename: str):\n save_string(self.parse_table_to_madx_line_string(), filename)", "def convert_lattice(file_in, file_out):\n open_fn = gzip.open if file_in.endswith('.gz') else open\n with open_fn(file_in, 'rt') as lattice, open(file_out, 'w') as dot:\n dot.write(\n \"digraph lattice {\\n\" \\\n \"\\trankdir=LR;\\n\" \\\n \"\\tnode [shape = ellipse; fontname = courier];\\n\" \\\n \"\\tedge [fontname = courier];\\n\\n\")\n while True:\n line = lattice.readline()\n if line.startswith('N='):\n break\n first_line = line.split()\n nodes, links = [int(i.split('=')[1]) for i in first_line]\n for _ in range(nodes):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:3])\n dot.write(\"\\t%s [label = \\\"id=%s\\\\nt=%s\\\\nW=%s\\\"];\\n\" % (\n content[0], content[0], content[1], content[2]))\n dot.write(\"\\n\")\n for _ in range(links):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:5])\n if next_line[5].startswith('n='):\n dot.write(\n \"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\\nn=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3],\n content[4], next_line[5].split('=')[1]))\n else:\n dot.write(\"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3], content[4]))\n dot.write(\"}\")", "def _dump_data(self, fileobj):\n if not fileobj and self._file:\n root = os.path.splitext(self._file.name)[0]\n fileobj = root + \".txt\"\n\n close_file = False\n\n if isinstance(fileobj, str):\n fileobj = open(fileobj, \"w\")\n close_file = True\n\n linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)\n\n # Process each row of the table and output one row at a time\n def format_value(val, format):\n if format[0] == \"S\":\n itemsize = int(format[1:])\n return \"{:{size}}\".format(val, size=itemsize)\n elif format in np.typecodes[\"AllInteger\"]:\n # output integer\n return f\"{val:21d}\"\n elif format in np.typecodes[\"Complex\"]:\n return f\"{val.real:21.15g}+{val.imag:.15g}j\"\n elif format in np.typecodes[\"Float\"]:\n # output floating point\n return f\"{val:#21.15g}\"\n\n for row in self.data:\n line = [] # the line for this row of the table\n\n # Process each column of the row.\n for column in self.columns:\n # format of data in a variable length array\n # where None means it is not a VLA:\n vla_format = None\n format = _convert_format(column.format)\n\n if isinstance(format, _FormatP):\n # P format means this is a variable length array so output\n # the length of the array for this row and set the format\n # for the VLA data\n line.append(\"VLA_Length=\")\n line.append(f\"{len(row[column.name]):21d}\")\n _, dtype, option = _parse_tformat(column.format)\n vla_format = FITS2NUMPY[option[0]][0]\n\n if vla_format:\n # Output the data for each element in the array\n for val in row[column.name].flat:\n line.append(format_value(val, vla_format))\n else:\n # The column data is a single element\n dtype = self.data.dtype.fields[column.name][0]\n array_format = dtype.char\n if array_format == \"V\":\n array_format = dtype.base.char\n if array_format == \"S\":\n array_format += str(dtype.itemsize)\n\n if dtype.char == \"V\":\n for value in row[column.name].flat:\n line.append(format_value(value, array_format))\n else:\n line.append(format_value(row[column.name], 
array_format))\n linewriter.writerow(line)\n if close_file:\n fileobj.close()", "def write_table(self):\n o = open(self.out_file, 'w')\n o.write(self.table)", "def create_slf_file(self):\n mesh = open(self.name, 'w') \n mesh.write('numel numnp nmat nmode (This is for a beam bridge)\\n')\n mesh.write(str(len(self.edge_list))+'\\t'+str(len(self.node_list))\n + '\\t'+str(len(self.beams)) + '\\t0\\n')\n mesh.write('matl no., E mod, Poiss. Ratio,density, Area, Iy, Iz\\n')\n tables = open('./tables/CHSTables.txt', 'r')\n for i,beam in enumerate(self.beams):\n mesh.write(str(i)+' '+str(self.beams[i]['emod'])+'\\t0.3000\\t'\n + str(self.beams[i]['density'])+'\\t'+str(self.beams[i]['area'])\n + '\\t'+str(self.beams[i]['iy'])+'\\t'+str(self.beams[i]['ix']) + '\\n') \n mesh.write('el no.,connectivity, matl no, element type\\n')\n for i, edge in enumerate(self.edge_list): \n mesh.write(str(i)+'\\t'+str(edge['pt_a'])+'\\t'+str(edge['pt_b'])\n + '\\t'+str(edge['material'])+'\\t2 \\n')\n mesh.write('node no., coordinates\\n')\n for node in self.node_list:\n mesh.write(node['id']+'\\t'+str(node['x'])+'\\t'+str(node['y'])+'\\t'+str(node['z'])+\"\\n\")\n mesh.write(\"element with specified local z axis: x, y, z component\\n -10\\n\")\n mesh.write('prescribed displacement x: node disp value\\n')\n for node in self.fixed_list:\n# if node[1] == True: # un-comment when dealing with fixed-roller structures\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed displacement y: node disp value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed displacement z: node disp value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi x: node angle value\\n')\n for node in self.fixed_list:\n# if node[1] == True: # un-comment when dealing with fixed-roller structures\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi y: node angle value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi z: node angle value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nnode with point load x, y, z and 3 moments phi x, phi y, phi z\\n') \n if self.BROKEN:\n for node in self.nodeselfloads: \n trans = 0\n broken_long = 0\n for thing in self.load_nodes:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load\n trans = self.transverse_cable_load \n if self.GROUND_BROKEN:\n for thing in self.ground_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_ground_load_broken\n trans = self.transverse_ground_load\n broken_long = self.longitudinal_ground_load\n for thing in self.break_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load_broken\n broken_long = self.longitudinal_cable_load\n trans = self.transverse_cable_load\n else:\n for thing in self.ground_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_ground_load\n trans = self.transverse_ground_load\n for thing in self.break_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load_broken\n broken_long = self.longitudinal_cable_load \n trans = self.transverse_cable_load\n mesh.write(str(node[0])+'\\t'+str(broken_long)+'\\t'+str(trans)+'\\t-'+str(round(node[1],5))+'\\t0\\t0\\t0\\n')\n else:\n for node in self.nodeselfloads: \n trans = 0\n for yolk in self.load_nodes:\n if yolk == node[0]:\n node[1] = node[1] + 
self.vertical_cable_load\n trans = self.transverse_cable_load\n for thong in self.ground_node:\n if thong == node[0]:\n node[1] = node[1] + self.vertical_ground_load\n trans = self.transverse_ground_load\n mesh.write(str(node[0])+'\\t0\\t'+str(trans)+'\\t-'+str(round(node[1],5))+'\\t0\\t0\\t0\\n')\n mesh.write('-10\\nelement with distributed load in global beam y and z coordinates\\n') \n mesh.write('-10\\nelement no. and gauss pt. no. with local stress vector xx and moment xx,yy,zz\\n-10')\n mesh.close()", "def dump_table_contents(block_table, name):\n dump_file = open(name, 'w')\n\n dump_file.write(\"self.dim = \" + block_table.dim.__str__() + \"\\n\")\n dump_file.write(\"self.k_max = \" + block_table.k_max.__str__() + \"\\n\")\n dump_file.write(\"self.l_max = \" + block_table.l_max.__str__() + \"\\n\")\n dump_file.write(\"self.m_max = \" + block_table.m_max.__str__() + \"\\n\")\n dump_file.write(\"self.n_max = \" + block_table.n_max.__str__() + \"\\n\")\n dump_file.write(\"self.delta_12 = \" + block_table.delta_12.__str__() + \"\\n\")\n dump_file.write(\"self.delta_34 = \" + block_table.delta_34.__str__() + \"\\n\")\n dump_file.write(\"self.odd_spins = \" + block_table.odd_spins.__str__() + \"\\n\")\n dump_file.write(\"self.m_order = \" + block_table.m_order.__str__() + \"\\n\")\n dump_file.write(\"self.n_order = \" + block_table.n_order.__str__() + \"\\n\")\n dump_file.write(\"self.table = []\\n\")\n\n for l in range(0, len(block_table.table)):\n dump_file.write(\"derivatives = []\\n\")\n for i in range(0, len(block_table.table[0].vector)):\n poly_string = block_table.table[l].vector[i].__str__()\n poly_string = re.sub(\"([0-9]+\\.[0-9]+e?-?[0-9]+)\", r\"RealMPFR('\\1', prec)\", poly_string)\n dump_file.write(\"derivatives.append(\" + poly_string + \")\\n\")\n dump_file.write(\"self.table.append(PolynomialVector(derivatives, \" + block_table.table[l].label.__str__() + \", \" + block_table.table[l].poles.__str__() + \"))\\n\")\n\n dump_file.close()", "def _write_table(self, table, path):\n io.write(table, path)", "def save_inference_table(filename, table):\r\n save_nparray(filename, table, colnames=['case_id', 't', 'p_y', 'y'])", "def writeTable(RV):", "def parse_table_to_elegant_string(self) -> str:\n self.add_drifts()\n return parse_table_to_elegant_string(self.name, self.table)", "def output_tab(self, file, restrict=False):\n\t\tbasks = self.baskets()\n labels = 
\"MICRO_AND_BINUCLEATED_CELLS_MICRONUCLEI_EDU\tMICRO_AND_PROBEABPOSITIVE_CELLS_MICRONUCLEI_EDU\tMICRO_AND_PROBEBPOSITIVE_CELLS_MICRONUCLEI_EDU\tMICRO_AND_PROBEAPOSITIVE_CELLS_MICRONUCLEI_EDU\tINTERPHASE_CELLS_MICRONUCLEI_EDU\tMONONUCLEATED_CELLS_MICRONUCLEI_EDU\tBINUCLEATED_CELLS_MICRONUCLEI_EDU\tMICRONUCLEATED_CELLS_MICRONUCLEI_EDU\tCELLS_WITH_ONE_MICRONUCLEUS_MICRONUCLEI_EDU\tPCT_BINUCLEATED_CELLS_MICRONUCLEI_EDU\tNUCLEAR_DIVISION_INDEX_MICRONUCLEI_EDU\tPCT_CELLS_WITH_ONE_MICRONUCLEUS_MICRONUCLEI_CYTO\tPCT_MICRONUCLEATED_CELLS_MICRONUCLEI_CYTO\tMICRONUCLEI_PER_CELL_MICRONUCLEI_CYTO\tTOTAL_MICRONUCLEI_MICRONUCLEI_CYTO\tALL_ACTIN_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_ACTIN_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tALL_ACTIN_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_ACTIN_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tALL_ACTIN_MEAN_STAIN_AREA_MULTIWAVESCORING_CYTO\tPOSITIVE_ACTIN_MEAN_STAIN_AREA_MULTIWAVESCORING_CYTO\tPOSITIVE_TUBULIN_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tALL_TUBULIN_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tCELL_TUBULIN_NUCLEUS_INTEGR_INTENSITY_MULTIWAVESCORING_CYTO\tDNA_MEAN_AREA_MICRONUCLEI_EDU\tBREADTH_IMA_SUMMARY_CYTO\tINNER_RADIUS_IMA_SUMMARY_CYTO\tOUTER_RADIUS_IMA_SUMMARY_CYTO\tLENGTH_IMA_SUMMARY_CYTO\tPERIMETER_IMA_SUMMARY_CYTO\tMEAN_RADIUS_IMA_SUMMARY_CYTO\tEQUIV_RADIUS_IMA_SUMMARY_CYTO\tEQUIV_PROLATE_VOL_IMA_SUMMARY_CYTO\tALL_NUCLEI_MEAN_AREA_MULTIWAVESCORING_CYTO\tAREA_IMA_SUMMARY_CYTO\tEQUIV_SPHERE_SURFACE_AREA_IMA_SUMMARY_CYTO\tTOTAL_AREA_IMA_SUMMARY_CYTO\tPIXEL_AREA_IMA_SUMMARY_CYTO\tEQUIV_SPHERE_VOL_IMA_SUMMARY_CYTO\tEQUIV_OBLATE_VOL_IMA_SUMMARY_CYTO\tNEGATIVE_EDU_MEAN_CELL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tNEGATIVE_EDU_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tOUTER_RADIUS_IMA_SUMMARY_EDU\tLENGTH_IMA_SUMMARY_EDU\tEQUIV_SPHERE_VOL_IMA_SUMMARY_EDU\tEQUIV_OBLATE_VOL_IMA_SUMMARY_EDU\tEQUIV_PROLATE_VOL_IMA_SUMMARY_EDU\tDAPI_STAINED_AREA_MULTIWAVESCORING_EDU\tCELL_TOTAL_AREA_MULTIWAVESCORING_EDU\tWIDTH_IMA_SUMMARY_EDU\tPERIMETER_IMA_SUMMARY_EDU\tEQUIV_RADIUS_IMA_SUMMARY_EDU\tMEAN_RADIUS_IMA_SUMMARY_EDU\tALL_CELLS_MEAN_AREA_MULTIWAVESCORING_EDU\tALL_NUCLEI_MEAN_AREA_MULTIWAVESCORING_EDU\tPIXEL_AREA_IMA_SUMMARY_EDU\tEQUIV_SPHERE_SURFACE_AREA_IMA_SUMMARY_EDU\tAREA_IMA_SUMMARY_EDU\tTOTAL_AREA_IMA_SUMMARY_EDU\tNUCLEAR_AREA_PER_CELL_TRANSFLUOR_EDU\tINNER_RADIUS_IMA_SUMMARY_EDU\tBREADTH_IMA_SUMMARY_EDU\tALL_PH3_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_EDU\tALL_PH3_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_EDU\tALL_PH3_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tALL_PH3_MEAN_CELL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tNEGATIVE_PH3_MEAN_CELL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tNEGATIVE_PH3_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tPCT_CELLS_WITH_MULTI_MICRONUCLEI_MICRONUCLEI_EDU\tALL_EDU_MEAN_STAIN_AREA_MULTIWAVESCORING_EDU\tPCT_POSITIVE_EDU_MULTIWAVESCORING_EDU\tCELL_PH3_STAINED_AVERAGE_INTENSITY_MULTIWAVESCORING_CYTO\tCELL_PH3_STAINED_AVERAGE_INTENSITY_MULTIWAVESCORING_EDU\tNEGATIVE_PH3_MEAN_STAIN_AREA_MULTIWAVESCORING_EDU\tNEGATIVE_PH3_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_ACTIN_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_CYTO\tALL_ACTIN_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_ACTIN_MEAN_CYTO_AVER_INTENS_MULTIWAVESCORING_CYTO\tALL_ACTIN_MEAN_CYTO_AVER_INTENS_MULTIWAVESCORING_CYTO\tPIT_INTEGRATED_INTENSITY_TRANSFLUOR_CYTO\tPIT_COUNT_TRANSFLUOR_CYTO\tPIT_TOTAL_AREA_TRANSFLUOR_CYTO\tGRADIENT_INDEX_TRANSFLUOR_CYTO\tLAPLACIAN_INDEX_TRANSFLUOR_CYTO\tPOSITIVE_TUBULIN_MEAN_CYTO_AVER_INTENS_MULTI
WAVESCORING_CYTO\tALL_TUBULIN_MEAN_CYTO_AVER_INTENS_MULTIWAVESCORING_CYTO\tALL_TUBULIN_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_TUBULIN_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_CYTO\tALL_TUBULIN_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_TUBULIN_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_ACTIN_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_ACTIN_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_ACTIN_MEAN_CYTO_AVER_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_ACTIN_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_ACTIN_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_CYTO\tMITOTIC_CELLS_MICRONUCLEI_CYTO\tTOTAL_CELLS_MICRONUCLEI_CYTO\tTOTAL_CELLS_MULTIWAVESCORING_CYTO\tPOSITIVE_TUBULIN_MULTIWAVESCORING_CYTO\tPOSITIVE_ACTIN_MULTIWAVESCORING_CYTO\tDNA_TOTAL_AREA_MICRONUCLEI_CYTO\tINTERPHASE_CELLS_MICRONUCLEI_CYTO\tMONONUCLEATED_CELLS_MICRONUCLEI_CYTO\tMITOTIC_CELLS_MICRONUCLEI_EDU\tPOSITIVE_EDU_MULTIWAVESCORING_EDU\tPIT_TOTAL_AREA_TRANSFLUOR_EDU\tPIT_COUNT_TRANSFLUOR_EDU\tNUCLEAR_INTEGRATED_INTENSITY_TRANSFLUOR_EDU\tNUCLEAR_TOTAL_AREA_TRANSFLUOR_EDU\tDNA_TOTAL_AREA_MICRONUCLEI_EDU\tTOTAL_CELLS_MICRONUCLEI_EDU\tTOTAL_CELLS_MULTIWAVESCORING_EDU\tNUCLEAR_COUNT_TRANSFLUOR_EDU\tELL_FORM_FACTOR_IMA_SUMMARY_CYTO\tELL_FORM_FACTOR_IMA_SUMMARY_EDU\tNEGATIVE_PH3_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_EDU\tALL_PH3_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_EDU\tALL_PH3_MEAN_STAIN_AREA_MULTIWAVESCORING_EDU\tPCT_POSITIVE_PH3_MULTIWAVESCORING_EDU\tPOSITIVE_PH3_MULTIWAVESCORING_EDU\tPOSITIVE_PH3_MULTIWAVESCORING_CYTO\tPCT_POSITIVE_PH3_MULTIWAVESCORING_CYTO\tALL_PH3_MEAN_STAIN_AREA_MULTIWAVESCORING_CYTO\tALL_PH3_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_PH3_MEAN_STAIN_AREA_MULTIWAVESCORING_CYTO\tNEGATIVE_PH3_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tPIT_AVERAGE_INTENSITY_TRANSFLUOR_EDU\tNEGATIVE_EDU_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_EDU\tCELL_PROBEA_AVERAGE_INTENSITY_MICRONUCLEI_EDU\tCELL_EDU_NUCLEUS_AVERAGE_INTENSITY_MULTIWAVESCORING_EDU\tCELL_EDU_CELL_AVERAGE_INTENSITY_MULTIWAVESCORING_EDU\tALL_EDU_MEAN_CELL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tALL_EDU_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tALL_EDU_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_EDU_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_EDU_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_EDU\tALL_EDU_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_EDU_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_EDU_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_EDU_MEAN_CELL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_EDU_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tCELLULAR_GRADIENT_INDEX_TRANSFLUOR_EDU\tCELLULAR_LAPLACIAN_INDEX_TRANSFLUOR_EDU\tPIT_INTEGRATED_INTENSITY_TRANSFLUOR_EDU\tALL_EDU_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_EDU\tALL_EDU_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_EDU\tCELLULAR_TEXTURE_INDEX_TRANSFLUOR_EDU\tLAPLACIAN_INDEX_TRANSFLUOR_EDU\tGRADIENT_INDEX_TRANSFLUOR_EDU\tTEXTURE_INDEX_TRANSFLUOR_EDU\tCELL_EDU_STAINED_AVERAGE_INTENSITY_MULTIWAVESCORING_EDU\tPOSITIVE_EDU_MEAN_STAIN_AREA_MULTIWAVESCORING_EDU\tCELL_EDU_STAINED_AREA_MULTIWAVESCORING_EDU\tPOSITIVE_PH3_MEAN_STAIN_AREA_MULTIWAVESCORING_EDU\tNEGATIVE_PH3_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_PH3_MEAN_CYTO_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_PH3_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_PH3_MEAN_STAIN_AREA_MULTIWAVESCORING_CYTO\tALL_PH3_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_PH3_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_PH3_M
EAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_PH3_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_PH3_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_PH3_MEAN_CELL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_PH3_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_PH3_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_PH3_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_PH3_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_EDU\tPOSITIVE_PH3_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_EDU\tALL_PH3_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_EDU\tNEGATIVE_TUBULIN_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_TUBULIN_MEAN_CELL_AVER_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_TUBULIN_MEAN_CYTO_AVER_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_TUBULIN_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_TUBULIN_MEAN_STAIN_AREA_MULTIWAVESCORING_CYTO\tNEGATIVE_TUBULIN_MEAN_STAIN_AVER_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_TUBULIN_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_TUBULIN_MEAN_CELL_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tPCT_POSITIVE_TUBULIN_MULTIWAVESCORING_CYTO\tBINUCLEATED_CELLS_MICRONUCLEI_CYTO\tMULTINUCLEATED_CELLS_MICRONUCLEI_CYTO\tMULTIMONO_CELL_RATIO_MICRONUCLEI_CYTO\tMULTIDUALMONO_CELL_RATIO_MICRONUCLEI_CYTO\tDUALMONO_CELL_RATIO_MICRONUCLEI_CYTO\tPCT_BINUCLEATED_CELLS_MICRONUCLEI_CYTO\tMULTIDUALMONO_CELL_RATIO_MICRONUCLEI_EDU\tDUALMONO_CELL_RATIO_MICRONUCLEI_EDU\tDNA_MEAN_AREA_MICRONUCLEI_CYTO\tALL_PH3_MEAN_NUCL_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tCELL_ACTIN_CYTOPLASM_AVERAGE_INTENSITY_MULTIWAVESCORING_CYTO\tCELL_ACTIN_CELL_AVERAGE_INTENSITY_MULTIWAVESCORING_CYTO\tNEGATIVE_ACTIN_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_CYTO\tNEGATIVE_ACTIN_MEAN_STAIN_AREA_MULTIWAVESCORING_CYTO\tPCT_CELLS_WITH_MULTI_MICRONUCLEI_MICRONUCLEI_CYTO\tSHAPE_FACTOR_IMA_SUMMARY_CYTO\tPCT_PROBEA_POSITIVE_CELLS_MICRONUCLEI_EDU\tPCT_PROBEB_POSITIVE_CELLS_MICRONUCLEI_EDU\tPCT_PROBEAB_POSITIVE_CELLS_MICRONUCLEI_EDU\tPCT_CELLS_WITH_ONE_MICRONUCLEUS_MICRONUCLEI_EDU\tPCT_MICRONUCLEATED_CELLS_MICRONUCLEI_EDU\tMICRONUCLEI_PER_CELL_MICRONUCLEI_EDU\tMICRONUCLEI_PER_HEALTHY_CELL_MICRONUCLEI_EDU\tMICRONUCLEI_PER_MONONUCLEATED_CELL_MICRONUCLEI_CYTO\tMICRONUCLEI_PER_PROBEBPOSITIVE_CELL_MICRONUCLEI_EDU\tMICRONUCLEI_PER_PROBEABPOSITIVE_CELL_MICRONUCLEI_EDU\tMICRONUCLEI_PER_PROBEAPOSITIVE_CELL_MICRONUCLEI_EDU\tMICRONUCLEI_PER_MONONUCLEATED_CELL_MICRONUCLEI_EDU\tNEGATIVE_EDU_MEAN_STAIN_AREA_MULTIWAVESCORING_EDU\tNEGATIVE_EDU_MEAN_STAIN_INTEGR_INTENS_MULTIWAVESCORING_EDU\tCELLS_WITH_MULTI_MICRONUCLEI_MICRONUCLEI_CYTO\tCELL_ACTIN_NUCLEUS_AVERAGE_INTENSITY_MULTIWAVESCORING_CYTO\tCELL_ACTIN_STAINED_AVERAGE_INTENSITY_MULTIWAVESCORING_CYTO\tTEXTURE_INDEX_TRANSFLUOR_CYTO\tPIT_AVERAGE_INTENSITY_TRANSFLUOR_CYTO\tCELL_TUBULIN_CELL_AVERAGE_INTENSITY_MULTIWAVESCORING_CYTO\tALL_TUBULIN_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_CYTO\tPOSITIVE_TUBULIN_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_CYTO\tCELLULAR_GRADIENT_INDEX_TRANSFLUOR_CYTO\tCELL_GRADIENT_INDEX_TRANSFLUOR_CYTO\tCELL_LAPLACIAN_INDEX_TRANSFLUOR_CYTO\tCELL_TEXTURE_INDEX_TRANSFLUOR_CYTO\tCELL_TUBULIN_NUCLEUS_AVERAGE_INTENSITY_MULTIWAVESCORING_CYTO\tCELL_PIT_AVERAGE_INTENSITY_TRANSFLUOR_CYTO\tPIT_COUNT_PER_CELL_TRANSFLUOR_CYTO\tPIT_AREA_PER_CELL_TRANSFLUOR_CYTO\tPOSITIVE_ACTIN_MEAN_NUCL_AVER_INTENS_MULTIWAVESCORING_CYTO\tMULTINUCLEATED_CELLS_MICRONUCLEI_EDU\tMULTIMONO_CELL_RATIO_MICRONUCLEI_EDU\tPCT_MULTINUCLEATED_CELLS_MICRONUCLEI_EDU\tCELLS_WITH_MULTI_MICRONUCLEI_MICRONUCLEI_EDU\tALL_NUCLEI_MEAN_INTEGR_ITENSITY_MULTIWAVESCORING_EDU\tTOTAL_INTENSITY_IMA_SUMM
ARY_EDU\tALL_NUCLEI_MEAN_INTEGR_ITENSITY_MULTIWAVESCORING_CYTO\tTOTAL_INTENSITY_IMA_SUMMARY_CYTO\tPCT_MITOTIC_CELLS_MICRONUCLEI_EDU\tALL_NUCLEI_MEAN_AVERAGE_ITENSITY_MULTIWAVESCORING_CYTO\tAVERAGE_INTENSITY_IMA_SUMMARY_CYTO\tCELL_DNA_AVERAGE_INTENSITY_MICRONUCLEI_CYTO\tCELL_NUCLEAR_AVERAGE_INTENSITY_MICRONUCLEI_CYTO\tDAPI_AVERAGE_INTENSITY_MULTIWAVESCORING_CYTO\tCELL_NUCLEAR_AVERAGE_INTENSITY_TRANSFLUOR_CYTO\tCELL_DNA_AVERAGE_INTENSITY_MICRONUCLEI_EDU\tCELL_NUCLEAR_AVERAGE_INTENSITY_MICRONUCLEI_EDU\tCELL_NUCLEAR_AVERAGE_INTENSITY_TRANSFLUOR_EDU\tDAPI_AVERAGE_INTENSITY_MULTIWAVESCORING_EDU\tNUCLEAR_AVERAGE_INTENSITY_TRANSFLUOR_EDU\tALL_NUCLEI_MEAN_AVERAGE_ITENSITY_MULTIWAVESCORING_EDU\tAVERAGE_INTENSITY_IMA_SUMMARY_EDU\".split(\"\\t\")\n\t\tfile.write(\"Features\\t{}\\n\".format(\"\\t\".join(labels)))\n\t\tfor bask in basks:\n\t\t\tif restrict and bask.fingerprint[\"NUCLEAR_COUNT_TRANSFLUOR_EDU\"][0] > -0.6:\n\t\t\t\tfile.write(\"{}_{}\\t{}\\n\".format(bask.m, bask.rt, \"\\t\".join([str(bask.fingerprint[param][0])\n\t\t\t\t\t\tfor param in labels])))\n\t\t\telif restrict:\n\t\t\t\tcontinue\n\t\t\telse:\t\n\t\t\t\tfile.write(\"{}\\t{}\\n\".format(str(bask), \"\\t\".join([str(bask.fingerprint[param][0])\n\t\t\t\t\t\tfor param in labels])))", "def create_table(self):\n table_path = os.path.join(self.opts[\"data_dir\"], self.table_name())\n self.output_file = open_fw(table_path, encoding=self.encoding)\n self.output_file.write(u'<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n self.output_file.write(u'\\n<root>')\n self.table_names.append((self.output_file, table_path))\n self.auto_column_number = 1\n\n # Register all tables created to enable\n # testing python files having custom download function\n if self.script.name not in self.script_table_registry:\n self.script_table_registry[self.script.name] = []\n self.script_table_registry[self.script.name].append(\n (self.table_name(), self.table))", "def to_html(self,fn='tableone.html'):\n tablefmt = 'html'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))", "def write_lat_file(self):\n\n # If the lattice file exists, remove it and start over\n if os.path.isfile(self.filename):\n os.remove(self.filename)\n\n lat = open(self.filename, 'w')\n\n header = '? VERSION = 1.0\\n'\n header += '? 
UNITLENGTH = ' + str(self.unit_length) + '\\n'\n lat.write(header)\n\n quad_label = '#\\n'\n quad_label += '# Quads:\\n'\n quad_label += '# QF dB/dx L space\\n'\n quad_label += '#--------------------------------------\\n'\n lat.write(quad_label)\n\n # Start with quads\n for quad_array in self.elems_dict['QF']:\n quadline = 'QF '\n quadline += str(quad_array[0]) + ' '\n quadline += str(quad_array[1]) + ' '\n quadline += str(quad_array[2]) + ' \\n'\n lat.write(quadline)\n\n und_label = '#\\n'\n und_label += '# Undulators:\\n'\n und_label += '# AW AW0 L space\\n'\n und_label += '#--------------------------------------\\n'\n lat.write(und_label)\n\n # Add undulators\n for und_array in self.elems_dict['AW']:\n undline = 'AW '\n undline += str(und_array[0]) + ' '\n undline += str(und_array[1]) + ' '\n undline += str(und_array[2]) + ' \\n'\n lat.write(undline)\n\n lat.close()", "def write_table(self):\n with open(self.table_file, 'w') as table_f:\n pickle.dump(self.pollster_table, table_f)", "def to_latex(self,fn='tableone.tex'):\n tablefmt = 'latex'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))", "def main():\n with open(\"dataset.csv\", \"r\") as fp:\n x = from_csv(fp) # read from csv by PT\n y = PrettyTable.get_html_string(x) # translate to html, PT\n table_html = y # store in var\n html_file=open('table.html','w') # create and write\n html_file=html_file.write(table_html) # add var html", "def create_table(self, h5_file):\n if self.verbose:\n print \"Creating and populating\", self.file_prefix, self.type, \"table\"\n group_exists = False\n for x in h5_file:\n if x._v_name == self.file_prefix:\n group_exists = True\n group = x\n if not group_exists:\n group = h5_file.create_group(\"/\", self.file_prefix, self.file_prefix)\n table_def = {}\n for k in self.data_format:\n table_def[k] = self.data_format[k][1]\n table = h5_file.create_table(group, self.type, table_def)\n individual = table.row\n for x in self.read():\n for y in x:\n individual[y] = x[y]\n individual.append()\n table.flush()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse table to Tracy lattice and return it as a string. Returns str Tracy lattice.
def parse_table_to_tracy_string(self) -> str: return parse_table_to_tracy_string(self.name, self.table)
[ "def lattice2str(lattice):\n latticeStr = []\n for i in range(len(lattice)):\n latticeStr.append(elem2str(lattice[i]))\n return latticeStr", "def parse_table_to_elegant_string(self) -> str:\n self.add_drifts()\n return parse_table_to_elegant_string(self.name, self.table)", "def parse_table_to_madx_line_string(self) -> str:\n self.add_drifts()\n defstr = _parse_table_to_madx_definitions(self.table)\n linestr = \"{}: LINE=({});\".format(\n self.name,\n \",\\n\\t\\t\".join(\n [\",\".join(c) for c in list(self.chunks(self.table.name.to_list(), 20))]\n ),\n )\n return defstr + \"\\n\\n\" + linestr", "def __str__(self):\n table = []\n\n if self.css_class:\n table.append('<table class=%s>' % self.css_class)\n else:\n table.append('<table>')\n\n if self.header_row:\n table.append(str(self.header_row))\n\n for row in self.rows:\n table.append(str(row))\n\n table.append('</table>')\n\n return '\\n'.join(table)", "def __str__(self):\n row = []\n\n row.append('<tr>')\n\n for cell in self.cells:\n row.append(str(cell))\n\n row.append('</tr>')\n\n return '\\n'.join(row)", "def __str__(self):\n return_string = \"Truth Table type=\"\n return_string += 'REPORTING' if self.type == TruthTableType.REPORTING else 'TRANSITION'\n return_string += '\\n'\n for k,v in self.header.items():\n if k not in ['next_state', 'output']:\n return_string += '[' + k + '=' + ','.join(v) + ']'\n else:\n return_string += '[' + k + '=' + v + ']'\n return_string += '\\n'\n return_string += '--------------------------------------\\n'\n for transition_dict in self.transitions:\n for k,v in transition_dict.items():\n return_string += '[' + k + '=' + ','.join(v) + ']'\n return_string += '\\n'\n return return_string", "def __str__(self):\n as_str = '\\t' + '\\t'.join(self.columns) + '\\n'\n if self.name:\n as_str = 'Plate: ' + self.name + '\\n' + as_str\n\n for i, row in enumerate(self.rows):\n line = row\n for j, column in enumerate(self.columns):\n line += '\\t' + str(self.values[self.cell_name(row, column)])\n as_str += line + '\\n'\n\n return as_str", "def convert_lattice(file_in, file_out):\n open_fn = gzip.open if file_in.endswith('.gz') else open\n with open_fn(file_in, 'rt') as lattice, open(file_out, 'w') as dot:\n dot.write(\n \"digraph lattice {\\n\" \\\n \"\\trankdir=LR;\\n\" \\\n \"\\tnode [shape = ellipse; fontname = courier];\\n\" \\\n \"\\tedge [fontname = courier];\\n\\n\")\n while True:\n line = lattice.readline()\n if line.startswith('N='):\n break\n first_line = line.split()\n nodes, links = [int(i.split('=')[1]) for i in first_line]\n for _ in range(nodes):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:3])\n dot.write(\"\\t%s [label = \\\"id=%s\\\\nt=%s\\\\nW=%s\\\"];\\n\" % (\n content[0], content[0], content[1], content[2]))\n dot.write(\"\\n\")\n for _ in range(links):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:5])\n if next_line[5].startswith('n='):\n dot.write(\n \"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\\nn=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3],\n content[4], next_line[5].split('=')[1]))\n else:\n dot.write(\"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3], content[4]))\n dot.write(\"}\")", "def table(self, L, R, n):\n s = ''\n for x in linspace(L, R, n):\n y = self(x)\n s += '%12g %12g\\n' % (x, y)\n return s", "def linearize_table(self):\n pass", "def table(self, L, R, n):\r\n s = ''\r\n for x in 
linspace(L, R, n):\r\n y = self(x)\r\n s += '(%12g, %12g)\\n' %(x, y)\r\n return s", "def display_line(line):\n table = get_ttable(line)\n parts = line.split()\n try:\n ones = int(parts[6])\n steps = int(parts[7])\n print display_ttable(table), \"# \",ones, \"\", steps, \"\", long_to_eng_str(ones,1,3), \"\", long_to_eng_str(steps,1,3)\n except:\n print display_ttable(table)", "def circuit_data_table(circuit: QuantumCircuit) -> wid.HTML:\n\n circuit = circuit.decompose()\n ops = circuit.count_ops()\n num_nl = circuit.num_nonlocal_gates()\n\n html = \"<table>\"\n html += \"\"\"<style>\ntable {\n font-family: \"IBM Plex Sans\", Arial, Helvetica, sans-serif;\n border-collapse: collapse;\n width: 100%;\n border-left: 2px solid #212121;\n}\n\nth {\n text-align: left;\n padding: 5px 5px 5px 5px;\n width: 100%;\n background-color: #988AFC;\n color: #fff;\n font-size: 14px;\n border-left: 2px solid #988AFC;\n}\n\ntd {\n text-align: left;\n padding: 5px 5px 5px 5px;\n width: 100%;\n font-size: 12px;\n font-weight: medium;\n}\n\ntr:nth-child(even) {background-color: #f6f6f6;}\n</style>\"\"\"\n html += f\"<tr><th>{circuit.name}</th><th></tr>\"\n html += f\"<tr><td>Width</td><td>{circuit.width()}</td></tr>\"\n html += f\"<tr><td>Depth</td><td>{circuit.depth()}</td></tr>\"\n html += f\"<tr><td>Total Gates</td><td>{sum(ops.values())}</td></tr>\"\n html += f\"<tr><td>Non-local Gates</td><td>{num_nl}</td></tr>\"\n html += \"</table>\"\n\n out_wid = wid.HTML(html)\n return out_wid", "def str2lattice(latticeStr):\n lattice = []\n for i in range(len(latticeStr)):\n if latticeStr[i] in ['\\n', '\\r\\n'] or latticeStr[i][0]=='!':\n continue\n elem = str2elem(latticeStr[i])\n if elem : #check if elem is not empty\n lattice.append(elem)\n return lattice", "def _tab_print_ ( t , title = '' , prefix = '' , alignment = 'll' , xfmt = '%+.5g' , yfmt = '%+-.5g' ) :\n rows = [ ('Abscissa' , 'Value' ) ] \n for i in range ( t.size() ) :\n x = t.x ( i )\n y = t.y ( i )\n row = xfmt % x, yfmt % y\n rows.append ( row )\n \n if not title : title = 'Interpolation Table' \n import ostap.logger.table as T\n return T.table ( rows , title = title , prefix = prefix , alignment = alignment )", "def _read_torchtext_tabular(cls, input_file):\n return open_split(input_file, lower_case=False)", "def __str__(self):\n\n # Get all combinations of keys for probabilities\n all_combinations = list(itertools.product(*self.all_random_variables))\n count_of_rv = len(self.random_variables)\n grouped_combinations = [all_combinations[i * count_of_rv:(i + 1) * count_of_rv] for i in\n range(int(len(all_combinations) / count_of_rv))]\n # Create header for each predecessor and random variables\n headers = self.predecessors + [f'P({self.node_name}={variable})' for variable in\n self.random_variables]\n # Row data where predecessor random variables are changing and each probability is inserted\n rows = [\n list(group[0][:-1]) + [self.probabilities[NetworkNode._probability_key(key)] for key in\n group] for group in grouped_combinations]\n return tabulate(tabular_data=rows, headers=headers, tablefmt='github')", "def getLattice() :\n lattice = [getElem('loop'),getElem('quad'),getElem('drift'),getElem('quad'),getElem('drift')]\n lattice[3].Kx = -lattice[3].Kx\n return lattice", "def tables():\n print('{:20}{:>5}{:>15}'.format('Tiger Woods', 52, '$1000211.22'))\n print('{:20}{:>5}{:>15}'.format('Elizabeth Smith', 123, '$1009.01'))\n print('{:20}{:>5}{:>15}'.format('Frank Lloyd Wright', 3, '$99.01'))\n print('{:20}{:>5}{:>15}'.format('Justin Timberlake', 39, 
'$199.01'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse table to Tracy lattice and write it to file.
def parse_table_to_tracy_file(self, filename: str) -> None: parse_table_to_tracy_file(self.name, self.table, filename)
[ "def write_table(self):\n o = open(self.out_file, 'w')\n o.write(self.table)", "def _write_table(self, table, path):\n io.write(table, path)", "def parse_table_to_madx_line_file(self, filename: str):\n save_string(self.parse_table_to_madx_line_string(), filename)", "def parse_table_to_elegant_file(self, filename: str) -> None:\n self.add_drifts()\n\n parse_table_to_elegant_file(self.name, self.table, filename)", "def save_inference_table(filename, table):\r\n save_nparray(filename, table, colnames=['case_id', 't', 'p_y', 'y'])", "def _dump_data(self, fileobj):\n if not fileobj and self._file:\n root = os.path.splitext(self._file.name)[0]\n fileobj = root + \".txt\"\n\n close_file = False\n\n if isinstance(fileobj, str):\n fileobj = open(fileobj, \"w\")\n close_file = True\n\n linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)\n\n # Process each row of the table and output one row at a time\n def format_value(val, format):\n if format[0] == \"S\":\n itemsize = int(format[1:])\n return \"{:{size}}\".format(val, size=itemsize)\n elif format in np.typecodes[\"AllInteger\"]:\n # output integer\n return f\"{val:21d}\"\n elif format in np.typecodes[\"Complex\"]:\n return f\"{val.real:21.15g}+{val.imag:.15g}j\"\n elif format in np.typecodes[\"Float\"]:\n # output floating point\n return f\"{val:#21.15g}\"\n\n for row in self.data:\n line = [] # the line for this row of the table\n\n # Process each column of the row.\n for column in self.columns:\n # format of data in a variable length array\n # where None means it is not a VLA:\n vla_format = None\n format = _convert_format(column.format)\n\n if isinstance(format, _FormatP):\n # P format means this is a variable length array so output\n # the length of the array for this row and set the format\n # for the VLA data\n line.append(\"VLA_Length=\")\n line.append(f\"{len(row[column.name]):21d}\")\n _, dtype, option = _parse_tformat(column.format)\n vla_format = FITS2NUMPY[option[0]][0]\n\n if vla_format:\n # Output the data for each element in the array\n for val in row[column.name].flat:\n line.append(format_value(val, vla_format))\n else:\n # The column data is a single element\n dtype = self.data.dtype.fields[column.name][0]\n array_format = dtype.char\n if array_format == \"V\":\n array_format = dtype.base.char\n if array_format == \"S\":\n array_format += str(dtype.itemsize)\n\n if dtype.char == \"V\":\n for value in row[column.name].flat:\n line.append(format_value(value, array_format))\n else:\n line.append(format_value(row[column.name], array_format))\n linewriter.writerow(line)\n if close_file:\n fileobj.close()", "def save_gtfs_table(filename,table):\n\tfieldnames = table[0].keys()\n\tprint(\"Fieldnames: {}\".format(fieldnames))\n\ttablef = open(filename,mode=\"w\",encoding=\"utf-8\")\n\t#FIXME: extrasaction is set to ignore due to bug in csv.py in Python3.6\n\twriter=csv.DictWriter(tablef,fieldnames,extrasaction=\"ignore\")\n\twriter.writeheader()\n\twriter.writerows(table)\n\ttablef.close()", "def writeTable(RV):", "def write_table(self):\n with open(self.table_file, 'w') as table_f:\n pickle.dump(self.pollster_table, table_f)", "def write_tsv(self, filename):\n\n output = StringIO()\n\n # Add the header line\n output.write('model_name\\t')\n output.write('\\t'.join([r.func_name for r in self.reporters]))\n output.write('\\n')\n\n # Transpose the results list\n results = zip(*self.results)\n\n for model_name, result_row in zip(self.names, results):\n output.write(model_name + '\\t')\n output.write('\\t'.join([r.get_text() for 
r in result_row]))\n output.write('\\n')\n\n with open(filename, 'w') as f:\n f.write(output.getvalue())", "def writeTable(self, path):\r\n\r\n for tab in self.tables:\r\n tab.write(path)\r\n return True", "def save_table(table, filename):\n\n LOGGER.info(\"Saving output table to file %s\", filename)\n \n table.to_csv(filename, sep=\";\")\n LOGGER.info(\"Saved output table.\")", "def create_table(self):\n table_path = os.path.join(self.opts[\"data_dir\"], self.table_name())\n self.output_file = open_fw(table_path, encoding=self.encoding)\n self.output_file.write(u'<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n self.output_file.write(u'\\n<root>')\n self.table_names.append((self.output_file, table_path))\n self.auto_column_number = 1\n\n # Register all tables created to enable\n # testing python files having custom download function\n if self.script.name not in self.script_table_registry:\n self.script_table_registry[self.script.name] = []\n self.script_table_registry[self.script.name].append(\n (self.table_name(), self.table))", "def to_tsv(self, output_file):\n csvw = csv.writer(output_file, delimiter=\"\\t\", quoting=csv.QUOTE_NONE)\n for row in self.summary:\n csvw.writerow(row)", "def to_latex(self,fn='tableone.tex'):\n tablefmt = 'latex'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))", "def export_table_txt(self, db: str, table: str, path: str) -> bool:\n # authenticate whether the table exists or not\n authenticate = self.const.auth_table(db, table)\n\n try:\n if (authenticate is True):\n\n self.cursor.execute(f\"use {db}\")\n query = f\"select * from {table}\"\n self.cursor.execute(query)\n select_result = self.cursor.fetchall()\n # provides column names in the input table\n table_columns = self.cursor.column_names\n\n result = tabulate.tabulate(\n select_result,\n headers = list(table_columns),\n tablefmt = \"psql\"\n )\n\n if path == \"\":\n path = os.path.expanduser(\"~\")\n path = path.replace(\"\\\\\", \"/\")\n\n file = open(f\"{path}/{table}.txt\", \"w\")\n file.write(result)\n file.close()\n\n return True\n\n else:\n return False\n\n except:\n return False", "def write_table(table, filename, data_key=None, make_tree=None, **kwargs):\n try:\n import asdf\n except ImportError:\n raise Exception(\n \"The asdf module is required to read and write ASDF files\")\n\n if data_key and make_tree:\n raise ValueError(\"Options 'data_key' and 'make_tree' are not compatible\")\n\n if make_tree:\n tree = make_tree(table)\n else:\n tree = { data_key or 'data' : table }\n\n with asdf.AsdfFile(tree) as af:\n af.write_to(filename, **kwargs)", "def to_html(self,fn='tableone.html'):\n tablefmt = 'html'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))", "def save(self, savepath):\n self._tablepath = savepath\n self._latexTable(self._tablepath)\n self._latexTable(savepath)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return MADX string to install markers at the start and end of the lattice. Returns str MADX install string that can be run with cpymad.
def madx_sequence_add_start_end_marker_string(self) -> str: return install_start_end_marker(self.name, self.len)
[ "def parse_table_to_madx_install_str(self) -> str:\n return parse_table_to_madx_install_str(self.name, self.table)", "def _create_cmd(self):\n comment = (\"#-------------------\\n\"\n \"# Install ANTs {}\\n\"\n \"#-------------------\".format(self.version))\n if self.use_binaries:\n chunks = [comment, self.install_binaries()]\n else:\n chunks = [comment, self.build_from_source_github()]\n return \"\\n\".join(chunks)", "def llvm_install_components():\n components = ['llvm-ar', 'llvm-cov', 'llvm-profdata', 'IndexStore', 'clang',\n 'clang-resource-headers', 'compiler-rt', 'clangd', 'LTO']\n if os.sys.platform == 'darwin':\n components.extend(['dsymutil'])\n else:\n components.extend(['lld'])\n return ';'.join(components)", "def generate_command(package, jailpath, additional_args=None):\n if additional_args is None: additional_args = ''\n command = \"pkg -c %s install --yes %s\" % (jailpath, package)\n return command.rstrip()", "def generate_string_latex(self):\n return '\\n'.join([at.generate_string_latex() for at in self.atom_list])", "def _pretty_print_install(self, install_cmd, packages, line_limit=80):\n install = [install_cmd]\n line = ' '\n # Remainder needed = space + len(pkg) + space + \\\n # Assuming 80 character lines, that's 80 - 3 = 77\n line_limit = line_limit - 3\n for pkg in packages:\n if len(line) + len(pkg) < line_limit:\n line = '{}{} '.format(line, pkg)\n else:\n install.append(line)\n line = ' {} '.format(pkg)\n\n if len(line) > 0:\n install.append(line)\n\n return install", "def _mdrun_str(job, op_name):\n mdrun_str = 'gmx mdrun -v -deffnm {} -ntmpi 1'.format(op_name)\n return mdrun_str", "def create_install_requires(requires_list):\n ret = \"\"\n first = True\n for req in requires_list:\n ret += \"{1}\\n \\\"{0}\\\"\".format(req, (\"\" if first else \",\"))\n first = False\n return ret", "def parse_table_to_madx_line_string(self) -> str:\n self.add_drifts()\n defstr = _parse_table_to_madx_definitions(self.table)\n linestr = \"{}: LINE=({});\".format(\n self.name,\n \",\\n\\t\\t\".join(\n [\",\".join(c) for c in list(self.chunks(self.table.name.to_list(), 20))]\n ),\n )\n return defstr + \"\\n\\n\" + linestr", "def msg():\n\n return \"\"\"ldamark [-h] --topics TOPICS [--iterations ITERATIONS] [--log LOG]\n --m {vsm,mallet} --f {init,train} corpus\n \"\"\"", "def create_pdb_line(self, index):\n output = \"ATOM \"\n output = (\n output + str(index).rjust(6) + self.atomname.rjust(5) +\n self.residue.rjust(4) + self.chain.rjust(2) + str(self.resid).rjust(4))\n coords = self.coordinates.as_array() # [x, y, z]\n output = output + (\"%.3f\" % coords[0]).rjust(12)\n output = output + (\"%.3f\" % coords[1]).rjust(8)\n output = output + (\"%.3f\" % coords[2]).rjust(8)\n output = output + self.element.rjust(24)\n return output", "def __latex__(self):\n a = self.MomentMatrix\n lines = str(a).replace('[', '').replace(']', '').splitlines()\n rv = [r'\\begin{bmatrix}']\n rv += [' ' + ' & '.join(l.split()) + r'\\\\' for l in lines]\n rv += [r'\\end{bmatrix}']\n return '\\n'.join(rv)", "def getSetup(self):\n _setup_str = \"\"\n self._setup = self._setup.strip()\n tolog(\"self setup: %s\" % self._setup)\n\n if self._setup and self._setup != \"\" and self._setup.strip() != \"\":\n if not self._setup.endswith(\";\"):\n self._setup += \";\"\n if not \"alias\" in self._setup:\n if \"atlasLocalSetup.sh\" in self._setup and \"--quiet\" not in self._setup:\n self._setup = self._setup.replace(\"atlasLocalSetup.sh\", \"atlasLocalSetup.sh --quiet\")\n if self._setup.startswith(\"export\") or 
self._setup.startswith(\"source\"):\n _setup_str = \"%s\" % self._setup\n else:\n _setup_str = \"source %s\" % self._setup\n else:\n _setup_str = self._setup\n\n if _setup_str != \"\":\n tolog(\"Using setup: %s\" % (_setup_str))\n\n return _setup_str", "def write_install_command(f, install_list, remove_list):\n command = ['install']\n for package in install_list:\n command.append('\"{}\"'.format(package))\n for package in remove_list:\n command.append('\"-{}\"'.format(package))\n print >>f, ' '.join(command)", "def write_init(self):\r\n line1 = \"@256\\nD=A\\n@SP\\nM=D\\n\"\r\n line2 = self.write_call(\"Sys.init\", 0)\r\n return line1 + line2", "def add_prefix(loc):\n return 'http://www.metanetx.org/cgi-bin/mnxget/mnxref/%s' % loc.lstrip('/')", "def test_install_prefix(mm_script, mm_conf):\n ret = subprocess.run(\n [\"powershell\", str(mm_script).replace(\" \", \"' '\"), \"-p\", \"squarepants\"],\n capture_output=True,\n check=False,\n text=True,\n )\n assert ret.returncode == 0, ret.stderr\n conf_file = mm_conf / \"minion\"\n assert conf_file.exists()\n assert conf_file.read_text().find(\"id: squarepants\") > -1", "def cmdset_string(self):\n name, alias = self.cmd()\n if not name:\n AssertionError('Command name is mandatory!')\n t = name\n if alias:\n t += ', ' + alias\n return t", "def _install_cdm(self):\n for cdm_file in os.listdir(self._addon_cdm_path()):\n if cdm_file.endswith(config.CDM_EXTENSIONS):\n self._log('[install_cdm] found file: {0}'.format(cdm_file))\n cdm_path_addon = os.path.join(self._addon_cdm_path(), cdm_file)\n cdm_path_inputstream = os.path.join(self._ia_cdm_path(), cdm_file)\n if self._os() == 'Windows': # copy on windows\n shutil.copyfile(cdm_path_addon, cdm_path_inputstream)\n else:\n if os.path.lexists(cdm_path_inputstream):\n os.remove(cdm_path_inputstream) # it's ok to overwrite\n os.symlink(cdm_path_addon, cdm_path_inputstream)\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to generate a MADX install element string based on the table. This string can be used by cpymad to install new elements. Returns str Install element string input for MADX.
def parse_table_to_madx_install_str(self) -> str: return parse_table_to_madx_install_str(self.name, self.table)
[ "def _create_cmd(self):\n comment = (\"#-------------------\\n\"\n \"# Install ANTs {}\\n\"\n \"#-------------------\".format(self.version))\n if self.use_binaries:\n chunks = [comment, self.install_binaries()]\n else:\n chunks = [comment, self.build_from_source_github()]\n return \"\\n\".join(chunks)", "def generate_install(self):\n\n install_fp = open(self.install_config.install_location + \"/autogenerated/install.sh\", \"w+\")\n install_fp.write(\"#!/bin/bash\\n\")\n \n install_fp.write(self.message)\n\n for module in self.install_config.get_module_list():\n if module.build == \"YES\":\n install_fp.write(\"{}={}\\n\".format(module.name, module.abs_path))\n\n for module in self.install_config.get_module_list():\n if module.build == \"YES\":\n install_fp.write(\"cd ${}\\n\".format(module.name))\n install_fp.write(\"make -sj\\n\")\n\n install_fp.close()", "def _pretty_print_install(self, install_cmd, packages, line_limit=80):\n install = [install_cmd]\n line = ' '\n # Remainder needed = space + len(pkg) + space + \\\n # Assuming 80 character lines, that's 80 - 3 = 77\n line_limit = line_limit - 3\n for pkg in packages:\n if len(line) + len(pkg) < line_limit:\n line = '{}{} '.format(line, pkg)\n else:\n install.append(line)\n line = ' {} '.format(pkg)\n\n if len(line) > 0:\n install.append(line)\n\n return install", "def parse_table_to_madx_line_string(self) -> str:\n self.add_drifts()\n defstr = _parse_table_to_madx_definitions(self.table)\n linestr = \"{}: LINE=({});\".format(\n self.name,\n \",\\n\\t\\t\".join(\n [\",\".join(c) for c in list(self.chunks(self.table.name.to_list(), 20))]\n ),\n )\n return defstr + \"\\n\\n\" + linestr", "def generate_string_latex(self):\n return '\\n'.join([at.generate_string_latex() for at in self.atom_list])", "def parse_table_to_madx_sequence_string(self) -> str:\n return parse_table_to_madx_sequence_string(self.name, self.len, self.table)", "def write_install_command(f, install_list, remove_list):\n command = ['install']\n for package in install_list:\n command.append('\"{}\"'.format(package))\n for package in remove_list:\n command.append('\"-{}\"'.format(package))\n print >>f, ' '.join(command)", "def getCleanInstall(self):\n xpath = self.root_tag + \"/updateParameters\" + self.version_filter + \"/cleanInstall\"\n self.debug(\"getCleanInstall(): xpath=\" + xpath + \"\\n\")\n # node_set = self.puke_dom.xml_select( xpath )\n node_set = self.getData(xpath)\n value = \"\"\n for node in node_set:\n # value = str( node.cleanInstall )\n value = node.getValue()\n # Endfor\n if (value is None): value = \"no\"\n\n return value.lower()", "def generate_command(package, jailpath, additional_args=None):\n if additional_args is None: additional_args = ''\n command = \"pkg -c %s install --yes %s\" % (jailpath, package)\n return command.rstrip()", "def create_install_requires(requires_list):\n ret = \"\"\n first = True\n for req in requires_list:\n ret += \"{1}\\n \\\"{0}\\\"\".format(req, (\"\" if first else \",\"))\n first = False\n return ret", "def install_req(self):\n if '(' in self.name:\n return\n print(\"Installing %s(v%s)\" % (self.name, self.version))\n name = self.pkg_location if hasattr(self, 'pkg_location') \\\n else self.name\n name = 'python-' + name if 'python-' not in name and \\\n '.rpm' not in name else name\n results, status = runCommand(self.install_cmd % name)\n if status:\n raise InstallError(self.install_cmd % name, str(self.req),\n msg=\"Unable to install dep\",\n frame=gfi(cf()), errno=errno.ESPIPE)", "def llvm_install_components():\n 
components = ['llvm-ar', 'llvm-cov', 'llvm-profdata', 'IndexStore', 'clang',\n 'clang-resource-headers', 'compiler-rt', 'clangd', 'LTO']\n if os.sys.platform == 'darwin':\n components.extend(['dsymutil'])\n else:\n components.extend(['lld'])\n return ';'.join(components)", "def creation_table_installation(self):\n\n\t\tself.c.execute(\"DROP TABLE IF EXISTS installations\")\n\t\tself.c.execute('''CREATE TABLE installations(comLib text,comInsee text,insCodePostal text,insLieuDit text,\n\t\t\t\t\t\tinsNoVoie text , insLibelleVoie text , nb_Equipements text, nb_FicheEquipement text)''')\n\t\tself.conn.commit()", "def _get_entity_element(e, t, key, cols=[], ins=None):\n if cols:\n output = '<Entity>\\n'\n output += _static_columns(e, t, key, ins)\n output += \"\\n\".join(cols)\n output += '</Entity>\\n'\n return output\n else:\n return \"\"", "def build_mnemonic_string(self, table, target_id, letter):\n request = \"SELECT textstring FROM %s WHERE xmlid = \\\"%s\\\"\" % (table, target_id)\n self.session.execute_query(request)\n row = self.session.cursor.fetchone()\n if row == None: \n self.session.warn(\"Invalid mnemonic\")\n return \"\"\n word = row[0]\n if letter != None:\n pos = word.lower().find(letter.lower())\n else:\n pos = -1\n if pos == -1:\n return word + (\"\"\"(<span style=\"text-decoration: underline\">%s\\\n </span>)\"\"\" % letter)\n else:\n # underline the mnemonic\n return word[0:pos] + (\"\"\"<span style=\"text-decoration: \\\n underline\">%s</span>\"\"\" % word[pos]) + word[pos+1:len(word)]", "def generate_summation_table(base: int) -> str:\r\n table = ''\r\n max_len = len(str(base * base))\r\n\r\n # Head line\r\n table += f''.ljust(max_len + 1, ' ')\r\n for x in range(1, base):\r\n table += f'{convert_number(x, base)}'.ljust(max_len + 1, ' ')\r\n table += '\\n'\r\n\r\n # Main content including vertical line\r\n for x in range(1, base):\r\n table += f'{convert_number(x, base)}'.ljust(max_len + 1, ' ')\r\n for y in range(1, base):\r\n table += f'{convert_number(x + y, base)}'.ljust(max_len + 1, ' ')\r\n table += '\\n'\r\n return table", "def to_MINT(self) -> str:\n # TODO: Eventually I need to modify the MINT generation to account for all the layout constraints\n\n full_layer_text = \"\"\n # Loop Over all the layers\n for layer in self.layers:\n componenttext = \"\\n\".join(\n [item.to_MINT() for item in self.components if item.layers[0] == layer]\n )\n connectiontext = \"\\n\".join(\n [item.to_MINT() for item in self.connections if item.layer == layer]\n )\n\n full_layer_text += (\n layer.to_MINT(\"{}\\n\\n{}\".format(componenttext, connectiontext)) + \"\\n\\n\"\n )\n\n full = \"DEVICE {}\\n\\n{}\".format(self.name, full_layer_text)\n return full", "def _generate_insert_columns_string(cls):\n\n return ', '.join(cls.COLUMNS)", "def _make_mods_xml_string( self ):\n doc = etree.ElementTree( self.mods )\n mods_string = etree.tostring( doc, pretty_print=True ).decode( 'utf-8', 'replace' )\n assert type(mods_string) == str\n return mods_string" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to generate a MADX remove element string based on the table. This string can be used by cpymad to remove elements. Returns str Remove element string input for MADX.
def parse_table_to_madx_remove_str(self) -> str: return parse_table_to_madx_remove_str(self.name, self.table)
[ "def removeElement(self):", "def on_remove_tid(self, event):\n if STATUS.currentSelectedFrame[STATUS.cur_workingtable] is None:\n return\n id_to_remove = ''\n ids = self.get_tid(event.widget.index)\n ids_array = ids.split(',')\n # Remove word_id in the trans entry :\n self.remove_tid(event.widget.index, ids_array[len(ids_array) - 1])", "def parse_table_to_madx_install_str(self) -> str:\n return parse_table_to_madx_install_str(self.name, self.table)", "def parse_table_to_elegant_string(self) -> str:\n self.add_drifts()\n return parse_table_to_elegant_string(self.name, self.table)", "def remove_selected_element(self) -> str:\r\n index_to_delete = self.lb_sel_params.curselection()[0]\r\n value_to_delete = self.lb_sel_params.get(index_to_delete)\r\n self.lb_sel_params.delete(index_to_delete)\r\n return value_to_delete", "def ungroom(elem):\n return elem", "def emphasize_stringtable_elements(self, xml):\n regex = re.compile(r'\\[\\[VALUE_0x(?P<number>[0-9A-Fa-f]+)\\]\\]')\n xml = re.sub(regex, Style.BRIGHT + '[[VALUE_0x\\g<number>]]' + Style.RESET_ALL, xml)\n regex = re.compile(r'ST_0x(?P<number>[0-9A-Fa-f]+)\\]\\]')\n xml = re.sub(regex, Style.BRIGHT + 'ST_0x\\g<number>' + Style.RESET_ALL + ']]', xml)\n return xml", "def parse_table_to_madx_sequence_string(self) -> str:\n return parse_table_to_madx_sequence_string(self.name, self.len, self.table)", "def remove(self):\n\t\tdata = self.data\n\t\timd5 = self.IMD5Header()\n\t\tif(data[:4] != \"IMD5\"):\n\t\t\t\tif(fn != \"\"):\n\t\t\t\t\topen(fn, \"wb\").write(data) \n\t\t\t\t\treturn fn\n\t\t\t\telse:\n\t\t\t\t\treturn self.f\n\t\tdata = data[len(imd5):]\n\t\t\n\t\treturn data", "def test_mremove(ac_dc_network):\n network = ac_dc_network\n\n generators = {\"Manchester Wind\", \"Frankfurt Wind\"}\n\n network.mremove(\"Generator\", generators)\n\n assert not generators.issubset(network.generators.index)\n assert not generators.issubset(network.generators_t.p_max_pu.columns)", "def remove_element_from_redis(self, element: Union[Element, str]) -> None:\n if isinstance(element, Element):\n element_id = element.get_id()\n elif isinstance(element, str):\n element_id = element\n else:\n return\n\n connection = get_redis_connection()\n connection.delete(f'{self.__redis_name}:elements:{element_id}')", "def test_remove_x():\r\n print \"Computed:\", \"\\\"\" + remove_x(\"\") + \"\\\"\", \"Expected: \\\"\\\"\"\r\n print \"Computed:\", \"\\\"\" + remove_x(\"cat\") + \"\\\"\", \"Expected: \\\"cat\\\"\"\r\n print \"Computed:\", \"\\\"\" + remove_x(\"xxx\") + \"\\\"\", \"Expected: \\\"\\\"\"\r\n print \"Computed:\", \"\\\"\" + remove_x(\"dxoxg\") + \"\\\"\", \"Expected: \\\"dog\\\"\"\r\n print \"Computed:\", \"\\\"\" + remove_x(\"bxbxox\") + \"\\\"\", \"Expected: \\\"bbo\\\"\"\r\n print \"Computed:\", \"\\\"\" + remove_x(\"xxx\") + \"\\\"\", \"Expected: \\\"\\\"\"", "def get_unregister_tac_description(self) -> Description:\n description = Description(\n self.context.parameters.remove_service_data,\n data_model=AGENT_REMOVE_SERVICE_MODEL,\n )\n return description", "def qstrvec_t_remove(*args) -> \"bool\":\n return _ida_pro.qstrvec_t_remove(*args)", "def remove(self, element: _SetElementT) -> None:\n del self._elements[element]", "def generate_uninstall(self):\n\n uninstall_fp = open(self.install_config.install_location + \"/autogenerated/uninstall.sh\", \"w+\")\n uninstall_fp.write(\"#!/bin/bash\\n\")\n\n uninstall_fp.write(self.message)\n\n modules = self.install_config.get_module_list()\n modules.reverse()\n\n for module in modules:\n if module.build == 
\"YES\":\n uninstall_fp.write(\"{}={}\\n\".format(module.name, module.abs_path))\n\n for module in modules:\n if module.build == \"YES\":\n uninstall_fp.write(\"cd ${}\\n\".format(module.name))\n uninstall_fp.write(\"make clean uninstall\\n\")\n uninstall_fp.write(\"make clean uninstall\\n\")\n \n modules.reverse()\n uninstall_fp.close()", "def _eliminarElem(elem,l):\r\n if len(elem) == 1:\r\n e = elem[0]\r\n else:\r\n e = elem\r\n return l[:l.index(e)] + l[l.index(e) + 1:]", "def removeTable(self, table_name):\n pass", "def StripIdentifiersAndImmediates(stmt: str) -> str:\n # Remove local identifiers\n stmt = re.sub(rgx.local_id, \"\", stmt)\n # Global identifiers\n stmt = re.sub(rgx.global_id, \"\", stmt)\n # Remove labels\n if re.match(r\"; <label>:\\d+:?(\\s+; preds = )?\", stmt):\n stmt = re.sub(r\":\\d+\", \":\", stmt)\n elif re.match(rgx.local_id_no_perc + r\":(\\s+; preds = )?\", stmt):\n stmt = re.sub(rgx.local_id_no_perc + \":\", \":\", stmt)\n\n # Remove floating point values\n stmt = re.sub(rgx.immediate_value_float_hexa, \"\", stmt)\n stmt = re.sub(rgx.immediate_value_float_sci, \"\", stmt)\n\n # Remove integer values\n if (\n re.match(\"<%ID> = extractelement\", stmt) is None\n and re.match(\"<%ID> = extractvalue\", stmt) is None\n and re.match(\"<%ID> = insertelement\", stmt) is None\n and re.match(\"<%ID> = insertvalue\", stmt) is None\n ):\n stmt = re.sub(r\"(?<!align)(?<!\\[) \" + rgx.immediate_value_int, \" \", stmt)\n\n # Remove string values\n stmt = re.sub(rgx.immediate_value_string, \" \", stmt)\n\n # Remove index types\n if (\n re.match(\" = extractelement\", stmt) is not None\n or re.match(\" = insertelement\", stmt) is not None\n ):\n stmt = re.sub(r\"i\\d+ \", \" \", stmt)\n\n return stmt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to convert the table to a MADX line definition lattice. Returns str MADX lattice definition string.
def parse_table_to_madx_line_string(self) -> str: self.add_drifts() defstr = _parse_table_to_madx_definitions(self.table) linestr = "{}: LINE=({});".format( self.name, ",\n\t\t".join( [",".join(c) for c in list(self.chunks(self.table.name.to_list(), 20))] ), ) return defstr + "\n\n" + linestr
[ "def to_linestringm_wkt(self):\n # Shapely only supports x, y, z. Therefore, this is a bit hacky!\n coords = \"\"\n for index, row in self.df.iterrows():\n pt = row[self.get_geom_column_name()]\n t = to_unixtime(index)\n coords += \"{} {} {}, \".format(pt.x, pt.y, t)\n wkt = \"LINESTRING M ({})\".format(coords[:-2])\n return wkt", "def parse_table_to_madx_install_str(self) -> str:\n return parse_table_to_madx_install_str(self.name, self.table)", "def parse_table_to_madx_sequence_string(self) -> str:\n return parse_table_to_madx_sequence_string(self.name, self.len, self.table)", "def parse_table_to_madx_line_file(self, filename: str):\n save_string(self.parse_table_to_madx_line_string(), filename)", "def __latex__(self):\n a = self.MomentMatrix\n lines = str(a).replace('[', '').replace(']', '').splitlines()\n rv = [r'\\begin{bmatrix}']\n rv += [' ' + ' & '.join(l.split()) + r'\\\\' for l in lines]\n rv += [r'\\end{bmatrix}']\n return '\\n'.join(rv)", "def __create_lines_table(self):\r\n i = 0\r\n rows = []\r\n cols = []\r\n self.__add_item('number', self.tab1, i, 0, cols)\r\n self.__add_item('last stop', self.tab1, i, 1, cols)\r\n self.__add_item('route', self.tab1, i, 2, cols)\r\n self.__add_item('frequency', self.tab1, i, 3, cols)\r\n self.__add_item('bus capacity', self.tab1, i, 4, cols)\r\n rows.append(cols)\r\n i += 1\r\n for line in self.simulation.lines:\r\n cols = []\r\n self.__add_item(line.number, self.tab1, i, 0, cols)\r\n self.__add_item(line.last_stop_name(0), self.tab1, i, 1, cols)\r\n _route = [stop.name.encode(\"utf-8\") for stop in line.routes[0] if stop.name != \"P\"]\r\n self.__add_item(_route, self.tab1, i, 2, cols)\r\n self.__add_item(line.frequencies[0], self.tab1, i, 3, cols)\r\n self.__add_item(line.bus_capacity, self.tab1, i, 4, cols)\r\n i += 1\r\n cols = []\r\n self.__add_item(line.number, self.tab1, i, 0, cols)\r\n self.__add_item(line.last_stop_name(1), self.tab1, i, 1, cols)\r\n _route = [stop.name.encode(\"utf-8\") for stop in line.routes[1] if stop.name != \"P\"]\r\n self.__add_item(_route, self.tab1, i, 2, cols)\r\n self.__add_item(line.frequencies[1], self.tab1, i, 3, cols)\r\n self.__add_item(line.bus_capacity, self.tab1, i, 4, cols)\r\n i += 1\r\n rows.append(cols)", "def generate_string_latex(self):\n return '\\n'.join([at.generate_string_latex() for at in self.atom_list])", "def generate_lattice(self, verbose=False):\n if not self._lattice:\n lat = StrictOrders().get_orders(xrange(1, self.set_n + 1), verbose)\n self._lattice = lat", "def parse_table_to_elegant_string(self) -> str:\n self.add_drifts()\n return parse_table_to_elegant_string(self.name, self.table)", "def makeLattice(sequence):\n\tlattice=Lattice(sequence.getId())\n\tseq_pos=sequence.getPosition()\n\tseq_len=sequence.getLength()\n\n\t#process all thick (len!=0) elements first\n\tprocessThickElements(sequence,lattice)\n\n\t#fill lattice up to end with drift space\n\tif seq_len > lattice.getLength():\n\t\tlen=seq_len-lattice.getLength()\n\t\tlattice.append(Drift(seq_len-len*0.5,len))\n\n\t#special handling for DPLT elements\n\tif sequence.getId() == \"DTL1\":\n\t\tlast=sequence.getNodeWithId(\"DTL_Diag:DPLT:BPM02\")\n\t\tlen=last.getPosition()-lattice.getLength()\n\t\tlattice.append(Drift(lattice.getLength()+len*0.5,len))\n\n\t#process all thin (len=0) ones\n\tprocessThinElements(sequence,lattice)\n\n\treturn lattice", "def table(self, L, R, n):\n s = ''\n for x in linspace(L, R, n):\n y = self(x)\n s += '%12g %12g\\n' % (x, y)\n return s", "def table(self, L, R, n):\r\n s = ''\r\n for x in 
linspace(L, R, n):\r\n y = self(x)\r\n s += '(%12g, %12g)\\n' %(x, y)\r\n return s", "def linearize_table(self):\n pass", "def getLattice() :\n lattice = [getElem('loop'),getElem('quad'),getElem('drift'),getElem('quad'),getElem('drift')]\n lattice[3].Kx = -lattice[3].Kx\n return lattice", "def createAltLine(self,line):\n # Parse line\n name, reactLine, prodLine, rLabelLine, pLabelLine, reversible, sym = self.parseLine(line)\n reacts = [x.strip() for x in reactLine.split('+')]\n prods = [x.strip() for x in prodLine.split('+')]\n rLabel = map(lambda x: utils.symsplit(x.strip()) if ';' in x else x.strip(), rLabelLine.split('+'))\n pLabel = map(lambda x: utils.symsplit(x.strip()) if ';' in x else x.strip(), pLabelLine.split('+')) \n \n # If there is a symmetric molecule\n if sym:\n ## Duplicate metabolites\n newReacts = []\n for react in reacts:\n newReacts.append(react)\n newReacts.append(react)\n newProds = []\n for prod in prods:\n newProds.append(prod)\n newProds.append(prod)\n newReacts = AtomTransition.convert2PS(newReacts)\n newProds = AtomTransition.convert2PS(newProds)\n \n ## Take care of labeling\n # Find out all carbon letters (= inputKeys)\n inputKeys=[]\n for lab in rLabel:\n if type(lab) == list:\n for comp in lab:\n inputKeys.extend([x for x in comp])\n else:\n inputKeys.extend([x for x in lab])\n inputKeys = sorted(set(inputKeys))\n \n # Find equivalent for each of the old carbon letters \n all_letters = string.ascii_letters # All possible letters\n avoid = ''.join(inputKeys) # These are taken letters, to be avoided in choosing new equivalents\n keyDict = {}\n for key in inputKeys:\n keyDict[key] = [x for x in all_letters if x not in avoid][0]\n avoid = avoid + keyDict[key]\n \n # Add new labeling for new metabolites \n newRlabel = []\n for lab in rLabel:\n if type(lab) is list: # Only works for two alternative labelings !!!\n newRlabel.append(lab[0])\n newRlabel.append(''.join([keyDict[x] for x in lab[1]]))\n else:\n # Creating new labeling for new metabolites\n newRlabel.append(lab)\n newLab = ''.join([keyDict[x] for x in lab])\n newRlabel.append(newLab)\n newPlabel = []\n for lab in pLabel:\n if type(lab) is list:\n newPlabel.append(lab[0])\n newPlabel.append(''.join([keyDict[x] for x in lab[1]]))\n else:\n # Creating new labeling for new metabolites\n newPlabel.append(lab)\n newLab = ''.join([keyDict[x] for x in lab]) \n newPlabel.append(newLab)\n else:\n # Convert reactants and products to pseudo metabolits (e.g. 
glu_L_c --> glu_L_c__ps1) if needed\n newReacts = AtomTransition.convert2PS(reacts)\n newProds = AtomTransition.convert2PS(prods)\n \n newRlabel = rLabel\n newPlabel = pLabel\n \n ## Join all into new line\n rxnSymb = ' <==> ' if reversible else ' --> '\n altLine = name \n altLine += ' \\t '+' + '.join(newReacts)+rxnSymb+' + '.join(newProds)\n altLine += ' \\t '+' + '.join(newRlabel)+' : '+' + '.join(newPlabel)\n \n return altLine", "def display_line(line):\n table = get_ttable(line)\n parts = line.split()\n try:\n ones = int(parts[6])\n steps = int(parts[7])\n print display_ttable(table), \"# \",ones, \"\", steps, \"\", long_to_eng_str(ones,1,3), \"\", long_to_eng_str(steps,1,3)\n except:\n print display_ttable(table)", "def listToLine(self,list):\n string=''\n for entry in list:\n string+=entry+\" \"\n string=string[:-1]+linesep\n return string", "def make_lattice(self, latt_type = 'cubic', lat_parms):\n\n if latt_type = 'cubic':\n lx, ly, lz = lat_parms\n latt = {}\n latt['box'] = ['cubic', lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n latt['xyzs'].append([ix, iy, iz,1])\n\n elif latt_type = 'bcc':\n lx, ly, lz = lat_parms\n latt = {}\n latt['box'] = ['bcc', lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n if ix + 0.5 <= (lx - 1) and iy + 0.5 <= (ly - 1) and iz + 0.5 <= (lz - 1):\n latt['xyzs'].append([ix + 0.5, iy + 0.5, iz + 0.5, 1])\n latt['xyzs'].append([1 * ix, 1 * iy, 1 * iz, 1])\n\n\n\n elif latt_type = 'fcc':\n lx, ly, lz = lat_parms\n\n latt = {}\n latt['nat'] = lx * ly * lz\n latt['box'] = ['fcc', 2 * lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n rx = 2 * ix + (iy + iz) % 2\n latt['xyzs'].append([rx, iy, iz,1])\n\n return latt", "def parse_table_to_madx_remove_str(self) -> str:\n return parse_table_to_madx_remove_str(self.name, self.table)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to convert the table to a MADX line definition lattice string and write it to file.
def parse_table_to_madx_line_file(self, filename: str): save_string(self.parse_table_to_madx_line_string(), filename)
[ "def parse_table_to_madx_line_string(self) -> str:\n self.add_drifts()\n defstr = _parse_table_to_madx_definitions(self.table)\n linestr = \"{}: LINE=({});\".format(\n self.name,\n \",\\n\\t\\t\".join(\n [\",\".join(c) for c in list(self.chunks(self.table.name.to_list(), 20))]\n ),\n )\n return defstr + \"\\n\\n\" + linestr", "def create_slf_file(self):\n mesh = open(self.name, 'w') \n mesh.write('numel numnp nmat nmode (This is for a beam bridge)\\n')\n mesh.write(str(len(self.edge_list))+'\\t'+str(len(self.node_list))\n + '\\t'+str(len(self.beams)) + '\\t0\\n')\n mesh.write('matl no., E mod, Poiss. Ratio,density, Area, Iy, Iz\\n')\n tables = open('./tables/CHSTables.txt', 'r')\n for i,beam in enumerate(self.beams):\n mesh.write(str(i)+' '+str(self.beams[i]['emod'])+'\\t0.3000\\t'\n + str(self.beams[i]['density'])+'\\t'+str(self.beams[i]['area'])\n + '\\t'+str(self.beams[i]['iy'])+'\\t'+str(self.beams[i]['ix']) + '\\n') \n mesh.write('el no.,connectivity, matl no, element type\\n')\n for i, edge in enumerate(self.edge_list): \n mesh.write(str(i)+'\\t'+str(edge['pt_a'])+'\\t'+str(edge['pt_b'])\n + '\\t'+str(edge['material'])+'\\t2 \\n')\n mesh.write('node no., coordinates\\n')\n for node in self.node_list:\n mesh.write(node['id']+'\\t'+str(node['x'])+'\\t'+str(node['y'])+'\\t'+str(node['z'])+\"\\n\")\n mesh.write(\"element with specified local z axis: x, y, z component\\n -10\\n\")\n mesh.write('prescribed displacement x: node disp value\\n')\n for node in self.fixed_list:\n# if node[1] == True: # un-comment when dealing with fixed-roller structures\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed displacement y: node disp value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed displacement z: node disp value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi x: node angle value\\n')\n for node in self.fixed_list:\n# if node[1] == True: # un-comment when dealing with fixed-roller structures\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi y: node angle value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi z: node angle value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nnode with point load x, y, z and 3 moments phi x, phi y, phi z\\n') \n if self.BROKEN:\n for node in self.nodeselfloads: \n trans = 0\n broken_long = 0\n for thing in self.load_nodes:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load\n trans = self.transverse_cable_load \n if self.GROUND_BROKEN:\n for thing in self.ground_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_ground_load_broken\n trans = self.transverse_ground_load\n broken_long = self.longitudinal_ground_load\n for thing in self.break_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load_broken\n broken_long = self.longitudinal_cable_load\n trans = self.transverse_cable_load\n else:\n for thing in self.ground_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_ground_load\n trans = self.transverse_ground_load\n for thing in self.break_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load_broken\n broken_long = self.longitudinal_cable_load \n trans = self.transverse_cable_load\n 
mesh.write(str(node[0])+'\\t'+str(broken_long)+'\\t'+str(trans)+'\\t-'+str(round(node[1],5))+'\\t0\\t0\\t0\\n')\n else:\n for node in self.nodeselfloads: \n trans = 0\n for yolk in self.load_nodes:\n if yolk == node[0]:\n node[1] = node[1] + self.vertical_cable_load\n trans = self.transverse_cable_load\n for thong in self.ground_node:\n if thong == node[0]:\n node[1] = node[1] + self.vertical_ground_load\n trans = self.transverse_ground_load\n mesh.write(str(node[0])+'\\t0\\t'+str(trans)+'\\t-'+str(round(node[1],5))+'\\t0\\t0\\t0\\n')\n mesh.write('-10\\nelement with distributed load in global beam y and z coordinates\\n') \n mesh.write('-10\\nelement no. and gauss pt. no. with local stress vector xx and moment xx,yy,zz\\n-10')\n mesh.close()", "def write_lat_file(self):\n\n # If the lattice file exists, remove it and start over\n if os.path.isfile(self.filename):\n os.remove(self.filename)\n\n lat = open(self.filename, 'w')\n\n header = '? VERSION = 1.0\\n'\n header += '? UNITLENGTH = ' + str(self.unit_length) + '\\n'\n lat.write(header)\n\n quad_label = '#\\n'\n quad_label += '# Quads:\\n'\n quad_label += '# QF dB/dx L space\\n'\n quad_label += '#--------------------------------------\\n'\n lat.write(quad_label)\n\n # Start with quads\n for quad_array in self.elems_dict['QF']:\n quadline = 'QF '\n quadline += str(quad_array[0]) + ' '\n quadline += str(quad_array[1]) + ' '\n quadline += str(quad_array[2]) + ' \\n'\n lat.write(quadline)\n\n und_label = '#\\n'\n und_label += '# Undulators:\\n'\n und_label += '# AW AW0 L space\\n'\n und_label += '#--------------------------------------\\n'\n lat.write(und_label)\n\n # Add undulators\n for und_array in self.elems_dict['AW']:\n undline = 'AW '\n undline += str(und_array[0]) + ' '\n undline += str(und_array[1]) + ' '\n undline += str(und_array[2]) + ' \\n'\n lat.write(undline)\n\n lat.close()", "def _dump_data(self, fileobj):\n if not fileobj and self._file:\n root = os.path.splitext(self._file.name)[0]\n fileobj = root + \".txt\"\n\n close_file = False\n\n if isinstance(fileobj, str):\n fileobj = open(fileobj, \"w\")\n close_file = True\n\n linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)\n\n # Process each row of the table and output one row at a time\n def format_value(val, format):\n if format[0] == \"S\":\n itemsize = int(format[1:])\n return \"{:{size}}\".format(val, size=itemsize)\n elif format in np.typecodes[\"AllInteger\"]:\n # output integer\n return f\"{val:21d}\"\n elif format in np.typecodes[\"Complex\"]:\n return f\"{val.real:21.15g}+{val.imag:.15g}j\"\n elif format in np.typecodes[\"Float\"]:\n # output floating point\n return f\"{val:#21.15g}\"\n\n for row in self.data:\n line = [] # the line for this row of the table\n\n # Process each column of the row.\n for column in self.columns:\n # format of data in a variable length array\n # where None means it is not a VLA:\n vla_format = None\n format = _convert_format(column.format)\n\n if isinstance(format, _FormatP):\n # P format means this is a variable length array so output\n # the length of the array for this row and set the format\n # for the VLA data\n line.append(\"VLA_Length=\")\n line.append(f\"{len(row[column.name]):21d}\")\n _, dtype, option = _parse_tformat(column.format)\n vla_format = FITS2NUMPY[option[0]][0]\n\n if vla_format:\n # Output the data for each element in the array\n for val in row[column.name].flat:\n line.append(format_value(val, vla_format))\n else:\n # The column data is a single element\n dtype = self.data.dtype.fields[column.name][0]\n 
array_format = dtype.char\n if array_format == \"V\":\n array_format = dtype.base.char\n if array_format == \"S\":\n array_format += str(dtype.itemsize)\n\n if dtype.char == \"V\":\n for value in row[column.name].flat:\n line.append(format_value(value, array_format))\n else:\n line.append(format_value(row[column.name], array_format))\n linewriter.writerow(line)\n if close_file:\n fileobj.close()", "def convert_lattice(file_in, file_out):\n open_fn = gzip.open if file_in.endswith('.gz') else open\n with open_fn(file_in, 'rt') as lattice, open(file_out, 'w') as dot:\n dot.write(\n \"digraph lattice {\\n\" \\\n \"\\trankdir=LR;\\n\" \\\n \"\\tnode [shape = ellipse; fontname = courier];\\n\" \\\n \"\\tedge [fontname = courier];\\n\\n\")\n while True:\n line = lattice.readline()\n if line.startswith('N='):\n break\n first_line = line.split()\n nodes, links = [int(i.split('=')[1]) for i in first_line]\n for _ in range(nodes):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:3])\n dot.write(\"\\t%s [label = \\\"id=%s\\\\nt=%s\\\\nW=%s\\\"];\\n\" % (\n content[0], content[0], content[1], content[2]))\n dot.write(\"\\n\")\n for _ in range(links):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:5])\n if next_line[5].startswith('n='):\n dot.write(\n \"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\\nn=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3],\n content[4], next_line[5].split('=')[1]))\n else:\n dot.write(\"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3], content[4]))\n dot.write(\"}\")", "def __create_lines_table(self):\r\n i = 0\r\n rows = []\r\n cols = []\r\n self.__add_item('number', self.tab1, i, 0, cols)\r\n self.__add_item('last stop', self.tab1, i, 1, cols)\r\n self.__add_item('route', self.tab1, i, 2, cols)\r\n self.__add_item('frequency', self.tab1, i, 3, cols)\r\n self.__add_item('bus capacity', self.tab1, i, 4, cols)\r\n rows.append(cols)\r\n i += 1\r\n for line in self.simulation.lines:\r\n cols = []\r\n self.__add_item(line.number, self.tab1, i, 0, cols)\r\n self.__add_item(line.last_stop_name(0), self.tab1, i, 1, cols)\r\n _route = [stop.name.encode(\"utf-8\") for stop in line.routes[0] if stop.name != \"P\"]\r\n self.__add_item(_route, self.tab1, i, 2, cols)\r\n self.__add_item(line.frequencies[0], self.tab1, i, 3, cols)\r\n self.__add_item(line.bus_capacity, self.tab1, i, 4, cols)\r\n i += 1\r\n cols = []\r\n self.__add_item(line.number, self.tab1, i, 0, cols)\r\n self.__add_item(line.last_stop_name(1), self.tab1, i, 1, cols)\r\n _route = [stop.name.encode(\"utf-8\") for stop in line.routes[1] if stop.name != \"P\"]\r\n self.__add_item(_route, self.tab1, i, 2, cols)\r\n self.__add_item(line.frequencies[1], self.tab1, i, 3, cols)\r\n self.__add_item(line.bus_capacity, self.tab1, i, 4, cols)\r\n i += 1\r\n rows.append(cols)", "def createtxtED(mapa,dirs):\n x=grass.read_command('r.stats',flags='a',input=mapa)\n \n y=x.split('\\n')\n os.chdir(dirs)\n txtsaida=mapa+'PCT_Borda.txt'\n txtreclass=open(mapa+'_EDGE.txt','w')\n txtreclass.write('COD'',''HA\\n')\n if y!=0:\n for i in y:\n if i !='':\n ##print i\n f=i.split(' ')\n if '*' in f :\n break\n else:\n ##print f\n ids=f[0]\n ids=int(ids)\n ##print ids\n ha=f[1]\n ha=float(ha)\n haint=float(ha)\n \n haint=haint/10000+1\n ##print haint\n \n ##print haint\n haint=round(haint,2)\n txtreclass.write(`ids`+','+`haint`+'\\n')\n txtreclass.close()", "def 
to_latex(self,fn='tableone.tex'):\n tablefmt = 'latex'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))", "def save_lattice(lattice, filename):\n np.save(filename, lattice)\n print (\"SOM lattice saved at %s\" %filename)", "def write_file(output_name, parsed_xQTL_list):\n with open(output_name, \"w\") as thefile:\n thefile.write(\"metabolite\\tchr\\tpeak_mb\\tinf_mb\\tsup_mb\\tlod\\n\")\n for xQTL in parsed_xQTL_list:\n xQTL = [str(element) for element in xQTL]\n line = \"\\t\".join(xQTL)\n thefile.write(line + \"\\n\")", "def save_line_data(line, name='temp'):\n x, y = line.get_xdata(), line.get_ydata()\n x, y = x[1:], y[1:] # remove initial NaN value\n # x, y = linspace_xy(x, y, 1e3)\n xy = np.column_stack((x, y))\n\n with open(name, 'wt') as f:\n fmt_str = '{0:f} {1:f}'\n for row in xy:\n print(fmt_str.format(row[0], row[1]), file=f)", "def mmtformat(df, filename, ident_col_name, ra_col_name, dec_col_name,\n mag_col_name, ra_pm_col_name=None, dec_pm_col_name=None,\n epoch = 'J2000.0'):\n\n ident = df[ident_col_name]\n ra = df[ra_col_name]\n dec = df[dec_col_name]\n\n if ra_pm_col_name is None:\n ra_pm = '0.0'\n else:\n ra_pm = df[ra_pm_col_name]\n if dec_pm_col_name is None:\n dec_pm = '0.0'\n else:\n dec_pm = df[dec_pm_col_name]\n\n mag = df[mag_col_name]\n\n\n f = open(filename+'_MMT.dat', 'w')\n\n for i in df.index:\n\n f.write('{0:16}{1:13}{2:13}{3:4}{4:5}{5:04.1f} 0 {6:9}\\n'.format(ident[i],\n decra2hms(ra[i]),\n decdeg2dms(dec[i]),\n ra_pm,\n dec_pm,\n mag[i],\n epoch))\n print ('{0:15}{1:13}{2:13}{3:4}{4:5}{5:05.2f} {6:9}'.format(ident[i],\n decra2hms(ra[i]),\n decdeg2dms(dec[i]),\n ra_pm,\n dec_pm,\n mag[i],\n epoch))\n\n f.close()", "def parse_table_to_madx_sequence_file(self, filename: str) -> None:\n parse_table_to_madx_sequence_file(self.name, self.len, self.table, filename)", "def write_data(natoms, lx,ly, tpe, molid, x, y, bid, btpe, b1, b2, aid, atpe, a1, a2, a3,ofname):\n ofile = open(ofname, 'w')\n ### write down header information\n ofile.write('LAMMPS data file filaments in 2D\\n\\n')\n ofile.write(str(natoms) + ' atoms\\n')\n ofile.write('1 atom types\\n')\n ofile.write(str(max(bid)) + ' bonds\\n')\n ofile.write('1 bond types\\n')\n ofile.write(str(max(aid)) + ' angles\\n')\n ofile.write('1 angle types\\n\\n')\n ofile.write('0.0 ' + str(lx) + ' xlo xhi\\n')\n ofile.write('0.0 ' + str(ly) + ' ylo yhi\\n')\n ofile.write('-2.5 2.5 zlo zhi\\n\\n')\n ofile.write('Masses\\n\\n')\n ofile.write('1 1\\n\\n')\n ### Atoms section\n ofile.write('Atoms\\n\\n')\n for i in range(natoms):\n ofile.write(str(i+1) + ' ' + str(molid[i]) + ' ' + str(tpe[i]) + ' ' + str(x[i]) + ' ' + str(y[i]) + ' 0.0\\n')\n ofile.write('\\n')\n ### Bonds section\n ofile.write('Bonds\\n\\n')\n for i in range(len(bid)):\n ofile.write(str(bid[i]) + ' ' + str(btpe[i]) + ' ' + str(b1[i]) + ' ' + str(b2[i]) + '\\n')\n ofile.write('\\n')\n ### Angles section\n ofile.write('Angles\\n\\n')\n for i in range(len(aid)):\n ofile.write(str(aid[i]) + ' ' + str(atpe[i]) + ' ' + str(a1[i]) + ' ' + str(a2[i]) + ' ' + str(a3[i]) + '\\n')\n ofile.write('\\n')\n ofile.close()\n return", "def output2file(list_all_coord, list4bed):\n\n #db = open(\"LRG_coord.txt\",\"w\")\n #db_csv = open(\"LRG_coord.csv\",\"w\")\n #bed = open (\"LRG_bed\", \"w\")\n \n db = open(\"./Outputs/LRG_coord.txt\",\"w\")\n db_csv = open(\"./Outputs/LRG_coord.csv\",\"w\")\n bed = open (\"./Outputs/LRG_bed\", \"w\")\n \n \n headings = [\"transcript\",\"exon\", \"ex_start\", \"ex_end\", \"tr_start\", \"tr_end\", 
\"pt_start\", \"pt_end\"]\n bed_headings = [\"chr\", \"start\", \"end\", \"strand\", \"transcript\" ]\n \n #Writting tab separated text file\n db.write(\"\\t\".join(headings) + \"\\n\") # writting headings\n for group in list_all_coord: \n db.write (\"\\t\".join(group) + \"\\n\") # writting coordinates\n \n #Writting csv file\n db_csv.write(\",\".join(headings) + \"\\n\") # writting headings\n for group in list_all_coord: \n db_csv.write (\",\".join(group) + \"\\n\") # writting coordinates\n\n bed.write(\"\\t\".join(bed_headings) + \"\\n\") # writting headings\n for group in list4bed: \n bed.write (\"\\t\".join(group) + \"\\n\")\n\n db.close()\n db_csv.close()\n bed.close()\n \n return", "def ensmblTx2BED(ensemblPath,BEDoutPath):\n # +++++ func specific Defs +++++\n def getBlockSizes(tx):\n blkSzList = []\n for exn in tx:\n blkSzList.append(str(int(exn[11])-int(exn[10])+1))\n return ','.join(blkSzList)\n \n def getBlockStarts(tx,chrmStart):\n blkStrtList = []\n for exn in tx:\n blkStrtList.append(str(int(exn[10])-1-int(chrmStart)))\n return ','.join(blkStrtList)\n \n # +++++ initialize ensembl data +++++\n txList = map(lambda l: l.strip('\\n') , open(ensemblPath, 'rU'))\n txList.pop(0)\n txList = JamesDefs.groupByField_silent(txList,1)\n \n # +++++ prepare destination file +++++\n bedFile = open(BEDoutPath,'w')\n bedFile.write('track name=\"Ensembl Aa Tx Definitions\" description=\"From %s\" useScore=0\\n' % (ensemblPath))\n \n # +++++ loop through the Txs +++++\n for tx in txList:\n # --- sort tx based on lowest coords of each exon ---\n tx.sort(key=lambda x: int(x[10]))\n \n chrm = tx[0][2]\n chrmStart = str(int(tx[0][5])-1)\n chrmEnd = tx[0][6]\n name = tx[0][1]\n score = '0'\n strand = tx[0][7]\n thkStart = chrmStart\n thkEnd = chrmEnd\n rgb = '0'\n blkCount = str(len(tx))\n blkSizes = getBlockSizes(tx)\n blkStarts = getBlockStarts(tx,chrmStart)\n \n # --- write out line ---\n bedFile.write('%s\\n' % ('\\t'.join([chrm, \n chrmStart,\n chrmEnd,\n name,\n score,\n strand,\n thkStart,\n thkEnd,\n rgb,\n blkCount,\n blkSizes,\n blkStarts])))", "def to_tex(path,texcmd,s):\n df = pd.read_table(path,names=['line'])\n line = '\\\\nc{\\\\%s}{%s}' % (texcmd,s)\n b = df.line.str.contains(texcmd)\n if b.sum()==0:\n df = pd.concat([df,pd.DataFrame([line],columns=['line'])])\n elif b.sum()==1:\n df.ix[df.index[b],'line'] = line\n else:\n print \"multiple ref\"\n df.to_csv(path,index=False,sep=' ',header=False)", "def zeropoint_offsets_table_1ccd(field,pointing,ccd,library):\n\n columns = root_catalogs+'f0%ip0%i_%i_tot_ISO_%s.columns' %(field,pointing,ccd,library)\n print 'Reading the file...',columns\n offset = get_data(columns,4,23)\n base = arange(23)+1\n\n nameout = root_catalogs+'f0%ip0%i_%i_tot_ISO_%s_tableZPLATEX.txt' %(field,pointing,ccd,library)\n fileout = open(nameout,'w') \n\n tabla = \"\"\"\n\\begin{table*}\n\\caption{PHOTOMETRIC ZERO POINT OFFSETS FROM SED FITTING}\n\\begin{center}\n\\label{campos}\n\\begin{tabular}{|l|c|c|c|c|c|c|c|}\n\\hline\n\\hline\nFILTER & ZP Offset \\\\\n($\\lambda_{eff}$) & CCD%i \\\\\n\\hline\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\ \n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n%s & %.3f \\\\\n\\hline\n\\end{tabular}\n\\end{center}\n\\end{table*} \n \"\"\" %(field,pointing,ccd,ccd,\n 
bands[0],offset[0],bands[1],offset[1],bands[2],offset[2],bands[3],offset[3],bands[4],offset[4],\n bands[5],offset[5],bands[6],offset[6],bands[7],offset[7],bands[8],offset[8],bands[9],offset[9],\n bands[10],offset[10],bands[11],offset[11],bands[12],offset[12],bands[13],offset[13],bands[14],offset[14],\n bands[15],offset[15],bands[16],offset[16],bands[17],offset[17],bands[18],offset[18],bands[19],offset[19],\n bands[20],offset[20],bands[21],offset[21],bands[22],offset[22])\n\n\n fileout.write(tabla)\n fileout.close()", "def writeIMPACT(filename,beam,lattice=[]):\n beamStrList=beam2str(beam) \n latticeStrList=lattice2str(lattice)\n \n \n f=open(filename,'w') \n f.writelines(beamStrList)\n f.writelines(latticeStrList)\n f.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to return quadrupole strengths as a dict.
def get_quad_strengths(self) -> dict:
    return (
        self.table.loc[self.table.family == "QUADRUPOLE", ["name", "K1"]]
        .set_index("name", drop=True)
        .to_dict()["K1"]
    )
[ "def _getSquaresDict(self) -> dict:\n squares = {}\n for y in range(0, len(self._map)):\n row = self._map[y]\n for x in range(0, len(row)):\n char = row[x]\n pos = array([x, y])\n if char in squares.keys():\n squares[char].append(pos)\n else:\n squares[char] = [pos]\n \n return squares", "def getWeightDict():\n \n weightDict = {}\n ## A list with weights in the same order as the suit factors above\n weightDict[8] = [0.29,0.22,0.21,0.28] \n weightDict[6] = [0.22,0.14,0.11,0.23,0.30]\n weightDict[5] = [0.29,0.34,0.37]\n weightDict[9] = [0.53,0.45,0.02]\n weightDict[4] = [0.46,0.35,0.19]\n return weightDict", "def get_sext_strengths(self) -> dict:\n if \"SEXTUPOLE\" in self.table.family.values:\n return (\n self.table.loc[self.table.family == \"SEXTUPOLE\", [\"name\", \"K2\"]]\n .set_index(\"name\", drop=True)\n .to_dict()[\"K2\"]\n )\n else:\n return {}", "def state(self) -> dict:\n res = {}\n res[\"name:\"] = self.name\n for i in range(len(self.__squads)):\n res[f\"{i + 1} squad\"] = self.__squads[i].health\n return res", "def get_options(self) -> Dict:\n\n center = max(self.center.get(), 1)\n linewidth= max(self.linewidth.get(), 1)\n power = max(self.power.get(), 1)\n\n out = {'power': power, 'linewidth': linewidth, 'center': center}\n return out", "def slot_key_db() -> Dict[str, List]:\n\n return {\n \"q50\": \"second_person_plural\",\n \"q28\": \"cot_caught\",\n \"q80\": \"rain_sun\",\n \"q66\": \"crawfish\",\n \"q110\": \"halloween\",\n \"q64\": \"sandwich\",\n \"q90\": \"side_road\",\n \"q105\": \"beverage\",\n \"q73\": \"shoes\",\n \"q79\": \"highway\",\n \"q58\": \"yard_sale\",\n \"q107\": \"rubbernecking\",\n \"q94\": \"frosting\",\n \"q14\": \"lawyer\",\n \"q76\": \"kitty_corner\",\n \"q65\": \"firefly\",\n \"q60\": \"verge\",\n \"q118\": \"brew_thru\",\n \"q103\": \"water_fountain\",\n }", "def generate_weighted_dictionary(self, combinations, evidences):\n dictionary = dict()\n\n for combination in combinations:\n alarm_bayes = AlarmBayes()\n for i in range(0,len(combination)):\n alarm_bayes.all_nodes[i].set_assignment(combination[i])\n current_weight = alarm_bayes.get_current_weight(evidences)\n\n dictionary[combination] = [0, current_weight]\n #for combination -ends\n return dictionary", "def build_weights(self) -> Dict[object, float]:\n self.build()\n\n self._weights = [np.random.rand() for x in range(0, self.n)]\n return dict(zip(self.build_property, self.weights))", "def quad2dict(l, pyfriendly=False):\n if pyfriendly:\n l = map(vcs.make_pyfriendly, l)\n return dict([ (x[1], x[3]) for x in l ])", "def calculateChordStrengths(delaunayMap):\n\n result = [None] * delaunayMap.maxEdgeLabel()\n for edge in delaunayMap.edgeIter():\n if edge.flag(CONTOUR_SEGMENT):\n continue\n result[edge.label()] = chordStrength(edge)\n return result", "def gains(self) -> dict:\n return self._gains", "def get_solution():\n if config.TAQUIN_SIZE == 1:\n return {0: (0, 0)}\n solution = {}\n y = 0\n ymin = 0\n ymax = config.TAQUIN_SIZE - 1\n x= 0\n xmin = 0\n xmax = config.TAQUIN_SIZE - 1\n cpt = 1\n dir = 1\n for i in range(config.TAQUIN_SIZE ** 2):\n solution[cpt] = (x, y)\n cpt = cpt + 1 if cpt < config.TAQUIN_SIZE ** 2 - 1 else 0\n if x == xmin and y < ymax:\n if dir == 4:\n ymin += 1\n dir = 1\n y += 1\n elif y == ymax and x < xmax:\n if dir == 1:\n xmin += 1\n dir = 2\n x += 1\n elif x == xmax and y > ymin:\n if dir == 2:\n ymax -= 1\n dir = 3\n y -= 1\n elif y == ymin and x > xmin:\n if dir == 3:\n dir = 4\n xmax -= 1\n x -= 1\n return solution", "def slot_key_db() -> Dict[str, List]:\n\n 
return {'Q01':'second_person_plural',\n 'Q02':'bug',\n 'Q03':'highway',\n 'Q04':'firefly',\n 'Q05':'wild_cat',\n 'Q06':'shoes',\n 'Q07':'yard_sale',\n 'Q08':'mary_merry_marry',\n 'Q09':'frosting',\n 'Q10':'highway',\n 'Q11':'rubbernecking',\n 'Q12':'cot_caught',\n 'Q13':'school_college',\n 'Q14':'freight',\n 'Q15':'second_syllabe',\n 'Q16':'beverage',\n 'Q17':'sandwich',\n 'Q18':'brew_thru',\n 'Q19':'crawfish',\n 'Q20':'rain_sun',\n 'Q21':'road_meet_in_circle',\n 'Q22':'halloween',\n 'Q23':'water_fountain',\n 'Q24':'firefly'}", "def gqlevels(self):\n name = DHCP.GQLEVELS\n\n reldict = self.getLevels()[HHGQ]\n gq_range = list(range(1, 8))\n\n gq_groupings = sk.getSubsetTuples(reldict, gq_range)\n\n groupings = { HHGQ: dict(gq_groupings) }\n\n return name, groupings", "def get_api_weights_dict(self):\n api_names = [\"login\", \"viewUser\", \"getUsersByMobileNumber\", \"listChats\", \"listMessages\", \"createChat\",\n \"createMessage\", \"signup\"]\n api_weights_dict = {api_names[i]: self.input_args[i + 3] / self.total_weight for i in range(len(api_names))}\n return api_weights_dict", "def gqlevels(self):\n name = \"gqlevels\"\n\n reldict = self.getLevels()[HHGQ]\n gq_range = range(1, 8)\n\n gq_groupings = sk.getSubsetTuples(reldict, gq_range)\n\n groupings = {HHGQ: dict(gq_groupings)}\n\n return name, groupings", "def asDict(self) -> Dict[str, COORDINATE_TYPE]:\n return {\"x\": self.x, \"y\": self.y, \"width\": self.width, \"height\": self.height}", "def get_strength(self):\n return self.__strength", "def generate_dictionary(self, combinations):\n dictionary = dict()\n\n for combination in combinations:\n dictionary[combination] = 0\n #for combination -ends\n return dictionary" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to return sextupole strengths as a dict.
def get_sext_strengths(self) -> dict:
    if "SEXTUPOLE" in self.table.family.values:
        return (
            self.table.loc[self.table.family == "SEXTUPOLE", ["name", "K2"]]
            .set_index("name", drop=True)
            .to_dict()["K2"]
        )
    else:
        return {}
[ "def get_quad_strengths(self) -> dict:\n return (\n self.table.loc[self.table.family == \"QUADRUPOLE\", [\"name\", \"K1\"]]\n .set_index(\"name\", drop=True)\n .to_dict()[\"K1\"]\n )", "def x_threat_rating_map(self):\n return {\n '0': 'Threat Rating: Unknown',\n '1': 'Threat Rating: Suspicious',\n '2': 'Threat Rating: Low',\n '3': 'Threat Rating: Moderate',\n '4': 'Threat Rating: High',\n '5': 'Threat Rating: Very High',\n }", "def getWeightDict():\n \n weightDict = {}\n ## A list with weights in the same order as the suit factors above\n weightDict[8] = [0.29,0.22,0.21,0.28] \n weightDict[6] = [0.22,0.14,0.11,0.23,0.30]\n weightDict[5] = [0.29,0.34,0.37]\n weightDict[9] = [0.53,0.45,0.02]\n weightDict[4] = [0.46,0.35,0.19]\n return weightDict", "def strength(self, strengths: nd.NumDict) -> float:\n\n weighted = nd.keep(strengths, keys=self.weights) * self.weights\n \n return nd.val_sum(weighted)", "def get_strength(self):\n return self.__strength", "def ethnicity_demographics(self) -> Dict[str, float]:\n return {'Hispanic': 0.486, \"White\": 0.261, 'Asian': 0.154, 'Black': 0.09,\n \"American Indian/Alaska Native\": 0.014, 'Native Hawaiian/Pacific Islander': 0.004, 'Other': 0.031}", "def generate_weighted_dictionary(self, combinations, evidences):\n dictionary = dict()\n\n for combination in combinations:\n alarm_bayes = AlarmBayes()\n for i in range(0,len(combination)):\n alarm_bayes.all_nodes[i].set_assignment(combination[i])\n current_weight = alarm_bayes.get_current_weight(evidences)\n\n dictionary[combination] = [0, current_weight]\n #for combination -ends\n return dictionary", "def passwordContent(strength):\n if strength == 1:\n chars = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!\"£$%^&*(){}[]#~:;@<>,.?/\\|-_+=') \n elif strength == 2:\n chars = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')\n elif strength == 3: #if taken seriously would use a dictionairy file but do not have the knowledge at the moment\n chars = ['yes','no','somewhere','in','between','is','there','a','point']\n return chars", "def gains(self) -> dict:\n return self._gains", "def getReconstructibleDevices() -> dict:\n rec = {\"stove\":getPowerStove}\n return rec", "def slot_key_db() -> Dict[str, List]:\n\n return {\n \"q50\": \"second_person_plural\",\n \"q28\": \"cot_caught\",\n \"q80\": \"rain_sun\",\n \"q66\": \"crawfish\",\n \"q110\": \"halloween\",\n \"q64\": \"sandwich\",\n \"q90\": \"side_road\",\n \"q105\": \"beverage\",\n \"q73\": \"shoes\",\n \"q79\": \"highway\",\n \"q58\": \"yard_sale\",\n \"q107\": \"rubbernecking\",\n \"q94\": \"frosting\",\n \"q14\": \"lawyer\",\n \"q76\": \"kitty_corner\",\n \"q65\": \"firefly\",\n \"q60\": \"verge\",\n \"q118\": \"brew_thru\",\n \"q103\": \"water_fountain\",\n }", "def get_strength(character):\n return CharacterAttributeCalculator._get_attribute(attribute=\"strength\", character=character)", "def test_accessing_predefined_strength() -> None:\n assert strength.weak < strength.medium\n assert strength.medium < strength.strong\n assert strength.strong < strength.required", "def utility_characterization_factors(self) -> dict[tuple[str, str], tuple[float, AbsoluteUnitsOfMeasure]]:\n return bst.HeatUtility.characterization_factors", "def stream_utility_prices(self) -> dict[str, float]:\n return bst.stream_utility_prices", "def needs(self) :\r\n return ({'water need':self._waterNeed,'food need':self._foodNeed})", "def get_switch_states_dict(self):\n num_switches = self.opendss.num_switches\n switch_state = [0, 1]\n 
switch_states = list(product(switch_state, repeat=num_switches))\n ss_list = [str(x).strip('()').replace(',', '').replace(' ', '') for x in switch_states]\n ss_dict = dict(zip(ss_list, range(len(ss_list))))\n return ss_dict", "def password_strength(password):\n\n special = [\"!\", '\"', \"£\", \"$\", \"%\", \"^\", \"&\", \"*\", \"(\", \")\", \"{\", \"}\", \"[\", \"]\", \"~\", \"-\", \"_\", \"+\",\"=\", \"<\", \">\", \",\", \".\", \"/\", \"?\"]\n\n num_of_lowers = 0\n num_of_uppers = 0\n num_of_specials = 0\n\n # Count number of lowercase, uppercase and special characters there are", "def getparamexogdict():\n paramssdict = {'ALPHA': 0.3, 'BETA': 0.95, 'DELTA': 0.1, 'RHO': 0.9, 'SIGMA': 0.1, 'ME_c': 0.01, 'ME_y': 0.01}\n return(paramssdict)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to load a dictionary of strength settings into the table. The col parameter names the table column the strengths are loaded into.
def load_strengths_to_table(self, strdc: dict, col: str) -> None:
    self.history.put((deepcopy(self.name), deepcopy(self.len), deepcopy(self.table)))
    for k, v in strdc.items():
        self.table.loc[self.table["name"] == k, col] = v
[ "def setTableattrs( self, indict ):\n\n for key in indict.keys():\n val = indict[key]\n tpair = \"\"\" %s=\"%s\" \"\"\" % (key,val)\n self.tabattr = self.tabattr + tpair", "def get_sext_strengths(self) -> dict:\n if \"SEXTUPOLE\" in self.table.family.values:\n return (\n self.table.loc[self.table.family == \"SEXTUPOLE\", [\"name\", \"K2\"]]\n .set_index(\"name\", drop=True)\n .to_dict()[\"K2\"]\n )\n else:\n return {}", "def setRowattrs( self, row, indict ):\n\n if row >= self.maxRow:\n# print \"INdexs ROW out of range\"\n return None\n \n for key in indict.keys():\n\n val = indict[key]\n\n if( self.rowattr.has_key( row ) ):\n pval = self.rowattr[ (row) ]\n self.rowattr[ row ] = \"\"\"%s, %s=\"%s\" \"\"\" % ( pval, key, val )\n else:\n self.rowattr[ row ] =\"\"\"%s=\"%s\" \"\"\" % ( key, val )", "def gettabledict(self, tablename):\n urlpath = '/' + tablename\n return self.getdict(urlpath)", "def check_one_col(self, attr, verbosity=1):\n myvect = getattr(self,attr)\n column = bb_column(attr, myvect)\n \n ## if verbosity > 0:\n ## print('='*20)\n ## print('attr: %s' % attr)\n ## print('empty grades: %i' % emptygrades)\n ## print('Needs Grading: %i' % ng)\n ## if p_poss is None:\n ## print('points possible not found')\n ## else:\n ## print('points possible: %0.5g' % p_poss)\n ## print('ave: %0.5g' % ave) \n ## print('max: %0.5g' % mymax)\n ## print('min: %0.5g' % mymin)\n\n \n ## return mydict", "def _load_mapping_table(self):\n\n mapping_file_name = self._specification[u\"configuration\"].\\\n get(u\"mapping-file\", None)\n if mapping_file_name:\n try:\n with open(mapping_file_name, u'r') as mfile:\n mapping = load(mfile, Loader=FullLoader)\n # Make sure everything is lowercase\n self._specification[u\"configuration\"][u\"mapping\"] = \\\n {key.lower(): val.lower() for key, val in\n mapping.items()}\n logging.debug(f\"Loaded mapping table:\\n{mapping}\")\n except (YAMLError, IOError) as err:\n raise PresentationError(\n msg=f\"An error occurred while parsing the mapping file \"\n f\"{mapping_file_name}\",\n details=repr(err)\n )\n else:\n self._specification[u\"configuration\"][u\"mapping\"] = dict()", "def set_dictionary(self, col_name: str, dictionary: List[str]) -> None:\n\n col_idx = self._dataspec_column_index.get(col_name)\n if col_idx is None:\n raise ValueError(f\"Unknown feature \\\"{col_name}\\\"\")\n\n if py_tree.dataspec.OUT_OF_DICTIONARY not in dictionary:\n raise ValueError(\n \"fThe dictionary should contain an \\\"{OUT_OF_DICTIONARY}\\\" value\")\n\n column_spec = self._dataspec.columns[col_idx]\n self._check_column_has_dictionary(column_spec)\n\n column_spec.categorical.number_of_unique_values = len(dictionary)\n column_spec.categorical.items.clear()\n # The OOB value should be the first one.\n column_spec.categorical.items[py_tree.dataspec.OUT_OF_DICTIONARY].index = 0\n for item in dictionary:\n if item == py_tree.dataspec.OUT_OF_DICTIONARY:\n continue\n column_spec.categorical.items[item].index = len(\n column_spec.categorical.items)", "def load_table(self, table_id: str) -> dict:\n return self._db.load_table(table_id)", "def setup_column_prefs( self ):\n\n\t\tpass", "def load_char_table(self, selected_texts):\n\t\tself.char_table = character_tool.load_original_char_table(selected_texts)", "def get_column_prefs_widths_dict( self ):\n\n\t\treturn {}", "def get_table(self, key):\n if self.sens and self.has_key(key):\n return self.tbdict_sen[key]\n if self.has_key(key):\n return self.tbdict[key]", "def __init__(self, name, data_w_tables):\n self.name = name\n 
self.settings_by_table = {}\n table_feature = get_param('table_feature')\n for table_name in np.unique(data_w_tables[table_feature]):\n table_data = data_w_tables.loc[data_w_tables[table_feature] == str(table_name)]\n self.settings_by_table[str(table_name)] = Settings(str(table_name), table_data)", "def __init__(self, stats, **params):\n gui.Table.__init__(self, **params)\n self.baseStats = stats # keep reference to original player stats\n self.updatedStats = copy(stats) # the new stats of player\n self.increments = {'hp':5, 'mana':5, 'stamina':5,'strength':0.1, 'atkSpeed':0.05} # how much to add/subtract for each skill\n self.usedPts = {'hp':0, 'mana':0, 'stamina':0,'strength':0, 'atkSpeed':0} # points that have been used corresponding to stat that used it\n \n spacer = gui.Spacer(width=5, height=5)\n # Health row\n plusBtn = gui.Button(\"+\")\n plusBtn.connect(gui.CLICK, self.plusBtnClicked, 'hp')\n minusBtn = gui.Button(\"-\")\n minusBtn.connect(gui.CLICK, self.minusBtnClicked, 'hp')\n self.tr()\n self.td(gui.Label(\"Max HP:\"), align=-1)\n self.hpLabel = gui.Label(str(stats.hp))\n self.td(minusBtn)\n self.td(spacer)\n self.td(self.hpLabel)\n self.td(spacer)\n self.td(plusBtn)\n\n # Mana row\n plusBtn = gui.Button(\"+\")\n plusBtn.connect(gui.CLICK, self.plusBtnClicked, 'mana')\n minusBtn = gui.Button(\"-\")\n minusBtn.connect(gui.CLICK, self.minusBtnClicked, 'mana')\n self.tr()\n self.td(gui.Label(\"Max Mana:\"), align=-1)\n self.manaLabel = gui.Label(str(stats.mana))\n self.td(minusBtn)\n self.td(spacer)\n self.td(self.manaLabel)\n self.td(spacer)\n self.td(plusBtn)\n\n # Stamina row\n plusBtn = gui.Button(\"+\")\n plusBtn.connect(gui.CLICK, self.plusBtnClicked, 'stamina')\n minusBtn = gui.Button(\"-\")\n minusBtn.connect(gui.CLICK, self.minusBtnClicked, 'stamina')\n self.tr()\n self.td(gui.Label(\"Max Stamina:\"), align=-1)\n self.staminaLabel = gui.Label(str(stats.stamina))\n self.td(minusBtn)\n self.td(spacer)\n self.td(self.staminaLabel)\n self.td(spacer)\n self.td(plusBtn)\n \n # Strength row\n plusBtn = gui.Button(\"+\")\n plusBtn.connect(gui.CLICK, self.plusBtnClicked, 'strength')\n minusBtn = gui.Button(\"-\")\n minusBtn.connect(gui.CLICK, self.minusBtnClicked, 'strength')\n self.tr()\n self.td(gui.Label(\"Strength:\"), align=-1)\n self.strLabel = gui.Label(\"{:.2f}\".format(stats.strength))\n self.td(minusBtn)\n self.td(spacer)\n self.td(self.strLabel)\n self.td(spacer)\n self.td(plusBtn)\n \n # Attack Speed row\n plusBtn = gui.Button(\"+\")\n plusBtn.connect(gui.CLICK, self.plusBtnClicked, 'atkSpeed')\n minusBtn = gui.Button(\"-\")\n minusBtn.connect(gui.CLICK, self.minusBtnClicked, 'atkSpeed')\n self.tr()\n self.td(gui.Label(\"Attack Speed:\"), align=-1)\n self.speedLabel = gui.Label(\"{:.2f}\".format(stats.atkSpeed))\n self.td(minusBtn)\n self.td(spacer)\n self.td(self.speedLabel)\n self.td(spacer)\n self.td(plusBtn)\n \n # Available skill points row\n self.tr()\n self.td(gui.Label(\"Available Skill Points:\"), align=-1)\n self.skillLabel = gui.Label(str(stats.skillPts))\n self.td(self.skillLabel)", "def load_state_dict(self, state_dict: Dict):", "def populate_attributes_table(conn):\n log_info('. 
populating \"attributes\" table')\n curs = conn.cursor()\n attributes_map = {}\n for attribute in ATTRIBUTES:\n internal_id, internal_name, label, hidden = attribute\n attributes_map[internal_name] = internal_id\n log_verbose('Adding attribute', internal_name, ':', internal_id)\n curs.execute(\"\"\"\n insert into attrs_tab(attr_id, attr_hidden, attr_value)\n values (?, ?, ?);\n \"\"\", (internal_id, 1 if hidden else 0, label,))\n curs.close()\n conn.commit()\n return attributes_map", "def tune_information(filename):\n tunes = dict()\n if filename:\n with open(filename, 'r', newline='') as tune_file:\n reader = csv.reader(tune_file)\n for line in reader:\n glyph_name = line[0]\n tune = line[1].replace(' ', ',')\n tunes[glyph_name] = tune\n return tunes", "def edit_dictionary():", "def load_from_dict(self, dict):\n self.width = dict[\"width\"]\n self.height = dict[\"height\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an authenticated SOAP client to access a SOAP service.
def _create_client(self, service):
    # We must specify service and port for Nuxeo
    client = suds.client.Client(
        '/'.join((self.settings.repository_url, service)) + '?wsdl',
        proxy=self.proxies,
        service=service,
        port=service + 'Port',
        cachingpolicy=1)
    if self.settings.repository_user:
        # Timestamp must be included, and be first for Alfresco.
        auth = suds.wsse.Security()
        auth.tokens.append(suds.wsse.Timestamp(validity=300))
        auth.tokens.append(suds.wsse.UsernameToken(
            self.settings.repository_user,
            self.settings.repository_password))
        client.set_options(wsse=auth)
    return client.service
[ "def _create_suds_client(self):\n\n self.client = Client(const.WSDLLOCAL)\n self.client.set_options(service = ApiClient._sdict[self.service][0],\n headers = {'user-agent': const.USERAGENT})\n\n # put username (and password if necessary) into the headers.\n # note that another way to do this is to call betdaq.set_user,\n # so the username and password in const.py do not need to be\n # specified.\n self.set_headers(const.BDAQUSER, const.BDAQPASS)", "def zeep_client(self) -> ZeepClient:\n if self._zeep_client is None:\n self._zeep_client = Oauth2Request.get_soap_client()\n return self._zeep_client", "def create_auth_client(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.auth_token)\n return client", "def _init_client():\n return _Client(_ARM_WS_URL)", "def create_client(self, username=None, service=None, host=None):\n return create_client(username, service, host)", "def _new_connection(self):\n\n # The following dictionary has the targetNamespace as the key and a list\n # of namespaces that need to be imported as the value for that key\n patches = { \"urn:SecurityServer\": [\"http://authentication.integration.crowd.atlassian.com\",\n \"http://soap.integration.crowd.atlassian.com\",\n \"http://exception.integration.crowd.atlassian.com\",\n \"http://rmi.java\"] ,\n \"http://soap.integration.crowd.atlassian.com\": [\"urn:SecurityServer\"] }\n\n # Create an ImportDoctor to use\n doctor = suds.xsd.doctor.ImportDoctor()\n\n # Patch all the imports into the proper targetNamespaces\n for targetNamespace in patches:\n for nsimport in patches[targetNamespace]:\n imp = suds.xsd.doctor.Import(nsimport)\n imp.filter.add(targetNamespace)\n doctor.add(imp)\n \n soap_client = suds.client.Client(self.crowd_home_uri + 'services/SecurityServer?wsdl', doctor=doctor)\n\n auth_context = soap_client.factory.create('ns1:ApplicationAuthenticationContext')\n auth_context.name = self.crowd_app_name\n auth_context.credential.credential = self.crowd_app_passwd\n self.token = soap_client.service.authenticateApplication(auth_context)\n\n return soap_client", "def createClient(self, secure):\n props = self.getPropertyMap()\n if not secure:\n insecure = self.getSession().getConfigService().getConfigValue(\"omero.router.insecure\")\n if insecure is not None and insecure != \"\":\n props[\"Ice.Default.Router\"] = insecure\n else:\n self.__logger.warn(\"Could not retrieve \\\"omero.router.insecure\\\"\")\n\n nClient = omero.client(props)\n nClient.__insecure = not secure\n nClient.setAgent(\"%s;secure=%s\" % (self.__agent, secure))\n nClient.joinSession(self.getSessionId())\n return nClient", "def _get_client(self):\n options = {\n 'webdav_hostname': 'https://'+self.stg_auth.get_credential('hostname'),\n 'webdav_login': self.stg_auth.get_credential('login'),\n 'webdav_password': self.stg_auth.get_credential('password')\n }\n return Client(options=options)", "def get_client(self, service):\n try:\n return boto3.client(\n service,\n region_name=self.region,\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n config=self.proxy_config,\n )\n except ClientError as e:\n fail(\"AWS %s service failed with exception: %s\" % (service, e))", "def get_authenticated_client(self):\n\n # Start by creating a client pointing to the right server\n client = ApiClient(self.server)\n\n # Authenticate. 
This will start a session and store the cookie\n auth = AuthenticationApi(client)\n auth.login(Login(username=self.admin_username, password=self.admin_password))\n\n self.client = client", "def get_client(token_path, redirect_url, api_key, account_id):\n client = easy_client(\n api_key=api_key,\n redirect_uri=redirect_url,\n token_path=token_path)\n stream_client = StreamClient(client, account_id=account_id)\n return stream_client", "async def _create_client(self):\n frozen_credentials = (\n await self._source_credentials.get_frozen_credentials()\n )\n return self._client_creator(\n 'sts',\n aws_access_key_id=frozen_credentials.access_key,\n aws_secret_access_key=frozen_credentials.secret_key,\n aws_session_token=frozen_credentials.token,\n )", "def get_cas_client():\n return CASClient(auth_settings.CAS_SERVER,\n settings.SERVICE_URL,\n proxy_url=settings.PROXY_URL,\n proxy_callback=settings.PROXY_CALLBACK_URL,\n auth_prefix=auth_settings.CAS_AUTH_PREFIX,\n self_signed_cert=auth_settings.SELF_SIGNED_CERT)", "def client(\n username: Optional[str] = None,\n api_key: Optional[str] = None,\n session: Optional[sessions.Session] = None,\n):\n has_login = (username is not None) and (api_key is not None)\n has_session = session is None\n\n if not has_session:\n if has_login:\n session = sessions.Session(\n credentials.Credentials(username=username, api_key=api_key)\n )\n else:\n raise MissingAuthentication()\n return client_mod.Client(session)", "def create_keyvault_client(args: object) -> tuple():\n if (args.KeyVaultUrl == None):\n return (None, None)\n\n credentials = ServicePrincipalCredentials(\n client_id=args.ServicePrincipalCredentialsClientID,\n secret=args.ServicePrincipalCredentialsSecret,\n tenant=args.ServicePrincipalCredentialsTenant)\n\n client = KeyVaultClient(credentials)\n\n return (client, args.KeyVaultUrl)", "def init_client(self):\n self._transport = RequestsHTTPTransport(url=self._url,\n use_json=True,\n headers={\n \"Content-type\":\n \"application/json\",\n \"Authorization\":\n \"bearer \" +\n str(self._token).strip()\n },\n verify=False)\n self._client = Client(retries=3,\n transport=self._transport,\n fetch_schema_from_transport=False)", "def get_service(credentials):\n # Use the authorize() function of OAuth2Credentials to apply necessary \n # credential headers to all requests.\n http = credentials.authorize(http = httplib2.Http())\n\n # Construct the service object for the interacting with the DS API.\n service = build('doubleclicksearch', 'v2', http=http)\n return service", "def create_sns_client(config):\n client = AwsSNSClient(access_key=config['AWS_ACCESS_KEY'],\n secret_key=config['AWS_SECRET_KEY'],\n region=config['AWS_REGION'])\n client.initialize()\n return client", "def client(self):\n try:\n return self._client\n except AttributeError: # create a new client\n self._client = self._session.create_client(\n service_name='swf', region_name=self._aws_region)\n return self._client" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a test player.
def _test_gsplayer(name, money, shares_map, tiles):
    player = GameStatePlayer()
    player.name = name
    player.money = money
    player.shares_map = shares_map
    player.tiles = tiles
    return player
[ "def test_player_details_by_player(self):\n pass", "def create_player(name):\n if name.lower() == \"ai\":\n return Player(name.upper(), 'computer')\n else:\n return Player(name.title(), 'human')", "def new_player(self, client_id, player_id=None, player_type = None, is_human=None,name=None) -> Player:\n raise NotImplementedError()", "def setUpTestData(cls):\n profile = models.Profile.objects.get(user__username=\"John Doe\")\n game = models.Game.create()\n game.add_player(profile)", "def player_model_fixture(db_mock=Mock(), **kwargs):\n p = Player(offline=True)\n p._id = 1\n p._new = False\n p.db = db_mock\n p.name = \"Henk\"\n p.gamertag = \"testGamertag\"\n p.discord = \"testDiscord\"\n p.rank = 1\n p.wins = 1\n p.losses = 1\n p.timeout = datetime.now()\n p.challenged = False\n p.password = \"test\"\n for key, value in kwargs.items():\n if not hasattr(p, key):\n raise KeyError(\"Unknown player model attribute: {}\".format(key))\n setattr(p, key, value)\n return p", "def test_challenge_player_6(self):\n pass", "def test_set_player_name(self):\n self.game.set_player(1, \"Wille\")\n p_1 = self.game.player1\n exp = \"Lucas\"\n self.game.set_player_name(\"Lucas\", p_1)\n self.assertEqual(exp, p_1.name)", "def test_player_v_player():\n\n time.sleep(1)\n example_player = Player(headers=HEADERS,\n endpoint='playervsplayer')\n\n table_names = example_player.data.keys()\n\n assert 'Overall' in table_names\n assert 'OnOffCourt' in table_names\n assert 'ShotDistanceOverall' in table_names\n assert 'ShotDistanceOnCourt' in table_names\n assert 'ShotDistanceOffCourt' in table_names\n assert 'ShotAreaOverall' in table_names\n assert 'ShotAreaOnCourt' in table_names\n assert 'ShotAreaOffCourt' in table_names\n assert 'PlayerInfo' in table_names\n assert 'VsPlayerInfo' in table_names\n\n example_overall = example_player.data['Overall'][0]\n example_on_off = example_player.data['OnOffCourt'][0]\n example_dist = example_player.data['ShotDistanceOverall'][0]\n example_dist_on = example_player.data['ShotDistanceOnCourt'][0]\n example_dist_off = example_player.data['ShotDistanceOffCourt'][0]\n example_area = example_player.data['ShotAreaOverall'][0]\n example_area_on = example_player.data['ShotAreaOnCourt'][0]\n example_area_off = example_player.data['ShotAreaOffCourt'][0]\n example_player_inf = example_player.data['PlayerInfo'][0]\n example_vs_player_inf = example_player.data['VsPlayerInfo'][0]\n\n assert list(example_overall.keys()) == ['GROUP_SET',\n 'GROUP_VALUE',\n 'PLAYER_ID',\n 'PLAYER_NAME',\n 'GP',\n 'W',\n 'L',\n 'W_PCT',\n 'MIN',\n 'FGM',\n 'FGA',\n 'FG_PCT',\n 'FG3M',\n 'FG3A',\n 'FG3_PCT',\n 'FTM',\n 'FTA',\n 'FT_PCT',\n 'OREB',\n 'DREB',\n 'REB',\n 'AST',\n 'TOV',\n 'STL',\n 'BLK',\n 'BLKA',\n 'PF',\n 'PFD',\n 'PTS',\n 'PLUS_MINUS',\n 'NBA_FANTASY_PTS',\n 'CFID',\n 'CFPARAMS']\n\n assert list(example_on_off.keys()) == ['GROUP_SET',\n 'PLAYER_ID',\n 'PLAYER_NAME',\n 'VS_PLAYER_ID',\n 'VS_PLAYER_NAME',\n 'COURT_STATUS',\n 'GP',\n 'W',\n 'L',\n 'W_PCT',\n 'MIN',\n 'FGM',\n 'FGA',\n 'FG_PCT',\n 'FG3M',\n 'FG3A',\n 'FG3_PCT',\n 'FTM',\n 'FTA',\n 'FT_PCT',\n 'OREB',\n 'DREB',\n 'REB',\n 'AST',\n 'TOV',\n 'STL',\n 'BLK',\n 'BLKA',\n 'PF',\n 'PFD',\n 'PTS',\n 'PLUS_MINUS',\n 'NBA_FANTASY_PTS',\n 'CFID',\n 'CFPARAMS']\n\n assert list(example_dist.keys()) == ['GROUP_SET',\n 'GROUP_VALUE',\n 'PLAYER_ID',\n 'PLAYER_NAME',\n 'FGM',\n 'FGA',\n 'FG_PCT',\n 'CFID',\n 'CFPARAMS']\n\n assert list(example_dist_on.keys()) == ['GROUP_SET',\n 'PLAYER_ID',\n 'PLAYER_NAME',\n 'VS_PLAYER_ID',\n 'VS_PLAYER_NAME',\n 
'COURT_STATUS',\n 'GROUP_VALUE',\n 'FGM',\n 'FGA',\n 'FG_PCT',\n 'CFID',\n 'CFPARAMS']\n assert list(example_dist_off.keys()) == ['GROUP_SET',\n 'PLAYER_ID',\n 'PLAYER_NAME',\n 'VS_PLAYER_ID',\n 'VS_PLAYER_NAME',\n 'COURT_STATUS',\n 'GROUP_VALUE',\n 'FGM',\n 'FGA',\n 'FG_PCT',\n 'CFID',\n 'CFPARAMS']\n\n assert list(example_area.keys()) == ['GROUP_SET',\n 'GROUP_VALUE',\n 'PLAYER_ID',\n 'PLAYER_NAME',\n 'FGM',\n 'FGA',\n 'FG_PCT',\n 'CFID',\n 'CFPARAMS']\n\n assert list(example_area_on.keys()) == ['GROUP_SET',\n 'PLAYER_ID',\n 'PLAYER_NAME',\n 'VS_PLAYER_ID',\n 'VS_PLAYER_NAME',\n 'COURT_STATUS',\n 'GROUP_VALUE',\n 'FGM',\n 'FGA',\n 'FG_PCT',\n 'CFID',\n 'CFPARAMS']\n\n assert list(example_area_off.keys()) == ['GROUP_SET',\n 'PLAYER_ID',\n 'PLAYER_NAME',\n 'VS_PLAYER_ID',\n 'VS_PLAYER_NAME',\n 'COURT_STATUS',\n 'GROUP_VALUE',\n 'FGM',\n 'FGA',\n 'FG_PCT',\n 'CFID',\n 'CFPARAMS']\n\n assert list(example_player_inf.keys()) == ['PERSON_ID',\n 'FIRST_NAME',\n 'LAST_NAME',\n 'DISPLAY_FIRST_LAST',\n 'DISPLAY_LAST_COMMA_FIRST',\n 'DISPLAY_FI_LAST',\n 'BIRTHDATE',\n 'SCHOOL',\n 'COUNTRY',\n 'LAST_AFFILIATION']\n\n assert list(example_vs_player_inf.keys()) == ['PERSON_ID',\n 'FIRST_NAME',\n 'LAST_NAME',\n 'DISPLAY_FIRST_LAST',\n 'DISPLAY_LAST_COMMA_FIRST',\n 'DISPLAY_FI_LAST',\n 'BIRTHDATE',\n 'SCHOOL',\n 'COUNTRY',\n 'LAST_AFFILIATION']", "def player():\n\n name_id = 1\n return card_game.Player(name_id)", "def __init_player(self, name, client_id):\n self.current_input_number += 1\n\n # Construct init player message\n message = messagepb.ClientGameMessage()\n\n init_player = messagepb.InitPlayer()\n init_player.name = name\n init_player.client_id = client_id\n message.input_sequence_number = self.current_input_number\n message.init_player_payload.CopyFrom(init_player)\n\n self.ws.send(message.SerializeToString())", "def setUp(self):\n self.game = Game(\"Test Space Wars\")\n self.game.main_loop(testmode = True)", "def main(player1, player2, time):\n def make_player(name, num):\n if name == 'ai':\n return AIPlayer(num)\n elif name == 'random':\n return RandomPlayer(num)\n elif name == 'human':\n return HumanPlayer(num)\n\n game = Game(make_player(player1, 1), make_player(player2, 2), time)", "def test_player_name(player):\n assert player.name == \"Player 1\"", "def __init__(self):\n Player.Player.__init__(self)\n self.past_moves = [] # BEPCPlayer's past moves (for current match)\n self.opp_moves = [] # Opponent's past moves (for current match)\n self.win_record = [] # A 1 represents a win for that position's round, a 0 is a loss\n self.player_number = -1 # BEPCPlayer's number for current round\n self.opp_number = -1 # opponent's number for current round\n self.name = \"Bill and Paul\" # This player's name", "def test_fantasy_players_with_adp(self):\n pass", "def _create_players(self) -> None:\n murderer_index, policeman_index = random.sample(\n range(len(self._players)), k=2,\n )\n self._mafia = self._players[murderer_index] = Murderer(\n self._players[murderer_index].name,\n self._players[murderer_index].userid,\n )\n self._policeman = self._players[policeman_index] = Policeman(\n self._players[policeman_index].name,\n self._players[policeman_index].userid,\n )\n for player in self._players:\n bot.send_message(player.userid, f'Your role: {player._role}')", "def init_player_and_game(self):\n if not self.player_name:\n self.setup_gui_first_time()\n if self.player_type == NoxWindow.__name__:\n self.player = NoxWindow(self.player_name)\n menu = self.menuBar.addMenu(\"Emulator\")\n action = 
menu.addAction(f\"Make {self.player.name} restartable\")\n action.triggered.connect(self.player.set_config_for_bot)\n if self.player_type == BlueStacks.__name__:\n self.player = BlueStacks(self.player_name)\n if not self.player.restartable:\n self.restart_game_button.setEnabled(False)\n self.restart_game_button.setText(f\"{self.restart_game_button.text()}\\n\"\n \"[Unavailable (check logs)]\")\n self.restart_game_button = None\n self.game = Game(self.player)\n if self.game_app_rect:\n self.game.ui['GAME_APP'].button = Rect(*self.game_app_rect)", "def test_player_details_by_available(self):\n pass", "def test_constructor_valid_input(self):\n self.assertEqual(1, self.player1.get_player_id(), \"Player number should be number 1\")\n self.assertEqual(2, self.player2.get_player_id(), \"Player number should be number 2\")\n self.assertEqual(3, self.player3.get_player_id(), \"Player number should be number 3\")\n self.assertEqual(\"Los Angeles Lakers\", self.player_manager.get_team_name(), \"Team Name should be Los Angeles Lakers\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether this player has shares of the given hotel.
def has_shares_of(self, hotel):
    return self.shares_map[hotel] > 0
[ "def players_with_stocks(self, hotel):\r\n return [(p, p.shares_map[hotel])\r\n for p in self.players if p.has_shares_of(hotel)]", "def buy_stock(self, hotel):\r\n stock_price = self.board.stock_price(hotel)\r\n\r\n if stock_price is None:\r\n raise GameStateError(\"Cannot buy a hotel that is not in play\")\r\n\r\n if self.shares_map[hotel] == 0:\r\n raise GameStateError(\"{0} has no shares to buy\".format(hotel))\r\n\r\n if self.current_player.money < stock_price:\r\n raise GameStateError(\"current player can't afford stock for \"+hotel)\r\n\r\n self.shares_map[hotel] -= 1\r\n self.current_player.money -= stock_price\r\n self.current_player.shares_map[hotel] += 1", "def remove_all_shares(self, hotel):\r\n self.shares_map[hotel] = 0", "def is_sharing_heroes(squad1, squad2) -> bool:\n for hero in squad1.heroes:\n if hero in squad2.heroes:\n return True\n return False", "def sellback(self, name, sell_hotels, initial_state):\r\n player = self.player_with_name(name)\r\n for hotel in sell_hotels:\r\n if player.has_shares_of(hotel):\r\n hotel_price = initial_state.board.stock_price(hotel)\r\n\r\n # TODO: remove this\r\n assert hotel_price is not None\r\n\r\n stocks_amount = player.shares_map[hotel]\r\n player.money += hotel_price * stocks_amount\r\n\r\n self.shares_map[hotel] += stocks_amount\r\n player.remove_all_shares(hotel)", "def has_won(player):\n return player.x == 0 and player.y == 0 and player.has_gold", "def is_good_buy(self, ticker: str) -> bool:\n pass", "def gameover(self):\n for ship in self.ships_list:\n for coordinate in ship.ship_coordinates():\n if coordinate not in self.hits_lists:\n return False\n return True", "def shares_vertex(self, other) -> bool:\n points = {self.p1, self.p2, self.p3}\n return other.p1 in points or other.p2 in points or other.p3 in points", "def is_price_reached(share, price):\n\n max = share.High.max()\n if price <= max:\n return True\n else:\n return False", "def any_share(self):\n return next(iter(self._shares), None)", "def beats(self, winner, loser):\n\n dic = self.higher.get(loser)\n if (dic and dic.get(winner)):\n return True\n return False", "def is_home(self, team):\r\n return team == self.home", "def is_sufficient(self):\n return (self.threshold > 0) and (len(self._shares) >= self.threshold)", "async def is_trading(self, player):\n\n is_trading = False\n\n for trade in self.__cache:\n await asyncio.sleep(0)\n\n data = [trade[\"player_a\"], trade[\"player_b\"]]\n\n if player.id in data:\n is_trading = True\n break\n\n return is_trading", "def determineIfBought(self):\n \n #the current player is switched before this method is called so the pawn\n # has to be switched \n if self._current == 0:\n pawn = self._pawns[1]\n pos = pawn.getPosition()\n prop = self._prop[pos]\n #if the pawn is owned, pay rent and move the property card and text\n # off the screen \n if prop.getCanBuy() is False:\n self._money[1].subtractMoney(20)\n self._money[0].addMoney(20)\n prop.moveTo(1100, 300)\n self._text.moveTo((1250, 300))\n return False\n else:\n return True\n else:\n #same thing just for other pawn\n pawn = self._pawns[0]\n pos = pawn.getPosition()\n prop = self._prop[pos]\n if prop.getCanBuy() is False:\n self._money[0].subtractMoney(20)\n self._money[1].addMoney(20)\n prop.moveTo(1100, 300)\n self._text.moveTo((1250, 300))\n return False\n else:\n return True", "def has_fullhouse(self):\n \n ranks = [ ]\n c3=0\n c2=0\n for card in self.cards:\n ranks.append(card.rank)\n for i in ranks:\n if ranks.count(i) == 3:\n c3+=1\n if ranks.count(i) == 2:\n c2+=1\n 
if c3 == 3 and c2 == 2:\n return True\n return False", "def _bcastIsOwn(self, host):\n netinfo = NetworkInfo()\n local_addresses = netinfo.get_local_addresses()\n return host in local_addresses", "def share_with_neighbours(self, neighbourhood):\n for agent in self.agents:\n if agent == self:\n continue\n else:\n distance = self.distance_between(agent) \n if distance <= neighbourhood:\n sum = self.store + agent.store\n average = sum / 2\n self.store = average\n agent.store = average" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes all of this player's shares of the given hotel.
def remove_all_shares(self, hotel):
    self.shares_map[hotel] = 0
[ "def _remove_cheaters(self, board: Board) -> None:\n for player in self._cheaters:\n if player in self._players:\n del self._players[player]\n if player in board.live_players:\n board.remove_player(player)\n for observer in self._observers:\n observer.cheater_removed(player, board.get_board_state())", "def delete_share(self, share, share_server):", "def remove_share(self, share):\n self._shares.discard(share)", "def sellback(self, name, sell_hotels, initial_state):\r\n player = self.player_with_name(name)\r\n for hotel in sell_hotels:\r\n if player.has_shares_of(hotel):\r\n hotel_price = initial_state.board.stock_price(hotel)\r\n\r\n # TODO: remove this\r\n assert hotel_price is not None\r\n\r\n stocks_amount = player.shares_map[hotel]\r\n player.money += hotel_price * stocks_amount\r\n\r\n self.shares_map[hotel] += stocks_amount\r\n player.remove_all_shares(hotel)", "async def _prune(self, ctx):\n tokens = []\n playerdb = []\n settings = await self.config.guild(ctx.guild).all()\n for member in settings[\"playerstats\"]:\n if \"xuid\" in settings[\"playerstats\"][member]:\n xuid = settings[\"playerstats\"][member][\"xuid\"]\n playerdb.append(xuid)\n for cname in settings[\"clusters\"]:\n for sname in settings[\"clusters\"][cname][\"servers\"]:\n if \"api\" in settings[\"clusters\"][cname][\"servers\"][sname]:\n api = settings[\"clusters\"][cname][\"servers\"][sname][\"api\"]\n gt = settings[\"clusters\"][cname][\"servers\"][sname][\"gamertag\"]\n tokens.append((api, gt))\n if tokens:\n embed = discord.Embed(\n description=f\"Gathering Data...\"\n )\n embed.set_footer(text=\"This may take a while, sit back and relax.\")\n embed.set_thumbnail(url=LOADING)\n msg = await ctx.send(embed=embed)\n friendreq = \"https://xbl.io/api/v2/friends\"\n for host in tokens:\n purgelist = []\n key = host[0]\n gt = host[1]\n embed = discord.Embed(\n description=f\"Gathering data for {gt}...\"\n )\n embed.set_thumbnail(url=LOADING)\n await msg.edit(embed=embed)\n data, status = await self.apicall(friendreq, key)\n if status == 200:\n embed = discord.Embed(\n description=f\"Pruning players from {gt}...\"\n )\n embed.set_footer(text=\"This may take a while, sit back and relax.\")\n embed.set_thumbnail(url=LOADING)\n await msg.edit(embed=embed)\n async with ctx.typing():\n for friend in data[\"people\"]:\n xuid = friend[\"xuid\"]\n playertag = friend[\"gamertag\"]\n if xuid not in playerdb:\n purgelist.append((xuid, playertag))\n trash = len(purgelist)\n cur_member = 1\n for xuid in purgelist:\n status, remaining = await self._purgewipe(xuid[0], key)\n if int(remaining) < 30:\n await ctx.send(f\"`{gt}` low on remaining API calls `(30)`. 
Skipping for now.\")\n break\n elif int(status) != 200:\n await msg.edit(f\"`{gt}` failed to unfriend `{xuid[1]}`.\")\n continue\n else:\n embed = discord.Embed(\n description=f\"Pruning `{xuid[1]}` from {gt}...\\n\"\n f\"`{cur_member}/{trash}` pruned.\"\n )\n embed.set_footer(text=\"This may take a while, sit back and relax.\")\n embed.set_thumbnail(url=LOADING)\n await msg.edit(embed=embed)\n cur_member += 1\n\n embed = discord.Embed(\n description=f\"Purge Complete\",\n color=discord.Color.green()\n )\n embed.set_thumbnail(url=SUCCESS)\n await msg.edit(embed=embed)", "def remove_player(self, player_shot: Name):\n del self.players[player_shot]\n for name, player in self.players.items():\n player.remove_player(player_shot)", "def remove_all_friends(self): \r\n self.setOfFriends.clear()", "def purge():", "def removeOfferings(self):\n self.clearOfferingRelatedItems()\n self.lbxOfferings.clear()", "def unload():\n player_attributes.unregister_attribute('hostage_rescues')\n player_attributes.unregister_attribute('hostage_stops')\n player_attributes.unregister_attribute('hostage_kills')", "def remove_player(self):\n if self.num_player > 0:\n self.num_player -= 1\n self.available_place += 1\n self.update_full_status()\n self.save()", "def clear_used_players(team_num, in_team_num):\n del all_guard_on_team[:]\n for names in exper_players:\n for key,value in names.items():\n if value in team_num:\n del exper_players[0:3]\n for in_names in non_exper_players:\n for key,value in in_names.items():\n if value in in_team_num:\n del non_exper_players[0:3] \n main_menu()", "def clear_guesses(self):\n for player in self.players:\n player.guess = []", "def delete(self, specter):\n for w in list(self.wallets.keys()):\n wallet = self.wallets[w]\n self.delete_wallet(wallet, specter.bitcoin_datadir, specter.chain)\n delete_folder(self.data_folder)", "def delete(self):\r\n for alternative in self.alternatives:\r\n alternative.delete()\r\n self.reset_winner()\r\n self.redis.srem('experiments', self.name)\r\n self.redis.delete(self.name)\r\n self.increment_version()", "def clean_up_player(self):\n #LOGGER.debug('Clean up player')\n STATUS_PLAYING = boardgame.utils.Room.STATUS_PLAYING\n STATUS_WAITING = boardgame.utils.Room.STATUS_WAITING \n keys = self._player_list.keys()\n for key in keys: \n _player = self._player_list[key]\n if _player:\n current_time = time.time()\n playerstatus = _player.get_player_status()\n delta_time = current_time - playerstatus.active_time\n if (delta_time > MAX_IDLE_TIME_GAME) and (delta_time < MAX_IDLE_TIME_CONNECTION):\n current_room_id = playerstatus.current_room\n if current_room_id:\n room = self._game.get_room(current_room_id)\n if not room:\n LOGGER.error(' '.join(['This player',str(_player),'hold record',current_room_id,'which is not existed.']))\n continue\n if (room.get_playing_status() == STATUS_PLAYING) and (room.get_current_player() == playerstatus.username): \n room.part(_player)\n elif (room.get_playing_status() == STATUS_WAITING) and (playerstatus.status == STATUS_UNREADY):\n room.part(_player) \n if delta_time > MAX_IDLE_TIME_CONNECTION:\n current_room_id = playerstatus.current_room\n if current_room_id:\n room = self._game.get_room(current_room_id)\n room.part(_player) \n del self._player_list[key]\n LOGGER.debug('Player '+ playerstatus.username +' has quit the game.')", "def clear_books(self):\n self.prebuy = self.buybook\n self.presell = self.sellbook\n\n self.sellbook = []\n self.buybook = []", "def deletePlayers():\n with get_cursor() as cursor:\n 
cursor.execute(\"delete from allplayers\")", "def removefshare(self, protocol, vfs, sharename,\n fpg=None, fstore=None):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of (player, share_count) pairs for the players in this game who hold stocks in the given hotel, where share_count is the number of stocks that player holds in that hotel.
def players_with_stocks(self, hotel):
    return [(p, p.shares_map[hotel])
            for p in self.players if p.has_shares_of(hotel)]
[ "def majority_stockholders(self, hotel):\r\n players_with_stocks = self.players_with_stocks(hotel)\r\n max_stocks = max([s for p, s in players_with_stocks])\r\n return set([p for p, s in players_with_stocks if s == max_stocks])", "def get_sellbacks(self, tile, hotel):\n if self.game_state.board.valid_merge_placement(tile, hotel):\n acquirees = self.game_state.board.acquirees(tile, hotel)\n\n all_sellbacks = \\\n map(list,\n chain(*[combinations(acquirees, c) for c in range(len(acquirees)+1)]))\n names = [p.name for p in self.game_state.players]\n\n return imap(dict, product(*(product([n], all_sellbacks) for n in names)))\n else:\n return [{}]", "def winnings_total(players):\n return sum(map(lambda p: p.winnings, players))", "def minority_stockholders(self, hotel):\r\n not_majority_shareholders = \\\r\n [(p, s) for p, s in self.players_with_stocks(hotel)\r\n if p not in self.majority_stockholders(hotel)]\r\n if len(not_majority_shareholders) == 0:\r\n return set([])\r\n max_stocks = max([s for p, s in not_majority_shareholders])\r\n return set([p for p, s in not_majority_shareholders if s == max_stocks])", "def buy_stock(self, hotel):\r\n stock_price = self.board.stock_price(hotel)\r\n\r\n if stock_price is None:\r\n raise GameStateError(\"Cannot buy a hotel that is not in play\")\r\n\r\n if self.shares_map[hotel] == 0:\r\n raise GameStateError(\"{0} has no shares to buy\".format(hotel))\r\n\r\n if self.current_player.money < stock_price:\r\n raise GameStateError(\"current player can't afford stock for \"+hotel)\r\n\r\n self.shares_map[hotel] -= 1\r\n self.current_player.money -= stock_price\r\n self.current_player.shares_map[hotel] += 1", "def get_players(self):\r\n players = {}\r\n for score in self.get_scores():\r\n name = score.get_player()\r\n try:\r\n players[name] += 1\r\n except KeyError:\r\n players[name] = 1\r\n return players", "def gamerslots():\n connection = connect()\n cursor = connection.cursor()\n sqlquery = \"SELECT * FROM slots;\"\n cursor.execute(sqlquery)\n outcome = cursor.fetchall()\n # If the top two outcome have more than 0 wins AND are equal then reorder them\n # by total wins divided by total games played\n if (outcome[0][2] != 0) and (outcome[0][2] == outcome[1][2]):\n sqlquery = \"SELECT gamer_id, gamer_name, wonplayer, played \" \\\n \"FROM slots ORDER BY (cast(wonplayer AS DECIMAL)/played) DESC;\"\n cursor.execute(sqlquery)\n outcome = cursor.fetchall()\n connection.close()\n\n return outcome", "def playerStandings():\n\n conn = connect()\n cur = conn.cursor()\n\n cur.execute(\"select id, name, wins, (wins+loses) as matches from players order by wins desc\")\n\n return cur.fetchall()\n\n conn.close()", "def sellback(self, name, sell_hotels, initial_state):\r\n player = self.player_with_name(name)\r\n for hotel in sell_hotels:\r\n if player.has_shares_of(hotel):\r\n hotel_price = initial_state.board.stock_price(hotel)\r\n\r\n # TODO: remove this\r\n assert hotel_price is not None\r\n\r\n stocks_amount = player.shares_map[hotel]\r\n player.money += hotel_price * stocks_amount\r\n\r\n self.shares_map[hotel] += stocks_amount\r\n player.remove_all_shares(hotel)", "def analyze_games(game_data, player) -> str:\n equalize_table = {}\n for game_dict in game_data:\n if ignore_game(game_dict):\n continue\n (opening_name, status) = get_game_data(game_dict, player)\n if opening_name not in equalize_table:\n equalize_table[opening_name] = {\n Result.WIN: 0,\n Result.LOSS: 0,\n Result.EQUAL: 0\n }\n equalize_table[opening_name][status] += 1\n return 
equalize_table_to_str(equalize_table)", "def rank_stock(self):\n stock_list = []\n for player in self.player_list:\n stock_list.extend(player.stock_list)\n\n stock_list.sort(key=lambda stock: stock.transaction_count,\n reverse=True)\n\n for stock in stock_list[:100]:\n if stock.transaction_count == 0:\n continue\n print (\"Stock {0} current price {1}, \"\n \"transaction count {2}\".format(\n stock.identifier,\n stock.sales_price,\n stock.transaction_count))\n\n return stock_list", "def stats_winCount(data):\n\n # Prepare the result data\n result = helper_prepPerPlayerResults(data)\n # Crunch\n for game in data:\n winner = helper_getWinnerOfGame(game)\n result[winner] += 1\n # Print\n helper_print(\"Win counts\", result)", "def nb_ships_on_planets(self, player):\n nb = 0\n for p in self.planets:\n if p.owner is player:\n nb += p.nb_ships\n return nb", "def countgamers():\n connection = connect()\n cursor = connection.cursor()\n sqlquery = \"SELECT COUNT(*) FROM gamer\"\n cursor.execute(sqlquery)\n count = cursor.fetchone()[0]\n connection.close()\n return count", "def nb_ships_in_fleets(self, player):\n nb = 0\n for f in self.fleets:\n if f.owner is player:\n nb += f.nb_ships\n return nb", "def has_shares_of(self, hotel):\r\n return self.shares_map[hotel] > 0", "def payout(self, hotel, price, state):\r\n\r\n def to_current_player(player):\r\n \"\"\" returns the player from this gamestate with player's name \"\"\"\r\n return self.player_with_name(player.name)\r\n\r\n majority_stockholders = \\\r\n [to_current_player(p)\r\n for p in state.majority_stockholders(hotel)]\r\n minority_stockholders = \\\r\n [to_current_player(p)\r\n for p in state.minority_stockholders(hotel)]\r\n majority_payout = MAJORITY_PAYOUT_SCALE * price\r\n minority_payout = MINORITY_PAYOUT_SCALE * price\r\n\r\n if len(majority_stockholders) == 1:\r\n player = majority_stockholders.pop()\r\n player.money += majority_payout\r\n if len(minority_stockholders) == 1:\r\n player = minority_stockholders.pop()\r\n player.money += minority_payout\r\n elif len(minority_stockholders) > 1:\r\n payout = \\\r\n divide_and_round_integers(minority_payout,\r\n len(minority_stockholders))\r\n for player in minority_stockholders:\r\n player.money += payout\r\n else:\r\n payout = \\\r\n divide_and_round_integers(majority_payout + minority_payout,\r\n len(majority_stockholders))\r\n for player in majority_stockholders:\r\n player.money += payout", "def swissPairings():\n pairing_index = 0\n pairs = []\n DB = psycopg2.connect(\"dbname=tournament\")\n c = DB.cursor()\n \n c.execute(\"SELECT playerID from match_record\")\n number_of_pairings = len(c.fetchall())/2\n\n while pairing_index < number_of_pairings:\n c.execute(\"SELECT playerID, name from match_record ORDER BY wins DESC LIMIT 2 OFFSET %s\", (pairing_index * 2,))\n current_list = c.fetchall()\n new_tuple = current_list[0] + current_list[1]\n pairs.append(new_tuple)\n pairing_index = pairing_index + 1\n \n return pairs", "def getPlayerOMW(standings, playerId):\n c = connect()\n cur = c.cursor()\n\n played_against = []\n cur.execute(\"SELECT played_against FROM matchinfo WHERE player_id = %s;\", (playerId,))\n for row in cur:\n played_against.append(row)\n closeConnection(c,cur)\n \n sum_score = 0\n for opponent in played_against:\n sum_score = sum_score + getPlayerScore(standings, playerId)\n\n return sum_score" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the set of players in this game holding the most stocks in the given hotel.
def majority_stockholders(self, hotel):
    players_with_stocks = self.players_with_stocks(hotel)
    max_stocks = max([s for p, s in players_with_stocks])
    return set([p for p, s in players_with_stocks if s == max_stocks])
[ "def minority_stockholders(self, hotel):\r\n not_majority_shareholders = \\\r\n [(p, s) for p, s in self.players_with_stocks(hotel)\r\n if p not in self.majority_stockholders(hotel)]\r\n if len(not_majority_shareholders) == 0:\r\n return set([])\r\n max_stocks = max([s for p, s in not_majority_shareholders])\r\n return set([p for p, s in not_majority_shareholders if s == max_stocks])", "def players_with_stocks(self, hotel):\r\n return [(p, p.shares_map[hotel])\r\n for p in self.players if p.has_shares_of(hotel)]", "def get_sellbacks(self, tile, hotel):\n if self.game_state.board.valid_merge_placement(tile, hotel):\n acquirees = self.game_state.board.acquirees(tile, hotel)\n\n all_sellbacks = \\\n map(list,\n chain(*[combinations(acquirees, c) for c in range(len(acquirees)+1)]))\n names = [p.name for p in self.game_state.players]\n\n return imap(dict, product(*(product([n], all_sellbacks) for n in names)))\n else:\n return [{}]", "def select_strongest_using_pool(self, pool):\n\n scores = []\n\n for i,monkey in enumerate(self.monkeys):\n d = {}\n for stocksymbol, df in pool.items():\n gain = monkey.trade(df)\n d[stocksymbol] = gain\n \n d[\"aggregate\"] = sum(d.values())/len(d)\n\n scores.append(d)\n\n scores = [(i,val) for i,val in enumerate(scores)]\n scores.sort(key=lambda x:-x[1][\"aggregate\"])\n\n strongest = [i for i,val in scores[:self.num_keep]]\n strongest = [self.monkeys[i] for i in strongest]\n\n scores = [val[\"aggregate\"] for i,val in scores]\n \n return strongest, {\"high\": max(scores), \"average\": sum(scores)/len(scores), \"low\": min(scores)}", "def getTopGenres(self):\r\n\r\n genres = []\r\n for artist in (self.taShort + self.taMed + self.taLong):\r\n genres.extend(artist[\"genres\"])\r\n\r\n topGenreCount = Counter(genres).most_common()\r\n\r\n topGenres = []\r\n\r\n for genre in topGenreCount:\r\n topGenres.append(genre)\r\n\r\n return topGenres", "def _get_top_games(self):\n _top_games = dict()\n for entry in self._client.games.get_top():\n _top_games[int(entry['game']['id'])] = entry['game']['name']\n logging.debug('>> Found the following games: ' + ', '.join(_top_games.values()))\n return _top_games", "def most_popular_genre(self):\r\n\r\n my_dictionary = {\"Folk\": 0, \"Rock\": 0, \"Pop\": 0}\r\n # key => value\r\n i = 1\r\n for melody in self.__melodies:\r\n if melody.get_genre() in my_dictionary:\r\n my_dictionary[melody.get_genre()] = my_dictionary[melody.get_genre()] + 1\r\n elif not (melody.get_genre() in my_dictionary):\r\n my_dictionary.update({melody.get_genre(): i})\r\n max = 0\r\n get_max_genre = \"\"\r\n\r\n for x in my_dictionary.keys():\r\n if my_dictionary[x] > max:\r\n max = my_dictionary[x]\r\n get_max_genre = x\r\n return get_max_genre", "def strongest_planets(self, owner, count=1):\n planets = self.find_planets(owner=owner)\n if count > 0:\n sorted_planets = sorted(planets, key=lambda p : p.ships_available, reverse=True)\n if count >= len(planets):\n return sorted_planets\n return sorted_planets[:count]\n return []", "def retrieve_by_popularity(quota, cand_list, popularity_dict):\n result_list = [] # return1\n \n # Step 1: Descendingly sort the songs by popularity.\n item_list = [i for (i,_) in Counter(popularity_dict).most_common()]\n\n # Step 2: Retrieve the top-k most popular items.\n for item in item_list:\n if len(result_list) >= quota:\n break\n if item in cand_list:\n result_list.append(item)\n \n return result_list", "def top_genre_across(self) -> None:\n genres = {}\n for book in self.books:\n for genre in book.genres.values():\n 
cur_value = genres.get(genre.name)\n if cur_value is None:\n cur_value = 0\n genres[genre.name] = cur_value + genre.get_score(genre)\n\n max_genre = None\n max_genre_score = -1e10\n for genre in genres.keys():\n print(f\"Genre: {genre} has the score: {genres[genre]}\")\n if genres[genre] > max_genre_score:\n max_genre = genre\n max_genre_score = genres[genre]\n\n print(f\"The {max_genre} has highest score of {max_genre_score}\")", "def most(L):\n return max(set(L),key = L.count)", "def get_popular_tickets(tickets):\r\n popular_tickets = []\r\n #\r\n # TODO - your code here\r\n # \r\n for ticket in tickets:\r\n if len(ticket['people']['watchers']) >= 8:\r\n popular_tickets.append(ticket)\r\n return popular_tickets", "def employees_with_most_sales():\n\n print('--- Show employees with most items sold ---\\n')\n\n employees = get_employees()\n sales = get_sales()\n\n products_by_employee = {}\n\n # fill the dictionary with initial values\n for e in employees:\n products_by_employee[e['id']] = 0\n\n # add the number of products sold by each employee\n for s in sales:\n employee_id = s['employee_id']\n products_by_employee[employee_id] += s['num_products']\n\n top_employees = []\n\n # get the top 3\n for i in range(3):\n top_employee = {\"employee_id\": -1, \"products\": -1}\n\n for k in products_by_employee:\n total_products = products_by_employee[k]\n # if the current employee has more sales, replace top_employee\n if total_products > top_employee['products']:\n top_employee = {\n \"employee_id\": k,\n \"products\": total_products\n }\n # add to podium\n top_employees.append(top_employee)\n # delete from list to calculate other placees\n del products_by_employee[top_employee['employee_id']]\n\n print('Top 3 employees:')\n place = 0\n for el in top_employees:\n place += 1\n employee = find_by_key(employees, 'id', el['employee_id'])\n print(\"%s) %s %s with %s items sold\" % (\n place,\n employee['name'],\n employee['last_name'],\n el['products']\n ))", "def get_max_houses(self, battery):\n free_space = battery.capacity - battery.get_total_input()\n houses = self.unconnected_houses()\n houses.sort(key=attrgetter('output'))\n max_houses = 0\n if houses:\n for house in houses:\n if (free_space - house.output) >= 0:\n max_houses = max_houses + 1\n free_space = free_space - house.output \n return max_houses", "def winners(ballot):\r\n\r\n candidates ={person:0 for i in range(len(ballot)) for person in ballot[i].split()}\r\n for entry in ballot:\r\n persons = entry.split()\r\n for i in range(len(persons)):\r\n candidates[persons[i]] += len(persons) - i\r\n\r\n topVotes = candidates[max(candidates, key=candidates.get)]\r\n winners = []\r\n for person in candidates:\r\n if candidates[person] == topVotes:\r\n winners.append(person)\r\n\r\n return sorted(winners)", "def get_top_genres(self, songs, limit):\n spotify_api_manager = SpotifyAPIManager()\n genres = {}\n for song in songs:\n genre_lst = spotify_api_manager.get_song_genres(song)\n for genre in genre_lst:\n if genre not in genres:\n genres[genre] = 1\n else:\n genres[genre] += 1\n # get top 5 genres, or less if fewer are returned\n limit = min(5, len(genres.keys()))\n top_genres = dict(sorted(genres.items(), key=operator.itemgetter(1), reverse=True)[:limit])\n return top_genres", "def rank_stock(self):\n stock_list = []\n for player in self.player_list:\n stock_list.extend(player.stock_list)\n\n stock_list.sort(key=lambda stock: stock.transaction_count,\n reverse=True)\n\n for stock in stock_list[:100]:\n if stock.transaction_count == 0:\n 
continue\n print (\"Stock {0} current price {1}, \"\n \"transaction count {2}\".format(\n stock.identifier,\n stock.sales_price,\n stock.transaction_count))\n\n return stock_list", "def high_price(stock_prices):\n\n highest = stock_prices[0]\n\n for price in stock_prices:\n if price > highest:\n highest = price\n\n return highest", "def get_highest(self):\n max_lst = []\n max_score = 0\n for e, score in self.items():\n if score > max_score:\n max_lst=[e]\n max_score = score\n elif score == max_score:\n max_lst.append(e)\n return max_lst" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the set of the players in this game with the second most stocks in the hotel
def minority_stockholders(self, hotel):
    not_majority_shareholders = \
        [(p, s) for p, s in self.players_with_stocks(hotel)
         if p not in self.majority_stockholders(hotel)]
    if len(not_majority_shareholders) == 0:
        return set([])
    max_stocks = max([s for p, s in not_majority_shareholders])
    return set([p for p, s in not_majority_shareholders if s == max_stocks])
[ "def majority_stockholders(self, hotel):\r\n players_with_stocks = self.players_with_stocks(hotel)\r\n max_stocks = max([s for p, s in players_with_stocks])\r\n return set([p for p, s in players_with_stocks if s == max_stocks])", "def get_sellbacks(self, tile, hotel):\n if self.game_state.board.valid_merge_placement(tile, hotel):\n acquirees = self.game_state.board.acquirees(tile, hotel)\n\n all_sellbacks = \\\n map(list,\n chain(*[combinations(acquirees, c) for c in range(len(acquirees)+1)]))\n names = [p.name for p in self.game_state.players]\n\n return imap(dict, product(*(product([n], all_sellbacks) for n in names)))\n else:\n return [{}]", "def players_with_stocks(self, hotel):\r\n return [(p, p.shares_map[hotel])\r\n for p in self.players if p.has_shares_of(hotel)]", "def _get_top_games(self):\n _top_games = dict()\n for entry in self._client.games.get_top():\n _top_games[int(entry['game']['id'])] = entry['game']['name']\n logging.debug('>> Found the following games: ' + ', '.join(_top_games.values()))\n return _top_games", "def getTopGenres(self):\r\n\r\n genres = []\r\n for artist in (self.taShort + self.taMed + self.taLong):\r\n genres.extend(artist[\"genres\"])\r\n\r\n topGenreCount = Counter(genres).most_common()\r\n\r\n topGenres = []\r\n\r\n for genre in topGenreCount:\r\n topGenres.append(genre)\r\n\r\n return topGenres", "def select_strongest_using_pool(self, pool):\n\n scores = []\n\n for i,monkey in enumerate(self.monkeys):\n d = {}\n for stocksymbol, df in pool.items():\n gain = monkey.trade(df)\n d[stocksymbol] = gain\n \n d[\"aggregate\"] = sum(d.values())/len(d)\n\n scores.append(d)\n\n scores = [(i,val) for i,val in enumerate(scores)]\n scores.sort(key=lambda x:-x[1][\"aggregate\"])\n\n strongest = [i for i,val in scores[:self.num_keep]]\n strongest = [self.monkeys[i] for i in strongest]\n\n scores = [val[\"aggregate\"] for i,val in scores]\n \n return strongest, {\"high\": max(scores), \"average\": sum(scores)/len(scores), \"low\": min(scores)}", "def rank_stock(self):\n stock_list = []\n for player in self.player_list:\n stock_list.extend(player.stock_list)\n\n stock_list.sort(key=lambda stock: stock.transaction_count,\n reverse=True)\n\n for stock in stock_list[:100]:\n if stock.transaction_count == 0:\n continue\n print (\"Stock {0} current price {1}, \"\n \"transaction count {2}\".format(\n stock.identifier,\n stock.sales_price,\n stock.transaction_count))\n\n return stock_list", "def get_most_played(filename):\n games = read_data_from_file(filename)\n\n list_of_games = [game.rstrip().split(\"\\t\") for game in games]\n sold_copies = [(float(shooter[1])) for shooter in list_of_games]\n index_top_sold_game = sold_copies.index(max(sold_copies))\n\n return list_of_games[index_top_sold_game][0]", "def high_price(stock_prices):\n\n highest = stock_prices[0]\n\n for price in stock_prices:\n if price > highest:\n highest = price\n\n return highest", "def gamerslots():\n connection = connect()\n cursor = connection.cursor()\n sqlquery = \"SELECT * FROM slots;\"\n cursor.execute(sqlquery)\n outcome = cursor.fetchall()\n # If the top two outcome have more than 0 wins AND are equal then reorder them\n # by total wins divided by total games played\n if (outcome[0][2] != 0) and (outcome[0][2] == outcome[1][2]):\n sqlquery = \"SELECT gamer_id, gamer_name, wonplayer, played \" \\\n \"FROM slots ORDER BY (cast(wonplayer AS DECIMAL)/played) DESC;\"\n cursor.execute(sqlquery)\n outcome = cursor.fetchall()\n connection.close()\n\n return outcome", "def 
most_popular_genre(self):\r\n\r\n my_dictionary = {\"Folk\": 0, \"Rock\": 0, \"Pop\": 0}\r\n # key => value\r\n i = 1\r\n for melody in self.__melodies:\r\n if melody.get_genre() in my_dictionary:\r\n my_dictionary[melody.get_genre()] = my_dictionary[melody.get_genre()] + 1\r\n elif not (melody.get_genre() in my_dictionary):\r\n my_dictionary.update({melody.get_genre(): i})\r\n max = 0\r\n get_max_genre = \"\"\r\n\r\n for x in my_dictionary.keys():\r\n if my_dictionary[x] > max:\r\n max = my_dictionary[x]\r\n get_max_genre = x\r\n return get_max_genre", "def get_top_genres(self, songs, limit):\n spotify_api_manager = SpotifyAPIManager()\n genres = {}\n for song in songs:\n genre_lst = spotify_api_manager.get_song_genres(song)\n for genre in genre_lst:\n if genre not in genres:\n genres[genre] = 1\n else:\n genres[genre] += 1\n # get top 5 genres, or less if fewer are returned\n limit = min(5, len(genres.keys()))\n top_genres = dict(sorted(genres.items(), key=operator.itemgetter(1), reverse=True)[:limit])\n return top_genres", "def top_genre_across(self) -> None:\n genres = {}\n for book in self.books:\n for genre in book.genres.values():\n cur_value = genres.get(genre.name)\n if cur_value is None:\n cur_value = 0\n genres[genre.name] = cur_value + genre.get_score(genre)\n\n max_genre = None\n max_genre_score = -1e10\n for genre in genres.keys():\n print(f\"Genre: {genre} has the score: {genres[genre]}\")\n if genres[genre] > max_genre_score:\n max_genre = genre\n max_genre_score = genres[genre]\n\n print(f\"The {max_genre} has highest score of {max_genre_score}\")", "def retrieve_by_popularity(quota, cand_list, popularity_dict):\n result_list = [] # return1\n \n # Step 1: Descendingly sort the songs by popularity.\n item_list = [i for (i,_) in Counter(popularity_dict).most_common()]\n\n # Step 2: Retrieve the top-k most popular items.\n for item in item_list:\n if len(result_list) >= quota:\n break\n if item in cand_list:\n result_list.append(item)\n \n return result_list", "def top_match(self):\n\n # If no matches return empty list\n if len([x for x in self.matches().keys()]) == 0:\n return []\n\n # get and sort the list of matches previously used\n mtch_lst = [(k, v) for k, v in self.matches().items()]\n srtd = sorted(mtch_lst, reverse=True, key=lambda x: x[1])\n\n # check if there are any ties\n top_score = srtd[0][1]\n return [x[0] for x in srtd if x[1] == top_score]", "def get_top_players(lineup: List[Player], slot: str, n: int) -> List[Player]:\n # Gather players of the desired position\n eligible_players = []\n for player in lineup:\n if slot in player.eligibleSlots:\n eligible_players.append(player)\n\n return sorted(eligible_players, key=lambda x: x.points, reverse=True)[:n]", "def get_top(self, num: int=10) -> List[Tuple[str, int]]:\n self.db.execute(\"SELECT discord_id, score FROM players ORDER BY score DESC LIMIT ?;\", (num,))\n return self.db.fetchall()", "def get_highest(self):\n max_lst = []\n max_score = 0\n for e, score in self.items():\n if score > max_score:\n max_lst=[e]\n max_score = score\n elif score == max_score:\n max_lst.append(e)\n return max_lst", "def max_player_stats(self):\r\n game_players = list(self.players)\r\n play_players = list(self.drives.plays().players())\r\n max_players = OrderedDict()\r\n\r\n # So this is a little tricky. It's possible for a player to have\r\n # only statistics at the play level, and therefore not be represented\r\n # in the game level statistics. Therefore, we initialize our\r\n # max_players with play-by-play stats first. 
Then go back through\r\n # and combine them with available game statistics.\r\n for pplay in play_players:\r\n newp = nflgame.player.GamePlayerStats(pplay.playerid,\r\n pplay.name, pplay.home,\r\n pplay.team)\r\n maxstats = {}\r\n for stat, val in pplay._stats.iteritems():\r\n maxstats[stat] = val\r\n\r\n newp._overwrite_stats(maxstats)\r\n max_players[pplay.playerid] = newp\r\n\r\n for newp in max_players.itervalues():\r\n for pgame in game_players:\r\n if pgame.playerid != newp.playerid:\r\n continue\r\n\r\n maxstats = {}\r\n for stat, val in pgame._stats.iteritems():\r\n maxstats[stat] = max([val,\r\n newp._stats.get(stat, -_MAX_INT)])\r\n\r\n newp._overwrite_stats(maxstats)\r\n break\r\n return nflgame.seq.GenPlayerStats(max_players)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Places a tile for this game's current player at the given coord and updates a player with their new stock if there's a found. If not possible, raises a GameStateError.
def place_a_tile(self, coord, hotel=None):
    def _found():
        """
        This gamestate's current player makes a move to found the given
        hotel at the given coord, rewarding them with an appropriate amount
        of shares.
        """
        if hotel in self.board.hotels_in_play:
            raise GameStateError("tried to found a hotel that's \
                already in play" + hotel)
        else:
            self.board.found(coord, hotel)
        # TODO: What to do about the ELSE case here?
        # Relevant if players keep shares in acquired hotels
        #
        # currently is no stock is available
        # the founding player recieves nothing
        if self.shares_map[hotel] > FOUND_SHARES:
            self.current_player.add_shares(hotel, FOUND_SHARES)
            self.shares_map[hotel] -= FOUND_SHARES

    move_type = self.board.query(coord)

    if SINGLETON == move_type:
        if hotel is not None:
            raise GameStateError('Placing a singleton can not take a hotel')
        self.board.singleton(coord)
    elif FOUND == move_type:
        if hotel is None:
            raise GameStateError('found requires a hotel name')
        _found()
    elif GROW == move_type:
        if hotel is not None:
            raise GameStateError('Placing a grow should not take a hotel')
        self.board.grow(coord)
    elif MERGE == move_type:  # DOES NOTHING FOR THE PAYOUT
        if hotel is None:
            raise GameStateError('merge requires a hotel name')
        self.board.merge(coord, hotel)
    elif INVALID == move_type:
        raise GameStateError("illegal tile placement")

    self.current_player.tiles.remove(coord)
[ "def place_player(self, gridpos=(0,0)):\n x,y = gridpos\n if x < 0 or x > self.gridsize-1 or y < 0 or y > self.gridsize-1:\n # Restrict movement to within the grid\n return\n tile = self.grid[x][y]\n if tile:\n if type(tile) == Wall:\n # Don't move if the square is a wall\n return\n elif type(tile) == Teleporter:\n State.teleport = tile.destination\n return\n elif type(tile) == Key and tile.visible:\n tile.pick_up()\n elif type(tile) == Door and tile.locked:\n # Door is locked, don't move\n return\n old_x,old_y = State.player.gridpos\n State.player.gridpos = gridpos\n State.player.pos = self.calc_pos(gridpos)\n self.grid[old_x][old_y] = None\n self.grid[x][y] = State.player", "def place_player(self, player):\n # -- get previous position of the item\n pos = np.argwhere(self.matrix == player.item_value)\n if len(pos) != 0:\n # -- delete the item\n pos = pos[0]\n self.matrix[pos[0], pos[1]] = MapItemList.EMPTY\n\n # -- place the item\n self.place(player.box_x, player.box_y, player.item_value)", "def place_piece(board, x, y, player):\n can_place = isfree(board, x, y)\n if can_place:\n board[(x,y)] = player\n return can_place", "def move_entity(self, entity, x, y, is_player = False):\n old_tile = self.tiles[entity.x][entity.y]\n new_tile = self.tiles[x][y]\n \n old_tile.entity = None\n new_tile.entity = entity\n \n entity.x = x\n entity.y = y\n \n if is_player and new_tile.inventory:\n ui.Screens.msg.add_message(\"You see %s on the ground.\" % new_tile.inventory.indef_name)", "def placeSymbol(self,posY, posX, player):\n \"\"\"posX and posY are referenced to the board as carthesian values\"\"\"\n print((posY,posX))\n if posY > 2 or posX > 2 or posX < 0 or posY < 0:\n raise Exception.TTTException(4)\n if type(posX) is not int or type(posY) is not int:\n raise Exception.TTTException(5)\n if self.board[posY][posX] is ' ' and self.play == 1 and self.stack.canInsert(player) is True:\n self.stack.pop()\n li = list(self.board[posY])\n li[posX] = player\n self.board[posY] = li\n self.stack.push(player)\n self.win(posY, posX, player)\n self.printBoard()\n print(self.stack.getStack())\n elif self.play == 0:\n raise Exception.TTTException(2)\n elif self.board[posY][posX] is not ' ':\n raise Exception.TTTException(1)\n else:\n raise Exception.TTTException(3)", "def reserved_move(self, player_name, coord):\n player = self.get_active_player(player_name)\n board = self.get_board()\n if player.get_reserve_pieces() < 1:\n return 'no pieces in reserve'\n board[coord[0]][coord[1]].append(player.get_player_color())\n player.remove_reserve_piece()\n board = self.process_stack(board, coord, player)\n self.set_board(board)\n self.get_next_player(player)\n return 'successfully moved'", "def pickup(self, slot_x):\n player_inventory[self.name] += 1 #add item to inventory\n self.tile_y = 15 #last row reserved for inventory\n self.tile_x = slot_x #chosen inventory slot\n self.y = self.tile_y * TILESIZE #set new y position on screen\n self.x = self.tile_x * TILESIZE #set new x position on screen", "def spawn(self, tile_y, tile_x):\n self.tile_y = tile_y #y position on grid\n self.tile_x = tile_x #x position on grid\n self.level.frame[self.tile_y][self.tile_x] == 'O' #token for presence of spawned sprite\n self.y = self.tile_y * TILESIZE #y position on screen\n self.x = self.tile_x * TILESIZE #x position on screen", "def _update_location(self, take: int, loc, id: int = None):\n\n if self.wrap:\n x_l, x_h = loc[0]-self.play_window, loc[0]+self.play_window\n y_l, y_h = loc[1]-self.play_window, loc[1]+self.play_window\n 
else:\n x_l, x_h = max(0, loc[0]-self.play_window), min(self.grid_x, loc[0]+self.play_window)\n y_l, y_h = max(0, loc[1]-self.play_window), min(self.grid_y, loc[1]+self.play_window)\n\n if take:\n assert self.grid[loc] == 0 \n # We're new at this location, we have to \n # 1. Register ourselves with everyone around this location\n for x in range(x_l, x_h+1):\n for y in range(y_l, y_h+1):\n x_mod, y_mod = x % self.grid_x, y % self.grid_y\n\n if (x_mod,y_mod) != loc and self.grid[x_mod,y_mod] != 0:\n # Found a match-up\n self.match_ups[int(self.grid[x_mod,y_mod])].append(id)\n self.match_ups[id].append(int(self.grid[x_mod,y_mod]))\n\n # TODO append to play_neighbourhood\n self.players[id-1].play_neighbourhood.append(int(self.grid[x_mod,y_mod]))\n self.players[int(self.grid[x_mod,y_mod])-1].play_neighbourhood.append(id)\n\n # 2. Update the grid state\n self.grid[loc] = id\n else:\n assert self.grid[loc] == id\n # We're leaving this location, we have to \n # 1. We remove ourselves from this location\n for x in range(x_l, x_h+1):\n for y in range(y_l, y_h+1):\n x_mod, y_mod = x % self.grid_x, y % self.grid_y\n if (x_mod,y_mod) != loc and self.grid[x_mod,y_mod] != 0:\n # Found a match-up\n self.match_ups[int(self.grid[x_mod,y_mod])] = list(filter(lambda x: x != id, self.match_ups[int(self.grid[x_mod,y_mod])]))\n\n # TODO Remove ourselves from other players\n self.players[int(self.grid[x_mod,y_mod])-1].play_neighbourhood = list(filter(lambda x: x != id, self.players[int(self.grid[x_mod,y_mod])-1].play_neighbourhood))\n\n # TODO Reset neighbour list\n self.players[id-1].play_neighbourhood = []\n\n self.match_ups[id] = [] # Can reset ourselves\n\n # 2. Update the grid state\n self.grid[self.players[id-1].loc] = 0", "def tradeTiles(self):\n self.passCounter=0\n self.tilePile+=self.placedTiles\n self.placedTiles=[]\n while(\"\" in self.playerTiles[self.currentPlayer]):\n index=self.playerTiles[self.currentPlayer].index(\"\")\n self.playerTiles[self.currentPlayer].pop(index)\n self.getSevenTiles(self.currentPlayer)\n self.trade=not self.trade\n self.coverTiles=True\n self.messageBox=True\n tkMessageBox.showinfo(title=\"Turn Change\",\n message=\"Player %d are you ready?\"\n %(((self.currentPlayer+1)%len(\n self.playerTiles))+1))\n self.currentPlayer=((self.currentPlayer+1)%len(self.playerTiles))\n self.coverTiles=False\n self.messageBox=False\n self.selectedLetter=None\n self.passCounter=0", "def set_spawn(game_map: GameMap, coord: Coordinate):\n if not((0 <= coord[0] < game_map.size[0]) and (0 <= coord[1] < game_map.size[1])):\n return\n faction = FACTIONS.get(coord, None)\n if faction is None:\n return\n prev_spawn = SPAWN_POINTS.get(faction, None)\n SPAWN_POINTS[faction] = coord\n render_pixel(game_map, coord)\n if not prev_spawn is None:\n render_pixel(game_map, prev_spawn)", "def _give_player_tile(self, player, tile):\r\n player.tiles.add(tile)\r\n self.tile_deck.remove(tile)", "def set_tile(self, x, y, tile):\n self.tiles[(x, y)] = tile", "def place_ship(self,row,column):\n if self.board[row][column] == \"S\":\n return \"ship already present\"\n else:\n self.board[row][column] = \"S\"", "def test_place_tile():\r\n gc = GameController()\r\n board = Board(600, 600, 8, gc, WHITE, BLACK)\r\n board.place_tile(0, 0, board.WHITE)\r\n assert board.tiles[0][0] is not None\r\n assert board.tiles[0][0].color == board.WHITE\r\n assert board.tiles[0][0].x == board.BOX_SIZE//2\r\n assert board.tiles[0][0].y == board.BOX_SIZE//2\r\n\r\n board.place_tile(0, 1, board.BLACK)\r\n assert board.tiles[0][1].color == 
board.BLACK\r\n assert board.tiles[0][1].x == board.BOX_SIZE//2 + board.BOX_SIZE\r\n assert board.tiles[0][1].y == board.BOX_SIZE//2\r\n\r\n board.place_tile(0, 0, board.BLACK)\r\n assert board.tiles[0][1].color == board.BLACK", "def place(state: State, n_player: int, shape: str, col: str) -> int:\n if state.players[n_player].quota[shape] == 0:\n return -1\n\n for row in range(state.board.row - 1, -1, -1):\n if state.board[row, col].shape == ShapeConstant.BLANK:\n piece = Piece(shape, GameConstant.PLAYER_COLOR[n_player])\n state.board.set_piece(row, col, piece)\n state.players[n_player].quota[shape] -= 1\n return row\n\n return -1", "def add_tile(self, tile):\n x = tile.x\n y = tile.y\n if self.tile_exists(x, y):\n raise ValueError('A tile already exists at ({}, {})'.format(x, y))\n\n self.world[(x, y)] = tile", "def place(self):\n print('Its ' + self.identify_piece(self.game.turn) + ' player\\'s turn to play')\n while True:\n position = self.input_number('Choose a spot to place: ') - 1\n\n result = self.game.can_place_piece(self.game.turn, position)\n if result == Game.CanPlaceResults.Ok:\n self.game.place_piece(self.game.turn, position)\n player = self.game.get_player_from_piece(self.game.turn)\n player.previous_move[1] = position\n break\n elif result == Game.CanPlaceResults.Occupied:\n print(\"There is already something at this position.\")\n elif result == Game.CanPlaceResults.WrongPiece:\n print(\"Wrong turn (this shouldn't be possible to happen).\")\n elif result == Game.CanPlaceResults.WrongState:\n print(\"Placing is not allowed at this time (this shouldn't be possible to happen).\")\n return # Safety return here. Wrong state means no placement can happen\n elif result == Game.CanPlaceResults.OutsideBoard:\n print(\"Position is outside the board.\")\n else:\n print(\"Something went wrong.\")", "def add_entity_as_inventory(self, x, y, entity):\n tile = self.tiles[x][y]\n if tile.inventory is None:\n tile.inventory = entity\n entity.owner = map\n entity.x = x\n entity.y = y\n self.entities.append(entity)\n else:\n raise LogicException(\"Entity placed as inventory on a tile with full inventory.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sell all stocks from the given player back to the pool for each of the given hotels.
def sellback(self, name, sell_hotels, initial_state):
    player = self.player_with_name(name)
    for hotel in sell_hotels:
        if player.has_shares_of(hotel):
            hotel_price = initial_state.board.stock_price(hotel)

            # TODO: remove this
            assert hotel_price is not None

            stocks_amount = player.shares_map[hotel]
            player.money += hotel_price * stocks_amount

            self.shares_map[hotel] += stocks_amount
            player.remove_all_shares(hotel)
[ "def get_sellbacks(self, tile, hotel):\n if self.game_state.board.valid_merge_placement(tile, hotel):\n acquirees = self.game_state.board.acquirees(tile, hotel)\n\n all_sellbacks = \\\n map(list,\n chain(*[combinations(acquirees, c) for c in range(len(acquirees)+1)]))\n names = [p.name for p in self.game_state.players]\n\n return imap(dict, product(*(product([n], all_sellbacks) for n in names)))\n else:\n return [{}]", "def _sell_everything(self):\n\n for ticker, asset in self._assets.items():\n self.buy_asset(ticker, - asset.quantity)", "def buy_stock(self, hotel):\r\n stock_price = self.board.stock_price(hotel)\r\n\r\n if stock_price is None:\r\n raise GameStateError(\"Cannot buy a hotel that is not in play\")\r\n\r\n if self.shares_map[hotel] == 0:\r\n raise GameStateError(\"{0} has no shares to buy\".format(hotel))\r\n\r\n if self.current_player.money < stock_price:\r\n raise GameStateError(\"current player can't afford stock for \"+hotel)\r\n\r\n self.shares_map[hotel] -= 1\r\n self.current_player.money -= stock_price\r\n self.current_player.shares_map[hotel] += 1", "def payout(self, hotel, price, state):\r\n\r\n def to_current_player(player):\r\n \"\"\" returns the player from this gamestate with player's name \"\"\"\r\n return self.player_with_name(player.name)\r\n\r\n majority_stockholders = \\\r\n [to_current_player(p)\r\n for p in state.majority_stockholders(hotel)]\r\n minority_stockholders = \\\r\n [to_current_player(p)\r\n for p in state.minority_stockholders(hotel)]\r\n majority_payout = MAJORITY_PAYOUT_SCALE * price\r\n minority_payout = MINORITY_PAYOUT_SCALE * price\r\n\r\n if len(majority_stockholders) == 1:\r\n player = majority_stockholders.pop()\r\n player.money += majority_payout\r\n if len(minority_stockholders) == 1:\r\n player = minority_stockholders.pop()\r\n player.money += minority_payout\r\n elif len(minority_stockholders) > 1:\r\n payout = \\\r\n divide_and_round_integers(minority_payout,\r\n len(minority_stockholders))\r\n for player in minority_stockholders:\r\n player.money += payout\r\n else:\r\n payout = \\\r\n divide_and_round_integers(majority_payout + minority_payout,\r\n len(majority_stockholders))\r\n for player in majority_stockholders:\r\n player.money += payout", "def players_with_stocks(self, hotel):\r\n return [(p, p.shares_map[hotel])\r\n for p in self.players if p.has_shares_of(hotel)]", "def buySoldiers(self):\n\n num=self.getNumSoldiers()\n\n if num>0:\n ans=self._player.buySoldiers(num, self._city_obj)\n print ans", "def cut_losses(self, prices):\n # Price and inventory structure for reference\n # Prices = {product: (prices, amounts)}\n # Inventory = {product: (amount, cost)}\n # Determine which product to dump if it is in excess.\n excess_product = self.any_excess(set(prices.keys()))\n if excess_product:\n to_sell = excess_product\n sell_num = self.excess_stock(excess_product)\n\n # If there is no excess product, identify minimum asset loss\n else:\n final_assets = -math.inf\n to_sell = None\n sell_num = 0\n for product, info in self.inventory.items():\n # Calculate the number of the currently assessed product required to\n # offset the negative gold cost\n tmp_num = -int(self.gold // prices[product][0])\n # Only consider the items in inventory that can fully amortise the negative gold\n if info[0] >= tmp_num:\n # Assess the situation by creating a faux inventory for analysis\n tmp_inv = copy.deepcopy(self.inventory)\n tmp_inv, _ = self.update_inv_gold(prices, tmp_inv, product, tmp_num, gold=0, action=1)\n tmp_assets = sum([cost for amt, 
cost in tmp_inv.values()])\n if tmp_assets >= final_assets:\n final_assets = tmp_assets\n to_sell = product\n sell_num = tmp_num\n\n # If the player does not have enough in his inventory, he will decide to dump the most expensive of\n # any one of the player's inventory.\n if to_sell is None:\n to_sell = max(self.inventory, key=lambda x: self.inventory[x][0] * prices[x][0])\n sell_num = self.inventory[to_sell][0]\n\n # Return the command tuple for the stategy output\n return Command.SELL, (to_sell, sell_num)", "def match_orders(self, agents_dict):\n prices = []\n buy = self.buybook.copy()\n for order_buy in buy:\n price_buy = order_buy[0]\n time_buy = order_buy[1]\n quantity_buy = order_buy[2]\n buy_id = order_buy[3]\n agent_buy = agents_dict[buy_id]\n order_type_buy = order_buy[4]\n day = order_buy[5]\n\n print('BUY ORDER:', order_buy)\n\n len_sellbook = len(self.sellbook)\n if len_sellbook == 0:\n break\n\n sell = self.sellbook.copy()\n remaining_stocks = quantity_buy\n for order_sell in sell:\n\n print('first87', remaining_stocks)\n if remaining_stocks == 0:\n continue\n\n price_sell = order_sell[0]\n time_sell = order_sell[1]\n quantity_sell = order_sell[2]\n sell_id = order_sell[3]\n agent_sell = agents_dict[sell_id]\n order_type_sell = order_sell[4]\n print('SELL ORDER:', order_sell)\n Q_mod = '*'\n if sell_id == buy_id:\n print('Cannot match orders from the same agent')\n continue\n\n if quantity_sell == 0:\n print('Skipped due to q=0')\n continue\n\n if order_type_buy == 'L':\n print('in l1')\n \"\"\"\n For now limit orders only match at equal price and also matched with market orders on the sell side\n \"\"\"\n if order_type_sell == 'L':\n print('in l12')\n if price_sell == price_buy:\n\n if remaining_stocks <= quantity_sell:\n quantity = remaining_stocks\n agent_sell.record(direction=\"SELL\", price=price_sell, quantity=quantity)\n agent_buy.record(direction=\"BUY\", price=price_sell, quantity=quantity)\n prices.append(price_sell)\n print(\"DEAL1\", 'buyid:', id(agent_buy), 'sellid:', id(agent_sell), price_sell)\n remaining_stocks = 0\n Q_mod = quantity\n self.change_q_in_order(id=sell_id, delta_q=Q_mod, book=\"SELL\")\n row = [day, datetime.datetime.now().timestamp(), price_sell, quantity,\n id(agent_buy), id(agent_sell),\n agent_buy.type, agent_sell.type,\n order_type_buy, order_type_sell]\n self.add_to_db(row)\n\n elif remaining_stocks > quantity_sell:\n quantity = quantity_sell\n agent_sell.record(direction=\"SELL\", price=price_sell, quantity=quantity)\n agent_buy.record(direction=\"BUY\", price=price_sell, quantity=quantity)\n prices.append(price_sell)\n remaining_stocks = remaining_stocks - quantity\n Q_mod = quantity\n self.change_q_in_order(id=sell_id, delta_q=Q_mod, book=\"SELL\")\n\n print(\"DEAL2\", 'buyid:', id(agent_buy), 'sellid:', id(agent_sell), price_sell)\n row = [day, datetime.datetime.now().timestamp(), price_sell, quantity,\n id(agent_buy), id(agent_sell),\n agent_buy.type, agent_sell.type,\n order_type_buy, order_type_sell]\n self.add_to_db(row)\n\n elif price_sell < price_buy:\n # TODO: quantity should be adjusted since the price is changed\n if remaining_stocks <= quantity_sell:\n quantity = remaining_stocks\n agent_sell.record(direction=\"SELL\", price=price_sell, quantity=quantity)\n agent_buy.record(direction=\"BUY\", price=price_sell, quantity=quantity)\n prices.append(price_sell)\n\n remaining_stocks = 0\n Q_mod = quantity\n self.change_q_in_order(id=sell_id, delta_q=Q_mod, book=\"SELL\")\n print(\"DEAL3\", 'buyid:', id(agent_buy), 'sellid:', 
id(agent_sell), price_sell)\n row = [day, datetime.datetime.now().timestamp(), price_sell, quantity,\n id(agent_buy), id(agent_sell),\n agent_buy.type, agent_sell.type,\n order_type_buy, order_type_sell]\n self.add_to_db(row)\n\n elif remaining_stocks > quantity_sell:\n quantity = quantity_sell\n agent_sell.record(direction=\"SELL\", price=price_sell, quantity=quantity)\n agent_buy.record(direction=\"BUY\", price=price_sell, quantity=quantity)\n prices.append(price_sell)\n remaining_stocks = remaining_stocks - quantity\n Q_mod = quantity\n self.change_q_in_order(id=sell_id, delta_q=Q_mod, book=\"SELL\")\n print('DEAL4', 'buyid:', id(agent_buy), 'sellid:', id(agent_sell), price_sell)\n row = [day, datetime.datetime.now().timestamp(), price_sell, quantity,\n id(agent_buy), id(agent_sell),\n agent_buy.type, agent_sell.type,\n order_type_buy, order_type_sell]\n self.add_to_db(row)\n\n\n elif order_type_sell == 'M':\n print('in M11')\n\n if remaining_stocks <= quantity_sell:\n\n print('in d3')\n quantity = remaining_stocks\n agent_sell.record(direction=\"SELL\", price=price_buy, quantity=quantity)\n agent_buy.record(direction=\"BUY\", price=price_buy, quantity=quantity)\n prices.append(price_buy)\n remaining_stocks = 0\n Q_mod = quantity\n self.change_q_in_order(id=sell_id, delta_q=Q_mod, book=\"SELL\")\n print(\"DEAL5\", 'buyid:', id(agent_buy), 'sellid:', id(agent_sell), price_buy)\n row = [day, datetime.datetime.now().timestamp(), price_buy, quantity,\n id(agent_buy), id(agent_sell),\n agent_buy.type, agent_sell.type,\n order_type_buy, order_type_sell]\n self.add_to_db(row)\n\n\n elif remaining_stocks > quantity_sell:\n print('in d4')\n quantity = quantity_sell\n agent_sell.record(direction=\"SELL\", price=price_buy, quantity=quantity)\n agent_buy.record(direction=\"BUY\", price=price_buy, quantity=quantity)\n prices.append(price_buy)\n remaining_stocks = remaining_stocks - quantity\n print('DEAL6', 'buyid:', id(agent_buy), 'sellid:', id(agent_sell), price_buy)\n Q_mod = quantity\n self.change_q_in_order(id=sell_id, delta_q=Q_mod, book=\"SELL\")\n row = [day, datetime.datetime.now().timestamp(), price_buy, quantity,\n id(agent_buy), id(agent_sell),\n agent_buy.type, agent_sell.type,\n order_type_buy, order_type_sell]\n self.add_to_db(row)\n else:\n print(order_type_buy, order_type_sell, 'skipped L1')\n\n elif order_type_buy == 'M':\n print(agent_buy.money)\n if order_type_sell == 'L':\n print('in m2')\n \"\"\"\n For market orders any sell order is applicable\n \"\"\"\n if agent_buy.money >= price_sell * quantity_sell:\n quantity = quantity_sell\n agent_sell.record(direction=\"SELL\", price=price_sell, quantity=quantity)\n agent_buy.record(direction=\"BUY\", price=price_sell, quantity=quantity)\n prices.append(price_sell)\n Q_mod = quantity\n self.change_q_in_order(id=sell_id, delta_q=Q_mod, book=\"SELL\")\n print('DEAL7', 'buyid:', id(agent_buy), 'sellid:', id(agent_sell), price_sell)\n row = [day, datetime.datetime.now().timestamp(), price_sell, quantity,\n id(agent_buy), id(agent_sell),\n agent_buy.type, agent_sell.type,\n order_type_buy, order_type_sell]\n self.add_to_db(row)\n\n elif agent_buy.money <= price_sell * quantity_sell:\n quantity = int(agent_buy.money / price_sell)\n if quantity > 0:\n agent_sell.record(direction=\"SELL\", price=price_sell, quantity=quantity)\n agent_buy.record(direction=\"BUY\", price=price_sell, quantity=quantity)\n prices.append(price_sell)\n Q_mod = quantity\n self.change_q_in_order(id=sell_id, delta_q=Q_mod, book=\"SELL\")\n print('DEAL8', 'buyid:', 
id(agent_buy), 'sellid:', id(agent_sell), price_sell)\n row = [day, datetime.datetime.now().timestamp(), price_sell, quantity,\n id(agent_buy), id(agent_sell),\n agent_buy.type, agent_sell.type,\n order_type_buy, order_type_sell]\n self.add_to_db(row)\n\n elif order_type_sell == 'M':\n pass\n\n else:\n print('skipped m2')\n else:\n print('skipped everything: order_type is not in correct format',\n order_type_buy, order_type_sell)\n print('REMAINS TO BUY', remaining_stocks)\n # print(Q_mod)\n\n \"\"\"\n Modifying sellers quantity to avoid double selling\n Removing orders with quantity equal to zero\n \"\"\"\n # if Q_mod != '*':\n # self.change_q_in_order(id=sell_id, delta_q=Q_mod, book=\"SELL\")\n self.remove_zero_q_orders()\n # print(self.buybook)\n # print(self.sellbook)\n\n self.preprices = prices", "def merge_payout(self, tile, maybeHotel, initial_state, end_of_game=False):\r\n if not initial_state.board.valid_merge_placement(tile, maybeHotel):\r\n return\r\n\r\n acquirer = maybeHotel\r\n acquirees = initial_state.board.acquirees(tile, acquirer)\r\n\r\n for acquiree in acquirees:\r\n\r\n stock_price = initial_state.board.stock_price(acquiree)\r\n # TODO: Remove this...\r\n assert stock_price is not None\r\n\r\n self.payout(acquiree, stock_price, initial_state)", "def buy_sell(self, prices):\n # Check if the market sells the target list products\n to_trade = {target for target in self.profit_order if prices.get(target)}\n\n # if the market doesn't sell the target products, function ends\n if not to_trade:\n return None, None\n\n # check if it's the right market to sell\n # While the market is the right one to sell, we must have the an amount in our inventory to sell\n sell_set = {product for product in to_trade\n if prices[product][0] >= self.price_stats[product][1]\n and self.inventory[product][0] > 0}\n\n # The right market to buy MUST have non-zero items to buy\n buy_set = {product for product in to_trade\n if prices[product][0] <= self.price_stats[product][2]\n and prices[product][1] > 0}\n\n \n return buy_set, sell_set", "def dump_stock(self, prices):\n for product in prices.keys():\n # If a particular product in inventory does not meet the goal and occupies inventory space\n # Dump the item. 
Otherwise, sell any excess stock.\n if 0 < self.inventory[product][0] < self.goal[product]:\n to_dump = self.inventory[product][0]\n else:\n to_dump = self.excess_stock(product)\n\n if to_dump:\n return Command.SELL, (product, to_dump)\n return Command.PASS, None", "def sell(self, bike):\n self.profit += (1.2*(bike.cost)-(bike.cost)) \n self.inventory.remove(bike)", "def check_sell(self, data={}):\r\n \r\n to_sell = []\r\n rank_dict = {}\r\n for my_position in self.position:\r\n \r\n # compare current_price with value\r\n actual_value = my_position['current_price'] * my_position['num_shares']\r\n bought_value = my_position['total_invested']\r\n # check if current price significantly dropped from bought\r\n if bought_value * (1 - self.stop_loss) >= actual_value:\r\n to_sell.append(my_position)\r\n #rank the coin based on distance from bought value to ensure priority over other sell conditions \r\n rank_dict[my_position['code']] = actual_value - bought_value\r\n elif bought_value * self.profit_take <= actual_value:\r\n to_sell.append(my_position)\r\n # rank the coin based on the gain of selling\r\n rank_dict[my_position['code']] = bought_value - actual_value\r\n elif data[my_position[\"code\"]][\"close\"] >= self.statbot.calc_bands(my_position[\"code\"])[1] and self.statbot.get_rsi(my_position[\"code\"]) >= 70:\r\n diff = abs(data[my_position[\"code\"]][\"close\"] - self.statbot.calc_bands(my_position[\"code\"])[1])\r\n to_sell.append(my_position)\r\n #rank the coin based on the score calculated in get score using difference between bands and rsi\r\n rank_dict[my_position['code']] = self.get_score(SELL, self.statbot.get_rsi(my_position['code']), diff)\r\n \r\n for my_position in to_sell:\r\n self.sell(my_position['code'], my_position['current_price'])\r\n \r\n if len(self.selling) != 0:\r\n # sorts buying based on value of rank\r\n self.selling.sort(key = lambda x : rank_dict[x['code']])", "def majority_stockholders(self, hotel):\r\n players_with_stocks = self.players_with_stocks(hotel)\r\n max_stocks = max([s for p, s in players_with_stocks])\r\n return set([p for p, s in players_with_stocks if s == max_stocks])", "def unequip(self,slot_number):\n for n in range(1,10):\n if self.backpack[n] == \"x\":\n self.backpack[n] = self.inventory.pop([slot_number])", "def _UpdateBetsSplitAction(self, current_hand, split_hand):\n for bet in current_hand.bets:\n # Sanity check for wallet to pay.\n if bet.wallet_name not in self.wallets:\n raise PlayerException('Double bet missing wallet named: %s' % bet.wallet_name)\n\n self.wallets[bet.wallet_name].money_units -= bet.money_units\n split_hand.AddBet(bet)", "def remove_all_shares(self, hotel):\r\n self.shares_map[hotel] = 0", "def buy_in(t, amt):\n for player in t.players:\n place_bet(amt, player, t.pot)", "def get_all_buy_opportunities(tickers: List[str]) -> List[BuyOpportunity]:\n with Pool(processes=NUM_WORKERS) as process_pool:\n buy_opportunities: List[BuyOpportunity] = process_pool.map(get_buy_opportunity, tickers)\n \n return buy_opportunities" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this game's current player buys a share of stock in the given hotel
def buy_stock(self, hotel):
    stock_price = self.board.stock_price(hotel)

    if stock_price is None:
        raise GameStateError("Cannot buy a hotel that is not in play")

    if self.shares_map[hotel] == 0:
        raise GameStateError("{0} has no shares to buy".format(hotel))

    if self.current_player.money < stock_price:
        raise GameStateError("current player can't afford stock for "+hotel)

    self.shares_map[hotel] -= 1
    self.current_player.money -= stock_price
    self.current_player.shares_map[hotel] += 1
[ "def sellback(self, name, sell_hotels, initial_state):\r\n player = self.player_with_name(name)\r\n for hotel in sell_hotels:\r\n if player.has_shares_of(hotel):\r\n hotel_price = initial_state.board.stock_price(hotel)\r\n\r\n # TODO: remove this\r\n assert hotel_price is not None\r\n\r\n stocks_amount = player.shares_map[hotel]\r\n player.money += hotel_price * stocks_amount\r\n\r\n self.shares_map[hotel] += stocks_amount\r\n player.remove_all_shares(hotel)", "def players_with_stocks(self, hotel):\r\n return [(p, p.shares_map[hotel])\r\n for p in self.players if p.has_shares_of(hotel)]", "def has_shares_of(self, hotel):\r\n return self.shares_map[hotel] > 0", "def buy(self, date, shares, price):\n # step 1\n fee = self.broker.calcFee(shares, price)\n # step 2\n order_volume = shares * price\n # step 3\n if self.broker.balance < ( order_volume + fee ) :\n # zero transaction\n shares = 0\n fee = 0\n order_volume = shares * price\n # step 4\n self.orderbook.addTransaction(date, 'buy', self.__stock, shares, price, fee)\n self.broker.balance -= order_volume + fee", "def payout(self, hotel, price, state):\r\n\r\n def to_current_player(player):\r\n \"\"\" returns the player from this gamestate with player's name \"\"\"\r\n return self.player_with_name(player.name)\r\n\r\n majority_stockholders = \\\r\n [to_current_player(p)\r\n for p in state.majority_stockholders(hotel)]\r\n minority_stockholders = \\\r\n [to_current_player(p)\r\n for p in state.minority_stockholders(hotel)]\r\n majority_payout = MAJORITY_PAYOUT_SCALE * price\r\n minority_payout = MINORITY_PAYOUT_SCALE * price\r\n\r\n if len(majority_stockholders) == 1:\r\n player = majority_stockholders.pop()\r\n player.money += majority_payout\r\n if len(minority_stockholders) == 1:\r\n player = minority_stockholders.pop()\r\n player.money += minority_payout\r\n elif len(minority_stockholders) > 1:\r\n payout = \\\r\n divide_and_round_integers(minority_payout,\r\n len(minority_stockholders))\r\n for player in minority_stockholders:\r\n player.money += payout\r\n else:\r\n payout = \\\r\n divide_and_round_integers(majority_payout + minority_payout,\r\n len(majority_stockholders))\r\n for player in majority_stockholders:\r\n player.money += payout", "def _buy(self):\r\n self._handleLogs(self.game.buy())\r\n self.redraw()", "async def buy(self, ctx, stock: str, amount: int):\n if not self.trading:\n await ctx.channel.send(embed=self.embed(\"Trading has been disabled currently!\"))\n return\n if ctx.author.id not in self.users:\n await ctx.channel.send(embed=self.embed(\"You need to set your handle using the `+register` command first.\"))\n return\n if amount <= 0:\n await ctx.channel.send(embed=self.embed(\"You must buy atleast 1 stock.\"))\n return\n info = self.db.get_stock(stock)\n rating = await self.cf.get_rating(stock)\n money = self.db.get_balance(ctx.author.id)\n if len(info) == 0:\n await ctx.channel.send(embed=self.embed(\"No stock called '%s' found in database.\" % stock, 0xFF0000))\n return\n market = 0\n owned = 0\n owns = False\n for owner, quantity in info:\n if owner == ctx.author.id:\n owns = True\n owned = quantity\n if owner == -1:\n market = quantity\n if amount > market:\n await ctx.channel.send(embed=self.embed(\"You cannot buy more stocks than avaiable in the market!\"))\n return\n cost = amount * self.stock_value(rating)\n if cost > money:\n await ctx.channel.send(embed=self.embed(\"You do not have enough money to purchase %d stocks!\" % amount))\n return\n self.db.set_balance(ctx.author.id, money - cost)\n if owns:\n 
self.db.update_holding(ctx.author.id, stock, owned + amount)\n else:\n self.db.create_holding(ctx.author.id, stock, owned + amount)\n self.db.update_market(stock, market - amount)\n\n await ctx.channel.send(\n embed=self.embed(ctx.author.mention + \", Successfully purchased %d stocks of **%s** for **$%.2f!**\"\n \"\\n\\n Your new balance is **$%.2f**.\"\n % (amount, stock, cost, money-cost), 0x00FF00))", "async def buy(self, ctx, *, auction_item: str):\n author = ctx.author\n await self._set_bank(author)\n i = 0;\n items = [item for item in self._shop[\"picitems\"] if item[\"name\"] in self.settings[\"user\"][str(author.id)][\"items\"]]\n for item2 in self._shop[\"picitems\"]:\n if item2[\"name\"].lower() == auction_item.lower():\n for item in items:\n i = i + 1\n if i >= 1:\n await ctx.send(\"You already own a pickaxe, sell your pickaxe and try again :no_entry:\")\n return\n filtered = filter(lambda x: x[\"name\"].lower() == auction_item.lower(), self._auction[\"items\"]) \n filtered = sorted(filtered, key=lambda x: x[\"price\"])\n if not filtered:\n await ctx.send(\"There is no `{}` on the auction house :no_entry:\".format(auction_item.title()))\n return\n server = ctx.guild\n channel = ctx.channel\n author = ctx.author\n \n if server.id not in PagedResultData.paged_results:\n PagedResultData.paged_results[server.id] = dict()\n \n if channel.id not in PagedResultData.paged_results[server.id]:\n PagedResultData.paged_results[server.id][channel.id] = dict()\n \n paged_result = PagedResult(filtered, lambda item: \"\\n**Name:** \" + item[\"name\"] + \"\\n**Price:** \" + str(item[\"price\"]) + \"\\n\" + (\"**Durability:** \" + str(item[\"durability\"]) + \"\\n\" if \"durability\" in item else \"\") + (\"**Amount:** \" + str(item[\"amount\"]) + \"\\n\" if \"amount\" in item else \"**Amount:** 1\"))\n paged_result.list_indexes = True\n paged_result.selectable = True\n \n async def selected(event):\n item = event.entry\n if item not in self._auction[\"items\"]:\n await channel.send(\"That item was recently bought :no_entry:\")\n return\n owner = discord.utils.get(self.bot.get_all_members(), id=int(item[\"ownerid\"]))\n if owner == ctx.message.author:\n await channel.send(\"You can't buy your own items :no_entry:\")\n return\n if item[\"price\"] > self.settings[\"user\"][str(author.id)][\"balance\"]:\n await channel.send(\"You don't have enough money for that item :no_entry:\")\n return\n self._auction[\"items\"].remove(item)\n \n self.settings[\"user\"][str(author.id)][\"balance\"] -= item[\"price\"]\n self.settings[\"user\"][str(owner.id)][\"balance\"] += item[\"price\"]\n \n try:\n if item[\"durability\"]:\n self.settings[\"user\"][str(author.id)][\"pickdur\"] = item[\"durability\"]\n except:\n pass\n \n try:\n if item[\"amount\"]:\n pass\n except:\n item[\"amount\"] = 1\n \n for x in range(0, item[\"amount\"]):\n self.settings[\"user\"][str(author.id)][\"items\"].append(item[\"name\"].title())\n try:\n await channel.send(\"You just bought `{} {}` for **${:,}** :tada:\".format(item[\"amount\"], item[\"name\"], item[\"price\"]))\n except:\n await channel.send(\"You just bought `1 {}` for **${:,}** :tada:\".format(item[\"name\"], item[\"price\"]))\n try:\n await owner.send(\"Your `{}` just got bought on the auction house, it was sold for **${:,}** :tada:\".format(item[\"name\"], item[\"price\"]))\n except:\n pass\n \n dataIO.save_json(self._auction_file, self._auction)\n dataIO.save_json(self.location, self.settings)\n \n paged_result.on_select = selected\n\n message = await 
channel.send(embed=paged_result.get_current_page_embed())\n\n paged_result.message_id = message.id\n\n PagedResultData.paged_results[server.id][channel.id][author.id] = paged_result", "def wins(self, pot, share=1):\n amt = share*pot.chips\n print(\"+ %d chips\" % amt)\n self.chips += amt", "def buy_in(t, amt):\n for player in t.players:\n place_bet(amt, player, t.pot)", "def execute(self, action, spread, stock1_price, stock2_price, penalty):\n action = Actions(action)\n if action == Actions.BUY:\n self.spread_when_bought = spread\n if self.status == Status.INVESTED_IN_SPREAD:\n first = False\n if(penalty != 1):\n if self.stock1_balance > 0:\n # sell stock 1\n first = True\n self.balance, self.stock1_balance = self.sell(stock1_price, self.stock1_balance)\n elif self.stock2_balance > 0:\n # sell stock 2\n self.balance, self.stock2_balance = self.sell(stock2_price, self.stock2_balance)\n\n self.balance = self.balance*penalty\n\n if first:\n self.balance, self.stock1_balance = self.buy(stock1_price)\n else:\n self.balance, self.stock2_balance = self.buy(stock2_price)\n\n return # Cannot invest if already invested\n\n # Invest in spread\n if spread < 0:\n # buy stock 1\n self.balance, self.stock1_balance = self.buy(stock1_price)\n else:\n # buy stock 2\n self.balance, self.stock2_balance = self.buy(stock2_price)\n\n self.status = Status.INVESTED_IN_SPREAD\n elif action == Actions.SELL:\n if self.status == Status.OUT_OF_SPREAD:\n self.balance = self.balance*penalty\n return # Cannot sell if not invested\n\n if self.stock1_balance > 0:\n # sell stock 1\n self.balance, self.stock1_balance = self.sell(stock1_price, self.stock1_balance)\n elif self.stock2_balance > 0:\n # sell stock 2\n self.balance, self.stock2_balance = self.sell(stock2_price, self.stock2_balance)\n\n self.status = Status.OUT_OF_SPREAD\n elif action == Actions.HOLD:\n \n return", "def check_buy(self, data):\r\n for my_position in self.position:\r\n high_price = data[my_position['code']]['high']\r\n value_that_we_have = my_position['value']\r\n \r\n # if one of our stocks has dropped by 5%, buy more of it in the hopes that it will go up\r\n if value_that_we_have * .95 >= high_price:\r\n self.buy(my_position['code'], high_price, my_position['total_invested'] * 0.025)\r\n \r\n rank_dict = {}\r\n # check for new stock\r\n for key in data:\r\n # if key doesnt exist in position \r\n if not any(key in pos for pos in self.position):\r\n diff = abs(data[key]['close'] - self.statbot.calc_bands(key)[0])\r\n if data[key][\"close\"] < self.statbot.calc_bands(key)[0] and self.statbot.get_rsi(key) <= 30:\r\n #access exchange api to purchase more stock\r\n self.add_buy(key, data[key][\"close\"], self.get_buy_amount())\r\n rank_dict[key] = self.get_score(BUY, self.statbot.get_rsi(key), diff)\r\n\r\n # check if buying any\r\n if len(self.buying) != 0:\r\n # sorts buying based on value of rank\r\n self.buying.sort(key = lambda x : -rank_dict[x['code']])", "def sell(self, beverage_name):\n dict = {}\n for i in self.stock:\n dict[i.name] = i.price\n\n if beverage_name in dict:\n print(\"Here is your \" + beverage_name + \"!\")\n else:\n print(\"Sorry! 
I do not have \" + beverage_name + \"...\")", "def share(formonline, overseer):", "def _transfer(self, sell_item, buy_item, price_paid):\n # transfer ownership\n seller = sell_item.owner\n sell_item.owner = buy_item.owner\n sell_item.price = price_paid\n # remove items from Market\n self.items_to_sell = [x for x in self.items_to_sell if x is not sell_item]\n self.items_to_buy = [x for x in self.items_to_buy if x is not buy_item]\n return sell_item", "async def sell(self, ctx, item: str, price: int, amount: int=None):\n author = ctx.author\n if amount == None:\n amount = 1\n if amount <= 0:\n await ctx.send(\"You can't sell no items, we're not ebay :no_entry:\")\n return\n if price < 0:\n await ctx.send(\"You can't sell something for less than $0 :no_entry:\")\n return\n await self._set_bank(author)\n item3 = [x.lower() for x in self.settings[\"user\"][str(author.id)][\"items\"]]\n if item3.count(item.lower()) < amount:\n await ctx.send(\"You don't have that amount of `{}` to sell :no_entry:\".format(item))\n return \n if item.lower() in item3:\n auction = {}\n for item2 in self._shop[\"picitems\"]:\n if item.lower() == item2[\"name\"].lower():\n auction[\"durability\"] = self.settings[\"user\"][str(author.id)][\"pickdur\"]\n self.settings[\"user\"][str(author.id)][\"pickdur\"] = None\n for item2 in self._shop[\"items\"] + self._mine[\"items\"]:\n if item.lower() == item2[\"name\"].lower():\n auction[\"durability\"] = None\n auction[\"name\"] = item.title()\n auction[\"ownerid\"] = str(author.id)\n auction[\"price\"] = price\n auction[\"amount\"] = amount\n for x in range(0, amount):\n self.settings[\"user\"][str(author.id)][\"items\"].remove(item.title())\n self._auction[\"items\"].append(auction)\n dataIO.save_json(self._auction_file, self._auction)\n dataIO.save_json(self.location, self.settings)\n await ctx.send(\"Your item has been put on the auction house <:done:403285928233402378>\")\n else:\n await ctx.send(\"You don't own that item :no_entry:\")", "def soldout():", "def release(self):\n\n self.transaction(self.holdingshares, ['Cover', 'Sell'][self.action])\n self.holding = 0\n print \" --- %s: released %s shares at gain of %s ---\" % (self.ticker, self.shares, self.gains)", "def buy_stock(self, stock_symbol, quantity, price):\n picked_stock = self.get_stock(stock_symbol)\n\n timestamp = time.time()\n new_trade = Trade(\n timestamp,\n quantity,\n Trade.BUY_INDICATOR,\n price\n )\n\n picked_stock.record_trade(new_trade)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform a merger payout if the given tile and hotel constitute a merger.
def merge_payout(self, tile, maybeHotel, initial_state, end_of_game=False):
    if not initial_state.board.valid_merge_placement(tile, maybeHotel):
        return

    acquirer = maybeHotel
    acquirees = initial_state.board.acquirees(tile, acquirer)

    for acquiree in acquirees:

        stock_price = initial_state.board.stock_price(acquiree)
        # TODO: Remove this...
        assert stock_price is not None

        self.payout(acquiree, stock_price, initial_state)
[ "def payout(self, hotel, price, state):\r\n\r\n def to_current_player(player):\r\n \"\"\" returns the player from this gamestate with player's name \"\"\"\r\n return self.player_with_name(player.name)\r\n\r\n majority_stockholders = \\\r\n [to_current_player(p)\r\n for p in state.majority_stockholders(hotel)]\r\n minority_stockholders = \\\r\n [to_current_player(p)\r\n for p in state.minority_stockholders(hotel)]\r\n majority_payout = MAJORITY_PAYOUT_SCALE * price\r\n minority_payout = MINORITY_PAYOUT_SCALE * price\r\n\r\n if len(majority_stockholders) == 1:\r\n player = majority_stockholders.pop()\r\n player.money += majority_payout\r\n if len(minority_stockholders) == 1:\r\n player = minority_stockholders.pop()\r\n player.money += minority_payout\r\n elif len(minority_stockholders) > 1:\r\n payout = \\\r\n divide_and_round_integers(minority_payout,\r\n len(minority_stockholders))\r\n for player in minority_stockholders:\r\n player.money += payout\r\n else:\r\n payout = \\\r\n divide_and_round_integers(majority_payout + minority_payout,\r\n len(majority_stockholders))\r\n for player in majority_stockholders:\r\n player.money += payout", "def place_a_tile(self, coord, hotel=None):\r\n def _found():\r\n \"\"\"\r\n This gamestate's current player makes a move to found the given\r\n hotel at the given coord, rewarding them with an appropriate amount\r\n of shares.\r\n\r\n \"\"\"\r\n if hotel in self.board.hotels_in_play:\r\n raise GameStateError(\"tried to found a hotel that's \\\r\n already in play\" + hotel)\r\n else:\r\n self.board.found(coord, hotel)\r\n # TODO: What to do about the ELSE case here?\r\n # Relevant if players keep shares in acquired hotels\r\n #\r\n # currently is no stock is available\r\n # the founding player recieves nothing\r\n if self.shares_map[hotel] > FOUND_SHARES:\r\n self.current_player.add_shares(hotel, FOUND_SHARES)\r\n self.shares_map[hotel] -= FOUND_SHARES\r\n\r\n move_type = self.board.query(coord)\r\n\r\n if SINGLETON == move_type:\r\n if hotel is not None:\r\n raise GameStateError('Placing a singleton can not take a hotel')\r\n self.board.singleton(coord)\r\n elif FOUND == move_type:\r\n if hotel is None:\r\n raise GameStateError('found requires a hotel name')\r\n _found()\r\n elif GROW == move_type:\r\n if hotel is not None:\r\n raise GameStateError('Placing a grow should not take a hotel')\r\n self.board.grow(coord)\r\n elif MERGE == move_type: # DOES NOTHING FOR THE PAYOUT\r\n if hotel is None:\r\n raise GameStateError('merge requires a hotel name')\r\n self.board.merge(coord, hotel)\r\n elif INVALID == move_type:\r\n raise GameStateError(\"illegal tile placement\")\r\n\r\n self.current_player.tiles.remove(coord)", "def is_sink(elevation_map: List[List[int]], cell: List[int]) -> bool:\n\n row = cell[0]\n column = cell[1]\n em_len = len(elevation_map)\n if in_elevation_map(row, column, em_len):\n for i in range(row-1, row+2):\n for j in range(column-1, column+2):\n if in_elevation_map(i, j, em_len) and \\\n elevation_map[i][j] < elevation_map[row][column]:\n return False\n return True\n else:\n return False", "def is_sink(elevation_map: List[List[int]], cell: List[int]) -> bool:\n if cell[0] <= len(elevation_map) - 1 and cell[1] <= len(elevation_map) - 1:\n for i in [-1, 0, 1]:\n for j in [-1, 0, 1]:\n if (cell[0] + i in range(len(elevation_map)) \n and cell[1] + j in range(len(elevation_map)) \n and elevation_map[cell[0] + i][cell[1] + j] <\n elevation_map[cell[0]][cell[1]]):\n return False\n return True\n else:\n return False", "def 
get_sellbacks(self, tile, hotel):\n if self.game_state.board.valid_merge_placement(tile, hotel):\n acquirees = self.game_state.board.acquirees(tile, hotel)\n\n all_sellbacks = \\\n map(list,\n chain(*[combinations(acquirees, c) for c in range(len(acquirees)+1)]))\n names = [p.name for p in self.game_state.players]\n\n return imap(dict, product(*(product([n], all_sellbacks) for n in names)))\n else:\n return [{}]", "def visit(self, hero):\n if self not in Monster.visited_monsters:\n Monster.visited_monsters.append(self)\n hero.give_gem()\n else:\n print(\"You have already took your gem from this monster, try looking somewhere else.\\n\")", "def take_hit(self, hit):\n\n inner_p = self.master.from_global_to_self(hit.trace.p)\n inner_p = gm.Point2(inner_p.x, inner_p.y)\n inner_trace = hit.trace.copy()\n inner_trace.p = inner_p\n cleaved = False\n if CHOP in hit.features:\n self.body_parts.sort(lambda a, b: a.chop_priority - b.chop_priority)\n else:\n self.body_parts.sort(lambda a, b: a.stab_priority - b.stab_priority)\n for part in self.body_parts:\n in_p = part.shape.intersect(inner_trace)\n if in_p is not None:\n p = self.master.from_self_to_global(part.shape.pc)\n eff.Blood().add_to_surface(p)\n part.collide(hit)\n if CLEAVE not in hit.features:\n break\n cleaved = True\n else:\n if not cleaved:\n return\n if PENETRATE not in hit.features:\n hit.complete()", "def hillclimber(map):\n for i in range(40000):\n \n house1 = map.houses[random.randrange(150)]\n house2 = map.houses[random.randrange(150)]\n\n battery1 = house1.connected\n battery2 = house2.connected\n if battery1 is not None and battery2 is not None:\n if battery1.id == battery2.id:\n pass\n elif battery1.power + house1.output - house2.output < 0:\n pass \n elif battery2.power + house2.output - house1.output < 0:\n pass\n elif (distance(house1, battery2) + distance(house2, battery1)) < (distance(house1, battery1) + distance(house2, battery2)):\n map.swap(house1, house2)\n\n elif battery1 is None:\n check(house1, map.batteries)\n \n else:\n check(house2, map.batteries)", "def _handle_leaving(self) -> None:\n\n for elevator in self.elevators:\n to_remove = []\n for person in elevator.passengers:\n if person.target == elevator.current_floor:\n to_remove.append(True)\n self.stats.people_completed.append(person)\n if self.visualize:\n self.visualizer.show_disembarking(person, elevator)\n else:\n to_remove.append(False)\n new_list = [p for i, p in enumerate(elevator.passengers)\n if not to_remove[i]]\n elevator.passengers = new_list[:]", "def reply_to_auction(self, other, game, auction_price):\n my_neighborhood = True\n for neighborhood in game.neighborhoods.values():\n if game.board[other.location] in neighborhood:\n for street in neighborhood:\n if street.owned_by is not None and street.owned_by != self:\n my_neighborhood = False\n break\n a = random.randint(-game.board[other.location].price / 10,\n game.board[other.location].price / 10)\n if isinstance(game.board[other.location], Street) and my_neighborhood:\n reply = min(auction_price + 5,\n game.board[other.location].rent_h + a, self.cash)\n else:\n reply = min(auction_price + 5,\n game.board[other.location].price + a, self.cash)\n game.cover_n_central(self.name + \" bids \" + str(reply) + \".\")\n return reply", "def can_hike_to(elevation_map: List[List[int]], start: List[int],\n dest: List[int], supplies: int) -> bool:\n while start != dest:\n if start[0] == dest[0]:\n while start[1] != dest[1]:\n supplies = supplies - abs(elevation_map[start[0]][start[1] - 1] \n - 
elevation_map[start[0]][start[1]]) \n start[1] = start[1] - 1\n \n elif start[1] == dest[1]:\n while start[0] != dest[0]:\n supplies = supplies - abs(elevation_map[start[0] - 1][start[1]] \n - elevation_map[start[0]][start[1]]) \n start[0] = start[0] - 1\n \n else:\n if abs(elevation_map[start[0]][start[1] - 1] \n - elevation_map[start[0]][start[1]]) < abs(elevation_map[\n start[0] - 1][start[1]] - elevation_map[\n start[0]][start[1]]):\n \n supplies = supplies - abs(elevation_map[start[0]][start[1] - 1] \n - elevation_map[start[0]][start[1]])\n start[1] = start[1] - 1\n \n else:\n supplies = supplies - abs(elevation_map[start[0] - 1][start[1]] \n - elevation_map[start[0]][start[1]])\n start[0] = start[0] - 1\n if supplies < 0:\n return False\n return True", "def merge(self, plan):\n # find all pairs of adjacent, same-function rooms keyed by function\n groups = self._group_rooms_by_function(plan)\n pairs = {function:[] for function in plan.functions}\n for function in plan.functions:\n for rid in groups[function]:\n for adjacent_rid in plan.rooms[rid].find_adjacent_rids(plan.grids):\n if adjacent_rid in groups[function]:\n pairs[function].append((rid, adjacent_rid))\n\n # purge the function without adjacent pairs\n # if no function contains adjacent pairs, exit this action.\n pairs = purge_dict(pairs)\n if not pairs: \n if not self.silent:\n print(\"No pairs of adjacent same-function rooms found.\")\n return\n\n # randomly pick a function by weighting. (we may want to using weights\n # to control the chance of merge for different room function)\n # redo picking if the picked function has no adjacent pairs\n picked_function = \"\"\n while picked_function not in pairs.keys():\n picked_function = random_pick(plan.functions, p=plan.pr_merge)\n\n # randomly pick a pair of adjacent rooms\n # create a new room with same function and merged xys\n picked_pair = random_choice(pairs[picked_function])\n room_new = Room(\n function=picked_function, \n rid = plan.room_count,\n xys=plan.rooms[picked_pair[0]].xys + plan.rooms[picked_pair[1]].xys\n )\n plan.room_count += 1\n if not self.silent:\n print(\"Merge rooms %d and %d\" % (plan.rooms[picked_pair[0]].rid,\n plan.rooms[picked_pair[1]].rid))\n\n # update self.rooms\n plan.rooms[picked_pair[0]] = None\n plan.rooms[picked_pair[1]] = None\n plan.rooms.append(room_new)\n if not self.silent:\n print(\"into room %d\" % room_new.rid)\n\n # update the rid of merged xys\n for xy in room_new.xys:\n plan.grids[xy].rid = room_new.rid", "def step(self):\r\n self.__schedule_elevators()\r\n for elevator_id, elevator in enumerate(self.elevators):\r\n elevator.step()\r\n\r\n if elevator.get_current_floor() == elevator.get_target_floor():\r\n #print(\"Elevator : \" + str(elevator_id) + \" Door Opens \")\r\n #print(\"Elevator : \" + str(elevator_id) + \" Door Closes \")\r\n self.elevator_queues[elevator_id].popleft()\r\n self.elevators[elevator_id].set_target_floor(None)", "def recovery(unit, healer_unit):\n\n if healer_unit.__class__.__name__ == 'Healer':\n healer_unit.heal(unit)", "def action(self, colony):\n destination = self.place.exit\n # BEGIN 4.4\n \"*** YOUR CODE HERE ***\"\n # END 4.4\n if self.blocked():\n self.sting(self.place.dragon)\n elif self.armor > 0 and destination is not None:\n self.move_to(destination)", "def run_harvester(creep):\n # check if creep needs filling or not\n checkNeedFilling(creep)\n\n # if creep needs to fill energy, go to source.\n if creep.memory.filling:\n sourceMining(creep)\n\n # if ceep energy is full, decide target and go to 
work.\n else:\n\n decide = Decide()\n decide.decideWorkTarget(creep)\n\n work2target = Work2target()\n work2target.workingOnTarget(creep)\n\n ######################## In case target is not decided as expected, run this.\n #target = _(creep.room.find(FIND_STRUCTURES)) \\\n # .filter(lambda s: ((s.structureType == STRUCTURE_SPAWN or s.structureType == STRUCTURE_EXTENSION) \\\n # and s.energy < s.energyCapacity) or s.structureType == STRUCTURE_CONTROLLER) \\\n # .sample()\n #\n #creep.memory.target = target.id\n ###################\n\n creep_type = getCreepType(creep)\n #print(\"WORK number = {}\".format(len([s for s in creep_type if s == 'work'])))\n #print(str(creep_type[0]))\n\n\n #Monitoring targeting activities.\n if creep.memory.target:\n target_print = Game.getObjectById(creep.memory.target)\n\n print (\"{}({}) targets name({}) type({}) {}\".format(creep.name,creep_type,target_print.name,target_print.structureType,target_print.id))", "def run_region_merging(self, max_iters=1):\n \n for iteration in tqdm.tqdm(range(max_iters)):\n # Compute the merged regions\n if self.run_merging_pass():\n break\n \n # Update the RAG\n self.make_rag()", "def is_adopter(graph, node):\n return graph.node[node]['adopter'] == 1", "def flee(self, tile):\n\t\tavailable_moves = tile.adjacent_moves()\n\t\tr = random.randint(0, len(available_moves) - 1)\n\t\tself.do_action(available_moves[r])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pay out the merger bonus for the given hotel at the given price
def payout(self, hotel, price, state):

    def to_current_player(player):
        """ returns the player from this gamestate with player's name """
        return self.player_with_name(player.name)

    majority_stockholders = \
        [to_current_player(p)
         for p in state.majority_stockholders(hotel)]
    minority_stockholders = \
        [to_current_player(p)
         for p in state.minority_stockholders(hotel)]
    majority_payout = MAJORITY_PAYOUT_SCALE * price
    minority_payout = MINORITY_PAYOUT_SCALE * price

    if len(majority_stockholders) == 1:
        player = majority_stockholders.pop()
        player.money += majority_payout
    if len(minority_stockholders) == 1:
        player = minority_stockholders.pop()
        player.money += minority_payout
    elif len(minority_stockholders) > 1:
        payout = \
            divide_and_round_integers(minority_payout,
                                      len(minority_stockholders))
        for player in minority_stockholders:
            player.money += payout
    else:
        payout = \
            divide_and_round_integers(majority_payout + minority_payout,
                                      len(majority_stockholders))
        for player in majority_stockholders:
            player.money += payout
[ "def pay(self, cost):\n if self.is_affordable(cost):\n self.money -= cost", "def sell_to_bank(self, game):\n owner = self.find_owner(game=game)\n owner.liquid_holdings += self.price * 0.5\n owner.property_holdings.remove(self)\n game.bank.property_holdings.append(self)", "def reduce_price(self, reduction):\r\n self._price = self._price - reduction", "def sell(self, bike):\n self.profit += (1.2*(bike.cost)-(bike.cost)) \n self.inventory.remove(bike)", "def update(self, target):\n change = (self.coeff * (target - self.price) +\n self.momentum * self.last_change)\n self.last_change = change\n \n limiter = self.buyer and min or max\n self.price = int(limiter(self.price + change, self.limit))", "def merge_payout(self, tile, maybeHotel, initial_state, end_of_game=False):\r\n if not initial_state.board.valid_merge_placement(tile, maybeHotel):\r\n return\r\n\r\n acquirer = maybeHotel\r\n acquirees = initial_state.board.acquirees(tile, acquirer)\r\n\r\n for acquiree in acquirees:\r\n\r\n stock_price = initial_state.board.stock_price(acquiree)\r\n # TODO: Remove this...\r\n assert stock_price is not None\r\n\r\n self.payout(acquiree, stock_price, initial_state)", "def sellPrice(self):\n return self.initial_btcprice * (1 + FEE + self.strategy)", "def increase_price(self, ingredient):\n self.total_price += ingredient.price", "def pay_bonus(self):\n if not self.bonus_paid:\n \"\"\"send pay bonus request to toloka\"\"\"\n\n client = TolokaClient(self.sandbox)\n user_id = self.toloka_user_id\n bonus = float(self.owner.payoff_in_real_world_currency())\n # TODO:\n \"\"\"We will customize these messages later \"\"\"\n title = DEFAULT_BONUS_TITLE\n message = DEFAULT_BONUS_MESSAGE\n resp = client.pay_bonus(user_id, bonus, title, message)\n self.bonus_paid = True\n self.save()\n return dict(error=False, **resp)\n else:\n return dict(error=True, errmsg='Bonus already paid')", "def buy(self, bike):\n self.fund -= 1.2*(bike.cost)\n self.garage.append(bike)\n print(\"\\n\" +bike.model)", "def reply_to_auction(self, other, game, auction_price):\n my_neighborhood = True\n for neighborhood in game.neighborhoods.values():\n if game.board[other.location] in neighborhood:\n for street in neighborhood:\n if street.owned_by is not None and street.owned_by != self:\n my_neighborhood = False\n break\n a = random.randint(-game.board[other.location].price / 10,\n game.board[other.location].price / 10)\n if isinstance(game.board[other.location], Street) and my_neighborhood:\n reply = min(auction_price + 5,\n game.board[other.location].rent_h + a, self.cash)\n else:\n reply = min(auction_price + 5,\n game.board[other.location].price + a, self.cash)\n game.cover_n_central(self.name + \" bids \" + str(reply) + \".\")\n return reply", "def update_price(self, company: Company):\n pass", "def pay_bet(self):\n self.wallet -= self.bet\n self.bet = 0", "def update(self, btcprice):\n if btcprice <= self.buyPrice():\n if usd.hasFunds(self.distributedBalance):\n buy(self.distributedBalance, btcprice)\n else:\n self.usd.insufficientFunds()\n for transaction in self.book:\n if btcprice >= transaction.sellPrice():\n print 'Profit: ',\n self.sell(transaction, btcprice)\n if btcprice <= (transaction.initial_btcprice * 0.999):\n print 'Loss: ',\n self.sell(transaction, btcprice)", "def buy_stock(self, hotel):\r\n stock_price = self.board.stock_price(hotel)\r\n\r\n if stock_price is None:\r\n raise GameStateError(\"Cannot buy a hotel that is not in play\")\r\n\r\n if self.shares_map[hotel] == 0:\r\n raise GameStateError(\"{0} has no shares to 
buy\".format(hotel))\r\n\r\n if self.current_player.money < stock_price:\r\n raise GameStateError(\"current player can't afford stock for \"+hotel)\r\n\r\n self.shares_map[hotel] -= 1\r\n self.current_player.money -= stock_price\r\n self.current_player.shares_map[hotel] += 1", "def pay_gold(self,accessories):\n print(\"GOLD PAID\")", "def BuyOption(self, optionname, ordcode, price, no):\n print(\"buy opttion called\")\n \n self.deposit = self.deposit - price*no*self.unitprice\n \n #기존에 있는 옵션이면 수치 변경 \n optboughtno = len(self.boughtopt) \n optpresence = False\n for i in range(optboughtno):\n if self.boughtopt[i,0]==optionname:\n \n totalprice = pd.to_numeric(self.boughtopt[i,1])*pd.to_numeric(self.boughtopt[i,2]) + pd.to_numeric(price)*pd.to_numeric(no)\n totalno = pd.to_numeric(self.boughtopt[i,2])+pd.to_numeric(no)\n \n self.boughtopt[i,1] = totalprice/totalno\n self.boughtopt[i,2] = totalno\n \n \n optpresence = True\n break \n #기존에 있던 option인가?\n if optpresence == False: \n newopt = np.array([optionname, price, no])\n self.boughtopt = np.vstack((self.boughtopt,newopt))", "def takeFerry(self, party): #Party\n party.money -= self.ferryfee\n return", "def record_buy(self, buy_amount, price):\n\t\tself.asset += buy_amount\n\t\tself.cash -= buy_amount * price" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
end this game's current player's turn, allocating a tile if possible and moving on to the next player
def done(self, tile):
    if len(self.tile_deck) > 0:
        if tile in self.tile_deck:
            self._give_player_tile(self.current_player, tile)
        else:
            raise GameStateError("tile not in deck " + tile)
    else:
        raise GameStateError("tile_deck is empty")

    self.players.rotate(-1)
[ "def end_turn(self):\n for x in self.units.keys():\n x.tick()\n self.side1.tick()\n self.side2.tick()", "def end_turn(self, block):\n for i, j in block.call_shape():\n self._board[i][j] = 2\n self._occupied.append([i, j])\n self.game_over()\n if not self._is_game_over:\n self._on_new_tetrino()", "def endTurn(self):\n self.possible_moves = {}\n if len(self.rolled_numbers) == 0:\n self.backgammon.changeTurn()\n self.dice_frame.makeActive()\n\n self.updateField()", "def end_turn(self):\n for unit in self.me.units:\n unit.reset_movement()\n self.hide_unit_range()\n self.me.collect_from_cities() # maybe this should be here?", "def _endTurn(self):\r\n self._handleLogs(self.game.endTurn())\r\n self.redraw()", "def tradeTiles(self):\n self.passCounter=0\n self.tilePile+=self.placedTiles\n self.placedTiles=[]\n while(\"\" in self.playerTiles[self.currentPlayer]):\n index=self.playerTiles[self.currentPlayer].index(\"\")\n self.playerTiles[self.currentPlayer].pop(index)\n self.getSevenTiles(self.currentPlayer)\n self.trade=not self.trade\n self.coverTiles=True\n self.messageBox=True\n tkMessageBox.showinfo(title=\"Turn Change\",\n message=\"Player %d are you ready?\"\n %(((self.currentPlayer+1)%len(\n self.playerTiles))+1))\n self.currentPlayer=((self.currentPlayer+1)%len(self.playerTiles))\n self.coverTiles=False\n self.messageBox=False\n self.selectedLetter=None\n self.passCounter=0", "def end_transform_to_tile(self):\n\n self.context.restore()", "def endgame(self):\n # TODO Write something for an endgame screen\n pass", "def _give_player_tile(self, player, tile):\r\n player.tiles.add(tile)\r\n self.tile_deck.remove(tile)", "def test_end_turn_wrap_around(self):\n self.game.setCurrentPlayerIndex(len(self.game.getPlayers()) - 1)\n self.game.setMoveIndex(5)\n\n self.game.endTurn()\n\n self.assertEqual(self.game.getCurrentPlayerIndex(), 0)\n self.assertEqual(self.game.getMoveIndex(), 6)", "def end_game(self, game):\r\n\t\tgame.player1.game = game.player2.game = None\r\n\t\tdel self.games[game.id]", "def finish_exercise(self, winner_id):\n pass", "def next_turn(self):\n temp = self.current_player\n self.current_player = self.opponent\n self.opponent = temp", "def end_player_turn(self, player, result, natural=False):\n logger.info('[RESULT] {0}: {1}'.format(player, result))\n bet = self.player_records[player].bet\n if result == 'W':\n if natural:\n bet = bet * 1.5\n self.dealer.pay(player, bet)\n elif result == 'L':\n player.pay(self.dealer, bet)\n\n self.set_player_finished(player, result)", "def move(self, action):\n tile_type, from_pile, to_stack, nbr_to_move = action\n\n # Check for errors\n if self.winner is not None:\n raise Exception(\"Game already won\")\n #elif pile < 0 or pile >= len(self.piles):\n # raise Exception(\"Invalid pile\")\n #elif count < 1 or count > self.piles[pile]:\n # raise Exception(\"Invalid number of objects\")\n\n # get the tiles from the factory\n nbr_tiles, penalty = self.factory.remove_tiles_from_pile(from_pile, tile_type)\n\n if to_stack == penalty_stack_row_idx:\n # these tiles are going straight to penalty\n self.players[self.current_player_idx].add_tiles_to_penalty(nbr_tiles, tile_type)\n else:\n # put the tiles on the floor\n self.players[self.current_player_idx].move_tiles_to_row(nbr_tiles, tile_type, to_stack)\n\n if penalty == 1:\n self.players[self.current_player_idx].add_penalty_tile_to_penalty_stack()\n\n # check if the round is over\n if self.factory.get_tile_count_in_piles() == 0:\n # score this round and setup the next round \n # if the game is over, 
determine the winner\n if self.process_end_round():\n self.set_winner()\n # the end of round method also sets the next player\n else:\n # check if the player just did something which will end the game soon\n if not self.is_last_round:\n self.is_last_round = self.players[self.current_player_idx].has_a_completed_row()\n # pass the baton to the next player\n self.switch_player()\n\n \n\n # Update pile\n #self.piles[pile] -= count\n #self.switch_player()\n\n # Check for a winner\n #if all(pile == 0 for pile in self.piles):\n # self.winner = self.player", "def stop_game(self):\n\n full_cell = 0\n for i in range(4):\n for j in range(4):\n if (self.grid[i][j] != 0):\n full_cell += 1\n if (full_cell == 16 and not self.possible_action()):\n self.status = False\n return self.status", "def exit_dungeon(self, coords):\n self.current_level.move_player_dungeon(coords)", "def destroy(self):\n self.game_map.lose = True", "def __go_to_exit(self):\r\n\r\n if self.__controller.green_square_found:\r\n self.__controller.forwards()\r\n\r\n if self.__controller.laser_minimum < 0.5:\r\n self.__moving_to_exit = False\r\n\r\n if not self.__controller.green_square_found:\r\n print(\"I FOUND THE EXIT\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gives the player in this game the tile and removes it from the deck
def _give_player_tile(self, player, tile):
    player.tiles.add(tile)
    self.tile_deck.remove(tile)
[ "def remove_player(self, player_shot: Name):\n del self.players[player_shot]\n for name, player in self.players.items():\n player.remove_player(player_shot)", "def remove_player(self):\n if self.num_player > 0:\n self.num_player -= 1\n self.available_place += 1\n self.update_full_status()\n self.save()", "def remove_player(self, player):\n try:\n self.players.remove(player)\n except ValueError:\n pass", "def remove_from_hand(self, card):\n if card and card in self.hand:\n position = self.hand.index(card)\n del self.hand[position]\n return card", "def deal(self):\n \n topCard = self._deck[0]\n self._deck.remove(topCard)\n self._discardPile.append(topCard)\n return topCard", "def remove_player(self, player: \"CardPlayer\"):\n self.log.debug('%r leaving.' % player)\n if player in self.game.players:\n self.game.remove_player(player)\n player.table = None\n elif player in self.waiting:\n self.log.debug('%r stopped waiting.' % player)\n self.waiting.remove(player)\n else:\n self._assert(False, 'CardPlayer not even seated.')\n\n # Check if any CardPlayers are seated, aside from the CardDealer.\n if not self.game.active:\n self.game.cleanup()", "def removePlayer(self, number):\n del(self.players[number])", "def discard(self, card):\n \n self.hand.pop(self.hand.index(card))\n self.cardList.append(card)", "def discard_tile(self, player_id, tile_code):\n tile_states = self.get_state(tile_code)\n if player_id == 0: # discard our tile\n first_in_hand_index = tile_states.index(TileState.InMyHand)\n self.set_tile_status(tile_code, first_in_hand_index, TileState.InMyDiscardTiles)\n else: # other player discarded a tile\n first_unknown_index = tile_states.index(TileState.Unknown)\n self.set_tile_status(tile_code, first_unknown_index, TileState(1 + player_id))", "def choose_card_to_drop(player):\r\n print(\"It's your turn!\")\r\n card_to_drop = int(input(\"Which card do you want to discard? \"))\r\n return player.hand.cards[card_to_drop-1]", "def player_discard(self, cards: dict, direction: str):\n # print(cards)\n # print(self.p1.hand)\n # use a dict e.g. 
{pX: [4C, 5H, 3C]}\n if direction == \"pass\":\n pass\n for card in cards:\n # print(\"inner loop\")\n self.p1.hand.remove(card)\n if direction == \"left\":\n self.p2.hand.append(card)\n if direction == \"right\":\n self.p4.hand.append(card)\n if direction == \"top\":\n self.p3.hand.append(card)", "def removeSuit(self, suit):\n #self.notify.info(\"Suit planner removing suit %s\" % (suit.doId))\n\n # be sure to clear the zone that the suit is in since it\n # is going to be removed completely\n self.zoneChange( suit, suit.zoneId )\n\n if self.suitList.count( suit ) > 0:\n self.suitList.remove( suit )\n\n if suit.flyInSuit:\n self.numFlyInSuits -= 1\n if suit.buildingSuit:\n self.numBuildingSuits -= 1\n if suit.attemptingTakeover:\n self.numAttemptingTakeover -= 1\n\n assert self.numFlyInSuits + self.numBuildingSuits == len(self.suitList)\n assert self.numAttemptingTakeover == self.countTakeovers()\n\n suit.requestDelete()\n return", "def discardPileShuffle(self):\n self._deck = self._discardPile\n self.shuffle()\n self._discardPile = []", "def remove_piece(self):\n piece = self.piece\n self.piece = None\n return piece", "def __drop_piece(data, row, col, piece):\r\n data.game_board.drop_piece(row, col, piece)\r\n data.turn += 1\r\n data.turn %= 2\r\n if data.game_board.winning_move(piece, row, col):\r\n data.game_over = True\r\n data.winner = piece", "def trade_card_of_hand(current_player_hand, deck, player_choice):\n count_of_cards_in_deck = len(deck) - 1\n random_index = random.randrange(0, count_of_cards_in_deck, 1)\n current_player_hand[player_choice - 1] = deck[random_index]\n deck.remove(deck[random_index])", "def discard():\n player = current_player._get_current_object()\n if not player:\n abort(400)\n\n args = request.get_json()\n card_id = args.get('card', None)\n if card_id is None:\n abort(400)\n\n card = Card.query.get(card_id)\n if card is not None and card in player.cards:\n player.cards.remove(card)\n db.session.commit()\n return player_state()", "def remove_player(self, user_id):\n try:\n del self.players[user_id]\n except KeyError:\n # TODO: Should we allow this to percolate up to the caller?\n pass", "def preRemovePlayer(game):\n from .verb import ExitVerb\n\n x = game.me.location\n while isinstance(game.me.location, Thing):\n ExitVerb().verbFunc(game)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns whether we can do a done move with the given tile
def is_valid_done(self, tile):
    return tile in self.tile_deck
[ "def check_if_next_tile_is_hit(self):\n board = self._board_object.get_board()\n if self._direction == 'down':\n if board[self._row + 1][self._column] == 'a' or board[self._row + 1][self._column] == 'h':\n return True\n if self._direction == 'up':\n if board[self._row - 1][self._column] == 'a' or board[self._row - 1][self._column] == 'h':\n return True\n if self._direction == 'right':\n if board[self._row][self._column + 1] == 'a' or board[self._row][self._column + 1] == 'h':\n return True\n if self._direction == 'left':\n if board[self._row][self._column - 1] == 'a' or board[self._row][self._column - 1] == 'h':\n return True\n return False", "def can_move(self):\n # If no unit is selected, we can't.\n if not self.sel_unit: return False\n \n # If the unit is done its move, we can't.\n return not self.sel_unit.turn_state[0]", "def tiles_finished(self):\r\n some_tiles_left = len(self.available_tiles) > 0\r\n return not some_tiles_left", "def is_finished(self):\n return len(self.legalMoves) == 0", "def check_direction(self,\n clicked_tile,\n direction,\n reversible_tiles_from_this_location):\n\n reference_tile = clicked_tile\n next_tile, status = self.give_me_next_tile(reference_tile, direction)\n\n # ic()\n # if next_tile:\n # debug_print(f\"next tile: {next_tile.get_name()} --- Status: {status}\")\n # else:\n # debug_print(\"No tile\")\n if status == THERE_IS_NO_TILE:\n # print(THERE_IS_NO_TILE, direction)\n return False\n if self.is_error(status, direction):\n return False\n if not self.is_the_opponent_tile(next_tile):\n # print(f\"The next pawn {direction} is not the one of the player nbr\", self.get_opponent_player())\n return False\n # ic()\n # debug_print(\"Next tile: \", next_tile.get_name())\n # debug_print(f\"La tuile suivante est à la position: {next_tile.get_name()}. 
Propriétaire de la tuile suivante: {next_tile.get_owner()} --- Le joueur adverse est le joueur nbr{self.get_opponent_player()}\", \"black\", \"green\")\n\n has_browsed_the_max_of_tiles_in_this_direction = NOT_YET\n while has_browsed_the_max_of_tiles_in_this_direction == NOT_YET:\n\n # print(f\"Direction: {direction} --- Est-ce une case adverse ?: {self.is_the_opponent_tile(next_tile)} --- Position: {next_tile.get_name()}\")\n if self.is_the_opponent_tile(next_tile):\n reversible_tiles_from_this_location.append(next_tile)\n\n reference_tile = next_tile\n next_tile, status = self.give_me_next_tile(reference_tile, direction)\n\n if self.is_error(status, direction) or next_tile.is_empty():\n has_browsed_the_max_of_tiles_in_this_direction =\\\n YES__NO_MORE_USED_TILE\n if self.is_the_tile_of_the_current_player(next_tile):\n has_browsed_the_max_of_tiles_in_this_direction =\\\n YES__SAME_COLOR_REACHED\n\n if has_browsed_the_max_of_tiles_in_this_direction ==\\\n YES__NO_MORE_USED_TILE:\n # print(YES__NO_MORE_USED_TILE, \" after this one.\")\n reversible_tiles_from_this_location = None\n return False\n if has_browsed_the_max_of_tiles_in_this_direction ==\\\n YES__SAME_COLOR_REACHED:\n # print(YES__SAME_COLOR_REACHED)\n self.grid.add_to_clickable_tiles_list(\n reversible_tiles_from_this_location)\n # print(f\"reversible tiles: {len(reversible_tiles_from_this_location)}\")\n return True", "def can_move(self):\n if self.shift_up(testRun=True) or self.shift_right(testRun=True) or self.shift_down(testRun=True) or self.shift_left(testRun=True):\n return True\n else:\n return False", "def falls_into_pit(move):\n if move not in PROGRESS_MOVES:\n return False\n center = len(sight()) // 2\n start_pos = (center, center)\n move_direction = direction_of_move(move)\n new_pos = get_pos_in_direction(start_pos, move_direction)\n new_cell = get_cell_in_sight(new_pos)\n if new_cell.floor == PIT:\n return True\n elif move != FORWARD_TWO:\n return False\n # If you moved twice, but a wall was between you and the pit\n elif new_cell.content and new_cell.content[TYPE] in [WALL, MOUNTED_LASER]:\n return False\n else:\n two_pos = get_pos_in_direction(new_pos, move_direction)\n two_cell = get_cell_in_sight(two_pos)\n return two_cell.floor == PIT", "def isValidMove(self, movecoords):\n valid_move = False\n exits = self.tile_holder[0].exits # dictionary with this tiles exits\n if movecoords == (1, 0) and exits['e'] == 1:\n valid_move = True\n elif movecoords == (-1, 0) and exits['w'] == 1:\n valid_move = True\n elif movecoords == (0, 1) and exits['s'] == 1:\n valid_move = True\n elif movecoords == (0, -1) and exits['n'] == 1:\n valid_move = True\n\n return valid_move", "def finished(self):\n return self.board == self.goal", "def is_game_finished(self):\n return len(self._possible_moves) == 0", "def has_moves(self, piece):\n \n # loop through all the moves and flag if any is possible\n moves = [piece.move_left, piece.move_right, piece.rotate_clockwise, piece.rotate_counter_clockwise]\n available = []\n for move in moves:\n move()\n available.append(self.is_valid_move(piece))\n piece.reverse_move()\n\n return any(available) == True", "def canMove(self, from_cell, to_cell):\n return self.board.isEmptyLegalCell(to_cell) and \\\n (self.board.isKing(from_cell) or\n self.board.isForwardToTeter(from_cell, to_cell))", "def is_move_complete(self) -> bool:\n info = yield from self._get_stepper_info()\n if info is None:\n # read did fail\n return False\n return info['position'] == self._position", "def can_move(self, direction):\r\n row = 
self.zero_location[0]\r\n col = self.zero_location[1]\r\n\r\n if direction == 'up' and row != 0:\r\n return True\r\n if direction == 'down' and row != 2:\r\n return True\r\n if direction == 'left' and col != 0:\r\n return True\r\n if direction == 'right' and col != 2:\r\n return True\r\n return False", "def is_complete(self):\n for tile_group in itertools.chain(self.boxes, self.rows, self.columns):\n if not tile_group.is_complete():\n return False\n return True", "def contains_move(self, t, distance):\n current_position = t.pos()\n t.silent_forward(distance)\n proposed_position = t.pos()\n t.go_to(current_position)\n return self.contains(proposed_position)", "def has_finished(grid):\n\n if not get_cell_count(grid) and grid.generation > 0:\n return True\n\n return False", "def player_has_moves(self) -> bool:\r\n result = False\r\n for row_index in range(self.boardrows):\r\n for col_index in range(self.boardcols):\r\n if self.board[row_index][col_index] == self.current_player_piece:\r\n result = self._piece_check(row_index, col_index, 'open')\r\n if result:\r\n return result\r\n\r\n return result", "def has_valid_move(board):\n for row in range(len(board)):\n for col in range(len(board)):\n # If an empty space is found, immediately return true\n if board[row][col] == 0:\n return True\n\n # If not at edge of board:\n if (not row == len(board) - 1) and (board[row][col] == board[row + 1][col]):\n return True # If there are two vertically adjacent matching tiles\n if (not col == len(board) - 1) and (board[row][col] == board[row][col + 1]):\n return True\n\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates self.ticket_list with all tickets seen in a given view
def get_tickets_in_view(self):
    logger.info("Entered get_tickets_in_view")
    try:
        page_num = 1
        while True:
            url_to_request = self.freshdesk_info['url'] + self.freshdesk_info['view_url'].format(self.freshdesk_info['view_number']) + str(page_num)
            logger.debug("Requesting {}".format(url_to_request))
            r = requests.get(url_to_request, auth=(self.freshdesk_info['api_key'], "X"))
            returned_json = json.loads(r.text)
            logger.debug("We received json back: {}".format(returned_json))
            # if we received no tickets, we break and stop requesting more
            if not returned_json:
                logger.debug("We broke out because no json was returned")
                break
            page_num += 1
            self.ticket_list.extend(returned_json)
            time.sleep(self.sleep_time)
    except KeyboardInterrupt:
        raise
    except Exception as e:
        logger.warning("Error in get_tickets_in_view: {}".format(str(e)))
        raise
[ "def all_tickets(request):\n tickets = Ticket.objects.all()\n return render(request, \"tickets.html\", {'tickets': tickets})", "def test_view_all_tickets(app):\n for i in range(10):\n tick = Ticket(\n title=\"Ticket {}\".format(i),\n text=\"Text {}\".format(i),\n creator=\"creator{}@gmail.com\".format(i),\n assignee=\"assignee{}@gmail.com\".format(i),\n status=TicketStatus.Progress.value,\n create_time=datetime.now()\n )\n\n tick.save()\n\n # Retrieve the ticket from the app\n resp = app.get(\"/tickets\")\n assert resp.status_code == 200\n\n gotten_tickets = QuerySet(Ticket, []).from_json(resp.data)\n\n assert compare_ticket_lists(gotten_tickets, list(Ticket.objects))", "def display_all_tickets(self):\n\n self.model.get_all_tickets()\n pass", "def listtickets( self, p ) :\n if isinstance( p, (str, unicode) ) :\n p = Project( p )\n res = self.srvr.listtickets( p.projectname )\n self._doexception( res )\n tickets = []\n for tid in res['tickets'] :\n summary = res['tickets'][tid][0]\n tickets.append( Ticket( self, p, int(tid), summary=summary ))\n return tickets", "def edit_tickets(request):\n\n user = request.user\n user_tickets = user.ticket_set.all()\n user_reviews = user.review_set.all()\n user_reviews_tickets = [review.ticket for review in user_reviews]\n answered_tickets = [\n ticket for ticket in user_tickets\n if ticket in user_reviews_tickets\n ]\n uncommented_user_tickets = user_tickets.exclude(title__in=answered_tickets)\n commented_user_tickets = user_tickets.filter(title__in=answered_tickets)\n ordered_tickets = sorted(user_tickets,\n key=operator.attrgetter('time_created'),\n reverse=True)\n context = {\"user\": user, \"user_tickets\": ordered_tickets,\n \"uncommented_tickets\": uncommented_user_tickets,\n \"commented_tickets\": commented_user_tickets}\n return render(request, \"blog/edit_tickets.html\", context)", "def sample_tickets():\n ticket1 = Ticket(_id='Tix1',\n url='tix.com.au/Tix1',\n external_id='Tix-Au-1',\n created_at='2017-06-23T10:31:39 -10:00',\n type='Bug',\n subject='subject1',\n description='description1',\n priority='high',\n status='',\n submitter_id='1',\n assignee_id='2',\n organization_id='1000',\n tags=['FIXME', 'TODO'],\n has_incidents=True,\n due_at='2017-06-23T10:31:39 -12:00',\n via='NoOne')\n\n ticket2 = Ticket(_id='Tix2',\n url='tix.com.au/Tix2',\n external_id='Tix-Au-2',\n created_at='2017-06-23T10:31:39 -10:00',\n subject='subject1',\n priority='high',\n status='',\n submitter_id='1',\n organization_id='1000',\n tags=['FIXME2', 'TODO2'],\n has_incidents=True,\n via='NoOne')\n\n tickets_list = [ticket1, ticket2]\n return tickets_list", "def getTicketsListHTML(self, project, category, solved, lang, tickets, selectedTicketRelativeID=0, unread={'bug':[], 'feature':[]}):\n mytemplate = Template(filename='templates/'+lang+'/tickets_list.html', output_encoding='utf-8', default_filters=['decode.utf8'], input_encoding='utf-8')\n ticketsList = mytemplate.render(\n projectWebName = project,\n category = category,\n tickets = tickets,\n selectedTicketRelativeID = selectedTicketRelativeID,\n sortMode = cherrypy.session.get(str(project)+'_sortMode','lastModDown'),\n unread = unread[category]\n )\n return ticketsList", "def get_opened_tickets(request):\n if request.method == \"GET\":\n opened_ticket_list = Ticket.objects.filter(status=\"Open\")\n serializer = TicketSerializer(\n opened_ticket_list,\n many=True\n )\n response_data = serializer.data\n return Response(response_data, status=status.HTTP_200_OK)", "def fetchtickets( self ) :\n self.tickets = 
self.client.listtickets( self )\n return self.tickets", "def ticket_detail(request, pk):\n ticket = get_object_or_404(Ticket, pk=pk)\n ticket.views += 1\n ticket.save()\n return render(request, \"ticketdetail.html\", {'ticket': ticket})", "def owned_tickets(self, user, only_open=0):\n items = []\n for i, t in enumerate(self.__items):\n if t.user is user and (not only_open or not t.is_closed()):\n t._v_index = i\n items.append(t)\n return items", "def get_tickets_list(self, **kwargs):\n\n all_params = ['page', 'per_page', '_from', 'to', 'sort_dir', 'sort_field', 'filters']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_tickets_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/tickets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page' in params:\n query_params['_page'] = params['page']\n if 'per_page' in params:\n query_params['_perPage'] = params['per_page']\n if '_from' in params:\n query_params['_from'] = params['_from']\n if 'to' in params:\n query_params['_to'] = params['to']\n if 'sort_dir' in params:\n query_params['_sortDir'] = params['sort_dir']\n if 'sort_field' in params:\n query_params['_sortField'] = params['sort_field']\n if 'filters' in params:\n query_params['_filters'] = params['filters']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Ticket]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_tickets(request):\n get_tickets = Ticket.objects.filter(most_recent_update__lte=timezone.now()\n ).order_by('-most_recent_update')\n \n # Pagination\n paginator = Paginator(get_tickets, 5)\n page = request.GET.get('page', 1)\n \n # Handle out of range and invalid page numbers:\n try:\n tickets = paginator.page(page)\n except PageNotAnInteger:\n tickets = paginator.page(1)\n except EmptyPage:\n tickets = paginator.page(paginator.num_pages)\n\n return render(request, \"tickets.html\", {'tickets': tickets})", "def list_tickets(self):\n url = API_ROOT + \"/api/v2/tickets.json\"\n tickets = []\n while url:\n response = self._get(url)\n data = response.json()\n tickets.extend(data['tickets'])\n url = data['next_page']\n return tickets", "def _get_ticket_history(self, rt, requesting_username, ticket_id):\n ticket_history = rt.getTicketHistory(ticket_id)\n ticket_history = list(filter(lambda h: h['Type'] in ALLOWED_HISTORY_TYPES, ticket_history))\n for entry in ticket_history:\n if entry['Type'] == \"Status\":\n entry['Content'] = entry['Description']\n\n # Determine who created this message using portal\n if entry['Creator'] == \"portal\":\n # Check if its a reply submitted on behalf of a user\n submitted_for_user = re.search(r'\\[Reply submitted on behalf of (.*?)\\]',\n 
entry['Content'].splitlines()[-1]) if entry['Content'] else False\n if submitted_for_user:\n entry['Creator'] = submitted_for_user.group(1)\n entry[\"Content\"] = entry['Content'][:entry['Content'].rfind('\\n')]\n\n # if user info is in the ticket metadata\n if not submitted_for_user and entry['Type'] == \"Create\":\n submitted_for_user = re.findall(r'authenticated_user:[\\r\\n]+([^\\r\\n]+)',\n entry['Content'], re.MULTILINE) if entry['Content'] else False\n if submitted_for_user:\n entry['Creator'] = submitted_for_user[-1]\n\n if entry['Type'] == \"Create\":\n entry[\"Content\"] = entry['Content'][:entry['Content'].rfind(METADATA_HEADER)]\n\n entry[\"IsCreator\"] = True if requesting_username == entry['Creator'] else False\n\n known_user = get_user_model().objects.filter(username=entry['Creator']).first()\n if known_user:\n entry['Creator'] = \"{} {}\".format(known_user.first_name, known_user.last_name)\n return ticket_history", "def build_active_sheet_tickets(self):\n \n logging.debug(\" in build_active_sheet_tickets \")\n \n self.sheet.open_sheet()\n for record in self.sheet.all_sheet_records:\n if(record['event_status'] == \"Y\"):\n self.active_sheet_events.append(record['event_id'])\n self.active_sheet_tickets[str(record['ticket_id'])] = record", "def feedback_tickets(self, user, only_open=0):\n items = []\n for i, t in enumerate(self.__items):\n if t.is_feedback() and (t.user is user) and (not only_open or not t.is_closed()):\n t._v_index = i\n items.append(t)\n return items", "def get(self, request, ticket_id=None):\n if not request.user.is_authenticated:\n raise PermissionDenied\n\n rt = rtUtil.DjangoRt()\n if ticket_id:\n if not rt.hasAccess(ticket_id, request.user.email):\n raise PermissionDenied\n ticket = rt.getTicket(ticket_id)\n return JsonResponse({'tickets': [ticket]})\n else:\n user_tickets = rt.getUserTickets(request.user.email)\n return JsonResponse({'tickets': user_tickets})", "def get_tickets_new() -> List[TicketModel]:\n new_tickets = []\n for ticket in TicketModel.select():\n first_cell = CellModel.get_or_none(ticket=ticket.id)\n if first_cell is None:\n new_tickets.append(ticket)\n return new_tickets" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds all tickets with original_status and changes them to new_status. If the revert argument is true, it will only change tickets found in the changed.txt file in the script directory. This is so we don't clobber views and only undo what we last did.
def change_ticket_statuses(self, original_status, new_status, revert):
    def make_request(ticket_number):
        url_to_request = ticket_update_url + str(ticket_number)
        logger.debug("Sending request: {} with data: {}".format(url_to_request, send_data))
        r = requests.put(url_to_request, auth=(self.freshdesk_info['api_key'], "X"), data=send_data, headers=headers)
        logger.info('Updated ticket {} with request {}'.format(ticket_number, url_to_request))

    logger.info("Entered change_ticket_statuses")
    headers = {'Content-Type': 'application/json'}
    ticket_update_url = self.freshdesk_info['url'] + self.freshdesk_info['ticket_view']
    send_data = json.dumps({'status': int(new_status)})
    logger.debug("We have ticket list to check: {}".format(self.ticket_list))
    if(revert):
        with open(path_to_changed_file, 'r') as changed_file:
            for ticket_number in changed_file:
                try:
                    make_request(ticket_number)
                except requests.exceptions.RequestException as e:
                    logger.warning("Requests exception when changing ticket status: {}".format(str(e)))
                    pass
                except Exception as e:
                    logger.error("Unhandled exception when reverting ticket status! {}".format(str(e)))
                    print("Unhandled exception when changing ticket status! {}".format(str(e)))
                    pass
    else:
        with open(path_to_changed_file, 'w') as changed_file:
            for ticket in self.ticket_list:
                try:
                    changed = False
                    logger.debug("Checking if ticket status {} matches original status {}".format(ticket['status'], original_status))
                    if str(ticket['status']) == str(original_status):
                        make_request(ticket['display_id'])
                        changed = True
                except requests.exceptions.RequestException as e:
                    logger.warning("Requests exception when changing ticket status: {}".format(str(e)))
                    pass
                except Exception as e:
                    logger.error("Unhandled exception when changing ticket status! {}".format(str(e)))
                    print("Unhandled exception when changing ticket status! {}".format(str(e)))
                    pass
                else:
                    # write ticket number to changed file
                    if changed:
                        changed_file.write(str(ticket['display_id']) + '\n')
[ "def _localChanges(repos, changeSet, curTrove, srcTrove, newVersion, root, flags,\n withFileContents=True, forceSha1=False,\n ignoreTransient=False, ignoreAutoSource=False,\n crossRepositoryDeltas = True, allowMissingFiles = False,\n callback=UpdateCallback(), statCache = {}):\n assert(root)\n\n newTrove = curTrove.copy()\n # we don't use capsules for local diffs, ever\n newTrove.troveInfo.capsule.type.set('')\n newTrove.changeVersion(newVersion)\n\n # There's no point in layering file changes on top of a phantom trove\n # because there's no way to install the original capsule.\n if curTrove.getVersion().onPhantomLabel():\n return False, newTrove\n\n pathIds = {}\n for (pathId, path, fileId, version) in newTrove.iterFileList():\n pathIds[pathId] = True\n\n # Iterating over the files in newTrove would be much more natural\n # then iterating over the ones in the old trove, and then going\n # through newTrove to find what we missed. However, doing it the\n # hard way lets us iterate right over the changeset we get from\n # the repository.\n if srcTrove:\n fileList = [ x for x in srcTrove.iterFileList() ]\n # need to walk changesets in order of fileid\n fileList.sort()\n else:\n fileList = []\n\n # Used in the loops to determine whether to mark files as config\n # would be nice to have a better list...\n\n isSrcTrove = curTrove.getName().endswith(':source')\n\n if isinstance(srcTrove, trove.TroveWithFileObjects):\n srcFileObjs = [ srcTrove.getFileObject(x[2]) for x in fileList ]\n else:\n srcFileObjs = repos.getFileVersions( [ (x[0], x[2], x[3]) for x in\n fileList ],\n allowMissingFiles=allowMissingFiles)\n for (pathId, srcPath, srcFileId, srcFileVersion), srcFile in \\\n itertools.izip(fileList, srcFileObjs):\n # files which disappear don't need to make it into newTrove\n if not pathIds.has_key(pathId): continue\n del pathIds[pathId]\n\n # transient files never show up in in local changesets...\n if ignoreTransient and srcFile.flags.isTransient():\n continue\n\n if ignoreAutoSource:\n if srcFile.flags.isAutoSource() and \\\n curTrove.fileIsAutoSource(pathId):\n # file was autosourced and still is; ignore it\n continue\n elif srcFile.flags.isAutoSource():\n # file was autosourced but was now added. keep going so\n # it shows up in the diff\n pass\n elif curTrove.fileIsAutoSource(pathId):\n # file was removed (which gets marked as autosourced). 
remove\n # it from the newTrove to get the diff right\n newTrove.removeFile(pathId)\n continue\n\n (path, fileId, version) = newTrove.getFile(pathId)\n\n if isSrcTrove:\n if path in curTrove.pathMap:\n info = curTrove.pathMap[path]\n if type(info) == tuple:\n # this file hasn't changed -- just keep going\n continue\n else:\n realPath = info\n isAutoSource = True\n else:\n isAutoSource = False\n realPath = util.joinPaths(root, path)\n else:\n realPath = util.joinPaths(root, path)\n\n if forceSha1:\n possibleMatch = None\n else:\n possibleMatch = srcFile\n\n try:\n f = files.FileFromFilesystem(realPath, pathId,\n possibleMatch = possibleMatch,\n statBuf =\n statCache.get(realPath, None))\n except OSError, e:\n if isSrcTrove:\n callback.error(\n \"%s is missing (use remove if this is intentional)\"\n % util.normpath(path))\n return None\n\n if e.errno == errno.ENOENT and flags.ignoreMissingFiles:\n pass\n elif e.errno == errno.ENOENT and not flags.missingFilesOkay:\n callback.warning(\n \"%s is missing (use remove if this is intentional)\"\n % util.normpath(path))\n else:\n callback.warning(\n \"cannot remove %s: %s\" % (util.normpath(path), e.strerror))\n\n newTrove.removeFile(pathId)\n continue\n\n _mergeFileChanges(f, srcFile)\n\n if isSrcTrove:\n f.flags.isSource(set = True)\n f.flags.isAutoSource(set = isAutoSource)\n f.flags.isConfig(set = curTrove.fileIsConfig(pathId))\n\n\n if not f.eq(srcFile, ignoreOwnerGroup = flags.ignoreUGids):\n newFileId = f.fileId()\n if isSrcTrove:\n newTrove.addFile(pathId, path, newVersion, newFileId,\n isConfig = f.flags.isConfig(),\n isAutoSource = f.flags.isAutoSource())\n else:\n newTrove.addFile(pathId, path, newVersion, newFileId)\n\n needAbsolute = (not crossRepositoryDeltas and\n (srcFileVersion.trailingLabel().getHost() !=\n newVersion.trailingLabel().getHost()))\n\n if needAbsolute:\n (filecs, hash) = changeset.fileChangeSet(pathId, None, f)\n else:\n (filecs, hash) = changeset.fileChangeSet(pathId, srcFile, f)\n\n changeSet.addFile(srcFileId, newFileId, filecs)\n\n if hash and withFileContents:\n newCont = filecontents.FromFilesystem(realPath)\n\n if srcFile.hasContents:\n if needAbsolute or not f.flags.isConfig():\n changeSet.addFileContents(pathId, newFileId,\n changeset.ChangedFileTypes.file,\n newCont, f.flags.isConfig())\n else:\n srcCont = repos.getFileContents(\n [ (srcFileId, srcFileVersion) ])[0]\n # make sure we don't depend on contents in the\n # database; those could disappear before we write\n # this out\n if srcCont:\n srcCont = filecontents.FromString(\n srcCont.get().read())\n\n (contType, cont) = changeset.fileContentsDiff(\n srcFile, srcCont, f, newCont)\n\n changeSet.addFileContents(pathId, newFileId,\n contType, cont,\n f.flags.isConfig())\n\n # anything left in pathIds has been newly added\n for pathId in pathIds.iterkeys():\n (path, fileId, version) = newTrove.getFile(pathId)\n\n if isSrcTrove:\n if path in curTrove.pathMap:\n if type(curTrove.pathMap[path]) is tuple:\n # this is an autosourced file which existed somewhere\n # else with a different pathId. 
The contents haven't\n # changed though, and the fileId/version is valid\n continue\n else:\n realPath = curTrove.pathMap[path]\n isAutoSource = True\n else:\n realPath = util.joinPaths(root, path)\n isAutoSource = False\n\n if not isinstance(version, versions.NewVersion):\n srcFile = repos.getFileVersion(pathId, fileId, version)\n if ignoreAutoSource and srcFile.flags.isAutoSource():\n # this is an autosource file which was newly added,\n # probably by a merge (if it was added on the command\n # line, it's version would be NewVersion)\n changeSet.addFile(None, srcFile.fileId(), srcFile.freeze())\n newTrove.addFile(pathId, path, version, srcFile.fileId(),\n isConfig=srcFile.flags.isConfig(),\n isAutoSource=True)\n continue\n else:\n realPath = util.joinPaths(root, path)\n\n # if we're committing against head, this better be a new file.\n # if we're generating a diff against someplace else, it might not\n # be.\n assert(srcTrove or isinstance(version, versions.NewVersion))\n\n f = files.FileFromFilesystem(realPath, pathId,\n statBuf = statCache.get(realPath, None))\n\n if isSrcTrove:\n f.flags.isSource(set = True)\n f.flags.isAutoSource(set = isAutoSource)\n f.flags.isConfig(set= curTrove.fileIsConfig(pathId))\n newTrove.addFile(pathId, path, newVersion, f.fileId(),\n isConfig = f.flags.isConfig(),\n isAutoSource = f.flags.isAutoSource())\n else:\n # this can't happen since we don't allow files to be added to\n # troves for installed systems\n newTrove.addFile(pathId, path, newVersion, f.fileId())\n\n # new file, so this part is easy\n changeSet.addFile(None, f.fileId(), f.freeze())\n\n if f.hasContents and withFileContents:\n newCont = filecontents.FromFilesystem(realPath)\n changeSet.addFileContents(pathId, f.fileId(),\n changeset.ChangedFileTypes.file,\n newCont, f.flags.isConfig())\n\n # local changes don't use capsules to store information\n newTrove.troveInfo.capsule.reset()\n\n # compute new signatures -- the old ones are invalid because of\n # the version change\n newTrove.invalidateDigests()\n newTrove.computeDigests()\n\n (csTrove, filesNeeded, pkgsNeeded) = newTrove.diff(srcTrove, absolute = srcTrove is None)\n\n if (csTrove.getOldFileList() or csTrove.getChangedFileList()\n or csTrove.getNewFileList()\n or [ x for x in csTrove.iterChangedTroves()]):\n foundDifference = True\n else:\n foundDifference = False\n\n changeSet.newTrove(csTrove)\n\n return (foundDifference, newTrove)", "def _render_diff(self, req, ticket, data, text_fields):\n new_version = int(req.args.get('version', 1))\n old_version = int(req.args.get('old_version', new_version))\n if old_version > new_version:\n old_version, new_version = new_version, old_version\n\n # get the list of versions having a description change\n history = self._get_history(req, ticket)\n changes = {}\n descriptions = []\n old_idx = new_idx = -1 # indexes in descriptions\n for change in history:\n version = change['version']\n changes[version] = change\n if any(f in text_fields for f in change['fields']):\n if old_version and version <= old_version:\n old_idx = len(descriptions)\n if new_idx == -1 and new_version and version >= new_version:\n new_idx = len(descriptions)\n descriptions.append((version, change))\n\n # determine precisely old and new versions\n if old_version == new_version:\n if new_idx >= 0:\n old_idx = new_idx - 1\n if old_idx >= 0:\n old_version, old_change = descriptions[old_idx]\n else:\n old_version, old_change = 0, None\n num_changes = new_idx - old_idx\n if new_idx >= 0:\n new_version, new_change = 
descriptions[new_idx]\n else:\n raise TracError(_(\"No differences to show\"))\n\n tnew = ticket.resource(version=new_version)\n told = ticket.resource(version=old_version)\n\n req.perm(tnew).require('TICKET_VIEW')\n req.perm(told).require('TICKET_VIEW')\n\n # determine prev and next versions\n prev_version = old_version\n next_version = None\n if new_idx < len(descriptions) - 1:\n next_version = descriptions[new_idx+1][0]\n\n # -- old properties (old_ticket) and new properties (new_ticket)\n\n # assume a linear sequence of change numbers, starting at 1, with gaps\n def replay_changes(values, old_values, from_version, to_version):\n for version in range(from_version, to_version+1):\n if version in changes:\n for k, v in changes[version]['fields'].iteritems():\n values[k] = v['new']\n if old_values is not None and k not in old_values:\n old_values[k] = v['old']\n\n old_ticket = {}\n if old_version:\n replay_changes(old_ticket, None, 1, old_version)\n\n new_ticket = dict(old_ticket)\n replay_changes(new_ticket, old_ticket, old_version+1, new_version)\n\n field_labels = TicketSystem(self.env).get_ticket_field_labels()\n\n changes = []\n\n def version_info(t, field=None):\n path = _(\"Ticket #%(id)s\", id=ticket.id)\n # TODO: field info should probably be part of the Resource as well\n if field:\n path = tag(path, Markup(' &ndash; '),\n field_labels.get(field, field.capitalize()))\n if t.version:\n rev = _(\"Version %(num)s\", num=t.version)\n shortrev = 'v%d' % t.version\n else:\n rev, shortrev = _(\"Initial Version\"), _(\"initial\")\n return {'path': path, 'rev': rev, 'shortrev': shortrev,\n 'href': get_resource_url(self.env, t, req.href)}\n\n # -- prop changes\n props = []\n for k, v in new_ticket.iteritems():\n if k not in text_fields:\n old, new = old_ticket[k], new_ticket[k]\n if old != new:\n label = field_labels.get(k, k.capitalize())\n prop = {'name': label, 'field': k,\n 'old': {'name': label, 'value': old},\n 'new': {'name': label, 'value': new}}\n rendered = self._render_property_diff(req, ticket, k,\n old, new, tnew)\n if rendered:\n prop['diff'] = tag.li(\n tag_(\"Property %(label)s %(rendered)s\",\n label=tag.strong(label), rendered=rendered))\n props.append(prop)\n changes.append({'props': props, 'diffs': [],\n 'new': version_info(tnew),\n 'old': version_info(told)})\n\n # -- text diffs\n diff_style, diff_options, diff_data = get_diff_options(req)\n diff_context = 3\n for option in diff_options:\n if option.startswith('-U'):\n diff_context = int(option[2:])\n break\n if diff_context < 0:\n diff_context = None\n\n for field in text_fields:\n old_text = old_ticket.get(field)\n old_text = old_text.splitlines() if old_text else []\n new_text = new_ticket.get(field)\n new_text = new_text.splitlines() if new_text else []\n diffs = diff_blocks(old_text, new_text, context=diff_context,\n ignore_blank_lines='-B' in diff_options,\n ignore_case='-i' in diff_options,\n ignore_space_changes='-b' in diff_options)\n\n changes.append({'diffs': diffs, 'props': [], 'field': field,\n 'new': version_info(tnew, field),\n 'old': version_info(told, field)})\n\n # -- prev/up/next links\n if prev_version:\n add_link(req, 'prev', get_resource_url(self.env, ticket.resource,\n req.href, action='diff',\n version=prev_version),\n _(\"Version %(num)s\", num=prev_version))\n add_link(req, 'up', get_resource_url(self.env, ticket.resource,\n req.href, action='history'),\n _(\"Ticket History\"))\n if next_version:\n add_link(req, 'next', get_resource_url(self.env, ticket.resource,\n req.href, action='diff',\n 
version=next_version),\n _(\"Version %(num)s\", num=next_version))\n\n prevnext_nav(req, _(\"Previous Change\"), _(\"Next Change\"),\n _(\"Ticket History\"))\n add_stylesheet(req, 'common/css/diff.css')\n add_script(req, 'common/js/diff.js')\n\n data.update({\n 'title': _(\"Ticket Diff\"),\n 'resource': ticket.resource,\n 'old_version': old_version, 'new_version': new_version,\n 'changes': changes, 'diff': diff_data,\n 'num_changes': num_changes, 'change': new_change,\n 'old_ticket': old_ticket, 'new_ticket': new_ticket,\n 'longcol': '', 'shortcol': ''\n })\n\n return 'diff_view.html', data, None", "def handle_ref(old, new, ref, env):\n\n from trac.util.text import to_unicode\n\n if VERBOSE:\n print ref\n # Regex the ticket number out of the refname\n match = ticket_from_ref_re.search(ref)\n\n tkt_id_from_ref = DEFAULT_POST_RECEIVE_TKT_ID\n if match:\n tkt_id_from_ref = int(match.group(1))\n\n if VERBOSE:\n print \"Parsed ticket from refname: %s\" % tkt_id_from_ref\n # Get the list of hashs for commits in the changeset.\n args = (old == '0' * 40) and [new] or [new, '^' + old]\n\n pending_commits = call_git('rev-list', args).splitlines()\n if VERBOSE:\n print \"pending commits: %s\" % pending_commits\n if not pending_commits:\n return\n\n # Get the subset of pending commits that are already seen.\n db = env.get_db_cnx()\n cursor = db.cursor()\n\n try:\n cursor.execute('SELECT sha1 FROM git_seen WHERE sha1 IN (%s)'\n % ', '.join(['%s'] * len(pending_commits)), pending_commits)\n seen_commits = map(itemgetter(0), cursor.fetchall())\n except psycopg2.ProgrammingError:\n # almost definitely due to git_seen missing\n cursor.close()\n db.close()\n # get a new cursor\n db = env.get_db_cnx()\n cursor = db.cursor()\n cursor.execute('CREATE TABLE git_seen (sha1 VARCHAR(40));')\n seen_commits = []\n\n ticket_msgs = defaultdict(list)\n # Iterate over commits, starting with earliest\n for commit in reversed(pending_commits):\n # If the commit was seen already, we do not repost it.\n if commit in seen_commits and not REPOST_SEEN:\n continue\n\n remember_commit(commit, db, cursor)\n\n # Get message from commit\n msg = get_commit_message(commit, env)\n\n # First check for explicit \"Refs #999\"-style ticket refs.\n matches = ticket_from_explicit_refs_re.findall(msg)\n for ticket_id in matches:\n ticket_msgs[ticket_id].append(to_unicode(msg))\n\n # If a merge commit, try to identify origin ticket.\n match = ticket_from_msg_re.search(msg)\n if match:\n source_tkt_id = int(match.group(1))\n target_tkt_id = match.group(2)\n ticket_msgs[source_tkt_id].append(to_unicode(msg))\n if target_tkt_id:\n ticket_msgs[int(target_tkt_id)].append(to_unicode(msg))\n else:\n # Otherwise, we comment on the ticket corresponding to the ref\n ticket_msgs[tkt_id_from_ref].append(to_unicode(msg))\n\n # the wire (hook) hears all\n author = \"the wire\"\n\n try:\n if POST_COMMENT:\n for tkt_id, commit_msgs in ticket_msgs.items():\n print \"Posting to ticket #%s\" % tkt_id\n post_to_ticket('\\n----\\n'.join(commit_msgs),\n author, tkt_id, env)\n except Exception, e:\n msg = 'Unexpected error processing commit %s: %s' % (commit[:7], e)\n print >>sys.stderr, msg\n db.rollback()\n else:\n db.commit()", "def test_diff_viewer_filter_by_change_type(repo_with_diffs: Tuple[Repo, Commit, Commit]):\n repo, previous_head, new_head = repo_with_diffs\n with DiffViewer(previous_head, new_head) as viewer:\n # we added 1 file, we expect the added() generator to return only 1 diff\n diffs = list(viewer.added())\n assert len(diffs) == 1\n paths = 
diff_paths(diffs)\n assert Path(\"other/gbac.rego\") in paths\n\n # we modified 1 file, we expect the modified() generator to return only 1 diff\n diffs = list(viewer.modified())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert Path(\"mylist.txt\") in paths\n\n # we deleted 1 file, we expect the deleted() generator to return only 1 diff\n diffs = list(viewer.deleted())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert Path(\"other/data.json\") in paths\n\n # we renamed 1 file, we expect the renamed() generator to return only 1 diff\n diffs = list(viewer.renamed())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert len(paths) == 2 # both old and new file name\n assert Path(\"ignored.json\") in paths\n assert Path(\"ignored2.json\") in paths", "def update_hit_review_status(HITId=None, Revert=None):\n pass", "def patch_git_rename(sbox):\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # a simple --git rename patch\n unidiff_patch = [\n \"diff --git a/iota b/iota2\\n\",\n \"similarity index 100%\\n\",\n \"rename from iota\\n\",\n \"rename to iota2\\n\",\n ]\n\n patch_file_path = sbox.get_tempname('my.patch')\n svntest.main.file_write(patch_file_path, ''.join(unidiff_patch))\n\n expected_output = wc.State(wc_dir, {\n 'iota' : Item(status='D '),\n 'iota2' : Item(status='A ')\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('iota')\n expected_disk.add({'iota2' : Item(contents=\"This is the file 'iota'.\\n\")})\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'iota2' : Item(status='A ', copied='+', wc_rev='-', moved_from='iota'),\n })\n expected_status.tweak('iota', status='D ', wc_rev=1, moved_to='iota2')\n expected_skip = wc.State('', { })\n svntest.actions.run_and_verify_patch(wc_dir, patch_file_path,\n expected_output, expected_disk,\n expected_status, expected_skip,\n [], True, True)\n\n # Retry\n expected_output = wc.State(wc_dir, {\n 'iota2' : Item(status='G ')\n })\n svntest.actions.run_and_verify_patch(wc_dir, patch_file_path,\n expected_output, expected_disk,\n expected_status, expected_skip,\n [], True, True)\n\n # Reverse\n expected_output = wc.State(wc_dir, {\n 'iota2' : Item(status='D '),\n 'iota' : Item(status='A '),\n })\n expected_disk.remove('iota2')\n expected_disk.add({\n 'iota' : Item(contents=\"This is the file 'iota'.\\n\"),\n })\n expected_status.remove('iota2')\n expected_status.tweak('iota', moved_to=None, status=' ')\n svntest.actions.run_and_verify_patch(wc_dir, patch_file_path,\n expected_output, expected_disk,\n expected_status, expected_skip,\n [], True, True,\n '--reverse-diff')\n\n # Retry reverse\n # svntest.actions.run_and_verify_patch(wc_dir, patch_file_path,\n # expected_output, expected_disk,\n # expected_status, expected_skip,\n # [], True, True,\n # '--reverse-diff')", "def bt_Revert_OnClick(self, event):\n self.update_py_files(['$MODULE_NAME', '$AUTHOR',\n self.st_VersionPrev.GetLabel(),\n self.st_BuildPrev.GetLabel(),\n self.st_CreatedPrev.GetLabel()])\n return", "def revert(self):\n index = self.get_stack_index()\n finfo = self.data[index]\n filename = finfo.filename\n if finfo.editor.document().isModified():\n answer = QMessageBox.warning(self, self.title,\n _(\"All changes to <b>%s</b> will be lost.\"\n \"<br>Do you want to revert file from disk?\"\n ) % osp.basename(filename),\n QMessageBox.Yes|QMessageBox.No)\n if answer != QMessageBox.Yes:\n return\n self.reload(index)", "def list_changed(project):\n print \"Files that need re-rendering: \"\n for 
changed_page in project.list_changed()[0]:\n print \" \" + changed_page\n print \"Files that don't need to be rendered: \"\n for unchanged_page in project.list_changed()[1]:\n print \" \" + unchanged_page", "def runTest(self):\n name = 'TestTicketHistoryDiff'\n ticketid = self._tester.create_ticket(name)\n self._tester.go_to_ticket(ticketid)\n tc.formvalue('propertyform', 'description', random_sentence(6))\n tc.submit('submit')\n\n # [BLOODHOUND] Description 'modified' in comments feed inside <span />\n tc.find('Description<[^>]*>\\\\s*<[^>]*>\\\\s*modified \\\\(<[^>]*>diff', 's')\n tc.follow('diff')\n tc.find('Changes\\\\s*between\\\\s*<[^>]*>Initial Version<[^>]*>\\\\s*and' \\\n '\\\\s*<[^>]*>Version 1<[^>]*>\\\\s*of\\\\s*<[^>]*>Ticket #' , 's')", "def get_difference_between_revisions(revision_one,timestamp_one,revision_two,timestamp_two):\r\n difference_holder = []\r\n \r\n added_text_holder = []\r\n \r\n removed_text_holder = []\r\n \r\n url = \"https://en.wikipedia.org/w/api.php?action=compare&format=json&fromrev=\" + revision_one +\"&torev=\" + revision_two\r\n \r\n response = urllib.request.urlopen(url).read() \r\n \r\n link_info = (response.decode('utf-8'))\r\n \r\n j = json.loads(link_info)\r\n \r\n com = j[\"compare\"]['*']\r\n \r\n soup = BeautifulSoup(com,'lxml')\r\n \r\n \r\n lister = soup.find_all('td')\r\n \r\n lsz_added = map(str,lister)\r\n \r\n lsz_removed = map(str,lister)\r\n \r\n indices_two = [i for i, text in enumerate(lsz_removed) if 'deletedline' in text]\r\n \r\n indices = [i for i, text in enumerate(lsz_added) if 'addedline' in text]\r\n \r\n for added_text in indices:\r\n if lister[added_text].get_text() in added_text_holder:\r\n break\r\n else:\r\n if lister[added_text].get_text() != \"\":\r\n edited_text = lister[added_text].get_text().split(\",\")\r\n fixed_added_text = \" \".join(edited_text)\r\n added_text_holder.append(revision_one + \"sez\" + timestamp_one + \"sez\" + revision_two + \"sez\" + timestamp_two + \"sez\" +\"added text: \" +fixed_added_text)\r\n \r\n \r\n for deleted_text in indices_two:\r\n if lister[deleted_text].get_text() in removed_text_holder:\r\n break\r\n else:\r\n if lister[deleted_text].get_text() != \"\":\r\n edited_text = lister[deleted_text].get_text().split(\",\")\r\n fixed_deleted_text = \" \".join(edited_text) \r\n removed_text_holder.append(revision_one + \"sez\" + timestamp_one + \"sez\" + revision_two + \"sez\" + timestamp_two + \"sez\" +\"removed text: \" + fixed_deleted_text) \r\n \r\n difference_holder.append(added_text_holder)\r\n difference_holder.append(removed_text_holder)\r\n \r\n return difference_holder", "def test_diff_viewer_affected_paths(repo_with_diffs: Tuple[Repo, Commit, Commit]):\n repo, previous_head, new_head = repo_with_diffs\n with DiffViewer(previous_head, new_head) as viewer:\n paths = viewer.affected_paths()\n # we touched 4 files, 1 is a rename so it has two paths (old and new)\n assert len(paths) == 5\n assert Path(\"other/gbac.rego\") in paths\n assert Path(\"mylist.txt\") in paths\n assert Path(\"other/data.json\") in paths\n assert Path(\"ignored.json\") in paths\n assert Path(\"ignored2.json\") in paths", "def do_revert(self, line):\n\t\t# if not self.command_history:\n\t\ttry: \n\t\t\tif not self.song_list[self.song_name][1]:\n\t\t\t\tprint \"No existing history of edits\"\n\t\t\telse:\n\t\t\t\tself.song_list[self.song_name][0] = list(reversed(self.song_list[self.song_name][2]))[int(line)-1]\n\t\t\t\tself.curr_song = self.song_list[self.song_name][0]\n\t\t\t\t# Song history\n\t\t\t\tdel 
self.song_list[self.song_name][2][-(int(line)-1):]\n\t\t\t\t# Command history\n\t\t\t\tdel self.song_list[self.song_name][1][-(int(line)-1):]\n\t\texcept:\n\t\t\tprint \"Cannot reach specified state\"", "def changed_files(revset, filter_re=None):\n require('code_dir')\n\n with cd(env.code_dir):\n result = run(\"hg status --rev '%s'\" % revset, quiet=True).splitlines()\n\n if filter_re:\n regex = re.compile(filter_re)\n result = filter(lambda filename: regex.search(filename), result)\n\n return result", "def revert(self, view):\r\n sublime.set_timeout(lambda: do_revert(view), 50)\r\n self.just_reverted = True\r\n sublime.status_message('Showing current version')", "def list_changes(self):\n\n self.changes = {'new': {}, 'update': {}, 'delete': {}}\n source_base = self.source_config.get('table')\n idfield = self.source_config.get('id')\n\n lap = timer()\n if (len(source_base)):\n leftdiffquery = 'SELECT {source}_import.id, {source}_import.hash ' \\\n 'FROM {source}_import ' \\\n 'FULL OUTER JOIN {source}_current ON {source}_import.hash = {source}_current.hash ' \\\n 'WHERE {source}_current.hash is null'.format(source=source_base)\n neworupdates = self.db.select(leftdiffquery)\n logger.debug(\n '[{elapsed:.2f} seconds] Left full outer join on \"{source}\"'.format(source=source_base, elapsed=(timer() - lap)))\n lap = timer()\n\n rightdiffquery = 'SELECT {source}_current.id, {source}_current.hash ' \\\n 'FROM {source}_import ' \\\n 'FULL OUTER JOIN {source}_current ON {source}_import.hash = {source}_current.hash ' \\\n 'WHERE {source}_import.hash is null'.format(source=source_base)\n updateordeletes = self.db.select(rightdiffquery)\n logger.debug(\n '[{elapsed:.2f} seconds] Right full outer join on \"{source}\"'.format(source=source_base, elapsed=(timer() - lap)))\n lap = timer()\n\n importtable = globals()[source_base.capitalize() + '_import']\n currenttable = globals()[source_base.capitalize() + '_current']\n\n # new or update\n for result in neworupdates:\n r = importtable.get(hash=result[1])\n if (r.rec):\n uuid = r.rec[idfield]\n self.changes['new'][uuid] = [r.id]\n\n # updates or deletes\n for result in updateordeletes:\n r = currenttable.get(hash=result[1])\n if (r.rec):\n uuid = r.rec[idfield]\n if self.changes['new'].get(uuid, False):\n self.changes['update'][uuid] = self.changes['new'].get(uuid)\n self.changes['update'][uuid].append(r.id)\n del self.changes['new'][uuid]\n else :\n self.changes['delete'][uuid] = [r.id]\n\n if (len(self.changes['new']) or len(self.changes['update']) or len(self.changes['delete'])):\n logger.info(\n '[{elapsed:.2f} seconds] identified {new} new, {update} updated and {delete} removed'.format(\n new=len(self.changes['new']),\n update=len(self.changes['update']),\n delete=len(self.changes['delete']),\n elapsed=(timer() - lap)\n )\n )\n\n else:\n logger.info('No changes')\n\n return self.changes", "def update_requests_status(db: Session, original_statuses: List[models.Statuses], new_status: models.Statuses):\n old_statuses = [status.name for status in original_statuses]\n db_requests = db.query(models.Request).filter(models.Request.status.in_(old_statuses)).all()\n for db_request in db_requests:\n setattr(db_request, 'status', new_status.name)\n db.flush()\n db.commit()\n return len(db_requests)", "def test_defects_revision_history():\n rally = Rally(server=RALLY, user=RALLY_USER, password=RALLY_PSWD)\n response = rally.get('Defect', fetch=True, limit=10)\n \n defect1 = response.next()\n defect2 = response.next()\n assert defect1.oid != defect2.oid\n\n 
d1_revs = defect1.RevisionHistory.Revisions\n d2_revs = defect2.RevisionHistory.Revisions\n\n assert type(d1_revs) == list\n assert type(d2_revs) == list\n\n d1_rev1 = d1_revs.pop() # now the revs are in stack order, newest first, original the last\n d2_rev1 = d2_revs.pop() # ditto\n\n assert d1_rev1.RevisionNumber == 0\n assert d2_rev1.RevisionNumber == 0\n\n assert d1_rev1.Description != \"\" and len(d1_rev1.Description) > 0\n assert d2_rev1.Description != \"\" and len(d2_rev1.Description) > 0\n\n assert d1_rev1._hydrated == True\n assert d2_rev1._hydrated == True", "def check_changes(self):\n if not self.has_local_repo():\n raise RuntimeError(\"No local repository connected. Aborting...\")\n self.changed_files = []\n self.removed_files = []\n # modified files\n diff = self.local_repo.index.diff(None)\n for d in diff:\n if d.change_type == 'D':\n self.removed_files.append(d.a_path)\n elif d.a_path == d.b_path:\n self.changed_files.append(d.a_path)\n else:\n UserWarning(\"Diff a_path != b_path ({} vs {})\".format(d.a_path, d.b_path))\n self.changed_files.append(d.a_path)\n self.changed_files.append(d.b_path)\n # new files\n for f in self.local_repo.untracked_files:\n if f in self.GIT_EXCLUDE:\n continue\n if any(f.startswith(rule) for rule in self.GIT_EXCLUDE):\n continue\n self.changed_files.append(f)\n # return just the answer (don't make the lists public)\n if self.changed_files or self.removed_files:\n return True\n else:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a value from the json config using dot accessor
def get(self, key): return self._get(self._config, key.split('.'))
[ "def jsonpath(ctx, **kwargs):\n kwargs[\"_get_value\"] = True\n run_command_with_config(ConfigCommand, ctx, **kwargs)", "def dotdictget(myjson, dotdict):\n if re_delim.match(dotdict):\n normalized_dotdict = dotdict\n else:\n normalized_dotdict = '.' + dotdict\n\n return _dotdictget(myjson, normalized_dotdict, [])", "def _get_value(obj, key, default=missing):\n if \".\" in key:\n return _get_value_for_keys(obj, key.split(\".\"), default)\n else:\n return _get_value_for_key(obj, key, default)", "def get_config_value(self, key: str) -> Union[str, int]:\n with open(self.path) as config:\n data = json.load(config)\n return data[key]", "def get(self, name: str, default=None):\n if name in self.__config:\n return self.__config[name]\n if '.' in name:\n names = name.split('.')\n cur = self.__config\n for name in names:\n if type(cur) is dict and name in cur:\n cur = cur[name]\n else:\n return default\n return cur\n return default", "def get_value(self, key: str):\n try:\n return self._config_contents[key]\n except KeyError:\n print(f\"Could not find the desired key: {key} in the config file\")", "def test_read_nested_val(self):\n sample_json = {'level1': {'level2': {'level3': {'int': 42}}}}\n self.assertEqual(\n chrome_defaults.get_json_field(\n sample_json, 'level1.level2.level3.int'),\n 42)", "def get_value_from_str_dotted_key(d, dotted_key):\n keys = dotted_key.split('.')\n temp = copy.deepcopy(d)\n try:\n for key in keys:\n temp = temp[key]\n return temp\n except KeyError:\n return None", "def config(key):\n with open(\"aws_config.json\") as conf:\n return json.load(conf)[key]", "def find_json_value(json_blob, path):\n value = None\n\n node = json_blob\n key = path\n\n while node is not None and key is not None:\n if isinstance(key, int) and isinstance(node, list):\n try:\n value = node[key]\n except IndexError:\n value = None\n break\n elif isinstance(key, str) and '.' 
not in key:\n value = node.get(key, None)\n break\n else:\n # traverse to next level\n level_key, key = key.split('.', 1)\n\n try:\n key = int(key)\n except ValueError:\n pass\n\n try:\n # check if key is actually a list index\n index = int(level_key)\n node = node[index]\n except (\n ValueError,\n KeyError,\n ) as e:\n # key is just a str\n node = node.get(level_key, None)\n\n return value", "def get_config():\n with open(\"config.json\", \"r\") as f:\n data = f.read()\n return json.loads(data)", "def util_json_get_value ( s_json, key ):\n\n try: \n t = json.loads(s_json, strict=False)\n except ValueError:\n return ''\n\n try: \n value = t[key]\n except KeyError:\n return ''\n\n return value", "def get_value(self, config_field):\n raise NotImplementedError", "def _get_value_metadata(cfg, data=None):\n if cfg.get('key'):\n return self.metadata.get(cfg.get('key'))", "def get(self, section, key):\n return self.config_content.get(section, key)", "def get_by_complex_key(cls, json_dict, key):\n key_arr = key.strip().split('.')\n value = \"\"\n d = json_dict.copy()\n for k in key_arr:\n if k not in d.keys():\n d = ''\n break\n else:\n d = d[k]\n value = d\n return value", "def get_driver_property(self, driver, prop):\n return self.node.get(driver, {}).get(prop)", "def get_value_by_path(data, path):\n\n if not isinstance(data, dict) or path == '':\n return None\n\n value_keys = path.split('.')\n result = data\n\n for key in value_keys:\n if key in result.keys():\n result = result[key]\n else:\n result = None\n break\n\n return result", "def get_value(self, section, key, default):\n return self.__config.get(section, key, fallback=default)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unpacks the dictionary response from Boto3 and makes a nice dataclass with some of the more useful details of the EC2 instance
def instance_from_response(response: Dict) -> List[EC2Instance]: ec2_instances = [] for reservation in response.get("Reservations"): for instance in reservation.get("Instances"): if dns := instance.get("PublicDnsName"): public_dns_name = dns else: public_dns_name = "NONE" if ip := instance.get("PublicIpAddress"): public_ip_address = ip else: public_ip_address = "NONE" ec2_instance = EC2Instance( image_id=instance.get("ImageId"), instance_id=instance.get("InstanceId"), instance_type=instance.get("InstanceType"), launch_time=instance.get("LaunchTime"), availability_zone=instance.get("Placement").get("AvailabilityZone"), private_dns_name=instance.get("PrivateDnsName"), private_ip_address=instance.get("PrivateIpAddress"), public_dns_name=public_dns_name, public_ip_address=public_ip_address, state=instance.get("State").get("Name"), subnet_id=instance.get("SubnetId"), vpc_id=instance.get("VpcId"), tags=instance.get("Tags"), ) ec2_instances.append(ec2_instance) return ec2_instances
[ "def get_instance():\n logging.debug(\"Querying cloud-init for instance-id\")\n instance_id = requests.get(\"http://169.254.169.254/latest/meta-data/instance-id\").text\n client = boto3.client('ec2')\n ec2_resource = boto3.resource('ec2')\n aws_instance = client.describe_instances(InstanceIds=[instance_id])\n instance = aws_instance['Reservations'][0]['Instances'][0]\n ebs_volumes = []\n for device in instance['BlockDeviceMappings']:\n volume_info = ec2_resource.Volume(device['Ebs']['VolumeId'])\n ebs_volume = {u\"VolumeId\": device['Ebs']['VolumeId'],\n u\"DeviceName\": device['DeviceName'],\n u\"volume_type\": volume_info.volume_type,\n u\"size\": volume_info.size,\n u\"snapshot_id\": volume_info.snapshot_id,\n u\"iops\": volume_info.iops,\n u\"availability_zone\": volume_info.availability_zone,\n u\"encrypted\": volume_info.encrypted,\n u\"volume_tags\": volume_info.tags }\n ebs_volumes.append(ebs_volume)\n instance[u'volumes'] = ebs_volumes\n return instance", "def _load_instance(self, instance_id):\n try:\n response = self._client.describe_instances(InstanceIds=(instance_id,))\n self._ec2_data = response['Reservations'][0]['Instances'][0]\n except (ClientError, IndexError) as e:\n raise Ec2Exception(\"Instance %s Not Found:\\n %s\" % \\\n (instance_id, str(e),)\n )", "def _retrieve_instances_info_from_ec2(self, instance_ids: list):\n complete_instances = []\n partial_instance_ids = []\n\n if instance_ids:\n try:\n ec2_client = boto3.client(\"ec2\", region_name=self._region, config=self._boto3_config)\n paginator = ec2_client.get_paginator(\"describe_instances\")\n response_iterator = paginator.paginate(InstanceIds=instance_ids)\n filtered_iterator = response_iterator.search(\"Reservations[].Instances[]\")\n\n for instance_info in filtered_iterator:\n try:\n # Try to build EC2Instance objects using all the required fields\n EC2Instance.from_describe_instance_data(instance_info)\n complete_instances.append(instance_info)\n except KeyError as e:\n logger.debug(\"Unable to retrieve instance info: %s\", e)\n partial_instance_ids.append(instance_info[\"InstanceId\"])\n except ClientError as e:\n logger.debug(\"Unable to retrieve instance info: %s\", e)\n partial_instance_ids.extend(instance_ids)\n\n return complete_instances, partial_instance_ids", "def parse(self, configuration):\n return {\n 'Instances': {\n 'Ec2KeyName': configuration['ec2_key_name'],\n 'KeepJobFlowAliveWhenNoSteps': configuration['keep_alive'],\n 'TerminationProtected': configuration['termination_protected'],\n 'Ec2SubnetId': configuration['subnet_id'],\n 'EmrManagedMasterSecurityGroup': configuration['master']['security_group'],\n 'EmrManagedSlaveSecurityGroup': configuration['core']['secutiry_group'],\n 'InstanceGroups': self.__get_instances_groups(configuration)\n }\n }", "def get_service_instance_data(instance_name):\n instance_data_for_req = {}\n instance_data = cf_api.get_service_instance(instance_name)['entity']\n service_plan_data = cf_api.cf_curl_get(instance_data['service_plan_url'])['entity']\n service_url = service_plan_data['service_url']\n service_data = cf_api.cf_curl_get(service_url)['entity']\n instance_data_for_req['plan'] = service_plan_data['name']\n instance_data_for_req['tags'] = instance_data['tags']\n instance_data_for_req['label'] = service_data['label']\n instance_key_data = cf_api.get_temporary_key_data(instance_name)['entity']['credentials']\n return instance_data_for_req, instance_key_data", "def _fetch_instance_info(region, proxy_config, instance_type):\n emsg_format = \"Error when 
calling DescribeInstanceTypes for instance type {instance_type}: {exception_message}\"\n ec2_client = boto3.client(\"ec2\", region_name=region, config=proxy_config)\n try:\n return ec2_client.describe_instance_types(InstanceTypes=[instance_type]).get(\"InstanceTypes\")[0]\n except ClientError as client_error:\n log.critical(\n emsg_format.format(\n instance_type=instance_type, exception_message=client_error.response.get(\"Error\").get(\"Message\")\n )\n )\n raise # NOTE: raising ClientError is necessary to trigger retries\n except Exception as exception:\n emsg = emsg_format.format(instance_type=instance_type, exception_message=exception)\n log.critical(emsg)\n raise CriticalError(emsg)", "def describe_ec2_instances(ec2, ec2_filter):\r\n tmp_instances = []\r\n instances = []\r\n resp = ec2.describe_instances(Filters=ec2_filter)\r\n for res in resp['Reservations']:\r\n tmp_instances.extend(res['Instances'])\r\n while 'NextToken' in resp:\r\n resp = ec2.describe_instances(Filters=ec2_filter,\r\n NextToken=resp['NextToken'])\r\n for res in resp['Reservations']:\r\n tmp_instances.extend(res['Instances'])\r\n\r\n for inst in tmp_instances:\r\n instances.append({'InstanceId': inst['InstanceId'],\r\n 'State': inst['State'],\r\n 'BlockDeviceMappings': inst['BlockDeviceMappings'],\r\n 'AttemptCount': 0,\r\n 'Tags': inst['Tags']})\r\n return instances", "def create_instance(config):\n\n try:\n client = boto3.client('ec2')\n except Exception as e:\n print(f'An error occurred while creating the boto3 client: {e}')\n sys.exit(1)\n\n ami_id = _get_ami_id(client, config.ami_type, config.architecture, config.root_device_type, config.virtualization_type)\n default_vpc_id = _ensure_default_vpc(client)\n key_pair_names = _create_key_pairs(client, config)\n\n blockDeviceMappings = []\n for volume in config.volumes:\n blockDeviceMappings.append({\n 'DeviceName': volume.device,\n 'Ebs': {\n 'DeleteOnTermination': True,\n 'VolumeSize': volume.size_gb,\n 'VolumeType': 'gp2',\n },\n })\n\n res = client.run_instances(\n BlockDeviceMappings=blockDeviceMappings,\n\n ImageId=ami_id,\n InstanceType=config.instance_type,\n\n MaxCount=config.max_count,\n MinCount=config.min_count,\n\n SecurityGroupIds=[\n _create_security_group(client, default_vpc_id)\n ],\n\n UserData=_user_data_script(config),\n )\n\n ec2 = boto3.resource('ec2')\n instances = res['Instances']\n\n for i, instance in enumerate(instances):\n public_ip = ec2.Instance(instance['InstanceId']).public_ip_address\n print(f'instance {i} public ip address = {public_ip}')", "def getReservedInstances(verbose):\n lres = {}\n jResp = EC2C.describe_reserved_instances()\n for reserved in jResp['ReservedInstances']:\n if reserved['State'] == 'active':\n if verbose:\n lres[reserved['InstanceType']] = str(reserved['Start'])+\";\"+\\\n str(reserved['End'])+\";\"+\\\n str(reserved['InstanceCount'])+\";\"+\\\n reserved['ProductDescription']+\";\"+\\\n str(reserved['UsagePrice'])\n else:\n if re.search(\"win\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"windows\"\n elif re.search(\"red hat\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"redhat\"\n elif re.search(\"suse\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"suse\"\n else:\n os = \"linux\"\n lres[reserved['InstanceType']+\";\"+os] = str(reserved['InstanceCount'])\n return lres", "def interpret_response(cls, response):\t\t\r\n\t\tif len(response.data) < 1:\r\n\t\t\traise InvalidResponseException(response, \"Response data must be at least 1 bytes\")\r\n\r\n\t\tresponse.service_data 
= cls.ResponseData()\r\n\t\tresponse.service_data.sequence_number_echo = response.data[0]\r\n\t\tresponse.service_data.parameter_records = response.data[1:] if len(response.data) > 1 else b''", "def get_elb_data(region):\n if debug:\n logger.debug(\"Getting existing Classic Load Balancer data\")\n elbc = boto3.client('elb', region_name=region)\n # Describes the specified Classic Load Balancer.\n try:\n paginator = elbc.get_paginator('describe_load_balancers')\n except botocore.exceptions.ClientError as e:\n logger.error(e.response['Error']['Message'])\n elb_data = []\n for describe_load_balancers in paginator.paginate():\n # Render a dictionary that contains the Classic Load Balancer attributes\n for lb in describe_load_balancers['LoadBalancerDescriptions']:\n elb_item = {}\n elb_item['DNSName'] = lb['DNSName']\n elb_item['Scheme'] = lb['Scheme']\n elb_item['HostedZoneID'] = lb['CanonicalHostedZoneNameID']\n elb_item['Name'] = lb['LoadBalancerName']\n elb_item['ConsoleLink'] = CONSOLE_PREFIX + str(region) + '#LoadBalancers:loadBalancerName=' + lb['LoadBalancerName']\n elb_item['CreatedTime'] = lb['CreatedTime']\n elb_item['AvailabilityZones'] = lb['AvailabilityZones']\n elb_item['BackendInstances'] = lb['Instances']\n # Check if a Classic Load Balancer is in EC2-Classic or EC2-VPC\n if not lb['Subnets']:\n elb_item['EC2Platform'] = 'EC2-Classic'\n elb_item['Subnets'] = None\n elb_item['SecurityGroup'] = lb['SourceSecurityGroup']['GroupName']\n elb_item['VPCId'] = None\n else:\n elb_item['EC2Platform'] = 'EC2-VPC'\n elb_item['Subnets'] = lb['Subnets']\n elb_item['SecurityGroup'] = lb['SecurityGroups']\n elb_item['VPCId'] = lb['VPCId']\n elb_data.append(elb_item)\n if debug:\n logger.debug(\"elb data:\")\n logger.debug(elb_data)\n return elb_data", "def json_serialize_instance(instance):\n\n attributes = [\n 'instance_id',\n 'public_dns_name',\n 'public_ip_address',\n 'private_ip_address',\n 'tags',\n 'instance_type',\n 'architecture',\n 'image_id',\n 'vpc_id',\n 'subnet_id',\n 'security_groups',\n ]\n\n return json.dumps({a: getattr(instance, a) for a in attributes})", "def _return_instance(self, instance, error=None):\n print (\"6. 
me me\")\n self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n self.write(json.dumps(instance, default=json_util.default))\n self.finish()", "def test_instance_info(mocker):\n from AWSRecreateSG import instance_info\n from test_data.sample import INSTANCE_INFO\n mocker.patch.object(demisto, \"executeCommand\", return_value=INSTANCE_INFO)\n args = {\"instance_id\": \"fake-instance-id\", \"public_ip\": \"1.1.1.1\"}\n result = instance_info(**args)\n assert result == {'eni-00000000000000000': ['sg-00000000000000000']}", "def _return_instance(self, instance, error=None):\n print (\"instance is\", instance)\n self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n self.write(json.dumps(instance, default=json_util.default))\n self.finish()", "def get_instance_metadata(cls, instances, no_dev=False):\n\n instance_data = []\n\n for instance in instances:\n instance_dict = {}\n\n if instance.tags:\n instance_dict['name'] = instance.tags[0]['Value']\n else:\n instance_dict['name'] = ''\n\n instance_dict['type'] = instance.instance_type\n instance_dict['id'] = instance.id\n instance_dict['tags'] = []\n instance_dict['state'] = instance.state['Name']\n instance_dict['launch_time'] = instance.launch_time\n\n if no_dev:\n if instance_dict['name'] != MANTRA_DEVELOPMENT_TAG_NAME:\n instance_data.append(instance_dict)\n else:\n if instance_dict['name'] == MANTRA_DEVELOPMENT_TAG_NAME:\n instance_dict['tags'] += ['development']\n instance_data.append(instance_dict)\n\n return instance_data", "def get_instance_metadata(self, metadata_type):\n if metadata_type in [\"mac\", \"instance-id\", \"security-groups\"]:\n request = self.get_and_append_imdsv2_token_header(os.path.join(\n self.INSTANCE_METADATA_API, metadata_type))\n return urlopen(request,\n timeout=self.METADATA_API_TIMEOUT_SECONDS).read().decode('utf-8')\n elif metadata_type in [\"vpc-id\", \"subnet-id\"]:\n mac = self.get_instance_metadata(\"mac\")\n request = self.get_and_append_imdsv2_token_header(os.path.join(\n self.NETWORK_METADATA_API, mac, metadata_type))\n return urlopen(request,\n timeout=self.METADATA_API_TIMEOUT_SECONDS).read().decode('utf-8')\n elif metadata_type in [\"region\", \"privateIp\"]:\n request = self.get_and_append_imdsv2_token_header(self.INSTANCE_IDENTITY_API)\n identity_data = urlopen(request,\n timeout=self.METADATA_API_TIMEOUT_SECONDS) \\\n .read().decode('utf-8')\n return json.loads(identity_data).get(metadata_type) if identity_data else None\n elif metadata_type in [\"role\"]:\n # Arg timeout is in MS.\n fetcher = InstanceMetadataFetcher(\n timeout=self.METADATA_API_TIMEOUT_SECONDS, num_attempts=2)\n c = fetcher.retrieve_iam_role_credentials()\n # This will return None in case of no assigned role on the instance.\n return c.get(\"role_name\")\n else:\n raise YBOpsRuntimeError(\"Unsupported metadata type: {}\".format(metadata_type))", "def run_instances(self):\n # create an entry in the s3 log for the start of this task \n self.log_to_s3('run-instances-start.log', 'start')\n\n session = botocore.session.get_session()\n client = session.create_client('ec2', region_name=self.aws_region)\n\n # convert user-data to base64\n user_data = ''\n # NOTE conversion of file to string, then string to bytes, the bytes encoded \n # base64 - then decode the base64 bytes into base64 string\n with open(self.ec2_user_data, 'r') as f:\n user_data = base64.b64encode(bytes(f.read(), \"utf-8\")).decode(\"utf-8\")\n\n if self.ec2_type in (CONST.VALID_EC2_INSTANCE_TYPES_EBS_ONLY).split('|'):\n # block device 
mapping for ebs backed instances\n # creates an ephemeral EBS volume (delete on terminate)\n # Note that gp2 instance type is EBS SSD\n custom_block_device_mapping = [{\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0',\n 'Ebs':{\n 'VolumeSize': self.ec2_ebs_only_volume_size,\n 'VolumeType': self.ec2_ebs_only_volume_type,\n },\n }]\n else:\n # block device mapping allows for 2 extra drives\n # - works for either single ssd or 2 ssd's\n custom_block_device_mapping = [ \n {\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0'\n },\n {\n 'DeviceName': '/dev/sdc',\n 'VirtualName': 'ephemeral1'\n }\n ]\n\n r = client.request_spot_instances(\n InstanceCount=self.ec2_count,\n SpotPrice=self.ec2_spot_price,\n LaunchSpecification= {\n 'SecurityGroupIds': [\n self.ec2_security_group_id,\n ],\n 'SecurityGroups': [\n self.ec2_security_groups,\n ],\n 'Placement': {\n 'AvailabilityZone': self.ec2_availability_zone,\n },\n 'BlockDeviceMappings': custom_block_device_mapping,\n 'IamInstanceProfile': {\n 'Arn': self.ec2_arn_id,\n },\n 'UserData': user_data,\n 'ImageId': self.ec2_image_id,\n 'InstanceType': self.ec2_type,\n 'KeyName': self.ec2_security_key,\n },\n )\n\n # get the spot instance request ids\n spot_ids = []\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Spot request ids:'))\n for i, spot_inst in enumerate(r['SpotInstanceRequests']):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, \n inst_str + '\\t' + spot_inst['SpotInstanceRequestId']))\n spot_ids.append(spot_inst['SpotInstanceRequestId'])\n utility.list_to_file(CONST.SPOT_REQUEST_IDS, spot_ids)\n\n # create a list of spot instance statuses - so we can print out\n # some updates to the user\n spot_status = ['']*len(spot_ids)\n # Expecting status codes of \"pending-evaluation\", \"pending-fulfillment\", or \n # fulfilled. 
Any other status-code should be printed out & the program \n # terminated.\n expected_status = ['fulfilled', 'pending-evaluation', 'pending-fulfillment']\n instance_ids = [None]*len(spot_ids)\n\n # check the status of the spot requests\n while True:\n fulfilled = 0\n for i, id in enumerate(spot_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_spot_instance_requests(SpotInstanceRequestIds=[id])\n status_code = r['SpotInstanceRequests'][0]['Status']['Code']\n if status_code not in expected_status:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, 'Unexpected status for spot request ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': ') +\n colour_msg(Colour.PURPLE, status_code))\n sys.exit(1)\n if status_code != spot_status[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Spot instance request: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tStatus: ') +\n colour_msg(Colour.PURPLE, status_code))\n spot_status[i] = status_code\n if status_code == 'fulfilled':\n fulfilled += 1\n # record the instance id\n instance_ids[i] = r['SpotInstanceRequests'][0]['InstanceId']\n if fulfilled == len(spot_ids):\n break\n time.sleep(1)\n\n utility.list_to_file(CONST.INSTANCE_IDS, instance_ids)\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ids:'))\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n tag_val = self.ec2_instance_tag + str(i)\n client.create_tags(Resources=[id], Tags=[{'Key':'Name', 'Value':tag_val}])\n\n # monitor the instances until all running\n instance_states = ['']*len(instance_ids)\n expected_states = ['running', 'pending']\n instance_ips = [None]*len(instance_ids)\n running = 0\n while True:\n running = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instances(InstanceIds=[id])\n state = r['Reservations'][0]['Instances'][0]['State']['Name']\n if state not in expected_states:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, \n 'Unexpected instance state for instance-id ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': \\t') +\n colour_msg(Colour.PURPLE, state))\n sys.exit(1)\n if state != instance_states[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tState: ') +\n colour_msg(Colour.PURPLE, state))\n instance_states[i] = state\n if state == 'running':\n running += 1\n # record the instance id\n instance_ips[i] = r['Reservations'][0]['Instances'][0]['PublicDnsName']\n if running == len(instance_ids):\n break\n time.sleep(10)\n\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ips:'))\n for i, id in enumerate(instance_ips):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n \n utility.list_to_file(CONST.INSTANCE_IPS_FILE, instance_ips)\n # need to at least wait until all the instances are reachable\n # possible statuses: (passed | failed | initializing | insufficient-data )\n reachability = ['']*len(instance_ids)\n while True:\n passed = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instance_status(InstanceIds=[id])\n state = r['InstanceStatuses'][0]['InstanceStatus']['Details'][0]['Status']\n if state != reachability[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n 
colour_msg(Colour.CYAN, '\\tReachability: ') +\n colour_msg(Colour.PURPLE, state))\n reachability[i] = state\n if state == 'passed':\n passed += 1\n if passed == len(instance_ids):\n break\n time.sleep(10)\n \n lgr.info(CONST.INFO + colour_msg(Colour.GREEN, 'Instances are reachable'))\n \n # if user-data configuration file supplied - check that it has worked\n # Note that this checker is run once on each instance\n if self.ec2_user_data:\n lgr.info(CONST.INFO + colour_msg(Colour.CYAN, \n 'Starting job to monitor user-data configuration...'))\n # at the moment is calling a local script that does the checking\n result = subprocess.call('./' + self.ec2_user_data_check) \n if result:\n lgr.error(CONST.ERROR + colour_msg(Colour.CYAN, \n 'user data checker FAILED'))\n sys.exit(1)\n\n # create an entry in the s3 log for finish this task \n self.log_to_s3('run-instances-finish.log', 'finish')\n\n # return the list of ip's for the newly created instances\n return utility.file_to_list(CONST.INSTANCE_IPS_FILE)", "def get_current_host_info(self, args):\n try:\n metadata = {}\n for metadata_type in args.metadata_types:\n # Since sometime metadata might have multiple values separated by \\n, we would\n # replace it with comma instead.\n metadata[metadata_type] = \\\n self.get_instance_metadata(metadata_type).replace(\"\\n\", \",\")\n return metadata\n except (URLError, socket.timeout):\n raise YBOpsRuntimeError(\"Unable to auto-discover AWS provider information\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints a summary of EC2 instance details
def print_instance_summary(self, instance: EC2Instance): print(instance.instance_id) self.not_quiet("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") self.verbose_output(f" AMI: {instance.image_id}") self.not_quiet(f" Type: {instance.instance_type}") self.verbose_output(f" Launched: {instance.launch_time}") self.verbose_output(f" AZ: {instance.availability_zone}") self.verbose_output(f" Private DNS: {instance.private_dns_name}") self.verbose_output(f" Public DNS: {instance.public_dns_name}") self.not_quiet(f" Private IP: {instance.private_ip_address}") self.not_quiet(f" Public IP: {instance.public_ip_address}") self.verbose_output(f" Subnet Id: {instance.subnet_id}") self.verbose_output(f" VPC Id: {instance.vpc_id}") self.not_quiet(f" State: {instance.state}") self.verbose_output(f" Tags: {instance.tags}") self.not_quiet("\n")
[ "def do_printInstances(self,args):\n parser = CommandArgumentParser(\"printInstances\")\n parser.add_argument(dest='filters',nargs='*',default=[\"*\"],help='Filter instances');\n parser.add_argument('-a','--addresses',action='store_true',dest='addresses',help='list all ip addresses');\n parser.add_argument('-t','--tags',action='store_true',dest='tags',help='list all instance tags');\n parser.add_argument('-d','--allDetails',action='store_true',dest='details',help='print all instance details');\n parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh');\n parser.add_argument('-z','--zones',dest='availabilityZones',nargs='+',help='Only include specified availability zones');\n args = vars(parser.parse_args(args))\n \n client = AwsConnectionFactory.getEc2Client()\n\n filters = args['filters']\n addresses = args['addresses']\n tags = args['tags']\n details = args['details']\n availabilityZones = args['availabilityZones']\n needDescription = addresses or tags or details\n\n if args['refresh']:\n self.scalingGroupDescription = self.client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.scalingGroup])\n \n # print \"AutoScaling Group:{}\".format(self.scalingGroup)\n print \"=== Instances ===\"\n instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']\n\n instances = filter( lambda x: fnmatches(x['InstanceId'],filters),instances)\n if availabilityZones:\n instances = filter( lambda x: fnmatches(x['AvailabilityZone'],availabilityZones),instances)\n \n index = 0\n for instance in instances:\n instance['index'] = index\n print \"* {0:3d} {1} {2} {3}\".format(index,instance['HealthStatus'],instance['AvailabilityZone'],instance['InstanceId'])\n description = None\n if needDescription:\n description = client.describe_instances(InstanceIds=[instance['InstanceId']])\n if addresses:\n networkInterfaces = description['Reservations'][0]['Instances'][0]['NetworkInterfaces']\n number = 0\n print \" Network Interfaces:\"\n for interface in networkInterfaces:\n print \" * {0:3d} {1}\".format(number, interface['PrivateIpAddress'])\n number +=1\n if tags:\n tags = description['Reservations'][0]['Instances'][0]['Tags']\n print \" Tags:\"\n for tag in tags:\n print \" * {0} {1}\".format(tag['Key'],tag['Value'])\n if details:\n pprint(description)\n \n index += 1", "def _show_instances(self):\n conn = ec2.connect_to_region(\n self.availability_zone,\n aws_access_key_id=self.access_key_id,\n aws_secret_access_key=self.secret_access_key,\n )\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n print reservation\n for instance in reservation.instances:\n print instance\n print '- AMI ID:', instance.image_id\n print '- Instance Type:', instance.instance_type\n print '- Availability Zone:', instance.placement", "def describe(self):\n print(Controller().describe_instances())", "def echo_ip():\n ec2conn = connect_to_region('us-west-2',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n reservations = ec2conn.get_all_instances()\n #print reservations.AWS_INSTANCE_ID\n\n instances = [i for r in reservations for i in r.instances]\n for i in instances:\n if i.id == AWS_INSTANCE_ID:\n pprint(i.ip_address)", "def info():\n return render_template(\n os.path.join(os.path.dirname(__file__), 'templates/instance_info.html'),\n concurrents=concurrents,\n current_requests=current_requests,\n os=os,\n runtime=os.getenv('GAE_RUNTIME'),\n )", "def print_formatted_instances(running_instances) -> None:\n 
BASIC_FORMAT = \"{:^3} {:^20} {:^20} {:^20}\"\n headers = ['#', 'ID', 'Public IPv4', 'Launch Datetime']\n print(BASIC_FORMAT.format(*headers))\n print(BASIC_FORMAT.format(*[\"-\" * len(i) for i in headers]))\n for i, instance in enumerate(running_instances):\n print(BASIC_FORMAT.format(\n *[i + 1, instance.id, instance.public_ip_address, instance.launch_time.strftime(\"%Y/%m/%d %H:%M:%S\")])\n )", "def get_listing():\n\n ec2 = boto3.client('ec2')\n listing = []\n\n try:\n full_listing = ec2.describe_instances(\n Filters=[\n {\n 'Name': 'instance-state-name',\n 'Values': [ 'running' ]\n }\n ],\n MaxResults=1000)\n except Exception as e:\n print(e)\n sys.exit(1)\n\n for reservation in full_listing['Reservations']:\n for instance in reservation['Instances']:\n listing.append(instance)\n\n return listing", "def _print_reservation(reservation):\n num_running = 0\n for inst in reservation.instances:\n if inst.state != u'running':\n continue\n print \"ID: %s\" % inst.id\n print \"state: %s\" % inst.state\n print \"IP: %s\" % inst.ip_address\n print \"private IP: %s\" % inst.private_ip_address\n print \"DNS: %s\" % inst.public_dns_name\n print \"private DNS: %s\" % inst.private_dns_name\n print \"architecture: %s\" % inst.architecture\n print \"image ID: %s\" % inst.image_id\n print \"class: %s\" % inst.instance_class\n print \"type: %s\" % inst.instance_type\n print \"key_name: %s\" % inst.key_name\n print \"launch time: %s\" % inst.launch_time\n print \"\"\n num_running += 1\n\n return num_running", "def showinstances():\n username, conn = _getbotoconn(auth_user)\n\n print \"all instances running under the %s account\" % username\n\n num_running = 0\n reservations = conn.get_all_instances()\n for reservation in reservations:\n num_running += _print_reservation(reservation)\n\n return num_running", "def test_instance(self):\n self._test_instance(\"ec2-test\", debug=False)", "def instances_status(cfg: Config):\n print_instances(Instance.elb_instances(target_group_arn_for(cfg)), number=False)", "def describe_ec2_instances(ec2, ec2_filter):\r\n tmp_instances = []\r\n instances = []\r\n resp = ec2.describe_instances(Filters=ec2_filter)\r\n for res in resp['Reservations']:\r\n tmp_instances.extend(res['Instances'])\r\n while 'NextToken' in resp:\r\n resp = ec2.describe_instances(Filters=ec2_filter,\r\n NextToken=resp['NextToken'])\r\n for res in resp['Reservations']:\r\n tmp_instances.extend(res['Instances'])\r\n\r\n for inst in tmp_instances:\r\n instances.append({'InstanceId': inst['InstanceId'],\r\n 'State': inst['State'],\r\n 'BlockDeviceMappings': inst['BlockDeviceMappings'],\r\n 'AttemptCount': 0,\r\n 'Tags': inst['Tags']})\r\n return instances", "def get_vm_info(self):\n output = \"%-15s %-30s %-15s %-10s %-15s %-15s %-15s %-15s %-15s %-15s %-10s %-10d %-10d %-10d\" % (name,uuid,owner,group,activity,hostname,network,ipaddress,\n clusteraddr, image_name,flavor,cpucores,memory,storage)\n return output", "def test_ec2_instances(self):\n for instance in self.instances:\n self.assertEqual(instance, self.ec2.getInstance(instance).tags['Name'])", "def showDetails(self):\n for k,v in self._parser.getDetailsDict().items():\n print \"%11s : %s\" % (k, str(v)[:60])", "def test_get_instance_details(self):\n module = install_bundle(self.framework)\n\n # Invalid component names\n for invalid in (None, \"\", [1], [\"a\", \"b\"]):\n self.assertRaises(ValueError, self.ipopo.get_instance_details,\n invalid)\n\n # Get its details\n details = self.ipopo.get_instance_details(module.BASIC_INSTANCE)\n\n # Test if 
instance details are really in the output\n self.assertIs(type(details), dict,\n \"Details result must be a dictionary\")\n\n self.assertEqual(details['factory'], module.BASIC_FACTORY,\n \"Invalid factory name\")\n self.assertEqual(details['name'], module.BASIC_INSTANCE,\n \"Invalid component name\")\n\n self.assertIs(type(details['state']), int,\n \"Component state must be an integer\")\n self.assertIs(type(details['services']), dict,\n \"Services details must be in a dictionary\")\n self.assertIs(type(details['dependencies']), dict,\n \"Dependencies details must be in a dictionary\")", "def dumpinstance():\n\n parent = localAvatar.getParent()\n parent = parent.getParent()\n parent.writeBamFile('dump.bam')\n\n return 'Instance Dumped'", "def get_vm_info_pretty(self):\n output = self.get_vm_info_header()\n output += self.get_vm_info()\n return output", "def _retrieve_instances_info_from_ec2(self, instance_ids: list):\n complete_instances = []\n partial_instance_ids = []\n\n if instance_ids:\n try:\n ec2_client = boto3.client(\"ec2\", region_name=self._region, config=self._boto3_config)\n paginator = ec2_client.get_paginator(\"describe_instances\")\n response_iterator = paginator.paginate(InstanceIds=instance_ids)\n filtered_iterator = response_iterator.search(\"Reservations[].Instances[]\")\n\n for instance_info in filtered_iterator:\n try:\n # Try to build EC2Instance objects using all the required fields\n EC2Instance.from_describe_instance_data(instance_info)\n complete_instances.append(instance_info)\n except KeyError as e:\n logger.debug(\"Unable to retrieve instance info: %s\", e)\n partial_instance_ids.append(instance_info[\"InstanceId\"])\n except ClientError as e:\n logger.debug(\"Unable to retrieve instance info: %s\", e)\n partial_instance_ids.extend(instance_ids)\n\n return complete_instances, partial_instance_ids" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
red slider event handler
def callback_red(*args): global red_int col = "red" str_val = str(r_slide_val.get()) red_int = code_shrtn(str_val, 20, 30, 60, 80, col) update_display(red_int, green_int, blue_int)
[ "def callback_blue(*args):\n global blue_int\n col = \"blue\"\n str_val = str(b_slide_val.get())\n blue_int = code_shrtn(str_val, 180, 30, 60, 80, col)\n update_display(red_int, green_int, blue_int)", "def slider_released(self):\n self.update_status(\"status\", \"Length Updated\")", "def callback_green(*args):\n global green_int\n col = \"darkgreen\"\n str_val = str(g_slide_val.get())\n green_int = code_shrtn(str_val, 100, 30, 60, 80, col)\n update_display(red_int, green_int, blue_int)", "def _on_slider_pressed(self):\n # This flag will activate fast_draw_slice_at_index\n # Which will redraw sliced images quickly\n self._slider_flag = True", "def _on_slider_released(self):\n # This flag will deactivate fast_draw_slice_at_index\n self._slider_flag = False\n\n index = self._slice_slider.value()\n cube_views = self._cv_layout.cube_views\n active_cube = self._cv_layout._active_cube\n active_widget = active_cube._widget\n\n # If the active widget is synced then we need to update the image\n # in all the other synced views.\n if active_widget.synced and not self._cv_layout._single_viewer_mode:\n for view in cube_views:\n if view._widget.synced:\n view._widget.update_slice_index(index)\n self._cv_layout.synced_index = index\n else:\n # Update the image displayed in the slice in the active view\n active_widget.update_slice_index(index)\n\n # Now update the slice and wavelength text boxes\n self._update_slice_textboxes(index)\n\n specviz_dispatch.changed_dispersion_position.emit(pos=index)", "def slider(self, size=(300, 30, 3), spoint=50, scolor=(100, 55, 100)):\n swidth=10\n #swidth=int(5/50*spoint)\n #swidth = np.clip(swidth, 5, spoint)\n swindow=np.zeros(self.size).astype(np.uint8)\n swindow[:self.ssize[0], 0:self.ssize[1]] += np.uint8([255, 255, 255]) \n r1 = np.clip(spoint-swidth, swidth, self.ssize[0]-swidth)\n r2 = np.clip(spoint+swidth, swidth, self.ssize[0]-swidth)\n spoint = int(10/50 * spoint)\n #print(r1, r2, spoint)\n \n \n swindow[r1:r2, :self.ssize[1]] = scolor\n self.point=(spoint, self.point[1])\n #cv2.imshow(\"slider\", swindow.astype(np.uint8))\n return swindow.astype(np.uint8)", "def slider_changed(self, indx):\n if indx == 0 or indx == 1:\n self.combo_box_changed(tab_id=self.tab_index, combo_id=\"0_1\")\n\n # Change the filtered image after changing the noisy image\n if (self.img0_noisy.image is not None) and self.combo_filter.currentIndex() != 0:\n self.combo_box_changed(tab_id=self.tab_index, combo_id=\"0_2\")\n\n elif indx == 2 or indx == 3:\n self.combo_box_changed(tab_id=self.tab_index, combo_id=\"0_2\")\n\n # Test Images Slider\n elif indx == 4:\n pass", "def update_slider(event):\n \n global interval\n interval = round(event)", "def slider_reconnect(self):\n self.sender().valueChanged.connect(self.slider_changed)\n self.sender().valueChanged.emit(self.sender().value())", "def fig_callback(self, event):\n global multi\n ax=event.inaxes\n cal=self.calibrator.activegamma\n \n #if self.selecting: return\n \n for source in cal:\n axes=self.figures[source]['axes']\n if ax in axes:\n currentaxes=axes\n currentfigure=self.figures[source]['figure']\n \n self.multi=MultiCursor(currentfigure.canvas, currentaxes, color='r', lw=1.5,\n horizOn=False, vertOn=True, useblit=False)", "def test_manipulate_sliders(self):\n self.manipulate.toggle()\n self.manipulate.focus_slider(\"bri\")\n self.manipulate.change_slider(True, False)\n received_value = self.manipulate.scale_bri.get_value()\n self.assertEqual(received_value, -1)\n self.manipulate.focus_slider(\"con\")\n 
self.assertTrue(self.manipulate.scale_con.is_focus())\n self.manipulate.focus_slider(\"sha\")\n self.assertTrue(self.manipulate.scale_sha.is_focus())\n self.manipulate.focus_slider(\"bri\")\n self.assertTrue(self.manipulate.scale_bri.is_focus())\n self.manipulate.button_clicked(None, False)", "def on_valueSlider_valueChanged(self):\n self.changeSlider = False\n if self.changeCurrspin: self.currspin.setValue(self.getSliderValue())\n self.changeSlider = True", "def sliderHSV(self, client):\r\n\r\n def __nothing():\r\n pass\r\n\r\n windowName = \"slider for ball detection\"\r\n cv2.namedWindow(windowName)\r\n cv2.createTrackbar(\"minS1\", windowName, 43, 60, __nothing)\r\n cv2.createTrackbar(\"minV1\", windowName, 46, 65, __nothing)\r\n cv2.createTrackbar(\"maxH1\", windowName, 10, 20, __nothing)\r\n cv2.createTrackbar(\"minH2\", windowName, 156, 175, __nothing)\r\n while 1:\r\n self.updateFrame(client)\r\n minS1 = cv2.getTrackbarPos(\"minS1\", windowName)\r\n minV1 = cv2.getTrackbarPos(\"minV1\", windowName)\r\n maxH1 = cv2.getTrackbarPos(\"maxH1\", windowName)\r\n minH2 = cv2.getTrackbarPos(\"minH2\", windowName)\r\n minHSV1 = np.array([0, minS1, minV1])\r\n maxHSV1 = np.array([maxH1, 255, 255])\r\n minHSV2 = np.array([minH2, minS1, minV1])\r\n maxHSV2 = np.array([180, 255, 255])\r\n self.updateBallData(client, colorSpace=\"HSV\", minHSV1=minHSV1,\r\n maxHSV1=maxHSV1, minHSV2=minHSV2,\r\n maxHSV2=maxHSV2, saveFrameBin=True, fitting=True)\r\n cv2.imshow(windowName, self._frameBin)\r\n self.showBallPosition()\r\n k = cv2.waitKey(10) & 0xFF\r\n if k == 27:\r\n break\r\n cv2.destroyAllWindows()", "def drawScene(self, event):", "def intSlider(docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", step=int, numberOfPopupMenus=bool, useTemplate=\"string\", width=int, dragCallback=\"string\", value=int, highlightColor=float, annotation=\"string\", changeCommand=\"string\", preventOverride=bool, popupMenuArray=bool, minValue=int, dragCommand=\"string\", exists=bool, maxValue=int, enable=bool, enableBackground=bool, visibleChangeCommand=\"string\", visible=bool, horizontal=bool, fullPathName=bool, dropCallback=\"string\", noBackground=bool, backgroundColor=float, manage=bool, isObscured=bool):\n pass", "def handleEvent(self, action: 'SoHandleEventAction') -> \"void\":\n return _coin.SoVRMLDragSensor_handleEvent(self, action)", "def on_slider(self, slider_index):\n self.slider_has_moved = True\n slider_pos = int(slider_index * self.video_len / self.slider_ticks)\n if abs(slider_pos - self.current_pos()) > 1:\n self.set_frame(slider_pos)\n self.show_frame(self.get_frame())\n self.frame_num = self.current_pos()\n self.slider_num = slider_index", "def valueSlider_doubleClicked(self, *args, **kwargs):\n print \"double!!!\"\n self.setSliderValue(self.defaultValue)\n return QWidget.mouseDoubleClickEvent(self.ui.valueSlider, *args, **kwargs)", "def slider(self, parent, variable, low, high, label):\n widget = Scale(parent, orient='vertical',\n from_=high, to=low, # range of slider\n # tickmarks on the slider \"axis\":\n tickinterval=(high-low)/5.0,\n # the steps of the counter above the slider:\n resolution=(high-low)/100.0,\n label=label, # label printed above the slider\n length=300, # length of slider in pixels\n variable=variable) # slider value is tied to variable\n widget.pack(side='right')\n return widget" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
green slider event handler
def callback_green(*args): global green_int col = "darkgreen" str_val = str(g_slide_val.get()) green_int = code_shrtn(str_val, 100, 30, 60, 80, col) update_display(red_int, green_int, blue_int)
[ "def callback_blue(*args):\n global blue_int\n col = \"blue\"\n str_val = str(b_slide_val.get())\n blue_int = code_shrtn(str_val, 180, 30, 60, 80, col)\n update_display(red_int, green_int, blue_int)", "def callback_red(*args):\n global red_int\n col = \"red\"\n str_val = str(r_slide_val.get())\n red_int = code_shrtn(str_val, 20, 30, 60, 80, col)\n update_display(red_int, green_int, blue_int)", "def _on_slider_pressed(self):\n # This flag will activate fast_draw_slice_at_index\n # Which will redraw sliced images quickly\n self._slider_flag = True", "def slider_released(self):\n self.update_status(\"status\", \"Length Updated\")", "def slider(self, size=(300, 30, 3), spoint=50, scolor=(100, 55, 100)):\n swidth=10\n #swidth=int(5/50*spoint)\n #swidth = np.clip(swidth, 5, spoint)\n swindow=np.zeros(self.size).astype(np.uint8)\n swindow[:self.ssize[0], 0:self.ssize[1]] += np.uint8([255, 255, 255]) \n r1 = np.clip(spoint-swidth, swidth, self.ssize[0]-swidth)\n r2 = np.clip(spoint+swidth, swidth, self.ssize[0]-swidth)\n spoint = int(10/50 * spoint)\n #print(r1, r2, spoint)\n \n \n swindow[r1:r2, :self.ssize[1]] = scolor\n self.point=(spoint, self.point[1])\n #cv2.imshow(\"slider\", swindow.astype(np.uint8))\n return swindow.astype(np.uint8)", "def update_slider(event):\n \n global interval\n interval = round(event)", "def on_valueSlider_valueChanged(self):\n self.changeSlider = False\n if self.changeCurrspin: self.currspin.setValue(self.getSliderValue())\n self.changeSlider = True", "def slider_changed(self, indx):\n if indx == 0 or indx == 1:\n self.combo_box_changed(tab_id=self.tab_index, combo_id=\"0_1\")\n\n # Change the filtered image after changing the noisy image\n if (self.img0_noisy.image is not None) and self.combo_filter.currentIndex() != 0:\n self.combo_box_changed(tab_id=self.tab_index, combo_id=\"0_2\")\n\n elif indx == 2 or indx == 3:\n self.combo_box_changed(tab_id=self.tab_index, combo_id=\"0_2\")\n\n # Test Images Slider\n elif indx == 4:\n pass", "def sliderHSV(self, client):\r\n\r\n def __nothing():\r\n pass\r\n\r\n windowName = \"slider for ball detection\"\r\n cv2.namedWindow(windowName)\r\n cv2.createTrackbar(\"minS1\", windowName, 43, 60, __nothing)\r\n cv2.createTrackbar(\"minV1\", windowName, 46, 65, __nothing)\r\n cv2.createTrackbar(\"maxH1\", windowName, 10, 20, __nothing)\r\n cv2.createTrackbar(\"minH2\", windowName, 156, 175, __nothing)\r\n while 1:\r\n self.updateFrame(client)\r\n minS1 = cv2.getTrackbarPos(\"minS1\", windowName)\r\n minV1 = cv2.getTrackbarPos(\"minV1\", windowName)\r\n maxH1 = cv2.getTrackbarPos(\"maxH1\", windowName)\r\n minH2 = cv2.getTrackbarPos(\"minH2\", windowName)\r\n minHSV1 = np.array([0, minS1, minV1])\r\n maxHSV1 = np.array([maxH1, 255, 255])\r\n minHSV2 = np.array([minH2, minS1, minV1])\r\n maxHSV2 = np.array([180, 255, 255])\r\n self.updateBallData(client, colorSpace=\"HSV\", minHSV1=minHSV1,\r\n maxHSV1=maxHSV1, minHSV2=minHSV2,\r\n maxHSV2=maxHSV2, saveFrameBin=True, fitting=True)\r\n cv2.imshow(windowName, self._frameBin)\r\n self.showBallPosition()\r\n k = cv2.waitKey(10) & 0xFF\r\n if k == 27:\r\n break\r\n cv2.destroyAllWindows()", "def _on_slider_released(self):\n # This flag will deactivate fast_draw_slice_at_index\n self._slider_flag = False\n\n index = self._slice_slider.value()\n cube_views = self._cv_layout.cube_views\n active_cube = self._cv_layout._active_cube\n active_widget = active_cube._widget\n\n # If the active widget is synced then we need to update the image\n # in all the other synced views.\n if 
active_widget.synced and not self._cv_layout._single_viewer_mode:\n for view in cube_views:\n if view._widget.synced:\n view._widget.update_slice_index(index)\n self._cv_layout.synced_index = index\n else:\n # Update the image displayed in the slice in the active view\n active_widget.update_slice_index(index)\n\n # Now update the slice and wavelength text boxes\n self._update_slice_textboxes(index)\n\n specviz_dispatch.changed_dispersion_position.emit(pos=index)", "def handle_mouse_press(self, event):\r\n\r\n self._color_index = (self._color_index + 1) % len(self._colors)\r\n self._color = self._colors[self._color_index]\r\n self._circle.set_fill_color(self._color)\r\n self._circle.set_border_color(self._color)", "def OnSetHue(self, evt=None):\n\t\t#self.hue = self.hueSlider.GetValue() * 2\n\t\t#print( 'new hue: {}'.format( self.hue ) )\n\t\t#if( self.SetHueCallback ):\n\t\t\t#self.SetHueCallback( self.hue )\n\t\tself._OnSet( self.hue, 'hue', self.hueSlider, self.SetHueCallback )", "def _updateValue(self,event):\n self.gain.set(self.slider.get())\n self.value.set(str(self._Gain(self.gain.get()))+\" dB\")\n self.valueLbl.update()", "def min_slider_changed_handler(self):\n if self.min_slider.value() > self.max_slider.value():\n self.min_slider.setValue(self.max_slider.value())\n self.cs.set_min(self.min_slider.value())\n self.update_test_image()", "def on_rb_colour_click(self, event):\n del event\n print(self.colours[self.colour_box.GetSelection()])", "def valueSlider_doubleClicked(self, *args, **kwargs):\n print \"double!!!\"\n self.setSliderValue(self.defaultValue)\n return QWidget.mouseDoubleClickEvent(self.ui.valueSlider, *args, **kwargs)", "def fig_callback(self, event):\n global multi\n ax=event.inaxes\n cal=self.calibrator.activegamma\n \n #if self.selecting: return\n \n for source in cal:\n axes=self.figures[source]['axes']\n if ax in axes:\n currentaxes=axes\n currentfigure=self.figures[source]['figure']\n \n self.multi=MultiCursor(currentfigure.canvas, currentaxes, color='r', lw=1.5,\n horizOn=False, vertOn=True, useblit=False)", "def on_range_update(event):\n label = event.currentTarget.nextElementSibling\n label.innerText = event.currentTarget.value\n plot_waveform()", "def test_small_change(self):\n self.assertEqual(self.slider.small_change(), 0.1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
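The green and red slider callbacks in these rows follow one pattern: read the slider's Tk variable, redraw a colour patch, then refresh the combined display. A minimal, self-contained Python 3 sketch of that pattern follows; the widget names, the 0-255 range and the hex-swatch update are assumptions standing in for the rows' code_shrtn and update_display helpers, and the *args signature is handled here via a variable trace, which the original wiring likely (but not certainly) uses.

import tkinter as tk

def make_channel_slider(parent, name, on_change):
    """Build a 0-255 slider bound to an IntVar; writes trigger on_change(*args)."""
    var = tk.IntVar(value=0)
    scale = tk.Scale(parent, from_=0, to=255, orient="horizontal",
                     label=name, variable=var)
    scale.pack(fill="x")
    # trace_add passes (varname, index, mode) to the callback, which is why
    # handlers written in this style accept *args and ignore them.
    var.trace_add("write", on_change)
    return var

root = tk.Tk()
swatch = tk.Label(root, text="colour", width=20, height=4)
swatch.pack()

def update_display(*args):
    r, g, b = red_var.get(), green_var.get(), blue_var.get()
    swatch.configure(bg=f"#{r:02x}{g:02x}{b:02x}")  # combine channels into a Tk colour

red_var = make_channel_slider(root, "red", update_display)
green_var = make_channel_slider(root, "green", update_display)
blue_var = make_channel_slider(root, "blue", update_display)
root.mainloop()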
blue slider event handler
def callback_blue(*args):
    global blue_int
    col = "blue"
    str_val = str(b_slide_val.get())
    blue_int = code_shrtn(str_val, 180, 30, 60, 80, col)
    update_display(red_int, green_int, blue_int)
[ "def slider_released(self):\n self.update_status(\"status\", \"Length Updated\")", "def _on_slider_pressed(self):\n # This flag will activate fast_draw_slice_at_index\n # Which will redraw sliced images quickly\n self._slider_flag = True", "def slider_changed(self, indx):\n if indx == 0 or indx == 1:\n self.combo_box_changed(tab_id=self.tab_index, combo_id=\"0_1\")\n\n # Change the filtered image after changing the noisy image\n if (self.img0_noisy.image is not None) and self.combo_filter.currentIndex() != 0:\n self.combo_box_changed(tab_id=self.tab_index, combo_id=\"0_2\")\n\n elif indx == 2 or indx == 3:\n self.combo_box_changed(tab_id=self.tab_index, combo_id=\"0_2\")\n\n # Test Images Slider\n elif indx == 4:\n pass", "def callback_green(*args):\n global green_int\n col = \"darkgreen\"\n str_val = str(g_slide_val.get())\n green_int = code_shrtn(str_val, 100, 30, 60, 80, col)\n update_display(red_int, green_int, blue_int)", "def _on_slider_released(self):\n # This flag will deactivate fast_draw_slice_at_index\n self._slider_flag = False\n\n index = self._slice_slider.value()\n cube_views = self._cv_layout.cube_views\n active_cube = self._cv_layout._active_cube\n active_widget = active_cube._widget\n\n # If the active widget is synced then we need to update the image\n # in all the other synced views.\n if active_widget.synced and not self._cv_layout._single_viewer_mode:\n for view in cube_views:\n if view._widget.synced:\n view._widget.update_slice_index(index)\n self._cv_layout.synced_index = index\n else:\n # Update the image displayed in the slice in the active view\n active_widget.update_slice_index(index)\n\n # Now update the slice and wavelength text boxes\n self._update_slice_textboxes(index)\n\n specviz_dispatch.changed_dispersion_position.emit(pos=index)", "def on_valueSlider_valueChanged(self):\n self.changeSlider = False\n if self.changeCurrspin: self.currspin.setValue(self.getSliderValue())\n self.changeSlider = True", "def callback_red(*args):\n global red_int\n col = \"red\"\n str_val = str(r_slide_val.get())\n red_int = code_shrtn(str_val, 20, 30, 60, 80, col)\n update_display(red_int, green_int, blue_int)", "def update_slider(event):\n \n global interval\n interval = round(event)", "def slider(self, size=(300, 30, 3), spoint=50, scolor=(100, 55, 100)):\n swidth=10\n #swidth=int(5/50*spoint)\n #swidth = np.clip(swidth, 5, spoint)\n swindow=np.zeros(self.size).astype(np.uint8)\n swindow[:self.ssize[0], 0:self.ssize[1]] += np.uint8([255, 255, 255]) \n r1 = np.clip(spoint-swidth, swidth, self.ssize[0]-swidth)\n r2 = np.clip(spoint+swidth, swidth, self.ssize[0]-swidth)\n spoint = int(10/50 * spoint)\n #print(r1, r2, spoint)\n \n \n swindow[r1:r2, :self.ssize[1]] = scolor\n self.point=(spoint, self.point[1])\n #cv2.imshow(\"slider\", swindow.astype(np.uint8))\n return swindow.astype(np.uint8)", "def sliderHSV(self, client):\r\n\r\n def __nothing():\r\n pass\r\n\r\n windowName = \"slider for ball detection\"\r\n cv2.namedWindow(windowName)\r\n cv2.createTrackbar(\"minS1\", windowName, 43, 60, __nothing)\r\n cv2.createTrackbar(\"minV1\", windowName, 46, 65, __nothing)\r\n cv2.createTrackbar(\"maxH1\", windowName, 10, 20, __nothing)\r\n cv2.createTrackbar(\"minH2\", windowName, 156, 175, __nothing)\r\n while 1:\r\n self.updateFrame(client)\r\n minS1 = cv2.getTrackbarPos(\"minS1\", windowName)\r\n minV1 = cv2.getTrackbarPos(\"minV1\", windowName)\r\n maxH1 = cv2.getTrackbarPos(\"maxH1\", windowName)\r\n minH2 = cv2.getTrackbarPos(\"minH2\", windowName)\r\n minHSV1 = np.array([0, 
minS1, minV1])\r\n maxHSV1 = np.array([maxH1, 255, 255])\r\n minHSV2 = np.array([minH2, minS1, minV1])\r\n maxHSV2 = np.array([180, 255, 255])\r\n self.updateBallData(client, colorSpace=\"HSV\", minHSV1=minHSV1,\r\n maxHSV1=maxHSV1, minHSV2=minHSV2,\r\n maxHSV2=maxHSV2, saveFrameBin=True, fitting=True)\r\n cv2.imshow(windowName, self._frameBin)\r\n self.showBallPosition()\r\n k = cv2.waitKey(10) & 0xFF\r\n if k == 27:\r\n break\r\n cv2.destroyAllWindows()", "def test_manipulate_sliders(self):\n self.manipulate.toggle()\n self.manipulate.focus_slider(\"bri\")\n self.manipulate.change_slider(True, False)\n received_value = self.manipulate.scale_bri.get_value()\n self.assertEqual(received_value, -1)\n self.manipulate.focus_slider(\"con\")\n self.assertTrue(self.manipulate.scale_con.is_focus())\n self.manipulate.focus_slider(\"sha\")\n self.assertTrue(self.manipulate.scale_sha.is_focus())\n self.manipulate.focus_slider(\"bri\")\n self.assertTrue(self.manipulate.scale_bri.is_focus())\n self.manipulate.button_clicked(None, False)", "def slider_disconnect(self):\n self.sender().valueChanged.disconnect()", "def valueSlider_doubleClicked(self, *args, **kwargs):\n print \"double!!!\"\n self.setSliderValue(self.defaultValue)\n return QWidget.mouseDoubleClickEvent(self.ui.valueSlider, *args, **kwargs)", "def on_slider(self, slider_index):\n self.slider_has_moved = True\n slider_pos = int(slider_index * self.video_len / self.slider_ticks)\n if abs(slider_pos - self.current_pos()) > 1:\n self.set_frame(slider_pos)\n self.show_frame(self.get_frame())\n self.frame_num = self.current_pos()\n self.slider_num = slider_index", "def slider(self, parent, variable, low, high, label):\n widget = Scale(parent, orient='vertical',\n from_=high, to=low, # range of slider\n # tickmarks on the slider \"axis\":\n tickinterval=(high-low)/5.0,\n # the steps of the counter above the slider:\n resolution=(high-low)/100.0,\n label=label, # label printed above the slider\n length=300, # length of slider in pixels\n variable=variable) # slider value is tied to variable\n widget.pack(side='right')\n return widget", "def on_range_update(event):\n label = event.currentTarget.nextElementSibling\n label.innerText = event.currentTarget.value\n plot_waveform()", "def updateButtonCallback(self):\n\n\t\t#if no signal is imported, do nothing\n\t\tif not self.signalImported:\n\t\t\tlogger.logData(source = \"Signal handler\",priority=\"WARN\",msgType=\"Update failed\",msgData=())\n\n\t\t\treturn\n\n\t\t#clip all channel samples corresponding to the times on slider\n\t\tself.signal = self.signal[self.sweepStartSample:self.sweepEndSample,:]\n\n\t\tlogger.logData(source = \"Signal handler\",priority=\"INFO\",msgType=\"Update signal\",msgData=())\n\n\t\t#update variables and plot with clipped signal\n\t\tself.analyzeSignal()", "def _updateValue(self,event):\n self.gain.set(self.slider.get())\n self.value.set(str(self._Gain(self.gain.get()))+\" dB\")\n self.valueLbl.update()", "def fig_callback(self, event):\n global multi\n ax=event.inaxes\n cal=self.calibrator.activegamma\n \n #if self.selecting: return\n \n for source in cal:\n axes=self.figures[source]['axes']\n if ax in axes:\n currentaxes=axes\n currentfigure=self.figures[source]['figure']\n \n self.multi=MultiCursor(currentfigure.canvas, currentaxes, color='r', lw=1.5,\n horizOn=False, vertOn=True, useblit=False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
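The blue callback is identical to the green one apart from the colour constant, and all three hand their integers to an update_display helper that the rows do not show. If that helper combines the channels into a Tk colour string, it could look like this small sketch; the function name and the range check are assumptions, not code from the dataset.

def rgb_to_hex(red_int, green_int, blue_int):
    """Combine three 0-255 channel values into a Tk-style colour string."""
    for value in (red_int, green_int, blue_int):
        if not 0 <= value <= 255:
            raise ValueError(f"channel value out of range: {value}")
    return f"#{red_int:02x}{green_int:02x}{blue_int:02x}"

print(rgb_to_hex(20, 100, 180))  # -> #1464b4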
Creates a new object from meta information. Id should be a unique number; meta is an array of length 2 containing constructor name and parameters dictionary.
def createFromMeta(self, Id, meta):
    if len(meta) == 3:
        ctorname, props, alias = meta
    elif len(meta) == 2:
        ctorname, props = meta
        alias = None
    else:
        assert 'wrong meta'
    ctor = _findType(ctorname)
    if ctorname == 'marketsim.Side._SellSide':
        obj = Side.Sell
    elif ctorname == 'marketsim.Side._BuySide':
        obj = Side.Buy
    elif inspect.isclass(ctor):
        dst_properties = rtti.properties_t(ctor)
        converted = dict()
        for k, v in props.iteritems():
            converted[k] = self._convert(dst_properties, k, v)
        obj = ctor(**converted)
        if alias is not None:
            obj._alias = alias
    else:
        assert inspect.isfunction(ctor)
        obj = ctor
    self._insertNew(Id, obj)
    return obj
[ "def create(cls, **kwargs):", "def do_create(self, arg):\n if len(arg) <= 0:\n print(\"** class name missing **\")\n else:\n arg = arg.split()[0]\n if arg in self.valid_class:\n new_obj = eval(arg)()\n new_obj.save()\n print(new_obj.id)\n else:\n print(\"** class doesn't exist **\")", "def do_create(self, arg):\n \"\"\"\n method that creates a new instance of a class, saves it\n (to the JSON file) and prints the id. Ex: $ create BaseModel\n \"\"\"\n if not arg:\n print(\"** class name missing **\")\n elif arg not in HBNBCommand.className.keys():\n print(\"** class doesn't exist **\")\n else:\n obj = HBNBCommand.className[arg]()\n HBNBCommand.className[arg].save(obj)\n print(obj.id)", "def createObject(self, *args): \r\n return self._wrappedClass(*args)", "def create(objPoints, dictionary, ids) -> retval:\n ...", "def make_object(self, data):\n return Client(**data)", "def create_object(self, data):\n return Recipe(**data)", "def create_instance(self, instance: Model, fields: Dict[Field, Any]):", "def create_object(cls, id_object, **kwargs):\n if not cls.factories.has_key(id_object):\n print(\"Not known filter: {}\".format(id_object))\n return None\n # ToDo eval is not safe - anything could be inserted in.\n# cls.factories[id_object] = \\\n# cls.get_class(id_object)\n# print(id_object)\n return cls.factories[id_object](**kwargs)", "def new_object(self, kind, name, initialize, active=True, fit=True, plot=True):\n\n App.log.debug(\"new_object()\")\n\n ### Check for existing name\n if name in self.collection.get_names():\n ## Create a new name\n # Ends with number?\n App.log.debug(\"new_object(): Object name exists, changing.\")\n match = re.search(r'(.*[^\\d])?(\\d+)$', name)\n if match: # Yes: Increment the number!\n base = match.group(1) or ''\n num = int(match.group(2))\n name = base + str(num + 1)\n else: # No: add a number!\n name += \"_1\"\n\n # Create object\n classdict = {\n \"gerber\": FlatCAMGerber,\n \"excellon\": FlatCAMExcellon,\n \"cncjob\": FlatCAMCNCjob,\n \"geometry\": FlatCAMGeometry\n }\n\n App.log.debug(\"Calling object constructor...\")\n obj = classdict[kind](name)\n obj.units = self.options[\"units\"] # TODO: The constructor should look at defaults.\n\n # Set default options from self.options\n for option in self.options:\n if option.find(kind + \"_\") == 0:\n oname = option[len(kind)+1:]\n obj.options[oname] = self.options[option]\n\n # Initialize as per user request\n # User must take care to implement initialize\n # in a thread-safe way as is is likely that we\n # have been invoked in a separate thread.\n initialize(obj, self)\n\n # Check units and convert if necessary\n if self.options[\"units\"].upper() != obj.units.upper():\n self.inform.emit(\"Converting units to \" + self.options[\"units\"] + \".\")\n obj.convert_units(self.options[\"units\"])\n\n FlatCAMApp.App.log.debug(\"Moving new object back to main thread.\")\n\n # Move the object to the main thread and let the app know that it is available.\n obj.moveToThread(QtGui.QApplication.instance().thread())\n self.object_created.emit(obj)\n\n return obj", "def create_instance(self, name: str, **kwargs) -> RuntimeInstance.Params:", "def __init__(self, obj):\n self.obj = obj\n if isinstance(obj, VersionData):\n self.id = obj.id\n elif isinstance(obj, str):\n self.id = self.from_string(obj)\n elif isinstance(obj, Version):\n self.id = self.from_packaging_version(version=obj)\n elif isinstance(obj, SpecifierSet):\n self.id = self.from_specifier_set(version=obj)\n else:\n self.id = str(obj)", "def __init__(self,\n 
*,\n asset_id: str = None,\n asset_type: str = None,\n catalog_id: str = None,\n create_time: datetime = None,\n creator_id: str = None,\n description: str = None,\n href: str = None,\n name: str = None,\n origin_country: str = None,\n project_id: str = None,\n resource_key: str = None,\n size: int = None,\n source_system: dict = None,\n tags: List[str] = None,\n usage: 'AssetSystemMetadataUsage' = None) -> None:\n self.asset_id = asset_id\n self.asset_type = asset_type\n self.catalog_id = catalog_id\n self.create_time = create_time\n self.creator_id = creator_id\n self.description = description\n self.href = href\n self.name = name\n self.origin_country = origin_country\n self.project_id = project_id\n self.resource_key = resource_key\n self.size = size\n self.source_system = source_system\n self.tags = tags\n self.usage = usage", "def create_instance(self, \n qid: str, additional_keys: Dict[str, Union[str, int]]={}, **targets) -> Instance:\n instance = Instance(qid=qid, vid=0, additional_keys=additional_keys)\n instance.set_entries(**targets)\n return instance", "def _create(cls, model_class, *args, **kwargs):\n return model_class(*args, **kwargs)", "def manage_addZClass(self, id, title='', baseclasses=[],\n meta_type='', CreateAFactory=0, REQUEST=None,\n zope_object=0):\n if bad_id(id) is not None:\n raise 'Bad Request', (\n 'The id %s is invalid as a class name.' % id)\n if not meta_type: meta_type=id\n\n r={}\n for data in self.aq_acquire('_getProductRegistryData')('zclasses'):\n r['%(product)s/%(id)s' % data]=data['meta_class']\n\n bases=[]\n for b in baseclasses:\n if Products.meta_classes.has_key(b):\n bases.append(Products.meta_classes[b])\n elif r.has_key(b):\n bases.append(r[b])\n else:\n raise 'Invalid class', b\n\n Z=ZClass(id, title, bases, zope_object=zope_object)\n Z._zclass_.meta_type=meta_type\n self._setObject(id, Z)\n\n if CreateAFactory and meta_type:\n self.manage_addDTMLMethod(\n id+'_addForm',\n id+' constructor input form',\n addFormDefault % {'id': id, 'meta_type': meta_type},\n )\n constScript = PythonScript(id+'_add')\n constScript.write(addDefault % {'id': id, 'title':id+' constructor'})\n self._setObject(constScript.getId(), constScript)\n self.manage_addPermission(\n id+'_add_permission',\n id+' constructor permission',\n 'Add %ss' % meta_type\n )\n self.manage_addPrincipiaFactory(\n id+'_factory',\n id+' factory',\n meta_type,\n id+'_addForm',\n 'Add %ss' % meta_type\n )\n\n Z=self._getOb(id)\n Z.propertysheets.permissions.manage_edit(\n selected=['Add %ss' % id])\n Z.manage_setPermissionMapping(\n permission_names=['Create class instances'],\n class_permissions=['Add %ss' % meta_type]\n )\n if REQUEST is not None:\n return self.manage_main(self,REQUEST, update_menu=1)", "def __init__(self, *args):\n this = _Field.new_Material(*args)\n try: self.this.append(this)\n except: self.this = this", "def read_constructor(is_private, datatype, tokens):\n args = parse_args(tokens)\n body = parse_body(tokens)\n return {'op': 'constructor',\n 'name': datatype,\n 'args': args,\n 'body': body,\n 'private': is_private }", "def __init__(self, id, manufacturer, model, year, price, wheel_size, category, spec1, spec2, spec3):\n\n self._id = id\n self._manufacturer = manufacturer\n self._model = model\n self._year = year\n self._price = price\n self._wheel_size = wheel_size\n self._type = category\n self._spec1 = spec1\n self._spec2 = spec2\n self._spec3 = spec3\n self.bike = [id, manufacturer, model, year, price, wheel_size, category, spec1, spec2, spec3]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
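createFromMeta above is Python 2 code tied to marketsim's registry (_findType, rtti, Side). The underlying pattern, rebuilding an object from a (constructor name, properties[, alias]) tuple, can be sketched generically in Python 3 as below; find_type, the fractions example and the _alias handling are illustrative assumptions, not that library's API.

import importlib

def find_type(qualified_name):
    """Resolve 'package.module.Name' to the class or function object."""
    module_name, _, attr = qualified_name.rpartition(".")
    return getattr(importlib.import_module(module_name), attr)

def create_from_meta(meta):
    """meta is (ctor_name, props) or (ctor_name, props, alias)."""
    ctor_name, props, *rest = meta
    alias = rest[0] if rest else None
    ctor = find_type(ctor_name)
    obj = ctor(**props) if isinstance(ctor, type) else ctor
    if alias is not None:
        obj._alias = alias
    return obj

# usage: rebuild a fractions.Fraction from its meta description
frac = create_from_meta(("fractions.Fraction", {"numerator": 3, "denominator": 4}))
print(frac)  # 3/4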
Processes an object with id = k_id
def visit(k_id):
    if k_id not in rv:  # check that it hasn't been yet processed
        dumped = self.tojson(k_id)  # getting dump representation
        rv[k_id] = dumped  # storing it in the dictionary
        for p in dumped[1].itervalues():  # iterating its fields
            visit_if_ref(p)
[ "def visit(k_id):\r\n if k_id not in rv: # check that it hasn't been yet processed\r\n dumped = self.dump(k_id) # getting dump representation\r\n rv[k_id] = dumped # storing it in the dictionary\r\n if len(dumped) > 1: # if it has properties\r\n for _,p in dumped[1].itervalues(): # iterating its fields\r\n visit_if_ref(p)\r\n if type(p) is list: # if a field is list (other sequences are to be processed in future)\r\n for e in p: # for each its element\r\n visit_if_ref(e)", "def get_vineyard_object_id(self, chunk_key):\n raise NotImplementedError", "def update(self, obj, id):", "def test_patch_obj_id_get(self):\n pass", "def run_kf_id_query(ctx, kf_id, key):\n host = ctx.obj[\"host\"]\n for e in yield_entities_from_kfids(host, [kf_id], show_progress=False):\n entity_handler(e, key)", "def process(self, obj):\n if isinstance(obj, np.ndarray):\n if obj.dtype == self.dtype and obj.size >= self.minlen:\n sid = self.objtosid.get(id(obj))\n if sid is None:\n sid = self.uid+str(self.id)\n self.sidtoobj[sid] = obj\n self.objtosid[id(obj)]= sid\n self.sids.append(sid)\n self.shapes.append(obj.shape)\n self.id+=1\n return sid \n return None if self.nexthandler is None else self.nexthandler.process(obj)", "def test_patch_obj_id_put(self):\n pass", "def object_id(obj):\n if isinstance(obj,ObjectWrapper):\n return obj.id()\n else:\n return id(obj)", "def do_stuff(self, pk):", "def get_object_key(obj: rhp.RoomObject):\n return ObjectKey(obj.clean_category, obj.center)", "def _id(self, _id):\n self.__id = _id", "def id(obj):\n try:\n return key(obj).id_or_name()\n except AttributeError:\n return obj", "def register_object( self, obj ):\n obj_id = id( obj )\n self.objects[ obj_id ] = obj\n return obj_id", "def parser(self, id):\n if not isinstance(id, str) or not re.match('^[0-9a-fA-F]{24}$', id):\n raise ValueError('objectid is 12 bytes hex str.')\n self.timestamp = int(id[:8], 16)\n self.host = id[8:14]\n self.pid = id[14:18]\n self.count = int(id[18:24], 16)", "def removeidfobject(self, idfobject):\n key = idfobject.key.upper()\n self.idfobjects[key].remove(idfobject)", "def fetch( self, obj, id ):\n\t\treturn obj.ById( id )", "def get_object_by_id(self,id):\n return self.objects[id]", "def getobject(self, key, name):\n return getobject(self.idfobjects, key, name)", "def process_docker_image_id(self, param_imageid):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
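This row and the following one describe the same memoised depth-first dump: each object id is serialised once into rv, and the walk then recurses into any field that is itself a reference. A standalone sketch of that pattern over a plain dict-of-dicts object graph; the graph layout and the string-id convention are invented for the example.

def dump_reachable(graph, start_id):
    """Dump every object reachable from start_id exactly once.

    graph maps object id -> {field_name: value}; a string value that is
    also a key of the graph is treated as a reference to another object.
    """
    rv = {}

    def visit_if_ref(value):
        if isinstance(value, list):
            for element in value:  # lists of references are walked element by element
                visit_if_ref(element)
        elif isinstance(value, str) and value in graph:
            visit(value)

    def visit(k_id):
        if k_id not in rv:  # process each id only once
            rv[k_id] = dict(graph[k_id])
            for field_value in rv[k_id].values():
                visit_if_ref(field_value)

    visit(start_id)
    return rv

graph = {
    "root": {"name": "portfolio", "children": ["a", "b"]},
    "a": {"name": "asset A", "parent": "root"},
    "b": {"name": "asset B", "parent": "root"},
}
print(sorted(dump_reachable(graph, "root")))  # ['a', 'b', 'root']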
Processes an object with id = k_id
def visit(k_id):
    if k_id not in rv:  # check that it hasn't been yet processed
        dumped = self.dump(k_id)  # getting dump representation
        rv[k_id] = dumped  # storing it in the dictionary
        if len(dumped) > 1:  # if it has properties
            for _, p in dumped[1].itervalues():  # iterating its fields
                visit_if_ref(p)
                if type(p) is list:  # if a field is list (other sequences are to be processed in future)
                    for e in p:  # for each its element
                        visit_if_ref(e)
[ "def visit(k_id):\r\n if k_id not in rv: # check that it hasn't been yet processed\r\n dumped = self.tojson(k_id) # getting dump representation\r\n rv[k_id] = dumped # storing it in the dictionary\r\n for p in dumped[1].itervalues(): # iterating its fields\r\n visit_if_ref(p)", "def get_vineyard_object_id(self, chunk_key):\n raise NotImplementedError", "def update(self, obj, id):", "def test_patch_obj_id_get(self):\n pass", "def run_kf_id_query(ctx, kf_id, key):\n host = ctx.obj[\"host\"]\n for e in yield_entities_from_kfids(host, [kf_id], show_progress=False):\n entity_handler(e, key)", "def process(self, obj):\n if isinstance(obj, np.ndarray):\n if obj.dtype == self.dtype and obj.size >= self.minlen:\n sid = self.objtosid.get(id(obj))\n if sid is None:\n sid = self.uid+str(self.id)\n self.sidtoobj[sid] = obj\n self.objtosid[id(obj)]= sid\n self.sids.append(sid)\n self.shapes.append(obj.shape)\n self.id+=1\n return sid \n return None if self.nexthandler is None else self.nexthandler.process(obj)", "def test_patch_obj_id_put(self):\n pass", "def object_id(obj):\n if isinstance(obj,ObjectWrapper):\n return obj.id()\n else:\n return id(obj)", "def do_stuff(self, pk):", "def get_object_key(obj: rhp.RoomObject):\n return ObjectKey(obj.clean_category, obj.center)", "def _id(self, _id):\n self.__id = _id", "def id(obj):\n try:\n return key(obj).id_or_name()\n except AttributeError:\n return obj", "def register_object( self, obj ):\n obj_id = id( obj )\n self.objects[ obj_id ] = obj\n return obj_id", "def parser(self, id):\n if not isinstance(id, str) or not re.match('^[0-9a-fA-F]{24}$', id):\n raise ValueError('objectid is 12 bytes hex str.')\n self.timestamp = int(id[:8], 16)\n self.host = id[8:14]\n self.pid = id[14:18]\n self.count = int(id[18:24], 16)", "def removeidfobject(self, idfobject):\n key = idfobject.key.upper()\n self.idfobjects[key].remove(idfobject)", "def fetch( self, obj, id ):\n\t\treturn obj.ById( id )", "def get_object_by_id(self,id):\n return self.objects[id]", "def getobject(self, key, name):\n return getobject(self.idfobjects, key, name)", "def process_docker_image_id(self, param_imageid):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load gallery image list
def read_gallery_list(self):
    pass
[ "def db_get_images(galleryname):\n \n return list_of_Img_objects", "def display_gallery():\n\n images = db.session.query(Image).all()\n\n return render_template('all_images.html',\n images=images)", "def test_core_get_gallery_images_v1(self):\n pass", "def loadImgs(self, ids=[]):\n ids = ids if isinstance(ids, list) else [ids]\n ids = self._filterImgIds(ids)\n if len(ids) == 0:\n return []\n images = self.dataset['images']\n return [images[id] for id in ids]", "def load_multiple_images_UI(self):\n path_list = get_filenames_UI()\n if path_list:\n self.load_multiple_images(path_list)", "def get_images(self):\n pass", "def loadImgs(self, ids=[]):\n # log.info(\"-------------------------------->\")\n if type(ids) == list:\n return [self.imgs[x] for x in ids]\n else:\n return [self.imgs[ids]]", "def make_image_list(image_dir):", "def load_multiple_images(self, filepath_list):\n self.image = Image.from_multiples(filepath_list)", "def load():\r\n global main, il\r\n il = wx.ImageList(24, 24, True)\r\n for item in main.items:\r\n for child in item.children:\r\n _loadImage(child)\r\n _loadImage(item)", "def load_img_collection(path):\n image_path_list = join(path, '*.jpg')\n image_list = io.ImageCollection(image_path_list, load_func=load_leaf_img)\n return image_list", "def test_core_get_gallery_images_scope_v1(self):\n pass", "def load_images(path):\r\n images = []\r\n for file_name in os.listdir(path):\r\n each_image = games.load_image(path + os.sep + file_name).convert()\r\n images.append(each_image)\r\n return images", "def test_core_get_gallery_images_folder_v1(self):\n pass", "def load():\n images = []\n for p in Path(DATASET_PATH).rglob('*' + PNG):\n images.append(str(p))\n return images", "def display_images(cls):\n cls.objects.all()", "def refresh_photolist(self):\n\n app = App.get_running_app()\n\n #Get photo list\n self.photos = []\n if self.type == 'Album':\n self.folder_title = 'Album: \"'+self.target+'\"'\n for albuminfo in app.albums:\n if albuminfo['name'] == self.target:\n photo_paths = albuminfo['photos']\n for fullpath in photo_paths:\n photoinfo = app.Photo.exist(fullpath)\n if photoinfo:\n self.photos.append(photoinfo)\n elif self.type == 'Tag':\n self.folder_title = 'Tagged As: \"'+self.target+'\"'\n self.photos = app.Tag.photos(self.target)\n else:\n self.folder_title = 'Folder: \"'+self.target+'\"'\n self.photo = app.session.query(Photo).filter_by(id=self.target).first()\n self.photos = self.photo.folder.photos\n\n #Sort photos\n if self.sort_method == 'Imported':\n sorted_photos = sorted(self.photos, key=lambda x: x.import_date, reverse=self.sort_reverse)\n elif self.sort_method == 'Modified':\n sorted_photos = sorted(self.photos, key=lambda x: x.modify_date, reverse=self.sort_reverse)\n elif self.sort_method == 'Owner':\n sorted_photos = sorted(self.photos, key=lambda x: x.owner, reverse=self.sort_reverse)\n elif self.sort_method == 'Name':\n sorted_photos = sorted(self.photos, key=lambda x: os.original_file, reverse=self.sort_reverse)\n else:\n sorted_photos = sorted(self.photos, key=lambda x: x.original_date, reverse=self.sort_reverse)\n self.photos = sorted_photos", "def get_images(ibs, gid_list):\n gpath_list = ibs.get_image_paths(gid_list)\n image_list = [gtool.imread(gpath) for gpath in gpath_list]\n return image_list", "def loadImageList(self):\n listFiles = os.listdir(self.data_path)\n exclude_files = ['.DS_Store', 'conf.json', 'README.txt']\n listNames = [f for f in listFiles if f not in exclude_files]\n nListNames = range(1,len(listNames)+1)\n dictImgNames = 
dict(zip(nListNames, listNames))\n return dictImgNames" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
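read_gallery_list above is only a stub; a common way to implement such a method is to walk an image directory and return the sorted paths. A sketch under that assumption (the directory argument and the extension set are not from the row):

from pathlib import Path

IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".gif", ".bmp"}

def read_gallery_list(gallery_dir):
    """Return the sorted list of image file paths found under gallery_dir."""
    gallery = Path(gallery_dir)
    return sorted(
        str(path)
        for path in gallery.rglob("*")
        if path.suffix.lower() in IMAGE_EXTENSIONS
    )

for image_path in read_gallery_list("photos"):  # usage; assumes a ./photos folder
    print(image_path)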
VERY primitive implementation of the Newton-Raphson method. Assumes the guess is close to the final result and always performs 100 iterations.
def newton(P, x0, niter, paramn):
    """ Needs improvement """
    # Find a new initial guess closer to the desired pressure
    # m = -(paramn[1]/paramn[0])
    # guess = (P/m) + x0
    x = x0 - 5
    while vinet(x, paramn[0], paramn[1], paramn[2]) < P:
        x0 = x0 - 5
        x = x0
    guess = 0.5 * (x + x0)
    for i in range(1, niter):
        x = guess - (vinet(guess, paramn[0], paramn[1], paramn[2]) - P) / dvinet(guess, paramn)
        guess = x
    return x
[ "def newton(x):\r\n\r\n # Initialize the tolerance and estimate\r\n tolerance = 0.000001\r\n estimate = 1.0\r\n\r\n # Perform the successive approximations\r\n while True:\r\n estimate = (estimate + x / estimate) / 2\r\n difference = abs(x - estimate ** 2)\r\n if difference <= tolerance:\r\n break\r\n return estimate", "def newton_method(self, x_0):\n\n self.error_a = 101\n i = 0\n while i < self.n_iter and self.exp_err < self.error_a:\n\n x_1 = x_0-self.function(x_0)/self.function_prime(x_0)\n\n if x_1 != 0:\n self.error_a = error_formula(x_0, x_1)\n\n x_0 = x_1\n i += 1\n\n print(\"Current iteration for newton method is %d, with root value of %.4f\" %(i, x_0))\n print(\"Final result using the newton method is %.4f with %d iterations\" %(x_0, i))", "def newtonraphson_method(f, x0, epsilon=10**-4, nMax=100):\n n = 1\n f_ = derive(f)\n while n <= nMax:\n if (f_(x0)==0):\n print(\"Error!, division by zero.\")\n return\n x1 = x0 - (f(x0) / f_(x0))\n print(\"x0: {}, x1: {}\".format(x0, x1))\n if (x1-x0<epsilon):\n print(\"\\nThe root is: {}\".format(x1))\n return x1\n else:\n x0=x1\n return False", "def _newton_rhaphson(self, X, T, E, initial_beta=None, step_size=1.,\n precision=10e-5, show_progress=True, include_likelihood=False):\n assert precision <= 1., \"precision must be less than or equal to 1.\"\n n, d = X.shape\n\n # Want as bools\n E = E.astype(bool)\n\n # make sure betas are correct size.\n if initial_beta is not None:\n assert initial_beta.shape == (d, 1)\n beta = initial_beta\n else:\n beta = np.zeros((d, 1))\n\n # Method of choice is just efron right now\n if self.tie_method == 'Efron':\n get_gradients = self._get_efron_values\n else:\n raise NotImplementedError(\"Only Efron is available.\")\n\n i = 1\n converging = True\n # 50 iterations steps with N-R is a lot.\n # Expected convergence is ~10 steps\n while converging and i < 50 and step_size > 0.001:\n\n if self.strata is None:\n output = get_gradients(X.values, beta, T.values, E.values, include_likelihood=include_likelihood)\n h, g = output[:2]\n else:\n g = np.zeros_like(beta).T\n h = np.zeros((beta.shape[0], beta.shape[0]))\n ll = 0\n for strata in np.unique(X.index):\n stratified_X, stratified_T, stratified_E = X.loc[[strata]], T.loc[[strata]], E.loc[[strata]]\n output = get_gradients(stratified_X.values, beta, stratified_T.values, stratified_E.values, include_likelihood=include_likelihood)\n _h, _g = output[:2]\n g += _g\n h += _h\n ll += output[2] if include_likelihood else 0\n\n if self.penalizer > 0:\n # add the gradient and hessian of the l2 term\n g -= self.penalizer * beta.T\n h.flat[::d + 1] -= self.penalizer\n\n delta = solve(-h, step_size * g.T)\n if np.any(np.isnan(delta)):\n raise ValueError(\"delta contains nan value(s). 
Convergence halted.\")\n # Only allow small steps\n if norm(delta) > 10:\n step_size *= 0.5\n continue\n\n beta += delta\n # Save these as pending result\n hessian, gradient = h, g\n\n if norm(delta) < precision:\n converging = False\n\n if ((i % 10) == 0) and show_progress:\n print(\"Iteration %d: delta = %.5f\" % (i, norm(delta)))\n i += 1\n\n self._hessian_ = hessian\n self._score_ = gradient\n if include_likelihood:\n self._log_likelihood = output[-1] if self.strata is None else ll\n if show_progress:\n print(\"Convergence completed after %d iterations.\" % (i))\n return beta", "def newton(F, J, x, eps):\n F_value = F(x)\n iteration_counter = 0\n while iteration_counter < 100:\n delta = np.linalg.solve(J(x), -F_value)\n x = x + delta\n F_value = F(x)\n iteration_counter += 1\n\n return x, iteration_counter", "def solve_newton(f, df, x0, epsilon=1E-8, max_iter=100):\n xn = x0\n for n in range(0, max_iter):\n fxn = f(xn)\n if abs(fxn) < epsilon:\n return xn\n dfxn = df(xn)\n if dfxn == 0: # avoid zero derivatives\n xn = xn + 1E-3\n continue\n xn = xn - fxn / dfxn\n return None", "def test_small_iteration():\n assert newton.square_root_with_newton_method(10, 0.5) == 5.0", "def Newtons_Method(func, func_prime, x_0, iters=100, tol=1e-5):\n i = 1\n x_new = x_0-func(x_0)/func_prime(x_0)\n while i < iters:\n x_new = x_0-func(x_0)/func_prime(x_0)\n if abs(x_new-x_0) < tol:\n return x_new\n else:\n x_0 = x_new\n i += 1\n return None", "def newton_raphson_convergence(f, J, U0, N, epsilon):\n i = 0\n U = U0\n nfU = []\n while (i < N and np.linalg.norm(f(U)) > epsilon):\n nfU.append(np.linalg.norm(f(U)))\n V = np.linalg.lstsq(J(U), -f(U))[0]\n U = U + V\n i = i + 1\n return nfU", "def safe_newton(evaluator, lowerbound, upperbound, initial_point,\n u_lower, u_upper, precision, max_iter=100):\n\n if (u_lower > 0 and u_upper > 0) or (u_lower < 0 and u_upper < 0):\n raise ValueError(\n \"Root must be bracketed in [lower bound, upper bound]\")\n if u_lower == 0:\n return lowerbound\n if u_upper == 0:\n return upperbound\n\n if u_lower < 0: # Orient the search so that f(xl) < 0.\n xl, xh = lowerbound, upperbound\n else:\n xh, xl = lowerbound, upperbound\n\n rts = initial_point # Initialize the guess for root\n dxold = abs(upperbound - lowerbound) # the “stepsize before last\"\n dx = dxold # and the last step\n\n u, newton = evaluator(rts)\n obj = [u]\n\n for _ in np.arange(max_iter): # Loop over allowed iterations.\n\n rts -= newton # Newton step\n if (rts - xh) * (rts - xl) <= 0 and abs(newton) <= abs(dxold) / 2:\n # Keep the Newton step if it remains in the bracket and\n # if it is converging fast enough.\n # This will be false if fdf is NaN.\n dxold = dx\n dx = newton\n\n else: # Bisection otherwise\n dxold = dx\n dx = (xh - xl) / 2\n rts = xl + dx\n\n if abs(dx) < precision: # Convergence criterion.\n return rts, obj\n u, newton = evaluator(rts)\n # the one new function evaluation per iteration\n obj.append(u)\n\n if u < 0: # maintain the bracket on the root\n xl = rts\n else:\n xh = rts\n\n raise RuntimeError(\"Maximum number of iterations exceeded in safe_newton\")", "def _newton_rhaphson(self, X, T, E, weights=None, initial_beta=None, step_size=None,\n precision=10e-6, show_progress=True, max_steps=50):\n self.path = []\n assert precision <= 1., \"precision must be less than or equal to 1.\"\n n, d = X.shape\n\n # make sure betas are correct size.\n if initial_beta is not None:\n assert initial_beta.shape == (d, 1)\n beta = initial_beta\n else:\n beta = np.zeros((d, 1))\n\n step_sizer = 
StepSizer(step_size)\n step_size = step_sizer.next()\n\n # Method of choice is just efron right now\n if self.tie_method == 'Efron':\n get_gradients = self._get_efron_values\n else:\n raise NotImplementedError(\"Only Efron is available.\")\n\n i = 0\n converging = True\n ll, previous_ll = 0, 0\n start = time.time()\n\n while converging:\n self.path.append(beta.copy())\n i += 1\n if self.strata is None:\n h, g, ll = get_gradients(X.values, beta, T.values, E.values, weights.values)\n else:\n g = np.zeros_like(beta).T\n h = np.zeros((beta.shape[0], beta.shape[0]))\n ll = 0\n for strata in np.unique(X.index):\n stratified_X, stratified_T, stratified_E, stratified_W = X.loc[[strata]], T.loc[[strata]], E.loc[[strata]], weights.loc[[strata]]\n _h, _g, _ll = get_gradients(stratified_X.values, beta, stratified_T.values, stratified_E.values, stratified_W.values)\n g += _g\n h += _h\n ll += _ll\n\n if self.penalizer > 0:\n # add the gradient and hessian of the l2 term\n g -= self.penalizer * beta.T\n h.flat[::d + 1] -= self.penalizer\n\n delta = solve(-h, step_size * g.T)\n if np.any(np.isnan(delta)):\n raise ValueError(\"\"\"delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n\"\"\")\n\n # Save these as pending result\n hessian, gradient = h, g\n\n if show_progress:\n print(\"Iteration %d: norm_delta = %.5f, step_size = %.5f, ll = %.5f, seconds_since_start = %.1f\" % (i, norm(delta), step_size, ll, time.time() - start))\n # convergence criteria\n if norm(delta) < precision:\n converging, completed = False, True\n elif abs(ll - previous_ll) < precision:\n converging, completed = False, True\n elif i >= max_steps:\n # 50 iterations steps with N-R is a lot.\n # Expected convergence is ~10 steps\n converging, completed = False, False\n elif step_size <= 0.00001:\n converging, completed = False, False\n elif abs(ll) < 0.0001 and norm(delta) > 1.0:\n warnings.warn(\"The log-likelihood is getting suspciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. 
\\\nSee https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faqwhat-is-complete-or-quasi-complete-separation-in-logisticprobit-regression-and-how-do-we-deal-with-them/ \", ConvergenceWarning)\n converging, completed = False, False\n\n step_size = step_sizer.update(norm(delta)).next()\n\n beta += delta\n previous_ll = ll\n\n self._hessian_ = hessian\n self._score_ = gradient\n self._log_likelihood = ll\n\n if show_progress and completed:\n print(\"Convergence completed after %d iterations.\" % (i))\n if not completed:\n warnings.warn(\"Newton-Rhapson failed to converge sufficiently in %d steps.\" % max_steps, ConvergenceWarning)\n\n return beta", "def before_newton_iteration(self):\n pass", "def newton(func, x0, fprime, args=(), tol=1.48e-8, maxiter=50,\n disp=True):\n\n if tol <= 0:\n raise ValueError(\"tol is too small <= 0\")\n if maxiter < 1:\n raise ValueError(\"maxiter must be greater than 0\")\n\n # Convert to float (don't use float(x0); this works also for complex x0)\n p0 = 1.0 * x0\n funcalls = 0\n status = _ECONVERR\n\n # Newton-Raphson method\n for itr in range(maxiter):\n # first evaluate fval\n fval = func(p0, *args)\n funcalls += 1\n # If fval is 0, a root has been found, then terminate\n if fval == 0:\n status = _ECONVERGED\n p = p0\n itr -= 1\n break\n fder = fprime(p0, *args)\n funcalls += 1\n # derivative is zero, not converged\n if fder == 0:\n p = p0\n break\n newton_step = fval / fder\n # Newton step\n p = p0 - newton_step\n if abs(p - p0) < tol:\n status = _ECONVERGED\n break\n p0 = p\n\n if disp and status == _ECONVERR:\n msg = \"Failed to converge\"\n raise RuntimeError(msg)\n\n return _results((p, funcalls, itr + 1, status))", "def test_iteration_zero():\n assert newton.square_root_with_newton_method(25, 0) == 12.5", "def test_function_104(self):\n\t\tself.assertEqual(attempt.newtons_nsteps(10,3), 28.00077)", "def newtonMethod(f, f_prima, x0, tol, limit):\n\tprint(\"Newton\")\n\txi = x0\n\tlistOfResult = []\n\tlistOfResult.append(xi)\n\tfor i in range(0, limit):\n\t\ty = f(xi)\n\t\typ = f_prima(xi)\n\t\tif(abs(yp) < e_mach):\n\t\t\tprint(\"division por cero\")\n\t\t\tbreak\n\t\txii = xi - y/yp\n\t\tif(((xii != 0) and (xi != 0) and (abs(xii - xi) / abs(xii) < tol)) or (abs(xii - xi) == 0)):\n\t\t\tbreak\n\t\txi = xii\n\t\tlistOfResult.append(xi)\n\tprint(\"raiz encontrada: \" + str(xi))\n\tlineal(listOfResult)\n\tsuperLineal(listOfResult)\n\tcuadratico(listOfResult)", "def _icbrt_newton(n, s):\n d = n // (s * s)\n a = s + (d - s) // 3\n # Early return covering most of the cases where ``s`` is already the\n # correct answer.\n if a == s:\n return a\n\n while True:\n d = n // (a * a)\n if d >= a:\n return a\n a += (d - a) // 3", "def newthonModifiedMethod(f, f_prima, x0, m, tol, limit):\n\tprint(\"Newton Modificado\")\n\txi = x0\n\tlistOfResult = []\n\tlistOfResult.append(xi)\n\tfor i in range(0, limit):\n\t\ty = m * f(xi)\n\t\typ = f_prima(xi)\n\t\tif(abs(yp) < e_mach):\n\t\t\tprint(\"division por cero\")\n\t\t\tbreak\n\t\txii = xi - y/yp\n\t\tif(((xii != 0) and (xi != 0) and (abs(xii - xi) / abs(xii) < tol)) or (abs(xii - xi) == 0)):\n\t\t\tbreak\n\t\txi = xii\n\t\tlistOfResult.append(xi)\n\tprint(\"raiz encontrada: \" + str(xi))\n\tlineal(listOfResult)\n\tsuperLineal(listOfResult)\n\tcuadratico(listOfResult)\n\treturn xi", "def newton1d(f, df, ddf, x, niter=10):\n#raise NotImplementedError(\"Problem 3 Incomplete\")\n if np.isclose(df(x),0) or niter == 0:\n return x\n elif np.isclose(ddf(x),0) :\n raise ValueError(\"Division by zero occured.\")\n else :\n return 
newton1d(f,df,ddf,x-df(x)/float(ddf(x)),niter-1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
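The newton routine above inverts the Vinet equation of state: after bracketing, each step applies x <- x - (f(x) - P) / f'(x). The core refinement loop, stripped of the EOS-specific bracketing, looks like the sketch below; the cubic test function and the tolerance-based early exit are additions for illustration only.

def newton_solve(f, dfdx, target, x0, niter=100, tol=1e-10):
    """Refine x0 so that f(x) ~= target using Newton-Raphson steps."""
    x = x0
    for _ in range(niter):
        step = (f(x) - target) / dfdx(x)
        x -= step
        if abs(step) < tol:  # stop once the correction is negligible
            break
    return x

# usage: solve x**3 = 10 starting from x0 = 2
root = newton_solve(lambda x: x ** 3, lambda x: 3 * x ** 2, target=10.0, x0=2.0)
print(root)  # ~2.1544346900, the cube root of 10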
Configures the Celery APP for CPU, GPU, MPI mode.
def create_celery_app() -> Celery:
    bootmode = BootMode.CPU
    if start_as_mpi_node():
        bootmode = BootMode.MPI
    elif config.FORCE_START_CPU_MODE:
        bootmode = BootMode.CPU
    elif config.FORCE_START_GPU_MODE or is_gpu_node():
        bootmode = BootMode.GPU
    return configure_node(bootmode)
[ "def setup_app():\n cfg = get_config()\n print(cfg)\n backend = cfg['backend']\n broker = cfg['broker']\n app = Celery('nlp_server', broker=broker, backend=backend)\n\n if cfg.get('queues'):\n queue_list = []\n for queue in cfg.get('queues'):\n q = Queue(queue.get('name'), Exchange(queue.get('exchange')), routing_key=queue.get('routing_key'))\n queue_list.append(q)\n app.conf.task_queues = tuple(queue_list)\n\n if cfg.get('routing_key'):\n app.conf.task_default_routing_key = cfg.get('routing_key')\n\n if cfg.get('exchange'):\n app.conf.task_default_exchange = cfg.get('exchange')\n\n if cfg.get('update'):\n app.conf.update(cfg.get('update'))\n\n if cfg.get('task_serializer'):\n app.conf.task_serializer = cfg.get('task_serializer')\n\n if cfg.get('result_serializer'):\n app.conf.result_serializer = cfg.get('result_serializer')\n\n if cfg.get('accept_content'):\n app.conf.accept_content = cfg.get('accept_content')\n\n if cfg.get('worker_prefetch_multiplier'):\n app.conf.worker_prefetch_multiplier = int(\n cfg.get('worker_prefetch_multiplier'))\n return app", "def init_app(self, app):\n # Instantiate celery and read config\n super(Celery, self).__init__(app.name,\n broker=app.config['CELERY_BROKER_URL'])\n # Update the config\n self.conf.update(app.config)", "def init_celery(celery, app):\n celery.conf.update(app.config)\n\n class ContextTask(celery.Task):\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return self.run(*args, **kwargs)\n\n celery.Task = ContextTask", "def init(with_result_backend=False):\n # Avoid exception on CLI run.\n from celery import Celery\n\n conf = {\"broker_url\": os.environ[\"BROKER_URL\"]}\n\n if with_result_backend:\n conf[\"result_backend\"] = os.environ[\"RESULT_BACKEND_URL\"]\n\n app = Celery(\"app\")\n app.config_from_object(conf)\n\n # Set Selinon configuration.\n Config.set_config_yaml(*get_config_files())\n # Prepare Celery\n Config.set_celery_app(app)\n\n return app", "def __configure(self):\n\n if hpccm.config.g_cpu_arch == cpu_arch.X86_64:\n if not self.__configure_opts:\n self.__configure_opts = ['--enable-shared', '--enable-openmp',\n '--enable-threads', '--enable-sse2']\n\n if hpccm.config.test_cpu_feature_flag('avx'):\n self.__configure_opts.append('--enable-avx')\n\n if hpccm.config.test_cpu_feature_flag('avx2'):\n self.__configure_opts.append('--enable-avx2')\n\n if hpccm.config.test_cpu_feature_flag('avx512'):\n self.__configure_opts.append('--enable-avx512')\n else:\n if not self.__configure_opts:\n self.__configure_opts = ['--enable-shared', '--enable-openmp',\n '--enable-threads']\n\n if self.__mpi:\n self.__configure_opts.append('--enable-mpi')", "def set_cpus(self, num_cpus):\n if self.batch:\n self.batch_settings.batch_args[\"cpus-per-task\"] = num_cpus\n for db in self:\n db.run_settings.set_cpus_per_task(num_cpus)", "async def start_background_tasks(app):\n await license_init(app)\n await matlab_starter(app)", "def setup_server(backend_type=BackendType.LocalCPU, max_number_of_workers=1,\n conda_environment='propertyestimator', worker_memory=4 * unit.gigabyte,\n port=8000, cuda_version='10.1'):\n\n working_directory = 'working_directory'\n storage_directory = 'storage_directory'\n\n # Remove any existing data.\n if os.path.isdir(working_directory):\n shutil.rmtree(working_directory)\n\n calculation_backend = None\n\n if backend_type == BackendType.LocalCPU:\n calculation_backend = DaskLocalCluster(number_of_workers=max_number_of_workers)\n\n elif backend_type == BackendType.LocalGPU:\n\n calculation_backend = 
DaskLocalCluster(number_of_workers=max_number_of_workers,\n resources_per_worker=ComputeResources(1, 1,\n ComputeResources.\n GPUToolkit.CUDA))\n\n elif backend_type == BackendType.GPU:\n\n queue_resources = QueueWorkerResources(number_of_threads=1,\n number_of_gpus=1,\n preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,\n per_thread_memory_limit=worker_memory,\n wallclock_time_limit=\"05:59\")\n\n worker_script_commands = [\n f'conda activate {conda_environment}',\n f'module load cuda/{cuda_version}'\n ]\n\n calculation_backend = DaskLSFBackend(minimum_number_of_workers=1,\n maximum_number_of_workers=max_number_of_workers,\n resources_per_worker=queue_resources,\n queue_name='gpuqueue',\n setup_script_commands=worker_script_commands,\n adaptive_interval='1000ms')\n elif backend_type == BackendType.CPU:\n\n queue_resources = QueueWorkerResources(number_of_threads=1,\n per_thread_memory_limit=worker_memory,\n wallclock_time_limit=\"01:30\")\n\n worker_script_commands = [\n f'conda activate {conda_environment}'\n ]\n\n calculation_backend = DaskLSFBackend(minimum_number_of_workers=1,\n maximum_number_of_workers=max_number_of_workers,\n resources_per_worker=queue_resources,\n queue_name='cpuqueue',\n setup_script_commands=worker_script_commands,\n adaptive_interval='1000ms')\n\n # Set up a backend to cache simulation data in.\n storage_backend = LocalFileStorage(storage_directory)\n\n # Spin up the server object.\n server.PropertyEstimatorServer(calculation_backend=calculation_backend,\n storage_backend=storage_backend,\n port=port,\n working_directory=working_directory)", "def configure(self):\n if self.name == 'ncm-ncd':\n self.configure_ncm_ncd()\n\n if self.name == 'maven-tools':\n self.configure_maven_tools()\n\n if self.name == 'CAF':\n self.configure_caf()\n\n if self.name == 'CCM':\n self.configure_ccm()\n\n if self.name == 'configuration-modules-grid':\n self.configure_components_grid()\n\n if self.name == 'configuration-modules-core':\n self.configure_components()\n\n if self.name == 'template-library-core':\n self.configure_template_library_core()", "def get_celery_app(self):\n\n from jobcontrol.async.tasks import app as celery_app\n celery_app.conf['JOBCONTROL'] = self\n celery_app.conf.update(self.config.celery)\n return celery_app", "def _init_device_worker(identifier, precision):\n import bempp.api\n from bempp.api.utils.pool import get_id\n from bempp.core.cl_helpers import get_context_by_name\n\n ctx, platform_index = get_context_by_name(identifier)\n bempp.api.set_default_device(platform_index, get_id())\n if precision is not None:\n bempp.api.DEVICE_PRECISION_CPU = precision\n bempp.api.DEVICE_PRECISION_GPU = precision", "def _setup_executors(self, executors, project_type_params):\n super()._setup_executors(executors, project_type_params)\n for executor in executors:\n executor.run_job_config_setup()", "def heavy_init(cls):\n cfg.CONF.set_default('connection', 'sqlite://', group='database')\n cfg.CONF.set_default('max_overflow', -1, group='database')\n cfg.CONF.set_default('max_pool_size', 1000, group='database')\n\n qinling_opts = [\n (config.API_GROUP, config.api_opts),\n (config.PECAN_GROUP, config.pecan_opts),\n (config.ENGINE_GROUP, config.engine_opts),\n (config.STORAGE_GROUP, config.storage_opts),\n (config.KUBERNETES_GROUP, config.kubernetes_opts),\n (None, [config.launch_opt])\n ]\n for group, options in qinling_opts:\n cfg.CONF.register_opts(list(options), group)\n\n db_api.setup_db()", "def configure_gpu_cpu(RUN_GPU, GPU_ALLOCATION):\n # Extra imports to 
set GPU options\n import tensorflow as tf\n from keras import backend as k\n import os\n # To force code to run on cpu\n if RUN_GPU==False:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n if RUN_GPU and GPU_ALLOCATION !=100:\n # TensorFlow congif\n config = tf.ConfigProto()\n\n # Allocate memory as-needed\n config.gpu_options.allow_growth = True\n\n # Allocate GPU memory based on user input USE_GPU\n config.gpu_options.per_process_gpu_memory_fraction = GPU_ALLOCATION/100\n\n # Create a session with the above specified options\n k.tensorflow_backend.set_session(tf.Session(config=config))", "def set_gpu():\n if Config.gpu_count == 1:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1\n elif Config.gpu_count == 2:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2\n elif Config.gpu_count == 3:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3\n elif Config.gpu_count == 4:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3 + ', ' + Config.gpu4", "def configure(manager):\r\n\r\n ###########################################################################\r\n # configuration of numerics\r\n ###########################################################################\r\n conf = manager.num_conf # DO NOT EDIT THIS LINE\r\n ###########################################################################\r\n\r\n # usage of mpi\r\n conf.mpi_acceleration = False \r\n \r\n # here one can prevent competing multithreating if necessary\r\n conf.cpu_acceleration = True\r\n conf.num_threads = -1 # -1 means to be determined optimally\r\n \r\n # use gpu acceleration (if gpu available)\r\n # this requires pytorch, but it does\r\n # not switch on pytorch usage for\r\n # other than GPU computations\r\n conf.gpu_acceleration = False \r\n \r\n # restrict yourself only to certain GPUs\r\n conf.available_gpus = [0, 1]\r\n \r\n # enables pytorch as an alternative\r\n # to numpy even without GPUs\r\n conf.enable_pytorch = False \r\n \r\n\r\n ###########################################################################\r\n # logging configuration\r\n ###########################################################################\r\n conf = manager.log_conf # DO NOT EDIT THIS LINE\r\n ###########################################################################\r\n conf.log_on_screen = True\r\n conf.log_to_file = False\r\n #conf.log_file_name = \"./qrhei.log\"\r\n \r\n # verbosity is a number from 0 to 10\r\n # 0 == no information written\r\n # 10 == all information is written\r\n conf.verbosity = 5 \r\n conf.verbose=True\r\n\r\n ###########################################################################\r\n # general configuration\r\n ###########################################################################\r\n conf = manager.gen_conf # DO NOT EDIT THIS LINE\r\n ###########################################################################\r\n conf.legacy_relaxation = False", "def configure():\n\n with settings(warn_only=True):\n # disable default site\n sudo('rm /etc/nginx/sites-enabled/default')\n\n # upload nginx server blocks\n put(env.config_dir + '/nginx.conf', '/tmp/nginx.conf')\n sudo('mv /tmp/nginx.conf %s/nginx_pmgbilltracker.conf' % env.project_dir)\n\n # link server blocks to Nginx config\n with settings(warn_only=True):\n sudo('ln -s %s/nginx_pmgbilltracker.conf /etc/nginx/conf.d/' % env.project_dir)\n\n # upload supervisor config\n put(env.config_dir + 
'/supervisor.conf', '/tmp/supervisor.conf')\n sudo('mv /tmp/supervisor.conf /etc/supervisor/conf.d/supervisor_pmgbilltracker.conf')\n sudo('supervisorctl reread')\n sudo('supervisorctl update')\n\n # configure Flask\n with settings(warn_only=True):\n sudo('mkdir %s/instance' % env.project_dir)\n put(env.config_dir + '/config_backend.py', '/tmp/config_backend.py')\n put(env.config_dir + '/config_frontend.py', '/tmp/config_frontend.py')\n put(env.config_dir + '/config_backend_private.py', '/tmp/config_backend_private.py')\n put(env.config_dir + '/config_frontend_private.py', '/tmp/config_frontend_private.py')\n sudo('mv /tmp/config_backend.py ' + env.project_dir + '/instance/config_backend.py')\n sudo('mv /tmp/config_frontend.py ' + env.project_dir + '/instance/config_frontend.py')\n sudo('mv /tmp/config_backend_private.py ' + env.project_dir + '/instance/config_backend_private.py')\n sudo('mv /tmp/config_frontend_private.py ' + env.project_dir + '/instance/config_frontend_private.py')\n\n restart()\n return", "def assign_gpu_and_run(self):\n if not self.gpu_free.empty():\n\n # Retrieve the job from the queue\n job_to_run = self.check_enough_gpu()\n\n if job_to_run is None:\n return\n\n # Floor division to get lower bound of num_gpus\n num_gpus = int(job_to_run.width)//1000 + 1\n\n #if (int(job_to_run.width) % 1000)/1000 >= 0.4:\n # num_gpus += 2\n # This is okay because we already know that from check_enough_gpu that\n # gpu_free's size is greater than int(job_to_run.width)/1000\n\n for _ in range(num_gpus):\n job_to_run.gpu.append(self.gpu_free.get())\n\n # Create a copy of the environemnt\n new_env = os.environ.copy()\n\n # Create the CUDA GPU string\n gpu_string = \"\"\n\n i = 0\n while (i < len(job_to_run.gpu)):\n if i == 0:\n gpu_string = gpu_string + str(job_to_run.gpu[i])\n else:\n gpu_string = gpu_string + \",\" + str(job_to_run.gpu[i])\n i += 1\n\n new_env['CUDA_VISIBLE_DEVICES'] = gpu_string\n\n params = ['python',\n '/app/neural-style/neural_style.py',\n '--content', '%s' % job_to_run.path1,\n '--styles', '%s' % job_to_run.path2,\n '--output','%s' % job_to_run.output_path,\n '--content-weight', str(job_to_run.content_weight),\n '--content-weight-blend', str(job_to_run.content_blend),\n '--style-weight', str(job_to_run.style_weight),\n '--style-layer-weight-exp', str(job_to_run.style_layer_weight_exp),\n '--style-scales', str(job_to_run.style_scale),\n '--style-blend-weights', str(job_to_run.style_blend),\n '--iterations', str(job_to_run.iterations),\n '--width', str(job_to_run.width),\n '--network', VGG_LOCATION ]\n\n # set preserve colors if indicated\n # assuming that preserve_colors will be of type boolean\n if job_to_run.preserve_color:\n params.append('--preserve-colors')\n\n # Run the subprocess\n try:\n job_to_run.proc = Popen(params, env=new_env)\n self.logger.log.info(\"Popen worked! 
Job %d assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.running_jobs.append(job_to_run)\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))\n\n except Exception as e:\n self.logger.log.error(\"Job %d could not be assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.logger.log.exception(e)\n\n #c = self.db.cursor()\n #c.execute(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", (job_to_run.job_id,))\n self.safe_execute_sql(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", True, (job_to_run.job_id,))\n\n for free in job_to_run.gpu:\n self.gpu_free.put(free)\n\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))", "def start_celeryd():\n require('hosts')\n require('path')\n\n clear_logs()\n with cd('%(path)s' % env):\n with prefix('source %(path)s/.env/bin/activate' % env):\n run('app/manage.py celeryd_multi start %(celeryconf)s' % env)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
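create_celery_app above only picks a BootMode and defers all wiring to configure_node, which the row does not show. One plausible shape for that step, building a Celery app and routing work to a mode-specific queue, is sketched below; the queue naming, broker URL, task name and this BootMode enum are assumptions, and the snippet needs the celery package installed.

import enum
import os

from celery import Celery

class BootMode(enum.Enum):
    CPU = "cpu"
    GPU = "gpu"
    MPI = "mpi"

def configure_node(bootmode: BootMode) -> Celery:
    app = Celery(
        "sidecar",
        broker=os.environ.get("CELERY_BROKER_URL", "redis://localhost:6379/0"),
    )
    # one queue per boot mode, so CPU, GPU and MPI workers only pull matching jobs
    app.conf.task_default_queue = f"comp.{bootmode.value}"
    app.conf.task_routes = {
        "tasks.run_computation": {"queue": f"comp.{bootmode.value}"},
    }
    return app

app = configure_node(BootMode.GPU)
print(app.conf.task_default_queue)  # comp.gpu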
Computes the log of input probabilities, masking divide by zero in log. Notes: During the M-step of the EM algorithm, very small intermediate start or transition probabilities could be normalized to zero, causing a divide-by-zero warning when their log is taken.
def log_mask_zero(a):
    a = np.asarray(a)
    with np.errstate(divide="ignore"):
        return np.log(a)
[ "def log_with_zeros(x):\n x = torch.max(x, torch.tensor(1e-10))\n return torch.log(x)", "def log_of_array_ignoring_zeros(M: np.ndarray) -> np.ndarray:\n log_M = M.copy()\n mask = log_M > 0\n log_M[mask] = np.log(log_M[mask])\n \n return log_M", "def p_log_p(p: torch.FloatTensor) -> torch.FloatTensor:\n return p * mask_log(p, mask=p != 0)", "def log_prior(self, inputs):\n n = inputs.shape[0]\n return torch.zeros(n)", "def np_masked_softmax(logits, legal_actions_mask):\n masked_logits = logits + np.log(legal_actions_mask)\n max_logit = np.amax(masked_logits, axis=-1, keepdims=True)\n exp_logit = np.exp(masked_logits - max_logit)\n return exp_logit / np.sum(exp_logit, axis=-1, keepdims=True)", "def _logprob(self):\n logp = -0.5 * tf.reduce_sum(self.log_vars)\n logp += -0.5 * tf.reduce_sum(tf.square(self.old_actions - self.means) /\n tf.exp(self.log_vars), axis=-1)\n\n self.logp = logp\n\n logp_old = -0.5 * tf.reduce_sum(self.log_vars)\n logp_old += -0.5 * tf.reduce_sum(tf.square(self.old_actions - self.old_means) /\n tf.exp(self.log_vars), axis=-1)\n self.logp_old = logp_old", "def scale_log(self) -> None:\n # Problem are probabilities below 1\n self.values = [log(1.01 + x, 2) for x in self.values]", "def _compute_denominator_log_likelihood(self, logits: Tensor, mask: Tensor):\n device = logits.device\n batch_size, seq_len, _ = logits.size()\n # (num_labels, num_labels) -> (1, num_labels, num_labels)\n transitions = self.transitions.unsqueeze(0)\n # add the score from beginning to each label\n # and the first score of each label\n score = self.start_transition + logits[:, 0]\n # iterate through processing for the number of words in the mini batch\n for t in range(1, seq_len):\n # (batch_size, self.num_labels, 1)\n before_score = score.unsqueeze(2)\n # prepare t-th mask of sequences in each sequence\n # (batch_size, 1)\n mask_t = mask[:, t].unsqueeze(1)\n mask_t = mask_t.to(device)\n # prepare the transition probability of the t-th sequence label\n # in each sequence\n # (batch_size, 1, num_labels)\n logits_t = logits[:, t].unsqueeze(1)\n # calculate t-th scores in each sequence\n # (batch_size, num_labels)\n score_t = before_score + logits_t + transitions\n score_t = torch.logsumexp(score_t, 1)\n # update scores\n # (batch_size, num_labels)\n score = torch.where(mask_t, score_t, score)\n # add the end score of each label\n score += self.end_transition\n # return the log likely food of all data in mini batch\n return torch.logsumexp(score, 1)", "def log_normalize(log_prob_vector):\n max_v = log_prob_vector.max()\n log_prob_vector += max_v\n log_prob_vector = np.exp(log_prob_vector)\n log_prob_vector /= log_prob_vector.sum()\n return log_prob_vector", "def mask_log(x: torch.FloatTensor, mask: Optional[torch.Tensor] = None) -> torch.FloatTensor:\n if mask is not None:\n # Set masked entries of x equal to 1 (in a differentiable way) so log(1) = 0\n mask = mask.float()\n x = x * mask + (1 - mask)\n\n return torch.log(x)", "def log(inputs):\n return tf.math.log(inputs)", "def _log_prob(self, x):\n return tf.math.log(tf.cast(self.prob(x), dtype=tf.float32))", "def logprob(hmm, x):\n if isinstance(hmm, HMM):\n hmm = [hmm]\n if isinstance(hmm, list) and isinstance(hmm[0], HMM):\n n_objs = len(hmm)\n n_samples, n_features = x.shape\n logP = np.zeros((n_objs))\n for i in range(0, n_objs):\n logp_act = 0\n pX, logS = hmm[i].output_distr[0].prob(x, hmm[i].output_distr)\n alpha_hat, c = hmm[i].state_gen.forward(pX)\n # compute true probability with scale factor\n if np.isscalar(logS):\n logS = 
np.tile(logS, (n_samples))\n for j in range(0, n_samples):\n logp_act += np.log(c[j]) + logS[j]\n if len(c) == n_samples:\n # ln(c_0) + .. + ln(c_{T-1})\n logP[i] = logp_act\n else:\n logP[i] = logp_act + np.log(c[-1]) # c[-1] is not scaled\n else:\n raise ValueError(\"The first input must be an hmm object or a list of hmm objects\")\n return logP", "def logpow(x, m):\n return torch.where(\n torch.eq(x, torch.tensor(0)),\n torch.where(torch.eq(m, torch.tensor(0)), torch.tensor(0.0), torch.tensor(-np.inf)),\n m * torch.log(x),\n )", "def log_prob(self, inputs, context=None):\n\n # Get necessary quantities.\n logits, means, precisions, sumlogdiag, _ = self.get_mixture_components(context)\n\n batch_size, n_mixtures, output_dim = means.size()\n inputs = inputs.view(-1, 1, output_dim)\n\n # Split up evaluation into parts.\n a = logits - torch.logsumexp(logits, dim=-1, keepdim=True)\n b = -(output_dim / 2.0) * np.log(2 * np.pi)\n c = sumlogdiag\n d1 = (inputs.expand_as(means) - means).view(\n batch_size, n_mixtures, output_dim, 1\n )\n d2 = torch.matmul(precisions, d1)\n d = -0.5 * torch.matmul(torch.transpose(d1, 2, 3), d2).view(\n batch_size, n_mixtures\n )\n\n return torch.logsumexp(a + b + c + d, dim=-1)", "def logits(self) -> T.Tensor:\n if self._logits is None:\n self._logits = T.random.bernoulli_probs_to_logits(self._probs,\n self.epsilon)\n return self._logits", "def log_p(observed_data: torch.FloatTensor,\n log_alpha: torch.FloatTensor) -> torch.FloatTensor:\n alpha = log_alpha.exp()\n return ((torch.log(observed_data) * (alpha - 1.0)).sum(-1) +\n torch.lgamma(alpha.sum(-1)) -\n torch.lgamma(alpha).sum(-1))", "def logits(self, x):", "def forward(log_emlik, log_startprob, log_transmat):", "def _compute_numerator_log_likelihood(self, logits: Tensor, labels: Tensor, mask: Tensor) -> Tensor:\n batch_size, seq_len, = labels.shape\n # mask = mask.type_as(logits)\n\n logits_unsqueezed = logits.unsqueeze(-1)\n transitions = self.transitions.unsqueeze(-1)\n arange_b = torch.arange(batch_size)\n # extract first vector of sequences in mini batch\n last_mask_index = mask.sum(1) - 1\n calc_range = seq_len - 1 #should calc_range be last_mask_index? No since parallel faster\n # calc_range = last_mask_index\n score = self.start_transition[labels[:, 0]] + sum(\n [self._calc_trans_score_for_num_llh(\n logits_unsqueezed, labels, transitions, mask, t, arange_b\n ) for t in range(calc_range)])\n # extract end label number of each sequence in mini batch\n # (batch_size)\n last_labels = labels[arange_b,last_mask_index]\n each_last_score = logits[arange_b, -1, last_labels].squeeze(-1) * mask[:, -1]\n # Add the score of the sequences of the maximum length in mini batch\n # Add the scores from the last tag of each sequence to EOS\n score += each_last_score + self.end_transition[last_labels]\n return score" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function returns a dictionary with definitions of output data produced by the component.
def output_data_definitions(self): return { self.key_outputs: DataDefinition([-1, self.output_size], [torch.Tensor], "Batch of outputs [BATCH_SIZE x OUTPUT_SIZE]") }
[ "def outputs(self) -> Dict[str, TypeShape]:\n raise NotImplementedError()", "def _get_output_vars(self):", "def _getInitOutputValues(self):\r\n \r\n outputs = {}\r\n\r\n for attr_name, attr_data in self._output_plug_map.items():\r\n \r\n #---if atribute is array, set its default to a list of the correct length----##\r\n if attr_data[3]:\r\n outputs[attr_name] = attr_data[4]([attr_data[2]] * attr_data[0].numElements())\r\n\r\n else:\r\n outputs[attr_name] = attr_data[2]\r\n\r\n return outputs", "def _getProcessOutputs(self, process):\n processOutputs = [] \n for data in process.ProcessOutputs.Output:\n output = {} \n output[\"Identifier\"] = str(data.Identifier.value()) \n ita = self._getTitleAbstract(data)\n for key in ita.keys():\n output[key] = ita[key] \n \n if data.ComplexOutput != None:\n output[\"ComplexOutput\"] = self._getComplexData(data.ComplexOutput)\n if data.LiteralOutput != None:\n output[\"LiteralOutput\"] = self._getLiteralData(data.LiteralOutput)\n \n processOutputs.append(output)\n \n return processOutputs", "def all_outputs(self):\n all_outputs = {}\n for plug in self.outputs.values():\n all_outputs[plug.name] = plug\n for sub in plug.sub_plugs.values():\n all_outputs[sub.name] = sub\n return all_outputs", "def input_data_definitions(self):\n return {}", "def get_outputs(self):\r\n return []", "def get_device_data(self) -> Dict[str, Any]:\n\n device_data: Dict[str, Any] = {\n \"snapshot_timestamp\": self.timestamp,\n \"device_name\": self.name,\n \"device_type\": self.type,\n \"output_formats\": {},\n }\n\n for output_format in self.output_formats:\n netcat.LOGGER.info(f\"Reading info from device in '{output_format['format_name']}' format\")\n\n for command in output_format[\"pre_commands\"]:\n self.send_command(command)\n\n output_format_section = {}\n\n for command in output_format[\"commands\"]:\n output_format_section[command] = (\n \"\\n\".join(self.send_command(command).split(\"\\r\\n\")[output_format[\"output_start\"]: output_format[\"output_end\"]]) + \"\\n\"\n )\n\n device_data[\"output_formats\"][output_format[\"format_name\"]] = output_format_section # type: ignore\n\n for command in output_format[\"post_commands\"]:\n self.send_command(command)\n\n return device_data", "def _read_data(self):\n outputs = self._get_outputs(self.type)\n outputs.sort()\n self.data = {}\n for o in outputs:\n logger.debug(\"parsing output file '{}'\".format(o))\n out_items = parsing.parse_output_filename(o)\n proclib_id = out_items['sample_id']\n out_type = out_items['type']\n out_source = out_items['source']\n\n logger.debug(\"storing data from '{}' in '{}' '{}'\".format(\n out_source, proclib_id, out_type))\n out_parser = self._get_parser(out_type, out_source)(path=o)\n\n self.data.setdefault(\n out_type, {}).setdefault(proclib_id, []).append(\n {out_source: out_parser.parse()}\n )", "def output_type_specs(self) -> Dict[Text, tf.TypeSpec]:\n return {\n k: tf.type_spec_from_value(v) for k, v in\n self.decode_record.get_concrete_function().structured_outputs.items()\n }", "def _terraform_outputs(self):\n response = self._terraform('output -json')\n output_dict = json.loads(response)\n return {var: output_dict[var][\"value\"] for var in output_dict}", "def outputs_map(stackname):\n data = core.describe_stack(stackname).meta.data # boto3\n if \"Outputs\" not in data:\n return {}\n return {o['OutputKey']: o.get('OutputValue') for o in data['Outputs']}", "def export(self, export_dir=None):\n return {\n self.component_key: {\n 'documentation_complete': 
self.meta.get('documentation_complete'),\n 'name': self.meta.get('name'),\n 'system_key': self.system_key,\n 'component_key': self.component_key,\n 'verifications': self.export_references(self.meta.get('verifications'), export_dir),\n 'references': self.export_references(self.meta.get('references'), export_dir)\n }\n }", "def return_results(self):\n\n try: # if something failed, we still might be able to retrieve something\n last_calc_out = self.ctx.last_base_wc.outputs.output_parameters\n retrieved = self.ctx.last_base_wc.outputs.retrieved\n last_calc_out_dict = last_calc_out.get_dict()\n except (NotExistent, AttributeError):\n last_calc_out = None\n last_calc_out_dict = {}\n retrieved = None\n\n last_nmmp_distance = None\n if self.ctx.last_nmmp_distance > 0.0:\n last_nmmp_distance = self.ctx.last_nmmp_distance\n\n outputnode_dict = {}\n outputnode_dict['workflow_name'] = self.__class__.__name__\n outputnode_dict['workflow_version'] = self._workflowversion\n outputnode_dict['material'] = self.ctx.formula\n outputnode_dict['conv_mode'] = self.ctx.wf_dict['mode']\n outputnode_dict['loop_count'] = self.ctx.loop_count\n outputnode_dict['iterations_total'] = last_calc_out_dict.get('number_of_iterations_total', None)\n outputnode_dict['distance_charge'] = self.ctx.last_charge_density\n outputnode_dict['distance_charge_all'] = self.ctx.distance\n outputnode_dict['total_energy'] = last_calc_out_dict.get('energy_hartree', None)\n outputnode_dict['total_energy_all'] = self.ctx.total_energy\n outputnode_dict['force_diff_last'] = self.ctx.forcediff\n outputnode_dict['force_largest'] = last_calc_out_dict.get('force_largest', None)\n outputnode_dict['distance_charge_units'] = 'me/bohr^3'\n outputnode_dict['total_energy_units'] = 'Htr'\n outputnode_dict['nmmp_distance'] = last_nmmp_distance\n outputnode_dict['nmmp_distance_all'] = self.ctx.nmmp_distance\n outputnode_dict['total_wall_time'] = self.ctx.total_wall_time\n outputnode_dict['total_wall_time_units'] = 's'\n outputnode_dict['info'] = self.ctx.info\n outputnode_dict['warnings'] = self.ctx.warnings\n outputnode_dict['errors'] = self.ctx.errors\n\n if self.ctx.x_torques:\n outputnode_dict['last_x_torques'] = self.ctx.x_torques[-1]\n outputnode_dict['last_y_torques'] = self.ctx.y_torques[-1]\n outputnode_dict['alphas'] = self.ctx.alpha_angles\n outputnode_dict['betas'] = self.ctx.beta_angles\n\n num_iterations = last_calc_out_dict.get('number_of_iterations_total', None)\n if self.ctx.successful and self.ctx.reached_conv:\n if len(self.ctx.total_energy) <= 1: # then len(self.ctx.all_forces) <= 1 too\n self.report('STATUS: Done, the convergence criteria are reached.\\n'\n 'INFO: The charge density of the FLEUR calculation '\n f'converged after {self.ctx.loop_count} FLEUR runs, {num_iterations} '\n f'iterations and {self.ctx.total_wall_time} sec '\n f'walltime to {outputnode_dict[\"distance_charge\"]} \"me/bohr^3\" \\n'\n 'INFO: Did not manage to get energy and largest force difference '\n 'between two last iterations, probably converged in a single iteration')\n else:\n self.report('STATUS: Done, the convergence criteria are reached.\\n'\n 'INFO: The charge density of the FLEUR calculation '\n f'converged after {self.ctx.loop_count} FLEUR runs, {num_iterations} '\n f'iterations and {self.ctx.total_wall_time} sec '\n f'walltime to {outputnode_dict[\"distance_charge\"]} \"me/bohr^3\" \\n'\n 'INFO: The total energy difference of the last two iterations '\n f'is {self.ctx.energydiff} Htr and largest force difference is '\n f'{self.ctx.forcediff} 
Htr/bohr')\n elif self.ctx.successful and not self.ctx.reached_conv:\n if len(self.ctx.total_energy) <= 1: # then len(self.ctx.all_forces) <= 1 too\n self.report('STATUS/WARNING: Done, the maximum number of runs '\n 'was reached.\\n INFO: The '\n 'charge density of the FLEUR calculation, '\n f'after {self.ctx.loop_count} FLEUR runs, {num_iterations} '\n f' iterations and {self.ctx.total_wall_time} sec '\n f'walltime is {outputnode_dict[\"distance_charge\"]} \"me/bohr^3\"\\n'\n 'INFO: can not extract energy and largest force difference between'\n ' two last iterations, probably converged in a single iteration')\n else:\n self.report('STATUS/WARNING: Done, the maximum number of runs '\n 'was reached.\\n INFO: The '\n 'charge density of the FLEUR calculation, '\n f'after {self.ctx.loop_count} FLEUR runs, {num_iterations} '\n f' iterations and {self.ctx.total_wall_time} sec '\n f'walltime is {outputnode_dict[\"distance_charge\"]} \"me/bohr^3\"\\n'\n 'INFO: The total energy difference of the last two iterations '\n f'is {self.ctx.energydiff} Htr and largest force difference is'\n f'{self.ctx.forcediff} Htr/bohr\\n')\n else: # Termination ok, but not converged yet...\n if self.ctx.abort: # some error occurred, do not use the output.\n self.report('STATUS/ERROR: I abort, see logs and errors/warning/hints in output_scf_wc_para')\n\n if self.ctx.torquediff is None:\n self.ctx.torquediff = 9999\n self.report(f'Torque diff {self.ctx.torquediff}')\n outputnode_t = Dict(dict=outputnode_dict)\n if self.ctx.last_nmmp_distance > 0.0:\n self.report(f'INFO: The LDA+U density matrix is converged to {self.ctx.last_nmmp_distance} change '\n 'of all matrix elements')\n\n outputnode_t = Dict(outputnode_dict)\n # this is unsafe so far, because last_calc_out could not exist...\n if last_calc_out:\n outdict = create_scf_result_node(outpara=outputnode_t,\n last_calc_out=last_calc_out,\n last_calc_retrieved=retrieved)\n else:\n outdict = create_scf_result_node(outpara=outputnode_t)\n\n # Now it always returns changed fleurinp that was actually used in the calculation\n if self.ctx.fleurinp is not None:\n outdict['fleurinp'] = self.ctx.fleurinp\n\n if self.ctx.last_base_wc:\n self.out_many(self.exposed_outputs(self.ctx.last_base_wc, FleurBaseWorkChain, namespace='last_calc'))\n\n #outdict['output_scf_wc_para'] = outputnode\n for link_name, node in outdict.items():\n self.out(link_name, node)\n\n if not self.ctx.reached_conv:\n return self.exit_codes.ERROR_DID_NOT_CONVERGE", "def define_inspect_outputs(self): # pragma: no cover\n self.inspect_outputs_dict = {}\n\n # RECON outputs\n # Dipy\n if self.config.recon_processing_tool == \"Dipy\":\n if (\n self.config.dipy_recon_config.local_model\n or self.config.diffusion_imaging_model == \"DSI\"\n ): # SHORE or CSD models\n\n if self.config.diffusion_imaging_model == \"DSI\":\n\n recon_dir = os.path.join(\n self.stage_dir, \"reconstruction\", \"dipy_SHORE\"\n )\n\n gfa_res = os.path.join(recon_dir, \"shore_gfa.nii.gz\")\n if os.path.exists(gfa_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" gFA image\"\n ] = [\"mrview\", gfa_res]\n msd_res = os.path.join(recon_dir, \"shore_msd.nii.gz\")\n if os.path.exists(msd_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" MSD image\"\n ] = [\"mrview\", msd_res]\n rtop_res = os.path.join(recon_dir, \"shore_rtop_signal.nii.gz\")\n if os.path.exists(rtop_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" RTOP image\"\n ] = [\"mrview\", rtop_res]\n dodf_res = 
os.path.join(recon_dir, \"shore_dodf.nii.gz\")\n if os.path.exists(dodf_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool\n + \" Diffusion ODF (SHORE) image\"\n ] = [\"mrview\", gfa_res, \"-odf.load_sh\", dodf_res]\n shm_coeff_res = os.path.join(recon_dir, \"shore_fodf.nii.gz\")\n if os.path.exists(shm_coeff_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool\n + \" Fiber ODF (SHORE) image\"\n ] = [\"mrview\", gfa_res, \"-odf.load_sh\", shm_coeff_res]\n else:\n recon_tensor_dir = os.path.join(\n self.stage_dir, \"reconstruction\", \"dipy_tensor\"\n )\n\n fa_res = os.path.join(\n recon_tensor_dir, \"diffusion_preproc_resampled_fa.nii.gz\"\n )\n if os.path.exists(fa_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" FA image\"\n ] = [\"mrview\", fa_res]\n\n recon_dir = os.path.join(\n self.stage_dir, \"reconstruction\", \"dipy_CSD\"\n )\n shm_coeff_res = os.path.join(\n recon_dir, \"diffusion_shm_coeff.nii.gz\"\n )\n if os.path.exists(shm_coeff_res):\n if os.path.exists(fa_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" ODF (CSD) image\"\n ] = [\"mrview\", fa_res, \"-odf.load_sh\", shm_coeff_res]\n else:\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" ODF (CSD) image\"\n ] = [\"mrview\", shm_coeff_res, \"-odf.load_sh\", shm_coeff_res]\n\n # TODO: add Tensor image in case of DTI+Tensor modeling\n # MRtrix\n if self.config.recon_processing_tool == \"MRtrix\":\n metrics_dir = os.path.join(\n self.stage_dir, \"reconstruction\", \"mrtrix_tensor_metrics\"\n )\n\n fa_res = os.path.join(metrics_dir, \"FA.mif\")\n if os.path.exists(fa_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" FA image\"\n ] = [\"mrview\", fa_res]\n\n adc_res = os.path.join(metrics_dir, \"ADC.mif\")\n if os.path.exists(adc_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" ADC image\"\n ] = [\"mrview\", adc_res]\n\n # Tensor model (DTI)\n if not self.config.mrtrix_recon_config.local_model:\n recon_dir = os.path.join(\n self.stage_dir, \"reconstruction\", \"mrtrix_make_tensor\"\n )\n\n tensor_res = os.path.join(\n recon_dir, \"diffusion_preproc_resampled_tensor.mif\"\n )\n if os.path.exists(fa_res) and os.path.exists(tensor_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" SH image\"\n ] = [\"mrview\", fa_res, \"-odf.load_tensor\", tensor_res]\n else: # CSD model\n RF_dir = os.path.join(self.stage_dir, \"reconstruction\", \"mrtrix_rf\")\n RF_resp = os.path.join(RF_dir, \"diffusion_preproc_resampled_ER.mif\")\n if os.path.exists(RF_resp):\n self.inspect_outputs_dict[\"MRTRIX Response function\"] = [\n \"shview\",\n \"-response\",\n RF_resp,\n ]\n\n recon_dir = os.path.join(self.stage_dir, \"reconstruction\", \"mrtrix_CSD\")\n shm_coeff_res = os.path.join(\n recon_dir, \"diffusion_preproc_resampled_CSD.mif\"\n )\n if os.path.exists(fa_res) and os.path.exists(shm_coeff_res):\n self.inspect_outputs_dict[\n self.config.recon_processing_tool + \" SH image\"\n ] = [\"mrview\", fa_res, \"-odf.load_sh\", shm_coeff_res]\n\n # Tracking outputs\n # Dipy\n if self.config.tracking_processing_tool == \"Dipy\":\n if (\n self.config.dipy_recon_config.local_model\n or self.config.diffusion_imaging_model == \"DSI\"\n ):\n if self.config.diffusion_model == \"Deterministic\":\n diff_dir = os.path.join(\n self.stage_dir, \"tracking\", \"dipy_deterministic_tracking\"\n )\n streamline_res = os.path.join(diff_dir, \"tract.trk\")\n else:\n diff_dir = 
os.path.join(\n self.stage_dir, \"tracking\", \"dipy_probabilistic_tracking\"\n )\n streamline_res = os.path.join(diff_dir, \"tract.trk\")\n\n if os.path.exists(streamline_res):\n self.inspect_outputs_dict[\n self.config.tracking_processing_tool\n + \" \"\n + self.config.diffusion_model\n + \" streamline\"\n ] = [\"trackvis\", streamline_res]\n else:\n diff_dir = os.path.join(\n self.stage_dir, \"tracking\", \"dipy_dtieudx_tracking\"\n )\n streamline_res = os.path.join(diff_dir, \"tract.trk\")\n if os.path.exists(streamline_res):\n self.inspect_outputs_dict[\n self.config.tracking_processing_tool\n + \" Tensor-based EuDX streamline\"\n ] = [\"trackvis\", streamline_res]\n\n # MRtrix\n if self.config.tracking_processing_tool == \"MRtrix\":\n\n diff_dir = os.path.join(self.stage_dir, \"tracking\", \"trackvis\")\n streamline_res = os.path.join(diff_dir, \"tract.trk\")\n\n if os.path.exists(streamline_res):\n self.inspect_outputs_dict[\n self.config.tracking_processing_tool\n + \" \"\n + self.config.diffusion_model\n + \" streamline\"\n ] = [\"trackvis\", streamline_res]\n\n self.inspect_outputs = sorted(\n [key for key in list(self.inspect_outputs_dict.keys())], key=str.lower\n )", "def create_output_map(self, outputs):\n # data_map contains topic, field (aka volttron point name),\n # and meta data associated with each unique Modelica point name.\n self.data_map = outputs\n for name, info in outputs.items():\n topic = info['topic']\n self.output_data[topic] = [{}, {}]\n # Modelica sends the measurements one at a time. When\n # the agent receives a measurement it removes the point from the\n # data_map but data_map_master is never manipulated after it is created\n self.data_map_master = dict(self.data_map)\n log.debug('data %s', self.data_map)", "def collect_output_shapes(graph: NNCFGraph) -> Dict[NNCFNodeName, List[int]]:\n modules_out_shapes = {}\n output_shape_collecting_info = [\n (NNCF_GENERAL_CONV_MODULES_DICT, slice(2, None)),\n (NNCF_LINEAR_MODULES_DICT, slice(None)),\n ]\n for nncf_module_type, shape_slice in output_shape_collecting_info:\n for node in graph.get_nodes_by_types([v.op_func_name for v in nncf_module_type]):\n output_edges = graph.get_output_edges(node)\n if output_edges:\n out_edge = output_edges[0]\n out_shape = out_edge.tensor_shape[shape_slice]\n else:\n # For disconnected NNCFGraph when node have no output edge\n out_shape = _calculate_output_shape(graph, node)\n nncf_logger.debug(f\"Node {node.node_name} has no output edge in NNCFGraph\")\n modules_out_shapes[node.node_name] = out_shape\n\n return modules_out_shapes", "def exeDisplayData():\n exe = 'eric6_api'\n if Utilities.isWindowsPlatform():\n exe = os.path.join(getConfig(\"bindir\"), exe + '.cmd')\n if not os.path.exists(exe):\n exe = os.path.join(getConfig(\"bindir\"), exe + '.bat')\n else:\n exe = os.path.join(getConfig(\"bindir\"), exe)\n \n data = {\n \"programEntry\": True,\n \"header\": QCoreApplication.translate(\n \"EricapiPlugin\", \"Eric6 API File Generator\"),\n \"exe\": exe,\n \"versionCommand\": '--version',\n \"versionStartsWith\": 'eric6_',\n \"versionPosition\": -3,\n \"version\": \"\",\n \"versionCleanup\": None,\n }\n \n return data", "def export(self):\n return {'meta': self.meta, 'justifications': self.justifications}", "def getData(self):\n return (\n self.__generateIncludeDirectoriesList(),\n self.__generateDefinedNamesList(),\n self.__generateUndefinedNamesList(),\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Repeat the same feature vector over all spatial positions of a given feature map. The feature vector should have the same batch size and number of features as the feature map.
def tile_2d_over_nd(feature_vector, feature_map):
    n, c = feature_vector.size()
    spatial_size = feature_map.dim() - 2
    tiled = feature_vector.view(n, c, *([1] * spatial_size)).expand_as(feature_map)
    return tiled
[ "def tile_2d_over_nd(feature_vector, feature_map):\n n, c = feature_vector.size()\n spatial_size = feature_map.dim() - 2\n tiled = feature_vector.view(n, c, *([1] * spatial_size)).expand_as(feature_map)\n return tiled", "def map_add_features(x, s):\n stride = s.tensor_stride\n coords = s.coords.long()\n feats = x.permute(0, 2, 3, 1)\n feats = feats[coords[:, -1],\n coords[:, 0] // stride[0],\n coords[:, 1] // stride[1]]\n return ME.SparseTensor(coords=coords, feats=feats + s.feats,\n coords_manager=s.coords_man, force_creation=True,\n tensor_stride=s.tensor_stride)", "def produce_features(self, game_map):\n feature_matrix = [[[0 for _ in range(NUM_IMAGE_LAYERS)] for _ in range(MAP_MAX_HEIGHT//SCALE_FACTOR)] for _ in range(MAP_MAX_WIDTH//SCALE_FACTOR)]\n\n for planet in game_map.all_planets():\n\n # Compute \"ownership\" feature - 1 if planet is not occupied, 2 if occupied by us, 3 if occupied by enemy.\n if planet.owner == game_map.get_me():\n ownership = 2\n elif planet.owner is None:\n ownership = 1\n else: # owned by enemy\n ownership = 3\n\n remaining_docking_spots = planet.num_docking_spots - len(planet.all_docked_ships())\n x = int(planet.x/SCALE_FACTOR)\n y = int(planet.y/SCALE_FACTOR)\n radius = int(planet.radius/SCALE_FACTOR)\n for i in range(radius):\n for j in range(radius):\n if i**2 + j**2 <= radius**2:\n feature_matrix[x+i][y+j][0] = ownership\n feature_matrix[x-i][y+j][0] = ownership\n feature_matrix[x+i][y-j][0] = ownership\n feature_matrix[x-i][y-j][0] = ownership\n if ownership != 3:\n feature_matrix[x+i][y+j][3] = remaining_docking_spots\n feature_matrix[x-i][y+j][3] = remaining_docking_spots\n feature_matrix[x+i][y-j][3] = remaining_docking_spots\n feature_matrix[x-i][y-j][3] = remaining_docking_spots\n\n my_id = game_map.get_me().id\n\n\n for player in game_map.all_players():\n if player.id == my_id:\n for ship in player.all_ships():\n feature_matrix[int(ship.x/SCALE_FACTOR)][int(ship.y/SCALE_FACTOR)][1]+=1\n else:\n for ship in player.all_ships():\n feature_matrix[int(ship.x/SCALE_FACTOR)][int(ship.y/SCALE_FACTOR)][2]+=1\n\n \n\n\n return feature_matrix", "def reset_features(self):\n x = np.random.randn(self.T, self.n_arms, self.n_features)\n x /= np.repeat(np.linalg.norm(x, axis=-1, ord=2), self.n_features).reshape(self.T, self.n_arms, self.n_features)\n self.features = x", "def _getFeatureMaps(self):\n assert(self._GFM is not None)\n \n FM = [] # FM is an array of feature maps (scaling number, channel, height, width)\n\n for s in range(self._scalingNumber, 0, -1):\n\n FM.append(self._createFeatureMap(s))\n\n self._FM = np.array(FM)", "def repeat(val: tf.Tensor, axis: int, reps: int) -> tf.Tensor:\n return tf.repeat(tf.expand_dims(val, axis), reps, axis)", "def _copy_features(sg_features: tf.train.Feature,\n ex_features_dict: Dict[str, tf.train.Feature]):\n for feature_name, ex_feature in ex_features_dict.items():\n sg_feature = sg_features.feature.get(feature_name, None)\n if sg_feature is None:\n # Feature is empty for that node. 
Fail for now, ragged tensors are not\n # supported by this conversion routine.\n raise ValueError(\"Feature '{}' is missing from input: {}\".format(\n feature_name, sg_features))\n ex_feature.MergeFrom(sg_feature)", "def gen_tile_features(self, args: dict):\n raise NotImplementedError", "def compute_all_features(self, grasps):\n num_digits = len(str(len(grasps)-1)) # for padding with zeros\n features = []\n for i, grasp in enumerate(grasps):\n logging.info('Computing features for grasp %d' %(i))\n\n feature = self._compute_feature_rep(grasp)#, '%s_%s' %(self.graspable_.key, str(i).zfill(num_digits)))\n features.append(feature)\n return features", "def extend(self, features):\n for feature in features:\n self.append(feature)", "def forward(self, proposals, keypoints_xyz, keypoints_features):\n gridpoints = self.sample_gridpoints(proposals)\n gridpoints = gridpoints.view(1, -1, 3)\n pooled_features = self.pnet(keypoints_xyz, keypoints_features, gridpoints)[1]\n n = proposals.wlh.shape[0]\n m = self.cfg.n_gridpoints\n pooled_features = pooled_features.view(1, -1, n, m) \\\n .permute(0, 3, 1, 2).contiguous().view(1, n, -1)\n pooled_features = self.reduction(pooled_features)\n return pooled_features", "def tile(x, count, dim=0):\n perm = list(range(len(x.size())))\n if dim != 0:\n perm[0], perm[dim] = perm[dim], perm[0]\n x = x.permute(perm).contiguous()\n out_size = list(x.size())\n out_size[0] *= count\n batch = x.size(0)\n x = x.view(batch, -1).transpose(0, 1).repeat(count, 1).transpose(0,\n 1).contiguous().view(\n *out_size)\n if dim != 0:\n x = x.permute(perm).contiguous()\n return x", "def repeat_elements(x, rep, axis):\n\tx_shape = x.get_shape().as_list()\n\tif x_shape[axis] is not None:\n\t\tsplits = tf.split(value=x, num_or_size_splits=x_shape[axis], axis=axis)\n\t\tx_rep = [s for s in splits for _ in range(rep)]\n\t\treturn concatenate(x_rep, axis)\n\n\t# x_shape[axis] is None\t\n\t# Repeating\n\tauxiliary_axis = axis + 1\n\tx_shape = tf.shape(x)\n\tx_rep = tf.expand_dims(x, axis=auxiliary_axis)\n\treps = np.ones(len(x.get_shape()) + 1)\n\treps[auxiliary_axis] = rep\n\tx_rep = tf.tile(x_rep, reps)\n\n\t# Merging\n\treps = np.delete(reps, auxiliary_axis)\n\treps[axis] = rep\n\treps = tf.constant(reps, dtype='int32')\n\tx_shape = x_shape * reps\n\tx_rep = tf.reshape(x_rep, x_shape)\n\n\t# Fix shape representation\n\tx_shape = x.get_shape().as_list()\n\tx_rep.set_shape(x_shape)\n\tx_rep._keras_shape = tuple(x_shape)\n\treturn x_rep", "def write_feature_vector(target, post, feature):\n assert post.id < len(target)\n target[post.id, 1:] = feature\n target[post.id, 0] = 1", "def mapFeature(X1, X2):\n X1 = X1.reshape((X1.size, 1))\n X2 = X2.reshape((X2.size, 1))\n degree = 6\n out = np.ones(shape=(X1[:, 0].size, 1))\n\n for i in range(1, degree + 1):\n for j in range(i + 1):\n r = (X1 ** (i - j)) * (X2 ** j)\n out = np.append(out, r, axis=1)\n\n return out", "def _extend_gradient_vectors(results, m, n, keys=(\"dX_dp\", \"dY_dp\", \"dZ_dp\")):\n\n new_results = [{key: sparse.matrix_column(m) for key in keys} for _ in range(n)]\n results.extend(new_results)\n\n return results", "def pad_batch(features, batch_size):\n ts = []\n for t in nest.flatten(features):\n before_pads = [0] * t.get_shape().ndims\n after_pads = [0] * t.get_shape().ndims\n batch_pad = tf.convert_to_tensor(batch_size) - tf.shape(t)[0]\n after_pads[0] = batch_pad\n pads = list(zip(before_pads, after_pads))\n old_shape = t.get_shape().as_list()\n old_shape[0] = batch_size\n t = tf.pad(t, pads)\n t.set_shape(old_shape)\n 
ts.append(t)\n return nest.pack_sequence_as(features, ts)", "def _resize_concate(feature_maps, align_corners, index=-1, resize_size=None):\n if feature_maps is None:\n return None\n\n feature_map_list = []\n\n if index < 0:\n index += len(feature_maps)\n\n if resize_size is None:\n resize_size = (feature_maps[index].size(2),\n feature_maps[index].size(3))\n\n for feature_map in feature_maps:\n ori_size = (feature_map.size(2), feature_map.size(3))\n if ori_size != resize_size:\n feature_map = torch.nn.functional.interpolate(\n feature_map,\n size=resize_size,\n mode='bilinear',\n align_corners=align_corners)\n\n feature_map_list.append(feature_map)\n\n return feature_map_list", "def get_gsom_node_array_with_new_feature_vectors(gsom_nodemap, gsom_list, labels, input_database, centroids, global_centroid):\n frame_list = []\n no_of_nodes = len(gsom_list)\n print(\"no of nodes in gsom: \" + str(no_of_nodes))\n\n for x in range(no_of_nodes):\n gsom_node_weights = gsom_list[x]\n # print(\"\\nNode:\" + str(x))\n for key, node in gsom_nodemap.items():\n if (len(node.get_mapped_labels()) > 0):\n if (gsom_node_weights.tolist() == node.recurrent_weights[0].tolist()):\n updated_weights = []\n grade = []\n for frame in node.get_mapped_labels():\n prev_feature_vector = input_database[0][int(frame)].tolist()\n\n contsant = calculate_const_for_frame(\n global_centroid,\n centroids[labels[x]],\n gsom_node_weights,\n prev_feature_vector[0]\n )\n\n updated_weights.append(\n [contsant * val for val in prev_feature_vector[0]]\n )\n grade.append(contsant)\n\n frame_list.append([key, node, labels[x], node.get_mapped_labels(), updated_weights, grade])\n break\n return frame_list", "def expand_around_existing_points(self, num_pts, reso):\n\n new_pts = []\n \n i = numpy.arange(-num_pts * reso, num_pts * reso + reso*0.01, reso)\n for xi in i:\n for yi in i:\n for zi in i:\n vec = numpy.array([xi, yi, zi])\n new_pts.append(self.points + vec)\n self.points = numpy.vstack(new_pts)\n \n self.__unique_points()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply any number of attention maps over the input.
def apply_attention(input, attention):
    n, c = input.size()[:2]
    glimpses = attention.size(1)  # glimpses is equivalent to multiple heads in attention
    # flatten the spatial dims into the third dim, since we don't need to care about how they are arranged
    input = input.view(n, 1, c, -1)  # [n, 1, c, s] [batch, 1, channels, height*width] [48, 1, 2048, 7*7]
    attention = attention.view(n, glimpses, -1)  # [48, 2, 7*7]
    attention = torch.nn.functional.softmax(attention, dim=-1).unsqueeze(2)  # [n, g, 1, s] [batch, multi_head, 1, height*width] [48, 2, 1, 7*7]
    weighted = attention * input  # [n, g, c, s] [48, 2, 2048, 7*7]
    weighted_mean = weighted.sum(dim=-1)  # [n, g, c] [48, 2, 2048]
    return weighted_mean.view(n, -1)  # [48, 4096]
[ "def apply_attention(input, attention):\n # import pdb\n # pdb.set_trace()\n n, c = input.size()[:2]\n glimpses = attention.size(1)\n\n # flatten the spatial dims into the third dim, since we don't need to care about how they are arranged\n input = input.view(n, c, -1)\n attention = attention.view(n, glimpses, -1)\n s = input.size(2)\n\n # apply a softmax to each attention map separately\n # since softmax only takes 2d inputs, we have to collapse the first two dimensions together\n # so that each glimpse is normalized separately\n attention = attention.view(n * glimpses, -1)\n attention = F.softmax(attention)\n\n # apply the weighting by creating a new dim to tile both tensors over\n target_size = [n, glimpses, c, s]\n input = input.view(n, 1, c, s).expand(*target_size)\n attention = attention.view(n, glimpses, 1, s).expand(*target_size)\n weighted = input * attention\n # sum over only the spatial dimension\n weighted_mean = weighted.sum(dim=3)\n # the shape at this point is (n, glimpses, c, 1)\n return weighted_mean.view(n, -1)", "def attention_map(model, image):\n size = model.input_shape[1]\n grid_size = int(np.sqrt(model.layers[5].output_shape[0][-2] - 1))\n\n # Prepare the input\n X = vit.preprocess_inputs(cv2.resize(image, (size, size)))[np.newaxis, :] # type: ignore\n\n # Get the attention weights from each transformer.\n outputs = [\n l.output[1] for l in model.layers if isinstance(l, layers.TransformerBlock)\n ]\n weights = np.array(\n tf.keras.models.Model(inputs=model.inputs, outputs=outputs).predict(X)\n )\n num_layers = weights.shape[0]\n num_heads = weights.shape[2]\n reshaped = weights.reshape(\n (num_layers, num_heads, grid_size ** 2 + 1, grid_size ** 2 + 1)\n )\n\n # From Appendix D.6 in the paper ...\n # Average the attention weights across all heads.\n reshaped = reshaped.mean(axis=1)\n\n # From Section 3 in https://arxiv.org/pdf/2005.00928.pdf ...\n # To account for residual connections, we add an identity matrix to the\n # attention matrix and re-normalize the weights.\n reshaped = reshaped + np.eye(reshaped.shape[1])\n reshaped = reshaped / reshaped.sum(axis=(1, 2))[:, np.newaxis, np.newaxis]\n\n # Recursively multiply the weight matrices\n v = reshaped[-1]\n for n in range(1, len(reshaped)):\n v = np.matmul(v, reshaped[-1 - n])\n\n # Attention from the output token to the input space.\n mask = v[0, 1:].reshape(grid_size, grid_size)\n mask = cv2.resize(mask / mask.max(), (image.shape[1], image.shape[0]))[\n ..., np.newaxis\n ]\n return (mask * image).astype(\"uint8\")", "def __call__(self, inputs_to_attend, attn_gen):\n _, height, width, _ = inputs_to_attend.get_shape().as_list()\n attention_size = height * width\n # Use a MLP here\n attention = layers.fully_connected(attn_gen, 10, scope='attn_spatial1')\n attention = layers.fully_connected(\n attention,\n attention_size,\n activation_fn=None,\n scope='attn_spatial2')\n attention = tf.nn.softmax(attention)\n\n # [batch_size, kernel_size, kernel_size, n_channels]\n inputs_shape = inputs_to_attend.get_shape().as_list()\n # reshape to [batch_size, kernel_size, kernel_size]\n attention_shaped = tf.reshape(attention, inputs_shape[:3])\n attention_shaped = tf.expand_dims(attention_shaped, axis=-1)\n inputs_to_attend *= attention_shaped\n\n return inputs_to_attend, attention", "def _forward_paired(self, inputs, scales):\n x_1x = inputs['images']\n\n # run 1x scale\n assert 1.0 in scales, 'expected one of scales to be 1.0'\n ps = {}\n all_feats = {}\n ps[1.0], all_feats[1.0] = self._fwd(x_1x)\n\n # run all other scales\n 
for scale in scales:\n if scale == 1.0:\n continue\n resized_x = ResizeX(x_1x, scale)\n p, feats = self._fwd(resized_x)\n ps[scale] = scale_as(p, x_1x)\n all_feats[scale] = scale_as(feats, all_feats[1.0])\n\n # Generate all attention outputs\n output = None\n num_scales = len(scales)\n attn = {}\n for idx in range(num_scales - 1):\n lo_scale = scales[idx]\n hi_scale = scales[idx + 1]\n concat_feats = torch.cat([all_feats[lo_scale],\n all_feats[hi_scale]], 1)\n p_attn = self.scale_attn(concat_feats)\n attn[lo_scale] = scale_as(p_attn, x_1x)\n\n # Normalize attentions\n norm_attn = {}\n last_attn = None\n for idx in range(num_scales - 1):\n lo_scale = scales[idx]\n hi_scale = scales[idx + 1]\n attn_lo = attn[lo_scale][:, 0:1, :, :]\n attn_hi = attn[lo_scale][:, 1:2, :, :]\n if last_attn is None:\n norm_attn[lo_scale] = attn_lo\n norm_attn[hi_scale] = attn_hi\n else:\n normalize_this_attn = last_attn / (attn_lo + attn_hi)\n norm_attn[lo_scale] = attn_lo * normalize_this_attn\n norm_attn[hi_scale] = attn_hi * normalize_this_attn\n last_attn = attn_hi\n\n # Apply attentions\n for idx, scale in enumerate(scales):\n attn = norm_attn[scale]\n attn_1x_scale = scale_as(attn, x_1x)\n if output is None:\n output = ps[scale] * attn_1x_scale\n else:\n output += ps[scale] * attn_1x_scale\n\n if self.training:\n assert 'gts' in inputs\n gts = inputs['gts']\n loss = self.criterion(output, gts)\n return loss\n else:\n return output, attn", "def _attention_scores(from_view, to_view, additive_mask,\n queries, keys):\n from_buckets = 'N' if from_view in ('tail', 'window') else ''\n to_buckets = 'N' if to_view in ('tail', 'window') else ''\n result_buckets = from_buckets or to_buckets\n # Computes unmasked attention scores. If either from or to views have a\n # num_bucket dimension, we keep it in the output.\n scores = tf.einsum(\n f'BH{from_buckets}FE,BH{to_buckets}TE->BH{result_buckets}FT',\n getattr(queries, from_view),\n getattr(keys, to_view),\n name=f'query_key_{from_view}_{to_view}')\n\n return scores + additive_mask", "def activation_maps(self, input):\n for layer in self._model.layers:\n output = K.function([layer.get_input_at(0), K.learning_phase()],\n [layer.get_output_at(0)])\n input = output([input, 0])[0]\n\n return input.squeeze()", "def __call__(self, inputs_to_attend, attn_gen):\n attention_size = inputs_to_attend.get_shape().as_list()[-1]\n\n if self._use_mlp:\n attn_gen = layers.fully_connected(attn_gen, 128,\n scope='feature_attn_gen0')\n shift_and_scale = layers.fully_connected(\n attn_gen,\n attention_size * 2,\n activation_fn=None,\n biases_initializer=tf.zeros_initializer(),\n weights_initializer=tf.zeros_initializer(),\n scope='feature_attn_gen')\n shift, scale = tf.split(shift_and_scale, num_or_size_splits=2, axis=1)\n scale = tf.nn.relu(scale + 1.0)\n\n shift = tf.expand_dims(tf.expand_dims(shift, axis=1), axis=1)\n inputs_to_attend += shift\n\n scale = tf.expand_dims(tf.expand_dims(scale, axis=1), axis=1)\n inputs_to_attend *= scale\n\n inputs_to_attend = tf.nn.relu(inputs_to_attend)\n return inputs_to_attend, (shift, scale)", "def augmentator(images, masks):\n spatial_aug = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Flipud(0.5), # vertical flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, rotate them and shear them.\n iaa.Affine(\n scale=(0.8, 1.2),\n translate_percent={\"x\": (-0.1, 0.1), \"y\": (-0.1, 0.1)},\n rotate=(-20, 20),\n # shear=(-20, 20),\n order=[1], # use 
nearest neighbour or bilinear interpolation (fast)\n cval=125, # if mode is constant, use a cval between 0 and 255\n mode=\"reflect\",\n name=\"Affine\")\n ], random_order=True)\n\n blur_aug = iaa.Sequential([\n # Blur about 50% of all images.\n iaa.Sometimes(0.5,\n iaa.OneOf([\n iaa.GaussianBlur(sigma=(0, 0.5)),\n iaa.AverageBlur(k=(3, 7)),\n iaa.MedianBlur(k=(3, 7)),\n ])\n )\n\n ], random_order=True)\n\n elastic_aug = iaa.Sometimes(0.5, [iaa.ElasticTransformation(alpha=(30, 60), sigma=10)])\n\n other_aug = iaa.Sequential([\n iaa.Sometimes(0.5, [\n iaa.OneOf([\n iaa.contrast.CLAHE(clip_limit=2),\n iaa.contrast.GammaContrast(gamma=(0.5, 2.0))\n ]),\n # change brightness of images\n iaa.Add((-40, 40))\n ])\n ], random_order=True)\n\n # Freeze randomization to apply same to labels\n spatial_det = spatial_aug.to_deterministic()\n elastic_det = elastic_aug.to_deterministic()\n\n # when input mask is float32, the no channels must be 3 as it would be 3 classes.\n # TODO: remove nb_classes parameter, it's deprecated\n segmaps = [SegmentationMapOnImage(m, nb_classes=3, shape=images[i].shape) for i, m in enumerate(masks)]\n\n aug_images, aug_masks = spatial_det.augment_images(images), spatial_det.augment_segmentation_maps(segmaps=segmaps)\n aug_images, aug_masks = elastic_det.augment_images(aug_images), elastic_det.augment_segmentation_maps(segmaps=aug_masks)\n aug_images = blur_aug.augment_images(aug_images)\n aug_images = other_aug.augment_images(aug_images)\n\n # convert seg_maps into numpy arrays with shape (H,W,1)\n # TODO: use get_arr() function for converting to a numpy array the SegmentationMapOnImage instances\n aug_masks = [np.expand_dims(m.arr[:, :, 0], axis=2) for m in aug_masks]\n\n return aug_images, aug_masks", "def get_map_fn(transformation_list: List[str], param_dict: Dict, n_classes: int):\n def map_fn(image, label):\n label = tf.one_hot(label, n_classes)\n image = augmentations.apply_list_of_transformations(image, transformation_list, param_dict)\n return image, label\n return map_fn", "def apply_learn_output_fns(self,active_units_mask=True):\n for of in self.weights_output_fns:\n of(CFIter(self,active_units_mask=active_units_mask))", "def apply(self,\n inputs,\n mask,\n num_encoders=6,\n num_heads=8,\n value_dim=128,\n activation_fn=flax.deprecated.nn.relu,\n weight_init=jax.nn.initializers.xavier_normal()):\n inputs = flax.deprecated.nn.Dense(\n inputs, features=value_dim, kernel_init=weight_init)\n for _ in range(num_encoders):\n inputs = TransformerEncoderLayer(inputs,\n mask,\n activation_fn=activation_fn,\n num_heads=num_heads,\n weight_init=weight_init)\n return inputs", "def contextual_attention_layer(\n genes, smiles, attention_size, reduce_sequence=True,\n return_alphas=True, name=None\n):\n with tf.variable_scope(\n name, default_name='merged_attention_layer',\n values=[genes, smiles]\n ):\n genes = tf.expand_dims(genes, 2) if len(genes.shape) == 2 else genes\n hidden_size = smiles.shape[2].value\n num_genes = genes.shape[1].value\n num_gene_features = genes.shape[2].value\n\n # Trainable parameters.\n w_num_gene_features = tf.Variable(\n tf.random_normal([num_gene_features], stddev=0.1)\n )\n w_genes = tf.Variable(\n tf.random_normal([num_genes, attention_size], stddev=0.1)\n )\n b_genes = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n\n \n w_smiles = tf.Variable(\n tf.random_normal([hidden_size, attention_size], stddev=0.1)\n )\n b_smiles = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n v = tf.Variable(tf.random_normal([attention_size], 
stddev=0.1))\n\n with tf.name_scope('x'):\n # Applying fully connected layer with non-linear activation and\n # genes context to each of the batch_size * sequence_length.\n # Shape of `x` is `[batch_size, sequence_length, attention_size]`\n\n genes_collapsed = tf.tensordot(\n genes, w_num_gene_features, axes=[2, 0]\n )\n\n x = tf.tanh(\n tf.expand_dims(\n tf.tensordot(\n genes_collapsed, w_genes, axes=1\n ) + b_genes,\n axis=1\n ) \n + (tf.tensordot(smiles, w_smiles, axes=1) + b_smiles)\n )\n\n # For each of the timestamps its vector of size attention_size\n # from `v` is reduced with `u` vector\n # `[batch_size, sequence_length]`\n xv = tf.tensordot(x, v, axes=1, name='unnormalized')\n # `[batch_size, sequence_length]`\n alphas = tf.nn.softmax(xv, name='alphas')\n\n # If reduce_sequence is true, result is `[batch_size, hidden_size]`\n # else it is `[batch_size, sequence_length, hidden_size]`\n output = (\n tf.reduce_sum(smiles * tf.expand_dims(alphas, -1), 1)\n if reduce_sequence else\n smiles * tf.expand_dims(alphas, -1)\n )\n\n # Optionally return the attention weights\n return (\n (output, alphas)\n if return_alphas else\n output\n )", "def attention_layer(query_layer_l,\n key_layer_l,\n value_layer_l,\n from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(a=output_tensor, perm=[0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3],name='')\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3],name='')\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n # query_layer = tf.compat.v1.layers.dense(\n # from_tensor_2d,\n # num_attention_heads * size_per_head,\n # activation=query_act,\n # name=\"query\",\n # kernel_initializer=create_initializer(initializer_range))\n\n # query_layer_l=Dense(name='query',\n # kernel_initializer=create_initializer(initializer_range),\n # units=num_attention_heads * size_per_head,\n # activation=query_act)\n\n query_layer=query_layer_l(from_tensor_2d)\n\n # `key_layer` = [B*T, N*H]\n # key_layer = tf.compat.v1.layers.dense(\n # to_tensor_2d,\n # num_attention_heads * size_per_head,\n # activation=key_act,\n # name=\"key\",\n # kernel_initializer=create_initializer(initializer_range))\n # key_layer_l = 
Dense(name='key',\n # kernel_initializer=create_initializer(initializer_range),\n # units=num_attention_heads * size_per_head,\n # activation=key_act)\n\n key_layer = key_layer_l(to_tensor_2d)\n\n\n # `value_layer` = [B*T, N*H]\n # value_layer = tf.compat.v1.layers.dense(\n # to_tensor_2d,\n # num_attention_heads * size_per_head,\n # activation=value_act,\n # name=\"value\",\n # kernel_initializer=create_initializer(initializer_range))\n # value_layer_l = Dense(name='value',\n # kernel_initializer=create_initializer(initializer_range),\n # units=num_attention_heads * size_per_head,\n # activation=value_act)\n\n value_layer = value_layer_l(to_tensor_2d)\n\n\n # `query_layer` = [B, N, F, H]\n query_layer = transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(a=value_layer, perm=[0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(a=context_layer, perm=[0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer", "def _perturb_inputs(\n inputs: Iterable[Any],\n input_roles: Tuple[int],\n baselines: Tuple[Union[int, float, Tensor], ...],\n perturbation_mask: Tuple[Union[Tensor, None], ...],\n) -> Tuple[Any, ...]:\n\n perturbed_inputs = []\n attr_inp_count = 0\n\n for inp, role in zip(inputs, input_roles):\n if role != InputRole.need_attr:\n perturbed_inputs.append(inp)\n continue\n\n pert_mask = perturbation_mask[attr_inp_count]\n\n # no perturbation is needed for this input\n if pert_mask is None:\n perturbed_inputs.append(inp)\n else:\n baseline = 
baselines[attr_inp_count]\n\n perturbed_inp = inp * pert_mask + baseline * (1 - pert_mask)\n perturbed_inputs.append(perturbed_inp)\n\n attr_inp_count += 1\n\n perturbed_inputs = tuple(perturbed_inputs)\n\n return perturbed_inputs", "def _save_attention_map(self, attention_map, layer_output_dir, j, k, raw_input):\r\n if self.gcam_dict['save_pickle']:\r\n self.gcam_dict['pickle_maps'].append(attention_map)\r\n if self.gcam_dict['save_maps']:\r\n gcam_utils.save_attention_map(filename=layer_output_dir + \"/attention_map_\" + str(self.gcam_dict['counter']) + \"_\" + str(j) + \"_\" + str(k), attention_map=attention_map, heatmap=self.gcam_dict['heatmap'], raw_input=raw_input)", "def map_enumerate(maybe_fn_or_fn_list, maybe_input_or_inputs, *args, **kwargs):\n # make sure we actually have a list of inputs...\n if not isinstance(maybe_input_or_inputs, list):\n inputs = [maybe_input_or_inputs]\n else:\n inputs = maybe_input_or_inputs\n # function is callable? just map it over the inputs\n if callable(maybe_fn_or_fn_list):\n return [maybe_fn_or_fn_list(arg, *args, *kwargs) for arg in inputs]\n # same number of inputs as functions: apply function i to arg i\n fn_list = maybe_fn_or_fn_list\n if len(fn_list) == len(inputs):\n return [fn_list[i](inputs[i], *args, **kwargs)\n for i in range(len(fn_list))]\n # many inputs one function, apply function to inputs\n elif len(fn_list) is 1 and len(inputs) > 1:\n return [fn_list[0](inputs[i], *args, **kwargs)\n for i in range(len(inputs))]\n # many functions on one input, apply functions independently\n elif len(fn_list) > 1 and len(inputs) == 1:\n return [fn_list[i](inputs[0], *args, **kwargs)\n for i in range(len(fn_list))]\n else:\n raise Exception(\"map_enumerate fail\",\n maybe_fn_or_fn_list, maybe_input_or_inputs,\n *args, **kwargs)", "def MapMulti(iterable, *funcs):\n tees = itt.tee(iterable, len(funcs))\n return [map(f, t) for f, t in zip(funcs, tees)]", "def post_attention(self, encode_hiddens, con_decode_hidden):\r\n post_w = tf.stack([self.post_trans_w for _ in range(self.batch_size)]) # [batch, hidden, 1]\r\n post_sim = tf.matmul(encode_hiddens, post_w) # [batch, time, 1]\r\n single_decode = tf.matmul(con_decode_hidden, self.response_trans_w) # [batch, 1]\r\n response_sim = tf.stack([single_decode for _ in range(self.config.max_len)], axis=1) # [batch, time, 1]\r\n attention_scores = self.activate(post_sim + response_sim) # [batch, time, 1]\r\n\r\n sfx_att_scores = tf.exp(attention_scores)\r\n sum_att_scores = tf.reduce_sum(sfx_att_scores, axis=1, keep_dims=True)\r\n sfx_att_scores = sfx_att_scores / sum_att_scores # [batch, time, 1]\r\n post_att_vectors = tf.reduce_sum(sfx_att_scores * encode_hiddens, axis=1) # [batch, hidden]\r\n return post_att_vectors", "def apply_calibs(self):\n\n for i in range(self.NHWChannels):\n if self.CalibFuncts[i] is not None:\n self.data[i] = self.CalibFuncts[i](self.data[i])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count the number of characters that differ between box1 and box2
def chars_different(box1, box2):
    diff = sum(
        1 if i != j else 0 for i, j in zip(box1, box2)
    )
    return diff
[ "def occurrences(text1, text2):\n num_text1 = {char:0 for char in text1}\n\n for char in text2:\n if char in num_text1:\n num_text1[char] += 1\n\n return sum(list(num_text1.values()))", "def _get_common_letters(box_id1: str, box_id2: str) -> str:\n # I can iterate through both the string and find out the diff in one iteration only.\n # below logic is O(n), n - min(length(boxid1), length(boxid2))\n common_letters: Iterable[str] = map(\n lambda e: e[0] if e[0] == e[1] else \"\", zip(box_id1, box_id2)\n )\n return \"\".join(common_letters)", "def count_differences(self):\r\n result = calc2.count_differences(\"asdf\", \"asdg\")\r\n self.assertEqual(result, 2)", "def difference_between_words(a, b):\n a = a.lower()\n b = b.lower()\n if a == b:\n return 100\n zipped = zip(a, b) # give list of tuples (of letters at each index)\n difference = sum(1 for e in zipped if e[0] != e[1]) # count tuples with non matching elements\n difference = difference + abs(len(a) - len(b))\n return difference", "def commonCharacterCount(s1, s2):\r\n\r\n\t# number of similar characters.\r\n\tcounter = 0\r\n\r\n\t# mutable lists to hold characters of the two strings.\r\n\tls1 = list()\r\n\tls2 = list()\r\n\r\n\t# Append characters of strings to the two lists.\r\n\tfor c in s1:\r\n\t\tls1.append(c)\r\n\tfor c in s2:\r\n\t\tls2.append(c)\r\n\r\n\t# Compare both Strings\r\n\tfor indx, value in enumerate(ls1):\r\n\t\tfor indx2,value2 in enumerate(ls2):\r\n\r\n\t\t\t# increment counter, and remove character from second string to avoid duplicate characters in both lists.\r\n\t\t\tif (value == value2):\r\n\t\t\t\tcounter = counter + 1\r\n\t\t\t\tls2.pop(indx2)\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\treturn counter", "def common_chars(box1, box2):\n return ''.join(i if i == j else '' for i, j in zip(box1, box2))", "def compare_adj_count(text_a, text_b):\r\n pass", "def getEditDist(str1, str2):\n assert(len(str1)==len(str2))\n str1 = str1.upper()\n str2 = str2.upper()\n\n editDist = 0\n for c1, c2 in zip(str1, str2):\n if c1!=c2:\n editDist +=1\n return editDist", "def caminhadas(char1, char2):\n qtde_andadas = 0\n letras = 'abcdefghijklmnopqrstuvwxyz'\n while char1 != char2:\n posicao = letras.index(char1) + 1\n if posicao == 26:\n posicao = 0\n char1 = letras[posicao]\n qtde_andadas += 1\n return qtde_andadas", "def hammingDistance( s1, s2 ):\n strLen = len( s1 )\n count = 0\n for i in range( strLen ):\n if s1[i] != s2[i]:\n count += 1\n return count", "def calc_num_changes(text1, text2):\n\n\tif isinstance(text1, basestring) and isinstance(text2, basestring):\n\n\t\t# Number of changes to document\n\t\tn_changes = 0\n\n\t\t# Last character of line\n\t\tlast_char = None\n\n\t\t# Loop over every line of function unified_diff\n\t\tfor line in unified_diff(text1.split(), text2.split(), lineterm=''):\n\t\t\t\n\t\t\t# Current character of line\n\t\t\tcurr_char = line[0]\n\n\t\t\tfor prefix in ('---', '+++', '@@'):\n\t\t\t\tif line.startswith(prefix):\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\t# Check for changes in lines\n\t\t\t\tif (last_char == '-' and curr_char == ' ') or \\\n\t\t\t\t\t(last_char == '-' and curr_char == '+') or \\\n\t\t\t\t\t(last_char == '+' and curr_char == ' ') or \\\n\t\t\t\t\t(last_char == '+' and curr_char == '-'):\n\t\t\t\t\tn_changes = n_changes + 1\n\t\t\t\n\t\t\t\t# Update last_char\n\t\t\t\tlast_char = curr_char\n\n\t\treturn n_changes\n\telse:\n\t\treturn None", "def validate_box(self, letters):\n \n if 2 in letters.values():\n self.two_letter_boxes += 1\n\n if 3 in letters.values():\n 
self.three_letter_boxes += 1", "def total_length(s1: str, s2: str) -> int:\n return len(s1+s2)", "def one_char_diff(first, second):\n differences = sum((x != y) for x, y in zip(first, second))\n return differences == 1", "def editDistance(bitstring1, bitstring2):\n distance = 0\n for b in range(len(bitstring1)):\n distance = distance + bin(bitstring1[b] ^ bitstring2[b])[2:].count('1')\n\n return distance", "def hamdist(str1, str2):\n diffs = 0\n if len(str1) != len(str2):\n return max(len(str1),len(str2))\n for ch1, ch2 in zip(str1, str2):\n if ch1 != ch2:\n\t diffs += 1\n return diffs", "def test_len(self):\r\n self.assertEqual(len(ANSIString('{gTest{n')), 4)", "def _cmp_size(self, a, b):\n return len(a) - len(b)", "def _lcs_len(X, Y, m, n, memo):\n # If there are no more elements in either string.\n if m == 0 or n == 0:\n return 0\n key = (m, n)\n if key not in memo:\n # If last charachter of X and Y matches\n if X[m-1] == Y[n-1]:\n memo[key] = _lcs_len(X, Y, m-1, n-1, memo) + 1\n else:\n # Else if last charachter does NOT match.\n memo[key] = max(_lcs_len(X, Y, m-1, n, memo), _lcs_len(X, Y, m, n-1, memo))\n return memo[key]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all common characters between box1 and box2 in order >>> common_chars('abcdef', 'abddeg') 'abde'
def common_chars(box1, box2): return ''.join(i if i == j else '' for i, j in zip(box1, box2))
[ "def _get_common_letters(box_id1: str, box_id2: str) -> str:\n # I can iterate through both the string and find out the diff in one iteration only.\n # below logic is O(n), n - min(length(boxid1), length(boxid2))\n common_letters: Iterable[str] = map(\n lambda e: e[0] if e[0] == e[1] else \"\", zip(box_id1, box_id2)\n )\n return \"\".join(common_letters)", "def common_charecters(string1, string2):\n\n first_String= string1.lower()\n second_String= string2.lower()\n\n common = []\n\n for charecter in first_String:\n if charecter in second_String:\n common.append(charecter)\n else:\n None\n\n print(\"Common letters: {}\".format(common))", "def find_common_letters(box_ids: List[str]) -> Optional[str]:\n common_letters: Optional[str] = None\n\n for id1, id2 in combinations(box_ids, 2):\n common_letters = _get_common_letters(id1, id2)\n\n if (len(id1) - len(common_letters)) == 1:\n print(f\"Id1: {id1}, Id2: {id2}\")\n break\n\n common_letters = None\n\n return common_letters", "def common_letters(id_list):\n pair = find_correct_box_ids(id_list)\n pair_difference = compare_ids(pair[0], pair[1])[0]\n char_list = list(pair[1])\n char_list.pop(pair_difference[0])\n return \"\".join(char_list)", "def chars_in_two(*strings) -> set:\n if not all([isinstance(i, str) for i in strings]):\n raise TypeError\n if len(strings) < 2:\n raise ValueError\n set_combinations = combinations(strings, 2)\n common_in_two = set()\n for set_pair in set_combinations:\n common_in_two |= reduce(set.intersection, map(set, set_pair))\n return common_in_two", "def chars_in_all(*strings) -> set:\n if not all([isinstance(i, str) for i in strings]):\n raise TypeError\n common_chars = reduce(set.intersection, map(set, strings))\n return common_chars", "def commonCharacterCount(s1, s2):\r\n\r\n\t# number of similar characters.\r\n\tcounter = 0\r\n\r\n\t# mutable lists to hold characters of the two strings.\r\n\tls1 = list()\r\n\tls2 = list()\r\n\r\n\t# Append characters of strings to the two lists.\r\n\tfor c in s1:\r\n\t\tls1.append(c)\r\n\tfor c in s2:\r\n\t\tls2.append(c)\r\n\r\n\t# Compare both Strings\r\n\tfor indx, value in enumerate(ls1):\r\n\t\tfor indx2,value2 in enumerate(ls2):\r\n\r\n\t\t\t# increment counter, and remove character from second string to avoid duplicate characters in both lists.\r\n\t\t\tif (value == value2):\r\n\t\t\t\tcounter = counter + 1\r\n\t\t\t\tls2.pop(indx2)\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\treturn counter", "def chars_different(box1, box2):\n diff = sum(\n 1 if i != j else 0 for i, j in zip(box1, box2)\n )\n return diff", "def get_common(self, a, b):\n return ''.join(x for x, y in zip(a, b) if x == y)", "def num_common_letters(goal_word, guess):\n \"*** YOUR CODE HERE ***\"\n num_common = 0\n for letter1 in letters:\n find_goal = False\n find_guess = False\n for letter2 in goal_word:\n if letter1 == letter2:\n find_goal = True\n for letter3 in guess:\n if letter1 == letter3:\n find_guess = True\n if find_goal and find_guess:\n num_common += 1\n\n return num_common", "def remove_common_characters(string1: str, string2: str) -> Tuple[str, str]:\n if len(string1) != len(string2):\n raise ValueError(\n \"Both strings must be the same length.\"\n f\" ``string1`` is of length {len(string1)}\"\n f\" and ``string2`` is of length {len(string2)}\"\n )\n for i, string in enumerate((string1, string2)):\n if len(string) != len(set(string)):\n character_counts = collections.Counter(string)\n duplicates = [\n character\n for character in character_counts\n if 1 < character_counts[character]\n ]\n 
duplicate_preview = \", \".join(duplicate for duplicate in duplicates[:5])\n raise ValueError(\n \"Each string must contain only unique characters.\"\n f\" ``string{i + 1}`` contains duplicates of the\"\n f\" following characters: {duplicate_preview},...\"\n )\n cleaned_string1 = \"\"\n cleaned_string2 = \"\"\n for character1, character2 in zip(string1, string2):\n if character1 != character2:\n cleaned_string1 += character1\n cleaned_string2 += character2\n return cleaned_string1, cleaned_string2", "def caminhadas(char1, char2):\n qtde_andadas = 0\n letras = 'abcdefghijklmnopqrstuvwxyz'\n while char1 != char2:\n posicao = letras.index(char1) + 1\n if posicao == 26:\n posicao = 0\n char1 = letras[posicao]\n qtde_andadas += 1\n return qtde_andadas", "def find_common_roots(x,y):\n\tx = x.split(\".\")\n\ty = y.split(\".\")\n\tshared = []\n\tfor i in range(min(len(x), len(y))):\n\t\tif x[i] == y[i]:\n\t\t\tshared.append(x[i])\n\treturn \".\".join(shared)", "def common_roots(paths1:list, paths2:list):\n foo = []\n for p1 in paths1:\n for p2 in paths2:\n if p1[:len(p2)] == p2:\n foo.append(p2)\n elif p2[:len(p1)] == p1:\n foo.append(p1)\n# return list(set(foo)) # <- not hashable\n goo = [] # remove redundant elements\n for x in foo:\n if not x in goo:\n goo.append(x)\n return goo", "def find_two_similar_boxes(box_ids):\n for x, y in itertools.product(box_ids, box_ids):\n if one_char_diff(x, y):\n return x, y", "def twoStrings(s1, s2):\n #brute force solution O(len(s1) * len(s2))\n # for c1 in s1:\n # for c2 in s2:\n # if c1 == c2:\n # return 'YES'\n # return 'NO'\n\n # set solution O(len(s1)) since 'in' keyword is O(1) time\n all_chars = dict.fromkeys(set(s2), 1)\n for c in s1:\n if c in all_chars.keys():\n return 'YES'\n return 'NO'", "def find_pair_differs_by_one_char(box_ids):\n for str1, str2 in itertools.combinations(box_ids, r=2):\n difference_result = differs_by_one_char_same_len(str1, str2)\n if difference_result != -1:\n return (difference_result, (str1, str2))\n return None", "def common_elements(L1, L2):\n L1 = list(set(L1))\n L2 = list(set(L2))\n \n L_commom = []\n \n for l1e in L1:\n for l2e in L2:\n if l2e in L_commom:\n pass\n else:\n if l1e == l2e:\n L_commom.append(l1e)\n return L_commom", "def intersection(a,b):\n return \"\".join(sorted(set(c for c in a+b)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1. Remove all comments. 2. Separate the program into a list of substrings, each of which could represent a token. 3. Convert into a list of actual tokens, i.e. a TokenList(). 4. Append EOF and return.
def lex(self):
    #This regex defines all comments: (//comment\n).
    #Anything (.*) can go in comment, including nothing (hence * instead of +).
    comment_reg = '//.*\n'
    #Split prog around non-comment sections, then join to remove comments.
    new_inp = "".join(re.split(comment_reg, self.inp))
    #Separate into list called 'items' of strings which will become tokens
    items = re.findall('\w+|[,+*/(){}\[\];-]|[<=>]+|"[^\'\r\n]*"', new_inp)
    tokens = TokenList([self.choose_tok(x) for x in items])
    tokens.ls.append(Token(EOF, "eof")) #no end-of-file in string input
    return tokens
[ "def pp_tokenize(src, fname) :\n\tcmnts = map(lambda c: (c.start(), c.end()), re_cmnt.finditer(src))#XXX something more hitech?\n\ttoks = re_tok.finditer(src)\n\tlines = map(lambda c: (c.start(), c.end()), re_newln.finditer(src))\n\treturn map(lambda t: tok_wrap(t, toks, cmnts, lines, fname),\n\t\tifilter(lambda t: tok_is_commented(t, cmnts), toks))", "def get_token_types(self):\r\n \r\n # With help from: https://deplinenoise.wordpress.com/2012/01/04/python-tip-regex-based-tokenizer/\r\n SCANNER = re.compile(r'''\r\n (\\s+) | # whitespace\r\n (//)[^\\n]* | # comments\r\n 0[xX]([0-9A-Fa-f]+) | # hexadecimal integer literals\r\n (\\d+) | # integer literals\r\n (<<|>>) | # multi-char punctuation\r\n ([][(){}<>=,;:*+-/|&~]) | # punctuation \r\n ([A-Za-z_][A-Za-z0-9_]*) | # identifiers\r\n \"\"\"(.*?)\"\"\" | # multi-line string literal\r\n \"((?:[^\"\\n\\\\]|\\\\.)*)\" | # regular string literal\r\n (.) | # an error!\r\n ''', re.DOTALL | re.VERBOSE)\r\n \r\n for match in re.finditer(SCANNER, self.scanner.modified_source_text): \r\n \r\n (space, comment, hexint, integer, mpunct, \r\n punct, word, mstringlit, stringlit, badchar) = match.groups()\r\n \r\n if word: \r\n #-------------------------------------------------------------------\r\n # check if word is an keyword\r\n #-------------------------------------------------------------------\r\n if word in self.symbols.keyword: \r\n keyword_token = Token(word, \"keyword\") \r\n self.token_list.append(keyword_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an identifier\r\n #-------------------------------------------------------------------\r\n else:\r\n identifier_token = Token(word, \"identifier\") \r\n self.token_list.append(identifier_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an integerConstant\r\n #-------------------------------------------------------------------\r\n if integer:\r\n Int_token = Token(integer, \"integerConstant\") \r\n self.token_list.append(Int_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an symbol \r\n #-------------------------------------------------------------------\r\n if punct: \r\n symbol_token = Token(punct, \"symbol\") \r\n self.token_list.append(symbol_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an stringConstant\r\n #------------------------------------------------------------------- \r\n if stringlit: \r\n string_token = Token(stringlit, \"stringConstant\") \r\n self.token_list.append(string_token) \r\n #-------------------------------------------------------------------\r\n # append EOF token\r\n #------------------------------------------------------------------- \r\n EOF_token = Token(self.endmark, \"EOF\") \r\n self.token_list.append(EOF_token) \r\n \r\n return self.token_list", "def generate_tokens(readline):\r\n lnum = parenlev = continued = 0\r\n namechars, numchars = string.ascii_letters + '_', '0123456789'\r\n contstr, needcont = '', 0\r\n contline = None\r\n indents = [0]\r\n\r\n while 1: # loop over lines in stream\r\n try:\r\n line = readline()\r\n except StopIteration:\r\n line = ''\r\n lnum = lnum + 1\r\n pos, max = 0, len(line)\r\n\r\n if contstr: # continued string\r\n if not line:\r\n raise TokenError(\"EOF in multi-line string\", strstart)\r\n endmatch = endprog.match(line)\r\n if endmatch:\r\n pos = end = endmatch.end(0)\r\n yield (STRING, contstr + 
line[:end],\r\n strstart, (lnum, end), contline + line)\r\n contstr, needcont = '', 0\r\n contline = None\r\n elif needcont and line[-2:] != '\\\\\\n' and line[-3:] != '\\\\\\r\\n':\r\n yield (ERRORTOKEN, contstr + line,\r\n strstart, (lnum, len(line)), contline)\r\n contstr = ''\r\n contline = None\r\n continue\r\n else:\r\n contstr = contstr + line\r\n contline = contline + line\r\n continue\r\n\r\n elif parenlev == 0 and not continued: # new statement\r\n if not line: break\r\n column = 0\r\n while pos < max: # measure leading whitespace\r\n if line[pos] == ' ': column = column + 1\r\n elif line[pos] == '\\t': column = (column//tabsize + 1)*tabsize\r\n elif line[pos] == '\\f': column = 0\r\n else: break\r\n pos = pos + 1\r\n if pos == max: break\r\n\r\n if line[pos] in '#\\r\\n': # skip comments or blank lines\r\n if line[pos] == '#':\r\n comment_token = line[pos:].rstrip('\\r\\n')\r\n nl_pos = pos + len(comment_token)\r\n yield (COMMENT, comment_token,\r\n (lnum, pos), (lnum, pos + len(comment_token)), line)\r\n yield (NL, line[nl_pos:],\r\n (lnum, nl_pos), (lnum, len(line)), line)\r\n else:\r\n yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],\r\n (lnum, pos), (lnum, len(line)), line)\r\n continue\r\n\r\n if column > indents[-1]: # count indents or dedents\r\n indents.append(column)\r\n yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)\r\n while column < indents[-1]:\r\n if column not in indents:\r\n raise IndentationError(\r\n \"unindent does not match any outer indentation level\",\r\n (\"<tokenize>\", lnum, pos, line))\r\n indents = indents[:-1]\r\n yield (DEDENT, '', (lnum, pos), (lnum, pos), line)\r\n\r\n else: # continued statement\r\n if not line:\r\n raise TokenError(\"EOF in multi-line statement\", (lnum, 0))\r\n continued = 0\r\n\r\n while pos < max:\r\n pseudomatch = pseudoprog.match(line, pos)\r\n if pseudomatch: # scan for tokens\r\n start, end = pseudomatch.span(1)\r\n spos, epos, pos = (lnum, start), (lnum, end), end\r\n token, initial = line[start:end], line[start]\r\n\r\n if initial in numchars or \\\r\n (initial == '.' 
and token != '.'): # ordinary number\r\n yield (NUMBER, token, spos, epos, line)\r\n elif initial in '\\r\\n':\r\n newline = NEWLINE\r\n if parenlev > 0:\r\n newline = NL\r\n yield (newline, token, spos, epos, line)\r\n elif initial == '#':\r\n assert not token.endswith(\"\\n\")\r\n yield (COMMENT, token, spos, epos, line)\r\n elif token in triple_quoted:\r\n endprog = endprogs[token]\r\n endmatch = endprog.match(line, pos)\r\n if endmatch: # all on one line\r\n pos = endmatch.end(0)\r\n token = line[start:pos]\r\n yield (STRING, token, spos, (lnum, pos), line)\r\n else:\r\n strstart = (lnum, start) # multiple lines\r\n contstr = line[start:]\r\n contline = line\r\n break\r\n elif initial in single_quoted or \\\r\n token[:2] in single_quoted or \\\r\n token[:3] in single_quoted:\r\n if token[-1] == '\\n': # continued string\r\n strstart = (lnum, start)\r\n endprog = (endprogs[initial] or endprogs[token[1]] or\r\n endprogs[token[2]])\r\n contstr, needcont = line[start:], 1\r\n contline = line\r\n break\r\n else: # ordinary string\r\n yield (STRING, token, spos, epos, line)\r\n elif initial in namechars: # ordinary name\r\n yield (NAME, token, spos, epos, line)\r\n elif initial == '\\\\': # continued stmt\r\n # This yield is new; needed for better idempotency:\r\n yield (NL, token, spos, (lnum, pos), line)\r\n continued = 1\r\n else:\r\n if initial in '([{': parenlev = parenlev + 1\r\n elif initial in ')]}': parenlev = parenlev - 1\r\n yield (OP, token, spos, epos, line)\r\n else:\r\n yield (ERRORTOKEN, line[pos],\r\n (lnum, pos), (lnum, pos+1), line)\r\n pos = pos + 1\r\n\r\n for indent in indents[1:]: # pop remaining indent levels\r\n yield (DEDENT, '', (lnum, 0), (lnum, 0), '')\r\n yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')", "def tokenize_comments(ruddit: str) -> List[List[str]]:\n tokenizer = RegexpTokenizer(r'\\w+')\n with open(ruddit, 'r', encoding='utf-8') as cfile:\n\n tokenized_comments = []\n reader = csv.reader(cfile)\n for i, line in enumerate(reader):\n\n # Ignore the column headers\n if (i == 0):\n continue\n\n # Tokenize the lower-cased comment\n word_tokens = tokenizer.tokenize(line[0].lower())\n filtered_sentence = [w for w in word_tokens if w not in stop_words]\n tokenized_comments.append(filtered_sentence)\n\n cfile.close()\n\n return tokenized_comments", "def _tokenizeline(line, delimstrings=\" \", ignorestrings=[\"#\"]): \n tokens=[]\n comments = ''\n\n tmp = line.strip()\n if tmp: \n minlengthforst = -1\n actualignorestring = None\n lengthofline = len(tmp)\n\n #Find the ignore string that occurs first\n\n for st in ignorestrings:\n linelist = tmp.split(st)\n lengthforst = len(linelist[0])\n if lengthforst < lengthofline:\n\n #These strings are on the line\n if lengthforst < minlengthforst or -1 == minlengthforst:\n actualignorestring = st\n minlengthforst = lengthforst \n\n tokstring = \"\"\n\n if actualignorestring: \n linelist = tmp.split(actualignorestring)\n if len(linelist[1])>1:\n comments = actualignorestring + actualignorestring.join(linelist[1:])\n tokstring = linelist[0]\n else:\n tokstring = tmp\n if delimstrings== \"\":\n tokens = tokstring.split()\n else:\n #print \"delimstring \" , delimstrings\n tokens = map(lambda x: x.strip(), tokstring.split(delimstrings))\n ret = ( tokens , comments)\n return ret", "def generate_tokens(readline):\r\n lnum = parenlev = continued = 0\r\n namechars, numchars = string.ascii_letters + '_', '0123456789'\r\n contstr, needcont = '', 0\r\n contline = None\r\n indents = [0]\r\n\r\n while 1: # loop over lines in stream\r\n 
try:\r\n line = readline()\r\n except StopIteration:\r\n line = ''\r\n lnum = lnum + 1\r\n pos, max = 0, len(line)\r\n\r\n if contstr: # continued string\r\n if not line:\r\n raise TokenError, (\"EOF in multi-line string\", strstart)\r\n endmatch = endprog.match(line)\r\n if endmatch:\r\n pos = end = endmatch.end(0)\r\n yield (STRING, contstr + line[:end],\r\n strstart, (lnum, end), contline + line)\r\n contstr, needcont = '', 0\r\n contline = None\r\n elif needcont and line[-2:] != '\\\\\\n' and line[-3:] != '\\\\\\r\\n':\r\n yield (ERRORTOKEN, contstr + line,\r\n strstart, (lnum, len(line)), contline)\r\n contstr = ''\r\n contline = None\r\n continue\r\n else:\r\n contstr = contstr + line\r\n contline = contline + line\r\n continue\r\n\r\n elif parenlev == 0 and not continued: # new statement\r\n if not line: break\r\n column = 0\r\n while pos < max: # measure leading whitespace\r\n if line[pos] == ' ': column = column + 1\r\n elif line[pos] == '\\t': column = (column/tabsize + 1)*tabsize\r\n elif line[pos] == '\\f': column = 0\r\n else: break\r\n pos = pos + 1\r\n if pos == max: break\r\n\r\n if line[pos] in '#\\r\\n': # skip comments or blank lines\r\n yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],\r\n (lnum, pos), (lnum, len(line)), line)\r\n continue\r\n\r\n if column > indents[-1]: # count indents or dedents\r\n indents.append(column)\r\n yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)\r\n while column < indents[-1]:\r\n if column not in indents:\r\n raise IndentationError(\r\n \"unindent does not match any outer indentation level\",\r\n (\"<tokenize>\", lnum, pos, line))\r\n indents = indents[:-1]\r\n yield (DEDENT, '', (lnum, pos), (lnum, pos), line)\r\n\r\n else: # continued statement\r\n if not line:\r\n raise TokenError, (\"EOF in multi-line statement\", (lnum, 0))\r\n continued = 0\r\n\r\n while pos < max:\r\n pseudomatch = pseudoprog.match(line, pos)\r\n if pseudomatch: # scan for tokens\r\n start, end = pseudomatch.span(1)\r\n spos, epos, pos = (lnum, start), (lnum, end), end\r\n token, initial = line[start:end], line[start]\r\n\r\n if initial in numchars or \\\r\n (initial == '.' 
and token != '.'): # ordinary number\r\n yield (NUMBER, token, spos, epos, line)\r\n elif initial in '\\r\\n':\r\n yield (parenlev > 0 and NL or NEWLINE,\r\n token, spos, epos, line)\r\n elif initial == '#':\r\n yield (COMMENT, token, spos, epos, line)\r\n elif token in triple_quoted:\r\n endprog = endprogs[token]\r\n endmatch = endprog.match(line, pos)\r\n if endmatch: # all on one line\r\n pos = endmatch.end(0)\r\n token = line[start:pos]\r\n yield (STRING, token, spos, (lnum, pos), line)\r\n else:\r\n strstart = (lnum, start) # multiple lines\r\n contstr = line[start:]\r\n contline = line\r\n break\r\n elif initial in single_quoted or \\\r\n token[:2] in single_quoted or \\\r\n token[:3] in single_quoted:\r\n if token[-1] == '\\n': # continued string\r\n strstart = (lnum, start)\r\n endprog = (endprogs[initial] or endprogs[token[1]] or\r\n endprogs[token[2]])\r\n contstr, needcont = line[start:], 1\r\n contline = line\r\n break\r\n else: # ordinary string\r\n yield (STRING, token, spos, epos, line)\r\n elif initial in namechars: # ordinary name\r\n yield (NAME, token, spos, epos, line)\r\n elif initial == '\\\\': # continued stmt\r\n continued = 1\r\n else:\r\n if initial in '([{': parenlev = parenlev + 1\r\n elif initial in ')]}': parenlev = parenlev - 1\r\n yield (OP, token, spos, epos, line)\r\n else:\r\n yield (ERRORTOKEN, line[pos],\r\n (lnum, pos), (lnum, pos+1), line)\r\n pos = pos + 1\r\n\r\n for indent in indents[1:]: # pop remaining indent levels\r\n yield (DEDENT, '', (lnum, 0), (lnum, 0), '')\r\n yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')", "def get_significant_tokens(source):\n tokens = []\n try:\n for tok in py_tokenize.generate_tokens(StringIO(source).readline):\n token = Token(tok)\n if not token.string.strip():\n continue\n if token.is_comment():\n continue\n tokens.append(token)\n except py_tokenize.TokenError:\n return tokens\n\n return tokens", "def tokenize(self):", "def compile_tokens(tokens, pc, context):\n\n it = iter(tokens)\n ignore = False\n subtokens = None\n\n for token in it:\n # Handle comments. 
Whether or not a Forth permits nested comments is\n # pretty up-in-the-air; this Forth does not permit nesting of\n # comments.\n if token == \"(\":\n ignore = True\n continue\n elif token == \")\":\n ignore = False\n continue\n\n if ignore:\n continue\n\n # Look for subroutines.\n if token == \":\":\n subtokens = []\n continue\n elif token == \";\":\n if not subtokens:\n raise Exception(\"Empty word definition!\")\n name = subtokens[0]\n pc = subroutine(name, subtokens[1:], pc, context)\n continue\n elif subtokens is not None:\n subtokens.append(token)\n continue\n\n raise Exception(\"Lone word %r in tokenizer!\" % token)\n\n return pc", "def get_all_tokens(self):\n word = \"\"\n begin_string = False\n i = 0\n\n while i < len(self.code):\n char = self.code[i]\n # Ignore white space\n if char in [' ', '\\t', '\\n'] and begin_string == False: \n i = i + 1 \n word = \"\" \n continue\n \n word = word + char\n if word in KEYWORDS and self.code[i + 1] in SYMBOLS + SKIPABLE:\n self.tokens.append(Token(\"keyword\", word))\n word = \"\"\n elif char == '\"' or begin_string: # Check for string\n if char == '\"':\n begin_string = not begin_string\n if not begin_string:\n self.tokens.append(Token(\"stringConstant\", word[1:-1]))\n word = \"\"\n elif word in SYMBOLS:\n self.tokens.append(Token(\"symbol\", word))\n word = \"\"\n elif self.code[i + 1] in SKIPABLE + SYMBOLS:\n if word.isdigit():\n self.tokens.append(Token(\"integerConstant\", word))\n else:\n self.tokens.append(Token(\"identifier\", word))\n word = \"\"\n i = i + 1", "def process_tokens(self, tokens):\n self._tokens = list(tokens)\n self._pos = 0\n self._ast = self._assert(self._chunk(), 'input to be a program')\n self._ast.store_token_groups(self._tokens)", "def py2commentblocks(string, firstlinenum, options):\n input_stream = cStringIO.StringIO(string)\n block_list = []\n pos = 0\n current_block = \"\"\n newline = True\n linenum = 0\n last_token = None\n for tokendesc in tokenize.generate_tokens(input_stream.readline):\n\n if PYTHON_VERSION >= 26:\n # As of 2.6, tokenize.generate_tokens() chops newlines off\n # then end of comments and returns them as NL tokens. 
This\n # confuses the logic of the rest of pyreport, so we gobble\n # NL following a comment.\n if last_token == tokenize.COMMENT and \\\n tokendesc[0] == tokenize.NL:\n last_token = tokendesc[0]\n continue\n else:\n last_token = tokendesc[0]\n\n tokentype = token.tok_name[tokendesc[0]]\n startpos = tokendesc[2][1]\n tokencontent = tokendesc[1]\n if tokendesc[2][0] > linenum:\n # We just started a new line\n tokencontent = startpos * \" \" + tokencontent\n newline = True\n elif startpos > pos :\n tokencontent = (startpos - pos) * \" \" + tokencontent\n pos = startpos + len(tokendesc[1])\n linenum = tokendesc[2][0]\n reallinenum = linenum + firstlinenum - 1\n if newline and tokentype == 'COMMENT' :\n if current_block:\n block_list += [ [ \"inputBlock\", current_block, reallinenum ], ]\n current_block = \"\"\n pos = 0\n lines = tokencontent.splitlines()\n lines = map(lambda z : z + \"\\n\", lines[:])\n for line in lines:\n if line[0:3] == \"#!/\" and reallinenum == 1:\n # This is a \"#!/foobar on the first line, this \n # must be an executable call\n block_list += [ [\"inputBlock\", line, reallinenum], ]\n elif line[0:3] == \"#%s \" % options.commentchar :\n block_list += [ [ \"textBlock\", line[3:]], ]\n elif line[0:2] == \"#%s\" % options.commentchar :\n block_list += [ [\"textBlock\", line[2:]], ]\n elif options.latexliterals and line[0:2] == \"#$\" :\n block_list += [ [\"latexBlock\", line[2:]], ]\n else:\n block_list += [ [\"commentBlock\", line, reallinenum], ]\n else:\n current_block += tokencontent\n newline = False\n if current_block :\n block_list += [ [ \"inputBlock\", current_block, reallinenum ], ]\n return block_list", "def _create_tokenize_gen(self, a_starting_pos=-1):\n ordered_tokens = self._tok_c.get_ordered_tokens_list()\n tokens_re = self._tok_c.get_tokens_re()\n \n # position 0 in io stream\n if a_starting_pos != -1:\n self._io_prog.seek(a_starting_pos)\n \n for line in self._io_prog:\n #print(\"line to read=[%s].len(line)=%d\\n\"%(line,len(line)))\n \n self._line_num += 1\n \n self._file_pos = self._io_prog.tell()\n \n self._line_pos, max = 0, len(line)\n \n while self._line_pos < max:\n \n b_found = False\n # This code provides some short-circuit code for whitespace, tabs, and other ignored characters\n if line[self._line_pos] in IGNORED_LITERALS:\n self._line_pos += 1\n continue\n \n #print(\"Try to match from [%s]\\n\"%(line[pos:]))\n \n for key in ordered_tokens:\n regexp = tokens_re[key]\n match = regexp.match(line, self._line_pos)\n if match:\n \n val = match.group()\n start, end = self._line_pos, (self._line_pos+len(val)-1)\n \n # when it is an ID check if this is a WCID\n if key == TokenCreator.TokenNames.ID:\n type = self._get_ID_type(val)\n else:\n type = key\n \n self._tok = Token(type, val, start, end, self._line_num, line, self._file_pos)\n \n #update pos\n self._line_pos = end +1\n \n #print(\"Token = %s\\n\"%(self._tok))\n b_found = True\n \n #return token using yield and generator\n yield self._tok\n \n #found on so quit for loop\n break\n \n \n if not b_found:\n raise IllegalCharacterError(self._line_num, line, self._line_pos) \n \n # All lines have been read return ENDMARKER Token\n self._tok = ENDMARKERToken(self._line_num)\n yield self._tok", "def py2commentblocks(string, firstlinenum, options):\r\n input_stream = cStringIO.StringIO(string)\r\n block_list = []\r\n pos = 0\r\n current_block = \"\"\r\n newline = True\r\n linenum = 0\r\n last_token = None\r\n for tokendesc in tokenize.generate_tokens(input_stream.readline):\r\n\r\n if PYTHON_VERSION >= 
26:\r\n # As of 2.6, tokenize.generate_tokens() chops newlines off\r\n # then end of comments and returns them as NL tokens. This\r\n # confuses the logic of the rest of pyreport, so we gobble\r\n # NL following a comment.\r\n if last_token == tokenize.COMMENT and \\\r\n tokendesc[0] == tokenize.NL:\r\n last_token = tokendesc[0]\r\n continue\r\n else:\r\n last_token = tokendesc[0]\r\n\r\n tokentype = token.tok_name[tokendesc[0]]\r\n startpos = tokendesc[2][1]\r\n tokencontent = tokendesc[1]\r\n if tokendesc[2][0] > linenum:\r\n # We just started a new line\r\n tokencontent = startpos * \" \" + tokencontent\r\n newline = True\r\n elif startpos > pos :\r\n tokencontent = (startpos - pos) * \" \" + tokencontent\r\n pos = startpos + len(tokendesc[1])\r\n linenum = tokendesc[2][0]\r\n reallinenum = linenum + firstlinenum - 1\r\n if newline and tokentype == 'COMMENT' :\r\n if current_block:\r\n block_list += [ [ \"inputBlock\", current_block, reallinenum ], ]\r\n current_block = \"\"\r\n pos = 0\r\n lines = tokencontent.splitlines()\r\n lines = map(lambda z : z + \"\\n\", lines[:])\r\n for line in lines:\r\n if line[0:3] == \"#!/\" and reallinenum == 1:\r\n # This is a \"#!/foobar on the first line, this \r\n # must be an executable call\r\n block_list += [ [\"inputBlock\", line, reallinenum], ]\r\n elif line[0:3] == \"#%s \" % options.commentchar :\r\n block_list += [ [ \"textBlock\", line[3:]], ]\r\n elif line[0:2] == \"#%s\" % options.commentchar :\r\n block_list += [ [\"textBlock\", line[2:]], ]\r\n elif options.latexliterals and line[0:2] == \"#$\" :\r\n block_list += [ [\"latexBlock\", line[2:]], ]\r\n else:\r\n block_list += [ [\"commentBlock\", line, reallinenum], ]\r\n else:\r\n current_block += tokencontent\r\n newline = False\r\n if current_block :\r\n block_list += [ [ \"inputBlock\", current_block, reallinenum ], ]\r\n return block_list", "def read_from_tokens(tokens: list) -> Exp:\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0) # pop is used to consume\n if token == '(':\n L = []\n while tokens[0] != ')': # recurse until hitting )\n L.append(read_from_tokens(tokens)) \n tokens.pop(0) # pop off )\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)", "def lexer(it):\n tokens = []\n token = \"\"\n for c in it:\n if c == \"{\":\n if token:\n tokens.append(token)\n token = \"\"\n tokens.append(c)\n elif c == \"}\":\n if token:\n tokens.append(token)\n token = \"\"\n tokens.append(c)\n else:\n token += c\n if token:\n tokens.append(token)\n return tokens", "def tokenize(source, warning=True):\n tokens = []\n\n for tok in py_tokenize.generate_tokens(StringIO(source).readline):\n try:\n token = Token(tok)\n tokens.append(token)\n except (py_tokenize.TokenError, Exception) as exc:\n if warning:\n print(\n \"WARNING: the following error was raised in \",\n f\"{__name__}.tokenize\",\n )\n print(exc)\n return tokens\n\n if source.endswith((\" \", \"\\t\")):\n fix_empty_line(source, tokens)\n\n return tokens", "def tokenization(text):\r\n list_of_punctuations_and_more = ['(', ')', ',', ':', '!', ' ', '\\n', '.', '']\r\n tokens = []\r\n token = ''\r\n for idx, character in enumerate(text):\r\n if any(character in s for s in list_of_punctuations_and_more):\r\n if '\\'' in token:\r\n splitted_word = token.split('\\'')\r\n for contraction in get_contractions():\r\n if contraction[0] == splitted_word[1]:\r\n if contraction[0] == 't':\r\n is_on_list = True\r\n for additional_contraction in 
get_additional_contractions():\r\n if additional_contraction[0] == splitted_word[0]:\r\n tokens.append(additional_contraction[1])\r\n is_on_list = False\r\n if is_on_list:\r\n tokens.append(splitted_word[0][:-1])\r\n tokens.append(contraction[1])\r\n else:\r\n tokens.append(splitted_word[0])\r\n tokens.append(contraction[1])\r\n else:\r\n tokens.append(token)\r\n tokens.append(character)\r\n token = ''\r\n else:\r\n token = token + character\r\n\r\n unwanted_characters = {'', ' ', '\\n'}\r\n tokens = [ele for ele in tokens if ele not in unwanted_characters] # remove unwanted characters\r\n print('Tokens: ', tokens)\r\n return tokens", "def test_ignore_comments(self):\n lexer = Lexer()\n parse = lexer.parse(self.tokens)\n self.assertNotIn(COMMENT, set(token.tag for token in flatten(parse)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses ``page`` (HTML as a string), tries to find the correct link for the given date, gets its index, and then parses the menu using that index.
def parse_menu(page: str, date: datetime.date):
    menu = bs4.BeautifulSoup(page, 'html.parser')
    date_str = date.strftime('%d.%m.')
    date_idcs = {date_link.attrs['data-index']
                 for date_link in menu.select('.weekdays .nav-item')
                 if date_link.select('.date')[0].text == date_str}
    if len(date_idcs) != 1:
        raise RuntimeError(f"No unique menu found for date={date_str} (found entries with "
                           f"indices {date_idcs})")
    date_idx, = date_idcs
    menu_of_day = menu.select(f':not(.d-md-none) > div > .menu-plan .menu-item-{date_idx}')

    # first filter complete sections wrapped in <div class="menu-item ...>
    FILTER_SECTION_BLACKLIST_REGEX = (
        'Frühstück',
    )

    def is_blacklisted(meal):
        return any(meal.find_all(string=re.compile(pattern))
                   for pattern in FILTER_SECTION_BLACKLIST_REGEX)

    menu_of_day = [meal for meal in menu_of_day if not is_blacklisted(meal)]

    # now filter <p> tags, small unnecessary comments
    FILTER_BLACKLIST_REGEX = (
        #'Empfehlen Sie uns bitte weiter',
        #'Wir möchten',
        #'Unser Umweltzeichen',
        #'Produkte vom heimischen',
        #'Wir verwendent erstklassige',
        #'Unser Wochenangebot',
        #'in bisserl mehr sein',
        'Tagesteller',
        'Unsere Tagesgerichte',
        'Unser Wochenangebot',
        'Aus unserer My Mensa-Soup',
        'darauf hinweisen, dass wir vorwiegend Produkte vom',
        'Unser Umweltzeichen - welches wir in all',
        'Empfehlen Sie uns bitte weiter...',
        'M-Café',
        'Tages-Empfehlung',
        'Aus unserer My-Mensa Soup-Bar',
        'Angebot der Woche',
        'Herzlich Willkommen',
        'im M-Café Biotech!',
        'M-Cafe',
        'Herzlich Willkommen',
        'im M-Café Mendel',
        'Gerne verwöhnen wir euch mit verschiedenen,',
        'gefüllten Weckerln und Sandwiches,',
        'hausgemachtem Blechkuchen und',
        'täglich frisch gebackenem Gebäck!',
        'Darf´s ein bisserl mehr se',
        'im M-Café Mendel',
        'Täglich frischer',
        '\*\*\*',
        '\*',
    )
    for pattern in FILTER_BLACKLIST_REGEX:
        for meal in menu_of_day:
            for tag in meal.find_all(string=re.compile(pattern)):
                tag.parent.decompose()

    menu_of_day_items = []
    for vegy_type, v_symbol_name in VEGY_TYPES.items():
        for meal in menu_of_day:
            for v_image in meal.find_all('img', alt=vegy_type):
                v_symbol = menu.new_tag('p')
                v_symbol.string = v_symbol_name
                v_image.replace_with(v_symbol)

    # note: meal might contain multiple items/prices
    for meal in menu_of_day:
        # split by prices
        foods_prices = re.split('(€\s?\d+,\d+)', meal.text)
        foods = foods_prices[::2]
        prices = foods_prices[1::2]
        # replace new lines with spaces
        foods = [" ".join(food.split()) for food in foods]
        menu_of_day_items += [f"{food} {price}" for food, price in zip(foods, prices)]
    return menu_of_day_items
[ "def load_page_for_date(site: Site, date: datetime.date) -> Page:\n return site.pages[\"Freitagsfoo/{}\".format(date)]", "def _parse_page(url):\n html = urllib2.urlopen(url).read()\n soup = BeautifulSoup(html, 'lxml', from_encoding=\"utf-8\")\n #contents = [x.get('content') for x in soup('meta')]\n links = [link.get('href') for link in soup('a')]\n return Page(url, soup.get_text(), links)", "def parse_site_page(url):\n\n import re\n\n url_request = get_request(url)\n soup = BeautifulSoup(url_request, 'html.parser')\n\n pattern = re.compile(r'entry+')\n div_tags = soup.find_all('div', id=pattern)\n\n return_list = []\n for div in div_tags:\n a_tag = div.find('a')\n name = a_tag.find('h2').text\n link = a_tag.get('href') # link on anime\n\n anime_request = get_request(link)\n anime_soap = BeautifulSoup(anime_request, 'html.parser') # html of anime page\n\n description = anime_soap.find('div', {'class': 'kino-desc full-text clearfix noselect'}).text.replace('\\n', '')\n\n anime_ul = anime_soap.find('ul', {'class': 'kino-lines ignore-select'})\n ul_links = anime_ul.find_all('a')\n genre = ' '.join(a.text for a in ul_links if a.text in GENRES)\n\n rating = anime_soap.find('ul', {'class': 'unit-rating'}).find('li').text\n\n image_url = 'http://baza1.animevost.tv/' + anime_soap.find('a', {'class': 'highslide'}).find('img').get('src')\n\n return_list.append({\n 'name': name,\n 'link': link,\n 'genre': genre,\n 'rating': rating,\n 'description': description,\n 'image': image_url\n })\n\n return return_list", "def parse_page(\n page: Page, render_function: Callable[[str], str] = None\n) -> Result:\n hosts, date = parse_top_section(page)\n sections = wtp.parse(page.text()).sections\n talks = parse_talks(sections, render_function)\n return {\n \"hosts\": hosts,\n \"date\": date,\n \"talks\": talks\n }", "def parse(self, response):\n \n time.sleep(random.random())\n \n for href in response.xpath('//span[@class=\"atc_title\"]/a'):\n _url = href.xpath('@href')[0].extract()\n url = response.urljoin(_url)\n print url\n yield scrapy.Request(url, callback=self.parse_blog_contents)\n \n next_page = response.xpath('//li[@class=\"SG_pgnext\"]/a/@href')\n if next_page:\n url = response.urljoin(next_page[0].extract())\n yield scrapy.Request(url, self.parse)", "def parse_index(html):\n pattern = re.compile('<a.*?href=\"(.*?)\".*?class=\"name\">')\n items = re.findall(pattern, html)\n if not items:\n return []\n for item in items:\n detail_url = urljoin(BASE_URL, item)\n logging.info('get detail url %s', detail_url)\n yield detail_url", "def get_job_links_from_page(self):\n ## Marked for tidying\n if self.current_page_text:\n self.parsed_site = BeautifulSoup(self.current_page_text)\n holding_dictionary = dict((link.get('href'), link.get('title')) for link in self.parsed_site.find_all('a') if re.findall(self.job_search_regex, link.get('href')) and link.get('title'))\n self.job_dictionary.update(holding_dictionary)", "def parse(self, response):\n\t\tlogging.info('started scraping {}'.format(response.url))\n\t\tpage = json.loads(response.text)['pagecontent']\n\t\tlinks = Selector(text=page).css(\"div.col-xs-12>a::attr(href)\").getall()\n\t\tlogging.info('finished scraping'.format(response.url))\n\t\tif len(links) == self.per_page:\n\t\t\tfor i in range(len(links)):\n\t\t\t\tyield {'links': links[i]}\n\t\telif response.meta['num'] == self.num_of_pages:\n\t\t\tfor i in range(len(links)):\n\t\t\t\tyield {'links': links[i]}\n\t\telse:\n\t\t\tlogging.warning('the chosen selector did not find all the links \\\nwhich are 
on the page {}'.format(response.url))\n\t\t\traise CloseSpider(\"not all the links were found on the page {}. The\\\n selector has to be changed\".format(response.url))", "def __init__(self):\n self.date_parser = date_parser.WeekOfParser()\n self.parser = sodexo_parser.MenuParser()\n self.date_url = ''\n self.urls = {\n \"Commons\": \"https://rpi.sodexomyway.com/images/WeeklyMenuCommons%2011-17-14_tcm1068-29434.htm\",\n \"Sage\": \"https://rpi.sodexomyway.com/images/WeeklyMenuRSDH%2011-17-14_tcm1068-29436.htm\",\n \"Sage2\": \"https://rpi.sodexomyway.com/images/WeeklyMenuRSDH%2011-24-14_tcm1068-29441.htm\",\n \"BARH\": \"https://rpi.sodexomyway.com/images/WeeklyMenuBARH%2011-17-14_tcm1068-2231.htm\"}", "def page_link(role, rawtext, text, lineno, inliner, options={}, content=[]):\n # Get the page slugs map\n slugs = get_page_slugs()\n # Throw error if the given slug does not exist\n if text not in slugs and not settings.DOCUMENTS_PARSER_WIKIROLE_SILENT_WARNING:\n msg = inliner.reporter.error('Page with slug \"%s\" does not exist.' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n # Add a class to the item\n options.update({'classes': ['documents_page_link']})\n roles.set_classes(options)\n # Return the node as reference to display the link for the given page's slug\n site_current = Site.objects.get_current()\n url = \"http://{0}{1}\".format(site_current.domain, reverse('documents-page-details', args=[text]))\n node = nodes.reference(rawtext, utils.unescape(slugs[text]), refuri=url, **options)\n return [node], []", "def ProcessPage(keyword, vBrowser, vNews_name, vNews_url, language):\n\n # output: pandas dataframe with title, publishing date, article text and url\n articles_page = pd.DataFrame(columns=['title', 'publish_date', 'text', 'url'])\n\n # 1) list results\n search_result_page_source = vBrowser.page_source\n\n # make url regex-usable\n url_any = vNews_url\n url_any = re.sub(re.escape('?s='+keyword), '', url_any)\n url_any = re.sub(re.escape('search?k='+keyword), '', url_any)\n url_any = re.sub(re.escape('search?q=' + keyword), '', url_any)\n url_any = re.sub('\\?m\\=[0-9]{6}', '', url_any)\n url_any = re.escape(url_any) + '(?=\\S*[-]*)([0-9a-zA-Z-\\/\\.\\-\\n]+)'\n regex = re.compile(url_any)\n # logger.info('searching for {}'.format(url_any))\n search_results = list(set([match[0] for match in\n regex.finditer(search_result_page_source)\n if keyword in match[0].lower()]))\n\n if vNews_name in ['NewVision']:\n regex = re.compile('\\/new\\_vision\\/news\\/(?=\\S*[-])([0-9a-zA-Z-\\/\\.\\-]+)')\n search_results = list(set([ match[0] for match in regex.finditer(search_result_page_source) if keyword in match[0].lower()]))\n search_results = ['https://www.newvision.co.ug' + search_result for search_result in search_results]\n\n if vNews_name == \"FloodList\":\n regex = re.compile('(http|ftp|https):\\/\\/([\\w_-]+(?:(?:\\.[\\w_-]+)+))([\\w.,@?^=%&:\\/~+#-]*[\\w@?^=%&\\/~+#-])')\n search_results = list(set([ match[0] for match in regex.finditer(search_result_page_source) if '.com/africa/' in match[0].lower()]))\n search_results = [url for url in search_results if \"/attachment/\" not in url]\n\n if len(search_results) > 0:\n logger.info(\"found {0} article(s):\".format(len(search_results)))\n for title in search_results:\n logger.info(\"url: {0}\".format(title))\n else:\n logger.info('no articles found')\n\n # 2) for each result, get article and save it\n for idx, search_result in enumerate(search_results):\n\n logger.info('processing 
{}'.format(search_result))\n # download article\n article = Article(search_result, keep_article_html=True)\n article.download()\n attempts, attempts_max = 0, 10\n while (article.download_state != 2) and (attempts < attempts_max):\n attempts += 1\n logger.warning(f\"download_state {article.download_state} \"\n f\", retrying {attempts}/{attempts_max}\")\n article = Article(search_result, keep_article_html=True)\n article.download()\n time.sleep(10)\n\n if article.download_state != 2:\n logger.warning('unable to download article: {}'.format(search_result))\n continue\n article.parse()\n\n article_html = str(article.html)\n\n # select articles with keyword\n regex = re.compile(keyword, re.IGNORECASE)\n\n if re.search(regex, article.html) is not None:\n\n logger.debug('{}'.format(article_html))\n\n # get date\n date = article.publish_date\n date_str = \"\"\n search_date = False\n\n if not pd.isnull(date):\n # keep date found only if older than today\n if pd.to_datetime(date).date() < pd.to_datetime(datetime.today()).date():\n date_str = date.strftime(DATE_FORMAT)\n else:\n search_date = True\n else:\n search_date = True\n\n if search_date:\n article_html = re.sub('\\s+', ' ', article_html)\n dates_found = []\n\n res_date = [re.compile('[a-zA-ZÀ-ÿ]\\w+\\s[0-9]+\\,\\s[0-9]{4}'),\n re.compile('[a-zA-ZÀ-ÿ]\\w+\\s[0-9]+\\s[0-9]{4}'),\n re.compile('[0-9]\\w+\\s[a-zA-ZÀ-ÿ]+\\,\\s[0-9]{4}'),\n re.compile('[0-9]\\w+\\s[a-zA-ZÀ-ÿ]+\\s[0-9]{4}'),\n re.compile('[0-9]+\\s[a-zA-ZÀ-ÿ]+\\,\\s[0-9]{4}'),\n re.compile('[0-9]+\\s[a-zA-ZÀ-ÿ]+\\s[0-9]{4}'),\n re.compile('[0-9]{2}\\/[0-9]{2}\\/[0-9]{4}'),\n re.compile('[0-9]{2}\\-[0-9]{2}\\-[0-9]{4}'),\n re.compile('[0-9]{2}\\.[0-9]{2}\\.[0-9]{4}')]\n for re_date in res_date:\n for match in re_date.finditer(article_html):\n if is_date(match.group(), language):\n dates_found.append((match.start(), match.group()))\n if len(dates_found) > 0:\n logger.info('{}'.format(dates_found))\n dates_found.sort(key=lambda tup: tup[0])\n for res in dates_found:\n try:\n res_date = dateparser.parse(res[1], languages=[language],\n settings={'DATE_ORDER': 'DMY'}).date()\n if (res_date < pd.to_datetime(datetime.today()).date()\n and res_date > pd.to_datetime('30/04/1993', format=\"%d/%m/%Y\").date()):\n date_str = res_date.strftime(DATE_FORMAT)\n break\n except:\n pass\n\n if date_str == \"\":\n logger.warning('Publication date not found or wrongly assigned, skipping article')\n continue\n else:\n logger.info('Publication date assigned: {}'.format(date_str))\n\n # Take newspaper name out of article title\n article.title = remove_newspaper_name_from_title(article.title, vNews_name)\n\n # if no text is present (e.g. 
only video), use title as text\n article_text = article.text\n if len(str(article.text)) == 0:\n article_text = article.title\n\n # add to dataframe\n logger.info('{0} : {1}'.format(article.title, date_str))\n articles_page.loc[idx] = [article.title, date_str, article_text, article.url]\n\n # 3) return dataframe\n if len(search_results) > 0:\n logger.info('{}'.format(articles_page.head()))\n return articles_page", "def fetch_page(self):\n\n # Clear the current internal links\n self.hreflang_entries = {}\n\n # Grab the page and pull out the hreflang <link> elements\n try:\n r = requests.get(self.base_url, allow_redirects=False)\n except Exception as e:\n raise ValueError(str(e))\n\n self.status_code = r.status_code\n if r.status_code != 200:\n raise ValueError(\"HTTP Response Code was not 200.\")\n\n tree = lxml.html.fromstring(r.text)\n elements = tree.xpath(\"//link[@hreflang]\")\n\n # concert a link element into a tuple of the clean language\n # code and the alternate url link\n def element_hreflang_value_and_url(element):\n \"\"\" Get the attributes of the element \"\"\"\n\n language_code = element.get('hreflang', '')\n alternate_url = element.get('href', '')\n\n formatted_hreflang_value = self.format_hreflang_value(language_code)\n\n return formatted_hreflang_value, alternate_url\n\n # group the links by country code\n hreflang_entries = defaultdict(list)\n for element in elements:\n hreflang_value, alternate_url = element_hreflang_value_and_url(element)\n hreflang_entries[hreflang_value].append(alternate_url)\n self.alternate_languages.add(self.hreflang_value_language(hreflang_value))\n region = self.hreflang_value_region(hreflang_value)\n if region:\n self.alternate_regions.add(region)\n\n self.hreflang_entries = dict(hreflang_entries)", "def parse(self, html, url, categ, category_id, page_expire_limit=10):\n\n #Create BS4 parsing object from encoded HTML\n soup = BeautifulSoup(html.encode('utf-8').strip(), 'lxml')\n\n #Instantiate child objects for thread object\n userlist = UserList([])\n postlist = PostList([])\n\n #List of indices we've seen so far\n checked_indices = []\n\n #If we have old data in our cache\n if len(self.db.pred.keys()) > 0:\n #Get the oldest index\n oldest_index = self.db.find_oldest_index(url, categ)\n old_indices = self.db.get_indices(url, categ)\n if len(old_indices) == 0:\n old_indices = None\n else:\n #Otherwise, we only use postdate to determine when to stop\n oldest_index = None\n old_indices = None\n \n try:\n #If we can't parse the title\n title = soup.find('h1', class_='lia-message-subject-banner lia-component-forums-widget-message-subject-banner')\\\n .text.replace('\\n\\t', '').replace('\\n', '').replace('\\u00a0', '')\n except:\n #Format it from the URL\n title = url.split(categ + '/')[1].split('/td-p')[0].replace('-', ' ')\n \n #Get thread postdate from first page\n post_date = soup.find('span', class_='DateTime lia-message-posted-on lia-component-common-widget-date')\\\n .find('span', class_='message_post_text').text\n \n #If we have an edit date available, parse it out\n try:\n edit_date = soup.find('span', class_='DateTime lia-message-edited-on lia-component-common-widget-date')\\\n .find('span', class_='message_post_text').text\n except AttributeError:\n edit_date = 'Unedited'\n \n #Get the max number of pages in this thread\n pages = self.get_page_numbers(soup)\n\n #Set scan limits\n start = pages\n if '-full' not in sys.argv:\n if start > 30:\n end = start - 30\n else:\n end = 1\n else:\n end = 1\n \n \n #Backend tracking params\n now = 
datetime.now()\n post_total = str(10 * pages)\n\n #Try to find original author container\n try:\n op = soup.find_all('div', class_='MessageView lia-message-view-forum-message lia-message-view-display lia-row-standard-unread lia-thread-reply')\n except:\n op = None\n \n #Get thread author name\n try:\n author = op[0].find('a', class_='lia-link-navigation lia-page-link lia-user-name-link user_name').find('span').text\n except:\n author = ''\n\n #Parse out the number of posts on the thread\n if op is not None:\n for msg in op:\n try:\n post_total = msg.find('span', class_='MessagesPositionInThread').text.split('of ')[1].replace('\\n', '').replace(',', '')\n break\n except:\n pass\n\n queue = []\n #Iterate through thread pages from last page to limit defined above\n for pagenum in range(start, end-1, -1):\n #print(f'Currently on page {pagenum} of {url}')\n #If we're past the first page, we want to generate the next page URL and validate it\n if pagenum > 1:\n if validators.url(self.generate_next(url, pagenum)):\n #Get the page and recreate the parsing object\n if '-p' not in sys.argv:\n self.driver.get(self.generate_next(url, pagenum))\n soup = BeautifulSoup(self.driver.page_source.encode('utf-8').strip(), 'lxml')\n else:\n r = requests.get(self.generate_next(url, pagenum))\n soup = BeautifulSoup(r.content, 'html.parser')\n else:\n if '-p' not in sys.argv:\n self.driver.get(url)\n soup = BeautifulSoup(self.driver.page_source.encode('utf-8').strip(), 'lxml')\n else:\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n\n msgli, count = self.get_message_divs(soup, categ, url)\n try:\n assert(count > 0)\n except:\n print(url, pagenum)\n if pagenum != start:\n try:\n assert(count == 10)\n except:\n print(url, pagenum)\n \n #print(f'Got {count} posts on page {pagenum} of {url}')\n expired = False\n idx = 0\n\n #Iterate through list in reverse order\n for msg in msgli:\n if msg is None:\n continue\n #try:\n p, editor_id, edited_url, edited_by = self.parse_message_div(msg, url, pagenum)\n #except Exception as e:\n # import traceback\n # print(f'Something went wrong while parsing a message div \\n {url}, {e}')\n checked_indices.append(p.index)\n userlist.handle_user(p.author)\n in_queue = False\n \n #If this post was edited, add it to the queue to find the editor info\n if editor_id != '' and edited_by != p.author.name:\n queue.append((p, edited_url, edited_by))\n in_queue = True\n elif editor_id != '' and edited_by == p.author.name:\n p.add_edited(p.author)\n if not in_queue:\n postlist.add(p)\n idx += 1\n\n \"\"\"\n We only expire if the following conditions are met:\n\n 1. The thread we are scanning has more than 10 pages. Otherwise, it is inexpensive to\n scan the entire thread.\n\n 2. We have seen a post that is older than a week. If we have no cached data, we stop\n scanning here.\n\n 3. We have an oldest index, we've encountered a post older than a week, and we've reached\n the oldest index.\n\n 4. We have an oldest index and a list of indices we encountered on the last scan. 
If\n all the previous criteria has been met and we have more checked indices than old indices\n we break.\n \"\"\"\n\n #If message is older than a week old and we've passed our oldest index break.\n #If we don't have an oldest index, just break when we find a message thats a week old\n date_format = \"%b %d, %Y %I:%M:%S %p\"\n dt = datetime.strptime(p.postdate, date_format)\n now = datetime.now()\n\n if pages > page_expire_limit:\n if (now-dt).days > 7:\n if oldest_index is not None:\n if old_indices is not None:\n if len(old_indices) < len(checked_indices) and all(elem in checked_indices for elem in old_indices):\n expired = True\n else:\n if oldest_index in checked_indices:\n expired = True\n else:\n expired = True\n\n #If we determined we should stop, break here\n if expired is True:\n break\n \n if len(queue) > 0:\n #For each item queued\n for item in queue:\n if item[1] == '**Info Inaccessible**':\n continue\n #Get editor profile\n if '-p' not in sys.argv:\n self.driver.get(item[1])\n soup = BeautifulSoup(self.driver.page_source, 'lxml')\n else:\n r = requests.get(item[1])\n soup = BeautifulSoup(r.content, 'html.parser')\n\n #Parse out relevant user info\n \n data_container = soup.find('div', class_='userdata-combine-container')\n joininfo = data_container.find_all('span', class_='member-info')\n for entry in joininfo:\n if entry.text != 'Member since:':\n joindate = entry.text\n rank_container = data_container.find('div', class_='user-userRank')\n rank = rank_container.text.strip()\n\n #Create user object and handle it, then add post\n u = User(item[2], joindate, item[1], rank)\n userlist.handle_user(u)\n item[0].add_edited(u)\n postlist.add(item[0])\n\n if '-r' not in sys.argv:\n missing = []\n #Debug helper for checking if any posts were missed in last scan\n if url.split('/t5/')[1].split('/')[0] in self.db.pred.keys():\n if url in self.db.pred[url.split('/t5/')[1].split('/')[0]].threads.keys():\n for post in self.db.pred[url.split('/t5/')[1].split('/')[0]].threads[url].postlist.postlist:\n if str(post.index) not in checked_indices:\n missing.append((post.index, post.page))\n\n missingqueue = []\n for item in missing:\n if item[1] == '**Info Inaccessible**':\n continue\n missing_bool = False\n if '-p' not in sys.argv:\n self.driver.get(self.generate_next(url, item[1]))\n soup = BeautifulSoup(self.driver.page_source.encode('utf-8').strip(), 'lxml')\n else:\n r = requests.get(self.generate_next(url, item[1]))\n soup = BeautifulSoup(r.content, 'html.parser')\n newli, _ = self.get_message_divs(soup, categ, url)\n for msg in newli:\n if msg is None:\n continue\n try:\n p, editor_id, edited_url, edited_by = self.parse_message_div(msg, url, item[1])\n if p.index == item[0] or p.index not in checked_indices:\n if editor_id != '' and edited_by != p.author.name:\n missingqueue.append((p, edited_url, edited_by))\n missing_bool = True\n elif editor_id != '' and edited_by == p.author.name:\n p.add_edited(p.author)\n if not missing_bool:\n postlist.add(p)\n except Exception as e:\n print(f'Something went wrong while finding missing posts\\n {e}')\n print(url)\n\n for item in missingqueue:\n if item[1] == '**Info Inaccessible**':\n continue\n #Get editor profile\n if '-p' not in sys.argv:\n self.driver.get(item[1])\n soup = BeautifulSoup(self.driver.page_source, 'lxml')\n else:\n r = requests.get(item[1])\n soup = BeautifulSoup(r.content, 'html.parser')\n #Parse out relevant user info\n \n data_container = soup.find('div', class_='userdata-combine-container')\n joininfo = 
data_container.find_all('span', class_='member-info')\n for entry in joininfo:\n if entry.text != 'Member since:':\n joindate = entry.text\n rank_container = data_container.find('div', class_='user-userRank')\n rank = rank_container.text.strip()\n\n #Create user object and handle it, then add post\n u = User(item[2], joindate, item[1], rank)\n userlist.handle_user(u)\n item[0].add_edited(u)\n postlist.add(item[0])\n\n if old_indices is not None:\n if sorted(checked_indices) != sorted(old_indices):\n diff = self.list_diff(checked_indices, old_indices)\n try:\n assert(all(elem in checked_indices for elem in old_indices))\n except:\n self.db.stats.diffs[url] = self.list_diff(checked_indices, old_indices)\n print(f'Got diff {diff} on url {url}')\n \n #Generate thread object and return\n t = Thread(postlist, url, author, url.split('/t5/')[1].split('/')[0], \\\n self.page, post_date, title, edit_date, userlist, post_total)\n with DBConn() as conn:\n for p in t.postlist.postlist:\n conn.insert_from_post(p, t.id, category_id)\n return t", "def pages_from_directory(directory, recursive=True, consider_index=True):\n\n pages = {}\n directories = [] # Save directories for later\n\n for name in os.listdir(directory):\n if not consider_index and name == 'index.md': continue\n path = os.path.join(directory, name)\n if os.path.isdir(path):\n new_path = os.path.join(path, 'index.md')\n if os.path.isfile(new_path):\n directories.append((name, path))\n path = new_path\n name += '.md'\n\n if name.endswith('.md'):\n name = name[:-3]\n with open(path) as fp:\n content = fp.read()\n content, details = parse_page_details(content, path)\n if details.get('content-from'):\n with open(os.path.join(os.path.dirname(path), details['content-from'])) as fp:\n content = fp.read()\n page = Page(name, content, details)\n pages[page.name] = page\n elif os.path.isdir(path):\n directories.append((name, path))\n\n if recursive:\n for name, path in directories:\n children = pages_from_directory(path, recursive=True, consider_index=False)\n if children:\n # Get the page that contains these children, or create a page\n # without content (a folder) for them.\n page = pages.get(name)\n if not page:\n # Check if there's a details TOML file for this directory.\n details_fn = path + '.toml'\n if not os.path.isfile(details_fn):\n details_fn = os.path.join(path, 'index.toml')\n if os.path.isfile(details_fn):\n with open(details_fn) as fp:\n details = load_page_details(fp.read(), details_fn)\n else:\n details = None\n page = Page(name, None, details)\n pages[name] = page\n\n page.add_child_page(*children)\n\n return list(pages.values())", "def parse(self, response):\n # Grab all the job posting urls and calculate their age based on their post date and today's date\n reached_max_age = False\n for sel in response.xpath('//div[@class=\"job\"]'):\n # Find if job too old\n full_date = sel.xpath('p//span[@class=\"date_compact\"]/script/text()').extract()[0][19:-3]\n if date_age(full_date) > self.max_age:\n reached_max_age = True\n break\n posting_url = response.urljoin(sel.xpath('h2/a/@href').extract()[0])\n job_location = sel.xpath('p//a[@class=\"locations_compact\"]/text()').extract()[0]\n try:\n self.jentries.append(scrape_job_posting(posting_url, loc=job_location))\n except Exception:\n logging.error(\"Unexpected error with website:\" + posting_url)\n traceback.print_exc()\n \n\n # Goto next page up to the end of the pagination div\n try:\n rightmost_a = response.xpath('//p[@class=\"browse\"]/a')[-1]\n a_text = 
rightmost_a.xpath('text()').extract()[0]\n url = response.urljoin(rightmost_a.xpath('@href').extract()[0])\n if a_text == ' >>' and not reached_max_age:\n self.search_page_index += 1\n logging.log(21, self.name + 'Processing page ' + str(self.search_page_index+1))\n yield scrapy.Request(url)\n except IndexError:\n pass", "def _parse_guild_homepage(cls, builder, info_container):\n if m := homepage_regex.search(info_container.text):\n builder.homepage(m.group(1))\n if link := info_container.select_one(\"a\"):\n link_info = parse_link_info(link)\n if \"target\" in link_info[\"query\"]:\n builder.homepage(link_info[\"query\"][\"target\"])\n else:\n builder.homepage(link_info[\"url\"])", "def open(self, debate_prefix = \"http://www.presidency.ucsb.edu/ws/index.php?\"):\n\n # Open url\n # print(self.counter , \":\", self.current_page)\n response = requests.get(self.current_page)\n self.visited_links.add(self.current_page)\n\n # Fetch every debate_links\n self.soup = BeautifulSoup(response.content, 'html.parser')\n\n page_links = []\n try :\n for link in [h.get('href') for h in self.soup.find_all('a')]:\n # limit analysis to most recent debates for now\n if link.startswith(debate_prefix) and (int(link.split('=')[-1]) >= 110489):\n page_links.append(link)\n\n except Exception: # Magnificent exception handling\n pass\n\n ### I copied this originally from a stackoverflow answer, ###\n ### and when I remove this, it screws it up. I don't know ###\n ### what it does though ###\n # Update debate_links\n self.debate_links = self.debate_links.union(set(page_links))\n # Choose a random url from non-visited set\n self.current_page = random.sample(self.debate_links.difference(self.visited_links), 1)[0]\n self.counter+=1", "def read_page(self, page_html, page_url, page_result):\n html_parser = MyHTMLParser()\n try:\n html_parser.feed(page_html)\n except Exception:\n pass\n\n page_statistics = [len(html_parser.text)]\n for item in self.search_target:\n word_count = html_parser.text.count(item)\n page_statistics.append(word_count)\n page_result.put_statistics(page_statistics)\n self.compute_bm25(page_url)\n\n # put useful urls into candidate urls\n for link in html_parser.links:\n new_url = urljoin(page_url, link)\n # unfortunately most urls have no file endings, need to consider None as an option\n if mimetypes.guess_type(link)[0] == \"text/html\" or mimetypes.guess_type(link)[0] is None:\n self.add_candidate(new_url, page_url, page_result.relevance)\n self.refresh_priority(True)", "def get_day_menu(which, url, allergens: Dict[str, str]):\n # Assumptions:\n # - The #content-core contains only <li> items belonging to the menu and <h3> elements that indicate a type.\n # - All menu items have a price, except vegetables.\n # - Priced items are of the form \"NAME - € X,XX\".\n # - Vegan and vegetarian is indicated by either the old system (KIND: name - price)\n # or the new system (name - KIND - price). The kind is optional; if not present, meat is assumed (in the new\n # system)\n day_menu = pq(url=url)\n vegetables = []\n meats = []\n soups = []\n\n if CLOSED[which] in day_menu(CONTENT_SELECTOR).html():\n return dict(open=False)\n\n # We iterate through the html: the h3 headings are used to reliably (?) 
determine the kind of the meal.\n meals_and_headings = day_menu(MEAL_AND_HEADING_SELECTOR).items()\n\n last_heading = None\n for current in meals_and_headings:\n if current.is_('h3'):\n if current.html() is not None:\n last_heading = current.html().lower().strip()\n continue\n # We have a meal type.\n meal = current.html()\n if meal is None:\n continue # Ignore empty\n\n meal = meal.strip()\n meal = BeautifulSoup(meal, \"html.parser\").get_text()\n\n if last_heading is None:\n print(f'Ignoring {meal}, no header.')\n continue\n\n if last_heading not in HEADING_TO_TYPE:\n raise ValueError(f\"Unknown header type {last_heading}, not mapped.\")\n\n if HEADING_TO_TYPE[last_heading] == 'soup':\n name, price = split_price(meal)\n food_allergens = find_allergens_for_food(allergens, name)\n soups.append(dict(price=price, name=name, type='side', allergens=food_allergens))\n elif HEADING_TO_TYPE[last_heading] == 'meal soup':\n name, price = split_price(meal)\n food_allergens = find_allergens_for_food(allergens, name)\n soups.append(dict(price=price, name=name, type='main', allergens=food_allergens))\n elif HEADING_TO_TYPE[last_heading] == 'meat':\n hot_cold = HOT_COLD_MAPPING[last_heading]\n name, price = split_price(meal)\n if ':' in meal: # Meat in the old way\n kind, name = [s.strip() for s in name.split(':')]\n kind = kind.lower()\n kind = TRANSLATE_KIND[kind]\n food_allergens = find_allergens_for_food(allergens, name)\n meats.append(dict(price=price, name=name, kind=kind, hot=hot_cold, allergens=food_allergens))\n else: # Meat in the new way\n # If the name contains '-', it might be an indication of vegan/vegi\n if '-' in name:\n kind = name.split('-')[-1].strip()\n stripped_name = '-'.join(name.split('-')[:-1]).strip() # Re-join other splits\n if kind in TRANSLATE_KIND:\n food_allergens = find_allergens_for_food(allergens, stripped_name)\n meats.append(dict(price=price, name=stripped_name, kind=TRANSLATE_KIND[kind], hot=hot_cold,\n allergens=food_allergens))\n else:\n food_allergens = find_allergens_for_food(allergens, name)\n meats.append(dict(price=price, name=name, kind='meat', hot=hot_cold, allergens=food_allergens))\n else:\n # Sometimes there is vegan/vegetarian in the name, in which case they don't repeat the type.\n if any(possible in name.lower() for possible in POSSIBLE_VEGETARIAN):\n kind = 'vegetarian'\n elif any(possible in name.lower() for possible in POSSIBLE_VEGAN):\n kind = 'vegan'\n elif any(possible in name.lower() for possible in POSSIBLE_FISH):\n kind = 'fish'\n else:\n kind = 'meat'\n food_allergens = find_allergens_for_food(allergens, name)\n meats.append(dict(price=price, name=name, kind=kind, hot=hot_cold, allergens=food_allergens))\n elif HEADING_TO_TYPE[last_heading] == 'vegetables':\n vegetables.append(meal)\n else:\n raise ValueError(f\"Oops, HEADING_TO_TYPE contains unknown value for {last_heading}.\")\n\n # sometimes the closed indicator has a different layout.\n if not vegetables and not soups and not meats:\n return dict(open=False)\n\n r = dict(open=True, vegetables=vegetables, soup=soups, meat=meats)\n return r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make predictions using a single binary estimator.
def predict_binary(self,estimator, X): return sklearn.multiclass._predict_binary(estimator,X)
[ "def _fit_binary(estimator, X, y, classes=None, sample_weight=None):\n unique_y = np.unique(y)\n if len(unique_y) == 1:\n if classes is not None:\n if y[0] == -1:\n c = 0\n else:\n c = y[0]\n warnings.warn(\"Label %s is present in all training examples.\" %\n str(classes[c]))\n estimator = _ConstantPredictor().fit(X, unique_y)\n else:\n estimator = clone(estimator)\n estimator.fit(X, y, sample_weight=None)\n return estimator", "def predict(self, test_data, predict_proba = False, pred_class_and_proba = False):\n pass", "def predict(model, X_test):", "def make_prediction(self, samples: List):\n raise NotImplementedError", "def test_predict():\n recommender = SLIM(alpha=0.1, l1_ratio=1e-3, seed=0)\n utils.test_binary_recommend_ml100k(recommender, 0.1)", "def _setup_prediction_op(self):", "def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):\n # Adjust length of sample weights\n fit_params = fit_params if fit_params is not None else {}\n fit_params = dict([(k, _index_param_value(X, v, train))\n for k, v in fit_params.items()])\n\n X_train, y_train = _safe_split(estimator, X, y, train)\n X_test, _ = _safe_split(estimator, X, y, test, train)\n\n if y_train is None:\n estimator.fit(X_train, **fit_params)\n else:\n estimator.fit(X_train, y_train, **fit_params)\n preds = estimator.predict(X_test)\n return preds, test", "def evaluate_binary_model(self):\n baseline = {}\n baseline['accuracy'] = accuracy_score(self.y_test, [1 for _ in range(\n len(self.y_test))]) # always predict the majority class\n baseline['recall'] = recall_score(\n self.y_test, [1 for _ in range(len(self.y_test))]) # always predict positive\n baseline['precision'] = precision_score(\n self.y_test, [1 for _ in range(len(self.y_test))]) # always predict positive\n baseline['roc'] = 0.5\n baseline['F1score'] = 2 * 0.5 / (0.5 + 1)\n\n test_results = {}\n test_results['accuracy'] = accuracy_score(self.y_test, self.pred_test)\n test_results['recall'] = recall_score(self.y_test, self.pred_test)\n test_results['precision'] = precision_score(\n self.y_test, self.pred_test)\n test_results['roc'] = roc_auc_score(self.y_test, self.prob_test)\n test_results['F1score'] = f1_score(\n self.y_test, self.pred_test) # binary classifier\n\n train_results = {}\n train_results['accuracy'] = accuracy_score(\n self.y_train, self.pred_train)\n train_results['recall'] = recall_score(self.y_train, self.pred_train)\n train_results['precision'] = precision_score(\n self.y_train, self.pred_train)\n train_results['roc'] = roc_auc_score(self.y_train, self.prob_train)\n train_results['F1score'] = f1_score(self.y_train, self.pred_train)\n\n for metric in ['accuracy', 'recall', 'precision', 'roc', 'F1score']:\n print(f'\\n{metric.capitalize()}\\n'\n f'Baseline: {round(baseline[metric], 3)} | '\n f'Test: {round(test_results[metric], 3)} | '\n f'Train: {round(train_results[metric], 3)} ')", "def test_predict():\n\t\n\t# Create a row of data and run prediction.\n\thome = 'Arsenal'\n\taway = 'Chelsea'\n\tstats = pd.read_sql_query(\"select * from stats;\", engine)\n\tmodel = joblib.load('./model.pkl')\n\tresult = prediction.prediction(home, away, stats, model)\n\n\t# Check type of output.\n\tassert isinstance(result, np.ndarray)\n\n\t# Check array length.\n\tassert len(result) == 3", "def classifier_predict(samples, classifier_model):\n return classifier_model.predict(samples)", "def evaluate(labels, predictions):\n i=0\n j=0\n total_true = 0\n total_wrong = 0\n for label,prediction in zip(labels,predictions):\n if label==1:\n total_true = 
total_true + 1\n if prediction == 1:\n i = i + 1\n else:\n total_wrong = total_wrong + 1\n if prediction == 0:\n j = j + 1\n sensitivity = float(i/total_true)\n specificity = float(j/total_wrong)\n return(sensitivity, specificity)\n\n\n\n\n raise NotImplementedError", "def do_predictions(self):\n\n self.train_preds = self.tfmodel.predict(self.Data.X_train)\n self.test_preds = self.tfmodel.predict(self.Data.X_test)\n\n self.Helpers.logger.info(\n \"Training predictions: \" + str(self.train_preds))\n self.Helpers.logger.info(\n \"Testing predictions: \" + str(self.test_preds))\n print(\"\")", "def predict(self):\n raise NotImplementedError(\"Child class must implement this method\")", "def predict(self, user_index: int) -> np.ndarray:", "def make_predictions(main_dict, param, pheno_df, threshold, features, output):\n\n print('\\nMaking predictions...')\n\n # Split training and test set for cross-validation\n pheno_df, x, y, x_train, x_test, y_train, y_test, phenotypes = split_labeled_set(pheno_df, features,\n param['k_fold_cv'])\n\n # Initialize arrays for NN runs\n identifier_index = len(pheno_df.columns.values) - len(features)\n df_output = pheno_df.iloc[:, :identifier_index]\n sum_prob_labeled = np.zeros([y.shape[0], y.shape[1]])\n sum_prob_test = np.zeros([y.shape[0], y.shape[1]])\n\n # Train NN with cross validation for evaluating performance\n performance = pd.DataFrame()\n divide = x.shape[0] // param['k_fold_cv']\n run = 1\n for cv in range(param['k_fold_cv']):\n start = cv * divide\n end = (cv + 1) * divide\n if cv == (param['k_fold_cv'] - 1):\n end = x.shape[0]\n # Train and make predictions for each fold for a number of runs\n for n in range(param['runs']):\n runn = n + cv * param['runs']\n # Train NN with training set\n model, performance = neural_network(x_train[cv], y_train[cv], param, phenotypes, performance, runn,\n x_test[cv], y_test[cv])\n # Predictions on test data\n probabilities_test = model.predict(x_test[cv], batch_size=param['batch_size'])\n sum_prob_test[start:end] += probabilities_test\n\n # Predictions on labeled data\n probabilities_labeled = model.predict(x, batch_size=param['batch_size'])\n predictions_labeled = np.argmax(probabilities_labeled, axis=1)\n sum_prob_labeled += probabilities_labeled\n df_output['Run-%d' % run] = [phenotypes[i] for i in predictions_labeled]\n run += 1\n\n # Save training performance of cross-validation\n num_runs = param['k_fold_cv'] * param['runs']\n plot_training_performance(performance, output['TrainingCV'], num_runs)\n\n # Train NN with the complete labeled set\n performance = pd.DataFrame()\n sum_prob_all = np.zeros([main_dict['data_scaled'].shape[0], y.shape[1]])\n for n in range(param['runs']):\n model, performance = neural_network(x, y, param, phenotypes, performance, n)\n # Predictions on all data\n probabilities_all = model.predict(main_dict['data_scaled'], batch_size=param['batch_size'])\n sum_prob_all += probabilities_all\n plot_training_performance(performance, output['Training'], param['runs'])\n\n # Labeled set single cell accuracies\n cell_accuracy(df_output, sum_prob_labeled, phenotypes, num_runs, output)\n\n # Test-set predictions\n y_pred = np.argmax(sum_prob_test, axis=1)\n y_true = np.argmax(y, axis=1)\n plot_confusion_matrix(y_true, y_pred, phenotypes, output['Confusion'])\n\n # Make predictions for the complete data\n y_all = sum_prob_all / param['runs']\n y_prob_all = (y_all >= threshold).astype('int')\n y_pred_all = np.argmax(y_all, axis=1)\n phenotype_all = []\n for i in range(len(y_pred_all)):\n pred = 
phenotypes[y_pred_all[i]]\n # If none of the probabilities pass the threshold, predict as None phenotype\n if sum(y_prob_all[i]) == 0:\n pred = 'none'\n phenotype_all.append(pred)\n\n # Save phenotype predictions for cell_IDs provided\n cell_id = pd.DataFrame(columns=['CellID', 'Prediction'] + list(phenotypes))\n cell_id['CellID'] = main_dict['cell_id']\n cell_id['Prediction'] = np.array(phenotype_all)\n for i in range(len(phenotypes)):\n cell_id[phenotypes[i]] = y_all[:, i]\n cell_id = cell_id.sort_values('CellID', ascending=True).reset_index(drop=True)\n cell_id.to_csv(path_or_buf=output['PhenotypeCellIDs'], index=False)\n\n # Save predictions and inlier state in the combined dictionary\n main_dict['phenotype'] = np.array(phenotype_all)\n main_dict['is_inlier'] = np.array([p == 'negative' for p in main_dict['phenotype']])\n\n return main_dict", "def predict(self,unlabeled):\r\n y_pred = unlabeled['label']\r\n if(self.main_transformer!=None):\r\n X,y = self.main_transformer.transform(unlabeled)\r\n y_pred = self.model_main.predict(X)\r\n pred_probs = self.model_main.predict_proba(X)\r\n for i,probs in enumerate(pred_probs):\r\n if(max(probs)<self.alpha):\r\n y_pred[i] = 'Unsorted'\r\n unsorted = unlabeled.loc[y_pred == 'Unsorted']\r\n if(self.small_transformer!=None and len(unsorted)!=0):\r\n X,y = self.small_transformer.transform(unsorted)\r\n y = self.model_small.predict(X)\r\n pred_probs = self.model_small.predict_proba(X)\r\n for i,probs in enumerate(pred_probs):\r\n if(max(probs)<self.beta):\r\n y[i] = 'Unsorted'\r\n y_pred[y_pred=='Unsorted'] = y\r\n return y_pred", "def classify1(self,X):\n prediction = self.classify.predict(X)\n \n return prediction", "def gen_predictions(model):\r\n Y=np.load(\"data/Y-val-2k-new.npy\") #validation param data\r\n\r\n fmap=load_fmap(model)\r\n TY = fmap.ssy.transform(Y)\r\n TX = fmap.model.predict(TY)\r\n Xp = fmap.ssx.inverse_transform(TX) #predicted xsecs\r\n\r\n np.save(\"data/X-pre-%s\"%model,Xp,allow_pickle=True)", "def predict(self):\n\t\treturn self.y_pred" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Active, verified organizations related to this incident. Handles both the current 'incidents' and the legacy 'incident' properties.
def organizations(self): from organization import Organization # avoid circular import # lookup using new incidents field orgs = list( Organization.all().filter('incidents', self.key()) .filter('org_verified', True) .filter('is_active', True) ) # build list of id and look for global admin org_ids = set() seen_global_admin = False for org in orgs: if org.is_global_admin: seen_global_admin = True org_id = org.key().id() if org_id not in org_ids: org_ids.add(org_id) # check legacy incident field legacy_field_orgs = Organization.all().filter('incident', self.key()) \ .filter('org_verified', True) \ .filter('is_active', True) for org in legacy_field_orgs: if org.key().id() not in org_ids: orgs.append(org) # prepend global admin if not encountered if not seen_global_admin: orgs = ( list(Organization.all().filter('name', 'Admin')) + orgs ) return orgs
[ "def organizations():", "def get_active_organization(self):\n return self._active_organization", "def get_organizations(self):\n url = \"{}/organizations\".format(self.API_URL)\n if self.debug:\n self.print(\"Sending GET request to URL {}\".format(url))\n r = self.session.get(url)\n r.raise_for_status()\n return r.json()", "def organization(self):\n return self._get_prop_value(self._ORGANIZATION_KEY)", "def getOrganization(self, organizations):\n \n dict_organizations = {} #Empty dictionary to save the organizations\n\n for org in organizations:\n #Empty dictionary to save the attributes of the Organization\n org_att = {}\n org_att['name'] = org.attrib['name']\n org_att['description'] = org.attrib['description']\n org_att['id_equipments'] = org.attrib['id_equipments'].replace(\" \", \"\").split(',')\n\n # If the required information for the Organization is not given the RORI evaluation cannot be done\n if (org_att['name'] or org_att['id_equipments'] or org.attrib['id']) == \"\":\n self.informationRequired = \"Org\"\n\n #Append the attributes to the list of Organizations\n dict_organizations[org.attrib['id']] = org_att\n \n return dict_organizations", "def test_organization():\n return {\n \"organizationLogo\": \"\",\n \"organizationName\": \"Testing\",\n \"organizationId\": \"b86e537e-48c7-483c-815f-2665d5618f38\",\n \"organizationUrl\": \"testing\",\n \"events\": []\n }", "def active(self):\n return ILocalAgencyInfo.providedBy(self.context)", "def test_change_organization(self):\n pass", "def test_organizations(self):\n self.assert_requires_auth(self.instance.organizations)", "def visible_organization(self):\n user_org = \\\n SchoolDB.models.getActiveDatabaseUser().get_active_organization_key()\n if (user_org == self.key()):\n return True\n else:\n sub_orgs = self.get_subordinate_organizations_list()\n return (sub_orgs.count(user_org) > 0)", "def test_get_organization_memberships(self):\n pass", "def get_organization_options(self):\n organizations = self.get_organization_list(fields=\"name\")\n return [(org[\"id\"], org[\"name\"]) for org in organizations]", "def ensure_organisation(self, connection):\n organisation = connection.organisations.find_by_name(CAAS_ORGANISATION_NAME)\n if organisation:\n self.stdout.write(f\"Found existing organisation '{CAAS_ORGANISATION_NAME}'\")\n else:\n self.stdout.write(f\"Creating organisation '{CAAS_ORGANISATION_NAME}'\")\n organisation = connection.organisations.create(name = CAAS_ORGANISATION_NAME)\n return organisation", "def get_active_organization_key(self):\n return self._active_organization.key()", "def test_organization_id_get(self):\n pass", "def get_org_json(self):\n org_dicts = [o.toDict for o in Organization.objects.all()]\n return simplejson.dumps(org_dicts)", "def getOrganisation(self):\n\t\turl = xml_helper.getHref(baseCloudXML,\"orgList\")\n\t\theaders = rest_helper.buildHeaders(Accept=\"application/*+xml;version=5.11\",x_vcloud_authorization=self.xvcloud_authorization)\n\t\tresponse = requests.get(url, headers=headers)\n\t\treturn response.content", "def active_instructors(self, request, pk=None):\n # Get the region\n region = self.get_object()\n # Get the requesting user\n user = self.request.user\n\n # If the user is an admin, then they're fine\n if user.is_staff:\n pass\n # Otherwise, if the user is a committee member and the region\n # matches, then they're fine\n elif user.has_any_role() and user.club.region == region:\n pass\n # Or (least likely) the user is the regional dive officer\n elif user == region.dive_officer:\n pass\n 
# Otherwise, they're forbidden to access this\n else:\n raise PermissionDenied\n\n # Get all instructors from this region\n queryset = User.objects.filter(\n club__region=region,\n qualifications__certificate__is_instructor_certificate=True\n )\n # Filter on active status --- we can't do this through the ORM,\n # so we have to do it on the retrieved queryset.\n queryset = [u for u in queryset if u.current_membership_status() == STATUS_CURRENT]\n serializer = UserSerializer(queryset, many=True)\n return Response(serializer.data)", "def get_subject_organization(self):\n return self.subject_info.get_organization()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
formDataList is a list (or other sequence) of (formName, formData) pairs for normal form fields, or (formName, fileObject) or (formName, (filename, fileData)) pairs for file upload form elements.
def __encodeMultipartFormdata(self, formDataList): boundary = str(time.time()).replace(".", "_").rjust(32, "-") lines = [] for formName, data in formDataList: lines.append("--" + boundary) if type(data) is types.StringType: cd = "Content-Disposition: form-data; name=\"%s\"" % formName lines.append(cd) else: dataType = type(data) if dataType is types.TupleType: filename, data = data elif dataType is types.FileType: filename = data.name data = data.read() else: print "Ignoring unsupported data type: %s" % dataType continue cd = "Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"" % (formName, filename) lines.append(cd) lines.append("Content-Type: %s" % self.__getFileContentType(filename)) lines.append("") lines.append(data) lines.append("--" + boundary + "--") lines.append("") data = string.join(lines, "\r\n") contentType = "multipart/form-data; boundary=%s" % boundary return contentType, data
[ "def handle_form_data(self, data):\n\t\tif data is None:\n\t\t\treturn\n\t\t\n\t\t# medications?\n\t\tmeds = []\n\t\tif 'medications' in data:\n\t\t\torig = data['medications']\n\t\t\tdrugs = orig['drug'] if 'drug' in orig else []\n\t\t\tif not isinstance(drugs, list):\n\t\t\t\tdrugs = [drugs]\n\t\t\t\n\t\t\tfor drug in drugs:\n\t\t\t\tif not isinstance(drug, dict):\n\t\t\t\t\tbody = self.fetch_item(drug)\n\t\t\t\t\tif body is not None:\n\t\t\t\t\t\tgraph = Graph().parse(data=body)\n\t\t\t\t\t\tmed = self.data_from_graph(graph, 'rxnorm')\n\t\t\t\t\t\tif med is not None:\n\t\t\t\t\t\t\tself.complement_data_for(med, 'rxnorm')\n\t\t\t\t\t\t\tmeds.append(med)\n\t\t\n\t\t# our FDA form needs two meds, so make sure to null the second if needed\n\t\twhile len(meds) < 2:\n\t\t\tmeds.append(None)\n\t\t\n\t\tdata['medications']['meds'] = meds", "def get_forms(data):\n forms = {}\n for datum in data:\n # Should streamline setUps. Currently in both tuple and dict.\n try:\n file = datum[0]['file']\n except KeyError:\n file = datum['inputs']['file']\n if file not in forms:\n forms[file] = OdkForm.from_file(TEST_FILES_DIR + file)\n return forms", "def extract_file_fields(form):\n result = []\n for fieldname in form:\n field = form[fieldname]\n if isinstance(field, list):\n for field_entry in field:\n if field_entry.filename:\n result.append(field_entry)\n\n elif field.filename:\n result.append(field)\n\n return result", "def process_formdata(self, valuelist):\n if valuelist:\n if self.is_related:\n self.data = self.datamodel.get_related_interface(self.col_name).get(\n valuelist[0]\n )\n else:\n self.data = self.datamodel.get(valuelist[0])", "def multipart_data(self) -> Optional[UploadFileDict]:\n if not self.input_files:\n return None\n return {\n (input_file.attach_name or self.name): input_file.field_tuple\n for input_file in self.input_files\n }", "def process(self, formdata, data):\n self.process_errors = []\n try:\n self.process_data(data)\n except ValueError as e:\n self.process_errors.append(e.args[0])\n\n # logical fix. 
obj is the default value\n if formdata and self.name in formdata:\n try:\n self.raw_data = formdata.getlist(self.name)\n self.process_formdata(self.raw_data)\n except ValueError as e:\n self.process_errors.append(e.args[0])\n\n for _filter in self.filters:\n try:\n self.data = _filter(self.data)\n except ValueError as e:\n self.process_errors.append(e.args[0])", "def extractForms(self, dxlFileContent):\n \n forms = dxlFileContent.getElementsByTagName(\"form\") + dxlFileContent.getElementsByTagName(\"subform\")\n\n for form in forms:\n dico = {}\n dico['type'] = 'PlominoForm'\n dico['id'], dico['title'] = self.getIdTitleAttributes(form)\n \n # set the layout from \"body\" element\n dico['formLayout'] = self.richtextToHtml(form.getElementsByTagName(\"body\")[0])\n \n # import all the fields included in this form\n dico['fields'] = self.extractFields(form)\n \n # self.extractInsertedFiles(form)\n self.forms.append(dico)", "def encode_multipart_formdata(\n self,\n ) -> Tuple[Optional[str], Optional[bytes]]:\n if not (self._fields or self._files):\n return None, None\n\n NEWLINE = b'\\r\\n'\n BOUNDARY = self._make_mime_boundary()\n content = BytesIO()\n\n for key, value in self._fields.items():\n content.write(b'--%s%s' % (BOUNDARY, NEWLINE))\n content.write(b'Content-Disposition: form-data; name=\"%s\"%s'\n % (key, NEWLINE))\n content.write(NEWLINE)\n content.write(value)\n content.write(NEWLINE)\n\n for key, file_info in self._files.items():\n content.write(b'--%s%s' % (BOUNDARY, NEWLINE))\n content.write(b'Content-Disposition: form-data; name=\"%s\"; ' % key)\n content.write(b'filename=\"%s\"%s' % (file_info['filename'],\n NEWLINE))\n content.write(b'Content-Type: %s%s' % (file_info['mimetype'],\n NEWLINE))\n content.write(NEWLINE)\n content.write(file_info['content'])\n content.write(NEWLINE)\n\n content.write(b'--%s--%s%s' % (BOUNDARY, NEWLINE, NEWLINE))\n content_type = ('multipart/form-data; boundary=%s'\n % BOUNDARY.decode('utf-8'))\n\n return content_type, content.getvalue()", "def data(self, data):\n token_name = '{0}_csrf_token'.format(self.name)\n if hasattr(data, 'post'):\n raw_data = MultiDict()\n raw_data.update(data.files.items())\n for key, value in data.post.items():\n if key.endswith('_csrf_token'):\n raw_data['csrf_token'] = value\n else:\n raw_data[key] = value\n else:\n if token_name in data:\n data['csrf_token'] = data[token_name]\n del data[token_name]\n raw_data = data\n self._set_data_on_fields(raw_data)", "def generate_form_data(self, form):\n\t\tplaceholder_values = {}\n\t\tfor i in form.fields:\n\t\t\tplaceholder_values[i.name] = i.get_placeholder()\n\t\t\n\t\tyield placeholder_values, None, None\n\t\t\n\t\tfor k in placeholder_values:\n\t\t\tfor v in self.vectors:\n\t\t\t\tnx = placeholder_values.copy()\n\t\t\t\tnx[k] = v\n\t\t\t\tyield nx, k, v", "def formdata(raw, headers):\n\n # Force the cgi module to parse as we want. If it doesn't find\n # something besides GET or HEAD here then it ignores the fp\n # argument and instead uses environ['QUERY_STRING'] or even\n # sys.stdin(!). 
We want it to parse request bodies even if the\n # method is GET (we already parsed the querystring elsewhere).\n\n environ = {\"REQUEST_METHOD\": \"POST\"}\n _headers = CaseInsensitiveMapping()\n for k, vals in headers.items():\n for v in vals:\n _headers.add(k.decode('ascii'), v.decode('ascii'))\n headers = _headers\n parsed = cgi.FieldStorage(\n fp=BytesIO(raw),\n environ=environ,\n headers=headers,\n keep_blank_values=True,\n strict_parsing=False,\n )\n result = Mapping()\n for k in parsed.keys():\n vals = parsed[k]\n if not isinstance(vals, list):\n vals = [vals]\n for v in vals:\n if v.filename is None:\n v = v.value\n if isinstance(v, bytes):\n v = v.decode(\"UTF-8\") # XXX Really? Always UTF-8?\n result.add(k, v)\n return result", "def _fill_form_dict(self, form_dict):\n for form in form_dict:\n form_item = {\n 'class': 'input',\n 'attrib': 'id',\n 'value': form\n }\n self._fill_form_item(form_item, form_dict[form])", "def parse_multipart_form_data(\n boundary: bytes,\n data: bytes,\n arguments: Dict[str, List[bytes]],\n files: Dict[str, List[HTTPFile]],\n) -> None:\n # The standard allows for the boundary to be quoted in the header,\n # although it's rare (it happens at least for google app engine\n # xmpp). I think we're also supposed to handle backslash-escapes\n # here but I'll save that until we see a client that uses them\n # in the wild.\n if boundary.startswith(b'\"') and boundary.endswith(b'\"'):\n boundary = boundary[1:-1]\n final_boundary_index = data.rfind(b\"--\" + boundary + b\"--\")\n if final_boundary_index == -1:\n gen_log.warning(\"Invalid multipart/form-data: no final boundary\")\n return\n parts = data[:final_boundary_index].split(b\"--\" + boundary + b\"\\r\\n\")\n for part in parts:\n if not part:\n continue\n eoh = part.find(b\"\\r\\n\\r\\n\")\n if eoh == -1:\n gen_log.warning(\"multipart/form-data missing headers\")\n continue\n headers = HTTPHeaders.parse(part[:eoh].decode(\"utf-8\"))\n disp_header = headers.get(\"Content-Disposition\", \"\")\n disposition, disp_params = _parse_header(disp_header)\n if disposition != \"form-data\" or not part.endswith(b\"\\r\\n\"):\n gen_log.warning(\"Invalid multipart/form-data\")\n continue\n value = part[eoh + 4 : -2]\n if not disp_params.get(\"name\"):\n gen_log.warning(\"multipart/form-data value missing name\")\n continue\n name = disp_params[\"name\"]\n if disp_params.get(\"filename\"):\n ctype = headers.get(\"Content-Type\", \"application/unknown\")\n files.setdefault(name, []).append(\n HTTPFile(\n filename=disp_params[\"filename\"], body=value, content_type=ctype\n )\n )\n else:\n arguments.setdefault(name, []).append(value)", "def parse_multipart_form_data(\n boundary: bytes,\n data: bytes,\n arguments: Dict[str, List[bytes]],\n files: Dict[str, List[HTTPFile]],\n) -> None:\n # The standard allows for the boundary to be quoted in the header,\n # although it's rare (it happens at least for google app engine\n # xmpp). 
I think we're also supposed to handle backslash-escapes\n # here but I'll save that until we see a client that uses them\n # in the wild.\n if boundary.startswith(b'\"') and boundary.endswith(b'\"'):\n boundary = boundary[1:-1]\n final_boundary_index = data.rfind(b\"--\" + boundary + b\"--\")\n if final_boundary_index == -1:\n LOGGER.warning(\"Invalid multipart/form-data: no final boundary\")\n return\n parts = data[:final_boundary_index].split(b\"--\" + boundary + b\"\\r\\n\")\n for part in parts:\n if not part:\n continue\n eoh = part.find(b\"\\r\\n\\r\\n\")\n if eoh == -1:\n LOGGER.warning(\"multipart/form-data missing headers\")\n continue\n headers = HTTPHeaders_parse(part[:eoh].decode(\"utf-8\"))\n disp_header = headers.get(\"Content-Disposition\", \"\")\n disposition, disp_params = _parse_header(disp_header)\n if disposition != \"form-data\" or not part.endswith(b\"\\r\\n\"):\n LOGGER.warning(\"Invalid multipart/form-data\")\n continue\n value = part[eoh + 4 : -2]\n if not disp_params.get(\"name\"):\n LOGGER.warning(\"multipart/form-data value missing name\")\n continue\n name = disp_params[\"name\"]\n if disp_params.get(\"filename\"):\n ctype = headers.get(\"Content-Type\", \"application/unknown\")\n files.setdefault(name, []).append(\n HTTPFile(\n filename=disp_params[\"filename\"], body=value, content_type=ctype\n )\n )\n else:\n arguments.setdefault(name, []).append(value)", "def create(self, list_id, data):\n self.list_id = list_id\n response = self._mc_client._post(url=self._build_path(list_id, 'signup-forms'), data=data)\n return response", "def get_from_form_data(self, data, files, name):\n return self.field.widget.value_from_datadict(data, files, name)", "def get_from_form_data(self, data, files, name):\n return data.get(name, None)", "def add_bucketlist_to_form(input_form):\n\n input_form.fields[form_consts.Common.BUCKET_LIST_VARIABLE_NAME] = \\\n forms.CharField(widget=forms.TextInput,\n required=False,\n label=form_consts.Common.BUCKET_LIST,\n help_text=\"Use comma separated values.\")", "def denormalize_formset_dict(data_dict_list, formset, attr_list):\n assert isinstance(formset, BaseSimpleFormSet)\n res = django.http.QueryDict('', mutable=True)\n for i, data_dict in enumerate(data_dict_list):\n prefix = formset.make_prefix(i)\n form = formset.form(prefix=prefix)\n res.update(denormalize_form_dict(data_dict, form, attr_list))\n res[prefix + '-_exists'] = 'True'\n\n res[str(formset.management_form.add_prefix('next_form_id'))] = str(len(data_dict_list))\n return res\n\n def __str__(self):\n return '%s: %s' % (self.__class__, self.query)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all date-encoded subdirectories in the URL
def get_dates(url, start_year, end_year): # all URLs of `url` dates = [] try: for year in range(start_year, end_year + 1): # domain name of the URL without the protocol # print("url ", url) content = url + str(year) + "/contents.html" # print("content ",content) days = get_href(content, "contents.html") # print("days ",days) for day in days: dates.append(day) except Exception as e: raise e return dates
[ "def parse_folder( self, url ):\n data = self.read_file( url )\n soup = BeautifulSoup( data, \"html.parser\" )\n result = []\n url_path = self.get_url_path( url )\n for a in soup.find_all( \"a\" ):\n href = a['href']\n if href.startswith( url_path ) or href.replace('/blob/','/tree/').startswith( url_path ):\n result.append( href )\n return result", "def get_file_paths(date, translingual=False, v='2'):\r\n return [\"{d}/{f}\".format(d=gdelt_directory, f=f) for f in get_filenames(date, translingual=translingual, v=v)]", "def list_dates(product=None):\n\n date_folders = _get_links(_URI_ROOT)\n if product:\n dates = []\n for date_folder in date_folders:\n uri = '{0}/{1}'.format(_URI_ROOT, date_folder)\n products = [p[:-1] for p in _get_links(uri)] # remove slash\n for available_product in products:\n if product in available_product:\n dates.append(re.findall('\\d{8}', date_folder)[0])\n dates = list(set(dates))\n else:\n dates = [re.findall('\\d{8}', d)[0] for d in date_folders]\n return sorted(dates)", "def get_root_url_for_date(date):\n year = date[:4]\n mm = date[5:7]\n hostname = get_host_name()\n if date <= '2016-10-01':\n aosdir = 'AOS'\n elif date <= '2017-01-27':\n aosdir = 'AOS64'\n else:\n aosdir = 'APE1'\n return \"%s/index.php?dir=%s/CONTAINER/%s/\" % (hostname, aosdir, date)", "def ListWikiDates():\n #Request the dump webpage from Wikipedia\n base_url = 'https://dumps.wikimedia.org/enwiki'\n index = requests.get(base_url).text\n #Analyze the listed links using BeautifulSoup\n soup_index = BeautifulSoup(index, 'html.parser')\n dumps = [a['href'] for a in soup_index.find_all('a') if a.has_attr('href')]\n return dumps", "def parse_modis_dates(product_url, requested_dates):\n import time\n html = load_page_text(product_url).split('\\n')\n\n available_dates = []\n for line in html:\n if line.find(\"href\") >= 0 and \\\n line.find(\"[DIR]\") >= 0:\n # Points to a directory\n the_date = line.split('href=\"')[1].split('\"')[0].strip(\"/\")\n available_dates.append(the_date)\n\n dates = set(requested_dates)\n available_dates = set(available_dates)\n suitable_dates = list(dates.intersection(available_dates))\n suitable_dates.sort()\n return suitable_dates", "def get_datetime_paths(date, station = 'mho'):\n\n # check and see if the directory corresponding to the passed date exists\n\n if station == 'mho':\n dir_path = os.path.join(BASE_IMAGING_DIRECTORY, '%s' % station, str(date.year),\n '%s%02d%s' % (date.strftime('%b'), date.day, str(date.year)[2:]))\n elif station == 'sao':\n mo = date.strftime('%b')\n\n if date.year == 2018:\n month = sao_month_codes_2018[mo]\n else:\n month = sao_month_codes_2019[mo]\n\n dir_path = os.path.join(BASE_IMAGING_DIRECTORY, '%s' % station, str(date.year), month,\n '%s%02d%s' % (mo, date.day, str(date.year)[2:]))\n\n if not os.path.isdir(dir_path):\n return [], [], [], [], [], []\n else:\n doy = date.timetuple().tm_yday\n Bfiles = glob.glob(os.path.join(dir_path, '*B.%03d' % doy))\n Cfiles = glob.glob(os.path.join(dir_path, '*C.%03d' % doy))\n Dfiles = glob.glob(os.path.join(dir_path, '*D.%03d' % doy))\n\n # extract the timestamps from each of the groups\n times_B = []\n times_C = []\n times_D = []\n\n for fn in Bfiles:\n times_B.append(extract_timestamp(fn, date))\n\n for fn in Cfiles:\n times_C.append(extract_timestamp(fn, date))\n\n for fn in Dfiles:\n times_D.append(extract_timestamp(fn, date))\n\n # sort the files (ascending order, wrt time)\n tB = sorted(list(tuple(zip(Bfiles, times_B))), key = sort_key)\n tC = sorted(list(tuple(zip(Cfiles, times_C))), 
key = sort_key)\n tD = sorted(list(tuple(zip(Dfiles, times_D))), key = sort_key)\n\n try:\n Bfiles_sorted, times_B_sorted = list(zip(*tB))\n except ValueError as e:\n print(\"No images of type 'B' found for %s\" % date)\n Bfiles_sorted = []\n times_B_sorted = []\n\n try:\n Cfiles_sorted, times_C_sorted = list(zip(*tC))\n except ValueError as e:\n print(\"No images of type 'C' found for %s\" % date)\n Cfiles_sorted = []\n times_C_sorted = []\n\n try:\n Dfiles_sorted, times_D_sorted = list(zip(*tD))\n except ValueError as e:\n print(\"No images of type 'D' found for %s\" % date)\n Dfiles_sorted = []\n times_D_sorted = []\n\n return Bfiles_sorted, Cfiles_sorted, Dfiles_sorted, times_B_sorted, times_C_sorted, times_D_sorted", "def _get_all_url(cls) -> str:", "def get_directory_path(date):\n\n directory = \"s3a://forex-processed-data/*/{}/{}/{}/*\".format(*date.strftime('%Y %m %d').split())\n\n return directory", "def _get_dirname(self, starttime, endtime):\n if not starttime and not endtime:\n return []\n if starttime.month == endtime.month: # one day\n return [starttime.strftime(\"%Y-%m\")]\n else: # two days\n return [starttime.strftime(\"%Y-%m\"),\n endtime.strftime(\"%Y-%m\")]", "def get_urls(self):\n all_urls = []\n for sitemap in self._get_sitemaps():\n urls = download_sitemap(sitemap)\n for url in urls:\n try:\n year, month, day = re.findall('(\\d+)/(\\d+)/(\\d+)', url)[0]\n except IndexError:\n # urls that don't follow this pattern aren't articles\n continue\n url_datetime = datetime(year=int(year), month=int(month), day=int(day))\n if self.to_date >= url_datetime >= self.from_date:\n all_urls.append(url)\n return all_urls", "def filter_directory(path, url_builder):\n final_files = []\n url_start = path\n files_to_add_path = url_builder\n for file in files_to_add_path:\n final_file = url_start + file\n final_files.append(final_file)\n return final_files", "def get_dates(node):\n\treturn sorted([n['date'] for n in node.leaf_iter()])", "def get_url_year(self, year, useday):\n yearmax = 2012\n\n xmlfile =\\\n 'http://data.nodc.noaa.gov/thredds/catalog/pathfinder/Version5.2/%s/catalog.xml'%year\n\n if year < 1981 or year > yearmax:\n raise Exception('year outside of %d to %d'%(1981, yearmax))\n\n doc = minidom.parse(urllib.request.urlopen(xmlfile))\n\n urls = []\n for node in doc.getElementsByTagName('dataset'):\n url = node.getAttribute('urlPath')\n #if len(url)>0:\n if useday:\n if '_day' in url:\n urls.append(url)\n else:\n if '_night' in url:\n urls.append(url)\n #print url\n\n return urls", "def parse_standard_date(date):\n return [ int(i) for i in date.split(\"/\") ]", "def getcongressURLs():\n\n #Generate the Possible Dates\n dates = getdates()\n\n #Open CSV, Get URLs for Dates, and Write Results\n f = open('congressional_records_URLs.csv', 'w')\n try:\n for dt in range(0, len(dates)):\n date = str(dates[dt]).replace(\"'\", \"\").replace(\",\", \"-\").replace(\" \", \"\").replace(\"(\", \"\").replace(\")\", \"\")\n full_url = getfullURL(date) #Get URL for Date\n f.write(u'%s\\n' % (full_url)) #Write to CSV\n finally:\n f.close()", "def get_root_url_for_ccc_container(date):\n url = get_root_url_for_date(date)\n url += 'alma/logs/cob-cc/CORR/CCC/' \n return url", "def __get_stat_page_urls(self, year):\n return []", "def date_patterns():\n\tfor year in [' %Y',' %y']:\n\t\tfor mon in ['%b','%B','%m']:\n\t\t\tyield ['%%d %s%s'%(mon, year), DAY, []]\n\t\t\tyield ['%s %%d%s'%(mon, year), DAY, []]\n\tfor mon in ['%b','%B']: # Year empty\n\t\tyield ['%%d %s'%(mon), DAY, [YEAR]]\n\t\tyield 
['%s %%d'%(mon), DAY, [YEAR]]\n\tyield ['%%Y %%d %s'%(mon), DAY, []]\n\tyield ['%%Y %s %%d'%(mon), DAY, []]\n\tyield ['%Y %m %d', DAY, []]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets and cleans fuel poverty dataset.
def get_clean_fuel_poverty(): fuel_poverty = get_fuel_poverty() # fuel_poverty = fuel_poverty.rename( columns={ "Area Codes": "code", "Area name": "region_1", "Unnamed: 2": "region_2", "Unnamed: 3": "region_3", "Number of households1": "total_households", "Number of households in fuel poverty1": "fp_households", "Proportion of households fuel poor (%)": "fp_proportion", } ) # # Remove trailing spaces and fix capitalisation in region columns fuel_poverty["region_1"] = fuel_poverty["region_1"].apply(strip_and_titlecase) fuel_poverty["region_2"] = fuel_poverty["region_2"].apply(strip_and_titlecase) fuel_poverty["region_3"] = fuel_poverty["region_3"].apply(strip_and_titlecase) # # Merge the different 'region' columns into one and apply clean_names - # this allows for joining onto data in which local authorities # are only referred to by name and not ID fuel_poverty["clean_name"] = ( fuel_poverty["region_1"] .fillna(fuel_poverty["region_2"]) .fillna(fuel_poverty["region_3"]) .apply(clean_names) ) # Fill in NaN values in region columns so that all region_3 rows # have associated region_1 and region_2 data, # and all region_2 rows have associated region_1 data. # First copy region_1 values into region_2 then forward-fill region_2 - # the 'region_1's stop the filling from going too far fuel_poverty["region_2"] = ( fuel_poverty["region_2"].fillna(fuel_poverty["region_1"]).ffill() ) # Set the copied-over values in region_2 back to NaN fuel_poverty["region_2"].loc[~fuel_poverty["region_1"].isna()] = np.nan # Then forward-fill region_1 fuel_poverty["region_1"] = fuel_poverty["region_1"].ffill() # Filter out all of the region_1 rows - they are not local authorities fuel_poverty = fuel_poverty[~fuel_poverty["region_2"].isna()] # Additionally remove all Met Counties and Inner/Outer London - # these are rows that contain (Met County) or Inner/Outer London in region_2 # and have NA region_3 def not_la_condition(string): return ("(Met County)" in string) | (string in ["Inner London", "Outer London"]) # # not_las = [not_la_condition(string) for string in fuel_poverty["region_2"]] no_region_3 = list(fuel_poverty.region_3.isna()) both = [a and b for a, b in zip(not_las, no_region_3)] fuel_poverty = fuel_poverty.drop(fuel_poverty[both].index) # # Append rows for Greater London Authority and # Greater Manchester Combined Authority - # these are not LAs but some grants went to them combined_authorities = pd.DataFrame( [ [ np.nan, "London", "Greater London Authority", np.nan, np.nan, np.nan, np.nan, "Greater London Authority", ], [ np.nan, "North West", "Greater Manchester Combined Authority", np.nan, np.nan, np.nan, np.nan, "Greater Manchester Combined Authority", ], ], columns=fuel_poverty.columns, ) # fuel_poverty = fuel_poverty.append(combined_authorities, ignore_index=True) # return fuel_poverty
[ "def get_data():\n points = get_alameda_county_points()\n return filter_ranson_criteria(clean_data(get_weather_data(points)))", "def __get_data(self):\n try:\n self.data = self.hdulist[0].data\n except:\n self.hdulist = astropy.io.fits.open(self.map_name)\n self.data = self.hdulist[0].data", "def _load_energy_data(self):\n\n energy = pd.read_excel(DATA_PATH + 'Energy_Consumption/' +\n 'bp-stats-review-2019-all-data.xlsx',\n sheet_name=1)\n energy.set_index('In Mtoe', inplace=True)\n energy = energy.dropna().transpose()\n\n self.datasets['energy'] = energy", "def getTiesData(self):\n # Get Ties data from UI\n self.ties_l_cover = self.ties_widget.ties_leftCover.text()\n self.ties_l_cover = FreeCAD.Units.Quantity(self.ties_l_cover).Value\n self.ties_r_cover = self.ties_widget.ties_rightCover.text()\n self.ties_r_cover = FreeCAD.Units.Quantity(self.ties_r_cover).Value\n self.ties_t_cover = self.ties_widget.ties_topCover.text()\n self.ties_t_cover = FreeCAD.Units.Quantity(self.ties_t_cover).Value\n self.ties_b_cover = self.ties_widget.ties_bottomCover.text()\n self.ties_b_cover = FreeCAD.Units.Quantity(self.ties_b_cover).Value\n self.ties_offset = self.ties_widget.ties_offset.text()\n self.ties_offset = FreeCAD.Units.Quantity(self.ties_offset).Value\n self.ties_diameter = self.ties_widget.ties_diameter.text()\n self.ties_diameter = FreeCAD.Units.Quantity(self.ties_diameter).Value\n self.ties_bent_angle = int(\n self.ties_widget.ties_bentAngle.currentText()\n )\n self.ties_extension_factor = (\n self.ties_widget.ties_extensionFactor.value()\n )\n self.ties_number_check = self.ties_widget.ties_number_radio.isChecked()\n if self.ties_number_check:\n self.ties_number_spacing_check = True\n self.ties_number_spacing_value = (\n self.ties_widget.ties_number.value()\n )\n else:\n self.ties_number_spacing_check = False\n self.ties_number_spacing_value = (\n self.ties_widget.ties_spacing.text()\n )\n self.ties_number_spacing_value = FreeCAD.Units.Quantity(\n self.ties_number_spacing_value\n ).Value\n if self.ties_configuration == \"TwoTiesSixRebars\":\n item1 = self.form.ties_sequenceListWidget.item(0).text()\n item2 = self.form.ties_sequenceListWidget.item(1).text()\n self.ties_sequence = (item1, item2)", "def LoadHousingData(varlist, clean=False):\n\n # Read in targetcsv as Pandas df\n df = pd.read_csv('data/kc_house_data.csv')\n \n # Drop unnecessary columns\n df = df[varlist]\n \n if clean == True:\n \n # Generate QOL variable\n df['sqft_per_occupant'] = df['sqft_living'] / df['bedrooms']\n df['space_x_grade'] = df['sqft_living'] * df['grade']\n \n # Dummy vars for grade excluding lowest grade\n df_dummies = pd.get_dummies(df.grade).iloc[:,1:]\n\n # Combine grade dummy vars with df\n df = pd.concat([df,df_dummies], axis = 1)\n \n # Dummy vars for sqft_lot\n bins = [0,8000, 40000, 500000]\n\n bin_names = ['urban', 'suburban', 'rural']\n\n df['sqft_lot_transform'] = pd.cut(df['sqft_lot'], bins, labels = bin_names)\n\n lot_dummies = pd.get_dummies(df.sqft_lot_transform).iloc[:,:2]\n\n df = pd.concat([df, lot_dummies], axis = 1)\n \n\n # Train-test split\n train_set, test_set = train_test_split(df, test_size = .2, random_state = 5)\n \n train_set = train_set.drop([\n 'sqft_living', \n 'grade', \n 'sqft_lot', \n 'space_x_grade',\n 'sqft_lot_transform'], axis=1)\n \n test_set = test_set.drop([\n 'sqft_living', \n 'grade', \n 'sqft_lot', \n 'space_x_grade',\n 'sqft_lot_transform'], axis=1)\n\n\n split_dfs = {'df': df, 'train_set': train_set, 'test_set': test_set} \n\n return split_dfs\n \n else:\n # 
Train-test split\n train_set, test_set = train_test_split(df, test_size = .2, random_state = 5)\n split_dfs = {'df': df, 'train_set': train_set, 'test_set': test_set} \n return split_dfs", "def get_fuels_yh(self, model_object, attribute_to_get):\n if model_object.enduse_object.crit_flat_profile:\n\n # Yearly fuel\n fuels_reg_y = model_object.enduse_object.fuel_y\n\n if attribute_to_get == 'fuel_peak_dh':\n # Flat shape\n shape_peak_dh = np.full((24), 1/24)\n\n # Because flat shape, the dh_peak is 24/8760\n fuels_reg_peak = fuels_reg_y * (1/365)\n\n fuels = fuels_reg_peak[:, np.newaxis] * shape_peak_dh\n\n elif attribute_to_get == 'shape_non_peak_y_dh':\n # Flat shape\n shape_non_peak_y_dh = np.full((365, 24), (1.0/24))\n fuels = fuels_reg_y * shape_non_peak_y_dh\n\n elif attribute_to_get == 'shape_non_peak_yd':\n # Flat shape\n shape_non_peak_yd = np.ones((365)) / 365\n fuels = fuels_reg_y * shape_non_peak_yd\n\n elif attribute_to_get == 'fuel_yh':\n # Flat shape\n shape_non_peak_yh = np.full((365, 24), 1/8760)\n fast_shape_non_peak_yh = np.zeros((model_object.enduse_object.fuel_new_y.shape[0], 365, 24))\n\n for fueltype, _ in enumerate(fast_shape_non_peak_yh):\n fast_shape_non_peak_yh[fueltype] = shape_non_peak_yh\n\n fuels = fuels_reg_y[:, np.newaxis, np.newaxis] * fast_shape_non_peak_yh\n else:\n # If not flat shape, use yh load profile of enduse\n fuels = getattr(model_object.enduse_object, attribute_to_get)\n\n return fuels", "def clean_data(self):\r\n self.all_data.drop(len(self.all_data) - 1, inplace = True)", "def soil_water(self, location, max_features):\n logger.info(f\"Downloading soilwater data\")\n data_wfs_VMM_ws = self.wfs_request(\n layer='waterbodems:pfas_meetpunten_fcs',\n location=location,\n max_features=max_features)\n data_wfs_OVAM = self.wfs_request(\n layer='pfas:pfas_analyseresultaten',\n location=location,\n max_features=max_features,\n query=Or([\n PropertyIsEqualTo('medium', 'Waterbodem - sediment'),\n PropertyIsEqualTo('medium', 'Waterbodem - vaste deel van waterbodem')]))\n\n data_wfs_VMM_ws = data_wfs_VMM_ws.drop_duplicates(\n subset=data_wfs_VMM_ws.columns)\n data_wfs_OVAM = data_wfs_OVAM.drop_duplicates(\n subset=data_wfs_OVAM.columns)\n data_wfs_OVAM_sediment = data_wfs_OVAM[data_wfs_OVAM['medium'] == 'Waterbodem - sediment']\n data_wfs_OVAM_fixed = data_wfs_OVAM[data_wfs_OVAM['medium'] == 'Waterbodem - vaste deel van waterbodem']\n\n combined_soil_water = {'Soil_water': [data_wfs_VMM_ws, data_wfs_OVAM_sediment, data_wfs_OVAM_fixed]}\n self.combined_datasets.update(combined_soil_water)\n\n data_wfs_VMM_ws_len = len(data_wfs_VMM_ws)\n data_wfs_OVAM_sediment_len = len(data_wfs_OVAM_sediment)\n data_wfs_OVAM_fixed_len = len(data_wfs_OVAM_fixed)\n\n nb_datapoints = {\"Soil_water_VMM\" : data_wfs_VMM_ws_len}\n self.dictionary[\"nb_datapoints\"][0].update(nb_datapoints)\n nb_datapoints = {\"Soil_water_sediment_OVAM\" : data_wfs_OVAM_sediment_len}\n self.dictionary[\"nb_datapoints\"][0].update(nb_datapoints)\n nb_datapoints = {\"Soil_water_fixed_OVAM\": data_wfs_OVAM_fixed_len}\n self.dictionary[\"nb_datapoints\"][0].update(nb_datapoints)\n\n return data_wfs_VMM_ws, data_wfs_OVAM_sediment, data_wfs_OVAM_fixed", "def _load_oil_price_data(self):\n\n # load historical oil prices to approximate historical fossil resource\n # cost.\n\n # MWV. (July 25, 2019). Average annual OPEC crude oil price from 1960\n # to 2019 (in U.S. dollars per barrel) [Chart].\n # In Statista. 
Retrieved July 25, 2019,\n # from https://www.statista.com/statistics/262858/change-in-opec-crude-oil-prices-since-1960/\n\n oil_price = pd.read_excel(\n DATA_PATH + 'Oil_Price/' +\n 'statistic_id262858_opec-oil-price-annually-1960-2019.xlsx',\n sheet_name='Data',\n header=4)\n oil_price.index = oil_price.index.droplevel(0).astype(int)\n oil_price.head()\n\n # convert to price per ton\n toe_to_barrel = 7.1428571428571\n oil_price['Average price in U.S. dollars per ton'] = \\\n oil_price['Average price in U.S. dollars per barrel'] \\\n * toe_to_barrel\n\n self.datasets['oil price'] = oil_price", "def get_all_peptides (self):\n try:\n self.all_peptides = pd.read_csv(self.all_peptides_data, sep ='\\t')\n except:\n print(\"Data File %s not found. Make sure you specified the right directory.\" % self.all_peptides_data)", "def get_data(self):\n df = mugs_data.copy(deep=True)\n df.rename(columns=lambda x: x.strip(), inplace=True)\n self.importance_features = [col for col in df if col.startswith('I')]\n self.product_features = [col for col in df if not col.startswith('I')]\n self.imp_mapper = dict(zip(self.importance_features, PRODUCT_MAP_LIST))\n self.df = df", "def load_hep_data(self,variables2plot=[]):\n file = uproot.open(self.hep_data)\n data = file[self.treename]\n self.df = data.pandas.df( self.features+['target']+variables2plot )\n #self.df = df.sample(frac=0.2)\n print self.df.dtypes\n\n self.metadata = file['metadata'] # names of samples, target values, etc.\n\n return", "def get_town_data(self):\n full_data_crash = self.full_data\n if self.town == 'All':\n town_data_subset = full_data_crash\n else:\n town_data_subset = full_data_crash[full_data_crash[\"Town_Name\"]==self.town]\n return(town_data_subset)", "def __read_and_filter(self):\n\n data = pd.read_csv('data/us_bills.csv', delimiter=';')\n data = data.filter(['Title', 'Major'])\n # data = data.drop(x for x in data.Major if x == 'nan')\n data = data.mask(data.Major == 'NaN').dropna()\n self.data = data", "def getSubtractedForceTriggerData(self,count=1000):\n dataset = self.getStrippedForceTriggerData(count)\n print \"Subtracting pedestals.\"\n return self.processSubtractPedestals(dataset)", "def remove_latent(df, path_json='src/python_code/settings.json'):\n settings = json.load(open(path_json))[\"OOD\"][\"Gather_Data\"]\n names_ood = settings[\"Set_DataSets\"][int(settings[\"Choose_set\"])][\"OOD\"]\n methods = settings[\"Feature_methods\"]\n for method in methods:\n for name_ood in names_ood:\n df = df[df['DataSet'] != name_ood + ' BinaryCross ' + method]\n df = df[df['DataSet'] != 'Train OOD ' + method]\n df = df[df['DataSet'] != 'Test OOD ' + method]\n \"\"\"\n df = df[df['DataSet'] != 'FashionMnist BinaryCross Likehood']\n df = df[df['DataSet'] != 'FashionMnist BinaryCross Disc']\n df = df[df['DataSet'] != 'MNIST-C BinaryCross Disc']\n df = df[df['DataSet'] != 'MNIST-C BinaryCross Likehood']\n df = df[df['DataSet'] != 'Train OOD Disc']\n df = df[df['DataSet'] != 'Test OOD Disc']\n df = df[df['DataSet'] != 'Train OOD Likehood']\n df = df[df['DataSet'] != 'Test OOD Likehood']\n \"\"\"\n return df", "def calculate_ft_temp(self, county, load_8760, enduse):\n\n # Harris county (fips 48201) results in a very large dataframe and\n # memory allocation errors with pandas methods.\n if county == 48201:\n\n # Setting npartions\n load_8760 = dd.from_pandas(load_8760.reset_index(), npartitions=50)\n\n temp_eu_data = self.temp_fraction.xs(county).multiply(\n self.fuel_mix_enduse.xs(county)\n )\n\n def temp_fuel_mult(x, temp_eu_data):\n\n x = 
x.set_index(\n ['naics', 'Emp_Size','End_use']\n ).join(temp_eu_data)\n\n x.load_MMBtu_per_hour.update(\n x.load_MMBtu_per_hour.multiply(x.MMBtu)\n )\n\n return x\n\n temp_load_8760 = load_8760.map_partitions(\n lambda x: temp_fuel_mult(x, temp_eu_data)\n )\n\n temp_load_8760 = temp_load_8760.reset_index()\n\n temp_load_8760 = temp_load_8760.drop(columns=['MMBtu'])\n\n else:\n # Multiply by temperature fraction and fuel mix\n temp_load_8760 = load_8760.load_MMBtu_per_hour.multiply(\n self.temp_fraction.xs(county).multiply(\n self.fuel_mix_enduse.xs(county)\n )\n )\n\n temp_load_8760 = temp_load_8760.dropna()\n\n temp_load_8760.name = 'load_MMBtu_per_hour'\n\n print('type:', type(temp_load_8760))\n\n file_dir_name = self.results_dir+'county_'+str(county)+'.parquet'\n\n if file_dir_name in os.listdir(self.results_dir):\n\n shutil.rmtree(file_dir_name+'/')\n\n if type(temp_load_8760) == pd.core.frame.DataFrame:\n\n temp_load_8760 = pd.DataFrame(temp_load_8760).reset_index()\n\n temp_load_8760.to_parquet(\n file_dir_name, engine='pyarrow', partition_cols=['op_hours'],\n compression='snappy', index=None\n )\n\n else:\n\n temp_load_8760.to_parquet(\n file_dir_name+'/', engine='pyarrow', partition_on=['op_hours'],\n compression='snappy', write_index=True, compute=True\n )\n\n return", "def get_dataset(ps, selected_dataset):\n\n try:\n myds = ps.get_dataset(selected_dataset)\n except Exception as e:\n # TODO: Account for 500 errors\n abort(400, \"Please select a valid Pennsieve dataset\")\n\n \n return myds", "def recent_mixed_data(self):\n return self.get_all_recent_data(self.hardware_data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets and cleans current LA majority party and model (e.g. county, district) data.
def get_clean_parties_models(): parties_models = get_parties_models() # parties_models = parties_models.rename( columns={ "model (C=county, D=district, 1=all-up, 3=thirds, etc.)": "model", } ) # 'Buckinghamshire' row in this dataset is incorrect - # it is labelled as a County council but it has become unitary # Manually replace with the correct data # Source: http://opencouncildata.co.uk/council.php?c=413&y=0 parties_models.loc[2] = ["Buckinghamshire", "U1", "CON"] # # Rename models to full names parties_models["model"] = parties_models["model"].apply(model_type) # # Apply clean_names to all names in parties/models data parties_models["clean_name"] = parties_models["name"].apply(clean_names) parties_models = parties_models.drop(columns="name") # return parties_models
[ "def get_data():\n points = get_alameda_county_points()\n return filter_ranson_criteria(clean_data(get_weather_data(points)))", "def _get_restriction_to_main(self) -> Tuple[pd.Series, np.ndarray]:\n\n # get the names of the main states, remove 'rest' if present\n main_names = self.lineage_probabilities.names\n main_names = main_names[main_names != \"rest\"]\n\n # get the metastable annotations & colors\n cats_main = self.metastable_states.copy()\n colors_main = np.array(self._meta_states_colors.copy())\n\n # restrict both colors and categories\n mask = np.in1d(cats_main.cat.categories, main_names)\n colors_main = colors_main[mask]\n cats_main.cat.remove_categories(cats_main.cat.categories[~mask], inplace=True)\n\n return cats_main, colors_main", "def get_crimeCommunities_data(load_data_size=None):\n\n # src_path = os.path.dirname(os.path.realpath(__file__))\n df = pd.read_csv(os.path.join(folder_location, 'data/crimeCommunities/communities_data.csv'))\n\n df['ViolentCrimesPerPop'] = df['ViolentCrimesPerPop'].apply(lambda x: -1 if x <= 0.24 else 1)\n df['racePctWhite'] = df['racePctWhite'].apply(lambda x: 'other' if x <= 0.75 else 'white')\n df = df.drop(columns=['state', 'county', 'community', 'communityname string', 'fold', 'OtherPerCap',\n # 'medIncome', 'pctWWage', 'pctWInvInc','medFamInc',\n 'LemasSwornFT', 'LemasSwFTPerPop', 'LemasSwFTFieldOps', 'LemasSwFTFieldPerPop',\n 'LemasTotalReq',\n 'LemasTotReqPerPop', 'PolicReqPerOffic', 'PolicPerPop', 'RacialMatchCommPol',\n 'PctPolicWhite', 'PctPolicBlack', 'PctPolicHisp', 'PctPolicAsian', 'PctPolicMinor',\n 'OfficAssgnDrugUnits',\n 'NumKindsDrugsSeiz', 'PolicAveOTWorked', 'PolicCars', 'PolicOperBudg', 'LemasPctPolicOnPatr',\n 'LemasGangUnitDeploy', 'LemasPctOfficDrugUn', 'PolicBudgPerPop'])\n # 29 attributes are dropped because of missing values in these features, or because they contain IDs or names\n\n df = df.rename(columns={'racePctWhite': 'race'})\n\n sensitive_attr_map = {'white': 1, 'other': -1}\n\n s = df['race'].map(sensitive_attr_map).astype(int)\n y = df['ViolentCrimesPerPop']\n\n df = df.drop(columns=['race', 'ViolentCrimesPerPop'])\n\n x = pd.DataFrame(data=None)\n for name in df.columns:\n x = pd.concat([x, normalize(x=df[name])], axis=1)\n\n X = x.to_numpy()\n y = y.to_numpy()\n s = s.to_numpy()\n\n if load_data_size is not None: # Don't shuffle if all data is requested\n # shuffle the data\n perm = list(range(0, len(y)))\n shuffle(perm)\n X = X[perm]\n y = y[perm]\n s = s[perm]\n\n print(\"Loading only %d examples from the data\" % load_data_size)\n X = X[:load_data_size]\n y = y[:load_data_size]\n s = s[:load_data_size]\n\n X = X[:, (X != 0).any(axis=0)]\n\n return X, y, s", "def get_clean_epc():\n epc = get_epc()\n #\n # Calculate median energy rating for each LA:\n epc_medians = (\n epc.groupby(\"LOCAL_AUTHORITY\")[\"CURRENT_ENERGY_EFFICIENCY\"]\n .apply(np.median)\n .reset_index(name=\"median_energy_efficiency\")\n )\n #\n # Calculate proportions of 'improvable' social housing\n # (socially rented dwellings that are currently EPC D or below,\n # and have the potential to be C or above)\n #\n # There are two different strings signifying socially rented\n # in the TENURE column of the EPC data:\n epc_social = epc.loc[epc[\"TENURE\"].isin([\"rental (social)\", \"Rented (social)\"])]\n #\n epc_social[\"is_improvable\"] = (\n epc_social[\"CURRENT_ENERGY_RATING\"].isin([\"G\", \"F\", \"E\", \"D\"])\n ) & (epc_social[\"POTENTIAL_ENERGY_RATING\"].isin([\"C\", \"B\", \"A\"]))\n #\n # Find the numbers of improvable / not 
improvable social houses in each LA\n potential_counts = (\n epc_social.groupby([\"LOCAL_AUTHORITY\", \"is_improvable\"])[\n [\"LOCAL_AUTHORITY\", \"is_improvable\"]\n ]\n .size()\n .reset_index(name=\"count\")\n .pivot(index=\"LOCAL_AUTHORITY\", columns=\"is_improvable\", values=\"count\")\n .rename(columns={True: \"total_improvable\", False: \"total_not_improvable\"})\n )\n # Calculate proportions\n potential_counts.columns.name = None\n potential_counts[\"total_social\"] = potential_counts.sum(axis=1)\n potential_counts[\"prop_improvable\"] = (\n potential_counts[\"total_improvable\"] / potential_counts[\"total_social\"]\n )\n potential_counts = potential_counts.reset_index()[\n [\"LOCAL_AUTHORITY\", \"total_improvable\", \"prop_improvable\"]\n ]\n # Join to medians\n clean_epc = epc_medians.merge(potential_counts, on=\"LOCAL_AUTHORITY\").rename(\n columns={\"LOCAL_AUTHORITY\": \"code\"}\n )\n #\n return clean_epc", "def get_model_details(self):\n logger.info(\"Getting model details.\")\n sql_query = \"\"\"\n SELECT\n mv.model_version_id,\n mv.cause_id,\n c.acause,\n mv.sex_id,\n mv.inserted_by\n FROM\n cod.model_version mv\n JOIN\n shared.cause c USING (cause_id)\n WHERE\n model_version_id = {};\n \"\"\".format(self.model_version_id)\n model_data = db_connect.query(sql_query, conn_def=self.conn_def)\n self.acause = model_data.ix[0, 'acause']\n self.sex_id = model_data.ix[0, 'sex_id']\n self.user = model_data.ix[0, 'inserted_by']", "def form_analysis_data(self):\n fatal_percent_sum = 0\n self.analysis_dct[\"max_fatalities\"] = 0\n self.analysis_dct[\"phases\"] = {}\n self.analysis_dct[\"damage\"] = {}\n self.analysis_dct[\"years\"] = []\n destroyed_dct = {}\n\n for accident in self.accidents:\n accident.process_data()\n fatal_percent_sum += accident.fatalities_percent\n if accident.fatalities > self.analysis_dct[\"max_fatalities\"]:\n self.analysis_dct[\"max_fatalities\"] = accident.fatalities\n\n if accident.phase not in self.analysis_dct[\"phases\"].keys():\n self.analysis_dct[\"phases\"][accident.phase] = 1\n else:\n self.analysis_dct[\"phases\"][accident.phase] += 1\n\n if accident.damage not in self.analysis_dct[\"damage\"].keys():\n self.analysis_dct[\"damage\"][accident.damage] = 1\n else:\n self.analysis_dct[\"damage\"][accident.damage] += 1\n\n if accident.damage == \"Destroyed\" or accident.damage == \"Substantial\":\n if accident.phase not in destroyed_dct.keys():\n destroyed_dct[accident.phase] = 1\n else:\n destroyed_dct[accident.phase] += 1\n\n self.analysis_dct[\"years\"].append(accident.aircraft_years)\n\n self.analysis_dct[\"accidents_number\"] = len(self.accidents)\n self.analysis_dct[\"fatalities_percent\"] = fatal_percent_sum / self.analysis_dct[\"accidents_number\"]\n max_percent_phase = sorted(list(self.analysis_dct['phases'].items()), key=lambda x: x[1], reverse=True)[0][0]\n max_percent_phase_num = max(self.analysis_dct['phases'].values()) / sum(self.analysis_dct['phases'].values()) * 100\n self.analysis_dct[\"max_percent_phase\"] = (max_percent_phase, max_percent_phase_num)\n max_destroyed_planes_phase = sorted(list(self.analysis_dct['phases'].items()), key=lambda x: x[1], reverse=True)[0]\n self.analysis_dct[\"destroyed_damage\"] = max_destroyed_planes_phase", "def extractcompactmodel(data):\n\n\n # Find the value of n_eff, n_g and D at lambda_c\n lambda_c = c/data['f'][5]*1e9\n n_eff = data['real(neff)'][5]\n n_g = data['ng'][5]\n D = data['D'][5]\n\n\n\n return lambda_c, n_eff, n_g, D", "def get_full_vote_info(votes_df):\n vote_counts = 
votes_df.groupby(['voteId', 'party', 'leg_vote']).apply(len).reset_index()\n vote_counts.rename(columns={0: 'count'}, inplace=True)\n\n\n out = votes_df[['voteId']].drop_duplicates()\n cols = ['voteId']\n\n # Rep, Aye\n out = join_new_data(out, vote_counts, 'Republican', 'AYE', 'rep_aye_count', cols)\n # Rep, Noe\n out = join_new_data(out, vote_counts, 'Republican', 'NOE', 'rep_noe_count', cols)\n # Rep, Abs\n out = join_new_data(out, vote_counts, 'Republican', 'ABS', 'rep_abs_count', cols)\n\n # Dem, Aye\n out = join_new_data(out, vote_counts, 'Democrat', 'AYE', 'dem_aye_count', cols)\n # Dem, Noe\n out = join_new_data(out, vote_counts, 'Democrat', 'NOE', 'dem_noe_count', cols)\n # Dem, Abs\n out = join_new_data(out, vote_counts, 'Democrat', 'ABS', 'dem_abs_count', cols)\n\n out = out.fillna(0)\n\n out['d_alignment'] = out.apply(lambda x: 'For'\n if x.dem_aye_count > x.dem_noe_count\n else 'Against',\n axis=1)\n out['r_alignment'] = out.apply(lambda x: 'For'\n if x.rep_aye_count > x.rep_noe_count\n else 'Against',\n axis=1)\n\n out['d_min'] = out.apply(lambda x: min(x.dem_aye_count, x.dem_noe_count), axis=1)\n out['r_min'] = out.apply(lambda x: min(x.rep_aye_count, x.rep_noe_count), axis=1)\n\n out['d_maj'] = out.apply(lambda x: max(x.dem_aye_count, x.dem_noe_count), axis=1)\n out['r_maj'] = out.apply(lambda x: max(x.rep_aye_count, x.rep_noe_count), axis=1)\n\n out['d_total'] = out['dem_aye_count'] + out['dem_noe_count']\n out['r_total'] = out['rep_aye_count'] + out['rep_noe_count']\n\n out = votes_df.merge(out, on='voteId')\n\n return out", "def get_majority_judgment(self):\n return self.get_quota_rule_judgment(0.5)", "def get_clean_fuel_poverty():\n fuel_poverty = get_fuel_poverty()\n #\n fuel_poverty = fuel_poverty.rename(\n columns={\n \"Area Codes\": \"code\",\n \"Area name\": \"region_1\",\n \"Unnamed: 2\": \"region_2\",\n \"Unnamed: 3\": \"region_3\",\n \"Number of households1\": \"total_households\",\n \"Number of households in fuel poverty1\": \"fp_households\",\n \"Proportion of households fuel poor (%)\": \"fp_proportion\",\n }\n )\n #\n # Remove trailing spaces and fix capitalisation in region columns\n fuel_poverty[\"region_1\"] = fuel_poverty[\"region_1\"].apply(strip_and_titlecase)\n fuel_poverty[\"region_2\"] = fuel_poverty[\"region_2\"].apply(strip_and_titlecase)\n fuel_poverty[\"region_3\"] = fuel_poverty[\"region_3\"].apply(strip_and_titlecase)\n #\n # Merge the different 'region' columns into one and apply clean_names -\n # this allows for joining onto data in which local authorities\n # are only referred to by name and not ID\n fuel_poverty[\"clean_name\"] = (\n fuel_poverty[\"region_1\"]\n .fillna(fuel_poverty[\"region_2\"])\n .fillna(fuel_poverty[\"region_3\"])\n .apply(clean_names)\n )\n # Fill in NaN values in region columns so that all region_3 rows\n # have associated region_1 and region_2 data,\n # and all region_2 rows have associated region_1 data.\n # First copy region_1 values into region_2 then forward-fill region_2 -\n # the 'region_1's stop the filling from going too far\n fuel_poverty[\"region_2\"] = (\n fuel_poverty[\"region_2\"].fillna(fuel_poverty[\"region_1\"]).ffill()\n )\n # Set the copied-over values in region_2 back to NaN\n fuel_poverty[\"region_2\"].loc[~fuel_poverty[\"region_1\"].isna()] = np.nan\n # Then forward-fill region_1\n fuel_poverty[\"region_1\"] = fuel_poverty[\"region_1\"].ffill()\n # Filter out all of the region_1 rows - they are not local authorities\n fuel_poverty = fuel_poverty[~fuel_poverty[\"region_2\"].isna()]\n # 
Additionally remove all Met Counties and Inner/Outer London -\n # these are rows that contain (Met County) or Inner/Outer London in region_2\n # and have NA region_3\n def not_la_condition(string):\n return (\"(Met County)\" in string) | (string in [\"Inner London\", \"Outer London\"])\n\n #\n #\n not_las = [not_la_condition(string) for string in fuel_poverty[\"region_2\"]]\n no_region_3 = list(fuel_poverty.region_3.isna())\n both = [a and b for a, b in zip(not_las, no_region_3)]\n fuel_poverty = fuel_poverty.drop(fuel_poverty[both].index)\n #\n # Append rows for Greater London Authority and\n # Greater Manchester Combined Authority -\n # these are not LAs but some grants went to them\n combined_authorities = pd.DataFrame(\n [\n [\n np.nan,\n \"London\",\n \"Greater London Authority\",\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n \"Greater London Authority\",\n ],\n [\n np.nan,\n \"North West\",\n \"Greater Manchester Combined Authority\",\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n \"Greater Manchester Combined Authority\",\n ],\n ],\n columns=fuel_poverty.columns,\n )\n #\n fuel_poverty = fuel_poverty.append(combined_authorities, ignore_index=True)\n #\n return fuel_poverty", "def aggregate_national_estimates_by_district(self):\n data = {}\n states = Division.objects.filter(level=self.STATE_LEVEL)\n for state in tqdm(states):\n districts = Division.objects.filter(\n level=self.DISTRICT_LEVEL, parent=state\n )\n for district in districts:\n aggregated_labels = []\n estimates = CensusEstimate.objects.filter(division=district)\n for estimate in estimates:\n series = estimate.variable.table.series\n year = estimate.variable.table.year\n table = estimate.variable.table.code\n\n label = None\n if estimate.variable.label:\n label = estimate.variable.label.label\n table_label = \"{}{}\".format(table, label)\n\n code = estimate.variable.code\n if series not in data:\n data[series] = {}\n if year not in data[series]:\n data[series][year] = {}\n if table not in data[series][year]:\n data[series][year][table] = {}\n if state.code not in data[series][year][table]:\n data[series][year][table][state.code] = {}\n if (\n district.code\n not in data[series][year][table][state.code]\n ):\n data[series][year][table][state.code][\n district.code\n ] = {}\n if label is not None:\n if table_label not in aggregated_labels:\n aggregated_labels.append(table_label)\n if (\n len(\n CensusEstimate.objects.filter(\n variable=estimate.variable,\n division=district.id,\n )\n )\n > 0\n ):\n data[series][year][table][state.code][\n district.code\n ][label] = self.aggregate_variable(\n estimate, district.id\n )\n else:\n data[series][year][table][state.code][district.code][\n code\n ] = estimate.estimate\n return data", "def ModelOne(patient):\n\n # import model using pickle de-serializer\n with open('./data/10featint_model400_15b.b', 'rb') as f:\n deployed_model = pickle.load(f)\n\n # import complete dataset\n final_features_raw_wid, final_features_raw, active_all = fns.import_features()\n\n # get normalizing measures\n final_features_raw_array = np.array(final_features_raw_wid.drop(['anon_id'], axis=1))\n final_features_mean = np.mean(final_features_raw_array, axis=0)\n final_features_std = np.std(final_features_raw_array, axis=0)\n\n # get the selected features\n selected_feature_pd = pd.read_csv('./data/10featureimptdf_model400_15b.csv')\n selected_feature_names = list(selected_feature_pd['feature'])\n selected_feature_index = list(selected_feature_pd['index'])\n selected_feature_pd2 = 
pd.read_csv('./data/10featureimptdf_model400_15b_readable.csv')\n selected_feature_read = list(selected_feature_pd2['feature'])\n\n # get normalized feature set\n final_features_norm = (final_features_raw - final_features_mean) / final_features_std\n\n # merge w. active status\n final_features_raw['status'] = active_all['isactive_interested']\n\n # group by active / drop-off, get means as array\n final_features_group = final_features_raw.groupby(by='status', axis=0)\n final_features_activemean = final_features_group.get_group(1).mean()\n final_features_dropmean = final_features_group.get_group(0).mean()\n final_features_dropmean = final_features_dropmean.drop('status')\n final_features_activemean = final_features_activemean.drop('status')\n activemean_np = np.array(final_features_activemean)\n dropmean_np = np.array(final_features_dropmean)\n\n try:\n # extra safe that check patient is correct format\n patient = int(patient)\n\n # get features for just this patient\n single_patient = final_features_raw_wid[final_features_raw_wid['anon_id'] == patient]\n single_patient_noid = single_patient.drop('anon_id', axis=1)\n test_features = np.array(single_patient_noid)\n # test_feature_norm = (test_features - final_features_mean) / final_features_std\n test_colnames = list(single_patient_noid.columns.values)\n # test_data_norm = pd.DataFrame(test_feature_norm, columns=test_colnames)\n # patientval = np.array(test_feature_norm[0, :])\n\n # get only features included in model\n selected_features = pd.DataFrame()\n selected_dropmean = []\n selected_activemean = []\n for i in selected_feature_names:\n selected_features[i] = single_patient[i]\n selected_patient = np.array(selected_features)\n selected_patient = np.transpose(selected_patient[0, :])\n\n # get means of both groups for features included in model\n for i in selected_feature_index:\n selected_activemean.append(final_features_activemean[i])\n selected_dropmean.append(final_features_dropmean[i])\n selected_activemean_np = np.array(selected_activemean)\n selected_dropmean_np = np.array(selected_dropmean)\n\n # create df to input into function to compare individual to groups\n comparison = pd.DataFrame({'feature': selected_feature_read,\n 'dropoff_mean': selected_dropmean_np.round(2),\n 'patientval': selected_patient.round(2),\n 'active_mean': selected_activemean_np.round(2)})\n\n # compare this patient to the means of both groups\n # dropcloser = 1 if more similar to drop-off group\n comparison['dropcloser'] = comparison.apply(fns.patientdiff_groups, 1)\n\n # select only those features where more similar to drop-off\n compgroup = comparison.groupby(by='dropcloser', axis=0)\n painpoints = compgroup.get_group(1)\n # painpoints = painpoints.sort_values(y='')\n\n # extract status of patient (active/inactive)\n temp = active_all[active_all['anon_id'] == patient]\n temp = temp['isactive_interested']\n temp2 = np.array(temp)\n active_status = temp2[0]\n print(active_status)\n if active_status == 1:\n activity = 'is active'\n else:\n activity = 'is dropped out'\n\n # get probability of drop-off for this patient\n testX = selected_patient\n pred = deployed_model.predict_proba(testX)\n prediction = pred[0][0]\n prediction = prediction.round(3)\n # print(activity)\n\n # determine/report model performance based on actual status\n if activity == 'is dropped out':\n if prediction > .5:\n assessment = 'The model predicted this user correctly'\n elif prediction < .5:\n assessment = 'The model was not correct for this user. 
No one is perfect!'\n elif prediction == .5:\n assessment = 'This user seems to be on the fence!'\n else:\n assessment = 'Error assessing model accuracy for this user'\n elif activity == 'is active':\n if prediction < .5:\n assessment = 'The model predicted this user correctly'\n elif prediction > .5:\n assessment = 'The model was not correct for this user. No one is perfect!'\n elif prediction == .5:\n assessment = 'This user seems to be on the fence!'\n else:\n assessment = 'Error comparing model prediction and activity status for this patient'\n else:\n assessment = 'Error identifying patient activity status'\n\n prediction = prediction * 100\n prediction = prediction.round(3)\n\n except IndexError:\n prediction = 'not calculable'\n activity = 'is nonexistent'\n assessment = 'please try a different patient id. Hint: try one less than 10187!'\n patient = '-'\n active_status = '-'\n painpoints = pd.DataFrame()\n except ValueError:\n prediction = 'not calculable'\n activity = 'is nonexistent'\n assessment = 'please try a different patient id. Hint: try one less than 10187!'\n patient = '-'\n active_status = '-'\n painpoints = pd.DataFrame()\n\n return prediction, activity, assessment, patient, active_status, painpoints", "def extract_prior_acc_conference_data(self, debug):\r\n year = accolade = first_name = last_name = college = None\r\n with open('../conference_data/ACC.txt', 'r') as f:\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n elif 'Team' in line:\r\n accolade = line.strip()\r\n else:\r\n cleaned_line = re.sub('\\\\.{2,}', ' ', line).strip()\r\n print(cleaned_line)\r\n first_name = cleaned_line.split()[1].strip()\r\n last_name = ' '.join(cleaned_line.split()[2:-1]).strip()\r\n college = cleaned_line.split()[-1].strip()\r\n college = self.__convert_to_full_college_name(college)\r\n\r\n if debug:\r\n self.__print_conference_data_debug_message([year, first_name, last_name, accolade, college],\r\n [year, first_name, last_name, accolade, college])\r\n self.__append_conference_datum(year, first_name, last_name, accolade, college)", "def get_majors(session, bs, college='QNS01', term='1192'):\n values = getParam1(bs, college, icaction='CLASS_SRCH_WRK2_STRM$35$', term=term)\n page = session.post(URL, data=values, headers=headers, proxies=proxies)\n bs = BeautifulSoup(page.text, 'lxml')\n majors_elem = bs.find(id='SSR_CLSRCH_WRK_SUBJECT_SRCH$0').option.find_next_siblings('option')\n majors = []\n for major in majors_elem:\n majors.append(major['value'])\n return majors", "def univMode(L):\n\tdata = Counter(L)\n\t#print data\n\treturn data.most_common(1)[0][0]", "def test_area_studies(self):\n\n credits = [\n APCredit(AP_MUS, 5), # Humanities and Fine Arts - Music\n APCredit(AP_PSY, 5), # Social Sciences - Psychology\n ]\n\n college = init_college(WARREN_NAME)\n college.apply_credits(credits)\n\n # Unit calculation:\n # 4 units for Social Sciences\n # 4 units for Humanities and Fine Arts\n # = 8 units\n\n self.assertEqual(4, college.area_study_hum_credited_units)\n self.assertEqual(4, college.area_study_soc_credited_units)\n\n # Verify the SubRequirement names\n hum_condition = (WARREN_MUS_AS_NAME == college.area_study_hum.name)\n soc_condition = (WARREN_PSY_AS_NAME == college.area_study_soc.name)\n self.assertTrue(hum_condition and soc_condition)", "def confidence_criteria(self):\n if self.confidence_threshold == 0:\n print(\"Confidence threshold is not defined.\")\n return\n df = self.__parse_detailed_metrics(['mape_m','mape_h'], self.evaluation_metrics)\n df = 
(df.groupby(['account_banner','original_product_dimension_25','original_product_dimension_26'])\n .median().reset_index())\n \n df = (df.groupby(['account_banner','original_product_dimension_25','original_product_dimension_26'])\n .median()\n .reset_index())\n\n df.columns = ['Account','Category','Brand','MAPE Model','MAPE Human']\n df['MAPE Model'] = df['MAPE Model'].astype(int)\n df['MAPE Human'] = df['MAPE Human'].astype(int)\n \n report_name = os.path.join(self.model.output_dir, self.model.model_name + '_confidence_criteria.xlsx')\n sort_columns = ['MAPE Model']\n \n with pd.ExcelWriter('outputs/' + self.model.model_name + '_confidence_criteria.xlsx') as writer:\n for account in df['Account'].unique():\n df_p = df[df['Account']==account].sort_values(sort_columns).reset_index(drop=True)\n df_p.to_excel(writer, sheet_name=account)\n \n print(\"Model Confidence Criteria report is saved to {}\".format(report_name))", "def findPduModel():\n try:\n pduModel = self.actorConfig['lamps']['pduModel']\n except KeyError:\n raise RuntimeError(f'lamps pdu model is not properly described')\n\n if pduModel not in ['aten', 'digitalLoggers']:\n raise ValueError(f'unknown pduModel : {pduModel}')\n\n return pduModel", "def guess_counties(self):\n city = self.load_city_metadata()\n counties = TIGER().load_county_boundaries()\n precincts = self.load_preprocessed_shapefile()\n\n # speed things up\n city = city.to_crs(counties.crs)\n precincts = precincts.to_crs(counties.crs)\n\n shape1 = city.geometry.iloc[0]\n shape2 = precincts.unary_union\n union = shape1.union(shape2)\n\n counties = counties[counties.intersects(union)]\n\n # set up equal area projection\n proj = util.crs.equal_area_from_geodf(city)\n city = city.to_crs(proj)\n counties = counties.to_crs(proj)\n precincts = precincts.to_crs(proj)\n\n # calculate union of city and precincts\n shape1 = city.geometry.iloc[0]\n shape2 = precincts.unary_union\n union = shape1.union(shape2)\n\n # determine counties with plausible intersection\n tol = precincts.area.min() * 1e-6\n counties = counties.set_index('COUNTYFP')\n intersection = counties.intersection(union).area\n intersecting = intersection[intersection > tol].index.tolist()\n\n self.save_guessed_counties(intersecting)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets and cleans data on grants received by LAs.
def get_clean_grants():
    grants = get_grants()
    grants = grants.rename(
        columns={
            "Local authority": "full_name",
            "GHG LADS 1a": "GHG_1a_individuals",
            "1a Consortium Leads": "GHG_1a_leads",
            "1a Consortium bodies": "GHG_1a_bodies",
            "GHG LADS 1b": "GHG_1b_individuals",
            "1b Consortium leads": "GHG_1b_leads",
            "1b Consortium bodies": "GHG_1b_bodies",
            "Social Housing Decarbonisation Fund - Demonstrator ": "SHDDF",
            "Total": "total_grants",
        }
    )
    #
    # Some regions appear twice in the grants data
    duplicate_strings = ["Greenwich", "Lewisham", "Redbridge"]
    regex_exp = "|".join(duplicate_strings)
    clean_grants = grants[~grants["full_name"].str.contains(regex_exp, regex=True)]
    #
    for string in duplicate_strings:
        duplicate_df = grants[grants["full_name"].str.contains(string)]
        replacement_row = duplicate_df.iloc[0] + duplicate_df.iloc[1]
        replacement_row["full_name"] = string
        clean_grants = clean_grants.append(replacement_row, ignore_index=True)
    #
    # Babergh and Mid Suffolk are shown in one row in the grants data,
    # but they are actually two different LAs - the stated grants
    # apply to both individually
    babergh_ms = clean_grants[
        [("Babergh and Mid Suffolk" in name) for name in clean_grants["full_name"]]
    ]
    babergh = babergh_ms.copy()
    babergh["full_name"] = "Babergh"
    ms = babergh_ms.copy()
    ms["full_name"] = "Mid Suffolk"
    clean_grants = (
        clean_grants[
            [
                ("Babergh and Mid Suffolk" not in name)
                for name in clean_grants["full_name"]
            ]
        ]
        .append(babergh)
        .append(ms)
        .reset_index(drop=True)
    )
    #
    # As before, apply clean_names in order to join data
    clean_grants["clean_name"] = clean_grants["full_name"].apply(clean_names)
    clean_grants = clean_grants.drop(columns="full_name")
    #
    return clean_grants
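A minimal usage sketch for the cleaner above (not part of the dataset row). It assumes get_clean_grants and the companion get_clean_fuel_poverty cleaner that appears among the negatives below are importable from the same module; clean_name is the join key both functions create for LA-level tables, and the column names used here come from those functions.

# Hypothetical usage sketch; imports and module layout are assumptions.
grants_df = get_clean_grants()
fuel_poverty_df = get_clean_fuel_poverty()
# Join the two cleaned LA-level tables on the shared clean_name key
merged = grants_df.merge(fuel_poverty_df, on="clean_name", how="left")
print(merged[["clean_name", "total_grants", "fp_proportion"]].head())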
[ "def _downgrade_access_data():\n conn = op.get_bind()\n\n res = conn.execute(sa.text('SELECT COUNT(*) FROM events.menu_entry_principals WHERE type != :regforms'),\n regforms=_PrincipalType.registration_form)\n if res.fetchone()[0]:\n raise Exception('Cannot downgrade; some menu items contain complex ACLs and would become unprotected')\n\n # The server_default is set to give everyone access to all menu entries. We need to restrict\n # those appropriately now. Start with locking down for speakers, and then open up to registered\n # participants if applicable\n\n # Any menu entry that is accessible to speakers should only be open to speakers. If it is also\n # open to registrants, we'll expand this later on.\n conn.execute(\n sa.text('''\n UPDATE events.menu_entries\n SET access = :speakers\n WHERE speakers_can_access AND protection_mode = :protected\n '''),\n speakers=_MenuEntryAccess.speakers.value,\n protected=_ProtectionMode.protected.value\n )\n\n # Inheriting menu entries with the parent accessible to speakers should be only open to\n # speakers. Again expanding to others later on.\n conn.execute(\n sa.text('''\n UPDATE events.menu_entries\n SET access = :speakers\n WHERE menu_entries.id IN (\n SELECT me.id\n FROM events.menu_entries me\n LEFT OUTER JOIN events.menu_entries parent_me ON (me.parent_id = parent_me.id)\n WHERE\n me.type IN (:user_link, :page) AND\n me.parent_id IS NOT NULL AND\n me.protection_mode = :inheriting AND\n parent_me.protection_mode = :protected AND\n parent_me.speakers_can_access\n )\n '''),\n speakers=_MenuEntryAccess.speakers.value,\n user_link=_MenuEntryType.user_link.value,\n page=_MenuEntryType.page.value,\n inheriting=_ProtectionMode.inheriting.value,\n protected=_ProtectionMode.protected.value,\n )\n\n # Any menu entry that has a registration attached to it should be registered participants only,\n # provided its protection mode is set to protected\n conn.execute(\n sa.text('''\n UPDATE events.menu_entries\n SET access = :registered\n WHERE\n protection_mode = :protected AND\n menu_entries.id IN (\n SELECT DISTINCT menu_entry_id\n FROM events.menu_entry_principals\n WHERE type = :regform\n )\n '''),\n registered=_MenuEntryAccess.registered_participants.value,\n regform=_PrincipalType.registration_form.value,\n protected=_ProtectionMode.protected.value,\n )\n\n # Menu entries that are inheriting with their parent set to protected and containing\n # registrations get the same treatment\n conn.execute(\n sa.text('''\n UPDATE events.menu_entries\n SET access = :registered\n WHERE id IN (\n SELECT me.id\n FROM events.menu_entries me\n LEFT OUTER JOIN events.menu_entries parent_me ON me.parent_id = parent_me.id\n RIGHT OUTER JOIN events.menu_entry_principals mep ON mep.menu_entry_id = parent_me.id\n WHERE\n me.type IN (:user_link, :page) AND\n me.parent_id IS NOT NULL AND\n me.protection_mode = :inheriting AND\n parent_me.protection_mode = :protected AND\n mep.type = :regform\n )\n '''),\n registered=_MenuEntryAccess.registered_participants.value,\n user_link=_MenuEntryType.user_link.value,\n page=_MenuEntryType.page.value,\n inheriting=_ProtectionMode.inheriting.value,\n protected=_ProtectionMode.protected.value,\n regform=_PrincipalType.registration_form.value,\n )", "def grants(self):\n from linode.objects.account import UserGrants\n resp = self._client.get('/profile/grants') # use special endpoint for restricted users\n\n grants = UserGrants(self._client, self.username)\n grants._populate(resp)\n return grants", "def grant_read_write_data(\n self, 
grantee: aws_cdk.aws_iam.IGrantable\n ) -> aws_cdk.aws_iam.Grant:\n ...", "def clean_data_buffer():\n global usr_occ_list, usr_name_list, usr_url_list\n usr_name_list = []\n usr_occ_list = []\n usr_url_list = []", "def parse_found_acl(self, bucket):\r\n if bucket.foundACL is None:\r\n return\r\n\r\n if 'Grants' in bucket.foundACL:\r\n for grant in bucket.foundACL['Grants']:\r\n if grant['Grantee']['Type'] == 'Group':\r\n if 'URI' in grant['Grantee'] and grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers':\r\n # Permissions have been given to the AuthUsers group\r\n if grant['Permission'] == 'FULL_CONTROL':\r\n bucket.AuthUsersRead = Permission.ALLOWED\r\n bucket.AuthUsersWrite = Permission.ALLOWED\r\n bucket.AuthUsersReadACP = Permission.ALLOWED\r\n bucket.AuthUsersWriteACP = Permission.ALLOWED\r\n bucket.AuthUsersFullControl = Permission.ALLOWED\r\n elif grant['Permission'] == 'READ':\r\n bucket.AuthUsersRead = Permission.ALLOWED\r\n elif grant['Permission'] == 'READ_ACP':\r\n bucket.AuthUsersReadACP = Permission.ALLOWED\r\n elif grant['Permission'] == 'WRITE':\r\n bucket.AuthUsersWrite = Permission.ALLOWED\r\n elif grant['Permission'] == 'WRITE_ACP':\r\n bucket.AuthUsersWriteACP = Permission.ALLOWED\r\n\r\n elif 'URI' in grant['Grantee'] and grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers':\r\n # Permissions have been given to the AllUsers group\r\n if grant['Permission'] == 'FULL_CONTROL':\r\n bucket.AllUsersRead = Permission.ALLOWED\r\n bucket.AllUsersWrite = Permission.ALLOWED\r\n bucket.AllUsersReadACP = Permission.ALLOWED\r\n bucket.AllUsersWriteACP = Permission.ALLOWED\r\n bucket.AllUsersFullControl = Permission.ALLOWED\r\n elif grant['Permission'] == 'READ':\r\n bucket.AllUsersRead = Permission.ALLOWED\r\n elif grant['Permission'] == 'READ_ACP':\r\n bucket.AllUsersReadACP = Permission.ALLOWED\r\n elif grant['Permission'] == 'WRITE':\r\n bucket.AllUsersWrite = Permission.ALLOWED\r\n elif grant['Permission'] == 'WRITE_ACP':\r\n bucket.AllUsersWriteACP = Permission.ALLOWED\r\n\r\n # All permissions not explicitly granted in the ACL are denied\r\n # TODO: Simplify this\r\n if bucket.AuthUsersRead == Permission.UNKNOWN:\r\n bucket.AuthUsersRead = Permission.DENIED\r\n\r\n if bucket.AuthUsersWrite == Permission.UNKNOWN:\r\n bucket.AuthUsersWrite = Permission.DENIED\r\n\r\n if bucket.AuthUsersReadACP == Permission.UNKNOWN:\r\n bucket.AuthUsersReadACP = Permission.DENIED\r\n\r\n if bucket.AuthUsersWriteACP == Permission.UNKNOWN:\r\n bucket.AuthUsersWriteACP = Permission.DENIED\r\n\r\n if bucket.AuthUsersFullControl == Permission.UNKNOWN:\r\n bucket.AuthUsersFullControl = Permission.DENIED\r\n\r\n if bucket.AllUsersRead == Permission.UNKNOWN:\r\n bucket.AllUsersRead = Permission.DENIED\r\n\r\n if bucket.AllUsersWrite == Permission.UNKNOWN:\r\n bucket.AllUsersWrite = Permission.DENIED\r\n\r\n if bucket.AllUsersReadACP == Permission.UNKNOWN:\r\n bucket.AllUsersReadACP = Permission.DENIED\r\n\r\n if bucket.AllUsersWriteACP == Permission.UNKNOWN:\r\n bucket.AllUsersWriteACP = Permission.DENIED\r\n\r\n if bucket.AllUsersFullControl == Permission.UNKNOWN:\r\n bucket.AllUsersFullControl = Permission.DENIED", "def test_get_all_account_permission_using_get(self):\n pass", "def filterGrantsForFreqRange(grants, low_freq, high_freq):\n chan_type = findDpaType(low_freq, high_freq)\n if chan_type == DpaType.OUT_OF_BAND:\n # All grants affect COCHANNEL, including those higher than 3650MHz.\n return grants\n return [g for g in grants\n if 
(min(g.high_frequency, high_freq) - max(g.low_frequency, low_freq)) > 0]", "def parse_grants(grants):\n return concat([grant.cidr_ip for grant in grants\n if grant.cidr_ip is not None],\n [grant.name for grant in grants\n if grant.name is not None])", "def _revoke_range_users_app_token_grant(self, start_fhir_id, count, app_name):\n for i in range(0, count):\n fhir_id = start_fhir_id + str(i)\n cw = Crosswalk.objects.get(_fhir_id=fhir_id)\n app = Application.objects.get(name=app_name)\n remove_application_user_pair_tokens_data_access(app, cw.user)", "def get(self):\n layered_cache.DeleteAllExpiredEntities()", "def find_users_without_mfa(ctx):", "def test_get_user_effective_rights(self):\n pass", "def get_all_consent_uncached(self, user):\n raise NotImplementedError", "def test_get_account_allocation_mapping_all_using_get(self):\n pass", "def get_all_consent_uncached(self, user):\n stored_consent = self._get_stored_consent_for_user(user)\n result = {}\n\n if stored_consent:\n for key, value in stored_consent.consent_grants.items():\n if value:\n result[key] = Consent.GRANTED\n else:\n result[key] = Consent.DENIED\n\n return result", "def get_all_access_for_reader(email):\n\n\tapprover_email = get_jwt_identity()\n\tapprover = Approver.query.filter_by(email=approver_email).first()\n\tif not approver:\n\t\treturn bad_request(\"This user does not have the approver role!\")\n\n\t# if the user is an admin display all rooms regardless\n\tadmin = Admin.query.filter_by(approver_id=approver.id).first()\n\tif admin:\n\t\treturn get_all_access_helper(email)\n\n\t# display all rooms that the approver has responsibility over\n\tapprover_rooms = get_responsibilites_helper(approver)\n\treturn get_all_access_helper(email, approver_rooms)", "def unauth_read(self):\n raise Unauthorized(f\"can't view {self.resource['resourceType']}\")", "def process_allo(param, permit_use):\n run_time_start = pd.Timestamp.today().strftime('%Y-%m-%d %H:%M:%S')\n print(run_time_start)\n\n #######################################\n ### Read in source data and update accela tables in ConsentsReporting db\n print('--Reading in source data...')\n\n ## Make object to contain the source data\n db = types.SimpleNamespace()\n\n for t in param['misc']['AllocationProcessing']['tables']:\n p = param['source data'][t]\n print(p['table'])\n if p['schema'] != 'public':\n stmt = 'select {cols} from \"{schema}\".\"{table}\"'.format(schema=p['schema'], table=p['table'], cols=json.dumps(p['col_names'])[1:-1])\n else:\n stmt = 'select {cols} from \"{table}\"'.format(table=p['table'], cols=json.dumps(p['col_names'])[1:-1])\n setattr(db, t, sf.read_table(p['username'], p['password'], p['account'], p['database'], p['schema'], stmt))\n\n ##################################################\n ### Sites\n print('--Process Waps')\n\n ## takes\n wap_allo1 = db.wap_allo.copy()\n wap1 = wap_allo1['Wap'].unique()\n waps = wap1[~pd.isnull(wap1)].copy()\n\n ## Check that all Waps exist in the USM sites table\n usm_waps1 = db.waps[db.waps.isin(waps)].copy()\n # usm_waps1[['NzTmX', 'NzTmY']] = usm_waps1[['NzTmX', 'NzTmY']].astype(int)\n\n if len(wap1) != len(usm_waps1):\n miss_waps = set(wap1).difference(set(usm_waps1.Wap))\n print('Missing {} Waps in USM'.format(len(miss_waps)))\n wap_allo1 = wap_allo1[~wap_allo1.Wap.isin(miss_waps)].copy()\n\n\n ##################################################\n ### Permit table\n print('--Process Permits')\n\n '''\n WILCO:\n Selection FromDate and toDate was a bit of a pain in the ass i remember for the Rakaia as well. 
I don't think there is any filtering done here below yet, but maybe it is\n good to consider that:\n 1) Some consents may have never been active between the FromDate and ToDate. The 'Given Effect To' field can help with that. If the given effect to is larger than the\n toDate, then that consent was never exercised and (at least for modelling purposes) should be dropped from the list of consents.\n 2) If the Given Effect To date is larger than the fromDate, then set FromDate equal to Given Effect To.\n 3) For parent and child consents (orginal and renewals) it is good to check the FromDate and ToDate. In the Ecan database the FromDate of the renewal is most of the time\n equal to the ToDate of the parent (original record), which would lead to double accounting for that day. For the Rakaia I fixed this by making sure that sure that\n the toDate is always 1 day before the frommDate of the child consent.\n\n Below I have inserted some (commented) code that I used in my Rakaia work, so not sure whether you want to use this yes/no.\n\n '''\n\n# #-Select consents that were active between sdate and edate\n# print 'Filter consents that were active between %s and %s...' %(sdate.strftime('%d-%m-%Y'), edate.strftime('%d-%m-%Y'))\n# df1 = df.loc[(df['toDate']>pd.Timestamp(sdate)) & (df['fmDate']<=pd.Timestamp(edate))]\n# #-If 'Given Effect To' date is later than 'toDate', then consent was never active in between the fmDate-toDate period, and is therefore removed from the dataframe\n# df1.loc[(df1['Given Effect To'] > df1['toDate']),:]=np.nan\n# df2 = df1.dropna(how='all')\n# #-If 'Given Effect To' date is later than 'fmDate', then the 'fmDate' field is set to 'Given Effect To'\n# df2.loc[(df2['fmDate'] < df2['Given Effect To']),['fmDate']]= df2['Given Effect To']\n#\n# #-Unique consent numbers of 'OriginalRecord'\n# ori_records = pd.unique(df2['OriginalRecord'])\n# df2_columns = list(df2.columns)\n# fmDate_index = df2_columns.index('fmDate')\n# toDate_index = df2_columns.index('toDate')\n# #-Make sure toDate is always 1 day before the fmDate of the child consent. Required to make sure that a consent isn't active twice on one day\n# for c in ori_records:\n# #-select the consents that belong to the same group (have same parent so to speak)\n# df_short = df2.loc[df2['OriginalRecord']==c]\n# for i in range(0,len(df_short)-1):\n# toDate = df_short.iloc[i,toDate_index] #-toDate of current record\n# fmDate = df_short.iloc[i+1,fmDate_index] #-fromDate of child record\n# if toDate == fmDate: #-cannot be equal. If so, then decrease the todate of the current record with one day\n# df_short.iloc[i, toDate_index] = toDate - dt.timedelta(days=1)\n# df2.loc[df2['OriginalRecord']==c] = df_short\n# #-get rid of old dataframes\n# df = df2.copy()\n# df1 = None; df2 = None; del df1, df2\n#\n# #-For consents that are active for one day, the toDate may now (because of extracting one day from toDate) be smaller than fmDate. 
Those records are removed\n# df = df.loc[df['toDate']>=df['fmDate']]\n\n ## Clean data\n permits2 = db.permit.copy()\n permits2['FromDate'] = pd.to_datetime(permits2['FromDate'], infer_datetime_format=True, errors='coerce')\n permits2['ToDate'] = pd.to_datetime(permits2['ToDate'], infer_datetime_format=True, errors='coerce')\n\n ## Filter data\n permits2 = permits2[permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull()].copy()\n # permits2 = permits2[(permits2['FromDate'] > '1950-01-01') & (permits2['ToDate'] > '1950-01-01') & (permits2['ToDate'] > permits2['FromDate']) & permits2.NzTmX.notnull() & permits2.NzTmY.notnull() & permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()\n\n ## Convert datetimes to date\n permits2.loc[permits2['FromDate'].isnull(), 'FromDate'] = pd.Timestamp('1900-01-01')\n permits2.loc[permits2['ToDate'].isnull(), 'ToDate'] = pd.Timestamp('1900-01-01')\n\n ##################################################\n ### Parent-Child\n print('--Process Parent-child table')\n\n ## Clean data\n pc1 = db.parent_child.copy()\n\n ## Filter data\n pc1 = pc1.drop_duplicates()\n pc1 = pc1[pc1['ParentRecordNumber'].notnull() & pc1['ChildRecordNumber'].notnull()]\n\n ## Check foreign keys --> what are foreign keys?\n crc1 = permits2.RecordNumber.unique()\n pc0 = pc1[pc1.ParentRecordNumber.isin(crc1) & pc1.ChildRecordNumber.isin(crc1)].copy()\n\n #################################################\n ### AllocatedRatesVolumes\n print('--Process Allocation data')\n\n ## Rates\n # Clean data\n wa1 = wap_allo1.copy()\n\n # Check foreign keys\n wa4 = wa1[wa1.RecordNumber.isin(crc1)].copy()\n\n # Find the missing Waps per consent\n crc_wap_mis1 = wa4.loc[wa4.Wap.isnull(), 'RecordNumber'].unique()\n crc_wap4 = wa4[['RecordNumber', 'Wap']].drop_duplicates()\n\n for i in crc_wap_mis1:\n crc2 = pc0[np.in1d(pc0.ParentRecordNumber, i)].ChildRecordNumber.values\n wap1 = []\n while (len(crc2) > 0) & (len(wap1) == 0):\n wap1 = crc_wap4.loc[np.in1d(crc_wap4.RecordNumber, crc2), 'Wap'].values\n crc2 = pc0[np.in1d(pc0.ParentRecordNumber, crc2)].ChildRecordNumber.values\n if len(wap1) > 0:\n wa4.loc[wa4.RecordNumber == i, 'Wap'] = wap1[0]\n\n wa4 = wa4[wa4.Wap.notnull()].copy()\n\n ## Distribute the months\n # Since the tables in accela have no explicit primary/composite keys, it is possible that the eventual composite key 'RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap' does not fully caapture the Accela data set. It is possible that the rates also change by month. This occurs in less than 100 consents ever, so the simplification seems justified. The below code splits the consents out by each month that the consent is allowed to be active by the appropriate rates and volumes listed in the Accela table. 
Then the mean is taken over all months to ensure that there is only one value for 'RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap'.\n\n cols1 = wa4.columns.tolist()\n from_mon_pos = cols1.index('FromMonth')\n to_mon_pos = cols1.index('ToMonth')\n\n allo_rates_list = []\n for val in wa4.itertuples(False, None):\n from_month = int(val[from_mon_pos])\n to_month = int(val[to_mon_pos])\n if from_month > to_month:\n mons = list(range(1, to_month + 1))\n else:\n mons = range(from_month, to_month + 1)\n d1 = [val + (i,) for i in mons]\n allo_rates_list.extend(d1)\n col_names1 = wa4.columns.tolist()\n col_names1.extend(['Month'])\n wa5 = pd.DataFrame(allo_rates_list, columns=col_names1).drop(['FromMonth', 'ToMonth'], axis=1)\n\n # Mean of all months\n grp1 = wa5.groupby(['RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap'])\n mean1 = grp1[['WapRate', 'AllocatedRate', 'VolumeDaily', 'VolumeWeekly', 'Volume150Day']].mean().round(2)\n include1 = grp1['IncludeInSwAllocation'].first()\n mon_min = grp1['Month'].min()\n mon_min.name = 'FromMonth'\n mon_max = grp1['Month'].max()\n mon_max.name = 'ToMonth'\n wa6 = pd.concat([mean1, mon_min, mon_max, include1], axis=1).reset_index()\n # wa6['HydroGroup'] = 'Surface Water'\n\n ## Rename allocation blocks !!!!!! Need to be changed later!!!!\n # av1.rename(columns={'GwAllocationBlock': 'AllocationBlock'}, inplace=True)\n # wa6.rename(columns={'SwAllocationBlock': 'AllocationBlock'}, inplace=True)\n\n # wa6.replace({'SwAllocationBlock': {'In Waitaki': 'A'}}, inplace=True)\n\n ## Combine volumes with rates !!! Needs to be changed later!!!\n # wa7 = pd.merge(av1, wa6, on=['RecordNumber', 'TakeType'])\n\n ## Add in stream depletion\n waps = db.waps.copy()\n wa7 = pd.merge(wa6, waps, on='Wap').drop(['SD1_30Day'], axis=1)\n\n # wa9['SD1_7Day'] = pd.to_numeric(wa9['SD1_7Day'], errors='coerce').round(0)\n # wa9['SD1_150Day'] = pd.to_numeric(wa9['SD1_150Day'], errors='coerce').round(0)\n\n ## Add in the lowflow bool\n wa8 = pd.merge(wa7, db.consented_takes, on=['RecordNumber', 'TakeType'], how='left')\n wa8.loc[wa8.LowflowCondition.isnull(), 'LowflowCondition'] = False\n\n ## Distribute the rates according to the stream depletion requirements\n ## According to the LWRP Schedule 9!\n\n allo_rates1 = wa8.drop_duplicates(['RecordNumber', 'SwAllocationBlock', 'Wap']).set_index(['RecordNumber', 'SwAllocationBlock', 'Wap']).copy()\n\n # Convert daily, 7-day, and 150-day volumes to rates in l/s\n allo_rates1['RateDaily'] = (allo_rates1['VolumeDaily'] / 24 / 60 / 60) * 1000\n allo_rates1['RateWeekly'] = (allo_rates1['VolumeWeekly'] / 7 / 24 / 60 / 60) * 1000\n allo_rates1['Rate150Day'] = (allo_rates1['Volume150Day'] / 150 / 24 / 60 / 60) * 1000\n\n # SD categories - According to the LWRP! 
Schedule 9.\n rate_bool = (allo_rates1['Rate150Day'] * (allo_rates1['SD1_150Day'] * 0.01)) > 5\n\n allo_rates1['sd_cat'] = 'low'\n allo_rates1.loc[(rate_bool | (allo_rates1['SD1_150Day'] >= 40)), 'sd_cat'] = 'moderate'\n allo_rates1.loc[(allo_rates1['SD1_150Day'] >= 60), 'sd_cat'] = 'high'\n allo_rates1.loc[(allo_rates1['SD1_7Day'] >= 90), 'sd_cat'] = 'direct'\n allo_rates1.loc[(allo_rates1['TakeType'] == 'Take Surface Water'), 'sd_cat'] = 'direct'\n\n # Assign volume ratios\n allo_rates1['sw_vol_ratio'] = 1\n allo_rates1.loc[allo_rates1.sd_cat == 'low', 'sw_vol_ratio'] = 0\n allo_rates1.loc[allo_rates1.sd_cat == 'moderate', 'sw_vol_ratio'] = 0.5\n allo_rates1.loc[allo_rates1.sd_cat == 'high', 'sw_vol_ratio'] = 0.75\n allo_rates1.loc[allo_rates1.sd_cat == 'direct', 'sw_vol_ratio'] = 1\n\n allo_rates1 = allo_rates1[allo_rates1['IncludeInSwAllocation'] | (allo_rates1['TakeType'] == 'Take Groundwater')].copy()\n\n ## Assign Rates\n rates1 = allo_rates1.copy()\n\n gw_bool = rates1['TakeType'] == 'Take Groundwater'\n sw_bool = rates1['TakeType'] == 'Take Surface Water'\n\n low_bool = rates1.sd_cat == 'low'\n mod_bool = rates1.sd_cat == 'moderate'\n high_bool = rates1.sd_cat == 'high'\n direct_bool = rates1.sd_cat == 'direct'\n\n lf_cond_bool = rates1.LowflowCondition\n\n rates1['Surface Water'] = 0\n rates1['Groundwater'] = 0\n\n rates1.loc[gw_bool, 'Groundwater'] = rates1.loc[gw_bool, 'Rate150Day']\n rates1.loc[mod_bool | high_bool, 'Surface Water'] = rates1.loc[mod_bool | high_bool, 'Rate150Day'] * (rates1.loc[mod_bool | high_bool, 'SD1_150Day'] * 0.01)\n\n # The below boolean query is directly related to Schedule 9 and the consented allocation document by Matt Smith and Don\n alt_bool = gw_bool & (((rates1.Storativity | lf_cond_bool) & (mod_bool | high_bool)) | rates1.Combined)\n rates1.loc[alt_bool, 'Groundwater'] = rates1.loc[alt_bool, 'Rate150Day'] - rates1.loc[alt_bool, 'Surface Water']\n\n rates1.loc[direct_bool & gw_bool, 'Surface Water'] = rates1.loc[direct_bool & gw_bool, 'RateDaily']\n rates1.loc[(direct_bool & gw_bool) & (rates1.Storativity | lf_cond_bool), 'Groundwater'] = 0\n\n rates1.loc[sw_bool, 'Surface Water'] = rates1.loc[sw_bool, 'AllocatedRate']\n\n rates2 = rates1[['Groundwater', 'Surface Water']].stack().reset_index()\n rates2.rename(columns={'level_3': 'HydroGroup', 0: 'AllocatedRate'}, inplace=True)\n rates2 = pd.merge(rates2, rates1.reset_index()[['RecordNumber', 'SwAllocationBlock', 'Wap', 'FromMonth', 'ToMonth']], on=['RecordNumber', 'SwAllocationBlock', 'Wap'])\n# rates2.rename(columns={'SwAllocationBlock': 'AllocationBlock'}, inplace=True)\n# rates3 = rates2.drop_duplicates(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap']).set_index(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap'])\n rates3 = rates2.drop_duplicates(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap'])\n\n ## Allocated Volume\n av1 = db.allocated_volume.copy()\n # av1.replace({'GwAllocationBlock': {'In Waitaki': 'A'}}, inplace=True)\n\n # Add in the Wap info\n ar1 = allo_rates1.reset_index()[['RecordNumber', 'SwAllocationBlock', 'TakeType', 'Wap', 'Rate150Day', 'Storativity', 'Combined', 'sd_cat', 'sw_vol_ratio', 'LowflowCondition']].copy()\n ar2_grp = ar1.groupby(['RecordNumber', 'TakeType', 'Wap'])\n ar2_rates = ar2_grp[['Rate150Day']].sum()\n ar2_others = ar2_grp[['Storativity', 'Combined', 'sd_cat', 'sw_vol_ratio', 'LowflowCondition']].first()\n ar3 = pd.concat([ar2_rates, ar2_others], axis=1).reset_index()\n# ar3['WapCount'] = ar3.groupby(['RecordNumber', 
'TakeType'])['Wap'].transform('count')\n\n vols1 = pd.merge(av1, ar3, on=['RecordNumber', 'TakeType'])\n# vols1.groupby(['RecordNumber', 'TakeType', 'Wap'])['GwAllocationBlock'].count()\n\n grp3 = vols1.groupby(['RecordNumber', 'TakeType', 'GwAllocationBlock'])\n vols1['Rate150DayAgg'] = grp3['Rate150Day'].transform('sum')\n vols1['ratio'] = vols1['Rate150Day'] / vols1['Rate150DayAgg']\n vols1.loc[vols1['ratio'].isnull(), 'ratio'] = 0\n vols1['FullAnnualVolume'] = (vols1['FullAnnualVolume'] * vols1['ratio'])\n\n vols1.drop(['Rate150DayAgg', 'ratio'], axis=1, inplace=True)\n# vols1['FullAnnualVolume'] = (vols1['FullAnnualVolume'] * vols1['ratio'] / vols1['WapCount']).round()\n# vols1.drop(['WapRateAgg', 'ratio', 'WapCount'], axis=1, inplace=True)\n\n # Assign volumes with discount exception\n # vols1 = allo_rates1.copy()\n vols1['Surface Water'] = vols1['FullAnnualVolume'] * vols1['sw_vol_ratio']\n vols1['Groundwater'] = vols1['FullAnnualVolume']\n vols1.loc[vols1.TakeType == 'Take Surface Water', 'Groundwater'] = 0\n# vols1.loc[(vols1.TakeType == 'Take Surface Water') & (vols1['Surface Water'] == 0), 'Surface Water'] = np.nan\n\n# discount_bool = ((vols1.sd_cat == 'moderate') & (vols1.Storativity)) | ((vols1.sd_cat == 'moderate') & vols1.Combined) | (vols1.sd_cat == 'high') | (vols1.sd_cat == 'direct')\n discount_bool = ((vols1.Storativity | vols1.LowflowCondition) & ((vols1.sd_cat == 'moderate') | (vols1.sd_cat == 'high') | (vols1.sd_cat == 'direct'))) | vols1.Combined\n\n vols1.loc[discount_bool, 'Groundwater'] = vols1.loc[discount_bool, 'FullAnnualVolume'] - vols1.loc[discount_bool, 'Surface Water']\n\n # Split the take types by SW and GW to assign the appropraite allocation block type - Put more info about why this has to happen!\n sw_vols1 = vols1[vols1.TakeType == 'Take Surface Water'].copy()\n gw_vols1 = vols1[vols1.TakeType == 'Take Groundwater'].copy()\n\n sw_vols1.rename(columns={'GwAllocationBlock': 'SwAllocationBlock'}, inplace=True)\n\n gw_vols2 = gw_vols1.set_index(['RecordNumber', 'GwAllocationBlock', 'Wap'])[['Groundwater', 'Surface Water']].stack().reset_index()\n gw_vols2.rename(columns={'level_3': 'HydroGroup', 0: 'AllocatedAnnualVolume'}, inplace=True)\n gw_vols3 = gw_vols2.drop_duplicates(['RecordNumber', 'HydroGroup', 'GwAllocationBlock', 'Wap'])\n\n sw_vols2 = sw_vols1.set_index(['RecordNumber', 'SwAllocationBlock', 'Wap'])[['Groundwater', 'Surface Water']].stack().reset_index()\n sw_vols2.rename(columns={'level_3': 'HydroGroup', 0: 'AllocatedAnnualVolume'}, inplace=True)\n sw_vols3 = sw_vols2.drop_duplicates(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap'])\n\n ## Join SW rates to SW volumes\n rv0 = pd.merge(rates3, sw_vols3, on=['RecordNumber', 'SwAllocationBlock', 'HydroGroup', 'Wap'])\n rv0.rename(columns={'SwAllocationBlock': 'AllocationBlock'}, inplace=True)\n\n ## Join GW rates and GW volumes\n rv1 = pd.merge(rates3, gw_vols3, on=['RecordNumber', 'HydroGroup', 'Wap'])\n\n # Fix duplicates\n rv1['Count'] = rv1.groupby(['RecordNumber', 'HydroGroup', 'SwAllocationBlock', 'Wap'])['AllocatedRate'].transform('count')\n rv1['AllocatedRate'] = rv1['AllocatedRate'] / rv1['Count']\n\n rv_grp = rv1.groupby(['RecordNumber', 'HydroGroup', 'GwAllocationBlock', 'Wap'])\n rv1['Count'] = rv_grp['AllocatedRate'].transform('count')\n rv1['AllocatedAnnualVolume'] = rv1['AllocatedAnnualVolume'] / rv1['Count']\n\n # Distribute volumes according to rates\n rv1['rate_ratio'] = rv1['AllocatedRate'] / rv_grp['AllocatedRate'].transform('sum')\n 
rv1.loc[rv1['rate_ratio'].isnull(), 'rate_ratio'] = 0\n rv1.loc[rv1['rate_ratio'] == np.inf, 'rate_ratio'] = 1\n rv1['vol_sum'] = rv_grp['AllocatedAnnualVolume'].transform('sum')\n rv1['AllocatedAnnualVolume'] = rv1['vol_sum'] * rv1['rate_ratio']\n\n # Specify the Allocation blocks and aggregate\n rv1['AllocationBlock'] = rv1['SwAllocationBlock']\n rv1.loc[rv1.HydroGroup == 'Groundwater', 'AllocationBlock'] = rv1.loc[rv1.HydroGroup == 'Groundwater', 'GwAllocationBlock']\n rv1.drop(['SwAllocationBlock', 'GwAllocationBlock', 'Count', 'rate_ratio', 'vol_sum'], axis=1, inplace=True)\n\n rv1_grp = rv1.groupby(['RecordNumber', 'HydroGroup', 'AllocationBlock', 'Wap'])\n rv1_sum = rv1_grp[['AllocatedRate', 'AllocatedAnnualVolume']].sum()\n rv1_min = rv1_grp[['FromMonth']].min()\n rv1_max = rv1_grp[['ToMonth']].max()\n rv1a = pd.concat([rv1_sum, rv1_min, rv1_max], axis=1).reset_index()\n\n ## Combine the SW and GW data frames\n rv2 = pd.concat([rv0, rv1a])\n\n ## Deal with the \"Include in Allocation\" fields\n sw_allo_bool = allo_rates1.reset_index()[['RecordNumber', 'Wap', 'IncludeInSwAllocation']].drop_duplicates(['RecordNumber', 'Wap'])\n gw_allo_bool = vols1[['RecordNumber', 'Wap', 'IncludeInGwAllocation']].drop_duplicates(['RecordNumber', 'Wap'])\n\n rv2a = pd.merge(rv2, sw_allo_bool, on=['RecordNumber', 'Wap'])\n rv2 = pd.merge(rv2a, gw_allo_bool, on=['RecordNumber', 'Wap'])\n rv3 = rv2[(rv2.HydroGroup == 'Surface Water') | (rv2.IncludeInGwAllocation)]\n rv4 = rv3[(rv3.HydroGroup == 'Groundwater') | (rv3.IncludeInSwAllocation)]\n\n ## Calculate missing volumes and rates\n# ann_bool = rv4.AllocatedAnnualVolume.isnull()\n# rv4.loc[ann_bool, 'AllocatedAnnualVolume'] = (rv4.loc[ann_bool, 'AllocatedRate'] * 0.001*60*60*24*30.42* (rv4.loc[ann_bool, 'ToMonth'] - rv4.loc[ann_bool, 'FromMonth'] + 1)).round()\n#\n# rate_bool = rv4.AllocatedRate.isnull()\n# rv4.loc[rate_bool, 'AllocatedRate'] = np.floor((rv4.loc[rate_bool, 'AllocatedAnnualVolume'] / 60/60/24/30.42/ (rv4.loc[rate_bool, 'ToMonth'] - rv4.loc[rate_bool, 'FromMonth'] + 1) * 1000))\n\n rv4 = rv4[(rv4['AllocatedAnnualVolume'] > 0) | (rv4['AllocatedRate'] > 0)].copy()\n# rv4.loc[rv4['AllocatedAnnualVolume'].isnull(), 'AllocatedAnnualVolume'] = 0\n# rv4.loc[rv4['AllocatedRate'].isnull(), 'AllocatedRate'] = 0\n\n ## Aggregate by crc, allo block, hydrogroup, and wap\n# rv_grp = rv4.groupby(['RecordNumber', 'HydroGroup', 'AllocationBlock', 'Wap'])\n# sum1 = rv_grp[['AllocatedRate', 'AllocatedAnnualVolume']].sum()\n# other1 = rv_grp[['FromMonth', 'ToMonth']].first()\n#\n# rv4 = pd.concat([sum1, other1], axis=1).reset_index()\n\n ## Convert the rates and volumes to integers\n rv4['AllocatedAnnualVolume'] = rv4['AllocatedAnnualVolume'].round().astype('int64')\n rv4['AllocatedRate'] = rv4['AllocatedRate'].round().astype('int64')\n\n ## Combine with permit data\n rv5 = pd.merge(rv4, permits2[['RecordNumber', 'ConsentStatus', 'ApplicationStatus', 'FromDate', 'ToDate']].drop_duplicates('RecordNumber', keep='last'), on='RecordNumber')\n\n ## Update the Waitaki use types\n rv5a = pd.merge(rv5, permit_use[['RecordNumber', 'WaitakiTable5']], on='RecordNumber')\n rv5a.loc[rv5a.AllocationBlock == 'In Waitaki', 'AllocationBlock'] = rv5a.loc[rv5a.AllocationBlock == 'In Waitaki', 'WaitakiTable5']\n rv5b = rv5a.drop('WaitakiTable5', axis=1)\n\n ## Combine with other Wap data\n waps1 = waps[['Wap', 'GwSpatialUnitId', 'SwSpatialUnitId', 'Combined']].copy()\n rv6 = pd.merge(rv5b, waps1, on='Wap')\n\n# gw_bool = (rv6.HydroGroup == 'Groundwater') | (rv6.Combined)\n# 
sw_bool = (rv6.HydroGroup == 'Surface Water') & (~rv6.Combined)\n\n gw_bool = (rv6.HydroGroup == 'Groundwater')\n sw_bool = (rv6.HydroGroup == 'Surface Water')\n\n rv6['SpatialUnitId'] = None\n\n rv6.loc[gw_bool, 'SpatialUnitId'] = rv6.loc[gw_bool, 'GwSpatialUnitId']\n rv6.loc[sw_bool, 'SpatialUnitId'] = rv6.loc[sw_bool, 'SwSpatialUnitId']\n\n ## Save results\n print('Save results')\n\n # Detailed table\n rv6['EffectiveFromDate'] = run_time_start\n out_param = param['source data']['allo_calc']\n sf.to_table(rv6, out_param['table'], out_param['username'], out_param['password'], out_param['account'], out_param['database'], out_param['schema'], True)\n\n ## Return\n return rv6", "def fetch_allow_list(self) -> None:\n\n logging.info(\"fetching valid allow list\")\n ipfs_client = ipfshttpclient.connect()\n res = ipfs_client.get(self.robonomics_allow_list_hash)\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processes EPC dataset to obtain median EPC for each LA and counts/proportions of improvable social housing.
def get_clean_epc():
    epc = get_epc()
    #
    # Calculate median energy rating for each LA:
    epc_medians = (
        epc.groupby("LOCAL_AUTHORITY")["CURRENT_ENERGY_EFFICIENCY"]
        .apply(np.median)
        .reset_index(name="median_energy_efficiency")
    )
    #
    # Calculate proportions of 'improvable' social housing
    # (socially rented dwellings that are currently EPC D or below,
    # and have the potential to be C or above)
    #
    # There are two different strings signifying socially rented
    # in the TENURE column of the EPC data:
    epc_social = epc.loc[epc["TENURE"].isin(["rental (social)", "Rented (social)"])]
    #
    epc_social["is_improvable"] = (
        epc_social["CURRENT_ENERGY_RATING"].isin(["G", "F", "E", "D"])
    ) & (epc_social["POTENTIAL_ENERGY_RATING"].isin(["C", "B", "A"]))
    #
    # Find the numbers of improvable / not improvable social houses in each LA
    potential_counts = (
        epc_social.groupby(["LOCAL_AUTHORITY", "is_improvable"])[
            ["LOCAL_AUTHORITY", "is_improvable"]
        ]
        .size()
        .reset_index(name="count")
        .pivot(index="LOCAL_AUTHORITY", columns="is_improvable", values="count")
        .rename(columns={True: "total_improvable", False: "total_not_improvable"})
    )
    # Calculate proportions
    potential_counts.columns.name = None
    potential_counts["total_social"] = potential_counts.sum(axis=1)
    potential_counts["prop_improvable"] = (
        potential_counts["total_improvable"] / potential_counts["total_social"]
    )
    potential_counts = potential_counts.reset_index()[
        ["LOCAL_AUTHORITY", "total_improvable", "prop_improvable"]
    ]
    # Join to medians
    clean_epc = epc_medians.merge(potential_counts, on="LOCAL_AUTHORITY").rename(
        columns={"LOCAL_AUTHORITY": "code"}
    )
    #
    return clean_epc
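A small, hypothetical consumption example for the EPC summary above (not part of the dataset row). It assumes get_clean_epc and its dependency get_epc are importable as in the source module; the column names are those produced by the function itself and the ranking shown is purely illustrative.

# Hypothetical usage sketch; the ranking is for illustration only.
epc_summary = get_clean_epc()
# Local authorities with the largest share of improvable social housing
top_las = epc_summary.sort_values("prop_improvable", ascending=False).head(10)
print(top_las[["code", "prop_improvable", "median_energy_efficiency"]])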
[ "def groupEnergy(data, en_type):\n df = data.loc[data[\"type\"] == en_type]\n df = GeneralFunctions.removeOutliers(df, \"annual_consume_corrected\")\n\n # create a column for the groupby\n df[\"Postcode4\"] = df[\"POSTCODE\"].str.extract(\"([0-9]+)\")\n\n df_mean = df.groupby([\"Postcode4\", \"year\"]).mean()\n df_median = df.groupby([\"Postcode4\", \"year\"]).median()\n\n return df_mean, df_median", "def metric(self):\n topology = self.topology\n\n #metrics\n MP0 = np.ones (topology.P0)\n MP1 = np.zeros(topology.P1)\n MP2 = np.zeros(topology.P2)\n MD0 = np.ones (topology.D0)\n MD1 = np.zeros(topology.D1)\n MD2 = np.zeros(topology.D2)\n\n #precomputations\n EVP = util.gather(topology.EVi, self.primal)\n FEVP = util.gather(topology.FEi, EVP) #[faces, e3, v2, c3]\n FEM = util.normalize(FEVP.sum(axis=2))\n FEV = util.gather(topology.FEi, topology.EVi)\n\n #calculate areas; devectorization over e makes things a little more elegant, by avoiding superfluous stacking\n for e in range(3):\n areas = triangle_area_from_corners(FEVP[:,e,0,:], FEVP[:,e,1,:], self.dual)\n MP2 += areas #add contribution to primal face\n util.scatter( #add contributions divided over left and right dual face\n FEV[:,e,:], #get both verts of each edge\n np.repeat(areas/2, 2), #half of domain area for both verts\n MD2)\n\n #calc edge lengths\n MP1 += edge_length(EVP[:,0,:], EVP[:,1,:])\n for e in range(3):\n util.scatter(\n topology.FEi[:,e],\n edge_length(FEM[:,e,:], self.dual),\n MD1)\n\n #hodge operators\n self.D2P0 = MD2 / MP0\n self.P0D2 = MP0 / MD2\n\n self.D1P1 = MD1 / MP1\n self.P1D1 = MP1 / MD1\n\n self.D0P2 = MD0 / MP2\n self.P2D0 = MP2 / MD0", "def _subprofiles(self):\n\n self.subcounts = self._get_subcounts()\n hasval = [np.nonzero(arr.astype(bool))[0] for arr in self.subcounts]\n\n # calculating jackknife subprofiles\n for i, lab in enumerate(self.sub_labels):\n # print i, lab\n ind = self.indexes[i]\n cind = hasval[i]\n\n ww = self.w[ind, np.newaxis]\n ww_sum = np.sum(self.w[ind])\n\n Rs = np.zeros(len(cind))\n if self.Rs is not None:\n Rs = np.ones(len(cind)) * self.Rs\n\n if self.ismeta:\n val1parr = (np.sum(self.metadata[0][self.e1_nom, ind][:, cind] * ww, axis=0) /\n np.sum(self.metadata[0][self.meta_denom, ind][:, cind] * ww, axis=0))\n val1marr = (np.sum(self.metadata[1][self.e1_nom, ind][:, cind] * ww, axis=0) /\n np.sum(self.metadata[1][self.meta_denom, ind][:, cind] * ww, axis=0))\n R11 = (val1parr - val1marr) / 0.02\n\n val2parr = (np.sum(self.metadata[2][self.e2_nom, ind][:, cind] * ww, axis=0) /\n np.sum(self.metadata[2][self.meta_denom, ind][:, cind] * ww, axis=0))\n val2marr = (np.sum(self.metadata[3][self.e2_nom, ind][:, cind] * ww, axis=0) /\n np.sum(self.metadata[3][self.meta_denom, ind][:, cind] * ww, axis=0))\n R22 = (val2parr - val2marr) / 0.02\n\n Rs = 0.5 * (R11 + R22)\n # print(Rs)\n\n wsum = np.sum(self.data[self.meta_prefac, ind][:, cind] * ww, axis=0)\n ssum = np.sum(self.data[self.meta_denom, ind][:, cind] * ww, axis=0)\n\n dsum_jack = np.sum(self.data[self.dst_nom, ind][:, cind] * ww, axis=0)\n dsum_w_jack = np.sum(self.data[self.dst_denom, ind][:, cind] * ww, axis=0)\n\n self.dst_sub[cind, lab] = dsum_jack / (dsum_w_jack + Rs * wsum)\n\n osum_jack = np.sum(self.data[self.dsx_nom, ind][:, cind] * ww, axis=0)\n osum_w_jack = np.sum(self.data[self.dsx_denom, ind][:, cind] * ww, axis=0)\n self.dsx_sub[cind, lab] = osum_jack / (osum_w_jack + Rs * wsum)\n\n dsensum = np.sum(self.data[self.dsensum_s, ind][:, cind] * ww, axis=0)\n self.resp_sub_shear[cind, lab] = dsensum / ssum\n 
self.resp_sub_sel[cind, lab] = Rs\n\n self.snum_sub[cind, lab] = np.sum(self.data[self.snum_ind, ind][:, cind] * ww, axis=0) / ww_sum\n self.snum_sub[:, lab] /= np.sum(self.snum_sub[:, lab])", "def find_outlier_median_rank():\n\tprojs1 = ['rs-6d-c3-obj1', 'rs-6d-c3-obj2', 'sol-6d-c2-obj1', 'sol-6d-c2-obj2', 'wc+rs-3d-c4-obj1', 'wc+rs-3d-c4-obj2', 'wc+sol-3d-c4-obj1', 'wc+sol-3d-c4-obj2', 'wc+wc-3d-c4-obj1', 'wc+wc-3d-c4-obj2', 'wc-3d-c4-obj1', 'wc-3d-c4-obj2', 'wc-5d-c5-obj1', 'wc-5d-c5-obj2', 'wc-6d-c1-obj1', 'wc-6d-c1-obj2', 'wc-c1-3d-c1-obj1', 'wc-c1-3d-c1-obj2', 'wc-c3-3d-c1-obj1', 'wc-c3-3d-c1-obj2']\n\tprojs2 = ['AJStats', 'Apache', 'BerkeleyC', 'BerkeleyJ', 'clasp', 'Dune', 'Hipacc', 'HSMGP_num', 'LLVM', 'lrzip', 'sac', 'spear', 'SQL', 'WGet', 'x264', 'XZ']\n\n\tprojs = projs1 + projs2\n\tproj_anomaly = []\n\tfor proj in projs:\n\t\tanomaly_list = []\n\t\tfor i in range(50):\n\t\t\tcsv_file = \"../experiment/outlier_detection/\" + proj + \"/rank_based\" + str(i) + \".csv\"\n\t\t\tpdcontent = pd.read_csv(csv_file)\n\t\t\tanomaly_indexes = [i for i in range(len(pdcontent)) if pdcontent.iloc[i][\"isAnomaly\"] == -1]\n\t\t\t# anomaly_size = len(anomaly_indexes)\n\t\t\t\n\t\t\tif len(anomaly_indexes) == 0:\n\t\t\t\tcontinue\n\n\t\t\tmedian_anomaly = anomaly_indexes[0]+1\n\t\t\tanomaly_list.append(median_anomaly)\n\n\t\tprint(np.mean(anomaly_list), len(anomaly_list))\n\t\tproj_anomaly.append(np.mean(anomaly_list))\n\n\tprint(proj_anomaly)", "def calc_diag_stats(E, n_dims):\n ave, std, top, count = np.zeros((4, n_dims), dtype=np.float32)\n for i in range(n_dims):\n tmp = E.diagonal(i)\n if tmp.size == 0:\n top[i] = 0\n ave[i] = 0\n std[i] = 0\n count[i] = 0\n else:\n cutoff = np.percentile(tmp, 99)\n tmp = np.where(tmp < cutoff, tmp, cutoff)\n top[i] = cutoff\n ave[i] = np.mean(tmp)\n std[i] = np.std(tmp)\n count[i] = np.sum(tmp > 0)\n # TODO smoothing\n return ave, std, top, count", "def DCAPE (data, ES, origin_method):\n \n calc_start_time = time.time()\n def DCAPE_origin(data, Eqn, method):\n \"\"\" Will let you choose between 3 different options for finding the source origin height\n ---\n Inputs:\n data: the sounding dataset that is read in \n Eqn: Equation to use in the saturation vapor pressure calculation\n method: Noting which calculation you would like to use to dins the source origin height\n \n Outputs: \n press_oforig: the pressure value of the source origin height in hPa\n \"\"\"\n if method == 'DD_method1':\n '''this is the lowest thetae value in the sfc-400mb layer'''\n #only eval levels at which the pressure is greater than 400mb \n sfc_to_400 = data.loc[data['press'] >= 400]\n #find the thetae-e at all the levels that matched the above criteria\n sfc_to_400_Te = thetae(sfc_to_400.press, sfc_to_400.tempK, Eqn) \n #find which level the min thetae occurs at (from sfc to 400mb)\n row_oforig, Te_min = sfc_to_400_Te.idxmin(), sfc_to_400_Te.min()\n press_oforig = data.iloc[row_oforig]['press']\n return press_oforig\n\n \n elif method == 'DD_method2': \n '''this is the lowest thetae value in 100mb averaged layers'''\n ##Group into 100 mb layers (label each group with number i in the colunmn layer_group)\n data['layer_group'] = np.nan\n i, start_p, end_p = 0, data['press'].iloc[0], data['press'].iloc[-1]\n \n while (start_p >= end_p):\n top_p = start_p - 100\n data.loc[(data['press'] <= start_p) & (data['press'] > top_p), 'layer_group'] = i\n i, start_p = i+1, top_p \n\n #find the thetae-e at all the levels \n data['Te'] = thetae(data.press, data.tempK, Eqn) \n #Average the data 
via 100mb level groupings \n data_averaged=data.groupby(pd.Grouper(key='layer_group')).mean()\n pressure_ave=data.groupby(pd.Grouper(key='layer_group')).median()\n #find which layer the min thetae occurs at \n row_ofmin, Te_min = data_averaged['Te'].idxmin(), data_averaged['Te'].min()\n\n press_oforig = pressure_ave.loc[row_ofmin]['press']\n row_oforig = data.loc[data['press'] == press_oforig].index\n data=data.drop(columns=['layer_group'])\n return press_oforig\n\n elif method == 'DD_method3': \n #Will be used to calculate the density weighted average DCAPE below\n press_oforig= data['press'] \n \n return press_oforig\n\n def DD_CAPE_CALC(data, sfc_press, upper_press, ES): \n \"\"\" Using one of the options from above to calculate DCAPE\n ---\n Inputs: \n data: the sounding dataset that is read in \n sfc_press: surface pressure in hPa; lower bound of integration for DCAPE\n upper_press: upper level pressure in hPa; upper bound of integration for DCAPE\n ES: Noting which saturation vapor pressure equation you would like to use\n \n Output: \n dcape: Downdraft CAPE in J/kg\n \"\"\"\n # Trim data to only consider the levels within the identified layer\n # Flip order of the data to capture the descending motion of the parcel\n DD_layer = data.loc[(data['press'] <= sfc_press) & (data['press'] >= upper_press)].sort_values(by='press')\n ## Create the parcel profile for decent along a moist adiabat\n # # # # # # # # # # # # # # # # # # # # # # # # # # # \n #calc parcel path temps (aka moist adiabtic descent) \n parcel_temp = [DD_layer.tempK.values[0]]\n for i in range(1, len(DD_layer.index)):\n dz= DD_layer.hght[i]-DD_layer.hght[i-1] #new height - previous height\n new_temp=moist_lapse(parcel_temp[i-1], DD_layer.press.values[i-1], dz, ES)\n parcel_temp.append(new_temp)\n \n #convert to Celcius \n pa_t=[x - 273.15 for x in parcel_temp] \n #attach a new column of the parcel temps to the pandas dataframe\n DD_layer['p_tempC'], DD_layer['p_tempK'] = pa_t, parcel_temp\n DD_layer['TV_env'] = Virtual_Temp(DD_layer['press'], DD_layer['tempK'], ES)\n DD_layer['TV_par'] = Virtual_Temp(DD_layer['press'], DD_layer['p_tempK'], ES)\n ############\n \n ## Calculate the difference in profile and environmental temperature to integrate\n DD_layer['evn_par_diff']= DD_layer['TV_env'] - DD_layer['TV_par']\n with pd.option_context( 'display.max_columns', None): # more options can be specified also\n DD_layer = DD_layer.drop(columns=['hght', 'dewC', 'dewK', 'tempK', 'p_tempK'])\n try: DD_layer = DD_layer.drop(columns=['layer_group'])\n except: pass\n # print(DD_layer)\n \n # Calculate DCAPE\n dcape = ((mpconsts.Rd) * (np.trapz(DD_layer['evn_par_diff'], x=np.log(DD_layer['press'].values)) * units.kelvin)).to('J/kg')\n return dcape\n\n # Calculate bounds of integration\n # # # # # # # # # # # # # # # #\n sfc_press = data.iloc[0]['press'] #lower\n upper_press = DCAPE_origin(data, ES, origin_method) #upper\n \n #Option 3 is a density weighted average DCAPE\n if origin_method == 'DD_method3':\n dcape_array=[]\n for i in range(0, len(upper_press)):\n #Defining constants\n Rd = mpconsts.Rd.m*1000\n #Calculating the density of each temperature\n rho = ((data['press'][i] / (Rd*data['tempK'][i]))*-1000) #Converting from g to kg\n \n #Calculating the weighted average\n dcape_unweighted = DD_CAPE_CALC(data, sfc_press, upper_press[i], ES)\n dcape_weighted = dcape_unweighted * rho\n dcape_array.append(dcape_weighted)\n \n #Final_DCAPE = statistics.mean(dcape_array)\n Final_DCAPE = (sum(dcape_array)/(i+1))\n \n else: Final_DCAPE 
=DD_CAPE_CALC(data, sfc_press, upper_press, ES)\n \n calculation_time= time.time() - calc_start_time\n return Final_DCAPE.m, calculation_time", "def analyze_clusters(eq_clusters, eq_dict):\r\n\r\n for cluster in range(len(eq_clusters)):\r\n\r\n print(\"Analysis of cluster\", cluster)\r\n\r\n # put the data of the cluster into a list\r\n clust_data = []\r\n for i in eq_clusters[cluster]:\r\n clust = eq_clusters[cluster]\r\n for i in range(len(clust)):\r\n clust_data.append(eq_dict[clust[i]])\r\n\r\n print(\" Analysis of magnitude data\")\r\n\r\n # Put magnitudes into list\r\n line = []\r\n magnitude = []\r\n for i in range(len(clust_data)):\r\n line = clust_data[i]\r\n magnitude.append(line[2])\r\n \r\n # Mean\r\n fmt = \" Mean magnitude = {:.1f}\"\r\n print(fmt.format(data_mean(magnitude)))\r\n\r\n # Median\r\n fmt = \" Median magnitude = {:.1f}\"\r\n print(fmt.format(data_median(magnitude)))\r\n\r\n # Standard Deviation\r\n variance = data_mean_variance(magnitude)\r\n standard_deviation = math.sqrt(variance[1])\r\n fmt = \" Standard deviation = {:.2f}\"\r\n print(fmt.format(standard_deviation))\r\n\r\n print(\" Analysis of depth data\")\r\n\r\n # Put depth into list\r\n line = []\r\n depth = []\r\n for i in range(len(clust_data)):\r\n line = clust_data[i]\r\n depth.append(line[3])\r\n\r\n # Mean\r\n fmt = \" Mean depth = {:.1f} miles\"\r\n print(fmt.format(data_mean(depth)))\r\n\r\n # Median\r\n fmt = \" Median depth = {:.1f} miles\"\r\n print(fmt.format(data_median(depth)))\r\n\r\n # Standard Deviation\r\n variance = data_mean_variance(depth)\r\n standard_deviation = math.sqrt(variance[1])\r\n fmt = \" Standard deviation = {:.2f} miles\"\r\n print(fmt.format(standard_deviation))", "def calculate_TODs(self, pce):\n mf_amet_upper = self.atl01_dict[pce].mf_amet_upper\n mf_amet_lower = self.atl01_dict[pce].mf_amet_lower\n # This tags each photon in the Major Frame\n raw_pce_mframe_cnt_ph = self.atl01_dict[pce].raw_pce_mframe_cnt_ph\n # This maps which laser fire \"made\" the returns. Many photons can have same pulse id.\n # It is not always true that pulse id is 1-200, though usually is.\n ph_id_pulse = self.atl01_dict[pce].ph_id_pulse \n\n # This gives the major frame numnber of each shot\n mframe_counts = raw_pce_mframe_cnt_ph - raw_pce_mframe_cnt_ph[0]\n # This gives the list of major frames. 
Length = number of major frames.\n mframes = list(set(mframe_counts))\n\n # Leading lower element coarse counts.\n tx_cc = self.tx_cc(pce) #equations.tx_cc(self.atl01, pce)\n TX_CC = self.TX_CC(tx_cc)\n\n if self.verbose:\n print(\" \")\n print(\"pce: \", pce)\n print(\"mframe_counts: \", len(mframe_counts), mframe_counts)\n print(\"tx_cc: \", len(tx_cc), tx_cc)\n print(\"TX_CC: \", len(TX_CC), TX_CC)\n\n # Initialize lists to store values per MF.\n amet_FirstT0MF_permf = []\n GPSTime_FirstT0MF_permf = []\n delta_GPSTime_permf = []\n GPSTime_T0_permf = []\n GPSTime_ll_permf = []\n DeltaTime_ll_permf = []\n mf_amet_upper_permf = []\n mf_amet_lower_permf = []\n tx_cc_permf = []\n TX_CC_permf = []\n\n if self.mf_limit != None:\n end_mf = self.mf_limit\n else:\n end_mf = len(mframes)\n\n for mframe in mframes[:end_mf]:\n print(\"PCE {}, mframe {}\".format(pce, mframe))\n\n mframe = int(mframe)\n # mask out all events not in major frame.\n mframe_mask = np.where(mframe_counts == mframe)[0]\n TX_CC_mframe = TX_CC[mframe_mask]\n ph_id_pulse_mframe = ph_id_pulse[mframe_mask]\n # List unique pulse IDs available in the major frame\n pulses = list(set(ph_id_pulse_mframe))\n\n amet_FirstT0MF = self.amet_FirstT0MF(mf_amet_upper[mframe], mf_amet_lower[mframe])\n GPSTime_FirstT0MF = self.GPSTime_FirstT0MF(pce, amet_FirstT0MF)\n\n # Do a first calculation of T0_effective and GPSTime_T0\n # Need the GPS time for each T0 in order to establish the GPS\n # time of each transmit (Tx) that uses T0 as reference\n T0_effective = self.T0_effective(pce, TX_CC_mframe, ph_id_pulse_mframe)\n GPSTime_T0 = self.GPSTime_T0(GPSTime_FirstT0MF, T0_effective) \n\n # Determine GPS time of the LL now that know each shot's T0effective GPS time.\n GPSTime_ll = self.GPSTime_ll(pce, TX_CC_mframe, GPSTime_T0) \n # ToD value for PCE, each MF?\n DeltaTime_ll = self.DeltaTime_ll(GPSTime_ll)\n\n # Append to lists.\n GPSTime_T0_permf.append(GPSTime_T0)\n GPSTime_ll_permf.append(GPSTime_ll)\n DeltaTime_ll_permf.append(DeltaTime_ll)\n amet_FirstT0MF_permf.append(amet_FirstT0MF)\n GPSTime_FirstT0MF_permf.append(GPSTime_FirstT0MF)\n\n mf_amet_upper_permf.append(mf_amet_upper[mframe])\n mf_amet_lower_permf.append(mf_amet_lower[mframe])\n tx_cc_permf.append(tx_cc[mframe_mask])\n TX_CC_permf.append(TX_CC[mframe_mask])\n\n if self.verbose:\n print(\" mframe: \", mframe)\n print(\" mframe_mask: \", len(mframe_mask), mframe_mask)\n print(\" TX_CC_mframe: \", len(TX_CC_mframe), TX_CC_mframe)\n print(\" ph_id_pulse_mframe: \", len(ph_id_pulse_mframe), ph_id_pulse_mframe)\n print(\" amet_FirstT0MF: \", amet_FirstT0MF)\n print(\" GPSTime_FirstT0MF: \", GPSTime_FirstT0MF)\n print(\" T0_effective: \", T0_effective)\n print(\" GPSTime_T0: \", GPSTime_T0)\n print(\" GPSTime_ll: \", len(GPSTime_ll), GPSTime_ll)\n print(\" DeltaTime_ll: \", len(DeltaTime_ll), DeltaTime_ll)\n \n # Flatten the arrays in major frame key. 
Only want one array.\n GPSTime_T0_permf = flatten_mf_arrays(GPSTime_T0_permf)\n GPSTime_ll_permf = flatten_mf_arrays(GPSTime_ll_permf)\n DeltaTime_ll_permf = flatten_mf_arrays(DeltaTime_ll_permf)\n amet_FirstT0MF_permf = np.array(amet_FirstT0MF_permf).flatten()\n GPSTime_FirstT0MF_permf = np.array(GPSTime_FirstT0MF_permf).flatten()\n mf_amet_upper_permf = np.array(mf_amet_upper_permf).flatten()\n mf_amet_lower_permf = np.array(mf_amet_lower_permf).flatten()\n tx_cc_permf = np.array(tx_cc_permf).flatten()\n TX_CC_permf = np.array(TX_CC_permf).flatten() \n\n # Return all values for this PCE in a named tuple.\n pce_variables = PCEVariables(\n mf_amet_upper=mf_amet_upper_permf, #mf_amet_upper, \n mf_amet_lower=mf_amet_lower_permf, #mf_amet_lower, \n tx_cc=tx_cc_permf, #tx_cc, \n TX_CC=TX_CC_permf, #TX_CC,\n amet_FirstT0MF=amet_FirstT0MF_permf, \n T0_effective=T0_effective, \n GPSTime_FirstT0MF=GPSTime_FirstT0MF_permf, \n GPSTime_T0=GPSTime_T0_permf, \n GPSTime_ll=GPSTime_ll_permf, \n DeltaTime_ll=DeltaTime_ll_permf) \n\n return pce_variables", "def amalgamateResults(results, architectures, dataset, PCA = False):\n aggregatedDict = dict()\n variables = results.Dataset.unique()\n models = results.Method.unique()\n\n \n for architecture in architectures:\n # Set the hidden unit definition depending on the architecture\n if(architecture == 'CNN' or architecture == 'RF'):\n hidden_sizes = None\n elif(architecture == 'MLP' or architecture == 'LSTM'):\n hidden_sizes = [32, 16, 8, 4, 2]\n else:\n hidden_sizes = [[32], [32, 16], [32, 16, 8], [32, 16, 8, 4], [32, 16, 8, 4, 2]] \n\n for variable in variables:\n variable = variable.replace(dataset+': ', '')\n \n # Edge case for PCA dataset with different naming convention.\n if(PCA == True):\n variable = 'PCA'\n if(hidden_sizes is not None):\n for hidden in hidden_sizes:\n #If we are dealing with ALL models, then file naming has a different sctructure.\n if(variable == 'ALL'):\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) +'_' + str(hidden).replace('[', '').replace(']', '').replace(', ', '_') + '.gzip')\n else:\n #For each variation of a certain variable read the relevant file and concatenate the predictions to the dataframe stored in the aggrefatedDict dictionairy for said variable.\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) +'_' + str(hidden).replace('[', '').replace(']', '').replace(', ', '_') + '_' + str(variable).replace(' ', '').replace('%', '') + '.gzip')\n \n try:\n df = pd.concat([aggregatedDict.get(variable), results.Pred], axis = 1)\n except:\n df = results.Pred\n aggregatedDict.update({variable: df})\n\n #For the architectures withouth hidden units, those are irrelevant. 
\n elif(hidden_sizes is None):\n #If we are dealing with ALL models, then file naming has a different sctructure.\n if(variable == 'ALL'):\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) + '.gzip')\n else:\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) +'_' + str(variable).replace(' ', '').replace('%', '') + '.gzip')\n \n try:\n df = pd.concat([aggregatedDict.get(variable), results.Pred], axis = 1)\n except:\n df = results.Pred\n aggregatedDict.update({variable: df})\n \n return aggregatedDict", "def PCA_graph(INPUT_FILE, DATASET_LABEL):\n def SuperPop(x):\n if x in [\"GBR\" , \"CEU\" , \"TSI\" , \"FIN\" , \"IBS\" ]:\n return \"EUR\"\n elif x in [\"CHB\" , \"JPT\" , \"CHS\" , \"CDX\" , \"KHV\"]:\n return \"EAS\"\n elif x in [\"YRI\" , \"LWK\" , \"GWD\" , \"MSL\" , \"ESN\" , \"ASW\" , \"ACB\" ]:\n return \"AFR\"\n elif x in [\"MXL\" , \"PUR\" , \"CLM\" , \"PEL\" ]:\n return \"AMR\"\n elif x in [\"GIH\" , \"PJL\" , \"BEB\" , \"STU\" , \"ITU\" ]:\n return \"SAS\"\n else:\n return \"Samples\"\n ## Starting to handle big data so bringing in Pandas\n raw = pd.read_csv(INPUT_FILE, sep=\" \" , header=None )\n ## put 1000g data into superpopulation groups and define dataset\n clean = (raw[list(raw.columns[:4])])\n clean.columns = ['FAM_ID' , 'ID' , 'C1' , 'C2' ]\n clean.set_index(['FAM_ID'], inplace = True)\n ## setting up super population codes to map colours for graph\n clean[\"POP\"] = clean.ID.apply(SuperPop)\n groups = clean.groupby('POP')\n ## Plotting\n fig , ax = plt.subplots()\n ax.margins(0.1)\n for name, group in groups:\n ax.plot(group.C1, group.C2, marker='o', linestyle='', ms=4, label=name)\n ax.legend(numpoints=1, loc='best')\n plt.xlabel('Component 1' )\n plt.ylabel('Component 2' )\n plt.suptitle(\"PCA on \" + DATASET_LABEL , weight= 'bold')\n fig.savefig( DATASET_LABEL +\".PCA_results.pdf\")\n plt.close()\n ##kmean clustering to find outliers\n find_out = clean[['C1' , 'C2']].copy()\n k_means = cluster.KMeans(n_clusters=5,)\n k_means.fit(find_out)\n centroids = k_means.cluster_centers_\n labels = k_means.labels_\n results = pd.DataFrame([clean.index,labels]).T\n results.columns = [\"FAM_ID\" , \"k_group\"]\n results[\"ID\"] = clean[[\"ID\"]].copy()\n results.set_index(['FAM_ID'], inplace = True)\n output_label = (DATASET_LABEL + \".PCA_kmeans.txt\")\n ## Display samples that are not Europeans in dataset\n merge_df = pd.merge(clean , results, right_index=True , left_index=True )\n merge_df['k_group'] = merge_df['k_group'].astype(int)\n test = merge_df.loc[merge_df['POP'] == \"EUR\" , ['k_group']].apply(np.median)\n Euro_group = int(test)\n #print (\"European cluster is :\" + str(Euro_group))\n your_samples = merge_df.loc[merge_df['POP'] == \"Samples\" , ['k_group']]\n your_samples['check'] = np.where(your_samples['k_group'] == Euro_group , 'good' , 'bad')\n bad_ids = your_samples[your_samples['check'] =='bad']\n after = (clean[~clean.index.isin(bad_ids.index)])\n count = len(bad_ids.index.get_level_values(0))\n #print (str(count) + \" Samples fall outside the European cluster \")\n after_groups = after.groupby('POP')\n ### Plotting with outliers removed\n fig , ax = plt.subplots()\n ax.margins(0.1)\n for name, after_groups in after_groups:\n ax.plot(after_groups.C1, after_groups.C2, marker='o', linestyle='', ms=4, label=name)\n ax.legend(numpoints=1, loc='best')\n plt.xlabel('Component 1' )\n plt.ylabel('Component 2' )\n plt.suptitle(\"Outliers removed PCA on \" + DATASET_LABEL + \" - \" + str(count) + \" Samples were 
removed\" ,weight= 'bold' )\n #print (\"Graph saved as \" + DATASET_LABEL + \".PCA_results.pdf\")\n #print (\"Outliers removed Graph saved as \" + DATASET_LABEL + \".outliers_removed_PCA_results.pdf\")\n fig.savefig( DATASET_LABEL +\".outliers_removed_PCA_results.pdf\")\n output_id = (DATASET_LABEL + \".outliers.txt\")\n #print (\"bad IDs exported to text file : \" + output_id)\n bad_ids.to_csv(output_id , sep=\"\\t\" , header=None )\n plt.close()", "def analyse_all_cages(all_cage_sets, read_data):\n\n cages_cis_wins = []\n cages_not_wins = []\n lig_studied = []\n energy_preferences = []\n plane_devs = []\n sqpl_ops = []\n lses = []\n\n experiments = ['5D1', '4D2', '5D3', '3D1']\n\n if read_data and exists('all_cage_results.txt'):\n data = read_csv('all_cage_results.txt')\n\n for i, row in data.iterrows():\n lig_name = row['lig']\n stable = row['stable']\n preferred = row['preferred']\n cis_preferred_and_stable = all([stable, preferred])\n if cis_preferred_and_stable:\n cages_cis_wins.append(lig_name)\n else:\n cages_not_wins.append(lig_name)\n\n lig_studied.append(lig_name)\n energies = [\n float(row['energy_A']), float(row['energy_B']),\n float(row['energy_C']), float(row['energy_D']),\n ]\n if energies[2] == 0:\n energy_preferences.append(\n min([\n i for i in energies if i != 0\n ])\n )\n else:\n energy_preferences.append(-energies[2])\n plane_devs.append(float(row['plane_dev_C']))\n sqpl_ops.append(float(row['sqpl_op_C']))\n lses.append(float(row['lse_C']))\n\n else:\n with open('all_cage_results.txt', 'w') as f:\n f.write(\n 'lig,stable,preferred,'\n 'plane_dev_A,plane_dev_B,plane_dev_C,plane_dev_D,'\n 'sqpl_op_A,sqpl_op_B,sqpl_op_C,sqpl_op_D,'\n 'lse_A,lse_B,lse_C,lse_D,'\n 'energy_A,energy_B,energy_C,energy_D\\n'\n )\n for lig_name in all_cage_sets:\n # if lig_name not in experiments:\n # continue\n print('ligand:', lig_name)\n lig_studied.append(lig_name)\n cages = all_cage_sets[lig_name]\n energies = CA.get_cage_energies(lig_name, cages)\n m_distortions = CA.get_metal_centre_distortion(\n name=lig_name,\n cages=cages\n )\n l_distortions = CA.get_ligand_distortion(\n name=lig_name,\n cages=cages,\n # bites_dist=ligands[lig_name][1],\n # NN_dists=ligands[lig_name][2]\n bites_dist=None,\n NN_dists=None\n )\n\n # print('bites', np.mean(ligands[lig_name][1]))\n # print('bites', np.std(ligands[lig_name][1]))\n # print('nn', np.mean(ligands[lig_name][2]))\n # print('nn', np.std(ligands[lig_name][2]))\n # for i in m_distortions['plane_dev'][0]:\n # print(i, m_distortions['plane_dev'][0][i])\n #\n # for i in l_distortions['bite_angle'][0]:\n # print(i, l_distortions['bite_angle'][0][i])\n #\n # for i in l_distortions['NN_dist'][0]:\n # print(i, l_distortions['NN_dist'][0][i])\n # print('---')\n\n stable = CA.check_stability(\n l_distortions=l_distortions,\n m_distortions=m_distortions\n )\n preferred, energy_sep = CA.check_preference(\n energies,\n energy_cutoff=6.0\n )\n energy_preferences.append(energy_sep)\n plane_devs.append(m_distortions['plane_dev'][0]['C'])\n sqpl_ops.append(m_distortions['min_q4_op'][0]['C'])\n lses.append(l_distortions['sum_strain'][0]['C'])\n\n cis_preferred_and_stable = all([stable, preferred])\n if cis_preferred_and_stable:\n cages_cis_wins.append(lig_name)\n else:\n cages_not_wins.append(lig_name)\n\n f.write(\n f'{lig_name},{stable},{preferred},'\n f\"{m_distortions['plane_dev'][0]['A']},\"\n f\"{m_distortions['plane_dev'][0]['B']},\"\n f\"{m_distortions['plane_dev'][0]['C']},\"\n f\"{m_distortions['plane_dev'][0]['D']},\"\n 
f\"{m_distortions['min_q4_op'][0]['A']},\"\n f\"{m_distortions['min_q4_op'][0]['B']},\"\n f\"{m_distortions['min_q4_op'][0]['C']},\"\n f\"{m_distortions['min_q4_op'][0]['D']},\"\n f\"{l_distortions['sum_strain'][0]['A']},\"\n f\"{l_distortions['sum_strain'][0]['B']},\"\n f\"{l_distortions['sum_strain'][0]['C']},\"\n f\"{l_distortions['sum_strain'][0]['D']},\"\n f\"{energies['A']},{energies['B']},\"\n f\"{energies['C']},{energies['D']}\\n\"\n )\n print('-----------------------')\n\n print('-----------------------------------------')\n # Plot distribution of all cages.\n total_cages = len(cages_cis_wins) + len(cages_not_wins)\n print(\n f'{len(cages_cis_wins)} cages with cis preferred '\n f'and stable of {total_cages}.'\n )\n print('-----------------------------------------')\n print('candidate cages:')\n for i in sorted(cages_cis_wins):\n print(i)\n\n PL.plot_isomer_distributions()\n\n PL.plot_all_cages_bars(\n ligands=lig_studied,\n experiments=experiments,\n cages_cis_wins=cages_cis_wins,\n cages_not_wins=cages_not_wins,\n y_value=energy_preferences,\n y_title='stability of C isomer [kJmol$^{-1}$]',\n y_bar=6.0,\n suffix='energypreference',\n )\n\n PL.plot_all_cages_bars(\n ligands=lig_studied,\n experiments=experiments,\n cages_cis_wins=cages_cis_wins,\n cages_not_wins=cages_not_wins,\n y_value=lses,\n y_title=r'sum(ligand strain energy) [kJmol$^{-1}$]',\n y_bar=0.0,\n suffix='lses',\n )\n\n PL.plot_all_cages_bars(\n ligands=lig_studied,\n experiments=experiments,\n cages_cis_wins=cages_cis_wins,\n cages_not_wins=cages_not_wins,\n y_value=plane_devs,\n y_title=r'max. plane deviation [$\\mathrm{\\AA}$]',\n y_bar=0.3,\n suffix='planedevs',\n )\n\n PL.plot_all_cages_bars(\n ligands=lig_studied,\n experiments=experiments,\n cages_cis_wins=cages_cis_wins,\n cages_not_wins=cages_not_wins,\n y_value=sqpl_ops,\n y_title=r'$q_{\\mathrm{sqp,min}}$',\n y_bar=0.95,\n suffix='sqplops',\n )\n\n PL.plot_energetics_and_geom(\n ligands=lig_studied,\n experiments=experiments,\n cages_cis_wins=cages_cis_wins,\n cages_not_wins=cages_not_wins,\n energy_preferences=energy_preferences,\n plane_devs=plane_devs,\n sqpl_ops=sqpl_ops,\n )\n PL.plot_energetics_and_geom_3D(\n ligands=lig_studied,\n experiments=experiments,\n energy_preferences=energy_preferences,\n plane_devs=plane_devs,\n sqpl_ops=sqpl_ops,\n )\n\n utilities.draw_molecules(\n ligands=lig_studied,\n energy_preferences=energy_preferences,\n plane_devs=plane_devs,\n sqpl_ops=sqpl_ops,\n )", "def get_median_composite(path_to_collection, start_date, end_date, coord_point, num_of_scenes, sort_feature=''):\n\n # Load a Landsat 8 collection.\n collection = ee.ImageCollection(path_to_collection)\n # Filter by date and location.\n collection = collection.filterBounds(ee.Geometry.Point(coord_point[0], coord_point[1])).filterDate(start_date, end_date)\n # Sort by increasing cloudiness.\n collection = collection.sort(sort_feature)\n\n # Compute the median of each pixel for each band of the 5 least cloudy scenes.\n median = collection.limit(num_of_scenes).reduce(ee.Reducer.median());\n\n\n # pprint(median.getInfo())\n\n # print(get_median_composite('LANDSAT/LC08/C01/T1', '2014-01-01', '2014-12-31', (-122.262, 37.8719), 5, sort_feature='CLOUD_COVER'))", "def summarize(self):\r\n\r\n # Time for steady-state of the Systolic Array: (<array_size>-1 * 2).\r\n # Therefore, we reduce that number*2 from clock counting (time to fill the Systolic Array, and time to evacuate)\r\n self.clock -= 2*((self.array_size-1)*2)\r\n\r\n # For the same reason, we delete from 
mac_utility list 2*(<array_size>-1) from the beginning,\r\n # and 2*(<array_size>-1) from the end\r\n for pe_iindex in range(self.array_size):\r\n\r\n for pe_jindex in range(self.array_size):\r\n\r\n self.pe_array[pe_iindex][pe_jindex].mac_utility = self.pe_array[pe_iindex][pe_jindex].mac_utility[2*((self.array_size-1)*2):]\r\n\r\n self.pe_array[pe_iindex][pe_jindex].mac_utility = self.pe_array[pe_iindex][pe_jindex].mac_utility[:-2*((self.array_size - 1)*2)]\r\n\r\n for pe_iindex in range(self.array_size):\r\n\r\n for pe_jindex in range(self.array_size):\r\n\r\n self.results[:, pe_iindex, pe_jindex] = self.pe_array[pe_iindex][pe_jindex].result\r\n\r\n self.utilization_per_pe[pe_iindex][pe_jindex] = self.pe_array[pe_iindex][pe_jindex].mac_utility.count(1) / self.clock\r\n\r\n SystolicArrayLogger.info(\"Final Clock: {}\".format(self.clock))\r\n SystolicArrayLogger.info(\"Clock Cycles Per Matrix On Average: {}\".format(self.clock / self.thread_count))\r\n SystolicArrayLogger.info(\"Utilization Per PE:\\n{}\".format(self.utilization_per_pe))\r\n SystolicArrayLogger.info(\"Average, Std Utilization Per PE For Systolic Array: {}, {}\".format(self.utilization_per_pe.mean(), self.utilization_per_pe.std()))\r\n SystolicArrayLogger.info(\"\\n\\nResults:\\n{}\".format(self.results))", "def main(test, pred, metric = \"RMSE\"):\n \n N = len(pred)\n \n if metric == \"RMSE\":\n out = []\n for i in np.arange(N):\n out.append(math.sqrt(((test[i] - pred[i])**2)/2))\n \n elif metric == \"MAPE\":\n out = []\n for i in np.arange(N):\n out.append(((np.abs((test[i] - pred[i])/test[i]))/2)*100)\n \n elif metric == \"MPE\": \n out = []\n for i in np.arange(N):\n out.append((((test[i] - pred[i])/pred[i])/2))\n \n elif metric == \"MAD\":\n out = []\n for i in np.arange(N):\n out.append(np.abs(test[i] - pred[i])/2)\n \n elif metric == \"NLL\":\n out = []\n for i in np.arange(N):\n out.append(-np.sum(stats.norm.logpdf(test[i], loc=pred[i]))/100)\n \n elif metric == \"MSE\":\n out = []\n for i in np.arange(N):\n out.append(((test[i] - pred[i])**2)/2)\n \n else:\n print(\"Key error\")\n \n return out", "def precinct_level_estimates(self):\n # TODO: make this output match r_by_c version in shape, num_precincts x 2 x 2\n percentiles = [2.5, 97.5]\n num_precincts = len(self.precinct_pops)\n\n # The stracking on the next line convers to a num_samples x num_precincts array\n precinct_level_samples_gp1 = (\n self.sim_trace[\"posterior\"][\"b_1\"].stack(all_draws=[\"chain\", \"draw\"]).values.T\n )\n precinct_posterior_means_gp1 = precinct_level_samples_gp1.mean(axis=0)\n precinct_credible_intervals_gp1 = np.percentile(\n precinct_level_samples_gp1, percentiles, axis=0\n ).T\n\n # The stracking on the next line convers to a num_samples x num_precincts array\n precinct_level_samples_gp2 = (\n self.sim_trace[\"posterior\"][\"b_2\"].stack(all_draws=[\"chain\", \"draw\"]).values.T\n )\n precinct_posterior_means_gp2 = precinct_level_samples_gp2.mean(axis=0)\n precinct_credible_intervals_gp2 = np.percentile(\n precinct_level_samples_gp2, percentiles, axis=0\n ).T # num_precincts x 2\n\n precinct_posterior_means = np.empty((num_precincts, 2, 2))\n precinct_posterior_means[:, 0, 0] = precinct_posterior_means_gp1\n precinct_posterior_means[:, 0, 1] = 1 - precinct_posterior_means_gp1\n precinct_posterior_means[:, 1, 0] = precinct_posterior_means_gp2\n precinct_posterior_means[:, 1, 1] = 1 - precinct_posterior_means_gp2\n\n precinct_credible_intervals = np.empty((num_precincts, 2, 2, 2))\n precinct_credible_intervals[:, 0, 0, :] = 
precinct_credible_intervals_gp1\n precinct_credible_intervals[:, 0, 1, :] = 1 - precinct_credible_intervals_gp1\n precinct_credible_intervals[:, 1, 0, :] = precinct_credible_intervals_gp2\n precinct_credible_intervals[:, 1, 1, :] = 1 - precinct_credible_intervals_gp2\n\n return (precinct_posterior_means, precinct_credible_intervals)", "def median_price_municipality_wall(self, df):\n\n df = AnalyseData().sorted_by_city_in_wallonia(df)\n test = df.groupby(['locality']).agg({'price': ['median']})\n res = test.apply(lambda x: x.sort_values(ascending=False))\n\n res.plot(kind=\"bar\")\n plt.legend(['Median price'])\n plt.title('Properties median price in Wallonia by city')\n plt.show()", "def median(self):\n\n # generate combined list of all pixels in CCD called 'arr'\n larr = []\n for win in self._data:\n larr.append(win.flatten())\n arr = np.concatenate(larr)\n\n return np.median(arr)", "def calc_primary_emissions(nei_data):\n\n def get_primary_poll_for_industry(nei_data, yr):\n \"\"\"Function to get 'primary pollutants' for each industry.\n 'primary pollutants' are defined as the three pollutants that are highest, relative to the \n corss-industry emission values. \n \"\"\"\n # Get mean emissions totals for each pollutant, for each industry.\n needed_cols = ['FAC_INDUSTRY'] + \\\n [col for col in nei_data.columns if '2014' in col]\n mean_emiss = nei_data[needed_cols].groupby('FAC_INDUSTRY').mean()\n\n # Norm. emissions of each pollutant by dividing by the mean across all industries. Primary pollutants\n # for an industry are the those that have the largest emissoins relative to cross-industry means.\n primary_poll = {}\n mean_emiss_quant = mean_emiss.copy()\n for i, row in mean_emiss_quant.iterrows():\n mean_emiss_quant.loc[i,\n :] = mean_emiss_quant.loc[i, :]/mean_emiss.mean()\n primary_poll[i] = {'poll'+str(i+1): name.split(':')[1] for\n i, name in enumerate(list(row.nlargest(3).index))}\n return primary_poll\n\n def calc_mean_emiss_by_industry(nei_data, years=['2008', '2011', '2014']):\n \"\"\"Function for calculating mean emissions of each pollutant, for each industry\"\"\"\n mean_emiss_by_year = {}\n for year in years:\n needed_cols = ['FAC_INDUSTRY'] + \\\n [col for col in nei_data.columns if year in col]\n mean_emiss = nei_data[needed_cols].groupby('FAC_INDUSTRY').mean()\n mean_emiss_by_year[year] = mean_emiss.rename(columns={col: col.split(':')[1] for col\n in mean_emiss.columns})\n return mean_emiss_by_year\n\n def add_primary_poll_cols(row, poll_num, year, primary_poll, mean_emiss):\n \"\"\"Function for calculating emissions for the primary pollutants for a SINGLE facility, normalized by \n the emissions for all facilities in the industry. 
\n \"\"\"\n poll_name = primary_poll[row['FAC_INDUSTRY']]['poll'+str(poll_num)]\n poll_val = row[':'.join(['total_emissions', poll_name, year])] / \\\n mean_emiss[year].loc[row['FAC_INDUSTRY'], poll_name]\n return poll_val\n\n primary_poll = get_primary_poll_for_industry(nei_data, '2014')\n mean_emiss = calc_mean_emiss_by_industry(\n nei_data, years=['2008', '2011', '2014'])\n for year in ['2008', '2011', '2014']:\n for poll_num in range(1, 4):\n new_col = []\n for _, row in nei_data.iterrows():\n new_col.append(add_primary_poll_cols(\n row, poll_num, year, primary_poll, mean_emiss))\n nei_data['poll'+str(poll_num)+'_'+year] = new_col\n\n return nei_data, primary_poll", "def calculate_errors(dataset,cluster_indices,predict_energies):\n \n print(\"Calculating errors for each cluster...\")\n\n \n #helping variables\n n_clusters=len(cluster_indices)\n R,F,E=dataset[\"R\"],dataset[\"F\"],dataset[\"E\"]\n mse=[]\n sample_errors=[]\n\n #loop through clusters\n #predict results for each cluster\n #calculate error, save in list\n sys.stdout.write( \"\\r[0/{}] done\".format(n_clusters) )\n sys.stdout.flush()\n for i in range(n_clusters):\n cind=cluster_indices[i] #cluster indices\n cr,cf,ce=R[cind],F[cind],E[cind] #cluster R \n shape=cr.shape\n n_samples,n_atoms,n_dim=shape[0],shape[1],False\n if len(shape)>2:\n n_dim=shape[2]\n \n if predict_energies:\n cf=np.array(ce)\n cf_pred=predict.predict_energies(cr)\n\n else:\n #reshaping\n if n_dim:\n cf=np.reshape(cf,(n_samples,n_atoms*n_dim)) \n else:\n cf=np.reshape(cf,(n_samples,n_atoms)) \n cf_pred=predict.predict(cr)\n\n err=(cf-cf_pred)**2\n sample_errors.append(err.mean(axis=1))\n mse.append(err.mean())\n\n #print out\n sys.stdout.write( \"\\r[{}/{}] done\".format(i+1,n_clusters) )\n sys.stdout.flush()\n \n print(\"\") \n #order the cluster_indices etc\n sorted_ind=np.argsort(mse)\n mse=np.array(mse)\n cluster_indices=np.array(cluster_indices)\n sample_errors=np.array(sample_errors)\n\n return mse[sorted_ind],cluster_indices[sorted_ind],sample_errors[sorted_ind]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return prediction formatted according to the content type
def output_fn(prediction, content_type): return prediction
[ "def make_predictions(text, types, pipeline):\n types_order, preds, judgements = list(), dict(), dict()\n\n if \"toxic\" in types:\n preds[\"Toxicity\"], judgements[\"Toxicity\"] = pipeline.predict_toxicity_ulm(text)\n types_order.append(\"Toxicity\")\n if \"insult\" in types:\n preds[\"Insult\"], judgements[\"Insult\"] = pipeline.predict_insult_ulm(text)\n types_order.append(\"Insult\")\n if \"obscene\" in types:\n preds[\"Obscenity\"], judgements[\"Obscenity\"] = pipeline.predict_obscenity_ulm(text)\n types_order.append(\"Obscenity\")\n if \"prejudice\" in types:\n preds[\"Prejudice\"], judgements[\"Prejudice\"] = pipeline.predict_identity_ulm(text)\n types_order.append(\"Prejudice\")\n\n return types_order, preds, judgements", "def interpreted_prediction(prediction):\n class_dict = {0: 'bad', 1: 'good'}\n rp = int(round(prediction[0][0]))\n return float(prediction[0][0]), rp, class_dict.get(rp)", "def classify_request(self):\n\n # Detects the response of the text\n try:\n response = self.client.analyze_entities(self.document, encoding_type='UTF32', )\n\n \"\"\"\n 0 = 'UNKNOWN'\n 1 = 'PERSON'\n 2 = 'LOCATION'\n 3 = 'ORGANIZATION'\n 4 = 'EVENT'\n 5 = 'WORK_OF_ART'\n 6 = 'CONSUMER_GOOD'\n 7 = 'OTHER'\n \"\"\"\n\n classified_text = [{}]\n\n for entity in response.entities:\n classified_text.append(entity)\n classified_text.pop(0)\n return classified_text\n except:\n print(\"Classification error\")", "def predict_type(self) -> 'PreElabRDLType':\n raise NotImplementedError", "def predict(self, text):", "def make_prediction(pred_head):\n print(\"predicting---------------------------------\")\n print(\"head is \", pred_head)\n print(\"body is \", pred_body)\n\n res = model.predict([pred_head], pred_body)\n print(classes[res[0]])\n return classes[res[0]]", "def pred_transform(self,pred):\n\n \n gender_label,gender_confidence = np.argmax(pred, axis=1),np.int16(np.max(pred*100, axis=1))\n \n gender_label=np.where(gender_label==0, 'male', gender_label) \n \n gender_label=np.where(gender_label=='1', 'female', gender_label) \n\n results={'gender':{'label':gender_label.tolist(),\n\n 'confidence':gender_confidence.tolist()}}\n return results", "def datatype(self) -> str:", "def pred_lang(text, model):\n \n return model.predict(text)[0][0].replace('__label__', '')", "def _format_predictions(predictions: np.ndarray,\n is_binary_classification: bool,\n one_minus_msp: bool = False):\n if is_binary_classification:\n assert_msg = (\"Expect binary classification: predictions must have shape \"\n f\" (dataset size, 2); received {predictions.shape}.\")\n assert predictions.ndim == 2 == predictions.shape[1], assert_msg\n # In the binary classification case, the retrieval metrics expect the\n # predictions to be that of the positive class (i.e., with index 1).\n predictions = predictions[:, 1]\n else:\n # In the multiclass case, we take the maximum predictions across classes.\n # This is motivated by the OOD detection setting with the standard approach:\n # \"Maximum over softmax probabilities\" (MSP),\n # See https://arxiv.org/pdf/2106.03004.pdf.\n # TODO(rjenatton): Generalize this logic to other known schemes, e.g.,\n # entropy(predictions, axis=-1) or Mahalanobis distance.\n predictions = np.max(predictions, axis=-1)\n # Depending on the convention used in labeling the IN and OOD datasets (see\n # https://arxiv.org/pdf/1610.02136.pdf), we may have to consider 1 - MSP.\n if one_minus_msp:\n predictions = 1.0 - predictions\n return predictions", "def predict(self, text):\n emotion_fields = [\n 'anger',\n 
'anticipation',\n 'disgust',\n 'fear',\n 'joy',\n 'sadness',\n 'surprise',\n 'trust',\n ]\n sentiment_fields = [\n 'negative',\n 'positive'\n ]\n count = Counter()\n for token in word_tokenize(text.lower()):\n if token in self.model:\n count += Counter(self.model[token])\n # get % per emotion\n emotion_score = {}\n for key in emotion_fields:\n emotion_score[key] = count[key]\n emotion_perc = {}\n for key in emotion_fields:\n emotion_perc[key] = self.calculate_perc(count[key], sum(emotion_score.values()))\n # get % per sentiment\n sent_score = {}\n for key in sentiment_fields:\n sent_score[key] = count[key]\n sent_perc = {}\n for key in sentiment_fields:\n sent_perc[key] = self.calculate_perc(count[key], sum(sent_score.values()))\n return {\n 'emotion_cnt': emotion_score,\n 'emotion': emotion_perc,\n 'sentiment_cnt': sent_score,\n 'sentiment': sent_perc\n }", "def input_format(self, content_type):\n return getattr(self, '_input_format', {}).get(content_type, hug.defaults.input_format.get(content_type, None))", "def classify(texts: List[str], params: Any) -> List[str]:\n\n # ############################ REPLACE THIS WITH YOUR CODE #############################\n best_model, doc2vec, datasets_info = params\n\n X_test_start, X_test_len = datasets_info[texts[0]]\n X_test = doc2vec.get_X(X_test_start, X_test_len)\n\n preds_int = best_model.predict(X_test)\n preds = ['pos' if pr == 1 else 'neg' for pr in preds_int]\n\n return preds\n # ############################ REPLACE THIS WITH YOUR CODE #############################", "def predict(self, req: dict) -> (str, float):\n # The base-64 string is converted into an image object but this object\n # cannot be passed to Keras directly. The object is first dumped into a\n # temp file and then the filename is passed to Keras.\n try:\n image_obj = base64_to_image_obj(req)\n except TorchException as ex:\n raise TorchException(str(ex), self.service_name)\n random_file_name = super().get_random_name()\n temp_image_filename = temp_file(self.service_name, random_file_name)\n with open(temp_image_filename, \"wb\") as img_file:\n img_file.write(image_obj)\n try:\n temp_image = self.__load_image(temp_image_filename)\n except:\n raise TorchException(\n \"Could not load image data\", self.service_name)\n # Get a value between 0 and 1 for each class (pred is a list in a list)\n pred = self.model.predict(temp_image)\n # Get the index of the highest prediction\n result = np.argmax(pred, axis=1)\n if self.class_map:\n prediction_class = self.class_map.get(result[0], None)\n if not prediction_class:\n raise Exception(\n \"Unexpected class from model [{self.service_name}]\")\n else:\n prediction_class = result[0]\n os.remove(temp_image_filename)\n\n confidence = float(pred[0][result[0]])\n # Compare the confidence with either the single-value threshold or the\n # corresponding class threshold if threshold is a provided as a list.\n if (isinstance(self.background_threshold, float) and\n confidence < self.background_threshold) \\\n or (isinstance(self.background_threshold, list) and\n confidence < self.background_threshold[result[0]]):\n prediction_class = 'bg'\n\n # Always return a string class\n return str(prediction_class), confidence", "def classify(img):\n # possible TODO: call the imagekit script on the input image\n # decodes from base64 to an image type suitable for prediction model\n imgDecoded = base64.b64decode(img)\n image = Image.open(BytesIO(imgDecoded))\n image = image.convert(\"RGB\")\n target_size = (IMG_WIDTH, IMG_HEIGHT)\n np_img = 
image.resize(target_size)\n np_img = img_to_array(np_img) # (224, 224, 3)\n np_img = np.expand_dims(np_img, axis=0)\n datagen = ImageDataGenerator(rescale=1./255).flow(\n np_img, \n batch_size=BATCH_SIZE\n )\n \n # make prediction\n # predict_classes will return the label, which is an integer (0, 1, 2... for\n # each animal in alphabetical order)\n # convert this to a string name, then return\n with graph.as_default():\n bottleneck_features_web = vgg16_model.predict_generator(datagen, BATCH_SIZE)\n prediction = model.predict_classes(bottleneck_features_web, batch_size=BATCH_SIZE)[0]\n animal = ANIMALS[prediction]\n fun_fact = getFunFacts(animal)\n\n article = \"an \" if fun_fact[0].lower() in ['a','e','i','o','u'] else \"a \"\n # text = \"This is \" + article + animal + \"!\\nDid you know? \\n\" + fun_fact + \"\\n\\n\"\n return json.dumps({'result': article + animal, 'fun': fun_fact})", "def predict(note):\n\n # Patterns for information extraction\n p = re.compile(r\"edss\", re.IGNORECASE)\n p_score = re.compile(r\"\\d\\.\\d\")\n p_num = re.compile(r\"zero|one|two|three|four|five|six|seven|eight|nine\", re.IGNORECASE)\n num_dict = {\n \"zero\":0,\n \"one\":1,\n \"two\":2,\n \"three\":3,\n \"four\":4,\n \"five\":5,\n \"six\":6,\n \"seven\":7,\n \"eight\":8,\n \"nine\":9\n }\n score = -1\n sentences = sent_tokenize(note)\n for sent in sentences:\n # Find sentence with \"EDSS\"\n if len(re.findall(p, sent)) > 0:\n # Find score with format \"x.x\"\n if len(re.findall(p_score, sent)) > 0:\n score = float(re.findall(p_score, sent)[0])\n break\n # Find score with format \"EDSS is x\"\n elif len(re.findall(r\"\\s+(?:0|1|2|3|4|5|6|7|8|9)(?:\\.|\\,|\\s+|\\))\", sent)) > 0:\n number = re.findall(r\"\\s+(?:0|1|2|3|4|5|6|7|8|9)(?:\\.|\\,|\\s+|\\))\", sent)[0]\n score = float(re.sub(r\"\\s|\\.|\\,|\\)\", r\"\", number))\n break\n # Find score writtent in \"zero/one ...\"\n elif len(re.findall(p_num, sent)) > 0:\n score = float(num_dict[re.findall(p_num, sent)[0].lower()])\n break\n \n if score not in [0.0, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5]:\n score = -1\n \n \n label_dict = {0.0:0,\n 1.0:1,\n 1.5:2,\n 2.0:3,\n 2.5:4,\n 3.0:5,\n 3.5:6,\n 4.0:7,\n 4.5:8,\n 5.0:9,\n 5.5:10,\n 6.0:11,\n 6.5:12,\n 7.0:13,\n 7.5:14,\n 8.0:15,\n 8.5:16,\n 9.0:17,\n 9.5:18,\n -1:-1}\n \n return label_dict[score]", "def predict(val_text,model):\n \n try:\n if isinstance(pd.read_csv(val_text),pd.DataFrame) == True:\n val_text = np.array(pd.read_csv(val_text))\n except:\n if isinstance(val_text,str) == True:\n val_text = np.array([val_text])\n else:\n return \"First Argument must be of string or numpy array DataType\"\n\n tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')\n tokens_val = tokenizer.batch_encode_plus(\n val_text.tolist(),\n max_length = 512,\n padding='max_length',\n truncation=True\n )\n\n val_seq = torch.tensor(tokens_val['input_ids'])\n val_mask = torch.tensor(tokens_val['attention_mask'])\n le = pickle.load(open(get_python_lib() + \"/TLA/ang_Classify/models/encoder.pkl\",\"rb\"))\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n with torch.no_grad():\n model.to(device)\n preds = model(val_seq.to(device), val_mask.to(device))\n preds = preds.detach().cpu().numpy()\n preds = np.argmax(preds, axis=1)\n preds = le.inverse_transform(preds)\n return preds[0]", "def transformation():\n data = None\n\n # Convert from JSON to dict\n if flask.request.content_type == \"application/json\":\n input = 
flask.request.data.decode(\"utf-8\")\n input = ast.literal_eval(input)\n\n data = input[\"data\"]\n pred_type = input[\"pred_type\"]\n\n else:\n return flask.Response(\n response=\"This predictor only supports JSON data\",\n status=415,\n mimetype=\"text/plain\",\n )\n\n print(\"Invoked with {} records\".format(len(data)))\n\n out = io.StringIO()\n\n if pred_type == \"prediction\":\n output = ScoringService.predict(data, input[\"target_node\"])\n output.to_csv(out, header=False, index=False)\n\n elif pred_type == \"intervention\":\n output = ScoringService.intervention(data)\n pd.DataFrame({\"results\": output}).to_csv(out, header=False, index=False)\n\n result = out.getvalue()\n\n return flask.Response(response=result, status=200, mimetype=\"text/csv\")", "def make_predictions_multiple(texts, types, pipeline):\n preds, judgements = dict(), dict()\n\n if \"Toxicity\" in types:\n preds[\"toxic\"], judgements[\"toxic\"] = pipeline.predict_toxicity_ulm_multiple(\n texts\n )\n if \"Insult\" in types:\n preds[\"insult\"], judgements[\"insult\"] = pipeline.predict_insult_ulm_multiple(texts)\n if \"Obscenity\" in types:\n preds[\"obscene\"], judgements[\"obscene\"] = pipeline.predict_obscenity_ulm_multiple(\n texts\n )\n if \"Prejudice\" in types:\n preds[\"prejudice\"], judgements[\n \"prejudice\"\n ] = pipeline.predict_identity_ulm_multiple(texts)\n\n return preds, judgements", "def predict():\n try:\n processor = Processor()\n input_params = processor.process_input(request.data)\n predictions = processor.predict(input_params)\n json_input = json.loads(request.data)\n insert_inference(json.dumps(json_input), json.dumps(predictions.tolist()))\n return json.dumps({\"Predicted Price\": predictions.tolist()})\n except (KeyError, json.JSONDecodeError, AssertionError, ValueError):\n return json.dumps({\"error\": \"CHECK INPUT\"}), 400\n except Exception as err:\n return json.dumps({\"error\": \"PREDICTION FAILED\", \"message\": {err}}), 500" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the predicted value for all the vectors in X.
def predict(self, X): return predicted_value
[ "def predict(self, X):\n n = X.shape[0]\n m = self.num_obj\n Y_m = np.ndarray((n, m))\n Y_v = np.ndarray((n, m))\n for i in xrange(m):\n if self.denoised:\n if hasattr(self.surrogates[i],'likelihood') and hasattr(self.surrogates[i].likelihood,'variance'):\n noise = self.surrogates[i].likelihood.variance\n else:\n noise = 0.\n else:\n noise = 0.\n m, v = self.surrogates[i].predict(X)\n Y_m[:, i] = m.flatten()\n Y_v[:, i] = v.flatten() - noise\n return Y_m, Y_v", "def predict(self, X):\n\n return yhat", "def predict(self, X: np.ndarray) -> np.ndarray:\n return np.array([self._classify(x) for x in X])", "def predict(self, X):\n if self.B is None:\n raise Exception(\"The model has not been fit yet!\")\n\n # Append a column of 1's to X for the betas, if set\n if self.fit_intercept:\n X = np.c_[np.ones(len(X)), X]\n\n # Dot X with the fit betas\n return X.dot(self.B)", "def predict(self, X: np.array) -> np.array:\n check_is_fitted(self, [\"popt_\", \"pcov_\", \"name_\"])\n X = check_array(X)\n\n return self.model_func(X, *self.popt_)", "def predict(self, X):\n # predict the risk of death using the control estimator\n risk_control = self.control_estimator.predict_proba(X)[:, 1]\n \n # predict the risk of death using the treatment estimator\n risk_treatment = self.treatment_estimator.predict_proba(X)[:, 1]\n \n # the predicted risk reduction is control risk minus the treatment risk\n pred_risk_reduction = risk_control - risk_treatment\n \n \n return pred_risk_reduction", "def predict(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n return self.predict_mu(x), self.predict_var(x)", "def predict(self, X):\n\n predicted_probabilitiy = self.predict_proba(X)\n return self.classes_.take((np.argmax(predicted_probabilitiy, axis=1)),\n axis=0)", "def svm_predict(model, X):\n\n # check if we are getting a vector. 
If so, then assume we only need to do predictions\n # for a single example\n if X.shape[1] == 1:\n X = X.T\n\n m = X.shape[0]\n p = np.zeros(m)\n pred = np.zeros(m)\n\n if model['kernelFunction'].__name__ == 'linear_kernel':\n # we can use the weights and bias directly if working with the linear kernel\n p = np.dot(X, model['w']) + model['b']\n elif model['kernelFunction'].__name__ == 'gaussian_kernel':\n # vectorized RBF Kernel\n # This is equivalent to computing the kernel on every pair of examples\n X1 = np.sum(X ** 2, 1)\n X2 = np.sum(model['X'] ** 2, 1)\n K = X2 + X1[:, None] - 2 * np.dot(X, model['X'].T)\n\n if len(model['args']) > 0:\n K /= 2 * model['args'][0] ** 2\n\n K = np.exp(-K)\n p = np.dot(K, model['alphas'] * model['y']) + model['b']\n else:\n # other non-linear kernel\n for i in range(m):\n predictions = 0\n for j in range(model['X'].shape[0]):\n predictions += model['alphas'][j] * model['y'][j] \\\n * model['kernelFunction'](X[i, :], model['X'][j, :])\n p[i] = predictions\n\n pred[p >= 0] = 1\n return pred", "def predict_proba(self, X):\n prob = self.decision_function(X)\n prob *= -1\n np.exp(prob, prob)\n prob += 1\n np.reciprocal(prob, prob)\n if len(prob.shape) == 1:\n return np.vstack([1 - prob, prob]).T\n else:\n # OvR normalization, like LibLinear's predict_probability\n prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))\n return prob", "def predict(self, x):\n res = 0\n for arbre in self.arbres:\n res += arbre.predict(x)\n if res >= 0:\n return 1\n return -1", "def predict(self, X):\n probabilities = self.predict_probability(X)\n\n def classForProbability(probability):\n if probability > 0.5:\n return self.classOneLabel\n return self.classZeroLabel\n\n return numpy.array([\n classForProbability(p) for p in probabilities\n ])", "def predict(self, x):\n # Create an array to store predictions in. 
Add an extra dimension if this\n predictions = []\n # Loop over the cross-validation models\n for i, model in enumerate(self._models):\n\n # Make and store predictions\n predictions.append(model.predict(x).flatten())\n predictions=np.asarray(predictions)\n # Get the mean and standard deviation of predictions\n mean_preds = np.mean(predictions, axis = 0)\n stdev_preds = np.std(predictions, axis = 0)\n # Return the mean predictions and standard deviation of predictions\n return mean_preds, stdev_preds", "def predict(self):\n\t\treturn self.y_pred", "def normal_equation_prediction(X, y):\n rows, cols = np.shape(X)\n Xaux = np.ones((rows, cols + 1))\n Xaux[0:,1:] = X\n \n w = np.matrix(np.dot(Xaux.T,Xaux)).I # (X^T * X)^-1\n w = np.dot(w,Xaux.T) # (X^T * X)^-1 * X^T\n w = np.dot(w,y) # (X^T * X)^-1 * X^T * y\n \n prediction = np.dot(Xaux,w) # calcula predição a partir de w\n\n return prediction", "def predict(self, X, return_std=..., return_cov=...):\n ...", "def predict(self, X):\n # predict the class of y with classifier\n classes = self.clf.predict(X)\n \n # create default regressor predictions - zeros\n y_pred = np.zeros(X.shape[0])\n \n for lbl, r in zip(self._class_labels, self.regs):\n # use the portion of X with the given label \n mask = (classes == lbl)\n \n if sum(mask) > 0:\n # fit the regressor for this class\n y_pred[np.array(mask)] = r.predict(X[mask])\n \n return y_pred", "def predict(self, Xnew):\n if self.compute_invcdf:\n for _, x in np.ndenumerate(self.X):\n x.compute_inv_cdf(self.spline_basis)\n\n scores = np.ones(Xnew.shape[0]) * self.intercept\n for i in range(Xnew.shape[0]):\n for j in range(self.n_predictors):\n scores[i] += \\\n np.dot(Xnew[i, j].phi, self.phi[j, :]) + \\\n np.dot(Xnew[i, j].inv_cdf.inv_cdf_coeffs,\n np.dot(self.spline_basis.metric, self.beta[j, :]))\n \n preds = 1.0 / (1.0 + np.exp(-scores))\n preds[preds >= 0.5] = 1.0\n preds[preds < 0.5] = 0.0\n return preds", "def predict(self, X):\n proba = {}\n total_probabilities = np.array([])\n for classifier_index in range(1, 5):\n clf = self.classifiers[classifier_index]\n proba[classifier_index] = clf.predict_proba(X)[:, 1]\n for class_index in range(1, 6):\n if class_index == 1:\n # probability = 1 - probability(bigger than 1)\n total_probabilities = np.vstack(1 - proba[class_index])\n elif 1 < class_index < 5:\n # probability = probabillity(bigger than i) - probability(bigger than i-1)\n total_probabilities = np.column_stack((total_probabilities, (proba[class_index-1]-proba[class_index])))\n elif class_index == 5:\n # probability = probability(bigger than 4)\n total_probabilities = np.column_stack((total_probabilities, (proba[class_index-1])))\n # add one to the results because indexes start at 0, but classes range in (1 - 5)\n results = np.argmax(total_probabilities, axis=1) + 1\n return results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an integer list of tasks from an edge label.
def get_task_list(label):
    task_list = []
    current = 0
    in_range = False
    range_start = -1
    for char in label:
        if char.isdigit():
            current = current * 10 + int(char)
        elif char == '-':
            range_start = current
            current = 0
            in_range = True
        elif char == ',' or char == ']':
            if in_range:
                range_end = current
                for i in range(range_start, range_end + 1):
                    task_list.append(i)
            else:
                task_list.append(current)
            if char == ']':
                break
            in_range = False
            current = 0
            range_start = -1
    return task_list
[ "def query_edge_id_list(graph, label=None):\n travel = graph.E()\n if label:\n travel = travel.hasLabel(label)\n temp_id_list = travel.id().toList()\n id_list = list(map(lambda t: t.get('@value').get('relationId'), temp_id_list))\n return id_list", "def _get_ids_from_label(self, label):\n keys = self.list_keys()\n results = []\n for key in keys:\n if key['label'] == label:\n results.append(key['id'])\n return results", "def subtask_ids(self):\n return [elem[\"subtask\"] for elem in self.accumulators()[\"subtasks\"]]", "def label_ids(self):\n return [l.id for l in self.labels.all()]", "def getElementEdgeNodes(elementType, tag=-1, primary=False, task=0, numTasks=1):", "def _index_label(self, label: Any) -> List[int]:\n raise NotImplementedError", "def get_task_ids(computation):\n return computation._tasks.keys()", "def num_tasks(self):\n return Task.objects.filter(labels=self).count()", "def get_start_node_ids(task):\n return map(str.strip, task.start_node_id.split('|'))", "def num_edge_labels(self) -> int:\n if self._num_edge_labels is None:\n if self.graphs is None:\n self._num_edge_labels = self.generator.num_edge_labels\n else:\n unique_edge_labels = torch.LongTensor([])\n for graph in self.graphs:\n unique_edge_labels = torch.cat([\n unique_edge_labels, graph.get_num_labels(\"edge_label\")\n ])\n self._num_edge_labels = torch.unique(\n unique_edge_labels\n ).shape[0]\n return self._num_edge_labels", "def stream_ids(self, label):\n\n if label in self:\n return sorted(list(self[label].keys()))\n\n else:\n return None", "def get_id(graph, edges):\n res = [0 for _ in edges]\n for i, (node1, node2) in enumerate(edges):\n if graph.has_edge(node1, node2):\n res[i] = 1\n return res", "def query_vertex_id_list(graph, label=None):\n travel = graph.V()\n if label:\n travel = travel.hasLabel(label)\n return travel.id().toList()", "def num_labels(self) -> int:\n if self.task == \"node\":\n return self.num_node_labels\n elif self.task == \"edge\" or self.task == \"link_pred\":\n return self.num_edge_labels\n elif self.task == \"graph\":\n return self.num_graph_labels\n else:\n raise ValueError(f\"Task {self.task} not supported\")", "def num_edge_labels(self):\n # TODO: change to unique as what we did in graph.py\n return max([gen.num_edge_labels for gen in self.generators])", "def make_label_edges(self):\n data_path = self.args.data_name + '_true_edges.pickle'\n if os.path.exists(os.path.join(self.args.data_name, data_path)):\n with open(os.path.join(self.args.data_name, data_path), 'rb') as handle:\n label_edges = pickle.load(handle)\n else:\n nodes = list(self.graph.nodes)\n label_edges = []\n for node in nodes:\n info = self.graph._adj[node]\n neighs = list(info.keys())\n for neigh in neighs:\n if info[neigh][0]['key'] == 'labels_edges':\n label_edges.append([node, neigh])\n try:\n with open(os.path.join(self.args.data_name, data_path), 'wb') as handle:\n pickle.dump(label_edges, handle, protocol=3)\n except:\n pass\n return label_edges", "def edge_label(edge):\n return EDGE_LABELS[edge.get_type()]", "def tasks_at(self, i, j, k=None):\n if k is None:\n return self._net_graph[i][j].tasks\n else:\n return self._net_graph[i][j].tasks[k]", "def get_values(self, node1, label):\n return [node2 for _, _, node2, _ in self.get_edges(node1, label)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate some picture values
def validate_image(self, picture):
    if picture:
        if picture.size > 2000000:
            raise ValueError('Max size allowed 2MB')
        if picture.image.width < 180:
            raise ValueError('Width should be min 180px')
        if picture.image.height < 180:
            raise ValueError('Height should be min 180px')
[ "def validate_base_image(value):\n if not value:\n return False\n\n filename, data = b64decode_file(value)\n\n # check size\n if len(data) > 1048576:\n raise Invalid(_(u'Image should be smaller than 1MB.'))\n\n img = Image.open(StringIO(data))\n\n # check format\n if img.format != 'PNG':\n raise Invalid(_(u'Image should be in PNG format.'))\n\n # check image dimensions\n width, height = img.size\n if not(width >= 1200 and height >= 630):\n raise Invalid(_(\n u'Image must be at least 1200 x 630 pixels for the best display on high resolution devices.'\n ))\n\n return True", "def test_invalid_rgb_magick(self):\n assert not poly.isInMap((40, 40), \"magick\") and not poly.isInMap((40, 40, 40, 40), \"magick\") and not poly.isInMap(('a', 40, 40), \"magick\") and not poly.isInMap((40, 40, 400), \"magick\") \n\n # Testing that the correct maps are supported", "def test__validate_thumbnail__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_thumbnail(input_value)", "def _check_image_input(observation_space: spaces.Box, key: str = \"\") -> None:\n if observation_space.dtype != np.uint8:\n warnings.warn(\n f\"It seems that your observation {key} is an image but the `dtype` \"\n \"of your observation_space is not `np.uint8`. \"\n \"If your observation is not an image, we recommend you to flatten the observation \"\n \"to have only a 1D vector\"\n )\n\n if np.any(observation_space.low != 0) or np.any(observation_space.high != 255):\n warnings.warn(\n f\"It seems that your observation space {key} is an image but the \"\n \"upper and lower bounds are not in [0, 255]. \"\n \"Generally, CNN policies assume observations are within that range, \"\n \"so you may encounter an issue if the observation values are not.\"\n )", "def test_valid_rgb_in_magick(self):\n assert poly.isInMap((205, 201, 201), \"magick\")", "def check_correctness(self):\n if self.r < 0 or self.r > 255 or self.g < 0 or self.g > 255 or self.b < 0 or self.b > 255:\n raise ValueError(\"RGB arguments should be between 0 and 255\")", "def test_valid_rgb_notin_magick(self):\n assert not poly.isInMap((40, 40, 40), \"magick\")", "def validate_image_format(image):\n return image.endswith('png') or image.endswith('jpg')", "def is_single_face_valid(img) -> int:\n # TODO stub\n return 0", "def validate_components(components: dict):\n # Check that all image components have the same dimensions\n size = None\n for img in components.values():\n if size and img.size != size:\n raise ValueError(\"Image components must have the same dimensions!\")\n else:\n size = img.size", "def validate_image_format(seq):\n msg = None\n for image in seq:\n # if selection isn't a supported image and its not a directory warn user\n if not image.ext.endswith(pyani.core.util.SUPPORTED_IMAGE_FORMATS) and not os.path.isdir(image.path):\n msg = (\n \"Your selection contains unsupported images. 
The following image types are supported: \"\n \"{0}\".format(' , '.join(pyani.core.util.SUPPORTED_IMAGE_FORMATS))\n )\n break\n return msg", "def test_pictureIsTooLarge(self):\r\n self.assertTrue(len(self.picture.image.tostring()) > 0xFFFF)", "def check(data):\n tmp_len = len(data[0])\n row_num = 0\n for row in data:\n row_num += 1\n if len(row) != tmp_len:\n raise ImageError(\n \"row number {0} has different length from first row\".format(\n row_num),\n 'bad_row_length'\n )\n for pixel in row:\n if type(pixel) == tuple:\n if len(pixel) < 3 or len(pixel) > 4:\n raise ImageError(\n \"'{0}' is not a valid pixel tuple\".format(pixel),\n 'bad_pixel_length'\n )\n elif type(pixel) != int and not re.match('.*numpy\\.[u]?int(8|16|32|64)',\n str(type(pixel))):\n raise ImageError(\n \"'{0}' is not a valid pixel value\".format(pixel),\n 'bad_pixel_value'\n )", "def validate_cover_image(self, value):\n if (\n value is not None\n and not hasattr(value, \"name\")\n and not urlparse(value).scheme\n ):\n raise ValidationError(\"Expected cover image to be a file or url\")\n return {\"cover_image\": value}", "def img_check(img):\n with rasterio.open(img) as src:\n if src.crs.is_valid and src.crs.is_projected and src.crs.is_epsg_code:\n print(\"Input raster is valid and has valid CRS\")\n else:\n print(\"Input raster does not have valid CRS. Exiting the script\")\n # exiting from script\n sys.exit()", "def verify_aperture_img_shape(self):\n assert self.tpf[1].header['TDIM5'] == '({},{})'.format(self.tpf[2].header['NAXIS1'], self.tpf[2].header['NAXIS2'])", "def validate_pixel(pixel):\n \n valid_color = (pixel[0] < 150 and pixel[1] < 135 and pixel[2] < 135)\n return valid_color", "def test_image(self):\n for c, f in self.constants:\n self.assertEqual(f.image(), {c})", "def test__validate_thumbnail__0():\n thumbnail = EmbedThumbnail(url = 'https://orindance.party/')\n \n for input_value, expected_output in (\n (None, None),\n (thumbnail, thumbnail),\n ):\n output = validate_thumbnail(input_value)\n vampytest.assert_eq(output, expected_output)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
USAGE list_hb selection, [cutoff (default=3.2)], [angle (default=55)], [hb_list_name] e.g. list_hb 1abc & c. a &! r. hoh, cutoff=3.2, hb_list_name=abchbonds
def list_hb(selection,cutoff=3.2,angle=55,hb_list_name='hbonds'):
    cutoff = float(cutoff)
    angle = float(angle)
    hb = cmd.find_pairs("((byres "+selection+") and n;n)","((byres "+selection+") and n;o)",mode=1,cutoff=cutoff,angle=angle)
    # sort the list for easier reading
    hb.sort(lambda x,y:(cmp(x[0][1],y[0][1])))
    for pairs in hb:
        for ind in [0,1]:
            cmd.iterate("%s and index %s" % (pairs[ind][0],pairs[ind][1]), 'print "%s/%3s`%s/%s/%s " % (chain,resn,resi,name,index),')
        print "%.2f" % cmd.distance(hb_list_name,"%s and index %s" % (pairs[0][0],pairs[0][1]),"%s and index %s" % (pairs[1][0],pairs[1][1]))
[ "def help_list_bookings():\n get_list_bookings_parser().print_help()", "def honeypot_tab(x):\n BURN_IN_HELL = r\"\"\"\n id: 2\n opt: ('progress-bar', )\n ui: '''\n type: 'text'\n value: 'How many?'\n %\n type: 'edit_num'\n name: 'num'\n min: 1\n max: len(h)\n %\n %\n type: 'text'\n value: 'Separate them with?'\n %\n type: 'edit_line'\n name: 'sep'\n value: ','\n %\n %\n type: 'button'\n %'''\n post-ui-code: '''\ns = h.pick(result[\"num\"], result[\"sep\"], pb=pb)\nshow_copy_single(s)\n'''\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\"\"\"\n list_choice_to_vdictui(True, x, api.parse_text(r\"\"\"\n id: 1\n opt: ('progress-bar', )\n post-ui-code: 'show_copy_single(h.pick(pb=pb))'\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n id: 2\n opt: ('progress-bar', )\n ui: '''\n type: 'text'\n value: 'How many?'\n %\n type: 'edit_num'\n name: 'num'\n min: 1\n max: len(h)\n %\n %\n type: 'button'\n %'''\n post-ui-code: '''\nresult = h.pickl(result[\"num\"], pb=pb)\nfor s in result:\n show_copy_single(s)\n'''\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n id: 3\n ui: '''\n type: 'text'\n value: 'What is the fake password?'\n %\n type: 'edit_line'\n name: 'line'\n %\n %\n type: 'button'\n %'''\n post-ui-code: 'h.add(result[\"line\"])'\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n id: 4\n opt: ('choice', )\n post-ui-code: 'h.remove(selection)'\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\"\"\"))", "def selection(dataset, feature_list, clf, cut_off=0.01):\n\n # Extract features and labels from dataset for local testing\n data = feature_format.featureFormat(dataset, feature_list, sort_keys=True)\n labels, features = feature_format.targetFeatureSplit(data)\n\n clf.fit(features, labels)\n f_weight = clf.feature_importances_\n\n # poi removed as this has been seperated into the label.\n del feature_list[0]\n\n df_f = pd.DataFrame(feature_list)\n\n df_f[\"Feature_importance\"] = f_weight\n\n df_f = df_f[df_f.Feature_importance > cut_off]\n\n # Convert the feature names back into a list.\n fs_list = df_f.iloc[:, 0].tolist()\n\n # Add poi back to the start of the new list.\n fs_list = [\"poi\"] + fs_list\n\n return fs_list", "def __word_filter__(option, low, high, num_word, total_word_count, merge_list):\n # option\n if option == 'CustomP':\n low *= total_word_count\n high *= total_word_count\n\n elif option == 'CustomR': # custom raw counts\n pass\n\n elif option.endswith('StdE'):\n StdE = 0\n Average = total_word_count / num_word # average frequency of the word appearance (raw count)\n for word in merge_list:\n StdE += (merge_list[word] - Average) ** 2\n StdE = sqrt(StdE)\n StdE /= num_word\n\n if option.startswith('top'):\n # TopStdE: only analyze the Right outlier of word, determined by standard deviation\n high = total_word_count\n low = Average + 2 * StdE\n\n elif option.startswith('mid'):\n # MidStdE: only analyze the Non-Outlier of word, determined by standard deviation\n high = Average + 2 * StdE\n low = Average - 2 * StdE\n\n elif option.startswith('low'):\n # LowStdE: only analyze the Left Outlier of word, determined by standard deviation\n high = Average - 2 * StdE\n\n else:\n raise IOError('input option is not valid')\n\n elif option.endswith('IQR'):\n TempList = sorted(merge_list.items(), key=itemgetter(1))\n Mid = TempList[int(num_word / 2)][1]\n Q3 = TempList[int(num_word * 3 / 4)][1]\n Q1 = TempList[int(num_word / 4)][1]\n IQR = Q3 - 
Q1\n\n if option.startswith('top'):\n # TopIQR: only analyze the Top outlier of word, determined by IQR\n high = total_word_count\n low = (Mid + 1.5 * IQR)\n\n elif option.startswith('mid'):\n # MidIQR: only analyze the non-outlier of word, determined by IQR\n high = (Mid + 1.5 * IQR)\n low = (Mid - 1.5 * IQR)\n\n elif option.startswith('low'):\n # LowIQR: only analyze the Left outlier of word, determined by IQR\n high = (Mid - 1.5 * IQR)\n\n else:\n raise IOError('input option is not valid')\n\n else:\n raise IOError('input option is not valid')\n\n return high, low", "def list_choice(ypos, yspace, thelist, ct=False):\n assert len(thelist) > 0\n xspace = 0\n for x in thelist:\n if len(x) > xspace:\n xspace = len(x)\n xspace += 4 # [**]\n assert yspace >= 7 and xspace <= xsize\n center_text(ypos, \"Page-up up w k 8\")\n center_text(ypos + 1, \"Choose with space, tab or enter\")\n center_text(ypos + 2, \"or escape to cancel.\")\n center_text(ypos + yspace - 1, \"Page-down down s j 2\")\n # The size of a ``page``.\n page = yspace - 6\n # Leftover columns on the right.\n x_leftover = xsize - xspace\n if x_leftover >= 8: # Enough room for \"<-Choose\"?\n show_arrow = True\n else:\n show_arrow = False\n # The currently selected button.\n choice = 0\n size = len(thelist)\n while True:\n # Draw.\n for i in range(page):\n # Offset of the current page in the list.\n offset = choice - choice%page\n button_listpos = offset + i\n button_ypos = i + ypos + 4\n # Surrounding brackets.\n wrap_addch(button_ypos, 0, ord(\"[\"))\n wrap_addch(button_ypos, xspace - 1, ord(\"]\"))\n # Past end?\n if button_listpos >= size:\n # Put empty.\n wrap_hline(button_ypos, 0, ord(\"#\"), xspace)\n wrap_hline(button_ypos, xspace, ord(\" \"), x_leftover)\n else:\n # Put line.\n if choice % page == i: #Selected?\n wrap_addch(button_ypos, 1, ord(\"*\"))\n wrap_addch(button_ypos, xspace - 2, ord(\"*\"))\n if show_arrow:\n wrap_addstr(button_ypos, xspace, \"<-Choose\")\n else:\n if show_arrow:\n wrap_hline(button_ypos, xspace, 32, x_leftover)\n wrap_addch(button_ypos, 1, ord(\" \"))\n wrap_addch(button_ypos, xspace - 2, ord(\" \"))\n #Print the characters.\n for x in range(2, xspace - 2):\n # Print the buttons.\n if x - 2 >= len(thelist[button_listpos]):\n wrap_addch(button_ypos, x, ord(\" \"))\n else:\n wrap_addstr(button_ypos, x,\n thelist[button_listpos][x - 2])\n # Print the 'rollability-indicators'.\n if offset - page < 0:\n wrap_hline(ypos + 3, 0, ord(\"-\"), xspace)\n else:\n wrap_hline(ypos + 3, 0, ord(\"^\"), xspace)\n if offset + page > len(thelist) - 1:\n wrap_hline(ypos + yspace - 2, 0, ord(\"-\"), xspace)\n else:\n wrap_hline(ypos + yspace - 2, 0, ord(\"v\"), xspace)\n wrap_refresh()\n # Handle input.\n ch = wrap_getch()\n if ch == 27:\n return False\n choice += common_updown_handler(ch, page)\n # Don't let choice be out of range\n if choice < 0:\n choice = 0\n if choice >= len(thelist):\n choice = len(thelist) - 1\n # Choose.\n if ch in (ord(\"\\t\"), ord(\" \"), ord(\"\\n\")):\n return choice\n zxcv_handler(ch)\n if common_ct_handler(ch) and ct:\n return True", "def selHair(self, *args):\n\n\t\tselectedhair = cmds.textScrollList('hairStrandList', q=1, si=1)\n\t\tcmds.select(selectedhair)\n\t\tprint selectedhair\n\t\treturn", "def CGContent(lst,lower,upper, verbose=True):\n newlist=[]\n for item in lst:\n cont=0\n for char in item:\n if char==\"G\" or char==\"C\":cont=cont+1\n cont=cont/float(len(item))\n if (lower<cont) & (cont<upper):\n newlist=newlist+[item]\n if verbose==True: print cont\n return newlist", 
"def hs_list(args):\n for hs in get_hidden_services():\n print args.fmt.replace(r'\\t', '\\t') % hs", "def split_pipe(self, l):\n bins = []\n for word in l:\n if not word.startswith('?'):\n bins.append([word])\n elif word.startswith('?'):\n bins[-1].append(word)\n return bins", "def df_display_hist_from_list(df_food, list_columns) :\n z = plt.figure(figsize=(4,4))\n for column in list_columns :\n df = df_food.copy()\n zmin, zmax = df_boxplot_min_max(df, column)\n if zmin < zmax :\n list_name = remove_pattern([column],'100g')\n new_column = list_name[0]\n df.rename(columns={column: new_column}, inplace=True)\n column = new_column\n df = pd.DataFrame(df[column], index=df.index)\n df = df[df[column] <= zmax]\n df = df[df[column] >= zmin]\n df = df[df[column] > 0.0]\n #z = plt.figure()\n z = df.plot.hist(bins=50)", "def format_help_list(hdict_cmds, hdict_db):\r\n string = \"\"\r\n if hdict_cmds and any(hdict_cmds.values()):\r\n string += \"\\n\" + SEP + \"\\n {CCommand help entries{n\\n\" + SEP\r\n for category in sorted(hdict_cmds.keys()):\r\n string += \"\\n {w%s{n:\\n\" % (str(category).title())\r\n string += \"{G\" + fill(\", \".join(sorted(hdict_cmds[category]))) + \"{n\"\r\n if hdict_db and any(hdict_db.values()):\r\n string += \"\\n\\n\" + SEP + \"\\n\\r {COther help entries{n\\n\" + SEP\r\n for category in sorted(hdict_db.keys()):\r\n string += \"\\n\\r {w%s{n:\\n\" % (str(category).title())\r\n string += \"{G\" + fill(\", \".join(sorted([str(topic) for topic in hdict_db[category]]))) + \"{n\"\r\n return string", "def histogram(typename, *args, **kwargs):\n h = typename(*args)\n attributes = {'select': h.GetName(), 'cut': '', 'log': ''} # Defaults\n attributes.update(kwargs)\n for name, value in attributes.iteritems():\n setattr(h, name, value)\n h.SetOption(attributes.get('opt', ''))\n return h", "def lsh(num_hash,threshold,doc_min_hash_list,BUCKET_SIZE = 10000):\n br = computeOptimalBR(num_hash, threshold)\n b,r = br[0],br[1]\n \n #{frozenset(A,B),frozenset(A,C),frozenset(B,C),...}\n candidate_pairs = set()\n \n for i in range(0,b):\n #{hash_value0:set(A,B),hash_value1:set(C,D,E,F),...}\n #clear the bucket for a new band\n bucket = {}\n first_index = i*r\n last_index = i*r + (r - 1)\n for doc_min_hash in doc_min_hash_list:\n doc,min_hash = doc_min_hash[0],doc_min_hash[1] \n signature_in_band = min_hash[first_index:(last_index+1)]\n hash_value = sum([(x+1)*signature_in_band[x] for x in range(len(signature_in_band))]) % BUCKET_SIZE\n #hash_value = tuple(signature_in_band)\n if hash_value not in bucket:\n bucket[hash_value] = set()\n bucket[hash_value] = bucket[hash_value].union(set([doc]))\n #we got the bucket for the current band\n #populate all pairs that belong to the buckets that have more than 1 member\n for hash_value,doc_set in bucket.iteritems():\n if len(doc_set) <= 1:\n continue\n for pair_docs in list(itertools.combinations(doc_set,2)):\n candidate_pairs.add(frozenset(pair_docs))\n \n #re-order the result and turn it to a list\n candidate_pairs = list(candidate_pairs)\n candidate_pairs = [list(x) for x in candidate_pairs]\n \n for x in candidate_pairs:\n x.sort()\n \n candidate_pairs.sort()\n return candidate_pairs", "def set_completion_list(self, completion_list):\n self._completion_list = sorted(completion_list, key=str.lower) # Work with a sorted list\n self.configure(values=completion_list)\n self._hits = []\n self._hit_index = 0\n self.position = 0\n self.bind('<KeyRelease>', self.handle_keyrelease)\n self['values'] = self._completion_list # Setup our popup menu", "def 
read_Behroozi_catalog():\n\n #get filename \n filename = sys.argv[1]\n\n #make sure this is a .list file\n if 'list' not in filename.split('.'):\n print \"Error: \" + filename + \" not a .list file.\"\n print \"Quitting!\"\n exit()\n\n #now read\n lines = []\n \n #as long as it's a file\n if os.path.isfile(filename):\n with open(filename) as file:\n for line in file:\n #we want to ignore lines that start with '#'\n if line[0] != '#':\n #then trim off the trailing newline\n line = line.rstrip()\n #and split by separator and add to list\n lines.append(re.split('[ ]*', line))\n\n return lines", "def cutKey(targetList, time=(), hierarchy=\"string\", float=(), includeUpperBound=bool, controlPoints=bool, clear=bool, index=int, shape=bool, selectKey=bool, attribute=\"string\", animation=\"string\", option=\"string\"):\n pass", "def do_set_b1_list(self, args):\n parsed = parse(args)\n try:\n self.b1_list = [float(str) for str in parsed]\n except ValueError:\n print(\"set_b1_list: Non-numeric value supplied\")\n return False", "def qthelp(argv):\r\n\t\tcallBuilder()\r\n\t\tshow.info(\"Build finished; now you can run 'qcollectiongenerator' with the .qhcp project file in %(TARGETDIR)s, like this:\", OPTIONS)\r\n\t\tshow.info(\"# qcollectiongenerator %s\", os.path.join(OPTIONS[\"TARGETDIR\"], \"qweasd.qhcp\"))\r\n\t\tshow.info(\"To view the help file:\")\r\n\t\tshow.info(\"# assistant -collectionFile %s\", os.path.join(OPTIONS[\"TARGETDIR\"], \"qweasd.qhc\"))", "def define_biophysics(*arg):\n if arg[0] == 1:\n arg[1].insert('hh')\n arg[1].gnabar_hh = arg[2]\n arg[1].gkbar_hh = arg[3]\n arg[1].gl_hh = arg[4]\n arg[1].cm = arg[5]\n arg[1].Ra = arg[6]\n arg[1].el_hh = arg[7]\n arg[1].ena = arg[8]\n arg[1].ek = arg[9]\n if arg[0 == 2]:\n arg[1].insert('pas')\n arg[1].insert('na')\n arg[1].insert('kv')\n arg[1].gbar_na = arg[2]\n arg[1].gbar_kv = arg[3]\n arg[1].g_pas = arg[4]\n arg[1].cm = arg[5]\n arg[1].Ra = arg[6]\n arg[1].e_pas = arg[7]\n arg[1].ena = arg[8]\n arg[1].ek = arg[9]\n h.psection(sec=arg[1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a Wide Residual Network with specified parameters
def create_wide_residual_network(input, nb_classes=100, N=2, k=1, dropout=0.0, verbose=1):
    x = initial_conv(input)
    nb_conv = 4

    for i in range(N):
        x = conv1_block(x, k, dropout)
        nb_conv += 2

    x = MaxPooling3D((2,2,2))(x)

    for i in range(N):
        x = conv2_block(x, k, dropout)
        nb_conv += 2

    #x = MaxPooling3D((2,2,2))(x)
    #for i in range(N):
    #    x = conv3_block(x, k, dropout)
    #    nb_conv += 2

    x = AveragePooling3D((8,8,8))(x)  # strides=(2,2,2)
    x = Flatten()(x)
    x = Dense(nb_classes, activation='softmax', W_regularizer=l2(weight_decay), bias=use_bias)(x)

    if verbose:
        print("Wide Residual Network-%d-%d created." % (nb_conv, k))
    return x
[ "def resnetW(pretrained=False, **kwargs):\n model = ResNet(SATBlock, [1,3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def net_builder(net_params: dict):\n import wrn as net\n\n # grab the builder class\n builder = net.build_WideResNet()\n\n # parse the input params\n print(\"Building ResNet...\")\n for key in net_params:\n if hasattr(builder, key):\n print(\"Setting \", key, \": \", net_params[key])\n setattr(builder, key, net_params[key])\n\n return builder.build", "def get_widedeep_net(configure):\n wide_deep_net = WideDeepModel(configure)\n\n loss_net = NetWithLossClass(wide_deep_net, configure)\n train_net = TrainStepWrap(loss_net)\n eval_net = PredictWithSigmoid(wide_deep_net)\n\n return train_net, eval_net", "def ResNetA3D(x, mdlParams, placeholders=None):\r\n with tf.variable_scope('ResNetA3D'):\r\n with slim.arg_scope([slim.convolution], padding='SAME', activation_fn=tf.nn.relu, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), normalizer_fn=slim.batch_norm, normalizer_params={'is_training':placeholders['train_state'], 'epsilon':0.0001, 'decay':0.9, 'center':True, 'scale':True, 'activation_fn':None, 'updates_collections':tf.GraphKeys.UPDATE_OPS, 'fused': False}):\r\n # Initial part\r\n with tf.variable_scope('Initial'):\r\n layer = slim.convolution(x, 48, 3, stride=1, scope='conv1')\r\n layer = slim.convolution(layer, 64, 3, stride=2, scope='conv2')\r\n layer = slim.convolution(layer, 64, 3, stride=1, scope='conv3')\r\n layer = slim.convolution(layer, 64, 3, stride=1, scope='conv4')\r\n layer = slim.convolution(layer, 64, 3, stride=1, scope='conv5')\r\n # Resnet modules\r\n with tf.variable_scope('Resnet_modules'):\r\n # Initial output feature map size\r\n output_fm = mdlParams['ResNetA3D_FM']\r\n # Iterate through all modules\r\n for i in range(len(mdlParams['ResNetA3D_Size'])):\r\n with tf.variable_scope('Module_%d'%(i)):\r\n # Iterate through all blocks inside the module\r\n for j in range(mdlParams['ResNetA3D_Size'][i]):\r\n with tf.variable_scope('Block_%d'%(j)):\r\n # Set desired output feature map dimension of the block and the desired stride for the first block in the module\r\n if j==0:\r\n output_fm = 2*output_fm\r\n block_stride = mdlParams['ResNetA3D_Stride'][i]\r\n else:\r\n block_stride = 1\r\n layer = resneta_block(layer, output_fm, block_stride)\r\n # GAP for 1D,2D,3D\r\n if len(layer.get_shape().as_list()) == 5:\r\n layer = math_ops.reduce_mean(layer, axis=[1,2,3], keep_dims = False, name='global_pool')\r\n elif len(layer.get_shape().as_list()) == 4:\r\n layer = math_ops.reduce_mean(layer, axis=[1,2], keep_dims = False, name='global_pool')\r\n else:\r\n layer = math_ops.reduce_mean(layer, axis=[1], keep_dims = False, name='global_pool')\r\n # Dense output layer\r\n output = slim.layers.fully_connected(layer, len(mdlParams['tar_range']), activation_fn=None)\r\n return output", "def basic_resnet(x, out_channels, kernel_size=[3, 3], first_activation=tf.nn.relu, last_activation=tf.nn.relu, \n preactive=True, train=True, downsample=False, projection=True, reuse=None, name='resnet'): \n \n with tf.variable_scope(name, reuse=reuse): \n first_stride = 2 if downsample else 1\n res = x \n if preactive:\n batch_norm_1 = batch_norm(x, is_train=train)\n act = first_activation(batch_norm_1)\n conv_1 = slim.conv2d(act, out_channels, kernel_size=kernel_size, stride=first_stride, \n activation_fn=last_activation, normalizer_fn=batch_norm, normalizer_params={'is_train': train}, 
trainable=train, scope='conv_1')\n \n conv_2 = slim.conv2d(conv_1, out_channels, kernel_size=kernel_size, stride=1, activation_fn=None, \n trainable=train, scope='conv_2')\n else:\n conv_1 = slim.conv2d(x, out_channels, kernel_size=kernel_size, stride=first_stride, activation_fn=first_activation, \n normalizer_fn=batch_norm, normalizer_params={'is_train': train}, trainable=train, scope='conv_1')\n \n last_normalizier = batch_norm if last_activation else None\n conv_2 = slim.conv2d(conv_1, out_channels, kernel_size=kernel_size, stride=1, activation_fn=last_activation, \n normalizer_fn=last_normalizier, normalizer_params={'is_train': train}, trainable=train, scope='conv_2') \n \n \n if projection:\n res = slim.conv2d(x, out_channels, kernel_size=[1, 1], stride=first_stride, activation_fn=None, scope='residual')\n else:\n in_channels = x.get_shape().as_list()[-1]\n if in_channels != out_channels:\n ch = (out_channels - in_channels) // 2\n res = tf.pad(res, [[0, 0], [0, 0], [0, 0], [ch, ch]])\n res = tf.add(res, conv_2) \n \n return res", "def ResNetB3D(x, mdlParams, placeholders=None):\r\n with tf.variable_scope('ResNetB3D'):\r\n with slim.arg_scope([slim.convolution], padding='SAME', activation_fn=tf.nn.relu, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), normalizer_fn=slim.batch_norm, normalizer_params={'is_training':placeholders['train_state'], 'epsilon':0.0001, 'decay':0.9, 'center':True, 'scale':True, 'activation_fn':None, 'updates_collections':tf.GraphKeys.UPDATE_OPS, 'fused': False}):\r\n # Initial part\r\n with tf.variable_scope('Initial'):\r\n layer = slim.convolution(x, 48, 3, stride=1, scope='conv1')\r\n layer = slim.convolution(layer, 64, 3, stride=2, scope='conv2')\r\n layer = slim.convolution(layer, 64, 3, stride=1, scope='conv3')\r\n layer = slim.convolution(layer, 64, 3, stride=1, scope='conv4')\r\n layer = slim.convolution(layer, 64, 3, stride=1, scope='conv5')\r\n # Resnet modules\r\n with tf.variable_scope('Resnet_modules'):\r\n # Initial output feature map size\r\n output_fm = mdlParams['ResNetB3D_FM']\r\n # Initial feature map sizes for bottleneck\r\n reduced_fm = mdlParams['ResNetB3D_Red_FM']\r\n # Iterate through all modules\r\n for i in range(len(mdlParams['ResNetB3D_Size'])):\r\n with tf.variable_scope('Module_%d'%(i)):\r\n # Iterate through all blocks inside the module\r\n for j in range(mdlParams['ResNetB3D_Size'][i]):\r\n with tf.variable_scope('Block_%d'%(j)):\r\n # Set desired output feature map dimension of the block and the desired stride for the first block in the module\r\n if j==0:\r\n output_fm = 2*output_fm\r\n reduced_fm = 2*reduced_fm\r\n block_stride = mdlParams['ResNetB3D_Stride'][i]\r\n else:\r\n block_stride = 1\r\n layer = resnetb_block(layer, output_fm, reduced_fm, block_stride)\r\n # GAP for 1D,2D,3D\r\n if len(layer.get_shape().as_list()) == 5:\r\n layer = math_ops.reduce_mean(layer, axis=[1,2,3], keep_dims = False, name='global_pool')\r\n elif len(layer.get_shape().as_list()) == 4:\r\n layer = math_ops.reduce_mean(layer, axis=[1,2], keep_dims = False, name='global_pool')\r\n else:\r\n layer = math_ops.reduce_mean(layer, axis=[1], keep_dims = False, name='global_pool')\r\n # Dense output layer\r\n output = slim.layers.fully_connected(layer, len(mdlParams['tar_range']), activation_fn=None)\r\n return output", "def createNets(x, y, layerNumMin, layerNumMax, minNodes, maxNodes, step=1):\n\n\t#create hidden layer structures \n\tstructures = []\n\tfor i in range(layerNumMin, 
layerNumMax):\n\t\tstructures.append(itertools.product(range(minNodes, maxNodes, step), repeat = i))\n\n\t#create nn's\n\tnetworks = []\n\tfor struct in structures:\n\t\tfor s in struct:\n\t\t\tnetworks.append(NeuralNetwork([x] + list(s) + [y]))\n\n\treturn networks", "def create_ResNet(self):\n \n resnet = ResNet50V2(include_top=False, weights='imagenet')\n \n dense_1 = Dense(128, activation='relu')\n dense_2 = Dense(128, activation='relu')\n dense_3 = Dense(1, activation='sigmoid')\n\n\n model = Sequential()\n model.add(InputLayer(input_shape=(100, 100, 3)))\n model.add(resnet)\n model.add(Flatten())\n model.add(dense_1)\n model.add(dense_2)\n model.add(dense_3)\n \n dense_1_weights = pickle.load(open('weights/dense_1_weights.pkl', 'rb'))\n dense_2_weights = pickle.load(open('weights/dense_2_weights.pkl', 'rb'))\n dense_3_weights = pickle.load(open('weights/dense_3_weights.pkl', 'rb'))\n\n dense_1.set_weights(dense_1_weights)\n dense_2.set_weights(dense_2_weights)\n dense_3.set_weights(dense_3_weights)\n \n #It is not necessary to compile a model in order to make a prediction\n\n return model", "def createResNetQNetwork(self, prefix=\"\"):\n\n\t\t# Configurations for each bottleneck group.\n\t\tBottleneckGroup = namedtuple('BottleneckGroup',['num_blocks', 'num_filters', 'bottleneck_size'])\n\t\tgroups = [BottleneckGroup(1, 32, 16)]\n\n\t\tstateInput = tf.placeholder(\"float\",[None,65,160,4])\n \n\t\t# First convolution expands to 64 channels\n\t\twith tf.variable_scope(prefix + 'conv_layer1'):\n\t\t\tnet = tf.layers.conv2d(\n\t\t\tstateInput,\n\t\t\tfilters=16,\n\t\t\tkernel_size=6,\n\t\t\tstrides=3, \n\t\t\tactivation=tf.nn.relu)\n\t\t\tW_conv1 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/kernel:0')\n\t\t\tb_conv1 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/bias:0')\n\t\t\tprint(\"W_conv1:\", W_conv1)\n\t\t\tprint(\"b_conv1:\", b_conv1)\n \n\t\t# Max pool\n\t\tnet = tf.layers.max_pooling2d(net, pool_size=2, strides=2, padding='same')\n\t\tpool_out_shape = net.get_shape().as_list()\n\t\tprint(\"Shape of the output of pool:\", pool_out_shape[0], pool_out_shape[1], pool_out_shape[2], pool_out_shape[3])\n\t\tpool_total_params = pool_out_shape[1] * pool_out_shape[2] * pool_out_shape[3]\n\t\tprint(\"Total params:\", pool_total_params)\n \n\t\t# First chain of resnets\n\t\twith tf.variable_scope(prefix + 'conv_layer2'):\n\t\t\tnet = tf.layers.conv2d(\n\t\t\tnet,\n\t\t\tfilters=groups[0].num_filters,\n\t\t\tkernel_size=1,\n\t\t\tpadding='valid')\n\t\t\tW_conv2 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/kernel:0')\n\t\t\tb_conv2 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/bias:0')\n\t\t\tprint(\"W_conv2:\", W_conv2)\n\t\t\tprint(\"b_conv2:\", b_conv2)\n \n\t\t# Create the bottleneck groups, each of which contains `num_blocks`\n\t\t# bottleneck groups.\n\t\tfor group_i, group in enumerate(groups):\n\t\t\tfor block_i in range(group.num_blocks):\n\t\t\t\tname = 'group_%d/block_%d' % (group_i, block_i)\n\n\t\t\t\t# 1x1 convolution responsible for reducing dimension\n\t\t\t\twith tf.variable_scope(prefix + name + '/conv_in'):\n\t\t\t\t\tconv = tf.layers.conv2d(\n\t\t\t\t\tnet,\n\t\t\t\t\tfilters=group.num_filters,\n\t\t\t\t\tkernel_size=1,\n\t\t\t\t\tpadding='valid',\n\t\t\t\t\tactivation=tf.nn.relu)\n\t\t\t\t\tW_conv3 = tf.get_default_graph().get_tensor_by_name(os.path.split(conv.name)[0] + '/kernel:0')\n\t\t\t\t\tb_conv3 = 
tf.get_default_graph().get_tensor_by_name(os.path.split(conv.name)[0] + '/bias:0')\n\t\t\t\t\tprint(\"W_conv3:\", W_conv3)\n\t\t\t\t\tprint(\"b_conv3:\", b_conv3)\n\n\n\t\t\t\twith tf.variable_scope(prefix + name + '/conv_bottleneck'):\n\t\t\t\t\tconv = tf.layers.conv2d(\n\t\t\t\t\tconv,\n\t\t\t\t\tfilters=group.bottleneck_size,\n\t\t\t\t\tkernel_size=3,\n\t\t\t\t\tpadding='same',\n\t\t\t\t\tactivation=tf.nn.relu)\n\t\t\t\t\tW_conv4 = tf.get_default_graph().get_tensor_by_name(os.path.split(conv.name)[0] + '/kernel:0')\n\t\t\t\t\tb_conv4 = tf.get_default_graph().get_tensor_by_name(os.path.split(conv.name)[0] + '/bias:0')\n\t\t\t\t\tprint(\"W_conv4:\", W_conv4)\n\t\t\t\t\tprint(\"b_conv4:\", b_conv4)\n\n\t\t\t\t# 1x1 convolution responsible for restoring dimension\n\t\t\t\twith tf.variable_scope(prefix + name + '/conv_out'):\n\t\t\t\t\tinput_dim = net.get_shape()[-1].value\n\t\t\t\t\tconv = tf.layers.conv2d(\n\t\t\t\t\tconv,\n\t\t\t\t\tfilters=input_dim,\n\t\t\t\t\tkernel_size=1,\n\t\t\t\t\tpadding='valid',\n\t\t\t\t\tactivation=tf.nn.relu)\n\t\t\t\t\tW_conv5 = tf.get_default_graph().get_tensor_by_name(os.path.split(conv.name)[0] + '/kernel:0')\n\t\t\t\t\tb_conv5 = tf.get_default_graph().get_tensor_by_name(os.path.split(conv.name)[0] + '/bias:0')\n\t\t\t\t\tprint(\"W_conv5:\", W_conv5)\n\t\t\t\t\tprint(\"b_conv5:\", b_conv5)\n\n\t\t\t\t# shortcut connections that turn the network into its counterpart\n\t\t\t\t# residual function (identity shortcut)\n\t\t\t\tnet = conv + net\n\n\t\t\ttry:\n\t\t\t\t# upscale to the next group size\n\t\t\t\tnext_group = groups[group_i + 1]\n\t\t\t\twith tf.variable_scope(prefix + 'block_%d/conv_upscale' % group_i):\n\t\t\t\t\tnet = tf.layers.conv2d(\n\t\t\t\t\t\tnet,\n\t\t\t\t\t\tfilters=next_group.num_filters,\n\t\t\t\t\t\tkernel_size=1,\n\t\t\t\t\t\tpadding='same',\n\t\t\t\t\t\tactivation=None,\n\t\t\t\t\t\tbias_initializer=None)\n\t\t\texcept IndexError:\n\t\t\t\tpass\n\n\t\t# Last convolution expands before FC layers maps to total size of 1056\n\t\twith tf.variable_scope(prefix + 'conv_layer6'):\n\t\t\tnet = tf.layers.conv2d(\n\t\t\tnet,\n\t\t\tfilters=16,\n\t\t\tkernel_size=1,\n\t\t\tactivation=tf.nn.relu)\n\t\t\tW_conv6 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/kernel:0')\n\t\t\tb_conv6 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/bias:0')\n\t\t\tprint(\"W_conv6:\", W_conv6)\n\t\t\tprint(\"b_conv6:\", b_conv6)\n\t\t\tconv6_shape = net.get_shape().as_list()\n\t\t\tprint(\"Shape of the output of conv6:\", conv6_shape[0], conv6_shape[1], conv6_shape[2], conv6_shape[3])\n\t\t\tconv6_total_params = conv6_shape[1] * conv6_shape[2] * conv6_shape[3]\n\t\t\tprint(\"Total params:\", conv6_total_params)\n\n\t\t# Reshape to pass to fully connected layers. 
\n\t\tnet_flat = tf.reshape(net,[-1,conv6_total_params])\n \n\t\t# Bottleneck to total size of 1056\n\t\twith tf.variable_scope(prefix + 'fc_layer1'):\n\t\t\tnet = tf.layers.dense(net_flat, 1056, activation=tf.nn.relu)\n\t\t\tW_fc1 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/kernel:0')\n\t\t\tb_fc1 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/bias:0')\n\t\t\tprint(\"W_fc1:\", W_fc1)\n\t\t\tprint(\"b_fc1:\", b_fc1)\n\t\t\tfc1_shape = net.get_shape().as_list()\n\t\t\tprint(\"fc1-shape:\", fc1_shape)\n\n\t\t# Bottleneck to total size of 176\n\t\twith tf.variable_scope(prefix + 'fc_layer2'):\n\t\t\tnet = tf.layers.dense(net, 176, activation=tf.nn.relu)\n\t\t\tW_fc2 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/kernel:0')\n\t\t\tb_fc2 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/bias:0')\n\t\t\tprint(\"W_fc2:\", W_fc2)\n\t\t\tprint(\"b_fc2:\", b_fc2)\n\t\t\tfc2_shape = net.get_shape().as_list()\n\t\t\tprint(\"fc2-shape:\", fc2_shape)\n\n\t\t# Bottleneck to total size of 44\n\t\twith tf.variable_scope(prefix + 'fc_layer3'):\n\t\t\tnet = tf.layers.dense(net, 44, activation=tf.nn.relu)\n\t\t\tW_fc3 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/kernel:0')\n\t\t\tb_fc3 = tf.get_default_graph().get_tensor_by_name(os.path.split(net.name)[0] + '/bias:0')\n\t\t\tprint(\"W_fc3:\", W_fc3)\n\t\t\tprint(\"b_fc3:\", b_fc3)\n\t\t\tfc3_shape = net.get_shape().as_list()\n\t\t\tprint(\"fc3-shape:\", fc3_shape)\n\n\t\t# Bottleneck to total size of 4\n\t\twith tf.variable_scope(prefix + 'fc_layer4'):\n\t\t\tQValue = tf.layers.dense(net, self.actions, activation=None)\n\t\t\tW_fc4 = tf.get_default_graph().get_tensor_by_name(os.path.split(QValue.name)[0] + '/kernel:0')\n\t\t\tb_fc4 = tf.get_default_graph().get_tensor_by_name(os.path.split(QValue.name)[0] + '/bias:0')\n\t\t\tprint(\"W_fc4:\", W_fc4)\n\t\t\tprint(\"b_fc4:\", b_fc4)\n\t\t\tqvalue_shape = QValue.get_shape().as_list()\n\t\t\tprint(\"qvalue-shape:\", qvalue_shape)\n \n\t\treturn stateInput, QValue, W_conv1, b_conv1, W_conv2, b_conv2, W_conv3, b_conv3, W_conv4, b_conv4, W_conv5, b_conv5, W_conv6, b_conv6, W_fc1, b_fc1, W_fc2, b_fc2, W_fc3, b_fc3, W_fc4, b_fc4", "def create_residual_cnn_model(chart_inputs: tf.keras.Input) -> models.Model:\n # Conv1 layer\n conv1_layer = layers.Conv2D(\n 32, (7, 7), strides=(2, 2), padding='valid',\n input_shape=(112, 112, 3), name=\"CONV1\", activation=\"relu\",\n data_format=\"channels_last\"\n )(zero_pad2d(chart_inputs, 3))\n\n # Max pooling layer\n max_pool_layer = layers.MaxPool2D(\n pool_size=(3, 3), padding='VALID', strides=(2, 2),\n name=\"MaxPooling\"\n )(conv1_layer)\n\n res_conv1 = create_res_conv1_layer(max_pool_layer)\n res_conv2 = create_res_conv2_layer(res_conv1)\n res_conv3 = create_res_conv3_layer(res_conv2)\n average_pool = layers.Flatten()(layers.AveragePooling2D(pool_size=(7,7))(res_conv3))\n\n full_connected_1 = layers.Dropout(0.5)(\n layers.Dense(500, activation='relu', use_bias=True)(average_pool)\n )\n full_connected_2 = layers.Dropout(0.5)(\n layers.Dense(100, activation='relu', use_bias=True)(full_connected_1)\n )\n full_connected_3 = layers.Dropout(0.5)(\n layers.Dense(25, activation='relu', use_bias=True)(full_connected_2)\n )\n # return full_connected_3\n output_layer = layers.Dense(1, activation='linear')(full_connected_3)\n return output_layer, average_pool", "def resnet_v1(input_shape, depth, kernel_size, rp_filters, num_classes=1):\n if (depth - 
2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape) \n \n rand_proj = Conv2D(filters = rp_filters, kernel_size = kernel_size, padding='same', activation='linear', strides = kernel_size, \n trainable=False, kernel_initializer = RandomNormal(mean=0.0, stddev = np.sqrt(rp_filters), seed=5))(inputs=inputs)\n \n\n# inputs = Input(shape=input_shape) \n \n x = resnet_layer(rand_proj)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n x = Flatten()(x)\n \n \n \n\n x1 = Dense(1024)(x)\n x1 = Activation('relu')(x1)\n x1 = Dropout(0.5)(x1)\n x1 = Dense(512)(x1)\n x1 = Activation('relu')(x1)\n x1 = Dropout(0.5)(x1)\n \n x1 = Dense(num_classes)(x1)\n x1 = Activation('sigmoid', name='a1')(x1)\n \n x2 = Dense(1024)(x)\n x2 = Activation('relu')(x2)\n x2 = Dropout(0.5)(x2)\n x2 = Dense(512)(x2)\n x2 = Activation('relu')(x2)\n x2 = Dropout(0.5)(x2) \n \n x2 = Dense(num_classes)(x2)\n x2 = Activation('sigmoid', name='a2')(x2)\n \n x3 = Dense(1024)(x)\n x3 = Activation('relu')(x3)\n x3 = Dropout(0.5)(x3)\n x3 = Dense(512)(x3)\n x3 = Activation('relu')(x3)\n x3 = Dropout(0.5)(x3)\n \n x3 = Dense(num_classes)(x3)\n x3 = Activation('sigmoid', name='a3')(x3)\n \n x4 = Dense(1024)(x)\n x4 = Activation('relu')(x4)\n x4 = Dropout(0.5)(x4)\n x4 = Dense(512)(x4)\n x4 = Activation('relu')(x4)\n x4 = Dropout(0.5)(x4)\n \n x4 = Dense(num_classes)(x4)\n x4 = Activation('sigmoid', name='a4')(x4)\n \n x5 = Dense(1024)(x)\n x5 = Activation('relu')(x5)\n x5 = Dropout(0.5)(x5)\n x5 = Dense(512)(x5)\n x5 = Activation('relu')(x5)\n x5 = Dropout(0.5)(x5)\n \n x5 = Dense(num_classes)(x5)\n x5 = Activation('sigmoid', name='a5')(x5)\n \n x6 = Dense(1024)(x)\n x6= Activation('relu')(x6)\n x6 = Dropout(0.5)(x6)\n x6 = Dense(512)(x6)\n x6 = Activation('relu')(x6)\n x6 = Dropout(0.5)(x6)\n \n x6 = Dense(num_classes)(x6)\n x6= Activation('sigmoid', name='a6')(x6)\n \n x7 = Dense(1024)(x)\n x7 = Activation('relu')(x7)\n x7 = Dropout(0.5)(x7)\n x7 = Dense(512)(x7)\n x7 = Activation('relu')(x7)\n x7 = Dropout(0.5)(x7)\n \n x7 = Dense(num_classes)(x7)\n x7 = Activation('sigmoid', name='a7')(x7)\n \n x8 = Dense(1024)(x)\n x8 = Activation('relu')(x8)\n x8 = Dropout(0.5)(x8)\n x8 = Dense(512)(x8)\n x8 = Activation('relu')(x8)\n x8 = Dropout(0.5)(x8)\n \n x8 = Dense(num_classes)(x8)\n x8 = Activation('sigmoid', name='a8')(x8)\n \n x9 = Dense(1024)(x)\n x9 = Activation('relu')(x9)\n x9 = Dropout(0.5)(x9)\n x9 = Dense(512)(x9)\n x9 = Activation('relu')(x9)\n x9 = Dropout(0.5)(x9)\n \n x9 = Dense(num_classes)(x9)\n x9 = Activation('sigmoid', name='a9')(x9)\n \n x10 = Dense(1024)(x)\n x10 = 
Activation('relu')(x10)\n x10 = Dropout(0.5)(x10)\n x10 = Dense(512)(x10)\n x10 = Activation('relu')(x10)\n x10 = Dropout(0.5)(x10)\n \n x10 = Dense(num_classes)(x10)\n x10 = Activation('sigmoid', name='a10')(x10)\n \n x11 = Dense(1024)(x)\n x11 = Activation('relu')(x11)\n x11 = Dropout(0.5)(x11)\n x11 = Dense(512)(x11)\n x11 = Activation('relu')(x11)\n x11= Dropout(0.5)(x11)\n \n x11 = Dense(num_classes)(x11)\n x11 = Activation('sigmoid', name='a11')(x11)\n \n x12 = Dense(1024)(x)\n x12 = Activation('relu')(x12)\n x12 = Dropout(0.5)(x12)\n x12 = Dense(512)(x12)\n x12 = Activation('relu')(x12)\n x12 = Dropout(0.5)(x12)\n \n x12 = Dense(num_classes)(x12)\n x12 = Activation('sigmoid', name='a12')(x12)\n \n x13 = Dense(1024)(x)\n x13 = Activation('relu')(x13)\n x13 = Dropout(0.5)(x13)\n x13 = Dense(512)(x13)\n x13 = Activation('relu')(x13)\n x13 = Dropout(0.5)(x13)\n \n x13= Dense(num_classes)(x13)\n x13 = Activation('sigmoid', name='a13')(x13)\n \n x14 = Dense(1024)(x)\n x14 = Activation('relu')(x14)\n x14 = Dropout(0.5)(x14)\n x14 = Dense(512)(x14)\n x14 = Activation('relu')(x14)\n x14 = Dropout(0.5)(x14)\n \n x14 = Dense(num_classes)(x14)\n x14 = Activation('sigmoid', name='a14')(x14)\n \n x15 = Dense(1024)(x)\n x15 = Activation('relu')(x15)\n x15 = Dropout(0.5)(x15)\n x15 = Dense(512)(x15)\n x15 = Activation('relu')(x15)\n x15 = Dropout(0.5)(x15)\n \n x15 = Dense(num_classes)(x15)\n x15 = Activation('sigmoid', name='a15')(x15)\n \n x16 = Dense(1024)(x)\n x16 = Activation('relu')(x16)\n x16 = Dropout(0.5)(x16)\n x16 = Dense(512)(x16)\n x16 = Activation('relu')(x16)\n x16 = Dropout(0.5)(x16)\n \n x16 = Dense(num_classes)(x16)\n x16 = Activation('sigmoid', name='a16')(x16)\n \n x17 = Dense(1024)(x)\n x17 = Activation('relu')(x17)\n x17 = Dropout(0.5)(x17)\n x17 = Dense(512)(x17)\n x17 = Activation('relu')(x17)\n x17 = Dropout(0.5)(x17)\n \n x17 = Dense(num_classes)(x17)\n x17 = Activation('sigmoid', name='a17')(x17)\n \n x18 = Dense(1024)(x)\n x18 = Activation('relu')(x18)\n x18 = Dropout(0.5)(x18)\n x18 = Dense(512)(x18)\n x18 = Activation('relu')(x18)\n x18 = Dropout(0.5)(x18)\n \n x18 = Dense(num_classes)(x18)\n x18 = Activation('sigmoid', name='a18')(x18)\n \n x19 = Dense(1024)(x)\n x19 = Activation('relu')(x19)\n x19 = Dropout(0.5)(x19)\n x19 = Dense(512)(x19)\n x19 = Activation('relu')(x19)\n x19 = Dropout(0.5)(x19)\n \n x19 = Dense(num_classes)(x19)\n x19 = Activation('sigmoid', name='a19')(x19)\n \n x20 = Dense(1024)(x)\n x20 = Activation('relu')(x20)\n x20 = Dropout(0.5)(x20)\n x20 = Dense(512)(x20)\n x20 = Activation('relu')(x20)\n x20 = Dropout(0.5)(x20)\n \n x20 = Dense(num_classes)(x20)\n x20 = Activation('sigmoid', name='a20')(x20)\n \n x21 = Dense(1024)(x)\n x21 = Activation('relu')(x21)\n x21= Dropout(0.5)(x21)\n x21 = Dense(512)(x21)\n x21 = Activation('relu')(x21)\n x21 = Dropout(0.5)(x21)\n \n x21 = Dense(num_classes)(x21)\n x21 = Activation('sigmoid', name='a21')(x21)\n \n \n # Define steering-collision model\n model = Model(inputs=[inputs], outputs=[x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21])\n print(model.summary())\n return model", "def resnet101(pretrained=True):\n model = ResNet(Bottleneck, [3, 4, 23, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet_v1(resnet_size):\n model_params = {\n 18: {'block': building_block, 'layers': [2, 2, 2, 2]},\n 34: {'block': building_block, 'layers': [3, 4, 6, 3]},\n }\n\n if resnet_size not in model_params:\n raise ValueError('Not a 
valid resnet_size:', resnet_size)\n\n params = model_params[resnet_size]\n return objective_resnet_model(\n params['block'], params['layers'])", "def __init__(self, state_dim, hidden_dim, init_w=3e-3):\n super(ValueNetwork, self).__init__()\n\n self.linear1 = nn.Linear(state_dim, hidden_dim)\n self.linear2 = nn.Linear(hidden_dim, hidden_dim)\n self.linear3 = nn.Linear(hidden_dim, 1)\n\n self.linear3.weight.data.uniform_(-init_w, init_w)\n self.linear3.bias.data.uniform_(-init_w, init_w)", "def create_network(self, neurons_input=1, neurons_hidden=0):\n\t\t\n\t\tself.rate = 0.01\t#Learning rate\n\t\tself.weights_input = []\n\t\tself.weights_hidden = []\n\t\tself.weights_output = []\n\t\tself.neurons_input = neurons_input\n\t\tself.neurons_hidden = neurons_hidden\n\n\t\tif neurons_input > 1:\n\t\t\tneurons_output = 1\n\t\telse:\n\t\t\tneurons_output = 0\n\t\tself.neurons_output = neurons_output\n\n\t\t# set random starting weights\n\t\tfor i in range(neurons_input):\n\t\t\tself.weights_input.append(randint(-1,1))\n\t\tfor i in range(neurons_hidden):\n\t\t\tfor j in range(neurons_input*neurons_hidden):\n\t\t\t\tself.weights_hidden.append(randint(-1,1))\n\t\tfor i in range(neurons_output):\n\t\t\tfor j in range(neurons_hidden):\n\t\t\t\tself.weights_output.append(randint(-1,1))", "def resnext101_32x4d(pretrained=False, **kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 23, 3], 32, 4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(\n model_urls['resnext101_32x4d']))\n return model", "def create_growth_transition_encoder_network(\n self, X, alpha_var, params, trans_idx):\n print_obj(\n \"\\nEntered create_growth_transition_encoder_network\",\n \"trans_idx\",\n trans_idx\n )\n print_obj(\"create_growth_transition_encoder_network\", \"X\", X)\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Growing side chain.\n growing_from_rgb_conv_layer = self.from_rgb_conv_layers[trans_idx + 1]\n growing_block_layers = self.conv_layer_blocks[trans_idx + 1]\n\n # Pass inputs through layer chain.\n growing_block_conv = growing_from_rgb_conv_layer(inputs=X)\n print_obj(\n \"\\ncreate_growth_transition_encoder_network\",\n \"growing_block_conv\",\n growing_block_conv\n )\n for i in range(len(growing_block_layers)):\n growing_block_conv = growing_block_layers[i](\n inputs=growing_block_conv\n )\n print_obj(\n \"create_growth_transition_encoder_network\",\n \"growing_block_conv\",\n growing_block_conv\n )\n\n # Shrinking side chain.\n transition_downsample_layer = self.transition_downsample_layers[trans_idx]\n shrinking_from_rgb_conv_layer = self.from_rgb_conv_layers[trans_idx]\n\n # Pass inputs through layer chain.\n transition_downsample = transition_downsample_layer(inputs=X)\n print_obj(\n \"create_growth_transition_encoder_network\",\n \"transition_downsample\",\n transition_downsample\n )\n shrinking_from_rgb_conv = shrinking_from_rgb_conv_layer(\n inputs=transition_downsample\n )\n print_obj(\n \"create_growth_transition_encoder_network\",\n \"shrinking_from_rgb_conv\",\n shrinking_from_rgb_conv\n )\n\n # Weighted sum.\n weighted_sum = tf.add(\n x=growing_block_conv * alpha_var,\n y=shrinking_from_rgb_conv * (1.0 - alpha_var),\n name=\"{}_growth_transition_weighted_sum_{}\".format(\n self.name, trans_idx\n )\n )\n print_obj(\n \"create_growth_transition_encoder_network\",\n \"weighted_sum\",\n weighted_sum\n )\n\n # Permanent blocks.\n permanent_blocks = self.conv_layer_blocks[0:trans_idx + 1]\n\n # Reverse order of blocks and flatten.\n permanent_block_layers = 
[\n item for sublist in permanent_blocks[::-1] for item in sublist\n ]\n\n # Pass inputs through layer chain.\n block_conv = weighted_sum\n\n # Find number of permanent growth conv layers.\n num_perm_growth_conv_layers = len(permanent_block_layers)\n num_perm_growth_conv_layers -= len(params[\"conv_num_filters\"][0])\n\n # Loop through only the permanent growth conv layers.\n for i in range(num_perm_growth_conv_layers):\n block_conv = permanent_block_layers[i](inputs=block_conv)\n print_obj(\n \"create_growth_transition_encoder_network\",\n \"block_conv_{}\".format(i),\n block_conv\n )\n\n # Loop through only the permanent base conv layers now.\n for i in range(\n num_perm_growth_conv_layers, len(permanent_block_layers)):\n block_conv = permanent_block_layers[i](inputs=block_conv)\n print_obj(\n \"create_growth_transition_encoder_network\",\n \"block_conv_{}\".format(i),\n block_conv\n )\n\n # Get logits now.\n logits = self.use_encoder_logits_layer(\n block_conv=block_conv, params=params\n )\n print_obj(\n \"create_growth_transition_encoder_network\",\n \"logits\",\n logits\n )\n\n return logits", "def __init__(self, layers):\n self.num_layers = len(layers)\n self.layers = layers\n self.weights = [np.random.randn(layers[i], layers[i-1] + 1) for i in range(1, len(layers))]", "def test_create_resnet_with_callable(self):\n for (norm, activation) in itertools.product(\n (nn.BatchNorm3d, None), (nn.ReLU, nn.Sigmoid, None)\n ):\n input_channel = 3\n input_clip_length = 4\n input_crop_size = 56\n model_depth = 50\n stage_spatial_stride = (2, 1, 1, 1)\n stage_temporal_stride = (2, 1, 1, 1)\n model_gt, num_class = self._build_resnet(\n input_channel,\n input_clip_length,\n input_crop_size,\n model_depth,\n norm,\n activation,\n )\n\n total_spatial_stride = 4 * np.prod(stage_spatial_stride)\n total_temporal_stride = np.prod(stage_temporal_stride)\n head_pool_kernel_size = (\n input_clip_length // total_temporal_stride,\n input_crop_size // total_spatial_stride,\n input_crop_size // total_spatial_stride,\n )\n\n model = create_resnet(\n input_channel=input_channel,\n model_depth=50,\n model_num_class=num_class,\n dropout_rate=0,\n norm=norm,\n activation=activation,\n stem_dim_out=8,\n stem_conv_kernel_size=(3, 7, 7),\n stem_conv_stride=(1, 2, 2),\n stem_pool=nn.MaxPool3d,\n stem_pool_kernel_size=(1, 3, 3),\n stem_pool_stride=(1, 2, 2),\n stage_conv_a_kernel_size=((3, 1, 1),) * 4,\n stage_conv_b_kernel_size=((1, 3, 3),) * 4,\n stage_spatial_h_stride=stage_spatial_stride,\n stage_spatial_w_stride=stage_spatial_stride,\n stage_temporal_stride=stage_temporal_stride,\n bottleneck=create_bottleneck_block,\n head_pool=nn.AvgPool3d,\n head_pool_kernel_size=head_pool_kernel_size,\n head_output_size=(1, 1, 1),\n head_activation=nn.Softmax,\n )\n\n model.load_state_dict(\n model_gt.state_dict(), strict=True\n ) # explicitly use strict mode.\n\n # Test forwarding.\n for tensor in TestResNet._get_inputs(\n input_channel, input_clip_length, input_crop_size\n ):\n with torch.no_grad():\n if tensor.shape[1] != input_channel:\n with self.assertRaises(RuntimeError):\n out = model(tensor)\n continue\n\n out = model(tensor)\n out_gt = model_gt(tensor)\n\n self.assertEqual(\n out.shape,\n out_gt.shape,\n \"Output shape {} is different from expected shape {}\".format(\n out.shape, out_gt.shape\n ),\n )\n self.assertTrue(\n np.allclose(out.numpy(), out_gt.numpy(), rtol=1e-1, atol=1e-1)\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
basic JSON message structure
def msg_structure(status="", msg=""):
    return {
        "status": status,
        "msg": msg
    }
[ "def create_JSON_message(message_type, body=None):\n clientID= partnerID+\"/\"+groupID+\"/\"+deviceID \n message= {\"clientID\" : clientID, \"type\": message_type, \"body\": body}\n return json.dumps(message)", "def create_json_message(county, state, rank, timestamp, creator_id, message_id,\n sender_id, message):\n message_info = {\"county\": county,\n \"state\": state,\n \"rank\": rank,\n \"timestamp\": timestamp,\n \"creatorID\": creator_id,\n \"messageID\": message_id,\n \"senderID\": sender_id,\n \"message\": message}\n\n return json.dumps(message_info)", "def json_dumps(msg):\n return json.dumps(msg)", "def jsonify(topic, msg):\n return topic + ' ' + json.dumps({'message':msg})", "def __encodeMessage(self):\n data = {\n \"Inputs\": {\n \"input1\": {\n \"ColumnNames\": [\"Commit message\"],\n \"Values\": [[self.__msg]]\n },\n },\n \"GlobalParameters\": {}\n }\n return str.encode(json.dumps(data))", "def direct_messages_to_json(messages):\n s = '['\n for i in range(len(messages) - 1):\n s += json.dumps(messages[i]._json, ensure_ascii=False)\n s +=','\n s+= json.dumps(messages[-1]._json, ensure_ascii=False)\n s += ']'\n return s", "def _handle_json(self, msg: XStruct, channel: ServiceChannel) -> None:\n self.on_json(msg, channel)", "def json_rpc_format(self):\n\n error = {\n 'name': text_type(self.__class__.__name__),\n 'code': self.code,\n 'message': '{0}'.format(text_type(self.message)),\n 'data': self.data\n }\n\n if current_app.config['DEBUG']:\n import sys, traceback\n error['stack'] = traceback.format_exc()\n error['executable'] = sys.executable\n\n return error", "def msg(self, msg_type, content=None, parent=None):\n msg = {}\n msg['header'] = self.msg_header()\n msg['parent_header'] = {} if parent is None else extract_header(parent)\n msg['msg_type'] = msg_type\n msg['content'] = {} if content is None else content\n return msg", "def serialize(self, msg):\n msg_header = msg.get_header()\n data = msg.get_data()\n json_str_obj = jsonpickle.encode(\n {\n 'header': msg_header,\n 'data': data,\n })\n return json_str_obj.encode() # return a byte stream", "def value(self):\n return {'type': self.type, 'message': self.message}", "def presence_message():\n message = {\n 'action': 'presence',\n 'time': time.ctime(),\n 'type': 'status',\n 'user': {\n 'account_name': client,\n 'status': status,\n }\n }\n return json.dumps(message)", "def create_message(self):\n request = self.create_request()\n headers = self.create_header_str()\n data = self.body\n return \"%s%s\\r\\n%s\" % (request, headers, data)", "def _create_message_payload(ctx_data):\n member = ctx_data.get(\"member\")\n return {\n \"id\": ctx_data[\"id\"],\n \"channel_id\": ctx_data[\"channel_id\"],\n \"guild_id\": ctx_data[\"guild_id\"],\n \"member\": member,\n \"author\": member[\"user\"] if member else ctx_data.get(\"user\"),\n \"content\": \"\",\n \"tts\": False,\n \"mention_everyone\": False,\n \"mentions\": [],\n \"mention_roles\": [],\n \"mention_channels\": [],\n \"attachments\": [],\n \"embeds\": [],\n \"reactions\": [],\n \"pinned\": False,\n \"type\": 0,\n \"edited_timestamp\": None\n }", "def json(self):\n return {\n \"id\": self.id,\n \"string_id\": self.string_id,\n \"upvotes\": self.upvotes,\n \"downvotes\": self.downvotes,\n \"karma\": self.karma,\n \"created\": self.created,\n }", "def to_json(self):\n\n result = super(Webhook, self).to_json()\n result.update({\n 'name': self.name,\n 'url': self.url,\n 'topics': self.topics,\n 'httpBasicUsername': self.http_basic_username,\n 'headers': self.headers\n })\n\n if 
self.filters:\n result.update({'filters': self.filters})\n\n if self.transformation:\n result.update({'transformation': self.transformation})\n\n return result", "def _generate_json_response(self, context):\n raise NotImplementedError", "def __init__(self, message, config, logger):\n self.config = config\n self.logger = logger\n if type(message) is bytes or type(message) is str:\n try: \n self.json = simplejson.loads(message)\n if not self.json:\n raise JSONParseError(message, \"Failed to parse message as json\")\n except Exception as ex:\n #print(type(ex))\n #print(ex.args)\n raise\n elif type(message) is dict:\n self.json = message\n else:\n raise JSONParseError(message, \"message neither bytes, str, nor dict: \" + str(type(message)))", "def _reprJSON(self):\n return {'__Fgi__': self.__dict__}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
contains all mime types for HTTP request
def all_mime_types():
    return {
        ".aac": "audio/aac",
        ".abw": "application/x-abiword",
        ".arc": "application/octet-stream",
        ".avi": "video/x-msvideo",
        ".azw": "application/vnd.amazon.ebook",
        ".bin": "application/octet-stream",
        ".bz": "application/x-bzip",
        ".bz2": "application/x-bzip2",
        ".csh": "application/x-csh",
        ".css": "text/css",
        ".csv": "text/csv",
        ".doc": "application/msword",
        ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        ".eot": "application/vnd.ms-fontobject",
        ".epub": "application/epub+zip",
        ".gif": "image/gif",
        ".htm": ".htm",
        ".html": "text/html",
        ".ico": "image/x-icon",
        ".ics": "text/calendar",
        ".jar": "application/java-archive",
        ".jpeg": ".jpeg",
        ".jpg": "image/jpeg",
        ".js": "application/javascript",
        ".json": "application/json",
        ".mid": ".mid",
        ".midi": "audio/midi",
        ".mpeg": "video/mpeg",
        ".mpkg": "application/vnd.apple.installer+xml",
        ".odp": "application/vnd.oasis.opendocument.presentation",
        ".ods": "application/vnd.oasis.opendocument.spreadsheet",
        ".odt": "application/vnd.oasis.opendocument.text",
        ".oga": "audio/ogg",
        ".ogv": "video/ogg",
        ".ogx": "application/ogg",
        ".otf": "font/otf",
        ".png": "image/png",
        ".pdf": "application/pdf",
        ".ppt": "application/vnd.ms-powerpoint",
        ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        ".rar": "application/x-rar-compressed",
        ".rtf": "application/rtf",
        ".sh": "application/x-sh",
        ".svg": "image/svg+xml",
        ".swf": "application/x-shockwave-flash",
        ".tar": "application/x-tar",
        ".tif": ".tif",
        ".tiff": "image/tiff",
        ".ts": "application/typescript",
        ".ttf": "font/ttf",
        ".vsd": "application/vnd.visio",
        ".wav": "audio/x-wav",
        ".weba": "audio/webm",
        ".webm": "video/webm",
        ".webp": "image/webp",
        ".woff": "font/woff",
        ".woff2": "font/woff2",
        ".xhtml": "application/xhtml+xml",
        ".xls": "application/vnd.ms-excel",
        ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        ".xml": "application/xml",
        ".xul": "application/vnd.mozilla.xul+xml",
        ".zip": "application/zip",
        ".3gp": "video/3gpp",
        "audio/3gpp": "video",
        ".3g2": "video/3gpp2",
        "audio/3gpp2": "video",
        ".7z": "application/x-7z-compressed",
        ".pcap": "application/cap"
    }
[ "def mimetypes_for(self, m):\r\n if m not in self.methods:\r\n return []\r\n return self._string_to_list(self.methods[m]['mimetype'])", "def mime_types(self):\n return {attr.name: attr.mime_type for attr in self.attributes if attr.mime_type}", "def valid_content_types() -> List[str]:", "def get_accepted_content_types(request):\n def qualify(raw_content_type):\n parts = raw_content_type.split(';', 1)\n if len(parts) == 2:\n match = re.match(\n r'(^|;)q=(0(\\.\\d{,3})?|1(\\.0{,3})?)(;|$)',\n parts[1]\n )\n if match:\n return parts[0], float(match.group(2))\n return parts[0], 1\n\n raw_content_types = request.META.get('HTTP_ACCEPT', '*/*').split(',')\n qualified_content_types = map(qualify, raw_content_types)\n return (\n x[0] for x in sorted(\n qualified_content_types, key=lambda x: x[1], reverse=True\n )\n )", "def test_file_types(self):\n obj = self._request(\"test.html\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"text/html\", \"Content type must be text/html\")\n obj = self._request(\"test.js\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"application/javascript\",\n \"Content type must be application/javascript\")\n obj = self._request(\"test.css\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"text/css\", \"Content type must be text/css\")\n obj = self._request(\"test.png\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"image/png\", \"Content type must be image/png\")\n obj = self._request(\"test.jpg\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"image/jpeg\", \"Content type must be image/jpeg\")", "def mimeTypes(self):\n list = QStringList()\n list << self.mimeType()\n return list", "def accepts(request, media_type):\n accept = parse_accept_header(request.META.get(\"HTTP_ACCEPT\", \"\"))\n return media_type in [t for (t, p, q) in accept]", "def match_mime_type(ext):\n return {\n '.txt': 'text/plain',\n '.png': 'image/png',\n '.pdf': 'application/pdf',\n '.php': 'application/x-httpd-php',\n '.svg': 'image/svg+xml',\n '.ttf': 'font/ttf',\n '.zip': 'application/zip',\n '.htm': 'text/html',\n '.html': 'text/html',\n '.gif': 'image/gif',\n '.js': 'text/javascript',\n '.json': 'application/json'\n }.get(ext, \"text/html\")", "def get_mimetypes_mapping(cls) -> typing.List[MimetypeMapping]:\n return []", "def get_content_type(request: Request) -> str:\n return request.content_type.split(\";\")[0].strip()", "def content_types(types):\n def decorator(func):\n def guard(self, *values, **kwargs):\n content_type = self.request.META.get('CONTENT_TYPE', \"application/octet-stream\")\n if content_type.split(';')[0].strip().lower() in types:\n return func(self, *values, **kwargs)\n return None\n return guard\n return decorator", "def ct_mimetype(content_type):\n return (content_type or '').split(';')[0].strip()", "def types_from_mime_string(cls, mime_string):\n parts = mime_string.split(cls.PARAM_DELIM, 1)[0].split(cls.TYPE_DELIM, 1)\n return parts[0], parts[1]", "def allow_content_types(self):\n return self.properties.get(\"AllowContentTypes\", None)", "def http_get_url_mime_type(url):\n \n content_type = None # Set default value\n logger = fmeobjects.FMELogFile() # Create a logger\n \n try:\n # Suppress warning\n requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)\n \n # Make a head request to get only the header (not the content)\n response = requests.head(url, timeout=TIMEOUT, verify=False)\n status_code = response.status_code\n text 
= \"HTTP call -- Status code: {0}; URL {1}\".format(status_code, url)\n logger.logMessageString(text, fmeobjects.FME_INFORM)\n \n headers = response.headers\n content_type = headers.get(\"content-type\") \n if content_type is None:\n # If content-type is empty try to read the data and check if it's an HTML document\n headers = {\"Range\": \"bytes=0-25\"} # Request a range if server can handle it (faster)\n request = requests.get(url,headers=headers, timeout=TIMEOUT, verify=False)\n text = request.text\n if '<!DOCTYPE html' in text[0:20]:\n content_type = \"text/html\"\n else:\n # Not an HTML document.\n pass\n\n except:\n # An error has occured nothing to do \n pass\n \n return content_type", "def is_mimetype(v):\n return rx_mimetype.match(v) is not None", "def require_mime(*mimes):\n @decorator\n def wrap(f, self, request, *args, **kwargs):\n m = Mimer(request)\n realmimes = set()\n\n rewrite = { 'json': 'application/json',\n 'yaml': 'application/x-yaml',\n 'xml': 'text/xml',\n 'pickle': 'application/python-pickle' }\n\n for idx, mime in enumerate(mimes):\n realmimes.add(rewrite.get(mime, mime))\n\n if not m.content_type() in realmimes:\n return rc.BAD_REQUEST\n\n return f(self, request, *args, **kwargs)\n return wrap", "def mime(self):\n return self._mime", "def mimetype(self, content_type):\n import webob\n\n if content_type in MIMETypes.aliases:\n content_type = MIMETypes.aliases[content_type]\n path = self.env['PATH_INFO']\n guess_from_url = mimetypes.guess_type(path)[0]\n possible_from_accept_header = None\n has_extension = False\n if len(path.split('/')) > 1:\n last_part = path.split('/')[-1]\n if '.' in last_part:\n has_extension = True\n if 'HTTP_ACCEPT' in self.env:\n possible_from_accept_header = webob.acceptparse.MIMEAccept('ACCEPT', \n self.env['HTTP_ACCEPT'])\n if has_extension == False:\n if possible_from_accept_header is None:\n return self._set_response_content_type(content_type)\n elif content_type in possible_from_accept_header:\n return self._set_response_content_type(content_type)\n else:\n return False\n if content_type == guess_from_url:\n # Guessed same mimetype\n return self._set_response_content_type(content_type)\n elif guess_from_url is None and content_type in possible_from_accept_header:\n return self._set_response_content_type(content_type)\n else:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find the root directory for web static files
def root_dir(): return os.path.join( os.path.join( os.path.dirname(os.path.dirname(__file__)), "web" ), "static" )
[ "def static_folder(self) -> str:\n return path.join(\"web\", \"static\")", "def static_dir():\n tests_dir = os.path.dirname(__file__)\n return os.path.join(tests_dir, 'static')", "def static_dir(self):\n return os.path.join('front', self.slug, 'static')", "def staticpath(self):\n return abspath('content/app-static', self.name)", "def generate_public_directory():\n copytree(static_directory, public_directory, dirs_exist_ok=True)", "def public_filesystem_location():\n return os.path.join(project_root_filesystem_location(), 'public')", "def get_static_dirs():\n package = pkg_resources.Requirement.parse (\"bqserver\")\n package_path = pkg_resources.resource_filename(package,'bq')\n return [(package_path, os.path.join(package_path, 'usage', 'public'))]", "def static():\n return python(\"from django.conf import settings;\"\n \"print(settings.STATIC_ROOT)\").split(\"\\n\")[-1]", "def _make_static_dir_path(cwd, static_dir):\n if not static_dir:\n return None\n\n static_dir_path = os.path.join(cwd, static_dir)\n if os.path.exists(static_dir_path):\n LOG.info(\"Mounting static files from %s at /\", static_dir_path)\n return static_dir_path\n\n return None", "def get_media_root():\r\n if getattr(settings, 'JINGO_MINIFY_USE_STATIC', True):\r\n return settings.STATIC_ROOT\r\n return settings.MEDIA_ROOT", "def setup_staticfiles():\n print yellow(stage_msg('Creating static files directories…'))\n with cd(env.config['directory']):\n run('mkdir -p public/{media,static}')", "def script_root(self):\n path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',\n self.charset, self.encoding_errors)\n return path.rstrip('/') + '/'", "def __root_directory__(config) :\n path_config = config.get('ContentPaths', {})\n return os.path.realpath(path_config.get('PService', os.path.join(os.environ['HOME'], '.toxaway')))", "def resolve_links(url, rel):\n url = url.replace('../', settings.STATIC_URL) if url.startswith('../') else url\n\n # First, try to resolve resources to STATIC_ROOT:\n url = url.replace(settings.STATIC_URL, 'portfolio/static/')\n path = os.path.join(settings.PROJECT_BASE, *url.split('/'))\n\n if not os.path.exists(path):\n # This is probably some user-uploaded media, so use MEDIA_ROOT:\n url = url.replace(settings.MEDIA_URL, '')\n path = os.path.join(settings.MEDIA_ROOT, *url.split('/'))\n\n return path", "def get_assets_dir(self):\n\t\treturn self._env.get_assets_dir()", "def base_directory():\n return os.path.dirname(os.path.realpath(__file__)) + os.path.sep", "def _find_java_web_context(self):\n globPath = os.path.join(self.workingDir, '**')\n results = glob.glob(globPath, recursive=True)\n webContextDir = None\n for r in results:\n if 'WEB-INF' in r:\n webContextDir = r\n if not webContextDir:\n return \"web/\"\n\n webContextDir = webContextDir.split('WEB-INF')[0].replace(self.workingDir, '').lstrip('/')\n\n return webContextDir", "def static_url_webpack(request):\n return {\"STATIC_URL_WEBPACK\": settings.STATIC_URL.removesuffix(\"/\")}", "def getDocDir():\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fix the limit integer from user input
def fix_limit(limit): if limit: try: if int(limit) > 10000: return 10000 return int(limit) except Exception: pass return 10
[ "def set_Limit(self, value):\n super(ListIncidentsInputSet, self)._set_input('Limit', value)", "def _render_limit(limit):\n if not limit:\n return ''\n\n return \"LIMIT %s\" % limit", "def get_number(prompt, error_prompt, limit_prompt, min_num=0 - float('inf'), max_num=float('inf'), valid_type='either'):\n valid_input = False\n number = None\n while not valid_input:\n try:\n number = input(prompt)\n if valid_type == 'int':\n number = int(number)\n else:\n try:\n number = int(number)\n except ValueError:\n number = float(number)\n\n if not min_num <= number <= max_num:\n print(limit_prompt)\n else:\n valid_input = True\n except ValueError:\n print(error_prompt)\n return number", "def max_prompt():\n while True:\n try:\n max = re.sub(\"[, ]\", \"\", input(\"\\nMax donation (Press enter for default value): \"))\n return round(float(9999999), 2) if not max else round(float(max), 2)\n break\n except ValueError:\n print(\"\\n>> Please enter a valid maximum value <<\")", "async def xplimit(self, ctx, *, limit = None):\r\n\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\t\t\r\n\t\tif limit == None:\r\n\t\t\t# print the current limit\r\n\t\t\tserver_lim = self.settings.getServerStat(ctx.guild, \"XPLimit\")\r\n\t\t\tif server_lim == None:\r\n\t\t\t\tawait ctx.send(\"There is no xp limit.\")\r\n\t\t\t\treturn\r\n\t\t\telse:\r\n\t\t\t\tawait ctx.send(\"The current xp limit is *{:,}*.\".format(server_lim))\r\n\r\n\t\ttry:\r\n\t\t\tlimit = int(limit)\r\n\t\texcept Exception:\r\n\t\t\treturn await ctx.send(\"Limit must be an integer.\")\r\n\r\n\t\tif limit < 0:\r\n\t\t\tself.settings.setServerStat(ctx.guild, \"XPLimit\", None)\r\n\t\t\tawait ctx.send(\"Xp limit removed!\")\r\n\t\telse:\r\n\t\t\tself.settings.setServerStat(ctx.guild, \"XPLimit\", limit)\r\n\t\t\tawait ctx.send(\"Xp limit set to *{:,}*.\".format(limit))", "def setLimit(self, time):\r\n\t\tself.limit = int(time)", "def controleChoix(texte, mini, maxi):\n while True:\n choix = input(texte)\n try:\n choix = int(choix)\n assert choix in range(mini, maxi+1)\n except ValueError:\n print(\"Vous devez saisir un nombre !\")\n except AssertionError:\n print(\"Vous devez saisir un nombre entre {} et {}\".format(mini, maxi))\n else:\n return choix", "def set_number_limit(self, lower_limit=None, upper_limit=None, action=\"ignore\", value_type=None):\n limiter=limit.NumberLimit(lower_limit=lower_limit,upper_limit=upper_limit,action=action,value_type=value_type)\n self.change_limiter(limiter)", "async def _server_limit(self, ctx: commands.Context, num_servers: int):\n if num_servers < 1:\n return await ctx.send(\"Please enter a number greater than 0!\")\n await self.config.limit.set(num_servers)\n return await ctx.tick()", "def ask_int(name, v_min, v_max):\n\n name = name.lower()\n cname = name.capitalize()\n while True:\n v = input('Enter {} ({} - {}): '.format(name, v_min, v_max))\n try:\n v_int = int(v)\n except ValueError:\n print('Invalid {} {}'.format(name, v))\n continue\n else:\n if v_int in range(v_min, v_max + 1):\n break\n else:\n print('{} {} out of range'.format(cname, v))\n continue\n return v_int", "def get_num():\n i = 0\n while (i > 127) or (i < 1):\n try:\n i = int(input(\"Enter ID # from 1-127: \"))\n except ValueError:\n pass\n return i", "def set_limit(context, site, limit, value):\n _set_limits(context, site, ((limit, value),))", "def int_or_none(x, limit):\n try:\n value = int(x)\n if 1 <= value <= limit:\n return value\n else:\n return None\n except ValueError:\n return None", "async def xpreservelimit(self, ctx, *, 
limit = None):\r\n\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\t\t\r\n\t\tif limit == None:\r\n\t\t\t# print the current limit\r\n\t\t\tserver_lim = self.settings.getServerStat(ctx.guild, \"XPReserveLimit\")\r\n\t\t\tif server_lim == None:\r\n\t\t\t\tawait ctx.send(\"There is no xp reserve limit.\")\r\n\t\t\t\treturn\r\n\t\t\telse:\r\n\t\t\t\tawait ctx.send(\"The current xp reserve limit is *{:,}*.\".format(server_lim))\r\n\r\n\t\ttry:\r\n\t\t\tlimit = int(limit)\r\n\t\texcept Exception:\r\n\t\t\treturn await ctx.send(\"Limit must be an integer.\")\r\n\r\n\t\tif limit < 0:\r\n\t\t\tself.settings.setServerStat(ctx.guild, \"XPReserveLimit\", None)\r\n\t\t\tawait ctx.send(\"Xp reserve limit removed!\")\r\n\t\telse:\r\n\t\t\tself.settings.setServerStat(ctx.guild, \"XPReserveLimit\", limit)\r\n\t\t\tawait ctx.send(\"Xp reserve limit set to *{:,}*.\".format(limit))", "def __randomInt(self, limit: int) -> int:\n\t\treturn random.randrange(limit)", "def specify_turns_limit():\n try:\n usr_turns_choice = int(input(\"Specify turns limit (10-30), or leave blank to set no limit: \"))\n \n while usr_turns_choice > 30 or usr_turns_choice < 3:\n print(\"Incorrect choice. Choose from 10-30\")\n usr_turns_choice = int(input(\"Try again: \"))\n \n return usr_turns_choice\n\n except ValueError:\n return None", "def findNumber(maxVal):", "def _calculate_limit(self, default_limit, max_limit):\n if self._limit is None:\n return default_limit\n\n return min(self._limit, max_limit)", "def _check_max_num(self, obj):\n\n if obj.max_num is None:\n return []\n elif not isinstance(obj.max_num, int):\n return must_be(\"an integer\", option=\"max_num\", obj=obj, id=\"admin.E204\")\n else:\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fix the skip integer from user input
def fix_skip(skip): if skip: try: return int(skip) except Exception: pass return 0
[ "def get_num():\n i = 0\n while (i > 127) or (i < 1):\n try:\n i = int(input(\"Enter ID # from 1-127: \"))\n except ValueError:\n pass\n return i", "def update_ignore(arg):\n args = arg.split(' ', 1)\n bp, err = breakpoint_by_number(args[0])\n if bp:\n try:\n count = int(args[1].strip())\n except:\n err = 'Error, please enter: ignore <bpnumber> <count>'\n else:\n bp.ignore = count\n if count > 0:\n reply = 'Will ignore next '\n if count > 1:\n reply = reply + '%d crossings' % count\n else:\n reply = reply + '1 crossing'\n print reply + ' of breakpoint %d.' % bp.number\n else:\n print 'Will stop next time breakpoint',\n print bp.number, 'is reached.'\n return\n print '***', err", "def _needs_number(self, user_input):\n while not user_input.isdigit():\n user_input = input(\"You need to enter a number \")\n return int(user_input)", "def input_position(board):\n pos = 0\n while pos not in list(range(1, 10)) or not valid_move(board, pos):\n pos = int(input(f'Enter the index to put your marker[1-9]:\\t'))\n return pos", "def skip(sequence, number):\n cnt = 0\n for item in sequence:\n if cnt >= number:\n yield item\n else:\n cnt += 1", "def readNonNegativeInteger(prompt, error_prompt):\n n = -1\n try:\n n = int(input(prompt))\n except ValueError:\n n = -1\n if n < 0:\n # User entered an invalid value for n. Display error and ask them again\n print(error_prompt)\n n = readNonNegativeInteger(prompt, error_prompt)\n return n", "def _should_skip_number_elem(data, elem):\n number_system = elem.get('numberSystem', 'latn')\n\n if number_system != 'latn':\n data['unsupported_number_systems'].add(number_system)\n return True\n\n return False", "def select_players() -> int:\n while True:\n try:\n num_players: int = int(input(\"Enter number of players (1-4): \"))\n except ValueError:\n print(\"That is not a number between 1-4. Try again.\")\n continue\n else:\n if 0 < num_players < 5:\n return num_players\n break\n else:\n print(\"That is not a number between 1-4. 
Try again.\")\n continue", "def ask_numbers():", "def get_numeric_input(end: int, start: int | None = None) -> int:\n if start is None:\n start = 0\n else:\n start, end = end, start\n\n choice_ind = -1\n while choice_ind < start or end <= choice_ind:\n user_input = \"\"\n while not user_input.isdigit():\n user_input = input(f\"Enter a number from {start} - {end - 1}: \")\n choice_ind = int(user_input)\n return choice_ind", "def drop(drops):\n if not drops.isdigit():\n raise ValueError(\n 'Value must be a positive integer: {drops}'.format(\n drops=drops,\n )\n )\n # Python will automatically cast to a long integer for any x such\n # that x < -sys.maxint-1 or x > sys.maxint\n drops = int(drops)\n return drops", "def skip():\n streak = request.form.get(\"streak-break\")\n return redirect(url_for(\"index\", streak=streak))", "def test_skip(self):\n actual = self.view001(skip=10)['rows']\n expected = [{'key': 'julia{0:03d}'.format(x),\n 'id': 'julia{0:03d}'.format(x),\n 'value': 1} for x in range(10, 100)]\n self.assertEqual(actual, expected)", "def keep_callback(files):\n for i in range(len(files)):\n print(\"{i}: {fn}\".format(i=i, fn=files[i].abspath))\n idx = raw_input(\"Index to keep: \")\n idxre = re.compile('[0-9]+')\n if not idxre.match(idx):\n raise ValueError(\"Expected an integer number\")\n idx = int(idx)\n assert idx < len(files), \"Index out of range\"\n return idx", "def pedirNum():\n\n numeroPedido = \"\"\n while validarNum(numeroPedido, 1) == False:\n numeroPedido = str(input(\"Ingrese un numero de 4 cifras distintas: \"))\n return numeroPedido", "def skipNudge(self):\n b = self.getNext()\n bb = self.getNext()\n if (b == 128) and (bb == 128):\n self.ptr = self.ptr + 4", "def pick_number():\n print ()\n print (str(name) + \", think of a number between \" + str(low) + \" and \" + str(high) + \".\")\n input (\"Press ENTER to continue\")\n print()\n print ((high + low) // 2)", "def skipper_func(*args, **kwargs):\n if skip_val():\n raise nose.SkipTest(get_msg(f,msg))\n else:\n return f(*args, **kwargs)", "def main():\r\n found = False\r\n try_num = 20\r\n\r\n while not found:\r\n\r\n if not chk_divisible(try_num):\r\n try_num += 1\r\n continue\r\n\r\n else:\r\n print(try_num)\r\n found = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert a separator between menu items.
def add_menu_separator(self): if self._menu is None: self._create_menu() self._menu.addSeparator()
[ "def separator(self, menu):\n return menu.AppendSeparator()", "def addSeparator(self, *args) -> \"adsk::core::Ptr< adsk::core::ListItem >\" :\n return _core.ListItems_addSeparator(self, *args)", "def test_menu_separator(self):\n # Separators at the head and tail are ignored\n self.assertEqual(\n '''\n Test entry one\n - - - -\n Test entry two''',\n '\\n' + str(menu(\n separator(),\n separator(),\n item('Test entry one', None),\n separator(),\n item('Test entry hidden', None, visible=False),\n separator(),\n item('Test entry two', None),\n separator(),\n separator())))", "def addSeparator(self, id : 'std::string const &'=\"\", positionID : 'std::string const &'=\"\", isBefore : 'bool'=True) -> \"adsk::core::Ptr< adsk::core::SeparatorControl >\" :\n return _core.ToolbarControls_addSeparator(self, id, positionID, isBefore)", "def DrawSeparator(*args, **kwargs):\n return _aui.AuiToolBarArt_DrawSeparator(*args, **kwargs)", "def InsertCellSeparator(self):\n #dlg = wx.TextEntryDialog(self, 'Enter code cell separator label text:',\n # 'Insert code cell separator', '')\n dlg = CellDialog(self, -1)\n\n if dlg.ShowModal() == wx.ID_OK:\n label = dlg.GetValue()\n\n #If not at the start of a line add \\n\n pos = self.GetCurrentPos()\n indent = self.GetColumn(pos)\n if indent!=0:\n self.InsertText(pos,'\\n')\n self.SetCurrentPos(pos+1)\n\n #add the separator\n pos = self.GetCurrentPos()\n line = self.LineFromPosition(pos)\n pos = self.PositionFromLine(line)\n self.InsertText(pos,label)\n\n #move to end of separator\n self.SetCurrentPos(pos+len(label))\n self.SetAnchor(pos+len(label))\n\n dlg.Destroy()", "def __writeSeparator(self, indent):\n self.__dev.write(\" \" * indent)\n self.__dev.write(\"<HR>\\n\")", "def add_divider(self):\n self.page += '<hr style=\"clear:both;\"/>\\n'", "def getSeparator(self) -> str:\n ...", "def set_separator(self) -> None:\n self.separator = len(self.lines)", "def show_sep(self, symbol=\"-\", qt=30):\n print(\"-\" * qt)", "def item_join(items, sep=' '):\n return sep.join(items).strip()", "def initMenus(self):\n menu_items = eval(file_io.load_config(MENU_FILE))\n menubar = self.menuBar()\n\n for menu in menu_items:\n newMenu = menubar.addMenu(menu[0])\n for action in menu[1]:\n if action[\"name\"] == \"sep\":\n newMenu.addSeparator()\n continue\n newAction = QtGui.QAction(action[\"name\"], self)\n newAction.setShortcut(action[\"shortcut\"])\n newAction.setStatusTip(action[\"tip\"])\n newAction.triggered.connect(action[\"cb\"])\n newMenu.addAction(newAction)", "def _add_menu_items(self):\r\n self.mfile.AppendItem(self.mf_close)\r\n self.mfile.AppendItem(self.mf_exit)\r\n\r\n self.medit.AppendItem(self.me_redraw)\r\n self.medit.AppendItem(self.me_pref)\r\n self.medit.AppendSeparator()\r\n self.medit.AppendItem(self.me_run)\r\n\r\n self.mview.AppendItem(self.mv_zoomfit)\r\n self.mview.AppendSeparator()\r\n\r\n self.mopts.AppendItem(self.mo_limits)\r\n self.mopts.AppendItem(self.mo_emails)", "def getSeparator(self):\r\n return '/'", "def getSeparator(self):\n return '/'", "def itemlist(item, sep):\n return condense(item + ZeroOrMore(addspace(sep + item)) + Optional(sep))", "def separator(parser=comma):\n\n return lexeme(parser)", "def _add_separators(self):\n self.qr[7, :8] = SEPARATOR\n self.qr[:8, 7] = SEPARATOR\n self.qr[-8, :8] = SEPARATOR\n self.qr[-8:, 7] = SEPARATOR\n self.qr[:8, -8] = SEPARATOR\n self.qr[7, -8:] = SEPARATOR" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The function to be called for a mouse right-click event.
def set_right_click(self, fcn): self.customContextMenuRequested.connect(fcn)
[ "def right_click(self):\n pass", "def right_click(self):\n self.node.right_click()", "def OnRightEvent(self, event):\n self.click = 'RIGHT'\n self.ProcessClick(event)", "def on_right_click(self, event):\n\n element, (x, y) = event\n parent = self.tree_viewer.control\n self.context_menu.create_menu(parent).show(x, y)", "def OnRightDown(self, evt):\n pass", "def OnRightDEvent(self, event):\n self.click = 'DRIGHT'\n self.ProcessClick(event)", "def on_right_click_event(self,treeview, event):\n\t\t\n\t\tif event.button == 3:\n\t\t\t\n\t\t\tselection = treeview.get_selection()\n\t\t\t\n\t\t\tif selection == None:\n\t\t\t\treturn False\n\t\t\t\n\t\t\tmodel, treeiter = selection.get_selected()\n\t\t\t\n\t\t\tself.popup_menu.get_menu.popup(None, None, None, None, event.button, event.time)\n\t\t\t\n\t\t\treturn True", "def show_right_click_menu(self, e):\n self.PopupMenu(self.make_menu())", "def process_right_press(self):\n self.select_entry(callback_number=2)", "def right_click_input(self, coords = (None, None)):\n self.click_input(button='right', coords=coords)", "def right_click_testing(self):\n\n if \"contextmenu\" in str(self.html).lower():\n self.rightClickWeight = 1\n return\n\n self.rightClickWeight = 0", "def Right_Click(event,mouse_socket):\n mouse_socket.sendto('*R',(IP, MOUSE_PORT))\n return True", "def on_right_click(self, event):\n self.mode = 'hex' if self.mode == 'rgb' else 'rgb'\n self.on_motion()", "def OnRightDown(self, event):\n\n click_posn = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n self.is_box_select = True\n self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))\n (self.sbox_w, self.sbox_h) = (0, 0)\n (self.sbox_1_x, self.sbox_1_y) = click_posn\n event.Skip()", "def right_click(self, event):\n self.remove_rectangle(-1)", "def test_right_click_input(self):\n self.button.right_click_input()\n self.assertEqual(self.label.window_text(), \"RightClick\")\n\n # def test_press_move_release(self):\n # pass", "def event(mouse_event):\n pass", "def mouse_double_clicked(self, x, y, modifiers):\n return False", "def left_click(self):\n self.node.left_click()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a column of value one to the right of `array`.
def _right_extend(array): ones = np.ones((array.shape[0], 1), dtype=array.dtype) return np.concatenate((array, ones), axis=1)
[ "def add_array_to_col( self, row, col, inarr, attrs=None ):\n\n arlen = len( inarr )\n\n for i in range( arlen ):\n self.setCellcontents( row+i, col, inarr[i], attrs )", "def add_column(x, fill_value=1):\n shape = list(x.shape)\n shape[-1] = 1\n values = np.ones(shape)\n if fill_value != 1:\n \n values *= fill_value\n \n x2 = np.append(x, values, axis=-1)\n \n return x2", "def pad_array(matrix, value):\n for row in matrix:\n row.append(value)\n return matrix", "def _scalar_add(self, array, value):\n return [x + value for x in array]", "def add_first_value_to_end(data):\n new_data = np.append(data, data[0:1, :], axis=0)\n\n return new_data", "def append_column(x, col):\n for i, element in enumerate(col):\n if type(x[i]) == int:\n x[i] = [x[i]]\n x[i].append(element)\n\n return x", "def add_array_to_row( self, row, col, inarr, attrs=None ):\n \n arlen = len( inarr )\n\n\n for i in range( arlen ):\n self.setCellcontents( row, col+i, inarr[i], attrs )", "def oneincol(ar, col_id):\n\n tar = ar[:]\n tar[:,col_id] = numpy.ones(len(ar[:,col_id]))\n return tar", "def add_ones_column_to_matrix(mat):\n\tshape = list(mat.shape)\n\tshape[1] += 1\n\tres = np.ones(shape)\n\tres[:, 1:] = mat\n\treturn res", "def put_2Darray(fname, array, header='', fmt='', append='no'):\n lista = []\n for i in range(array.shape[1]):\n lista.append(array[:, i])\n lista = tuple(lista)\n put_data(fname, lista, header, fmt, append)", "def test_col_to_arr_plus_one_copy():\n sample = np.asarray(\n np.random.normal(size=(20, 1)),\n dtype=np.float,\n order='F'\n )\n mat = carma.col_to_arr_plus_one(sample, True)\n assert np.allclose(mat, sample + 1)", "def add_constant_column(df, column, value):\n length = len(df)\n new_column = pd.DataFrame({column: np.ones(length)*value})\n df = df.join(new_column)\n return df", "def extend_array(array, max_row):\n\n shape = array.shape\n diff = max_row - shape[0]\n\n if diff != 0:\n new_array = np.row_stack([array, np.full((diff, shape[1]), -1, dtype=np.int)])\n else:\n new_array = array\n\n return(new_array)", "def reindex_from_one(array: np.array) -> pd.DataFrame:\n df = pd.DataFrame(array)\n df.columns = df.columns + 1\n df.index = df.index + 1\n return df", "def test_col_to_arr_plus_one():\n sample = np.asarray(\n np.random.normal(size=(20, 1)),\n dtype=np.float,\n order='F'\n )\n mat = carma.col_to_arr_plus_one(sample, False)\n assert np.allclose(mat, sample + 1)", "def append_to_var(value, array):\n if type(array) is list:\n array.append(value)\n elif array == \"_all\" or array == None:\n array = value\n elif array != value:\n array = [array, value]\n return array", "def extend_array(array):\n for row in array:\n while len(row) < 6:\n row.append('')\n while len(array) < 4:\n array.append(['', '', '', '', '', ''])\n return array", "def add(array1, array2):\n (height1, width1) = array1.shape\n (height2, width2) = array2.shape\n \n height = max(height1,height2)\n width = max(width1, width2)\n newArray = np.zeros((height,width))\n \n newArray[0:height1,0:width1]+=array1\n newArray[0:height2,0:width2]+=array2\n \n return newArray.copy()", "def insert_ones(X): \n # This step allows X to be a one-dimensional array. \n X = array_to_ndarray(X)\n num_rows = X.shape[0]\n return np.hstack((np.ones((num_rows, 1)), X))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes tempo and gain of the recording with sox and loads it.
def augment_audio_with_sox(path, sample_rate, tempo, gain): with NamedTemporaryFile(suffix=".wav") as augmented_file: augmented_filename = augmented_file.name sox_augment_params = ["tempo", "{:.3f}".format(tempo), "gain", "{:.3f}".format(gain)] sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1".format(path, sample_rate, augmented_filename, " ".join(sox_augment_params)) os.system(sox_params) y,_ = load_audio(augmented_filename) return y
[ "def detect_tempo(self):\n\n if (self.song.sr is None) or (self.song.waveform is None):\n raise ValueError(\"No song was loaded.\")\n\n # Detect tempo\n tempo, beat_frames = librosa.beat.beat_track(\n y=self.song.mono_waveform, sr=self.song.sr, tightness=100\n )\n\n self.tempo = adjust_tempo(tempo)\n self.beat_frames = beat_frames", "def samplefreq8k(origwav):\n\n # copy orig to temp\n tempWavFile = getTempFile(\"wav\")\n shutil.copy(origwav, tempWavFile)\n\n # convert temp to be 8kz, and overwrite orig\n cmd = \"sox %s -r 8000 %s\" % (tempWavFile, origwav)\n runCommand(cmd)\n\n # delete temp\n os.remove(tempWavFile)\n \n return origwav", "def set_sfx(self):\n value = self.sfx_slider.value\n set_sfx_volume(value)", "def set_record(self, record):\r\n record = int(record)\r\n now = datetime.datetime.utcnow()\r\n self.obs.writecount = 0\r\n strnow = now.isoformat()\r\n parts = strnow.split('.')\r\n strnow = parts[0]\r\n if record == radioastronomy.INTWAIT:\r\n print(\"Stop Averaging : %s \" % ( strnow))\r\n self.stoputc = now\r\n # only restart averaging if not in averaging state\r\n elif self.record == radioastronomy.INTWAIT:\r\n print(\"Start Averaging : %s \" % ( strnow))\r\n self.startutc = now\r\n self.record = int(record)", "def setAudio(self, audio, mode):\n\t\tpass", "def set_sound_timer(self, register):\n self.ST = self.V[register]", "def get_signal(fname):\n data, s_rate = librosa.load(fname)\n return data, s_rate", "def songtiming(filename, filetype = '.cbin', evtaf = 'Amp'):\n\t\n\tif evtaf = 'Amp':", "def play(self, mapping):\n # TO DO: Generator should know samplerate and audbuff\n # TO DO: split this into common and generator-specific functions to minimise code duplication\n samprate = self.samprate\n audbuff = self.audbuff\n\n params = copy.deepcopy(self.preset)\n utils.linear_to_nested_dict_reassign(mapping, params)\n # for p in self.preset.keys():\n # if p not in mapping:\n # mapping[p] = self.preset[p]\n\n # sample to use\n samplefunc = self.samples[params['note']]\n \n # note length\n if params['note_length'] == 'sample':\n nlength = self.samplens[params['note']]\n params['note_length'] = nlength/samprate\n else:\n nlength = (params['note_length']+params['volume_envelope']['R'])*samprate\n\n # generator stream (TO DO: attribute of stream?)\n sstream = stream.Stream(nlength/samprate, samprate)\n sstream.get_sampfracs()\n samples = sstream.samples.astype(float)\n\n pindex = np.zeros(samples.size)\n if callable(params['pitch_shift']):\n pindex += params['pitch_shift'](sstream.sampfracs)/12.\n elif params['pitch_shift'] != 0:\n pindex += params['pitch_shift']/12.\n if params['pitch_lfo']['use']:\n pindex += self.lfo(samples, sstream.sampfracs, params, 'pitch')/12.\n if np.any(pindex):\n samples = np.cumsum(pow(2., pindex))\n \n # if callable(params['pitch_shift']):\n # pshift = np.cumsum(params['pitch_shift'](sstream.sampfracs))\n # samples *= pow(2., pshift/12.)\n # else:\n # samples *= pow(2., params['pitch_shift']/12.)\n \n # sample looping if specified\n if params['looping'] != 'off':\n startsamp = params['loop_start']*samprate\n endsamp = params['loop_end']*samprate\n\n # find clean loop points within an audible (< 20Hz) cycle\n startsamp += np.argmin(samplefunc(np.arange(audbuff) + startsamp))\n endsamp += np.argmin(samplefunc(np.arange(audbuff) + endsamp))\n\n if params['looping'] == 'forwardback':\n samples = forward_back_loopsamp(samples,#sstream.samples,\n startsamp,\n endsamp)\n elif params['looping'] == 'forward':\n samples = 
forward_loopsamp(samples,#sstream.samples,\n startsamp,\n endsamp)\n \n \n # generate stream values\n values = samplefunc(samples)\n\n # get volume envelope\n env = self.envelope(sstream.samples, params)\n if params['volume_lfo']['use']:\n env *= np.clip(1.-self.lfo(sstream.samples, sstream.sampfracs,\n params, 'volume')*0.5, 0, 1)\n # apply volume normalisation or modulation (TO DO: envelope, pre or post filter?)\n sstream.values = values * env * utils.const_or_evo(params['volume'], sstream.sampfracs)\n \n # TO DO: filter envelope (specify as a cutoff array function? or filter twice?)\n\n # filter stream\n if params['filter'] == \"on\":\n if hasattr(params['cutoff'], \"__iter__\"):\n # if static cutoff, use minimum buffer count\n sstream.bufferize(sstream.length/4)\n else:\n # 30 ms buffer (hardcoded for now)\n sstream.bufferize(0.03)\n sstream.filt_sweep(getattr(filters, params['filter_type']),\n utils.const_or_evo_func(params['cutoff']))\n return sstream", "def __init__(self, sampling_rate: int = 44100, volume: float = 0.5):\n # Set the volume to play at\n self.volume = volume\n # Set the sampling rate for this NotePlayer....\n if(sampling_rate > 0):\n self._SAMPLING_RATE = int(sampling_rate)\n else:\n raise ValueError(\"Sampling rate must be greater then 0!!!\")\n # Set the current time into the track to 0...\n self._time = 0\n\n # Create required pyaudio objects...\n self._py_audio = pyaudio.PyAudio()\n self._output_stream = self._py_audio.open(\n format=pyaudio.paFloat32,\n channels=1,\n rate = int(self._SAMPLING_RATE),\n output=True\n )", "def procelemdata(self, event):\n dlg = wx.FileDialog(self, \"Choose a file\", \"\", \"\", \"*.s7k\", wx.OPEN)\n if dlg.ShowModal() == wx.ID_OK:\n self.rawfile = dlg.GetFilename()\n dirname = dlg.GetDirectory()\n havefile = True\n else: havefile = False\n dlg.Destroy()\n if havefile:\n r = prr.x7kRead(os.path.join(dirname, self.rawfile))\n r.mapfile()\n print \"mapping complete. 
Processing data.\"\n # make sure there are 7038 records in the file\n if r.map.packdir.has_key('7038'):\n r.getrecord(7000, 0)\n frequency = r.packet.subpack.header[3]\n samplerate = r.packet.subpack.header[4]\n r.getrecord(7038, 0)\n # assuming the same number of samples throughout the file\n maxsamples = r.packet.subpack.header[4]\n numelements = r.packet.subpack.numelements\n # initialize stuff\n gainlist = {}\n mags = {}\n num7038 = len(r.map.packdir['7038'])\n dir7000 = np.asarray(r.map.packdir['7000'])\n # get the number of pings at each gain setting\n for pingnum in range(num7038):\n tstamp = r.map.packdir['7038'][pingnum][1]\n idx = np.nonzero(dir7000[:,1] == tstamp)[0]\n if len(idx) == 1:\n r.getrecord(7000, idx[0])\n gain = str(r.packet.subpack.header[15])\n if gainlist.has_key(gain):\n gainlist[gain].append(pingnum)\n else:\n gainlist[gain] = [pingnum]\n # inialize arrays for all gain settings\n for gain in gainlist:\n num = len(gainlist[gain])\n mags[gain] = np.zeros((maxsamples * num, numelements))\n # get data from all pings\n pingcount = 0\n for gain in gainlist:\n pointer = 0\n for n,pingnum in enumerate(gainlist[gain]):\n try:\n\t\t\t\t\t\t\tr.getrecord(7038,pingnum)\n\t\t\t\t\t\t\tnumsamples = r.packet.subpack.numsamples\n\t\t\t\t\t\t\tpingcount +=1\n\t\t\t\t\t\t\tcomplete = str(int(100.0 * pingcount / num7038))\n\t\t\t\t\t\t\tb = (len(complete) + 2) * '\\b'\n\t\t\t\t\t\t\tprint b + complete + '%',\n\t\t\t\t\t\t\tend = pointer + numsamples\n\t\t\t\t\t\t\tmag = np.abs(r.packet.subpack.r.reshape(-1,numelements))\n\t\t\t\t\t\t\tmags[gain][pointer:end, :] = mag\n except:\n mags[gain][pointer:end, :] = np.nan\n pointer += numsamples\n mags[gain] = mags[gain][:pointer, :]\n print '\\n',\n # reusing a variable name, sorry. I'm not very creative.\n gainlist = [float(g) for g in mags.keys()]\n gainlist.sort()\n aveMag = np.zeros((len(gainlist), numelements))\n targetplotgain = 40 # the closest gain to this value is plotted\n lastval = 100 # just picked a large value...\n for idx, gain in enumerate(gainlist):\n g_amp = mags[str(gain)]\n #FFT by Sam Greenaway\n #one side fft of magnitude, treat each element independently\n C = np.average(g_amp, axis=0)\t\n #Tile average to remove average mag value before fft \n D = np.tile(C,(len(g_amp),1))\n W = np.tile(np.hanning(len(g_amp)),(numelements,1)).T\n aveMag[idx,:] = np.average(g_amp, axis = 0)\n testval = np.abs(gain - targetplotgain)\n if testval < lastval:\n lastval = testval\n A = (8/3)*(2/(samplerate*len(g_amp)))*np.abs(np.fft.rfft(np.multiply(W,(g_amp-D)), axis=0))**2\n midg_amp = g_amp\n midgain = str(gain)\n #average PSD - equivalent to ensemble avergaing across elements\n aA = np.average(A, axis=1)\n # the frequencies\n fn1S = np.linspace(0,samplerate/2,np.size(aA))\n # get rid of some warnings...\n idx = np.nonzero(midg_amp == 0)\n midg_amp[idx[0],idx[1]] = 1\n idx = np.nonzero(aveMag == 0)\n aveMag[idx[0],idx[1]] = 1\n # Plotting also by Sam... 
mostly\n f=plt.figure(figsize = (15,10))\n f.suptitle(self.rawfile)\n plt.subplot(2,2,1)\n plt.imshow(20*np.log10(midg_amp), aspect = 'auto')\n plt.title('Amplitude, ' + midgain + 'dB gain')\n plt.xlabel('element')\n plt.ylabel('sample')\n plt.colorbar()\n\n plt.subplot(2,2,2)\n plt.plot(gainlist,20*np.log10(aveMag))\n plt.title('Average Amplitude by Gain')\n plt.xlabel('gain')\n plt.ylabel('dB re 7125 Units')\n plt.grid()\n\n ax = plt.subplot(2,2,3)\n linelist = ax.plot(20*np.log10(aveMag).T)\n ax.set_title('Average Element Amplitude by Gain')\n ax.set_xlabel('element')\n ax.set_ylabel('dB re 7125 Units')\n ax.set_xlim([0, numelements])\n ax.grid()\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width*0.8, box.height])\n ax.legend(linelist, gainlist, loc = 'center left', bbox_to_anchor=(1,0.5))\n\n plt.subplot(2,2,4)\n plt.plot(fn1S,20*np.log10(aA))\n plt.title('One Sided PSD, '+ midgain +'dB Gain, ensemble averaged across elements')\n plt.xlabel('Hz')\n plt.ylabel('dB re 7125 Units/ Hz')\n plt.grid()\n\n plt.draw()\n print \"Thanks Sam.\"\n else:\n print 'No 7038 data found. Make sure the latest Reson Feature Pack is installed.'", "def do_save(self, arg):\n\t\tself.curr_song.export(arg, \"wav\")", "def save(self, fn, notes, convvolts, new_conf):\n if hasattr(self,'conf') and self.conf:\n use_conf=self.conf\n else:\n # configuration never done, probably because no data recorded yet\n use_conf=new_conf\n \n eol = '\\r\\n' if sys.platform=='win32' else '\\n'\n \n scale = self.board.power_voltage / 65536.\n with codecs.open(fn, 'w', 'utf-8') as f:\n f.write('# PteroDAQ recording{}'.format(eol))\n f.write('# saved at {0:%Y %b %d %H:%M:%S}{1}'.format(datetime.now(),eol))\n if len(self.board.names)>1:\n f.write('# board is one of {0}{1}'.format( \", \".join(self.board.names),eol))\n else:\n f.write('# board is {0}{1}'.format(self.board.names[0],eol))\n if isinstance(use_conf[0], TriggerTimed):\n f.write('# Recording every {0} sec ({1} Hz){2}'.format(use_conf[0].period, 1./use_conf[0].period,eol))\n elif isinstance(use_conf[0], TriggerPinchange):\n f.write('# Recording when {0} {1}{2}'.format(use_conf[0].pin, use_conf[0].sense,eol))\n f.write('# Analog reference is {0}{1}'.format(use_conf[1],eol))\n if use_conf[2] != 1:\n f.write('# Averaging {0} readings together{1}'.format(use_conf[2],eol))\n f.write('# Power supply is {0:.4f} volts'.format(self.board.power_voltage,eol))\n if convvolts:\n f.write('# Scale: 0 to {0:.4f} volts{1}'.format(self.board.power_voltage,eol))\n else:\n f.write('# Scale: 0 to 65535{0}'.format(eol))\n f.write('# Notes:{}'.format(eol))\n for ln in notes.split('\\n'):\n f.write('# {0}{1}'.format(ln,eol))\n x0 = len(self._data)\n f.write('# {0} samples{1}'.format(x0,eol))\n\n f.write('# Recording channels:{}'.format(eol))\n f.write('# timestamp (in seconds){}'.format(eol))\n # Use passed-in configuration for names, rather than the ones saved\n # but use saved for probes and downsampling\n # Note that channels is the last field of the configuration tuple.\n for chan_num,(ch_name,ch_probe) in enumerate(zip(new_conf[-1],use_conf[-1])):\n downsample = ch_probe.interpretation.downsample\n if downsample>1:\n f.write('# {0} : {1} downsample by {2}\\t'.format(ch_name.name, \n self.board.name_from_probe[ch_probe.probe],\n downsample))\n else:\n f.write('# {0} : {1}\\t'.format(ch_name.name, \n self.board.name_from_probe[ch_probe.probe]))\n if x0:\n x1 = sum(d[chan_num+1] for d in self._data)\n x2 = sum(d[chan_num+1]**2 for d in self._data)\n mean = x1/x0\n m2 = 
max(x2/x0-mean**2, 0)\n if convvolts:\n ch=self.channels[chan_num]\n f.write(\" DC= {0:.7g} RMS= {1:.7g}{2}\".format(\n ch.volts(mean,self.board.power_voltage), \n ch.volts(sqrt(m2),self.board.power_voltage),\n eol\n ))\n else:\n f.write(\" DC= {0:.7g} RMS= {1:.7g}{2}\".format(\n \tmean, sqrt(m2),eol))\n else:\n f.write(eol)\n old_time=0\n time_offset=None\n for d in self._data:\n time=d[0]\n if time_offset==None:\n time_offset=time\n if time<old_time:\n time_offset=time\n f.write(eol) # blank line if back in time\n old_time=time\n \n f.write('{0:.7f}'.format(time-time_offset)) # timestamp\n for n, x in enumerate(d[1:]):\n ch = self.channels[n]\n f.write('\\t')\n if convvolts and ch.interpretation.is_analog:\n f.write(\"{0:.6f}\".format(ch.volts(x,self.board.power_voltage)))\n elif ch.interpretation.is_frequency:\n \tf.write(\"{0:.6f}\".format(x))\n else:\n f.write(str(int(x)))\n f.write(eol)\n self.num_saved = len(self._data)", "def change_pyttsx3_rate(self, new_rate: int):\r\n self.pyttsx3_rate = new_rate", "def setup(self,averaging = 10,reference = -50):\n self._fsp.storeConfig(\"IQCalibration\")\n self._awg.saveSetup(\"iq_calibration.awg\")\n self._mwgState = self._mwg.saveState(\"iq calibration\")\n self._fsp.write(\"SENSE1:FREQUENCY:SPAN 0 MHz\")\n self.period = 20000#int(1.0/self._awg.repetitionRate()*1e9)\n self._fsp.write(\"SWE:TIME 20 ms\")\n self._rbw = 10000\n self._fsp.write(\"SENSE1:BAND:RES %f Hz\" % self._rbw)\n self._fsp.write(\"SENSE1:BAND:VIDEO AUTO\")\n self._fsp.write(\"TRIG:SOURCE EXT\")\n self._fsp.write(\"TRIG:HOLDOFF 0.02 s\")\n self._fsp.write(\"TRIG:LEVEL 0.5 V\")\n self._fsp.write(\"TRIG:SLOP POS\")\n self._fsp.write(\"SENSE1:AVERAGE:COUNT %d\" % averaging)\n self._fsp.write(\"SENSE1:AVERAGE:STAT1 ON\")\n self._fsp.write(\"DISP:TRACE1:Y:RLEVEL %f\" % reference)\n self.setupWaveforms()", "def increase_volume(self):\n if self.is_playing:\n self.volume = self.volume / 0.8 + 0.008", "def _set_samplerate(self, samplerate):\r\n raise NotImplementedError", "def set_gain(self):\n DescStr = 'Setting Gain for AHF_Camera '\n if (self.AHFgainMode & 2):\n DescStr += 'from current illumination'\n else:\n DescStr += \"from ISO \" + str(self.iso)\n if (self.AHFgainMode & 1):\n DescStr += ' with white balancing'\n else:\n DescStr += \" with No white balancing\"\n print (DescStr)\n if (self.AHFgainMode & 1):\n self.awb_mode = 'auto'\n else:\n self.awb_mode = 'off'\n self.awb_gains = (1, 1)\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'auto'\n # else:\n # self.exposure_mode = 'off'\n super().start_preview(fullscreen=False, window=self.AHFpreview)\n sleep(2.0) # let gains settle, then fix values\n if (self.AHFgainMode & 1):\n savedGain = self.awb_gains\n self.awb_gains = savedGain\n self.awb_mode = \"off\"\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'off'\n super().stop_preview()\n print (\"Red Gain for white balance =\" + str(float(self.awb_gains[0])))\n print (\"Blue Gain for white balance =\" + str(float(self.awb_gains[1])))\n print (\"Analog Gain = \" + str(float(self.analog_gain)))\n print (\"Digital Gain = \" + str(float(self.digital_gain)))\n return", "def test_io(tmp_path, SoundClass):\n fname = tmp_path / f\"sound-{SoundClass.__name__}-1.wav\"\n sound = SoundClass(volume=100)\n sound.save(fname, overwrite=False)\n sound_loaded = Sound(fname)\n assert sound_loaded.volume == 100\n assert np.allclose(sound.signal, sound_loaded.signal)\n\n fname = tmp_path / f\"sound-{SoundClass.__name__}-2.wav\"\n sound = SoundClass(volume=10)\n sound.save(fname, 
overwrite=False)\n sound_loaded = Sound(fname)\n assert sound_loaded.volume == 100\n sound_loaded.volume = 10\n assert np.allclose(sound.signal, sound_loaded.signal)\n\n fname = tmp_path / f\"sound-{SoundClass.__name__}-3.wav\"\n sound = SoundClass(volume=(50, 25))\n sound.save(fname, overwrite=False)\n sound_loaded = Sound(fname)\n assert np.allclose(sound_loaded.volume, (100, 50))\n sound_loaded.volume = (50, 25)\n assert np.allclose(sound.signal, sound_loaded.signal)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a Testrail API request instance. Note that this doesn't send the request until self.sendRequest() is run. requestType is a str ("get", "post", etc.). urlParams is the tail of the url. payload is a dict that will be sent as JSON. files is a dict reference to a locale file.
def __init__(self, requestType, urlParams, payload={}, files={}): self.baseUrl = os.getenv("TEST_RAIL_BASE_URL") self.username = os.getenv("TEST_RAIL_USERNAME") self.password = os.getenv("TEST_RAIL_API_KEY") # or password self.requestType = requestType self.urlParams = urlParams self.headers = {'Content-type': 'application/json'} self.payload = payload self.response = False
[ "def create_request(self, request_type, parameters):\n request = None\n if request_type == u'scripting_request':\n request = scripting.ScriptingRequest(\n self.current_id, self.json_rpc_client, parameters)\n logger.info(\n u'Scripting request id: {} created.'.format(\n self.current_id))\n self.current_id += 1\n\n return request", "def _request(self, url):\n return Request(url)", "def get_request(self, url, method, headers, payload=None):\n request = urllib2.Request(url, headers=headers, data=payload)\n request.get_method = lambda: method.upper()\n return request", "def create_request(self):\n date_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')\n present_time = date_time[0:-3] + 'Z'\n # Using the web service post() method to create request\n response = requests.post(url=bid_url, headers={'Authorization': self.api_key}, json={\n \"type\": self.bid_type.get(),\n \"initiatorId\": self.current_user.id,\n \"dateCreated\": present_time,\n \"subjectId\": Subject().get_id_by_name(self.subject.get()),\n \"additionalInfo\": {\"competency\": self.competency.get(), \"hours_per_week\": self.hours_per_session.get(),\n \"sessions_per_week\": self.sessions_per_week.get(),\n \"rate_per_session\": self.rate_per_session.get()}\n }\n )\n json_data = response.json()\n # Destroying current window and jumping to next screen by calling the main() method from the NewRequestDetails \n # class\n self.window.destroy()\n NewRequestDetails(json_data).main()", "def make_ecobee_request(request_type, url, **kwargs):\n # Generate appropriate grequests object\n if request_type.lower() in [\"get\", \"post\"]:\n response = call_grequest(request_type.lower(), url, verify=requests.certs.where(), timeout=30, **kwargs)\n else:\n raise ValueError(f\"Unsupported request type {request_type} for Ecobee driver.\")\n # Send request and extract data from response\n headers = response.headers\n if \"json\" in headers.get(\"Content-Type\"):\n return response.json()\n else:\n content = response.content\n if isinstance(content, bytes):\n content = jsonapi.loads(response.decode(\"UTF-8\"))\n return content", "def get_request(url, **kwargs):\n # print(\"get_request: received kwargs {}\".format(kwargs))\n # print(\"get_request: received url {}\".format(url))\n try:\n if 'cp_cl_api_key' in kwargs:\n # Cloudant service rest api request\n cp_cl_api_key = kwargs['cp_cl_api_key']\n # prepare payload\n del kwargs['cp_cl_api_key']\n # prepare header\n headers = {'Content-Type': 'application/json', 'cp_api_key': cp_cl_api_key}\n # call get method\n response = requests.get(url=url,headers=headers,params=kwargs)\n elif 'cp_wnlu_api_key' in kwargs:\n # WNLU service request\n cp_wnlu_api_key = kwargs['cp_wnlu_api_key']\n # prepare payload\n params = dict()\n params['text'] = kwargs['text']\n params['version'] = kwargs['version']\n params['features'] = kwargs['features']\n params['return_analyzed_text'] = kwargs['return_analyzed_text']\n if 'language' in kwargs:\n params['language'] = kwargs['language']\n # prepare header\n headers = {'Content-Type': 'application/json'}\n response = requests.get(url=url,headers=headers,params=kwargs,\\\n auth=HTTPBasicAuth('apikey',cp_wnlu_api_key))\n else:\n # no service key has been specified\n print(\"neither cp_cl_api_key nor cp_wnlu_api_key has been specified\")\n return {}\n except:\n # if any error occurs print it\n print(\"Network exception occurred with GET request!!!\")\n return {}\n status_code = response.status_code\n print(\"get_request: received response with status code {}\".format(status_code))\n 
json_data = json.loads(response.text)\n return json_data", "def build_replica_request(self) -> Request:\n request = Request()\n\n # Details\n request.version = self.request.version\n request.remoteIp = self.request.remote_ip\n request.protocol = self.request.protocol\n request.host = self.request.host\n request.hostName = self.request.host_name\n request.port = self.request.server_connection.stream.socket.getsockname()[1]\n request.uri = self.request.uri\n\n # Method\n request.method = self.request.method\n\n # Path\n request.set_path(self.request.path)\n\n # Headers\n for key, value in self.request.headers._dict.items():\n request.headers[key] = value\n request.headers[key.lower()] = value\n\n # Query String\n for key, value in self.request.query_arguments.items():\n request.queryString[key] = [x.decode() for x in value]\n if len(request.queryString[key]) == 1:\n request.queryString[key] = request.queryString[key][0]\n\n # Body\n if self.request.body_arguments:\n request.mimeType = 'application/x-www-form-urlencoded'\n for key, value in self.request.body_arguments.items():\n try:\n request.bodyType[key] = 'str'\n request.body[key] = [x.decode() for x in value]\n except (AttributeError, UnicodeDecodeError):\n request.bodyType[key] = BASE64\n request.body[key] = [_b64encode(x) for x in value]\n if len(request.body[key]) == 1:\n request.body[key] = request.body[key][0]\n elif self.request.files:\n request.mimeType = 'multipart/form-data'\n for key, value in self.request.files.items():\n try:\n request.bodyType[key] = 'str'\n request.body[key] = [x.body.decode() for x in value]\n except (AttributeError, UnicodeDecodeError):\n request.bodyType[key] = BASE64\n request.body[key] = [_b64encode(x.body) for x in value]\n if len(request.body[key]) == 1:\n request.body[key] = request.body[key][0]\n else:\n request.mimeType = 'text/plain'\n try:\n request.bodyType = 'str'\n request.body = self.request.body.decode()\n except (AttributeError, UnicodeDecodeError):\n request.bodyType = BASE64\n request.body = _b64encode(self.request.body)\n request.bodySize = len(self.request.body)\n\n # Files\n request.files = self.request.files\n\n return request", "def _build_request(self, type, commands):\n request = {}\n headers = {\n \"content-type\": \"application/json\",\n }\n if self.nxargs[\"connect_over_uds\"]:\n user = self.nxargs[\"cookie\"]\n headers[\"cookie\"] = \"nxapi_auth=\" + user + \":local\"\n request[\"url\"] = self.NXAPI_UDS_URI_PATH\n else:\n request[\"url\"] = \"{transport}://{host}:{port}{uri}\".format(\n transport=self.nxargs[\"transport\"],\n host=self.nxargs[\"host\"],\n port=self.nxargs[\"port\"],\n uri=self.NXAPI_REMOTE_URI_PATH,\n )\n\n if isinstance(commands, (list, set, tuple)):\n commands = \" ; \".join(commands)\n payload = {}\n # Some versions of NX-OS fail to process the payload properly if\n # 'input' gets serialized before 'type' and the payload of 'input'\n # contains the string 'type'. 
Use an ordered dict to enforce ordering.\n payload[\"ins_api\"] = collections.OrderedDict()\n payload[\"ins_api\"][\"version\"] = self.NXAPI_VERSION\n payload[\"ins_api\"][\"type\"] = type\n payload[\"ins_api\"][\"chunk\"] = \"0\"\n payload[\"ins_api\"][\"sid\"] = \"1\"\n payload[\"ins_api\"][\"input\"] = commands\n payload[\"ins_api\"][\"output_format\"] = \"json\"\n\n request[\"headers\"] = headers\n request[\"payload\"] = json.dumps(payload)\n request[\"opts\"] = {\"http_request_timeout\": self.nxargs[\"timeout\"]}\n log.info(\"request: %s\", request)\n return request", "def create_http_request(self):\n http_request = HttpRequest()\n if \"REQUEST_METHOD\" in os.environ:\n http_request.method = os.environ[\"REQUEST_METHOD\"].strip().lower()\n if \"HTTP_COOKIE\" in os.environ:\n http_request.cookie = os.environ[\"HTTP_COOKIE\"].strip()\n if \"QUERY_STRING\" in os.environ:\n http_request.query_string = os.environ[\"QUERY_STRING\"].strip()\n if \"HTTP_ACCEPT\" in os.environ:\n http_request.accept = os.environ[\"HTTP_ACCEPT\"].strip()\n if \"REQUEST_URI\" in os.environ:\n http_request.request_uri = os.environ[\"REQUEST_URI\"].strip()\n\n return http_request", "def create_req(self):\n \n pass", "def request(method, url, data=None, json=None, headers={}, stream=None, timeout=None):\n\t...", "def make_request(self, url, data):\n return", "def build_replica_request(self) -> Request:\n request = Request()\n\n parsed = urlparse(self.address if self.address.startswith('http') else 'http://%s' % self.address)\n port = str(parsed.port)\n hostname = parsed.netloc[:-(len(port) + 1)]\n\n # Details\n request.version = None\n request.remoteIp = None\n request.protocol = self.service_type\n request.host = self.address\n request.hostName = hostname\n request.port = port\n request.uri = None\n\n # Method\n request.method = NON_PREFLIGHT_METHODS[5] if self.is_producer else NON_PREFLIGHT_METHODS[0]\n\n # Path\n request.path = '/%s' % self.topic\n\n # Headers\n headers = self.headers\n\n for key, value in headers.items():\n if isinstance(value, ConfigExternalFilePath):\n value = value.path\n request.headers[key.title()] = value\n\n request.headers.update(self.amqp_properties)\n\n # Query String\n if self.key is not None:\n request.queryString['key'] = self.key\n\n # Body\n value = self.value\n if isinstance(value, ConfigExternalFilePath):\n value = value.path\n\n request.mimeType = 'text/plain'\n if isinstance(value, (bytes, bytearray)):\n request.bodyType = BASE64\n request.body = _b64encode(value)\n else:\n request.bodyType = 'str'\n request.body = value\n request.bodySize = 0 if value is None else len(value)\n\n # Files\n request.files = []\n\n return request", "def _request(self, method: str, subpath: str, data: any,\n target_class: any, target_module: Module = None, target_object = None):\n # Construct path (avoid double slash)\n if subpath != '' and subpath[0] == '/':\n subpath = subpath[1:]\n path = self._path + subpath\n\n json = serialize(data, method)\n\n #print(f\"- method: {method}\")\n #print(f\"- path: {path}\")\n #print(f\"- input: {json}\")\n\n # Actual request\n response = self._session.request(method, path, json=json)\n\n # Raise an exception for HTTP(S) level errors (4xx / 5xx status codes)\n response.raise_for_status()\n\n #print(f\"- output: {response.text}\")\n #print(\"\")\n\n # Note that not all requests have a body with JSON content\n try:\n json = response.json()\n except ValueError as e:\n if response.text:\n raise\n else:\n return None\n\n return deserialize(json, target_class, 
target_module, target_object)", "def treetime_request():\n userid = \"tt_\" + make_id();\n print (\"treetime request: \" + request.method + \" user_id: \" + userid)\n data = {\n \"UserId\": userid,\n \"config\": {}\n }\n response = app.response_class(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n return response", "def create_request(method_name,*args,**kwargs):\n\t\timport uuid, json\n\t\tif not isinstance(method_name,str):\n\t\t\traise TypeError('method name must be a string')\n\t\tif len(args) != 0 and len(kwargs) != 0:\n\t\t\traise TypeError('the method cannot be called with positional and keyword arguments at the same time')\n\t\t# Start building the dict.\n\t\tjdict = {'jsonrpc':'2.0','id':str(uuid.uuid4()),'method':method_name}\n\t\tif len(args) != 0:\n\t\t\tjdict['params'] = args\n\t\telif len(kwargs) != 0:\n\t\t\tjdict['params'] = kwargs\n\t\treturn jdict", "def createJSONRequest(url, requestType, index):\r\n h = httplib2.Http()\r\n return h.request(url, requestType)[index]", "def get_request(data):\n\n # parse get request\n if isinstance(data, str):\n kvs = parse_params(data)\n return __get_request_from_url(kvs)\n \n # post request is supposed to be file object\n elif isinstance(data, io.IOBase):\n root = parse_xml(data)\n return __get_request_from_xml(root)\n else:\n pass", "def send_request(self, method, url, data=None, headers=None, timeout=None, instance_type=None):\n if not instance_type:\n instance_type = self.get_default_instance_type()\n resp = self.starter.send_request(\n instance_type=instance_type,\n verb_method=method,\n url=url,\n data=data,\n headers={} if headers is None else headers,\n timeout=timeout,\n )\n return resp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the current instance for a response. To be used by internal methods that are going to act on self.response.
def checkResponse(self): if self.response == False: print("There is no response yet. Run self.sendRequest() first.") return False else: return True
[ "def _default_response_valid(self, response: requests.Response) -> bool:\n return response.status_code == 200", "def IsResponse(self,varName):\n\t\td=self.GetDeclaration(varName)\n\t\treturn isinstance(d,ResponseDeclaration)", "def _ensure_response_has_view(self):\n if not (self.response.original and isinstance(self.response.original, View)):\n raise ValueError(\"The response is not a view\")", "def process_response(self, request, response):\n return self.__process_awesome_response(request, response)", "def __check_response(self, response):\n if not isinstance(response, requests.Response):\n raise TypeError('Unexpected type {}'.format(type(response.__class__.__name__)))\n response.raise_for_status()\n resp = response.json()\n if 'ok' not in resp:\n raise JsonstoreError('Call to jsonstore failed')\n return resp", "def is_response(self) -> bool:\r\n return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE", "def test_httpresponse_pass_through(self):\n response = twilio_view(self.response_view)(self.request_post)\n self.assertTrue(isinstance(response, HttpResponse))", "def check_response_code(self):\n status = True\n if self.response.status_code != 200:\n print('Request failed, response code: {}'.format(\n self.response.status_code))\n status = False\n\n return status", "def IsResponse(self,varName):\n\t\tns,name=self.GetNamespace(varName)\n\t\tif isinstance(ns,ItemSessionState):\n\t\t\treturn ns.IsResponse(name)\n\t\telif ns:\n\t\t\t# duration is the only test-level response variable\n\t\t\treturn name==u'duration'\n\t\telse:\n\t\t\t# attempt to look up an unsupported namespace\n\t\t\traise NotImplementedError", "def check_api_status(response) -> bool:\n return bool(response)", "def responseHasField( self, field, response ):\n if response is None:\n return False\n if field in response.keys():\n return True\n return False", "def IsResponse(self,varName):\n\t\td=self.GetDeclaration(varName)\n\t\tif d is None:\n\t\t\treturn varName in ('numAttempts','duration')\n\t\telse:\n\t\t\treturn isinstance(d,ResponseDeclaration)", "def _check_response(self, response, *args, **kwargs):\n\n if not response.ok:\n self.logger.debug(\"Response status code: \" + str(response.status_code))\n self.logger.debug(\"Response text: \" + response.text)\n if response.status_code == 400:\n if \"tokens\" in response.url:\n raise DAAuthException(\n \"Error getting token. Code: {} Message: {}\".format(\n response.status_code, response.text\n )\n )\n raise DAQueryException(response.text)\n if response.status_code == 401:\n self.logger.warning(\"Access token expired. Acquiring a new one...\")\n self.get_access_token()\n request = response.request\n request.headers[\"Authorization\"] = self.session.headers[\"Authorization\"]\n return self.session.send(request)\n if response.status_code == 403 and \"tokens\" in response.url:\n self.logger.warning(\"Throttled token request. 
Waiting 60 seconds...\")\n self.retries -= 1\n self.logger.debug(\"Retries remaining: {}\".format(self.retries))\n time.sleep(60)\n request = response.request\n return self.session.send(request)\n if response.status_code == 404:\n raise DADatasetException(\"Invalid dataset name provided\")\n if response.status_code in self._status_forcelist:\n self.logger.debug(\"Retries remaining: {}\".format(self.retries))", "def _any_respond_success(self, responses):\n if True in responses:\n return True\n return False", "def _async_check(self):\n # type: () -> None\n if asyncio.iscoroutinefunction(self.get_response):\n self._is_coroutine = asyncio.coroutines._is_coroutine # type: ignore", "def check_response(self, response, callback, name=None):\n if not (response is None or asyncio.iscoroutine(response)):\n return\n if not name:\n if isinstance(callback, types.FunctionType): # FBV\n name = \"The view %s.%s\" % (callback.__module__, callback.__name__)\n else: # CBV\n name = \"The view %s.%s.__call__\" % (\n callback.__module__,\n callback.__class__.__name__,\n )\n if response is None:\n raise ValueError(\n \"%s didn't return an HttpResponse object. It returned None \"\n \"instead.\" % name\n )\n elif asyncio.iscoroutine(response):\n raise ValueError(\n \"%s didn't return an HttpResponse object. It returned an \"\n \"unawaited coroutine instead. You may need to add an 'await' \"\n \"into your view.\" % name\n )", "def _is_keep_alive_response(response):\n return response.get(\"type\") == \"ka\"", "def get_response(self, url):\n self.response = requests.get(url)", "def validate(self, response, module_responses) :\n\n if not self.satisfies_condition(module_responses):\n return True\n else:\n return self.valid_response(response)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prompt the user with the message and only accept yes/no as input. Return true if they say yes.
def promptYesno(message):
    choice = ""
    while not choice:
        choice = input(message+" [y/n] ")
        if choice.lower() in ["yes", "y", "yep", "yup", "sure"]:
            return True
        elif choice.lower() in ["no", "n", "nope", "nah"]:
            return False
        else:
            print("ERROR: Input not recognized. Choose yes or no\n")
            choice = ""
[ "def prompt_yes_no(message):\n print(\"\\n-------------------------\")\n print(message)\n while True:\n answer = input(\"(y|n): \")\n if answer.lower() == \"y\":\n return True\n elif answer.lower() == \"n\":\n return False\n else:\n print(\"Invalid answer...\")", "def askYesNo(question):\n\treturn raw_input(question + \" [y/n]: \").lower() in [\"y\", \"yes\"]", "def _getUserConfirmation(self, message=\"Proceed anyways? (y) or (n): \"):\n proceed = ''\n while not proceed:\n proceed = input(message)\n if len(proceed) and not proceed[0] in ['Y', 'y', 'N', 'n']:\n proceed = ''\n print(\"Please enter 'y' or 'n': \", end='')\n elif len(proceed) and proceed[0].lower() == 'y':\n return True\n elif len(proceed) and proceed[0].lower() == 'n':\n return False", "def input_yes_no(prompt):\n while True:\n result = input(prompt)\n if result and result[0] == 'y':\n return True\n elif result and result[0] == 'n':\n return False\n else:\n print(\"Not understood - please respond yes or no.\")", "def f_ask_yes_no(question):\n vResponse = none\n while vResponse not in (\"y\",\"n\"):\n vResponse = input(question).lower()\n return vResponse", "def ask_yes_no(question):\r\n response = None\r\n while response not in (\"y\", \"n\"):\r\n response = raw_input(question).lower()\r\n return response", "def simple_confirm(warning_str):\n message_str = warning_str + \" Continue? [y/N]\\n\"\n result = raw_input(message_str)\n return (result.lower() in YES_SYNONYMS)", "def confirm(prompt=None, resp=False):\n\n if prompt is None:\n prompt = \"Confirm\"\n\n if resp: # If default response is True, bracket the [y]\n prompt = \"%s [%s]|%s: \" % (prompt, \"y\", \"n\")\n else: # If default response is True, bracket the [n]\n prompt = \"%s [%s]|%s: \" % (prompt, \"n\", \"y\")\n\n\n while True:\n ans = input(prompt)\n if not ans:\n return resp\n if ans not in ('y', 'Y', 'n', 'N'):\n print(\"please enter y or n.\")\n continue\n if ans in ('y', 'Y'):\n return True\n if ans in ('n', 'N'):\n return False", "def prompt_confirmation():\n sys.stdout.write('Please confirm (yes/no): ')\n response = input()\n if response != 'yes':\n print('Cancelling.')\n sys.exit(-1)\n print('Confirmed.')", "def ask_yes_no(question):\n\tresponse = None\n\twhile response not in (\"y\", \"n\"):\n\t\tresponse = input(question).lower()\n\treturn response", "def input_yes_no_choice(optional_message=\"Add another Product?(Y/N): \"):\n # Use the Default Message or pass in an optional message, strip white spaces, and make lower case\n return str(input(optional_message)).strip().lower()", "def display_yes_no_dialog(msg, title):\n\n dlg = ix.application.message_box(msg, title, ix.api.AppDialog.cancel(),\n ix.api.AppDialog.STYLE_YES_NO)\n\n return dlg.is_yes()", "def get_yesno(message, default=False):\n if default:\n prompt = ' [Y/n]: '\n elif default is False:\n prompt = ' [y/N]: '\n elif default is None:\n prompt = ' [y/n]: '\n val = raw_input(message + prompt)\n while (val.lower() not in ['y', 'n']) and (default is None):\n val = raw_input(message + prompt)\n continue\n\n if val:\n ret = val == 'y'\n else:\n ret = default\n return ret", "def confirm(prompt=None, resp=False):\n # FIXME find a way to test this\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print('please enter y or n.')\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans 
== 'n' or ans == 'N':\n return False", "def confirm(self, prompt=None, default_yes=True, abort_no=False):\n\n if self.dry:\n return False\n\n if self.force:\n return True\n\n if prompt is None:\n prompt = 'Proceed?'\n\n if default_yes:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ask = getattr(__builtins__, 'raw_input', input)\n ans = ask(prompt)\n if not ans:\n return default_yes\n if ans not in ['y', 'Y', 'n', 'N']:\n print('please enter y or n.')\n continue\n if ans in ('Y', 'y'):\n return True\n if ans in ('N', 'n'):\n if abort_no:\n sys.exit(1)\n else:\n return False", "def state_answer_yes_no(self):\n while self.user_input_yn not in (\"yes\", \"no\"):\n self.user_input_yn = raw_input(\"\\nU> \").lower()\n # If the user writes \"exit\", stop the run\n if self.user_input_yn == \"exit\":\n rospy.on_shutdown(self.shutdown_callback)\n sys.exit()\n # If the user writes any other thing\n elif self.user_input_yn not in (\"yes\", \"no\"):\n print('\\nS> Please, answer \"yes\" or \"no\".')\n print('\\nS> You can also write \"exit\" to close the program.')", "def confirm() -> bool:\n correct = input(\"Press <Enter> if you want to go on, \"\n \"<n> for rerunning the program: \")\n\n return True if correct == '' else False", "def is_ready():\r\n user_ready = input(\"Are you ready to begin? Please type 'Yes' or 'yes': \")\r\n if user_ready == 'Yes' or user_ready == 'yes':\r\n return True\r\n else:\r\n print(\"Invalid input.\")\r\n return False", "def confirm(question, header=None, warn=None, forceanswer=False):\n\n if header:\n print('\\n{}'.format(header))\n if warn:\n if isinstance(warn, str):\n print('\\n{}'.format(colorerr(warn)))\n elif isinstance(warn, (list, tuple)):\n print('\\n{} {}'.format(colorerr(warn[0]), colorval(warn[1])))\n\n if not question.endswith('?'):\n question = '{}?'.format(question)\n if not question.startswith('\\n'):\n question = '\\n{}'.format(question)\n question = '{} (y/N): '.format(question)\n\n ans = input(question).lower()\n while forceanswer and (not ans):\n ans = input(question).lower()\n\n return (ans[0] == 'y') if ans else False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prompt the user with the message and only accept numbers as input. Return the input.
def promptNum(message):
    choice = 0
    while not choice:
        choice = input(message+" [number] ")
        try:
            int(choice)
        except:
            print("ERROR: Input not recognized. Choose a number\n")
            choice = 0
    return choice
[ "def _needs_number(self, user_input):\n while not user_input.isdigit():\n user_input = input(\"You need to enter a number \")\n return int(user_input)", "def multiplier_prompt():\n while True:\n try:\n multiplier = re.sub(\"[, ]\", \"\", input(\"\\nMultiply donations by: \"))\n return int(multiplier)\n break\n except ValueError:\n print(\"\\n>> Please enter a valid multiplier <<\")", "def get_number(prompt, error_prompt, limit_prompt, min_num=0 - float('inf'), max_num=float('inf'), valid_type='either'):\n valid_input = False\n number = None\n while not valid_input:\n try:\n number = input(prompt)\n if valid_type == 'int':\n number = int(number)\n else:\n try:\n number = int(number)\n except ValueError:\n number = float(number)\n\n if not min_num <= number <= max_num:\n print(limit_prompt)\n else:\n valid_input = True\n except ValueError:\n print(error_prompt)\n return number", "def get_number(prompt):\n res = None\n while res is None:\n try:\n res = float(raw_input(prompt))\n except ValueError: pass\n return res", "def getNumeric(prompt, return_type):\n while True:\n response = raw_input(prompt)\n try:\n # if isinstance(response, str) and response.isdigit():\n if isinstance(response, str) and isnumeric(response):\n if return_type == 'int':\n return int(response)\n elif return_type == 'float':\n return float(response)\n elif return_type == 'str':\n return str(response)\n else:\n print(\"please enter a number:\")\n # except ValueError:\n except:\n print(\"please enter a number:\")", "def get_int_input(prompt):\n input_value = None\n while input_value is None:\n try:\n input_value = int(raw_input(prompt))\n except:\n print_error(\"Invalid Number.\")\n return input_value", "async def value_not_digit(self, ctx, user):\r\n await ctx.send(f\"{user.mention} Please raise a numerical value.\")", "def input_number_or_other(self, prompt, other):\n while True:\n result = input(prompt)\n if (result == 'q' or result == 'Q'):\n sys.exit()\n if result.isdigit():\n return int(result)\n if (result in other):\n return result", "def new_donation_prompt():\n while True:\n try:\n amount = re.sub(\"[, ]\", \"\", input(\"\\nNew donation amount:\\n$\"))\n return round(float(amount), 2)\n break\n except ValueError:\n print(\"\\n>> Please enter a valid donation amount <<\")", "def get_user_float(self, min: float, max: float, message: Optional[str] = None) -> float:\n while True:\n tmp: str = input(message if message else '> ')\n if re_match(r'^-?\\d+(?:\\.\\d+)?$', tmp) and float(tmp) in arange(min, max + 1): return float(tmp)\n # TODO: print backspace to clean up previous failures? 
keep menu on screen..\n self.log_error('Please enter a valid value.')", "def inputFloat(prompt: str) -> float:\n while True:\n try:\n return float(input(prompt))\n except ValueError:\n print(\"Please input a number.\")", "def get_float_entry(text=\"Input floating point or integer\", prompt=\"0\", ):\n while True:\n data = input(\"{} [{}]:\".format(text, prompt))\n if data == \"\":\n data = prompt\n try:\n return float(data)\n except ValueError as e:\n if debug: print(\"Value Error: {}\".format(e))\n print(\"Invalid data, please re-enter...\")\n continue", "def get_positive_integer_entry(text=\"Input +ve or -ve integer\", prompt=\"0\"):\n while True:\n data = input(\"{} [{}]:\".format(text, prompt))\n if data == \"\":\n data = prompt\n try:\n data = int(data)\n return abs(data)\n except ValueError as e:\n if debug: print(\"Value Error: {}\".format(e))\n print(\"Invalid data, please re-enter...\")\n continue", "def input_integer_range(message, low, high):\n while(True):\n try:\n num = int(input(message + '\\n'))\n if num >= low and num <= high:\n return num\n break\n else:\n print('Number must be within desired range\\n')\n except:\n print('Only enter a whole number within the desired range\\n')", "def prompt_for_value(message_text):\n\n sys.stdout.write(f\"{message_text}: \")\n sys.stdout.flush()\n return sys.stdin.readline().rstrip()", "def ask_numbers():", "def is_valid_integer(request_prompt, error_prompt):\n valid = False\n while not valid:\n value_string = input(request_prompt)\n try:\n value = int(value_string)\n valid = True\n except (ValueError):\n print(error_prompt)\n return value", "def pedirNum():\n\n numeroPedido = \"\"\n while validarNum(numeroPedido, 1) == False:\n numeroPedido = str(input(\"Ingrese un numero de 4 cifras distintas: \"))\n return numeroPedido", "def min_prompt():\n while True:\n try:\n min = re.sub(\"[, ]\", \"\", input(\"\\nMin donation (Press enter for default value): \"))\n return round(float(0), 2) if not min else round(float(min), 2)\n break\n except ValueError:\n print(\"\\n>> Please enter a valid minimum value <<\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prompt the user with the message and only accept text as input. Return the input.
def promptText(message):
    choice = ""
    while not choice:
        choice = input(message+" [text] ")
        try:
            str(choice)
        except:
            print("ERROR: Input not recognized. Choose text\n")
            choice = ""
    return choice
[ "def prompt_for_value(message_text):\n\n sys.stdout.write(f\"{message_text}: \")\n sys.stdout.flush()\n return sys.stdin.readline().rstrip()", "def fetch_user_input(self):\n user_text = self.entry.get()\n self.entry.delete(0, 'end')\n self.pipe_in_1(\"user_text\")\n return user_text", "def get_string(self):\n self.text = input(\"Please enter string: \")", "def prompt(self, message):\n raise NotImplementedError()", "def input_text(thePrompt: str, theInputWidth: int, theDefaultInput: str = None, **kwds):\n box = Dialog(**kwds)\n d = box.margin\n\n def ok():\n box.dismiss(True)\n\n def cancel():\n box.dismiss(False)\n\n lb = Label(thePrompt)\n lb.topleft = (d, d)\n tf = TextField(theInputWidth)\n if theDefaultInput:\n tf.set_text(theDefaultInput)\n tf.enter_action = ok\n tf.escape_action = cancel\n tf.top = lb.top\n tf.left = lb.right + 5\n box.add(lb)\n box.add(tf)\n tf.focus()\n box.shrink_wrap()\n if box.present():\n return tf.get_text()\n else:\n return None", "def getRawInput(self, msg, allowed):\n inputValue = raw_input(msg).lower()\n while inputValue not in allowed:\n print 'Command not recognized.\\n'\n inputValue = raw_input(msg).lower()\n return inputValue", "def prompt(message:str, model=\"gpt-4\"):\n import openai\n response = openai.ChatCompletion.create(\n model=model,\n messages=[{\"role\": \"user\", \"content\": message}]\n )\n return response['choices'][0]['message']['content']", "def _get_user_input(prompt):\n\n _inp = ''\n while not _inp:\n _inp = input(prompt)\n\n return _inp", "def eval_prompt(self, input):\n return input", "def prompt_messages():\n ws_id = 0\n if check_logs_timestamp():\n input_message = \"would you like to change the log timestamp from log collection time to log creation time?\" \\\n \"\\nEnter Yes/No\\n\"\n print_notice(\"We have recognized your logs timestamp is set to: Log collection time\\n\")\n else:\n input_message = \"would you like to change the log timestamp from log creation time to log collection time?\" \\\n \"\\nEnter Yes/No\\n\"\n print_notice(\"We have recognized your logs timestamp is set to: Log creation time\\n\")\n # to be compatible with both python 2.7 and python 3\n try:\n response = raw_input(input_message)\n if response not in yes_response:\n sys.exit()\n except NameError:\n response = input(input_message)\n if response not in yes_response:\n sys.exit()\n return", "def get_valid_selection(prompt):\n user_input = input(\"{}: \".format(prompt))\n while user_input == \"\":\n print(\"Input can not be blank\")\n user_input = input(\"{}: \".format(prompt))\n return user_input", "def input(prompt=\"\"):\n _print_stderr(\" >> {}\".format(prompt), end=\"\")\n return builtins.input()", "def input(prompt=None):\n if prompt:\n sys.stderr.write(str(prompt))\n return builtins.input()", "def get_user_input(query):\n return raw_input(query + \"\\n\")", "def userInput(prompt: str = \"\") -> str:\n return input(str).lower()", "def FilterInput(self, text):\n return text", "def collect_username(self, message=None):\n if not (message is None):\n print(message)\n username = input(\n f'{bcolors.OKBLUE}Please enter your username: {bcolors.ENDC}')\n while len(username) > MAX_LENGTH_USERNAME or len(username) < MIN_LENGTH_USERNAME:\n username = input(f'{bcolors.WARNING}Username must be between {MIN_LENGTH_USERNAME} and {MAX_LENGTH_USERNAME} characters{bcolors.ENDC}'\n f'\\n{bcolors.OKBLUE}Please enter you username: {bcolors.ENDC}')\n return username", "def message_from_prompt(): \n username= raw_input('Username: ')\n fromaddr= username+'@gmail.com'\n 
toaddrs= raw_input('Destination: ')\n subject= raw_input('Subject: ')\n body= raw_input('Message: ')\n msg= MIMEText(body)\n msg['Subject']= subject\n msg['From']= fromaddr\n msg['To']= toaddrs\n return msg", "def ask_input(question, defaultvalue, pattern):\n while True:\n answer = input(\"{}\\n[{}] >\".format(question, defaultvalue))\n if answer == \"\":\n return defaultvalue\n elif re.match(pattern, answer):\n return answer\n else:\n print(\"Error: answer does not match expected format.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report a result for testname to Testrail, based on the test ID mapping in testsList
def reportResult(testname, testsList, status, comment=False):
    # Testrail status codes mapped to human readable
    statusMap = {
        "pass": 1,
        "passed": 1,
        "blocked": 2,
        "untested": 3,
        "retest": 4,
        "fail": 5
    }
    payload = {
        "status_id": statusMap[status]
    }
    if comment:
        payload["comment"] = comment
    addResult = testrailRequest("post", "/add_result/"+str(testsList[testname]["id"]), payload)
    addResult.sendRequest()
[ "def print_test_results(self):\n for n, (name, result) in enumerate(zip(self._test_names, self._test_results)):\n print('Test {} ({}): {}'.format(n+1, name, result))\n print('{} test failure(s)'.format(self.fail_cnt))", "def TestCaseReport(self, name, result, msg=''):\n self._WriteToRecord(BODY, 'TESTCASE: %s %s' % (name, result))\n if msg:\n self._WriteToRecord(BODY, ' %s' % msg)\n # Collect none passed test in PRE_BODY too.\n if result != constants.PASS:\n self._WriteToRecord(PRE_BODY, 'TESTCASE: %s %s' % (name, result))\n\n if result == constants.TIMEOUT:\n self.timeout += 1\n elif result == constants.FAIL:\n self.failed += 1\n elif result == constants.PASS:\n self.passed += 1\n elif result == constants.NOTRUN:\n self.notrun += 1\n elif result == constants.ERROR:\n self.error += 1\n else:\n self.unknown += 1\n\n self._WriteToReport()", "def test_results(self, testname):\n return self._tests[testname]", "def test_report_result_multiple_times(self):\n cl = _t_add_client()\n tc = _t_add_test(os.path.join(self.test_dir, 'test_case_name.dat'))\n pl, pv = _t_add_plugin(\n 'test_pl_name',\n os.path.join(self.plugin_dir, 'pl_file_test.tar.gz')\n )\n _t_add_plugin(\n 'test_pl_name_2',\n os.path.join(self.plugin_dir, 'pl_file_test.tar.gz')\n )\n plt = _t_add_plugin_test(pl, tc)\n\n result = _t_create_result(count=1)\n report = {\n 'duration':1200,\n 'iterations':10,\n 'mac':cl.mac_addr,\n 'has_result':True,\n 'plugin_name':pl.name,\n 'plugin_version':pv.name,\n 'classification':result['classification'],\n 'count':result['count'],\n 'defect':result['defect'],\n 'failure':result['failure'],\n 'file_name':result['file_name'],\n 'log':result['log'],\n 'name':result['name'],\n 'result_hash':result['result_hash']\n }\n for _ in range(2):\n _t_add_work_unit(cl, pv, plt)\n with open(result['temp_fs_name'], 'rb') as fp:\n report['attachment'] = fp\n response = self.client.post('/csserver/workreport/', report)\n self.assertEqual(response.status_code, 200)\n os.remove(result['temp_fs_name'])\n r = Result.objects.all()\n self.assertEqual(len(r), 1)\n r = r[0]\n self.assertEqual(r.count, 2)\n self.assertEqual(Result.objects.filter(triage_state=TriageState.objects.get(name='New')).count(), 1)", "def test_report_work_unit_multiple_results(self):\n cl = _t_add_client()\n pl, pv = _t_add_plugin(\n 'test_pl_name',\n os.path.join(self.plugin_dir, 'pl_file_test.tar.gz')\n )\n tc = _t_add_test(os.path.join(self.test_dir, 'test_case_name.dat'))\n _t_add_work_unit(cl, pv, _t_add_plugin_test(pl, tc))\n\n for report_number in range(1, 10):\n result = _t_create_result(count=1)\n report = {\n 'duration':1200,\n 'iterations':10,\n 'mac':cl.mac_addr,\n 'has_result':True,\n 'plugin_name':pl.name,\n 'plugin_version':pv.name,\n 'classification':result['classification'],\n 'count':result['count'],\n 'defect':result['defect'],\n 'failure':result['failure'],\n 'file_name':result['file_name'],\n 'log':result['log'],\n 'name':result['name'],\n 'result_hash':result['result_hash']\n }\n\n with open(result['temp_fs_name'], 'rb') as fp:\n report['attachment'] = fp\n response = self.client.post('/csserver/workreport/', report)\n os.remove(result['temp_fs_name'])\n self.assertEqual(response.status_code, 200)\n wu = WorkUnit.objects.filter(client=cl).order_by('-created_time')[:1]\n self.assertEqual(len(wu), 1)\n wu = wu[0]\n self.assertIsNotNone(wu.end_time)\n self.assertEqual(wu.duration, report['duration'])\n self.assertEqual(wu.iterations, report['iterations'])\n self.assertLessEqual(wu.created_time, wu.end_time)\n 
self.assertEqual(Result.objects.count(), report_number)\n r = Result.objects.get(failure=report['failure'])\n self.assertEqual(r.classification, Classification.objects.get(value=report['classification']))\n self.assertEqual(r.count, report['count'])\n self.assertEqual(r.defect, report['defect'])\n self.assertEqual(r.failure, report['failure'])\n self.assertEqual(r.file_hash, report['result_hash'])\n self.assertEqual(r.name, report['name'])\n self.assertEqual(r.log, report['log'])\n self.assertEqual(r.triage_state, TriageState.objects.get(name='New'))\n self.assertTrue(os.path.isfile(r.data.name))\n pv = PluginVersion.objects.get(pk=pv.pk)\n self.assertEqual(pv.duration, report['duration'])\n self.assertEqual(pv.iterations, report['iterations'])", "def printTestResult(self):\n splitter = \"=================================================================================================\"\n print(\"\\n\" + splitter)\n print(\"%-3s%-60s%11s\" % ('ID', 'Testcase Name', 'Test Result'))\n for i in range(len(self)):\n print(\"%-3d%-60s%11s\" % (i + 1, self[i].name, self[i].result))\n print(splitter + \"\\n\")", "def _document_test_result(self) -> None:\n self.test_id = 1\n instance_pass_tests, aggregate_pass_tests, instance_fail_tests, aggregate_fail_tests = [], [], [], []\n\n for test in self.json_summary[\"tests\"]:\n if test[\"test_type\"] == \"per-instance\" and test[\"passed\"]:\n instance_pass_tests.append(test)\n elif test[\"test_type\"] == \"per-instance\" and not test[\"passed\"]:\n instance_fail_tests.append(test)\n elif test[\"test_type\"] == \"aggregate\" and test[\"passed\"]:\n aggregate_pass_tests.append(test)\n elif test[\"test_type\"] == \"aggregate\" and not test[\"passed\"]:\n aggregate_fail_tests.append(test)\n\n with self.doc.create(Section(\"Test Summary\")):\n with self.doc.create(Itemize()) as itemize:\n itemize.add_item(\n escape_latex(\"Execution time: {:.2f} seconds\".format(self.json_summary['execution_time(s)'])))\n\n with self.doc.create(Table(position='H')) as table:\n table.append(NoEscape(r'\\refstepcounter{table}'))\n self._document_summary_table(pass_num=len(instance_pass_tests) + len(aggregate_pass_tests),\n fail_num=len(instance_fail_tests) + len(aggregate_fail_tests))\n\n if instance_fail_tests or aggregate_fail_tests:\n with self.doc.create(Section(\"Failed Tests\")):\n if len(aggregate_fail_tests) > 0:\n with self.doc.create(Subsection(\"Failed Aggregate Tests\")):\n self._document_aggregate_table(tests=aggregate_fail_tests)\n if len(instance_fail_tests) > 0:\n with self.doc.create(Subsection(\"Failed Per-Instance Tests\")):\n self._document_instance_table(tests=instance_fail_tests, with_id=bool(self.data_id))\n\n if instance_pass_tests or aggregate_pass_tests:\n with self.doc.create(Section(\"Passed Tests\")):\n if aggregate_pass_tests:\n with self.doc.create(Subsection(\"Passed Aggregate Tests\")):\n self._document_aggregate_table(tests=aggregate_pass_tests)\n if instance_pass_tests:\n with self.doc.create(Subsection(\"Passed Per-Instance Tests\")):\n self._document_instance_table(tests=instance_pass_tests, with_id=bool(self.data_id))\n\n self.doc.append(NoEscape(r'\\newpage')) # For QMS report", "def testTestResult(self):\n # Write test results file\n result_file = os.path.join(TEST_DATA_DIR, 'test_result.xml')\n output_file = (\n '/data/app_default_bucket/test_runs/%s/output/%s/%s/test_result.xml' %\n (self.test_run_id, self.task['command_id'], self.task['attempt_id']))\n self.container.CopyFile(result_file, output_file)\n # Verify that test results were 
parsed on completion\n self.container.SubmitCommandEvent(self.task, 'InvocationCompleted')\n self.container.WaitForState(self.test_run_id, 'COMPLETED')\n time.sleep(10) # Wait for test result processing to complete.\n test_run = self.container.GetTestRun(self.test_run_id)\n self.assertEqual('4', test_run['failed_test_count'])\n self.assertEqual('1', test_run['failed_test_run_count'])\n self.assertEqual('297', test_run['total_test_count'])", "def table_of_result(self, result):\n rows = []\n def add(label, lst, style):\n for test, backtrace in lst:\n rows.append([label, result.getDescription(test), style])\n add(CHECK, result.successes, u'unittest-success')\n add(CROSS, result.failures, u'unittest-failure')\n add(HEAVY_CROSS, result.errors, u'unittest-error')\n add(SKIP, result.skipped, u'unittest-skipped')\n add(CHECK, result.expectedFailures, u'unittest-success')\n add(CROSS, result.unexpectedSuccesses, u'unittest-failure')\n bd = u'\\n'.join([u'<p class=\"unittest-test {}\">{}<span class=\"unittest-name\">{}</span></p>'.format(row[2], row[0], row[1]) for row in rows])\n return u'{}'.format(bd)", "async def test_filter_by_test_result(self):\n self.response.text = AsyncMock(return_value=self.TESTNG_XML)\n jira = {\"type\": \"jira\", \"parameters\": {\"url\": self.jira_url, \"jql\": \"jql\", \"test_result\": [\"untested\"]}}\n testng = {\"type\": \"testng\", \"parameters\": {\"url\": self.test_report_url}}\n measurement = await self.collect({\"jira\": jira, \"testng\": testng})\n self.assert_equal_entities([self.jira_entity(\"key-2\")], measurement.sources[0].entities)\n self.assertEqual(\"1\", measurement.sources[0].value)\n self.assertEqual(\"0\", measurement.sources[1].value)", "def InternalResultReportMultiThread(self, test_name, function, args,\n **kwargs):\n self._report_thread_lock.acquire()\n tr_record = records.TestResultRecord(test_name, self.TAG)\n self.results.requested.append(tr_record)\n try:\n self.execOneTest(test_name, function, args, **kwargs)\n except Exception as e:\n raise e\n finally:\n self._report_thread_lock.release()", "def summariseResult(self, test):", "def add_test_set(self, name):\n #self.index = self.index + 1\n self.index += 1\n test_base_data = {'name': name,\n 'status': \"Running\",\n 'duration': time(),\n 'report_version': 2,\n 'submission': datetime.now(). 
\\\n strftime(\"%A %B %d %H:%M %Z %Y\"),\n 'SubTests': []\n }\n self.results_list.append(test_base_data)\n self.logger.log(0, \"create subtest results: %s\",\n str(self.results_list))", "def _InsertTestTimeAndResult(self, test_name, tests):\n\n result = self._get_result_char(test_name)\n test_time = self._GetTestTiming(test_name)\n\n this_test = tests\n for segment in test_name.split('/'):\n if segment not in this_test:\n this_test[segment] = {}\n this_test = this_test[segment]\n\n if len(this_test) == 0:\n self._PopulateResultsAndTimesJSON(this_test)\n\n if self.RESULTS in this_test:\n self._InsertItemRunLengthEncoded(result, this_test[self.RESULTS])\n else:\n this_test[self.RESULTS] = [[1, result]]\n\n if self.TIMES in this_test:\n self._InsertItemRunLengthEncoded(test_time, this_test[self.TIMES])\n else:\n this_test[self.TIMES] = [[1, test_time]]", "def test_add_test_result():\n api = 'results/new/'\n\n data = {\n 'run_id': 13,\n 'outcome': 1, # 0=passed, 1=failed, 2=skipped, 3=error, 5=pending'\n 'stdout': 'my test output',\n 'duration': 23.5, # float, in seconds\n 'testcase': {\n 'name': 'VerifyLoginFailed',\n 'full_name': 'tests.login_tests.VerifyLoginFailed',\n 'description': 'ECS-1234, optional',\n },\n 'test_client': {\n 'name': 'test-agent1',\n 'platform': 'windows 10.1',\n },\n 'error': {\n 'exception_type': 'AssertError',\n 'message': 'the message of exception',\n 'stacktrace': 'the stack trace info',\n }\n }\n\n response = requests.post(url=root + api,\n auth=auth,\n json=data)\n\n result = response.json()\n assert result['success'], response.text\n test_result = result['result']\n print(test_result)", "def runtest():\n pwd = os.path.abspath(os.path.dirname(__file__))\n response = json.loads(request.body.read())\n testCases = (str(response['testCases'])).split(',')\n testCases.pop()\n _runner = (str(response['Runner']))\n _buildName = (str(response['buildName']))\n _userId = (str(response['userId']))\n _testPlanId = (str(response['testPlanId']))\n totalTestCases = len(testCases)\n if _runner == 'HTMLTestRunner':\n if totalTestCases == 0:\n return \"Select testcases to run..\"\n else:\n shutil.rmtree(pwd+'/Output/')\n os.mkdir(pwd+'/Output/')\n listOfTestSuiteNames = getTestSuiteNames(testCases)\n for testSuite in listOfTestSuiteNames:\n suite = unittest.TestSuite()\n for testCase in testCases:\n testSuiteName = ((str(testCase).split(' '))[0]).split('.')[-1]\n if testSuite == testSuiteName:\n _testSuiteName = ((str(testCase)).split(' ')[0])[1:]\n classObj = my_import(_testSuiteName)\n _testCaseName = ((((str(testCase)).split(' ')[1])[:-1]).split('='))[1]\n suite.addTest(classObj(_testCaseName))\n _testModuleName = testSuiteName#((str(testSuite).split(\".\")[-1])[0:-2]) \n _output = open(pwd+\"/Output/\"+_testModuleName+\".html\",\"w\")\n HTMLRunner = HTMLTestRunner.HTMLTestRunner(stream=_output,title=_testModuleName,description=\"Test case's for the module \"+_testModuleName)\n HTMLRunner.run(suite)\n subprocess.Popen(['python',pwd+\"/ExtLib/Statistics.py\",\"Test Automation\",pwd+\"/Output/\"])\n IndexMaker = HTMLIndexCreator.HTMLIndexCreator(pwd+\"/Output/\")\n IndexMaker.makeHTMLIndexFile() \n return \"Test completed.....\"\n else:\n return \"The specified runner does not exist.\"", "def summariseSuiteResult(self, suite):", "def execute_test(self, test_info):\n # Initialize Test Class\n e2e_test = mission_e2e_test.GaiaTest(self.test_const)\n\n test_id_number = test_info['test_id_number']\n test_name = test_info['test_name']\n\n test_results = {'test_name': test_name, 
'test_id_number': test_id_number}\n\n # Get current test\n current_test = test_utils.get_test_description(test_id_number)\n\n if current_test == test_common.TEST_LOGIN:\n test_results = e2e_test.get_basic_info_from_test_description(test_results, self.test_const.LOGIN)\n test_results = e2e_test.test_login(test_results)\n elif current_test == test_common.TEST_LOGOUT:\n test_results = e2e_test.get_basic_info_from_test_description(test_results, self.test_const.LOGOUT)\n test_results = e2e_test.test_logout(test_results)\n elif current_test == test_common.TEST_QUERY_OBJECT:\n test_results = e2e_test.get_basic_info_from_test_description(test_results, self.test_const.QUERY_OBJECT)\n test_results = e2e_test.test_query_object(test_results)\n elif current_test == test_common.TEST_CONE_SEARCH:\n test_results = e2e_test.get_basic_info_from_test_description(test_results, self.test_const.CONE_SEARCH)\n test_results = e2e_test.test_cone_search(test_results)\n elif current_test == test_common.TEST_GET_PUBLIC_TABLES:\n test_results = e2e_test.get_basic_info_from_test_description(test_results,\n self.test_const.GET_PUBLIC_TABLES)\n test_results = e2e_test.test_get_public_tables(test_results)\n elif current_test == test_common.TEST_LOAD_TABLE:\n test_results = e2e_test.get_basic_info_from_test_description(test_results, self.test_const.LOAD_TABLE)\n test_results = e2e_test.test_load_table(test_results)\n elif current_test == test_common.TEST_SYNCHRONOUS_QUERY:\n test_results = e2e_test.get_basic_info_from_test_description(test_results,\n self.test_const.SYNCHRONOUS_QUERY)\n test_results = e2e_test.test_synchronous_query(test_results)\n elif current_test == test_common.TEST_SYNCHRONOUS_ON_THE_FLY_QUERY:\n test_results = e2e_test.get_basic_info_from_test_description(test_results,\n self.test_const.SYNCHRONOUS_ON_THE_FLY_QUERY)\n test_results = e2e_test.test_synchronous_on_the_fly_query(test_results)\n elif current_test == test_common.TEST_ASYNCHRONOUS_QUERY:\n test_results = e2e_test.get_basic_info_from_test_description(test_results,\n self.test_const.ASYNCHRONOUS_QUERY)\n test_results = e2e_test.test_asynchronous_query(test_results)\n elif current_test == test_common.TEST_LIST_SHARED_TABLES:\n test_results = e2e_test.get_basic_info_from_test_description(test_results,\n self.test_const.LIST_SHARED_TABLES)\n test_results = e2e_test.test_shared_table(test_results)\n elif current_test == test_common.TEST_DELETE_TABLE:\n test_results = e2e_test.get_basic_info_from_test_description(test_results, self.test_const.DELETE_TABLE)\n # Basically in order to test the delete we need to perform the same steps as for upload a table into\n # a user schema. 
For this reason we will re-use one of this methods but filling the test_results obj\n # with the proper content.\n test_results = e2e_test.test_upload_table_from_source(test_results, current_test)\n elif current_test == test_common.TEST_UPLOAD_TABLE_FROM_URL:\n test_results = e2e_test.get_basic_info_from_test_description(test_results,\n self.test_const.UPLOAD_TABLE_FROM_URL)\n test_results = e2e_test.test_upload_table_from_source(test_results, current_test)\n elif current_test == test_common.TEST_UPLOAD_TABLE_FROM_FILE:\n test_results = e2e_test.get_basic_info_from_test_description(test_results,\n self.test_const.UPLOAD_TABLE_FROM_FILE)\n test_results = e2e_test.test_upload_table_from_source(test_results, current_test)\n elif current_test == test_common.TEST_UPLOAD_TABLE_FROM_JOB:\n test_results = e2e_test.get_basic_info_from_test_description(test_results,\n self.test_const.UPLOAD_TABLE_FROM_JOB)\n test_results = e2e_test.test_upload_table_from_job(test_results)\n elif current_test == test_common.TEST_UPLOAD_TABLE_FROM_ASTROPY_TABLE:\n test_results = e2e_test.get_basic_info_from_test_description(test_results,\n self.test_const.UPLOAD_TABLE_FROM_ASTROPY_TABLE)\n test_results = e2e_test.test_upload_table_from_astropy_table(test_results)\n elif current_test == test_common.TEST_CROSS_MATCH:\n test_results = e2e_test.get_basic_info_from_test_description(test_results, self.test_const.CROSS_MATCH)\n test_results = e2e_test.test_cross_match(test_results)\n elif current_test == test_common.TEST_DATALINK:\n test_results = e2e_test.get_basic_info_from_test_description(test_results, self.test_const.DATALINK)\n test_results = e2e_test.test_datalink(test_results, self.test_const.DATALINK)\n elif current_test == test_common.TEST_DATALINK_COMPARE:\n test_results = e2e_test.get_basic_info_from_test_description(test_results, self.test_const.DATALINK_COMPARE)\n test_results = e2e_test.test_datalink_compare(test_results, self.test_const.DATALINK_COMPARE)\n elif current_test is test_common.TEST_NUMBER_ERROR:\n error_message = f'Test id {test_id_number}:{test_name}, does not exist. Aborting E2E Test'\n raise ValueError(error_message)\n else:\n error_message = f'Not valid option. 
Aborting E2E Test'\n raise ValueError(error_message)\n # end_if\n # return\n return test_results", "def main(resultsdb_url, frontend_url, timeparam):\n api = resultsdb_api.ResultsDBapi(resultsdb_url)\n\n results = []\n page = 0\n r = api.get_results(since=timeparam, page=page)\n while len(r[\"data\"]) != 0:\n results.extend(r[\"data\"])\n page += 1\n r = api.get_results(since=timeparam, page=page)\n\n passed = 0\n passed_types = {}\n failed = 0\n failed_types = {}\n failed_links = {}\n together = {}\n for result in results:\n test_case = result[\"testcase\"][\"name\"]\n if result[\"outcome\"] in OKAYISH:\n passed += 1\n passed_types[test_case] = passed_types.get(test_case, 0) + 1\n else:\n failed += 1\n failed_types[test_case] = failed_types.get(test_case, 0) + 1\n test_url = urljoin(frontend_url, \"results/%d\" % result[\"id\"])\n if test_case not in failed_links:\n failed_links[test_case] = [test_url]\n else:\n failed_links[test_case].append(test_url)\n together[test_case] = together.get(test_case, 0) + 1\n\n output = \"libtaskotron results\\n====================\\n\"\n output += \"Generated on: \" + socket.gethostname() + \"\\n\"\n [from_time, to_time] = timeparam.split(\",\")\n output += \"From: \" + from_time + \"\\n\"\n output += \"To: \" + to_time + \"\\n\\n\"\n output += \"Passed: %d\\nFailed: %d\\n\\n\" % (passed, failed)\n output += \"Passed checks:\\n--------------\\n\"\n for check in passed_types.keys():\n output += \"%s: %d\\n\" % (check, passed_types[check])\n output += \"\\n\"\n output += \"Failed checks:\\n--------------\\n\"\n for check in failed_types.keys():\n output += \"%s: %d\\n\" % (check, failed_types[check])\n output += \"\\n\"\n output += \"Links to failed checks:\\n-----------------------\\n\"\n for i, check in enumerate(failed_links.keys()):\n if i != 0:\n output += \"\\n\\n\"\n output += check + \":\\n\"\n output += \"\\n\".join(failed_links[check])\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the requirement of getting `quantity` of the output chemical, return the list of all the input chemicals and their corresponding quantities to create the output. If the reaction produces more than required, also return the leftover quantity.
def reverse(self, quantity):
    batches = ceil(quantity / self.out_quantity)
    leftover = self.out_quantity * batches - quantity
    result = []
    for inp_c, inp_q in self.inp_chemicals.items():
        result.append((inp_c, batches * inp_q))
    return result, leftover
[ "def required_parts(self):\n parts = []\n\n for item in self.part.bom_items.all():\n part = {'part': item.sub_part,\n 'per_build': item.quantity,\n 'quantity': item.quantity * self.quantity\n }\n\n parts.append(part)\n\n return parts", "def process_order(self, order, warehouses):\n\n print(\"Order: \")\n print(order)\n print(\"Warehouses: \")\n print(warehouses)\n order, warehouses = self.remove_empty_items(order, warehouses)\n\n if len(order) == 0:\n return []\n\n if len(warehouses) == 0:\n return []\n\n final_shipment = []\n names_index = {}\n\n for item in order:\n i = 0\n # Iterate until we have checked every warehouse or we have satisfied the quantity for item\n while order[item] > 0 and i < len(warehouses):\n\n inventory = warehouses[i]['inventory']\n name = warehouses[i]['name']\n\n if item in inventory:\n\n # Inventory can't satisfy order for item -> amt_to_take is all of inventory - clear inventory\n if inventory[item] - order[item] <= 0:\n amt_to_take = inventory[item]\n order[item] -= inventory[item]\n del inventory[item]\n # Inventory can satisfy order for item -> amt_to_take is equal to the order amount\n else:\n amt_to_take = order[item]\n order[item] = 0\n\n # Create new warehouse object if warehouse not in final shipment\n if name not in names_index:\n warehouse = { name : { item : amt_to_take } }\n names_index[name] = len(final_shipment) # add to end of names\n final_shipment.append(warehouse)\n # Names_order[name] == index of warehouse in final shipment. Add (item : amt_to_take ) pair\n else:\n final_shipment[names_index[name]][name][item] = amt_to_take\n\n i += 1\n\n # Check if order for specific item has been satisfied\n if i == len(warehouses) and order[item] > 0:\n return []\n\n print(\"Final shipment: \")\n print(final_shipment)\n return final_shipment", "def get_process_children(self, parent_process: NodeElem, stock: dict[str, int]) -> list[Node]:\n matrices: Matrix = []\n for need, qty in self.process[parent_process.name].need.items():\n if stock.get(need, 1) >= qty:\n continue\n matrices.append([NodeElem(p.name, 1) for p in self.produces[need]])\n if not matrices:\n return []\n combinations = Node.combinations(matrices)\n return [Node(lst, deepcopy(stock)) for lst in combinations]", "def requiredToProduce(source, n, target, reactions):\n required = Counter()\n balance = Counter()\n\n Q = deque(needed(n, target, balance, reactions))\n\n while len(Q) > 0:\n n, c = Q.popleft()\n if n <= balance[c]:\n balance[c] -= n\n elif c in reactions:\n lhs = needed(n, c, balance, reactions)\n for r, d in lhs:\n # TODO: clean this up\n combined = False\n for i, (r2, d2) in enumerate(Q):\n if d == d2:\n Q[i] = (r + r2, d)\n combined = True\n break\n if not combined:\n Q.append((r, d))\n else:\n required[c] += n\n\n return required[source]", "def multiple_quantity(self):\n return self._multiple_quantity", "def ingredient_demand_of_one_factory(self):\n if self._produced is None:\n return None\n result = []\n for count, ingr_type in self._ingredients:\n result.append(ItemTypeDemand(ingr_type, _F(count, self._time)))\n return result", "def expand(desired, reactions, precursors):\n result = defaultdict(int)\n for chemical, quantity in desired.items():\n if chemical in reactions:\n reaction = reactions[chemical]\n\n dont_expand = False\n for x in desired.keys():\n if chemical in precursors.get(x, set()):\n dont_expand = True\n\n if dont_expand:\n result[chemical] += quantity\n else:\n num = ceil(quantity / reaction.output_yield)\n for input_chemical_tuple in 
reaction.input_chemicals:\n result[input_chemical_tuple[0]\n ] += input_chemical_tuple[1] * num\n else:\n result[chemical] += quantity\n\n return dict(result)", "def calculation_required(self, atoms=None, quantities=None):\n available_quantities = [\"energy\", \"forces\"]\n for quantity in quantities:\n if quantity not in available_quantities:\n print_warning(\"Quantity '{}' not available.\".format(quantity))\n return True\n\n if self.old_input is None:\n return True\n else:\n new_input = self.CP2K_INPUT._print_input(-1)\n return new_input != self.old_input", "def get_quantity(decimal: 'pynini.FstLike', cardinal_up_to_hundred: 'pynini.FstLike') -> 'pynini.FstLike':\n numbers = cardinal_up_to_hundred\n\n res = (\n pynutil.insert(\"integer_part: \\\"\")\n + numbers\n + pynutil.insert(\"\\\"\")\n + pynini.accep(\" \")\n + pynutil.insert(\"quantity: \\\"\")\n + quantities\n + pynutil.insert(\"\\\"\")\n )\n res |= decimal + pynini.accep(\" \") + pynutil.insert(\"quantity: \\\"\") + quantities + pynutil.insert(\"\\\"\")\n return res", "def subset(self):\n\n #Set the empty lists\n self.qc_list_m1 = [] #To store the IDs of modality 1 (QC'ed)\n self.qc_list_m2 = [] #To store the IDs of modality 2 (QC'ed)\n self.reduct = [] #To store the IDs of the subset to do\n\n #######################################\n #Import the QC_list for modality 1 (with a try/except statement to catch errors)\n if self.qcm1: #If the QC_list was selected...\n try:\n with open(self.qcm1) as x: #Open file\n for line in x:\n new_line = re.search(r'[0-9]{6}', line).group()[0:6] \n if new_line.strip().isdigit() and len(new_line.strip()) == 6: #If stripped line are only numbers and their length is 6, then...\n self.qc_list_m1.append(new_line.strip()) #Append the number to the list\n else:\n raise SystemExit('Error: encountered unexpected input in QC list 1. Cannot strip to one six-digit ID (CC ID) per line.')\n print(' Generated QC_list 1')\n\n except OSError: #If we can't find the file, we exist straight away.\n print(f\"Error: Could not access QC list file at : {self.qcm1}\", flush=True)\n raise SystemExit\n\n else: #If no QC_list is provided, print the message\n print(f' No argument given to -q1 argument. Assume no QC_list is necessary for modality 1')\n \n #######################################\n #Import the QC_list for modality 2 (with a try/except statement to catch errors)\n if self.qcm2:\n try:\n with open(self.qcm2) as y: #Open file\n for line in y:\n new_line = re.search(r'[0-9]{6}', line).group()[0:6]\n if new_line.strip().isdigit() and len(new_line.strip()) == 6: #If stripped line are only numbers and their length is 6, then...\n self.qc_list_m2.append(new_line.strip()) #Append the number to the list\n else:\n raise SystemExit('Error: encountered unexpected input in QC list 2. Cannot strip to one six-digit ID (CC ID) per line.')\n print(' Generated QC_list 2') #Will print if successful.\n \n except OSError: #If we can't find the file, we exist straight away.\n print(f\"Error: Could not access QC list file at : {self.qcm2}\", flush=True)\n raise SystemExit\n else:\n print(f' No argument given to -q2 argument. 
Assume no QC_list is necessary for modality 2')\n \n #######################################\n #Subset the data by a specific criteria\n if self.red: #If the reduction option is given\n try:\n with open(self.red) as z: #Open file\n for line in z:\n new_line = re.search(r'[0-9]{6}', line).group()[0:6]\n if new_line.strip().isdigit() and len(new_line.strip()) == 6: #If stripped line are only numbers and their length is 6, then...\n self.reduct.append(new_line.strip()) #Append the number to the list\n else:\n raise SystemExit('Error: encountered unexpected input in QC list 1. Expected one six-digit ID (CC ID) per line.')\n\n print(' Generated subset list')\n\n except OSError: #If we can't find the file, we exist straight away.\n print(f\"Error: Could not access subset list file at : {self.red}\", flush=True)\n raise SystemExit\n else:\n print(f\" No argument given to -r argument. Assume we don't want to subset the data and use all subjects that were QC'ed.\")\n \n\n return None", "def characterize(self, flowable, ref_quantity, query_quantity, value, context=None, location='GLO', **kwargs):\n return self._perform_query(_interface, 'characterize', QuantityRequired,\n flowable, ref_quantity, query_quantity, value,\n context=context, location=location, **kwargs)", "def quantity_size():", "def getinputs(prodcons, num):\n\n allowed_type = -1\n for i in range(0, num):\n allowed_types = set()\n\n \"\"\"\n while len(allowed_types) != 5:\n #allowed_type = random.randint(0, 5)\n allowed_type = \\\n int(input('Digite o allowed_type (1 a 5) para dicionar ao %s %i \\n0 para finalizar: '\n % (prodcons.upper(), i)))\n if allowed_type == 0:\n break\n elif allowed_type > 0 and allowed_type <= 5:\n allowed_types.add(allowed_type)\n else:\n print('Tipo errado, sabe ler n\\xc3\\xa3o?')\n \"\"\" \n if prodcons == 'prod':\n allowed_types = set()\n for i in range(1,6):\n allowed_types.add(random.randint(1, 5))\n \n a = Producer(allowed_types)\n produceres.append(a)\n produceres_deque.append(a)\n print(\"PROD\", a._id, a)\n elif prodcons == 'cons':\n allowed_types = set()\n for i in range(1,6):\n allowed_types.add(random.randint(1, 5))\n a = Consumidor(allowed_types)\n consumeres.append(a)\n consumeres_deque.append(a)\n print(\"CONS\", a._id, a)", "def require_quantity(self):\n n = self.require_quant_exact()\n m = None\n if self.the_char == \",\":\n self.next_char()\n if self.match_one(\"0123456789\"):\n m = self.require_quant_exact()\n if n > m:\n raise RegularExpressionError(\n \"Illegal quantity: {%i,%i}\" % (n, m))\n else:\n m = n\n return n, m", "def _generate_items_for_item_children(self, parent_item, quantity_of_parent_item, items_for_order, exclude_preassembled_parts):\n for item_link in InventoryItemLink.objects.filter(\n \n parent_inventory_item = parent_item):\n child_item = item_link.child_inventory_item\n num_child_items = quantity_of_parent_item * item_link.num_child_inventory_items_required\n relationship = item_link.relationship\n\n if child_item not in items_for_order:\n items_for_order[child_item] = {}\n if relationship not in items_for_order[child_item]:\n items_for_order[child_item][relationship] = num_child_items\n else:\n items_for_order[child_item][relationship] += num_child_items \n\n if (not exclude_preassembled_parts) or (not child_item.preassembled):\n self._generate_items_for_item_children(\n parent_item = child_item,\n quantity_of_parent_item = num_child_items,\n items_for_order = items_for_order,\n exclude_preassembled_parts = exclude_preassembled_parts\n )", "def distribute(ξ, capacity: 
Dict[Hashable, int], quantity: int) -> Dict[Hashable, int]:\n allocation = defaultdict(int)\n if quantity > 0:\n total_capacity = sum(capacity.values())\n assert total_capacity >= quantity, f\"{quantity=} exceeds {total_capacity=}\"\n choices = SortedSet([key for key, value in capacity.items() if value >= 1])\n μ = quantity / float(len(capacity))\n with tqdm(total=quantity, desc=\"Distribute\") as bar:\n for i, q in capacity.items():\n total_capacity -= q\n min_allocation = max(quantity - total_capacity, 0)\n max_allocation = min(quantity, q)\n expected = max(min(max_allocation, μ), min_allocation)\n x = sample_number(ξ, min_allocation, max_allocation, expected)\n allocation[i] = x\n quantity -= x\n bar.update(x)\n return allocation", "def additional_fuel_requirements(fuel_mass: int) -> Iterator[int]:\n step = fuel_requirement(fuel_mass)\n while step > 0:\n yield step\n step = fuel_requirement(step)", "def ingredient_demand_needed_for(self, rate):\n multipler = self.factories_needed_for(rate)\n if multipler is None:\n return None\n result = []\n for d in self.ingredient_demand_of_one_factory():\n result.append(ItemTypeDemand(\n d.item_type, d.requested_rate * multipler\n ))\n return result", "def produce(self, production_function, input_goods, results=False):\n if not isinstance(input_goods, dict):\n input_goods = {good: self[good] for good in input_goods}\n\n result = production_function(**input_goods)\n\n for good, quantity in input_goods.items():\n if self._inventory.haves[good] - quantity + result.get(good, 0) < -epsilon:\n raise NotEnoughGoods\n\n for good, quantity in input_goods.items():\n self._inventory.haves[good] -= quantity\n\n for good, quantity in result.items():\n self._inventory.haves[good] += quantity\n\n if results:\n return {good: result.get(good, 0) - input_goods.get(good, 0)\n for good in ChainMap(input_goods, result).keys()}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all registered op protos from the PaddlePaddle C++ end.
def get_all_op_protos():
    protostrs = core.get_all_op_protos()
    ret_values = []
    for pbstr in protostrs:
        op_proto = framework_pb2.OpProto.FromString(bytes(pbstr))
        ret_values.append(op_proto)
    return ret_values
[ "def getCurrentProto(self) -> \"SoProto *\":\n return _coin.SoOutput_getCurrentProto(self)", "def getCurrentProto(self) -> \"SoProto *\":\n return _coin.SoInput_getCurrentProto(self)", "def generate_protos(session):\n # longrunning operations directory is non-standard for backwards compatibility\n # see comments in directory for details\n # Temporarily rename the operations_pb2.py to keep it from getting overwritten\n os.replace(\n \"google/longrunning/operations_pb2.py\",\n \"google/longrunning/operations_pb2-COPY.py\",\n )\n\n session.install(GRPCIO_TOOLS_VERSION)\n protos = [str(p) for p in (Path(\".\").glob(\"google/**/*.proto\"))]\n session.run(\n \"python\", \"-m\", \"grpc_tools.protoc\", \"--proto_path=.\", \"--python_out=.\", *protos\n )\n\n # Some files contain service definitions for which `_pb2_grpc.py` files must be generated.\n service_protos = [\"google/longrunning/operations.proto\"]\n session.run(\n \"python\", \"-m\", \"grpc_tools.protoc\", \"--proto_path=.\", \"--grpc_python_out=.\", *service_protos\n )\n\n # More LRO non-standard fixes: rename the file and fix the import statement\n operations_grpc_py = Path(\"google/longrunning/operations_pb2_grpc.py\")\n file_contents = operations_grpc_py.read_text()\n file_contents = file_contents.replace(\"operations_pb2\", \"operations_proto_pb2\")\n operations_grpc_py.write_text(file_contents)\n\n # Clean up LRO directory\n os.replace(\n \"google/longrunning/operations_pb2.py\",\n \"google/longrunning/operations_proto_pb2.py\",\n )\n os.replace(\n \"google/longrunning/operations_pb2-COPY.py\",\n \"google/longrunning/operations_pb2.py\",\n )", "def ops(self):\n return self._all_ops", "def getProtoDefinition(self) -> \"SoProto *\":\n return _coin.SoProtoInstance_getProtoDefinition(self)", "def get_active_bn_ops(connected_graph: ConnectedGraph):\n for conn_graph_op in connected_graph.get_all_ops().values():\n if conn_graph_op.type in ['FusedBatchNormV3', 'BatchNorm']:\n bn_conn_graph_op = conn_graph_op\n yield bn_conn_graph_op", "def getPcodeOp(self) -> ghidra.program.model.pcode.PcodeOp:\n ...", "def _analyse_cbops_operator(self, cbops_op):\n # get the address to read the cbops op parameters from\n cbops_op_parameter_area_start = cbops_op.get_member(\n \"parameter_area_start\"\n ).address\n\n # now read the header parameters\n cbops_param_hdr = self.chipdata.cast(\n cbops_op_parameter_area_start, \"cbops_param_hdr\"\n )\n nr_inputs = cbops_param_hdr.get_member(\"nr_inputs\").value\n nr_outputs = cbops_param_hdr.get_member(\"nr_outputs\").value\n index_table_addr = cbops_param_hdr.get_member(\"index_table\").address\n\n # read the input and output buffer indexes that are used by the\n # operators.\n # we have the start of index table, there will be nr_inputs\n # indexes for inputs followed by nr_outputs indexes for outputs\n # the indexes are filtered, so only those with valid buffer will\n # be extracted\n input_indexes = (\n self.chipdata.get_data(\n index_table_addr, self.native_word_size * nr_inputs\n )\n ) if nr_inputs > 0 else []\n input_indexes = [m for m in input_indexes if m in self.buffers_indexes]\n index_table_addr += self.native_word_size * nr_inputs\n output_indexes = (\n self.chipdata.get_data(\n index_table_addr, self.native_word_size * nr_outputs\n )\n ) if nr_outputs > 0 else []\n output_indexes = [\n m for m in output_indexes if m in self.buffers_indexes\n ]\n\n # get previous and next operator\n prev_operator_addr = cbops_op.get_member(\"prev_operator_addr\").value\n next_operator_addr = 
cbops_op.get_member(\"next_operator_addr\").value\n prev_operator_struct = self._search_cbops_op_name_by_addr(\n prev_operator_addr\n )\n next_operator_struct = self._search_cbops_op_name_by_addr(\n next_operator_addr\n )\n\n # get the address of operator specific data\n index_table_addr += self.native_word_size * nr_outputs\n op_specific_data_ptr = index_table_addr\n\n # search for cbops op name\n cbops_struct = self._search_cbops_op_name(cbops_op)\n\n # output the info that found\n self.formatter.section_start(\n \" cbops operator 0x{0:x}, {1}\".\n format(cbops_op.address, cbops_struct)\n )\n self.formatter.output(\n \"prev_operator_addr: 0x{0:x}({1})\".\n format(prev_operator_addr, prev_operator_struct)\n )\n self.formatter.output(\n \"next_operator_addr: 0x{0:x}({1})\".\n format(next_operator_addr, next_operator_struct)\n )\n self.formatter.output(\n \"nr_inputs: {0}({1} used)\".format(nr_inputs, len(input_indexes))\n )\n self.formatter.output(\n \"input buffer indexes: {0}\".format(input_indexes)\n )\n self.formatter.output(\n \"nr_outputs: {0}({1} used)\".\n format(nr_outputs, len(output_indexes))\n )\n self.formatter.output(\n \"output buffer indexes: {0}\".format(output_indexes)\n )\n self.formatter.output(\n \"in-place processing: {0}\".format(output_indexes == input_indexes)\n )\n\n # if a valid operator structure name found, we also output\n # the content of specific data structure for the operator\n if cbops_struct is not None:\n op_specific_data = self.chipdata.cast(\n op_specific_data_ptr, cbops_struct\n )\n self.formatter.output(str(op_specific_data))\n self.formatter.section_end()", "def pc_output_buffers_full(self, *args):\n return _bs_swig.bs_ax25_decoder_sptr_pc_output_buffers_full(self, *args)", "def pc_output_buffers_full(self, *args):\n return _radio_astro_swig.detect_sptr_pc_output_buffers_full(self, *args)", "def ops_list(self):\n return self.ops_order.split('_')", "def pc_output_buffers_full(self, *args):\n return _radio_astro_swig.dedispersion_sptr_pc_output_buffers_full(self, *args)", "def pc_output_buffers_full(self, *args):\n return _frame_detection_swig.preamble_detector_bb_sptr_pc_output_buffers_full(self, *args)", "def feed_ops(self):\n if FLAGS.reinforcement_learning:\n pass\n\n if FLAGS.feed_initial_sate:\n return [self.decoder.initial_state], [self.decoder.final_state]\n else:\n return [], []", "def ping_pong(self):\n return [\n self.ping(),\n super().ping(),\n self.pong(),\n super().pong()\n ]", "def get_output_layers(self):\n layer_names = self.net.getLayerNames()\n output_layers = self.net.getUnconnectedOutLayers()\n return [layer_names[i[0] - 1] for i in output_layers]", "def _get_ops(self):\n\n arglist = []\n arg = self.arg\n\n if arg is None:\n op = [(self.opcode, 0)]\n else:\n while arg > 0xff:\n arg = arg >> (8 * len(arglist))\n arglist.append((self.EXTENDED_ARG, arg & 0xff))\n\n arglist = arglist[::-1]\n if len(arglist) > 3:\n # No more than 3 EXTENDED_ARG opcodes can precede\n # an opcode\n raise RuntimeError(\n f'argument {arg} for {dis.opname[opcode]} too large')\n\n if arglist:\n # The argument associated with the actual instruction\n # is the last one in the arglist\n arg = arglist.pop()[1]\n\n op = [(self.opcode, arg)]\n\n return arglist + op", "def do_list_protocols(self, args):\n print_list2(['Protocols'], self.clb.get_protocols(), args.batch,\n args.delimiter)", "def pc_output_buffers_full(self, *args):\n return _wavelet_swig.wvps_ff_sptr_pc_output_buffers_full(self, *args)", "def get_availableOperators(self):\n # cops\n # idx\n # slen\n 
res = []\n\n cops = self._AT(\"+COPS=?\")\n slen = len(cops)\n del res[:]\n idx = cops.find(\"(\")\n while idx >= 0:\n slen = slen - (idx+1)\n cops = (cops)[idx+1: idx+1 + slen]\n idx = cops.find(\"\\\"\")\n if idx > 0:\n slen = slen - (idx+1)\n cops = (cops)[idx+1: idx+1 + slen]\n idx = cops.find(\"\\\"\")\n if idx > 0:\n res.append((cops)[0: 0 + idx])\n idx = cops.find(\"(\")\n\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert user's input to OpDesc. Only keyword arguments are supported.
def __call__(self, *args, **kwargs): if len(args) != 0: raise ValueError("Only keyword arguments are supported.") op_desc = framework_pb2.OpDesc() for input_parameter in self.__op_proto__.inputs: input_arguments = kwargs.get(input_parameter.name, []) if is_str(input_arguments): input_arguments = [input_arguments] if not input_parameter.duplicable and len(input_arguments) > 1: raise ValueError( "Input %s expects only one input, but %d are given." % (input_parameter.name, len(input_arguments)) ) ipt = op_desc.inputs.add() ipt.parameter = input_parameter.name ipt.arguments.extend(input_arguments) for output_parameter in self.__op_proto__.outputs: output_arguments = kwargs.get(output_parameter.name, []) if is_str(output_arguments): output_arguments = [output_arguments] if not output_parameter.duplicable and len(output_arguments) > 1: raise ValueError( "Output %s expects only one output, but %d are given." % (output_parameter.name, len(output_arguments)) ) out = op_desc.outputs.add() out.parameter = output_parameter.name out.arguments.extend(output_arguments) # Types op_desc.type = self.__op_proto__.type # Attrs for attr in self.__op_proto__.attrs: if attr.generated: continue user_defined_attr = kwargs.get(attr.name, None) if user_defined_attr is not None: new_attr = op_desc.attrs.add() new_attr.name = attr.name new_attr.type = attr.type if isinstance(user_defined_attr, np.ndarray): user_defined_attr = user_defined_attr.tolist() if attr.type == framework_pb2.INT: new_attr.i = user_defined_attr elif attr.type == framework_pb2.FLOAT: new_attr.f = user_defined_attr elif attr.type == framework_pb2.LONG: new_attr.l = user_defined_attr elif attr.type == framework_pb2.STRING: new_attr.s = user_defined_attr elif attr.type == framework_pb2.BOOLEAN: new_attr.b = user_defined_attr elif attr.type == framework_pb2.INTS: new_attr.ints.extend(user_defined_attr) elif attr.type == framework_pb2.FLOATS: new_attr.floats.extend(user_defined_attr) elif attr.type == framework_pb2.STRINGS: new_attr.strings.extend(user_defined_attr) elif attr.type == framework_pb2.BOOLEANS: new_attr.bools.extend(user_defined_attr) elif attr.type == framework_pb2.LONGS: new_attr.longs.extend(user_defined_attr) elif attr.type == framework_pb2.FLOAT64: new_attr.float64 = user_defined_attr elif attr.type == framework_pb2.FLOAT64S: new_attr.float64s.extend(user_defined_attr) # the code below manipulates protobuf directly elif attr.type == framework_pb2.SCALAR: scalar = make_scalar_proto(user_defined_attr) new_attr.scalar.CopyFrom(scalar) elif attr.type == framework_pb2.SCALARS: scalars = [ make_scalar_proto(item) for item in user_defined_attr ] for item in scalars: new_attr.scalars.MergeFrom(item) else: raise NotImplementedError( "A not supported attribute type: %s." 
% (str(attr.type)) ) for attr_name, defalut_val in self.__extra_attrs__.items(): user_defined_attr = kwargs.get(attr_name, None) if user_defined_attr is not None: attr_type = int( core.get_attrtibute_type(op_desc.type, attr_name) ) new_attr = op_desc.attrs.add() new_attr.name = attr_name new_attr.type = attr_type if isinstance(user_defined_attr, np.ndarray): user_defined_attr = user_defined_attr.tolist() if attr_type == framework_pb2.INT: new_attr.i = user_defined_attr elif attr_type == framework_pb2.FLOAT: new_attr.f = user_defined_attr elif attr_type == framework_pb2.LONG: new_attr.l = user_defined_attr elif attr_type == framework_pb2.STRING: new_attr.s = user_defined_attr elif attr_type == framework_pb2.BOOLEAN: new_attr.b = user_defined_attr elif attr_type == framework_pb2.INTS: new_attr.ints.extend(user_defined_attr) elif attr_type == framework_pb2.FLOATS: new_attr.floats.extend(user_defined_attr) elif attr_type == framework_pb2.STRINGS: new_attr.strings.extend(user_defined_attr) elif attr_type == framework_pb2.BOOLEANS: new_attr.bools.extend(user_defined_attr) elif attr_type == framework_pb2.LONGS: new_attr.longs.extend(user_defined_attr) elif attr.type == framework_pb2.FLOAT64: new_attr.float64 = user_defined_attr elif attr.type == framework_pb2.FLOAT64S: new_attr.float64s.extend(user_defined_attr) # the code below manipulates protobuf directly elif attr.type == framework_pb2.SCALAR: scalar = make_scalar_proto(user_defined_attr) new_attr.scalar.CopyFrom(scalar) elif attr.type == framework_pb2.SCALARS: scalars = [ make_scalar_proto(item) for item in user_defined_attr ] for item in scalars: new_attr.scalars.MergeFrom(item) else: raise NotImplementedError( "A not supported attribute type: %s." % (str(attr_type)) ) return op_desc
[ "def test_opdef_sig():\n from tensorflow.core.framework import op_def_pb2\n\n custom_opdef_tf = op_def_pb2.OpDef()\n custom_opdef_tf.name = \"MyOpDef\"\n\n arg1_tf = op_def_pb2.OpDef.ArgDef()\n arg1_tf.name = \"arg1\"\n arg1_tf.type_attr = \"T\"\n\n arg2_tf = op_def_pb2.OpDef.ArgDef()\n arg2_tf.name = \"arg2\"\n arg2_tf.type_attr = \"T\"\n\n custom_opdef_tf.input_arg.extend([arg1_tf, arg2_tf])\n\n attr1_tf = op_def_pb2.OpDef.AttrDef()\n attr1_tf.name = \"T\"\n attr1_tf.type = \"type\"\n\n attr2_tf = op_def_pb2.OpDef.AttrDef()\n attr2_tf.name = \"axis\"\n attr2_tf.type = \"int\"\n attr2_tf.default_value.i = 1\n\n custom_opdef_tf.attr.extend([attr1_tf, attr2_tf])\n\n opdef_sig, opdef_func = MetaOpDefLibrary.make_opdef_sig(custom_opdef_tf)\n\n import inspect\n\n # These are standard inputs\n assert opdef_sig.parameters[\"arg1\"].default == inspect._empty\n assert opdef_sig.parameters[\"arg2\"].default == inspect._empty\n # These are attributes that are sometimes required by the OpDef\n assert opdef_sig.parameters[\"axis\"].default == inspect._empty\n # The obligatory tensor name parameter\n assert opdef_sig.parameters[\"name\"].default is None", "def _get_input(op):\n assert op.type in _SUPPORTED_OPS, 'Op type %s is not supported.' % op.type\n if op.type == 'Conv2D' or op.type == 'DepthwiseConv2dNative':\n return op.inputs[0]\n if op.type == 'Conv2DBackpropInput':\n return op.inputs[2]\n if op.type == 'MatMul':\n if op.get_attr('transpose_a') or op.get_attr('transpose_b'):\n raise ValueError('MatMul with transposition is not yet supported.')\n return op.inputs[0]", "def to_op(input, *args, **kwargs):\n device, dtype, copy = _parse_args(*args, **kwargs)\n\n if not isinstance(dtype, flow.dtype) and dtype is not None:\n raise TypeError(\"Invalid dtype param received: {dtype}\")\n\n if not isinstance(copy, bool):\n raise TypeError(\"Invalid copy param received: {copy}\")\n\n if input.is_consistent:\n if device is not None and device not in (\"cuda\", \"cpu\"):\n raise TypeError(\n \"A consistent tensor can only call to() with device_str_without_id, \"\n 'e.g. 
to(\"cuda\") or to(\"cpu\"), '\n f\"but device param {device} has been received.\"\n )\n\n return _consistent_tensor_to(input, device, dtype, copy=copy)\n else:\n if isinstance(device, str):\n device = flow.device(device)\n\n return _tensor_to(input, device, dtype, copy)", "def _get_description(arg):\n desc = []\n otherwise = False\n if arg.can_be_inferred:\n desc.append('If left unspecified, it will be inferred automatically.')\n otherwise = True\n elif arg.flag:\n desc.append('This argument defaults to '\n '<code>None</code> and can be omitted.')\n otherwise = True\n\n if arg.type in {'InputPeer', 'InputUser', 'InputChannel',\n 'InputNotifyPeer', 'InputDialogPeer'}:\n desc.append(\n 'Anything entity-like will work if the library can find its '\n '<code>Input</code> version (e.g., usernames, <code>Peer</code>, '\n '<code>User</code> or <code>Channel</code> objects, etc.).'\n )\n\n if arg.is_vector:\n if arg.is_generic:\n desc.append('A list of other Requests must be supplied.')\n else:\n desc.append('A list must be supplied.')\n elif arg.is_generic:\n desc.append('A different Request must be supplied for this argument.')\n else:\n otherwise = False # Always reset to false if no other text is added\n\n if otherwise:\n desc.insert(1, 'Otherwise,')\n desc[-1] = desc[-1][:1].lower() + desc[-1][1:]\n\n return ' '.join(desc).replace(\n 'list',\n '<span class=\"tooltip\" title=\"Any iterable that supports len() '\n 'will work too\">list</span>'\n )", "def _handleInput(self, paramInput, dimensionTags=None, dimTagsPrefix=None):", "def create_op_creation_method(op_proto):\n method = OpDescCreationMethod(op_proto)\n\n def __impl__(*args, **kwargs):\n opdesc = method(*args, **kwargs)\n return core.Operator.create(opdesc.SerializeToString())\n\n extra_attrs_map = core.get_op_extra_attrs(op_proto.type)\n\n return OpInfo(\n method=__impl__,\n name=op_proto.type,\n inputs=[(var.name, var.duplicable) for var in op_proto.inputs],\n outputs=[(var.name, var.duplicable) for var in op_proto.outputs],\n attrs=[attr.name for attr in op_proto.attrs],\n extra_attrs=list(extra_attrs_map.keys()),\n )", "def _construct_input_spec(self):", "def _generate_doc_string_(\n op_proto, additional_args_lines=None, skip_attrs_set=None\n):\n\n if not isinstance(op_proto, framework_pb2.OpProto):\n raise TypeError(\"OpProto should be `framework_pb2.OpProto`\")\n\n buf = StringIO()\n buf.write(escape_math(op_proto.comment))\n buf.write('\\nArgs:\\n')\n for each_input in op_proto.inputs:\n line_begin = f' {_convert_(each_input.name)}'\n buf.write(line_begin)\n buf.write(\" (Tensor): \")\n buf.write(escape_math(each_input.comment))\n if each_input.duplicable:\n buf.write(\" Duplicatable.\")\n if each_input.dispensable:\n buf.write(\" Optional.\")\n buf.write('\\n')\n\n skip_attrs = OpProtoHolder.generated_op_attr_names()\n # attr use_mkldnn and is_test also should not be visible to users.\n skip_attrs.add(\"use_mkldnn\")\n skip_attrs.add(\"is_test\")\n skip_attrs.add(\"use_cudnn\")\n\n if skip_attrs_set:\n for t in skip_attrs_set:\n skip_attrs.add(t)\n\n for each_attr in op_proto.attrs:\n if each_attr.name in skip_attrs:\n continue\n buf.write(' ')\n buf.write(each_attr.name)\n buf.write(' (')\n buf.write(_type_to_str_(each_attr.type))\n buf.write('): ')\n buf.write(escape_math(each_attr.comment))\n buf.write('\\n')\n\n if additional_args_lines is not None:\n for line in additional_args_lines:\n line = line.strip()\n buf.write(' ')\n buf.write(line)\n buf.write('\\n')\n\n if len(op_proto.outputs) != 0:\n 
buf.write('\\nReturns:\\n')\n buf.write(' ')\n for each_opt in op_proto.outputs:\n if not each_opt.intermediate:\n break\n buf.write(_convert_(each_opt.name))\n buf.write(' (Tensor): ')\n buf.write(escape_math(each_opt.comment))\n\n return buf.getvalue()", "def map_spec_operand_to_ods_argument(operand):\n kind = operand['kind']\n quantifier = operand.get('quantifier', '')\n\n # These instruction \"operands\" are for encoding the results; they should\n # not be handled here.\n assert kind != 'IdResultType', 'unexpected to handle \"IdResultType\" kind'\n assert kind != 'IdResult', 'unexpected to handle \"IdResult\" kind'\n\n if kind == 'IdRef':\n if quantifier == '':\n arg_type = 'SPV_Type'\n elif quantifier == '?':\n arg_type = 'SPV_Optional<SPV_Type>'\n else:\n arg_type = 'Variadic<SPV_Type>'\n elif kind == 'IdMemorySemantics' or kind == 'IdScope':\n # TODO(antiagainst): Need to further constrain 'IdMemorySemantics'\n # and 'IdScope' given that they should be gernated from OpConstant.\n assert quantifier == '', ('unexpected to have optional/variadic memory '\n 'semantics or scope <id>')\n arg_type = 'I32'\n elif kind == 'LiteralInteger':\n if quantifier == '':\n arg_type = 'I32Attr'\n elif quantifier == '?':\n arg_type = 'OptionalAttr<I32Attr>'\n else:\n arg_type = 'OptionalAttr<I32ArrayAttr>'\n elif kind == 'LiteralString' or \\\n kind == 'LiteralContextDependentNumber' or \\\n kind == 'LiteralExtInstInteger' or \\\n kind == 'LiteralSpecConstantOpInteger' or \\\n kind == 'PairLiteralIntegerIdRef' or \\\n kind == 'PairIdRefLiteralInteger' or \\\n kind == 'PairIdRefIdRef':\n assert False, '\"{}\" kind unimplemented'.format(kind)\n else:\n # The rest are all enum operands that we represent with op attributes.\n assert quantifier != '*', 'unexpected to have variadic enum attribute'\n arg_type = 'SPV_{}Attr'.format(kind)\n if quantifier == '?':\n arg_type = 'OptionalAttr<{}>'.format(arg_type)\n\n name = operand.get('name', '')\n name = snake_casify(name) if name else kind.lower()\n\n return '{}:${}'.format(arg_type, name)", "def get_op_definition(instruction, doc, existing_info):\n fmt_str = 'def SPV_{opname}Op : SPV_Op<\"{opname}\", [{traits}]> {{\\n'\\\n ' let summary = {summary};\\n\\n'\\\n ' let description = [{{\\n'\\\n '{description}\\n\\n'\\\n ' ### Custom assembly form\\n'\\\n '{assembly}'\\\n '}}];\\n\\n'\\\n ' let arguments = (ins{args});\\n\\n'\\\n ' let results = (outs{results});\\n'\\\n '{extras}'\\\n '}}\\n'\n\n opname = instruction['opname'][2:]\n\n summary, description = doc.split('\\n', 1)\n wrapper = textwrap.TextWrapper(\n width=76, initial_indent=' ', subsequent_indent=' ')\n\n # Format summary. 
If the summary can fit in the same line, we print it out\n # as a \"-quoted string; otherwise, wrap the lines using \"[{...}]\".\n summary = summary.strip();\n if len(summary) + len(' let summary = \"\";') <= 80:\n summary = '\"{}\"'.format(summary)\n else:\n summary = '[{{\\n{}\\n }}]'.format(wrapper.fill(summary))\n\n # Wrap description\n description = description.split('\\n')\n description = [wrapper.fill(line) for line in description if line]\n description = '\\n\\n'.join(description)\n\n operands = instruction.get('operands', [])\n\n # Set op's result\n results = ''\n if len(operands) > 0 and operands[0]['kind'] == 'IdResultType':\n results = '\\n SPV_Type:$result\\n '\n operands = operands[1:]\n if 'results' in existing_info:\n results = existing_info['results']\n\n # Ignore the operand standing for the result <id>\n if len(operands) > 0 and operands[0]['kind'] == 'IdResult':\n operands = operands[1:]\n\n # Set op' argument\n arguments = existing_info.get('arguments', None)\n if arguments is None:\n arguments = [map_spec_operand_to_ods_argument(o) for o in operands]\n arguments = '\\n '.join(arguments)\n if arguments:\n # Prepend and append whitespace for formatting\n arguments = '\\n {}\\n '.format(arguments)\n\n assembly = existing_info.get('assembly', None)\n if assembly is None:\n assembly = ' ``` {.ebnf}\\n'\\\n ' [TODO]\\n'\\\n ' ```\\n\\n'\\\n ' For example:\\n\\n'\\\n ' ```\\n'\\\n ' [TODO]\\n'\\\n ' ```\\n '\n\n return fmt_str.format(\n opname=opname,\n traits=existing_info.get('traits', ''),\n summary=summary,\n description=description,\n assembly=assembly,\n args=arguments,\n results=results,\n extras=existing_info.get('extras', ''))", "def _docArg(self, doc, arg):\n\n kwargs = self._args[arg]\n dest = kwargs.get('dest', arg[0])\n desc = ':arg {0}: {1}'.format(dest, kwargs['help'])\n choices = kwargs.get('choices')\n if choices:\n desc += ', one of ' + ', '.join(['``' + repr(ch) + '``'\n for ch in choices])\n default = kwargs.get('default')\n if default:\n desc += ', default is ``{0}``'.format(repr(default))\n doc.extend(wrapText(desc, join=False, subsequent_indent=' '))\n try:\n type = kwargs['type']\n except KeyError:\n try:\n action = kwargs['action']\n except KeyError:\n type = None\n else:\n if action.startswith('store') and action.endswith('e'):\n type = bool\n else:\n type = None\n if type is not None:\n doc.append(':type {0}: {1}'.format(dest, type.__name__))\n doc.append('')", "def get_trait_desc(inputs, name, spec):\n desc = spec.desc\n xor = spec.xor\n requires = spec.requires\n argstr = spec.argstr\n\n manhelpstr = [\"\\t%s\" % name]\n\n type_info = spec.full_info(inputs, name, None)\n\n default = \"\"\n if spec.usedefault:\n default = \", nipype default value: %s\" % str(spec.default_value()[1])\n line = f\"({type_info}{default})\"\n\n manhelpstr = wrap(\n line,\n HELP_LINEWIDTH,\n initial_indent=manhelpstr[0] + \": \",\n subsequent_indent=\"\\t\\t \",\n )\n\n if desc:\n for line in desc.split(\"\\n\"):\n line = re.sub(r\"\\s+\", \" \", line)\n manhelpstr += wrap(\n line, HELP_LINEWIDTH, initial_indent=\"\\t\\t\", subsequent_indent=\"\\t\\t\"\n )\n\n if argstr:\n pos = spec.position\n if pos is not None:\n manhelpstr += wrap(\n f\"argument: ``{argstr}``, position: {pos}\",\n HELP_LINEWIDTH,\n initial_indent=\"\\t\\t\",\n subsequent_indent=\"\\t\\t\",\n )\n else:\n manhelpstr += wrap(\n \"argument: ``%s``\" % argstr,\n HELP_LINEWIDTH,\n initial_indent=\"\\t\\t\",\n subsequent_indent=\"\\t\\t\",\n )\n\n if xor:\n line = \"%s\" % \", \".join(xor)\n manhelpstr += 
wrap(\n line,\n HELP_LINEWIDTH,\n initial_indent=\"\\t\\tmutually_exclusive: \",\n subsequent_indent=\"\\t\\t \",\n )\n\n if requires:\n others = [field for field in requires if field != name]\n line = \"%s\" % \", \".join(others)\n manhelpstr += wrap(\n line,\n HELP_LINEWIDTH,\n initial_indent=\"\\t\\trequires: \",\n subsequent_indent=\"\\t\\t \",\n )\n return manhelpstr", "def BuildInput(self, name, identifier):\n type_name = self.input_types[identifier]\n values, default = self.input_values[identifier]\n # For `data_types.Input` types, the `type_name` refers to the actual name of\n # the Python class, inheriting from `Input`. This is done so that different\n # input types can generate different C++ code by overriding the `Load` and\n # `Store` methods.\n input_constructor = getattr(data_types, type_name)\n return input_constructor(name, values, default)", "def _translate_op(operator):\n return DEB_VERS_OPS.get(operator, operator)", "def add_input_arg(self, inp):\n self.add_arg(inp._dax_repr())\n self._add_input(inp)", "def _call(self, desc_spec):\n if \"atomic_descriptor\" in desc_spec.keys() and \"reducer_function\" in desc_spec.keys():\n return Global_Descriptor_from_Atomic(desc_spec)\n elif \"type\" not in desc_spec.keys():\n raise ValueError(\"Did not specify the type of the descriptor.\")\n if desc_spec[\"type\"] == \"CM\":\n return Global_Descriptor_CM(desc_spec)\n elif desc_spec[\"type\"] == \"MORGAN\":\n return Global_Descriptor_Morgan(desc_spec)\n else:\n raise NotImplementedError", "def _handleInput(self, paramInput):\n pass", "def op(jenni, input):\n if not input.admin:\n return jenni.say('You must be an admin to perform this operation')\n inputs = None\n try:\n inputs = input.group(2).split(' ')\n except:\n return jenni.say('Invalid input: .op ##example or .op ##example nick')\n channel = None\n try:\n channel = inputs[0]\n if not channel.startswith('#'): raise Exception\n except:\n return jenni.say('You must provide a valid channel')\n nick = None\n try:\n nick = inputs[1]\n except:\n pass\n if not nick:\n nick = input.nick\n jenni.write(['MODE', channel, \"+o\", nick])", "def _resolve_arg(action, choices, param, required, typ):\n name, _param = param\n _required = None\n del param\n if _param[\"typ\"] in simple_types:\n typ = _param[\"typ\"]\n # elif (\n # isinstance(_param[\"typ\"], str)\n # and _param[\"typ\"].startswith(\"<class '\")\n # and _param[\"typ\"].endswith(\"'>\")\n # ):\n # typ = _param[\"typ\"][8:-2]\n elif _param[\"typ\"] == \"dict\" or name.endswith(\"kwargs\"):\n typ, required = \"loads\", not name.endswith(\"kwargs\")\n elif _param[\"typ\"]:\n from doctrans.emitter_utils import ast_parse_fix\n\n parsed_type = ast_parse_fix(_param[\"typ\"])\n for node in walk(parsed_type):\n if isinstance(node, Tuple):\n maybe_choices = tuple(\n get_value(elt)\n for elt in node.elts\n if isinstance(elt, (Constant, Str))\n )\n if len(maybe_choices) == len(node.elts):\n choices = maybe_choices\n elif isinstance(node, Name):\n if node.id == \"Optional\":\n _required = False\n elif node.id in simple_types:\n typ = node.id\n elif node.id not in frozenset((\"Union\",)):\n typ = FALLBACK_TYP\n\n if node.id == \"List\":\n action = \"append\"\n if _required is None and (typ or \"\").lower() in frozenset(\n (\"str\", \"complex\", \"int\", \"float\", \"anystr\", \"list\", \"tuple\", \"dict\")\n ):\n _required = True\n\n # if isinstance(_param.get(\"default\"), (list, tuple)):\n # if len()\n # typ, action = None, \"append\"\n\n # if isinstance(param.get(\"default\"), (Constant, Str, 
Num)):\n # param[\"default\"] = get_value(param[\"default\"])\n return (\n action,\n choices,\n required if _required is None else _required,\n typ,\n (name, _param),\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate op creation method for an OpProto.
def create_op_creation_method(op_proto): method = OpDescCreationMethod(op_proto) def __impl__(*args, **kwargs): opdesc = method(*args, **kwargs) return core.Operator.create(opdesc.SerializeToString()) extra_attrs_map = core.get_op_extra_attrs(op_proto.type) return OpInfo( method=__impl__, name=op_proto.type, inputs=[(var.name, var.duplicable) for var in op_proto.inputs], outputs=[(var.name, var.duplicable) for var in op_proto.outputs], attrs=[attr.name for attr in op_proto.attrs], extra_attrs=list(extra_attrs_map.keys()), )
[ "def _generate_doc_string_(\n op_proto, additional_args_lines=None, skip_attrs_set=None\n):\n\n if not isinstance(op_proto, framework_pb2.OpProto):\n raise TypeError(\"OpProto should be `framework_pb2.OpProto`\")\n\n buf = StringIO()\n buf.write(escape_math(op_proto.comment))\n buf.write('\\nArgs:\\n')\n for each_input in op_proto.inputs:\n line_begin = f' {_convert_(each_input.name)}'\n buf.write(line_begin)\n buf.write(\" (Tensor): \")\n buf.write(escape_math(each_input.comment))\n if each_input.duplicable:\n buf.write(\" Duplicatable.\")\n if each_input.dispensable:\n buf.write(\" Optional.\")\n buf.write('\\n')\n\n skip_attrs = OpProtoHolder.generated_op_attr_names()\n # attr use_mkldnn and is_test also should not be visible to users.\n skip_attrs.add(\"use_mkldnn\")\n skip_attrs.add(\"is_test\")\n skip_attrs.add(\"use_cudnn\")\n\n if skip_attrs_set:\n for t in skip_attrs_set:\n skip_attrs.add(t)\n\n for each_attr in op_proto.attrs:\n if each_attr.name in skip_attrs:\n continue\n buf.write(' ')\n buf.write(each_attr.name)\n buf.write(' (')\n buf.write(_type_to_str_(each_attr.type))\n buf.write('): ')\n buf.write(escape_math(each_attr.comment))\n buf.write('\\n')\n\n if additional_args_lines is not None:\n for line in additional_args_lines:\n line = line.strip()\n buf.write(' ')\n buf.write(line)\n buf.write('\\n')\n\n if len(op_proto.outputs) != 0:\n buf.write('\\nReturns:\\n')\n buf.write(' ')\n for each_opt in op_proto.outputs:\n if not each_opt.intermediate:\n break\n buf.write(_convert_(each_opt.name))\n buf.write(' (Tensor): ')\n buf.write(escape_math(each_opt.comment))\n\n return buf.getvalue()", "def __call__(self, *args, **kwargs):\n if len(args) != 0:\n raise ValueError(\"Only keyword arguments are supported.\")\n op_desc = framework_pb2.OpDesc()\n for input_parameter in self.__op_proto__.inputs:\n input_arguments = kwargs.get(input_parameter.name, [])\n if is_str(input_arguments):\n input_arguments = [input_arguments]\n\n if not input_parameter.duplicable and len(input_arguments) > 1:\n raise ValueError(\n \"Input %s expects only one input, but %d are given.\"\n % (input_parameter.name, len(input_arguments))\n )\n\n ipt = op_desc.inputs.add()\n ipt.parameter = input_parameter.name\n ipt.arguments.extend(input_arguments)\n\n for output_parameter in self.__op_proto__.outputs:\n output_arguments = kwargs.get(output_parameter.name, [])\n if is_str(output_arguments):\n output_arguments = [output_arguments]\n\n if not output_parameter.duplicable and len(output_arguments) > 1:\n raise ValueError(\n \"Output %s expects only one output, but %d are given.\"\n % (output_parameter.name, len(output_arguments))\n )\n\n out = op_desc.outputs.add()\n out.parameter = output_parameter.name\n out.arguments.extend(output_arguments)\n\n # Types\n op_desc.type = self.__op_proto__.type\n\n # Attrs\n for attr in self.__op_proto__.attrs:\n if attr.generated:\n continue\n user_defined_attr = kwargs.get(attr.name, None)\n if user_defined_attr is not None:\n new_attr = op_desc.attrs.add()\n new_attr.name = attr.name\n new_attr.type = attr.type\n if isinstance(user_defined_attr, np.ndarray):\n user_defined_attr = user_defined_attr.tolist()\n if attr.type == framework_pb2.INT:\n new_attr.i = user_defined_attr\n elif attr.type == framework_pb2.FLOAT:\n new_attr.f = user_defined_attr\n elif attr.type == framework_pb2.LONG:\n new_attr.l = user_defined_attr\n elif attr.type == framework_pb2.STRING:\n new_attr.s = user_defined_attr\n elif attr.type == framework_pb2.BOOLEAN:\n new_attr.b = user_defined_attr\n 
elif attr.type == framework_pb2.INTS:\n new_attr.ints.extend(user_defined_attr)\n elif attr.type == framework_pb2.FLOATS:\n new_attr.floats.extend(user_defined_attr)\n elif attr.type == framework_pb2.STRINGS:\n new_attr.strings.extend(user_defined_attr)\n elif attr.type == framework_pb2.BOOLEANS:\n new_attr.bools.extend(user_defined_attr)\n elif attr.type == framework_pb2.LONGS:\n new_attr.longs.extend(user_defined_attr)\n elif attr.type == framework_pb2.FLOAT64:\n new_attr.float64 = user_defined_attr\n elif attr.type == framework_pb2.FLOAT64S:\n new_attr.float64s.extend(user_defined_attr)\n # the code below manipulates protobuf directly\n elif attr.type == framework_pb2.SCALAR:\n scalar = make_scalar_proto(user_defined_attr)\n new_attr.scalar.CopyFrom(scalar)\n elif attr.type == framework_pb2.SCALARS:\n scalars = [\n make_scalar_proto(item) for item in user_defined_attr\n ]\n for item in scalars:\n new_attr.scalars.MergeFrom(item)\n else:\n raise NotImplementedError(\n \"A not supported attribute type: %s.\" % (str(attr.type))\n )\n for attr_name, defalut_val in self.__extra_attrs__.items():\n user_defined_attr = kwargs.get(attr_name, None)\n if user_defined_attr is not None:\n attr_type = int(\n core.get_attrtibute_type(op_desc.type, attr_name)\n )\n new_attr = op_desc.attrs.add()\n new_attr.name = attr_name\n new_attr.type = attr_type\n if isinstance(user_defined_attr, np.ndarray):\n user_defined_attr = user_defined_attr.tolist()\n if attr_type == framework_pb2.INT:\n new_attr.i = user_defined_attr\n elif attr_type == framework_pb2.FLOAT:\n new_attr.f = user_defined_attr\n elif attr_type == framework_pb2.LONG:\n new_attr.l = user_defined_attr\n elif attr_type == framework_pb2.STRING:\n new_attr.s = user_defined_attr\n elif attr_type == framework_pb2.BOOLEAN:\n new_attr.b = user_defined_attr\n elif attr_type == framework_pb2.INTS:\n new_attr.ints.extend(user_defined_attr)\n elif attr_type == framework_pb2.FLOATS:\n new_attr.floats.extend(user_defined_attr)\n elif attr_type == framework_pb2.STRINGS:\n new_attr.strings.extend(user_defined_attr)\n elif attr_type == framework_pb2.BOOLEANS:\n new_attr.bools.extend(user_defined_attr)\n elif attr_type == framework_pb2.LONGS:\n new_attr.longs.extend(user_defined_attr)\n elif attr.type == framework_pb2.FLOAT64:\n new_attr.float64 = user_defined_attr\n elif attr.type == framework_pb2.FLOAT64S:\n new_attr.float64s.extend(user_defined_attr)\n # the code below manipulates protobuf directly\n elif attr.type == framework_pb2.SCALAR:\n scalar = make_scalar_proto(user_defined_attr)\n new_attr.scalar.CopyFrom(scalar)\n elif attr.type == framework_pb2.SCALARS:\n scalars = [\n make_scalar_proto(item) for item in user_defined_attr\n ]\n for item in scalars:\n new_attr.scalars.MergeFrom(item)\n else:\n raise NotImplementedError(\n \"A not supported attribute type: %s.\" % (str(attr_type))\n )\n\n return op_desc", "def from_proto(\n self,\n proto,\n *,\n arg_function_language: str = '',\n constants: List[v2.program_pb2.Constant] = None,\n deserialized_constants: List[Any] = None,\n ) -> cirq.Operation:", "def _proto2object(\n proto: CreateTensorMessage_PB,\n ) -> \"CreateTensorMessage\":\n\n return CreateTensorMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def buildProtocol(self, addr):\n proto = WebexProto(self.access_token, self.connected_handler, \n self.message_handler)\n proto.factory = self\n self.resetDelay()\n return 
proto", "def from_proto(\n self,\n proto: v2.program_pb2.Operation,\n *,\n arg_function_language: str = '',\n constants: List[v2.program_pb2.Constant] = None,\n deserialized_constants: List[Any] = None, # unused\n ) -> cirq.Operation:\n qubits = [v2.qubit_from_proto_id(q.id) for q in proto.qubits]\n args = self._args_from_proto(proto, arg_function_language=arg_function_language)\n if self._num_qubits_param is not None:\n args[self._num_qubits_param] = len(qubits)\n gate = self._gate_constructor(**args)\n op = self._op_wrapper(gate.on(*qubits), proto)\n if self._deserialize_tokens:\n which = proto.WhichOneof('token')\n if which == 'token_constant_index':\n if not constants:\n raise ValueError(\n 'Proto has references to constants table '\n 'but none was passed in, value ='\n f'{proto}'\n )\n op = op.with_tags(\n CalibrationTag(constants[proto.token_constant_index].string_value)\n )\n elif which == 'token_value':\n op = op.with_tags(CalibrationTag(proto.token_value))\n return op", "def createProtoInstance(self) -> \"SoProtoInstance *\":\n return _coin.SoProto_createProtoInstance(self)", "def test_opdef_sig():\n from tensorflow.core.framework import op_def_pb2\n\n custom_opdef_tf = op_def_pb2.OpDef()\n custom_opdef_tf.name = \"MyOpDef\"\n\n arg1_tf = op_def_pb2.OpDef.ArgDef()\n arg1_tf.name = \"arg1\"\n arg1_tf.type_attr = \"T\"\n\n arg2_tf = op_def_pb2.OpDef.ArgDef()\n arg2_tf.name = \"arg2\"\n arg2_tf.type_attr = \"T\"\n\n custom_opdef_tf.input_arg.extend([arg1_tf, arg2_tf])\n\n attr1_tf = op_def_pb2.OpDef.AttrDef()\n attr1_tf.name = \"T\"\n attr1_tf.type = \"type\"\n\n attr2_tf = op_def_pb2.OpDef.AttrDef()\n attr2_tf.name = \"axis\"\n attr2_tf.type = \"int\"\n attr2_tf.default_value.i = 1\n\n custom_opdef_tf.attr.extend([attr1_tf, attr2_tf])\n\n opdef_sig, opdef_func = MetaOpDefLibrary.make_opdef_sig(custom_opdef_tf)\n\n import inspect\n\n # These are standard inputs\n assert opdef_sig.parameters[\"arg1\"].default == inspect._empty\n assert opdef_sig.parameters[\"arg2\"].default == inspect._empty\n # These are attributes that are sometimes required by the OpDef\n assert opdef_sig.parameters[\"axis\"].default == inspect._empty\n # The obligatory tensor name parameter\n assert opdef_sig.parameters[\"name\"].default is None", "def generate(env):\n try:\n bld = env['BUILDERS']['Protoc']\n except KeyError:\n bld = ProtocBuilder\n env['BUILDERS']['Protoc'] = bld\n \n env['PROTOC'] = env.Detect(protocs) or 'protoc'\n env['PROTOCFLAGS'] = SCons.Util.CLVar('')\n env['PROTOCPROTOPATH'] = SCons.Util.CLVar('')\n env['PROTOCCOM'] = '$PROTOC $PROTOCFLAGS ${PROTOCPYTHONOUTDIR and (\"--python_out=\"+PROTOCPYTHONOUTDIR) or \"\"} ${PROTOCFDSOUT and (\"-o\"+PROTOCFDSOUT) or \"\"} ${SOURCES}'\n env['PROTOCSRCSUFFIX'] = '.proto'\n env['PROTOCPYTHONOUTDIR'] = '.'", "def _proto2object(\n proto: GetTensorMessage_PB,\n ) -> \"GetTensorMessage\":\n\n return GetTensorMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _create_split_op(self, op: Op) -> Op:\n split_name_parts = ['Split_', str(self._split_count)]\n split_name = ''.join(split_name_parts)\n self._split_count += 1\n split_dotted_name_parts = [self._model_name, split_name]\n split_dotted_name = '.'.join(split_dotted_name_parts)\n is_anonymous = True\n split_op = Op(name=split_name, dotted_name=split_dotted_name, output_shape=op.output_shape,\n is_anonymous=is_anonymous, op_type='Split', residing_module=None)\n 
self._ops[split_name] = split_op\n return split_op", "def _proto2object(\n proto: CreateRoleMessage_PB,\n ) -> \"CreateRoleMessage\":\n\n return CreateRoleMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def from_proto(\n self,\n proto: v2.program_pb2.CircuitOperation,\n *,\n arg_function_language: str = '',\n constants: List[v2.program_pb2.Constant] = None,\n deserialized_constants: List[Any] = None,\n ) -> cirq.CircuitOperation:\n if constants is None or deserialized_constants is None:\n raise ValueError(\n 'CircuitOp deserialization requires a constants list and a corresponding list of '\n 'post-deserialization values (deserialized_constants).'\n )\n if len(deserialized_constants) <= proto.circuit_constant_index:\n raise ValueError(\n f'Constant index {proto.circuit_constant_index} in CircuitOperation '\n 'does not appear in the deserialized_constants list '\n f'(length {len(deserialized_constants)}).'\n )\n circuit = deserialized_constants[proto.circuit_constant_index]\n if not isinstance(circuit, cirq.FrozenCircuit):\n raise ValueError(\n f'Constant at index {proto.circuit_constant_index} was expected to be a circuit, '\n f'but it has type {type(circuit)} in the deserialized_constants list.'\n )\n\n which_rep_spec = proto.repetition_specification.WhichOneof('repetition_value')\n if which_rep_spec == 'repetition_count':\n rep_ids = None\n repetitions = proto.repetition_specification.repetition_count\n elif which_rep_spec == 'repetition_ids':\n rep_ids = proto.repetition_specification.repetition_ids.ids\n repetitions = len(rep_ids)\n else:\n rep_ids = None\n repetitions = 1\n\n qubit_map = {\n v2.qubit_from_proto_id(entry.key.id): v2.qubit_from_proto_id(entry.value.id)\n for entry in proto.qubit_map.entries\n }\n measurement_key_map = {\n entry.key.string_key: entry.value.string_key\n for entry in proto.measurement_key_map.entries\n }\n arg_map = {\n arg_func_langs.arg_from_proto(\n entry.key, arg_function_language=arg_function_language\n ): arg_func_langs.arg_from_proto(\n entry.value, arg_function_language=arg_function_language\n )\n for entry in proto.arg_map.entries\n }\n\n for arg in arg_map.keys():\n if not isinstance(arg, (str, sympy.Symbol)):\n raise ValueError(\n 'Invalid key parameter type in deserialized CircuitOperation. '\n f'Expected str or sympy.Symbol, found {type(arg)}.'\n f'\\nFull arg: {arg}'\n )\n\n for arg in arg_map.values():\n if not isinstance(arg, (str, sympy.Symbol, float, int)):\n raise ValueError(\n 'Invalid value parameter type in deserialized CircuitOperation. 
'\n f'Expected str, sympy.Symbol, or number; found {type(arg)}.'\n f'\\nFull arg: {arg}'\n )\n\n return cirq.CircuitOperation(\n circuit, repetitions, qubit_map, measurement_key_map, arg_map, rep_ids # type: ignore\n )", "def _proto2object(\n proto: UpdateTensorMessage_PB,\n ) -> \"UpdateTensorMessage\":\n\n return UpdateTensorMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _create_split_op(self, op: Op):\n split_name_parts = ['Split_', str(self._split_count)]\n split_name = ''.join(split_name_parts)\n self._split_count += 1\n split_dotted_name_parts = [self._model_name, split_name]\n split_dotted_name = '.'.join(split_dotted_name_parts)\n is_anonymous = True\n split_op = Op(split_name, split_dotted_name, op.output_shape, is_anonymous, 'Split')\n self._ops[split_name] = split_op\n return split_op", "def generate_protos(session):\n # longrunning operations directory is non-standard for backwards compatibility\n # see comments in directory for details\n # Temporarily rename the operations_pb2.py to keep it from getting overwritten\n os.replace(\n \"google/longrunning/operations_pb2.py\",\n \"google/longrunning/operations_pb2-COPY.py\",\n )\n\n session.install(GRPCIO_TOOLS_VERSION)\n protos = [str(p) for p in (Path(\".\").glob(\"google/**/*.proto\"))]\n session.run(\n \"python\", \"-m\", \"grpc_tools.protoc\", \"--proto_path=.\", \"--python_out=.\", *protos\n )\n\n # Some files contain service definitions for which `_pb2_grpc.py` files must be generated.\n service_protos = [\"google/longrunning/operations.proto\"]\n session.run(\n \"python\", \"-m\", \"grpc_tools.protoc\", \"--proto_path=.\", \"--grpc_python_out=.\", *service_protos\n )\n\n # More LRO non-standard fixes: rename the file and fix the import statement\n operations_grpc_py = Path(\"google/longrunning/operations_pb2_grpc.py\")\n file_contents = operations_grpc_py.read_text()\n file_contents = file_contents.replace(\"operations_pb2\", \"operations_proto_pb2\")\n operations_grpc_py.write_text(file_contents)\n\n # Clean up LRO directory\n os.replace(\n \"google/longrunning/operations_pb2.py\",\n \"google/longrunning/operations_proto_pb2.py\",\n )\n os.replace(\n \"google/longrunning/operations_pb2-COPY.py\",\n \"google/longrunning/operations_pb2.py\",\n )", "def __call__(self, *args, **kwargs):\n new_node = Tensor()\n new_node.op = self\n return new_node", "def _proto2object(\n proto: DeleteTensorMessage_PB,\n ) -> \"DeleteTensorMessage\":\n\n return DeleteTensorMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def make_proto():\n # Start running from one directory above the directory which is found by\n # this scripts's location as __file__.\n cwd = os.path.dirname(os.path.abspath(__file__))\n\n # Find all the .proto files.\n protos_to_compile = []\n for (root, _, files) in os.walk(cwd):\n for filename in files:\n full_filename = os.path.join(root, filename)\n if full_filename.endswith(\".proto\"):\n proto_stat = os.stat(full_filename)\n try:\n pb2_stat = os.stat(\n full_filename.rsplit(\n \".\", 1)[0] + \"_pb2.py\")\n if pb2_stat.st_mtime >= proto_stat.st_mtime:\n continue\n\n except (OSError, IOError):\n pass\n\n protos_to_compile.append(full_filename)\n\n if not protos_to_compile:\n logging.info(\"No protos needed to be compiled.\")\n else:\n for proto in 
protos_to_compile:\n logging.info(\"Compiling %s\", proto)\n protodir, protofile = os.path.split(proto)\n\n subprocess.check_call(\n [\n \"python\",\n \"-m\",\n \"grpc_tools.protoc\",\n \"-I.\",\n \"--python_out=.\",\n \"--grpc_python_out=.\",\n protofile,\n ],\n cwd=protodir)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
telnet_expect(list, timeout=None) Read until one from a list of a regular expressions matches.
def Telnet_expect(list, timeout=None): return;
[ "def expect(self, list, timeout=None):\r\n re = None\r\n list = list[:]\r\n indices = range(len(list))\r\n for i in indices:\r\n if not hasattr(list[i], \"search\"):\r\n if not re: import re\r\n list[i] = re.compile(list[i])\r\n while 1:\r\n self.process_rawq()\r\n for i in indices:\r\n m = list[i].search(self.cookedq)\r\n if m:\r\n e = m.end()\r\n text = self.cookedq[:e]\r\n self.cookedq = self.cookedq[e:]\r\n return (i, m, text)\r\n if self.eof:\r\n break\r\n if timeout is not None:\r\n r, w, x = select([self.fileno()], [], [], timeout)\r\n if not r:\r\n break\r\n self.fill_rawq()\r\n text = self.read_very_lazy()\r\n if not text and self.eof:\r\n raise EOFError\r\n return (-1, None, text)", "def test_read_until(self):\n want = [b'xxxmatchyyy']\n telnet = test_telnet(want)\n data = telnet.read_until(b'match')\n self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq,\n telnet.rawq, telnet.sock.reads))\n\n reads = [b'x' * 50, b'match', b'y' * 50]\n expect = b''.join(reads[:-1])\n telnet = test_telnet(reads)\n data = telnet.read_until(b'match')\n self.assertEqual(data, expect)", "def send_and_expect(self,\n command,\n pattern_list,\n timeout=30.0,\n searchwindowsize=config.SEARCHWINDOWSIZE,\n expect_type=line_identifier.LINE_TYPE_ALL,\n mode=MODE_TYPE_ANY,\n port=0,\n slow=False,\n add_newline=True,\n newline=\"\\n\",\n command_tries=1,\n raise_for_timeout=False):", "def _expect(self, expected, times=50):\n logger.debug('[%s] Expecting [%s]', self.port, expected)\n retry_times = 10\n while times:\n if not retry_times:\n break\n\n line = self._readline()\n\n if line == expected:\n return\n\n if not line:\n retry_times -= 1\n time.sleep(0.1)\n\n times -= 1\n\n raise Exception('failed to find expected string[%s]' % expected)", "def expect(self, pat, timeout=15, failpat=None, success_info=False):\n assert type(pat) in (unicode, str, list), \"unsupported type\"\n s = time.time()\n self._check_fail(failpat)\n if type(pat) != list:\n while pat not in self._buff:\n time.sleep(0.1)\n self._check_fail(failpat)\n if timeout:\n assert s + timeout >= time.time(), \"expect timeout\"\n else:\n while all([x not in self._buff for x in pat]):\n time.sleep(0.1)\n self._check_fail(failpat)\n if timeout:\n assert s + timeout >= time.time(), \"expect timeout\"\n res = self._buff\n self._buff = b\"\"\n if success_info:\n sys.stdout.write(\"\\nexpect {} success\\n\".format(pat))\n sys.stdout.flush()\n return res", "def read_until(self, expected, loglevel=None):\n ret = telnetlib.Telnet.read_until(self, expected,\n self._timeout).decode('ASCII', 'ignore')\n self._log(ret, loglevel)\n if not ret.endswith(expected):\n raise AssertionError(\"No match found for '%s' in %s\"\n % (expected, utils.secs_to_timestr(self._timeout)))\n return ret", "def read_until_regexp(self, *expected):\n expected = list(expected)\n if self._is_valid_log_level(expected[-1]):\n loglevel = expected[-1]\n expected = expected[:-1]\n else:\n loglevel = 'INFO'\n try:\n index, _, ret = self.expect(expected, self._timeout)\n except TypeError:\n index, ret = -1, ''\n ret = ret.decode('ASCII', 'ignore')\n self._log(ret, loglevel)\n if index == -1:\n expected = [ exp if isinstance(exp, basestring) else exp.pattern\n for exp in expected ]\n raise AssertionError(\"No match found for %s in %s\"\n % (utils.seq2str(expected, lastsep=' or '),\n utils.secs_to_timestr(self._timeout)))\n return ret", "def read_until(pattern, timeout=0.0):\n def timeout_event():\n \"\"\"Handles the read timeout event.\"\"\"\n timeout_event.reading = False\n\n 
timeout_event.reading = True\n\n timer = None\n if timeout > 0:\n timer = threading.Timer(timeout, timeout_event)\n timer.start()\n\n position = 0\n\n dev.purge()\n\n while timeout_event.reading:\n try:\n char = dev.read()\n\n if char is not None and char != '':\n if char == pattern[position]:\n position = position + 1\n if position == len(pattern):\n break\n else:\n position = 0\n\n except Exception as err:\n pass\n\n if timer:\n if timer.is_alive():\n timer.cancel()\n else:\n raise TimeoutError('Timeout while waiting for line terminator.')", "def expect_all(self, array, regex = False, timeout = 10):\n if not isinstance(array, list): array = [array]\n if not regex: array = [re.escape(x) for x in array]\n timer = int(timeout)\n capture = \"\"\n while diminishing_expect:\n captured_lines_local = []\n iter_time = time.time()\n temp_expect = list(diminishing_expect)\n i = self.expect(temp_expect, timer)\n if i[1] == True:\n return diminishing_expect\n timer -= (time.time() - iter_time) # Subtract time it took to capture\n capture += i[0] # Captured Value\n for k in diminishing_expect[:]:\n if re.search(k, capture):\n captured_lines_local.append(k)\n diminishing_expect.remove(k)\n return capture", "def read_until(self, match, timeout=None):\r\n n = len(match)\r\n self.process_rawq()\r\n i = self.cookedq.find(match)\r\n if i >= 0:\r\n i = i+n\r\n buf = self.cookedq[:i]\r\n self.cookedq = self.cookedq[i:]\r\n return buf\r\n s_reply = ([self], [], [])\r\n s_args = s_reply\r\n if timeout is not None:\r\n s_args = s_args + (timeout,)\r\n while not self.eof and select(*s_args) == s_reply:\r\n i = max(0, len(self.cookedq)-n)\r\n self.fill_rawq()\r\n self.process_rawq()\r\n i = self.cookedq.find(match, i)\r\n if i >= 0:\r\n i = i+n\r\n buf = self.cookedq[:i]\r\n self.cookedq = self.cookedq[i:]\r\n return buf\r\n return self.read_very_lazy()", "def readMultiple(self, timeout=60):\n deadline = time.time() + timeout\n allpackets = []\n while time.time() < deadline:\n allpackets.append(self.read())\n time.sleep(1.0)\n\n return allpackets", "def _telnet_read(self):\n return self.tn.read_until(\"\\n\", self.timeout).rstrip('\\n') # Reads reply from device, strips termination char", "def write_until_expected_output(self, text, expected, timeout,\n retry_interval, loglevel=None):\n timeout = utils.timestr_to_secs(timeout)\n retry_interval = utils.timestr_to_secs(retry_interval)\n starttime = time.time()\n while time.time() - starttime < timeout:\n self.write_bare(text)\n self.read_until(text, loglevel)\n ret = telnetlib.Telnet.read_until(self, expected,\n retry_interval).decode('ASCII', 'ignore')\n self._log(ret, loglevel)\n if ret.endswith(expected):\n return ret\n raise AssertionError(\"No match found for '%s' in %s\"\n % (expected, utils.secs_to_timestr(timeout)))", "def expect(self, expected, timeout=10):\n started = time.time()\n elapsed = 0\n while elapsed < timeout:\n line = self._get_line(timeout=(timeout - elapsed))\n if line is None:\n break # timed out waiting for line\n elif expected in line:\n return\n elapsed = time.time() - started\n raise AssertionError(\n \"Timed out waiting for '{}' in the stream\".format(expected)\n )", "def do_and_expect(self,\n func,\n func_args,\n func_kwargs,\n pattern_list,\n timeout=30.0,\n searchwindowsize=config.SEARCHWINDOWSIZE,\n expect_type=line_identifier.LINE_TYPE_LOG,\n mode=MODE_TYPE_ANY,\n raise_for_timeout=False):", "def wait_for_match(self, file, regex, timeout=60, poll_interval=1.0):\n compiled_regex = re.compile(regex)\n\n def check_content():\n try:\n 
file_content = self._exec.send_line(\n \"cat '{file}'\".format(file=file), expected_exit_code=0)\n except Exception:\n logger.debug(\n 'Error occurred when checking content of file {file}'.format(file=file),\n exc_info=True)\n return False\n\n return compiled_regex.search(file_content)\n\n return wait_for(check_content, timeout=timeout, poll_interval=poll_interval)", "def press_and_expect(self,\n button,\n pattern_list,\n wait=0.0,\n timeout=30.0,\n searchwindowsize=config.SEARCHWINDOWSIZE,\n expect_type=\"log\",\n port=0,\n mode=\"any\"):", "def expect(self, array, timer = 10):\n array = [array] if isinstance(array, str) else array\n results = self._connection.expect([x.encode('ascii') for x in array], timeout = timer)\n if results[0] == -1:\n return None\n else:\n return results[2].decode('ascii')", "def verify_each_packet_on_multiple_port_lists(\n test, pkts=[], ports=[], device_number=0, timeout=None, n_timeout=None\n):\n test.assertTrue(\n len(pkts) == len(ports), \"Packet list count does not match port list count\"\n )\n\n if not timeout:\n timeout = ptf.ptfutils.default_timeout\n if not n_timeout:\n n_timeout = ptf.ptfutils.default_negative_timeout\n\n if timeout <= 0 or n_timeout <= 0:\n raise Exception(\n \"%s() requires positive timeout value.\" % sys._getframe().f_code.co_name\n )\n\n pkt_cnt = 0\n rcv_idx = []\n for port_list, pkt in zip(ports, pkts):\n rcv_ports = set()\n for port in port_list:\n (rcv_device, rcv_port, rcv_pkt, _) = dp_poll(\n test, device_number=device_number, port_number=port, timeout=timeout\n )\n if rcv_device != device_number:\n continue\n logging.debug(\"Checking for pkt on device %d, port %d\", device_number, port)\n if ptf.dataplane.match_exp_pkt(pkt, rcv_pkt):\n pkt_cnt += 1\n rcv_ports.add(port_list.index(rcv_port))\n break\n rcv_idx.append(rcv_ports)\n\n verify_no_other_packets(test, device_number=device_number, timeout=n_timeout)\n\n test.assertTrue(\n pkt_cnt == len(pkts),\n \"Did not receive pkt on one of ports %r for device %d\" % (ports, device_number),\n )\n return rcv_idx" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write to Telnet Port
def PortWrite( data ): global gTelnetConn if gTelnetConn == None: OpenTelnet() gTelnetConn.write( data ) return;
[ "def _telnet_write(self, message):\n self.tn.write(message + \"\\r\\n\") # Writes telnet message to connected device, with termination chars added", "def writeBytes(port, toWrite):\n print(port, toWrite)", "def write(self, data):\n self.serial_device.write(data)", "def __sendTelnetCommand(self, command):\n #print(\"telnet\", command)\n try:\n tn = Telnet(self.ip, self.port, TIMEOUT)\n tn.write(command)\n response = tn.read_eager()\n logging.info('Sent telnet command %s, %s:%s \\\n and received response %s',\n command, self.ip, self.port, response)\n time.sleep(.3)\n tn.close()\n except Exception as e:\n logging.error('Error sending telnet command %s to %s:%i - %s',\n command, self.ip, self.port, e)", "def write(self, command):\n #print \"In \", self.get_name(), \"::::::write()\"\n dev = self.dev_serial \n dev.Close()\n dev.Open() \n dev.Write(command)", "def write_bare(self, text):\n self._verify_connection()\n telnetlib.Telnet.write(self, text)", "def send(self, command, port=0, slow=False, add_newline=True, newline=\"\\n\"):", "def transport_serial_send_xon(self, port=0):", "def telnet(*k, **kw):\n if (len(k)==3 and len(kw)==0):\n port=8999\n host=\"127.0.0.1\"\n if (not telnetOutput.has_key((port, host))):\n telnetOutput[(port, host)] = Telnet(host, port)\n _within = k[0]\n _text = k[1]\n _text = _text % _getNamespace(back=2)\n print \"sending %s \" % _text\n telnetOutput[(port, host)].write(_text)\n return\n port = kw.get(\"port\", 8999)\n host = kw.get(\"host\", \"127.0.0.1\")\n\n if (not telnetOutput.has_key((port, host))):\n telnetOutput[(port, host)] = Telnet(host, port)\n def telnettransform(within, _text, g):\n _text = _text % _getNamespace(back=2)\n print \"sending %s \" % _text\n telnetOutput[(port, host)].write(_text)\n\n return telnettransform", "def write(self, data, tout=200):\n _logging.debug(\"\")\n _logging.debug(\"data: %s\", _hex_data(data))\n try:\n count = self._dev.write(self.PIPE_OUT, data, tout)\n except _usb.USBError as err:\n self._dev = None\n raise StlinkComException(\"USB Error: %s\" % err)\n if count != len(data):\n raise StlinkComException(\"Error Sending data\")", "def _check_and_write_port(self):\n if self.stopped or self.written:\n return\n port = self.get_port_from_httpserver()\n if not port:\n return\n with open(self.portfile, \"wb\") as f:\n f.write(str(port))\n self.written = True\n self.bus.log('Port %r written to %r.' 
% (port, self.portfile))", "def write(self, data):\n try:\n # Hack to support unicode under Python 2.x\n if isinstance(data, str) or (sys.version_info < (3,) and isinstance(data, unicode)):\n data = data.encode('utf-8')\n\n self._device.write(data)\n\n except serial.SerialTimeoutException:\n pass\n\n except serial.SerialException as err:\n raise CommError('Error writing to device.', err)\n\n else:\n self.on_write(data=data)", "def write(self,command, port = None):\n #command = bytes(command,encoding='Latin-1')\n if type(command) is not bytes:\n warning('Depreciation warning: expecting type bytes in write but received %r' % command)\n command = command.encode('Latin-1')\n debug('encoding: {}'.format(command))\n if port is None:\n port = self.port\n if port is not None:\n port.flushInput()\n debug('write(): pid %r and command = %r' %(self.pump_id,command))\n port.write(command)\n self.last_command = command\n else:\n error('Port is not specified')", "def PortRead():\r\n global gTelnetConn\r\n if gTelnetConn == None:\r\n OpenTelnet()\r\n \r\n data = gTelnetConn.read()\r\n return data;", "def _write(self, buf):\n self._debug_print(\"Send:\", repr(buf).lstrip('b'))\n try:\n self._sport.write(buf)\n self._sport.flush()\n except serial.serialutil.SerialTimeoutException:\n # I've found that this error is rarely recoverable except by power\n # cycling.\n raise PrinterError(\"Error communicating with printer: Write \"\n \"operation timed out. Try power-cycling the printer.\")", "def serial_tx(string):\r\n Serial2.println(string)", "def transfer_ESP32(self, out):\n if len(out) > 0:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.settimeout(1000)\n s.connect((str(self.IP),int(self.port)))\n s.send(bytes(out,\"utf-8\"))\n else:\n print(\"empty data input\")\n #print(out)", "def SERIAL_SEND_cmd(self, cmd):\n # BLOCKS\n if self.Port.writable():\n self.Port.flushInput()\n self.Port.write(cmd)\n self.Port.write(b'\\r')\n # self.Port.write(b'\\r', timeout=None)\n self.Port.flush()\n # self.Port.flushOutput()\n self.logger.debug(\"Wrote \" + repr(cmd))\n else:\n raise self.ErrorPortNotWriteable(\"Couldn't write to serial port\")\n return", "def transport_serial_send_break_byte(self, port=0):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read from Telnet Port
def PortRead(): global gTelnetConn if gTelnetConn == None: OpenTelnet() data = gTelnetConn.read() return data;
[ "def _telnet_read(self):\n return self.tn.read_until(\"\\n\", self.timeout).rstrip('\\n') # Reads reply from device, strips termination char", "def readLine(port, timeout=5000, encoding='utf-8'):\n print(port, timeout, encoding)\n return ''", "def readBytes(port, numberOfBytes, timeout=5000):\n print(port, numberOfBytes, timeout)\n return None", "def interact(self):\n t = telnetlib.Telnet()\n t.sock = self.socket\n t.interact()", "def read_handler(host, port, handler):\n\n sock = socket.socket()\n sock.connect((host, port))\n\n f_hand = sock.makefile()\n line = f_hand.readline()\n\n if line != \"Click::ControlSocket/1.3\\n\":\n raise ValueError(\"Unexpected reply: %s\" % line)\n\n cmd = \"read %s\\n\" % handler\n sock.send(cmd.encode(\"utf-8\"))\n\n line = f_hand.readline()\n\n regexp = '([0-9]{3}) (.*)'\n match = re.match(regexp, line)\n\n while not match:\n line = f_hand.readline()\n match = re.match(regexp, line)\n\n groups = match.groups()\n\n if int(groups[0]) == 200:\n\n line = f_hand.readline()\n res = line.split(\" \")\n\n length = int(res[1])\n data = f_hand.read(length)\n\n return (int(groups[0]), data)\n\n return (int(groups[0]), line)", "def read_serial_line(self):\n size_of_input_buffer = 0\n while size_of_input_buffer == 0:\n size_of_input_buffer = self.port.inWaiting()\n time.sleep(0.1)\n text = self.port.readline()\n return text.decode()", "def telnet_connect():\n conf = get_conf()\n tn = Telnet(conf['telnet_ip'], conf['telnet_port'], timeout=5)\n tn.read_until(b\"user: \", timeout=2)\n tn.write(conf['telnet_user'].encode('ascii') + b\"\\r\")\n tn.read_until(b\"password:\", timeout=2)\n tn.write(conf['telnet_pw'].encode('ascii') + b\"\\r\")\n tn.read_until(b'Telnet Server\\r\\n', timeout=20)\n\n return tn", "def PortWrite( data ):\r\n global gTelnetConn\r\n if gTelnetConn == None:\r\n OpenTelnet()\r\n \r\n gTelnetConn.write( data )\r\n \r\n return;", "def connect(self):\n try:\n self.tn = telnetlib.Telnet(self.DRONEIP, self.DRONEPORT)\n return self.tn\n except:\n print(\"Error While Connecting the Drone System.\")", "def _read(self, size):\n recv = self.ser.read(size=size)\n if len(recv) < size:\n raise ResponseTimeout(\"timeout while communication via serial\")\n else:\n return recv", "def read(self):\r\n portBuffer = \"\"\r\n returnval = False\r\n # 'OK' indicates the end of frame\r\n for i in range(0,500): \r\n time.sleep(0.01)\r\n while (self.serialPort.inWaiting() > 0):\r\n temp = self.serialPort.read()\r\n portBuffer += temp\r\n \r\n if (re.search(\"OK\",portBuffer)):\r\n returnval = True\r\n break\r\n res = portBuffer.strip(\"OK\").replace(\"\\r\\n\", '')\r\n return (res, returnval)", "def readUntil(port, delimiter, includeDelimiter, timeout=5000):\n print(port, delimiter, includeDelimiter, timeout)\n return ''", "def ComPortThread(self):\n while self.alive.isSet(): #loop while alive event is true\n text = self.serial.read(1) #read one, with timout\n if text: #check if not timeout\n n = self.serial.inWaiting() #look if there is more to read\n if n:\n text = text + self.serial.read(n) #get it\n #newline transformation\n if self.settings.newline == NEWLINE_CR:\n text = text.replace('\\r', '\\n')\n elif self.settings.newline == NEWLINE_LF:\n pass\n elif self.settings.newline == NEWLINE_CRLF:\n text = text.replace('\\r\\n', '\\n')\n event = SerialRxEvent(self.GetId(), text)\n self.GetEventHandler().AddPendingEvent(event)", "def read(self, max_size=1024, timeout=None):\r\n\r\n # Check if there is a timeout to call the select method with or\r\n # without it\r\n if 
timeout:\r\n ready = select(self.__rlist, [], [], timeout)\r\n else:\r\n ready = select(self.__rlist, [], [])\r\n\r\n # If the serial port has any data, read it and call the read\r\n # callback method. If callback isn't set, return the data\r\n if self.__serialfd in ready[0]:\r\n data = None\r\n try:\r\n data = os.read(self.__serialfd, max_size)\r\n if self.__read_cb:\r\n self.__read_cb(data)\r\n else:\r\n return data\r\n except:\r\n return data", "def read(self):\n if self.is_blocking:\n buf = self.tap.read(self.tap.mtu)\n else:\n fd = self.tap.fileno()\n rs, ws, xs = select.select((self.tap,), (), ())\n buf = os.read(fd, self.tap.mtu)\n LOG.info('receive: via {}: {}'.format(\n self.tap.name,\n packet_raw_data_to_hex(buf)))\n return buf", "def _connect_via_telnet(self, host, username, password):\n self.get_logger().debug(\"Open telnet connection to equipment.\")\n \n # Initialize telnet session\n telnet_session = telnetlib.Telnet()\n \n try:\n telnet_session.open(host)\n telnet_session.read_until(\"Username:\", 5)\n telnet_session.write(str(username) + \"\\n\")\n telnet_session.read_until(\"Password:\", 5)\n telnet_session.write(str(password) + \"\\n\")\n telnet_session.read_until(\">\", 5)\n telnet_session.write(\"enable\\n\")\n telnet_session.read_until(\"Password:\", 5)\n telnet_session.write(str(password) + \"\\n\")\n telnet_session.read_until(\"\\n#\", 5)\n except:\n msg = \"Connection via telnet failed.\"\n raise Exception(-1, msg)\n \n try:\n # get running config\n telnet_session.write(\"terminal length 0\\n\")\n telnet_session.read_until(\"\\n#\", 5)\n telnet_session.write(\"show running-config\\n\")\n config_file = telnet_session.read_until(\"\\n#\", 10)\n\n # extract SSIDs\n config_file_lines = config_file.split('\\r\\n')\n for line in config_file_lines:\n match = \"dot11 ssid \"\n if line.startswith(match):\n self._ssids.append(line[len(match):].strip())\n\n # extract wep keys\n for i in self.WIFI_RADIOS:\n found = None\n for line in config_file_lines:\n if found == None:\n match = \"interface Dot11Radio\" + str(i)\n if line.startswith(match):\n found = line\n elif line.startswith(' '):\n match = \"encryption key \"\n if line.strip().startswith(match):\n key_id = int(line.strip()[len(match):].strip().split(' ')[0])\n self._wep_keys[i][key_id-1] = True\n else:\n break\n\n # extract wep keys\n\n except:\n msg = \"Read configuration failed.\"\n raise Exception(-2, msg)\n\n # Update handle value\n self._set_handle(telnet_session)", "def _read(self, read_term=None, read_timeout=None):\r\n\r\n buffer = []\r\n\r\n # if a different timeout was requested just\r\n # for _this_ read, store and override the\r\n # current device setting (not thread safe!)\r\n if read_timeout is not None:\r\n old_timeout = self.device.timeout\r\n self.device.timeout = read_timeout\r\n\r\n def __reset_timeout():\r\n \"\"\"restore the device's previous timeout\r\n setting, if we overrode it earlier.\"\"\"\r\n if read_timeout is not None:\r\n self.device.timeout =\\\r\n old_timeout\r\n\r\n # the default terminator reads\r\n # until a newline is hit\r\n if not read_term:\r\n read_term = \"\\r\\n\"\r\n\r\n while(True):\r\n buf = self.device.read()\r\n buffer.append(buf)\r\n\r\n # if a timeout was hit, raise an exception including the raw data that\r\n # we've already read (in case the calling func was _expecting_ a timeout\r\n # (wouldn't it be nice if serial.Serial.read returned None for this?)\r\n if buf == \"\":\r\n __reset_timeout()\r\n raise(errors.GsmReadTimeoutError(buffer))\r\n\r\n # if last n characters 
of the buffer match the read\r\n # terminator, return what we've received so far\r\n if buffer[-len(read_term)::] == list(read_term):\r\n buf_str = \"\".join(buffer)\r\n __reset_timeout()\r\n\r\n self._log(repr(buf_str), \"read\")\r\n return buf_str", "def read(self, to_read, timeout_ms):\n if not isinstance(to_read, baseinteger):\n raise TypeError(\"to_read can only be an instance of type baseinteger\")\n if not isinstance(timeout_ms, baseinteger):\n raise TypeError(\"timeout_ms can only be an instance of type baseinteger\")\n data = self._call(\"read\",\n in_p=[to_read, timeout_ms])\n return data", "def read_tcp_socket(self):\n while self.is_alive:\n try:\n # Read data from socket\n data = self.raw_serial_socket.recv(4096)\n\n # If data exist process\n if len(data) > 0:\n self.codec.add(data)\n\n except socket.timeout:\n # Just a socket timeout, continue on\n pass\n except Exception as e:\n logger.error(\"Exception in reading data.\", e)\n #self.stop_adcp_server()\n\n print(\"Read Thread turned off\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input: a list of integers. Output: the sorted list, with odd numbers first, then even numbers.
def my_sort(a_list): sorted_list = [] sort_list(a_list) return odd_list(a_list) + even_list(a_list)
[ "def sort_numbers(lis):\n \n i = 0\n iterations = 0\n \n while iterations < len(lis):\n \n # if the integer is odd, bring it to the end of the list\n if lis[i]%2 != 0:\n \n integer = lis.pop(i)\n lis.append(integer)\n \n # we keep the index as is so that we don't skip the following integer\n i -= 1\n \n i += 1\n iterations += 1", "def sort_reverse(list_of_integers):", "def filter_even_numbers(numbers):\n lista_numeros = []\n\n for element in numbers:\n lista_numeros.append(element)\n\n for element in lista_numeros:\n if not element%2 == 0:\n numbers.pop(numbers.index(element))\n\n #numbers.sort()", "def test_sort_short_list_with_odd_numbers(self):\n result = sort_array([4, 5, 2, 7, 3, 1, 10, 9])\n self.assertEqual(result, [4, 1, 2, 3, 5, 7, 10, 9])", "def descOrder(numList: List[int]) -> List[int]: # Problem 4\n new_NumList = sorted(numList, reverse=True)\n return new_NumList", "def my_sort(lista):\n largo = len(lista)\n if largo > 1:\n return my_merge(my_sort(lista[:largo/2]), my_sort(lista[largo/2:]))\n else:\n return lista", "def sort_array_by_parity_ii(self, nums: List[int]) -> List[int]:\n\n wrong_even_idx: list = []\n wrong_odd_idx: list = []\n for idx, num in enumerate(nums):\n if idx & 1 == 0 and num & 1 != 0:\n wrong_even_idx.append(idx)\n elif idx & 1 != 0 and num & 1 == 0:\n wrong_odd_idx.append(idx)\n\n for (idx_even, idx_odd) in zip(wrong_even_idx, wrong_odd_idx):\n nums[idx_even], nums[idx_odd] = nums[idx_odd], nums[idx_even]\n\n return nums", "def odd_int2(list1):\n\t\n\twhile len(list1) > 0:\n\t\tstart_len = len(list1)\n\t\tcurr_value = list1[0]\n\t\t\n\t\tlist1 = list(filter(lambda elem: elem != curr_value, list1))\n\t\t\n\t\tif (start_len - len(list1)) % 2 == 0:\n\t\t\treturn curr_value", "def test_radix_sort_n_2_list():\n from radix import radix_sort\n assert radix_sort([6, 5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5, 6]", "def even_odd_partition(arr: List[int]) -> List[int]:\n def swap(i, j):\n temp = arr[j]\n arr[j] = arr[i]\n arr[i] = temp\n\n i = 0\n j = len(arr) - 1\n while i < j:\n if arr[i] % 2 == 0:\n # pointer is satisfied so increment\n i += 1\n else:\n # we need to swap an odd element to the end of the array\n # the odd pointer gets what it wanted so decrement\n swap(i, j)\n j -= 1\n\n if arr[j] % 2 != 0:\n # pointer gets what it wanted so decrement\n j -= 1\n else:\n # we need to swap the even element to the front of the array\n # the even pointer gets what it wanted so increment\n swap(i, j)\n i += 1\n\n return arr", "def all_even(number_list):\n\n # do the opposite of the above function (this time, find positive #s)\n\n even_elements = [num for num in number_list if num % 2 == 0]\n return even_elements", "def number_list_sort(l):\n # Lists of numbers are sorted numerically by default.\n return sorted(l)", "def all_odd(number_list):\n odd_numbers = []\n for item in number_list:\n if item % 2 != 0:\n #modulo: if you can divide it by two but there is a remainder\n odd_numbers.append(item)\n\n return odd_numbers", "def filter_positive_even_numbers(numbers):\n\n positive_even_numbers = [x for x in numbers if x > 0 and not x % 2]\n\n return positive_even_numbers", "def gnome_sort(input_list):\n i=1\n while True:\n if i < len(input_list)-1:\n if input_list[i] >= input_list[i - 1]:\n i += 1\n if input_list[i] < input_list[i-1]:\n input_list[i],input_list[i-1]=input_list[i-1],input_list[i]\n i-=1\n if i==0:\n i+=1\n if i==len(input_list)-1:\n break\n return input_list", "def extract_even_numbers_in_list(alist):\r\n result = []\r\n for elem in alist:\r\n if elem%2 == 0:\r\n 
result.append(elem)\r\n return result", "def shell_sort(lst):\n split_point = len(lst) // 2 #Initially splitting the list in half\n while split_point > 0:\n\n for i in range(split_point, len(lst)):\n temp = lst[i]\n j = i\n\n while j >= split_point and lst[j - split_point] > temp: #Sorting the subsection of the list\n lst[j] = lst[j - split_point]\n j = j - split_point\n lst[j] = temp\n\n split_point = split_point // 2 #splitting the unordered part of the list in half\n yield lst", "def filter_positive_even_numbers(numbers):\n return [number for number in numbers if is_positive(number) and is_even(number)]", "def only_odds(numbers):\n\tresult = []\n\tfor i in numbers:\n\t\tif i%2 == 0:\n\t\t\tcontinue\n\t\telse:\n\t\t\tresult.append(i)\n\treturn result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
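The positive document in the record above calls helpers (sort_list, odd_list, even_list) that the record does not define, so it is not runnable as shown. A minimal self-contained sketch of the described behaviour, assuming "sorted" means ascending order within each parity group:

def my_sort(a_list):
    # Sort ascending, then emit odd numbers first and even numbers last.
    ordered = sorted(a_list)
    odds = [n for n in ordered if n % 2 != 0]
    evens = [n for n in ordered if n % 2 == 0]
    return odds + evens

For example, my_sort([4, 1, 7, 2]) returns [1, 7, 2, 4].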
This function prints the current time in ISO format, followed by the output of a normal print call.
def timeprint(*args, **kwargs: Any) -> None: print(datetime.now().isoformat(), *args, **kwargs)
[ "def get_iso_time() -> str:\n return datetime.now().isoformat()", "def isoformat(self):\r\n s = _format_time(self.__hour, self.__minute, self.__second,\r\n self.__microsecond)\r\n tz = self._tzstr()\r\n if tz:\r\n s += tz\r\n return s", "def time_display(time):\n return timeFormat(time)", "def get_time_display(self):\n return str(self.time)[11: 19]", "def tprint(s):\n print(\"[\" + time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \"] \" + s)", "def getnowstrftime(self):\n print()\n print(\"Current date and time\")\n print(self.now.strftime(\"%m-%d-%Y %H:%M\"))", "def time_isoformat(self):\n s = ''\n for att in ('hour', 'minute', 'second'):\n if self._content[att] is None:\n s += '00' + ':'\n else:\n s += str(self._content[att]) + ':'\n return s.rstrip(\":\") + \".0\"", "def get_time():\n current = datetime.datetime.now()\n in_format = current.strftime(\"%T %p\")\n return in_format", "def get_iso_systime(self):\n return time.strftime(u\"%Y-%m-%dT%H:%M:%S\",\n time.localtime(time.time())) + self._get_timezone()", "def print_with_timestamp(text):\n now = time.time()\n now_string = datetime.datetime.fromtimestamp(now).strftime(\n '%Y%m%d-%H:%M:%S')\n print \"%s: %s\" % (now_string, text)", "def datetime_now_iso() -> str:\n return datetime.now(timezone.utc).isoformat(timespec='seconds')", "def _timestr():\n return '%s' % time.strftime(\"%Y-%m-%d %H:%M:%S\")", "def time_str(self):\n return f'{datetime_to_str(self.time)}'", "def printWithTime(data):\n\tprint (\"%s-->%s\" %(time.asctime(), data))\n\twriteToFile(data)", "def _pretty_time(cls, ftime):\n return time.strftime(\"%H:%M:%S\", time.gmtime(ftime))", "def prettyPrintTime(time):\n return '{0:.5f}s'.format(time)", "def current_time(message):\n return print(f\"{datetime.now().strftime('%H:%M:%S')} - {message}\")", "def get_system_time():\n time = datetime.now()\n return time.strftime((\"%B %Y %A %I:%M:%S\"))", "def at_time_display(time):\n return timeFormat(time, prefix=gettext(\"at \"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
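The positive document in the record above needs two imports to run as written; a complete sketch using only the standard library:

from datetime import datetime
from typing import Any

def timeprint(*args, **kwargs: Any) -> None:
    # Prepend the current ISO-8601 timestamp, then forward everything to print.
    print(datetime.now().isoformat(), *args, **kwargs)

Calling timeprint("job started") prints something like 2024-05-01T12:00:00.123456 job started.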
This function clears the contents of a list.
def clear(List): print("Original list:",List) print("Cleared list:", [List.clear()])
[ "def clearList(self):\n\n del self.genomeList[:]", "def clear_list(self):\n self.active_list = []\n self.failed_list = []", "def clear(self):\n self.listwalker.clear()", "def ClearItems(self):\n for item in self.items:\n item.Destroy()\n for line in self.lines:\n line.Destroy()\n self.items=[]\n self.lines=[]\n self.Layout()\n self.SetupScrolling()", "def clear_all(self):\n data = self.Entries\n del data[:]", "def _clearList(self):\n # Clear the List\n log.debug(\"[_clearList] Setting queue empty\")\n self.SendList = []\n self.pmLastSentMessage = None\n self.pmExpectedResponse = []", "def clear(self):\n self.parts = []", "def clear(self):\n self.data = []\n self.updateData()", "def clear(self) -> None:\n\n self.render_list.point_lights = list()\n self.render_list.geometry = list()\n self.children = list()", "def clear_items_sequential(self):\n pass", "def clear_complete_list(self):\n Database().clear_completed_records()\n self.ids.complete_list.clear_widgets()", "def clear(self):\r\n\r\n ilist = self.canvas().allItems()\r\n for eachItem in ilist:\r\n if eachItem:\r\n eachItem.setCanvas(None)\r\n del eachItem\r\n self.canvas().update()", "def clear(self):\n del self.results\n self.results = list()", "def clear(self):\n self.buckets = [None] * self.size\n self.items = 0", "def clear(self):\n self.head=self.end=None\n self.size=0", "def selection_clear(self, first, last=None):\r\n\t\tfor l in self.widgets:\r\n\t\t\ttk.Listbox.selection_clear(l, first, last)", "def clear(self,slot_list):\r\n for event_list in slot_list:\r\n for event in event_list:\r\n #print('start 7:ConfTrackManagement:clear(self,slot_list):event_list,event:::',event_list,event)\r\n\r\n self.talk_list.remove(event)", "def remove_blanks_from_list(item_list):\n index = 0\n while index < len(item_list):\n if item_list[index] == \"\":\n del item_list[index]\n else:\n index += 1\n return item_list", "def reset(self):\n self.lines = []\n self.total_todos = 0\n self.active_todos = []\n self.done_todos = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
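Note that list.clear() returns None, so the positive document in the record above prints [None] rather than the emptied list. A sketch of the presumably intended behaviour, clearing in place and then printing the now-empty list:

def clear(a_list):
    print("Original list:", a_list)
    a_list.clear()  # empties the list in place; the call itself returns None
    print("Cleared list:", a_list)

For example, clear([1, 2, 3]) prints the original list followed by [].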
Determine bounding box from child elements.
def determine_bounding_box(elements: List): x0, y0, x1, y1 = zip(*map(lambda e: e.bbox, elements)) bbox = (min(x0), min(y0), max(x1), max(y1)) return bbox
[ "def _compound_bounding_box(self, experiment):\n bounds = experiment.prepdata.bounds\n def wh(row):\n return self.where_is(row['bid'])\n\n bounds['node'] = bounds.apply(wh, axis=1)\n groups = bounds.groupby('node')\n bboxes = groups.agg({'x_min': min, 'x_max': max, 'y_min': min, 'y_max': max})\n bbox_nodes = set(bboxes.index)\n graph_nodes = set(self.nodes(data=False))\n actual_bbox_nodes = list(bbox_nodes & graph_nodes)\n return bboxes.loc[actual_bbox_nodes]", "def bounding_box(self):\n (x0, y0, x1, y1) = (None, None, None, None)\n for v in self.variables:\n if x0 is None or x0 > v.x:\n x0 = v.x\n if y0 is None or y0 > v.y:\n y0 = v.y\n if x1 is None or x1 < v.x:\n x1 = v.x\n if y1 is None or y1 < v.y:\n y1 = v.y\n for sg in self.stroke_groups:\n for s in sg:\n for p in s.points:\n (px0, py0, px1, py1) = p.bounding_box()\n if x0 is None or x0 > px0:\n x0 = px0\n if y0 is None or y0 > py0:\n y0 = py0\n if x1 is None or x1 < px1:\n x1 = px1\n if y1 is None or y1 < py1:\n y1 = py1\n return (x0, y0, x1, y1)", "def boundingBox(self):\r\n\t\tfrom blur3d.lib.cartesian import BoundingBox, Point\r\n\t\tp1, p2 = mxs.nodeGetBoundingBox(self.nativePointer(), mxs.matrix3(1))\r\n\t\treturn BoundingBox(Point.newFromMaxPoint(p1), Point.newFromMaxPoint(p2))", "def subimage_bbox(self):\n if self._is_parent_trimmed:\n return self._parent_amplifier.getBBox()\n else:\n return self._parent_amplifier.getRawBBox()", "def calc_bounding_box(self):\n # you need this in case Width or Height are negative\n self.boundingBox = BBox.from_points((self.position + self.size, self.position - self.size))\n if self._canvas:\n self._canvas.boundingBoxDirty = True", "def bbox(self) -> Optional[BBox]:\n boxes = []\n for node in self.iter():\n box = node.bbox\n if box:\n boxes.append(box)\n\n return BBox.max_bbox(boxes)", "def bbox(self):\n lower = np.array([[self.nboxes[i].bounds[j][0] for j in range(self.n)] \n for i in range(len(self.nboxes))]).min(axis=0)\n upper = np.array([[self.nboxes[i].bounds[j][1] for j in range(self.n)] \n for i in range(len(self.nboxes))]).max(axis=0)\n bounds = [(low, high) for low,high in zip(lower, upper)]\n return nBox(bounds)", "def calculate_bounding_box(self):\n assert self.points_list is not None, \\\n 'the list points already need to be scaled order to correctly work,\\\n this requires that get_scaled_points is executed first.'\n\n hull = cv2.convexHull(self.points_list, returnPoints=True)\n return cv2.boundingRect(hull)", "def bounding_box(rect, bboxes):\n x1 = 10000\n y1 = 10000\n x2 = 0\n y2 = 0\n\n for b in bboxes:\n x1 = min(b.x1, x1)\n y1 = min(b.y1, y1)\n x2 = max(b.x2, x2)\n y2 = max(b.y2, y2)\n\n rect = Rect(x1, y1, x2 - x1, y2 - y1, rect.prob, rect.text)\n return rect", "def bounding_rect(self) -> Rect:\n if self._bounding_rect is None:\n self._bounding_rect = Rect(*cv.boundingRect(self.points))\n return self._bounding_rect", "def boundingBox(self):\n xpos = self.xpos\n\n minXY = np.array([xpos - self.box_width / 2, self._bpdata.min * 0.95])\n maxXY = np.array([xpos + self.box_width / 2, self._bpdata.max * 1.05])\n return minXY, maxXY", "def get_bounding_boxes(self):\n all_mins, all_maxes = [], []\n for points in self.points_list():\n cur_mins = points.min(dim=0)[0] # (3,)\n cur_maxes = points.max(dim=0)[0] # (3,)\n all_mins.append(cur_mins)\n all_maxes.append(cur_maxes)\n all_mins = torch.stack(all_mins, dim=0) # (N, 3)\n all_maxes = torch.stack(all_maxes, dim=0) # (N, 3)\n bboxes = torch.stack([all_mins, all_maxes], dim=2)\n return bboxes", "def get_bounding_box(self, person):\n 
x, y = person.location\n radius = person.radius\n\n xmin, xmax = int(x - radius), int(ceil(x + radius))\n ymin, ymax = int(y - radius), int(ceil(y + radius))\n\n return xmin, ymin, xmax, ymax", "def boundingbox(self):\n\n # angle = radians(self.theta + (self.delta * pos))\n cosr = cos(radians(self.rotation))\n sinr = sin(radians(self.rotation))\n radius = self.radius * self.radius_scale\n\n x_a = -cosr * radius.real\n x_b = -sinr * radius.imag\n x_c = radians(self.theta)\n x_d = radians(self.delta)\n\n y_a = -sinr * radius.real\n y_b = +cosr * radius.imag\n y_c = radians(self.theta)\n y_d = radians(self.delta)\n\n x_pos = [0, 1.0] + _find_solutions_for_arc(x_a, x_b, x_c, x_d)\n y_pos = [0, 1.0] + _find_solutions_for_arc(y_a, y_b, y_c, y_d)\n\n x_coords = []\n y_coords = []\n for pos in x_pos:\n p = self.point(pos)\n x_coords.append(p.real)\n for pos in y_pos:\n p = self.point(pos)\n y_coords.append(p.imag)\n\n x_min, x_max = min(x_coords), max(x_coords)\n y_min, y_max = min(y_coords), max(y_coords)\n return [x_min, y_min, x_max, y_max]", "def bbox(self) -> pygame.Rect:\n return pygame.Rect(self.bbox_xmin, self.bbox_ymax, self.bbox_xmax-self.bbox_xmin, self.bbox_ymax-self.bbox_ymin)", "def get_bounding_box_of_file(self):\n for event, elem in etree.iterparse(self._xml_file, events=('start',\n 'end')):\n if (event == 'start'):\n if (elem.tag == 'bounds'):\n bound_min_lon = elem.attrib['minlon']\n bound_min_lat = elem.attrib['minlat']\n bound_max_lon = elem.attrib['maxlon']\n bound_max_lat = elem.attrib['maxlat']\n t = get_timestamp()\n print(\"[\"+t+\"] (bounding box of file: \"\n + str(bound_min_lon) + \" \" + str(bound_min_lat) + \" \"\n + str(bound_max_lon) + \" \"\n + str(bound_max_lat) + \")\")\n break\n clear_element(elem)\n clear_element(elem)\n return bound_min_lon, bound_min_lat, bound_max_lon, bound_max_lat", "def bounding_box(self):\n x1 = self.X.min()\n x2 = self.X.max()\n y1 = self.Y.min()\n y2 = self.Y.max()\n return [x1,x2,y1,y2]", "def boundingbox(self):\n g0 = self.control1 - self.start\n g1 = self.control2 - self.control1\n g2 = self.end - self.control2\n\n c0 = 3 * g0\n c1 = -6 * g0 + 6 * g1\n c2 = 3 * g0 - 6 * g1 + 3 * g2\n\n x_c0, x_c1, x_c2 = [c.real for c in [c0, c1, c2]]\n y_c0, y_c1, y_c2 = [c.imag for c in [c0, c1, c2]]\n\n x_cand = [0, 1] + _find_solutions_for_bezier(x_c2, x_c1, x_c0)\n y_cand = [0, 1] + _find_solutions_for_bezier(y_c2, y_c1, y_c0)\n\n x_coords = []\n y_coords = []\n for t in x_cand:\n p = self.point(t)\n x_coords.append(p.real)\n for t in y_cand:\n p = self.point(t)\n y_coords.append(p.imag)\n\n x_min, x_max = min(x_coords), max(x_coords)\n y_min, y_max = min(y_coords), max(y_coords)\n return [x_min, y_min, x_max, y_max]", "def bounding_box_area(self):\n return (self.bounding_box[1][0] - self.bounding_box[0][0]) * (self.bounding_box[1][1] - self.bounding_box[0][1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
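The positive document in the record above assumes each element exposes a bbox attribute of the form (x0, y0, x1, y1). A self-contained sketch with a hypothetical stand-in element type, used only for illustration:

from typing import List, Tuple

class Box:
    # Hypothetical element type; only the bbox attribute matters here.
    def __init__(self, bbox: Tuple[float, float, float, float]):
        self.bbox = bbox

def determine_bounding_box(elements: List[Box]) -> Tuple[float, float, float, float]:
    # Unzip the per-element boxes into four coordinate sequences and take the extremes.
    x0, y0, x1, y1 = zip(*(e.bbox for e in elements))
    return (min(x0), min(y0), max(x1), max(y1))

For example, determine_bounding_box([Box((0, 0, 2, 2)), Box((1, 1, 5, 3))]) returns (0, 0, 5, 3).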
Returns an estimate of the number of distinct elements in items. items: a sequence of elements. k: number of hash functions.
def estimate_distinct_elements(items, k): hll = HLL.HyperLogLog64(k) hll.extend(items) return hll.cardinality
[ "def estimate_distinct_elements_parallel(lists_of_items, k, spark_context):\n hll = spark_context.parallelize(lists_of_items) \\\n .mapPartitions(init_compute_hmaps(k)) \\\n .reduce(lambda x, y :x + y)\n return hll.cardinality", "def __hash__(self):\n # Since hash itself is integer type\n h = 0\n for item in self.item_set:\n h ^= hash(item)\n\n return h", "def hashFunctionTest():\n m = 128\n h = HashFunction(m)\n print(h)\n\n count = [0] * m\n for i in range(m*2):\n count[h.h(random.randint(-10000,10000))] += 1\n print count", "def __number_of_all_unique_kmers(s):\n n = len(s)\n uniques = set()\n for k in range(1, n+1):\n uniques.update(__list_unique_kmers(s, k))\n\n return len(uniques)", "def frequent_itemset(transactions, minsup):\n pass", "def hash_fold(item, tablesize):\n # Split number into n chunks\n n = 2\n number = str(item)\n chunked_list = [number[i:i+n] for i in range(0, len(number), n)]\n\n # Convert the list items to numbers then get the sum\n sum = 0\n for c in chunked_list:\n sum += int(c)\n return sum % tablesize", "def n_keys_for_partition(self, partition):\n pass", "def getHash(intTuple, q):\n currHash = 0\n global powers #defined in ksearch{Num,Str}, each entry contains 4**j\n for i in range(len(intTuple)):\n currHash = (currHash + powers[i]*intTuple[i]) % q\n return currHash", "def partial_perms_count(n, k):\n\n a = choose(n, k)\n b = fact(k)\n return (a*b)%int(1E6)", "def estimate_complexity_by_counting_kmers(w):\n n = len(w)\n observed = __number_of_all_unique_kmers(w) - n\n expected = __number_of_possible_kmers(n) - n\n\n return observed / expected", "def threeSum__hash__v1(self, num_list: List[int]) -> List[List[int]]:\n\n target = 0\n match_hash = DefaultDict(int)\n\n #===========================================================\n # Hash numbers to indices...\n\n num_hash = DefaultDict(set)\n\n for (i, num) in enumerate(num_list):\n\n num_hash[num].add(i)\n\n #===========================================================\n # Iterate through combinations...\n\n n = len(num_list)\n\n for i1 in range(0, n - 2):\n for i2 in range(i1 + 1, n - 1):\n\n num1 = num_list[i1]\n num2 = num_list[i2]\n num3 = target - num1 - num2\n\n if num3 in num_hash:\n\n # Find whether i3 is available. 
We cannot duplicate i1 or i2.\n i3_set = set.difference(num_hash[num3], set((i1, i2)))\n\n if i3_set:\n\n trial = tuple(sorted((num1, num2, num3)))\n match_hash[trial] += 1\n\n return tuple(match_hash.keys())", "def merkle_hash(input_items: Sequence[Any]) -> Hash32:\n\n # Store length of list (to compensate for non-bijectiveness of padding)\n data_length = len(input_items).to_bytes(32, \"little\")\n if len(input_items) == 0:\n # Handle empty list case\n chunks = (b'\\x00' * SSZ_CHUNK_SIZE,)\n elif len(input_items[0]) < SSZ_CHUNK_SIZE:\n # See how many items fit in a chunk\n items_per_chunk = SSZ_CHUNK_SIZE // len(input_items[0])\n\n # Build a list of chunks based on the number of items in the chunk\n chunks_unpadded = (\n b''.join(input_items[i:i + items_per_chunk])\n for i in range(0, len(input_items), items_per_chunk)\n )\n chunks = tuple(\n chunk.ljust(SSZ_CHUNK_SIZE, b\"\\x00\")\n for chunk in chunks_unpadded\n )\n else:\n # Leave large items alone\n chunks = input_items\n\n # Tree-hash\n while len(chunks) > 1:\n if len(chunks) % 2 == 1:\n chunks += (b'\\x00' * SSZ_CHUNK_SIZE, )\n chunks = tuple(\n hash_eth2(chunks[i] + chunks[i + 1])\n for i in range(0, len(chunks), 2)\n )\n\n # Return hash of root and length data\n return hash_eth2(chunks[0] + data_length)", "def rkhsCapacity(group, alpha):\n return sum([alpha ** len(g) for g in group.partition])", "def compute_distribution(result_set, print_out=True):\n cnt_len = {1:0, 2:0, 3:0, 4:0, 5:0}\n cnt_duplicates = 0\n for r in result_set:\n r = r[0]\n tmp = []\n for l in r:\n tmp.extend(l)\n len_tmp = len(tmp)\n cnt_len[len_tmp] += 1\n if len(set(tmp)) < len_tmp:\n cnt_duplicates += 1\n\n if print_out:\n print(f\"Distribution of lengths: {cnt_len}\")\n print(f\"Sequences containing duplicates: {cnt_duplicates} / {len(result_set)}\")\n else:\n return cnt_len, cnt_duplicates", "def hash(arrayvec, k, n):\n tempkey = generatekey(k, n)\n tempmap = [hashsingle(x, k, tempkey) for x in arrayvec]\n tempkey = [tuple(y) for y in tempkey]\n tempkey = tuple(tempkey)\n result = {tempkey: tempmap}\n return result", "def hash_count(self) -> int:\n return self.__hash_count", "def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)", "def hash_terms(terms):\n hash = r_uint(0)\n for term in terms:\n hash += term.hash << 5\n return hash", "def numIdenticalPairs(self, nums: List[int]) -> int:\n\n # simplest\n \"\"\"\n O(n * (n-1)) run time\n rval = 0\n for i in range(len(nums)):\n for j in range(i+1, len(nums)):\n if nums[i] == nums[j] and i < j:\n rval+=1\n return rval\n \"\"\"\n\n # optimising?\n \"\"\"\n O(n+k) k distinct values\n \n # can we do it in O(n)?\n # only counts if i == j\n # count the number of each distinct number?\n \n # only counts if i < j \n # means [1] -->0\n # [1, 1] --> 1\n # [1, 1,1] -> 1+1\n # [1, 1,1,1] -> 3+2+1\n # n*(n-1)//2\n\n \"\"\"\n num_freq = {} # functioning as a hashmap\n for i in range(len(nums)):\n # hashmap.put()\n num = nums[i]\n count = num_freq.get(num)\n\n if count is None:\n num_freq.update({num: 1})\n else:\n num_freq.update({num: count + 1})\n\n # list comp to iterate and sum\n good_pairs = sum([n * (n - 1) // 2 for n in num_freq.values()])\n\n return good_pairs # [1, 1] --> 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
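The positive document in the record above depends on an HLL module (HLL.HyperLogLog64) that is not shown in the record and is not part of the standard library. A sketch that keeps the same interface but counts exactly with a set; this is a stand-in for illustration only, since a real HyperLogLog trades exactness for roughly constant memory:

class ExactCardinality:
    # Stand-in for the record's HLL.HyperLogLog64; exact counting, illustration only.
    def __init__(self, k):
        self.k = k            # number of hash functions; unused by the exact stand-in
        self._seen = set()

    def extend(self, items):
        self._seen.update(items)

    @property
    def cardinality(self):
        return len(self._seen)

def estimate_distinct_elements(items, k):
    counter = ExactCardinality(k)
    counter.extend(items)
    return counter.cardinality

For example, estimate_distinct_elements(["a", "b", "a", "c"], 16) returns 3.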
Return a function that yields one HyperLogLog hmap per input sequence, built from an HLL initialised with k hash functions.
def init_compute_hmaps(k): def compute_hmaps(list_of_sequences): """ Iterator yielding 1 HyperLogLog.hmap per sequence in given iterable list_of_sequences - iterable of iterable """ for sequence in list_of_sequences: hll = HLL.HyperLogLog64(k) hll.extend(sequence) yield hll return compute_hmaps
[ "def compute_hmaps(list_of_sequences):\n for sequence in list_of_sequences:\n hll = HLL.HyperLogLog64(k)\n hll.extend(sequence)\n yield hll", "def hashFunctionTest():\n m = 128\n h = HashFunction(m)\n print(h)\n\n count = [0] * m\n for i in range(m*2):\n count[h.h(random.randint(-10000,10000))] += 1\n print count", "def create_hash_from_fs (fs, lsh_classes, lsh_configs, hash_store_class, hash_store_config):\n\n # Feature names\n feat_names = fs.names()\n\n # Set up LSH for each feature\n lsh_obj = {}\n for nm in feat_names:\n lsh_obj[nm] = lsh_classes[nm](lsh_configs[nm])\n\n # Perform LSH on each instance and feature\n hs = hash_store_class(hash_store_config)\n num = 0\n had_output = False\n for (ky,val) in fs:\n lsh_vals = {}\n for nm in feat_names:\n lsh_vals[nm] = lsh_obj[nm].encode(val[nm])\n hs.add(ky, lsh_vals)\n num += 1\n if (num % 1000)==0:\n print '{}K '.format(num/1000), \n had_output = True\n sys.stdout.flush()\n if had_output:\n print\n return hs", "def get_indexes(self, key):\n h1, h2 = mmh3.hash64(key)\n for i in xrange(self.num_hashes):\n yield (h1 + i * h2) % self.num_bytes", "def create_hashtable(seqs, k = 2):\n hashtable = {}\n for i, seq in enumerate(seqs): \n for x in range(0, len(seq), k): #create all chained k-mers for seq\n kmer = seq[x: x + k]\n \n # Sometimes the last kmer is truncated and not the actual length\n if len(kmer) != k:\n continue\n\n \"\"\"\n \"It's easier to ask forgiveness than it is to get permission.\"\n ~ Grace Hopper\n (also faster in python)\n \"\"\"\n try:\n hashtable[kmer].append((i, x))\n except KeyError: #happens if the key does not yet exist\n hashtable[kmer] = [(i, x)]\n \n return hashtable", "def get_hash(self, descriptor):", "def copy(self) -> HashFunction:", "def _compute_H_MAP(self, verbose=False):\n \n # compute _theta_MAP if necessary\n if self._theta_MAP is None: self._compute_theta_MAP(verbose)\n \n tic = time.clock()\n self._H_MAP = hessian_MAP_KJMA(theta = self._theta_MAP, param = self._param)\n if verbose: print \"H_MAP computed in {}s\".format(time.clock()-tic)", "def test_rehash_fig_10p6():\n keys = (54, 28, 41, 18, 10, 36, 25, 38, 12, 90)\n solution = ([], [], [12], [18], [41], [], [36], [25], [],\n [54], [], [], [38], [10], [], [90], [28], [], [])\n hash_table = chap10.SimpleChainHashTableP14()\n # Add entries to hash table\n for key in keys:\n hash_table[key] = key\n # Verify hash table matches expectation\n for entry, expected in zip(hash_table, solution):\n assert entry == expected", "def _get_lsh(self,sig,b,r):\n lsh = []\n for i,band in enumerate(range(b)):\n lsh.append(hash(tuple(sig[i*r:i*r+r])))\n #logging.debug('hashed signature: %s\\n[get_lsh]\\tto bins: %s',sig,lsh)\n return lsh", "def hash_map(self):\n return self._hash_map", "def hash_iterator(self):\n return self.fixed_statistics.keys()", "def new(self) -> HashFunction:\n return self.hashfunc(self.algorithm)", "def build_hot_metal_lookup_table():\n lookup = list()\n \n for c in range(256):\n lookup.append(c)\n return lookup", "def testHash(steps, collList, hashFunction):\n diz = None\n\n if collList:\n diz = DictCollisionListHash(int(steps / 5), hashFunction)\n else:\n diz = DictOpenIndexingHash(steps, hashFunction)\n\n print (\"\\tTest di {} (tempo medio per ogni operazione, calcolato su {}\"\\\n \" chiamate):\".format(\n (\"DictionaryCollisionList\" if collList else \"DictionaryOpenIndexing\")\n , steps))\n\n start = time()\n for i in range(steps):\n diz.insert(2 * i, i)\n elapsed = time() - start\n print (\"\\tTempo medio insert: \\t\\t\\t\\t%4.10f\" % 
(elapsed / steps))\n\n start = time()\n for i in range(steps):\n diz.search(2 * i)\n elapsed = time() - start\n print (\"\\tTempo medio search a buon fine: \\t\\t%4.10f\" % (elapsed / steps))\n\n start = time()\n for i in range(steps):\n diz.search(2 * i + 1)\n elapsed = time() - start\n print (\"\\tTempo medio search di elementi non presenti: \\t%4.10f\" \\\n % (elapsed / steps))\n\n start = time()\n for i in range(steps):\n diz.delete(2 * i)\n elapsed = time() - start\n print (\"\\tTempo medio delete: \\t\\t\\t\\t%4.10f\" % (elapsed / steps))", "def generate_hkl_positions(self):\n pass", "def hash(arrayvec, k, n):\n tempkey = generatekey(k, n)\n tempmap = [hashsingle(x, k, tempkey) for x in arrayvec]\n tempkey = [tuple(y) for y in tempkey]\n tempkey = tuple(tempkey)\n result = {tempkey: tempmap}\n return result", "def MakeFunctionMap():\r\n\treturn ExtendFunctionMap({})", "def call_hash_function(self, key):\r\n return self._hash_function(key) % self.capacity" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
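The record above shows the closure pattern that binds k so a later caller (for example the Spark mapPartitions usage in the parallel variant shown in the previous record's negatives) can build one counter per sequence. A sketch of the same shape, reusing the ExactCardinality stand-in from the previous sketch in place of HLL.HyperLogLog64:

def init_compute_hmaps(k):
    def compute_hmaps(list_of_sequences):
        # Yield one counter per input sequence, lazily.
        for sequence in list_of_sequences:
            counter = ExactCardinality(k)
            counter.extend(sequence)
            yield counter
    return compute_hmaps

For example, list(init_compute_hmaps(16)([["a", "b"], ["x", "x", "y", "z"]])) yields two counters with cardinalities 2 and 3.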