query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: list (19 to 20 items)
metadata: dict
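A minimal sketch of how rows with this schema might be inspected, assuming the data is loaded with the Hugging Face `datasets` library; the dataset path below is a placeholder, not the actual repo id.

```python
# Hypothetical loading example -- "org/code-retrieval-triplets" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])            # natural-language description (string)
print(row["document"])         # the matching code snippet (string)
print(len(row["negatives"]))   # 19-20 hard-negative code snippets (list of strings)
print(row["metadata"])         # objective spec, e.g. triplet of (query, document, negatives)
```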
Returns a set of all cells in self.cells that are known to be mines, given that the length of the set is equal to the clue count
def MinesKnown(self):
    if len(self.cells) == self.count:
        return set(self.cells)
    else:
        return set()
[ "def known_mines(self):\n return {cell for cell in self.cells if len(self.cells)==self.count}", "def SafesKnown(self):\n if self.count == 0:\n return set(self.cells)\n else:\n return set()", "def num_mines(self) -> int:\n count = 0\n for row in self:\n for cell in row:\n if cell.mine:\n count += 1\n return count", "def minors(self):\n return minors(self.values)", "def maximal_cells(self):\n return Set(self._facets)", "def get_free_cells(self):\n free_cells = []\n for i in range(3):\n for j in range(3):\n if self[i, j] == \" \":\n free_cells.append((i, j))\n return free_cells", "def minimal_nonfaces(self):\n\n face_dict = self.faces()\n vertices = self.vertices()\n dimension = self.dimension()\n set_mnf = set()\n\n for dim in range(dimension + 1):\n face_sets = frozenset(f.set() for f in face_dict[dim])\n for candidate in combinations(vertices, dim + 1):\n set_candidate = frozenset(candidate)\n if set_candidate not in face_sets:\n new = not any(set_candidate.issuperset(mnf) for mnf in set_mnf)\n if new:\n set_mnf.add(set_candidate)\n\n for candidate in combinations(vertices, dimension+2): # Checks for minimal nonfaces in the remaining dimension\n set_candidate = frozenset(candidate)\n new = not any(set_candidate.issuperset(mnf) for mnf in set_mnf)\n if new:\n set_mnf.add(set_candidate)\n\n min_non_faces = Set([Simplex(mnf) for mnf in set_mnf])\n\n return min_non_faces", "def _get_occupied_positions(self) -> Set[Position]:\n return self._get_all_valid_positions() - self._get_holes()", "def sweep_mines(bm: BoardManager) -> List[List[int]]:", "def _mine_placements(self):\n # Start with an array/matrix of zeros.\n mines = np.zeros((self.n_rows, self.n_cols))\n # Randomly place 1's.\n mines.ravel()[np.random.choice(mines.size, self.n_mines, replace=False)] = 1\n return mines", "def set_the_mines(self, tile):\r\n num_mines_placed = 0\r\n\r\n # Create the lists of not allowed tiles\r\n restricted_columns = []\r\n restricted_rows = []\r\n for i in range(-1, 2):\r\n test_column = tile.column + i\r\n test_row = tile.row + i\r\n if not test_column < 0 or test_column >= self.columns:\r\n restricted_columns.append(test_column)\r\n if not test_row < 0 or test_row >= self.rows:\r\n restricted_rows.append(test_row)\r\n\r\n while num_mines_placed < self.mines:\r\n rand_column = random.randint(0, self.columns - 1)\r\n rand_row = random.randint(0, self.rows - 1)\r\n tile_name = str(rand_column) + ',' + str(rand_row)\r\n if not (self.tiles[tile_name].is_mine or\r\n (rand_column in restricted_columns and\r\n rand_row in restricted_rows)):\r\n self.tiles[tile_name].is_mine = True\r\n logging.debug(f'Mine placed at column {rand_column}, row '\r\n f'{rand_row}')\r\n num_mines_placed += 1\r\n logging.debug(f'{num_mines_placed} mines have been placed')\r\n\r\n # Figure out how many adjacent mines each tile has\r\n for column in range(self.columns):\r\n for row in range(self.rows):\r\n tile_name = str(column) + ',' + str(row)\r\n num_mines = self.count_adjacent_mines(column=column, row=row)\r\n if num_mines > 0:\r\n self.tiles[tile_name].num_adjacent_mines = num_mines", "def find_all_claimevens(board: Board) -> Set[Claimeven]:\n claimevens = set()\n\n for row in range(0, len(board.state[0]), 2):\n for col in range(len(board.state[0][0])):\n upper = Square(row, col)\n lower = Square(row + 1, col)\n\n if board.is_empty(upper) and board.is_empty(lower):\n claimevens.add(Claimeven(upper, lower))\n\n return claimevens", "def countAllMines(board):\n mineCount = 0\n # parse through the board to count mines\n for row in 
range(len(board)):\n for col in range(len(board)):\n if board[row][col] == \"X\":\n mineCount += 1\n return mineCount", "def placeMines():\n # initialize a 2d matrix filled with 0s\n grid = [[0 for j in range(GRID_SIZE)] for i in range(GRID_SIZE)]\n # parse through the matrix and give values to cells\n for row in range(0, GRID_SIZE):\n for col in range(0, GRID_SIZE):\n mineHere = random.randint(0, MINE_CHANCE) # 1 in 10 chance for a mine\n if mineHere == 1:\n grid[row][col] = \"X\"\n else:\n grid[row][col] = \"\"\n return grid", "def getMinimumSpanningTree(self):\n mst = set()\n for u,v,w in self.weightedEdges:\n if self.disjointSet.findSet(u) != self.disjointSet.findSet(v):\n mst.add((u,v))\n self.disjointSet.unionSet(u,v)\n return mst", "def get_placed_stones(self):\n return self.stone_set.exclude(row=-1, col=-1)", "def unset_cells(self):\n return (cell for cell in _cells if not self.is_set(cell))", "def _get_all_valid_positions(self) -> Set[Position]:\n return Board._get_all_valid_positions_memoized(self._size, self._shape)", "def _filter_cell_clumps(data, cells, wildcards, distance_threshold=10):\n if np.all(cells==0):\n return np.zeros((1480,1480))\n\n df = (Snake._extract_features(cells, cells, wildcards))\n # add column for [x,y] positions\n df['ij'] = df[['i','j']].values.tolist()\n ij = df['ij'].values.tolist()\n\n # calculate matrix of Euclidean distance between all cells in FOV\n distance = scipy.spatial.distance.cdist(ij, ij, 'euclidean')\n min_dist = np.where(distance>0, distance,distance.max()).min(1)\n # cells (labels) that pass distance threshold from nearest neighbor\n try:\n min_idx = np.hstack(np.argwhere(min_dist > distance_threshold))\n label = df.iloc[min_idx]\n mask = np.isin(cells, np.array(label['label'].values.tolist()))\n filtered_cells = np.multiply(mask.astype(int),cells)\n except:\n filtered_cells = np.zeros((1480,1480))\n\n return filtered_cells" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the set of all cells in self.cells known to be safe.
def SafesKnown(self):
    if self.count == 0:
        return set(self.cells)
    else:
        return set()
[ "def MinesKnown(self):\n if len(self.cells) == self.count:\n return set(self.cells)\n else:\n return set()", "def known_mines(self):\n return {cell for cell in self.cells if len(self.cells)==self.count}", "def unset_cells(self):\n return (cell for cell in _cells if not self.is_set(cell))", "def cells(self):\n\n return self.session.query(db.Cell)", "def maximal_cells(self):\n return Set(self._facets)", "def empty_cells(self):\n return self.__empty_cells", "def get_dead_cells(self):\n return self._dead_cells", "def _get_all_valid_positions(self) -> Set[Position]:\n return Board._get_all_valid_positions_memoized(self._size, self._shape)", "def get_free_cells(self):\n free_cells = []\n for i in range(3):\n for j in range(3):\n if self[i, j] == \" \":\n free_cells.append((i, j))\n return free_cells", "def _get_occupied_positions(self) -> Set[Position]:\n return self._get_all_valid_positions() - self._get_holes()", "def selected_cells(self):\n return SelectionHelper(\n self._data, self.selections, self.selection_mode\n ).all()", "def get_all_unique_neighbours(self):\n unique_neighbours = set()\n for ring in self.__rings:\n unique_neighbours |= ring.get_all_unique_neighbours()\n return unique_neighbours", "def get_cells(self, copy = False):\n cells = []\n for p in self.positions:\n cells.append(self.game_map[p])\n\n #row_start = self.position.y - 3\n #row_end = self.position.y + 3\n #col_start = self.position.x - 3\n #col_end = self.position.x + 3\n\n #if copy:\n # cells = copy.deepcopy(self.game_map._cells[row_start:row_end, col_start:col_end])\n #else:\n # cells = self.game_map._cells[row_start:row_end, col_start:col_end]\n\n return cells", "def list_active_cells(self):\n list_active_cells = []\n for row in self.active_cells:\n for cell in row:\n if cell is not None:\n list_active_cells.append(cell)\n return list_active_cells", "def code_cells_to_ignore(self) -> Set[int]:\n return self._code_cells_to_ignore", "def _get_empty(self):\n empty_cells = []\n row_i = 0\n column_i = 0\n\n for row in self._grid:\n column_i = 0\n for column in row:\n if column == 0:\n empty_cells.append([row_i, column_i])\n column_i += 1\n row_i += 1\n\n return empty_cells", "def available_moves_in_cell(self, cell_row, cell_col):\n\n\t\tif self.subcell_winner(cell_row, cell_col) != constants.NO_PIECE:\n\t\t\treturn set()\n\n\t\tstart_row = cell_row * 3\n\t\tstart_col = cell_col * 3\n\t\t#check if there are no additional moves\n\t\tif not constants.NO_PIECE in self.board[start_row:start_row + 3, start_col:start_col + 3]:\n\t\t\treturn set()\n\n\t\treturn self.available_moves[cell_row, cell_col]", "def get_winning_cells(self):\r\n return self.__wining_cells", "def getCanAdvanceCells(self):\n can_advance_cells = []\n for row in range(self.board.NUM_ROWS):\n for col in range(self.board.NUM_ROWS):\n if (self.is_top and self.board.isTop(row, col)) or \\\n ((not self.is_top) and self.board.isBottom(row, col)):\n moves_and_eats = self.movesAndEats(row, col)\n num_moves = len(moves_and_eats[0])\n num_eats = len(moves_and_eats[1])\n if (num_moves > 0) or (num_eats > 0):\n can_advance_cells.append((row, col))\n return can_advance_cells" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
callback function to obtain yaw angle from odometry message
def yaw_from_odom(msg):
    orientation_q = msg.pose.pose.orientation
    orientation_vec = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]
    (roll, pitch, yaw) = euler_from_quaternion(orientation_vec)
    return yaw
[ "def yawAngle(self):\n if self._calibratedYaw is None:\n return self.config.get('yaw', 0)\n else:\n return self._calibratedYaw", "def yaw(eulers):\n return eulers[2]", "def _odom_callback(self, data):\n\t\torientation_q = data.pose.pose.orientation\n\t\t\n\t\torientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\n\t\t\n\t\t(self.roll, self.pitch, self.yaw) = euler_from_quaternion (orientation_list)\n\t\tself.x_pos = data.pose.pose.position.x\n\t\tself.y_pos = data.pose.pose.position.y\n\t\tself.z_pos = data.pose.pose.position.z", "def get_angle(self):\n gyro_z = self.read_gyroscope().z\n # print(gyro_z)\n angle_xy = self.calc_accel_angle()\n # print(math.degrees(angle_xy))\n dt = time.time() - self.timestamp\n #y_n = (1 - self.a) * angle_xy + self.a * self.angle\n self.angle = self.a * (self.angle + gyro_z * dt) + (1 - self.a) * angle_xy\n #self.angle = angle_xy\n self.timestamp = time.time()\n return self.angle, dt", "def yaw_control(self, yaw_cmd, yaw):\n yaw_error = yaw_cmd - yaw\n if yaw_error > np.pi: yaw_error -= 2 * np.pi\n elif yaw_error < -np.pi: yaw_error += 2 * np.pi\n\n yaw_rate = self.k_p_yaw * yaw_error\n return yaw_rate", "def getAngles(self):\n\n if self.state != '#ob': self.__setState('#ob')\n self.bus.write(\"#f\")\n output = self.bus.read(12)\n self.__update(output)\n\n return output", "def get_angle(blob) -> float:\n rel_angle = Camera.HFOV * (blob.cxf() - sensor.width() / 2) / sensor.width()\n return rel_angle", "def computeYaw(self,points):\n p = deepcopy(points)\n p[1][2] = p[0][2]\n v_12 = p[1]-p[0]\n p_yaw = deepcopy(p[0])\n p_yaw[0]+=10\n v_yaw = p_yaw - p[0]\n angle = self.computeAngle(v_12, v_yaw)\n if v_12[1] < 0:\n angle = -angle\n rospy.loginfo(\"yaw in deg %f\"%self.rad2deg(angle))\n return angle", "def yaw(camera: 'SoCamera', radians: 'float') -> \"void\":\n return _coin.SoScXMLFlightControlTarget_yaw(camera, radians)", "def readGyroAngle(self):\n if self.gyroSensor is not None:\n angleData = self.gyroSensor.angle\n return angleData\n else:\n print(\"Warning, no gyro sensor connected\")\n return None", "def Rbody2nav_to_angle(R, output_units='rad', rotation_sequence='321'):\n yaw = np.arctan2(R[1,0], R[0,0])\n #pitch = -np.arctan(R[2,0] / np.sqrt(1.-R[2,0]**2)) # Farrel eqn 2.45\n pitch = -np.arcsin(R[2,0]) # this is simpler\n roll = np.arctan2(R[2,1], R[2,2] )\n \n # Apply necessary unit transformations.\n if output_units == 'rad':\n pass\n elif output_units == 'deg':\n yaw, pitch, roll = np.degrees([yaw, pitch, roll])\n \n return yaw, pitch, roll", "def getAngle(self):\n return math.radians(self.gyro.getAngle())", "def SoScXMLFlightControlTarget_yaw(camera: 'SoCamera', radians: 'float') -> \"void\":\n return _coin.SoScXMLFlightControlTarget_yaw(camera, radians)", "def angle(self):\n act_loc = self.thin_face.parent_thin.parent_lattice.z_line\n myo_loc = self.thick_face.get_axial_location(-1)\n ls = self.parent_lattice.lattice_spacing\n angle = np.arctan2(ls, act_loc-myo_loc)\n return angle", "def getOrientation(matrix=None,errorValue=(0,0,0)):\n if matrix==None:\n matrix=getRotationMatrix()\n if matrix==None:\n return errorValue \n yaw=atan2(matrix[0][1], matrix[1][1])\n pitch=asin(-matrix[2][1])\n roll=atan2(-matrix[2][0], matrix[2][2])\n return yaw,pitch,roll", "def arrival_angle(self, last_but_one_x, last_but_one_y, last_x, last_y):\n theta = aim_to_point(last_but_one_x, last_but_one_y, last_x, last_y)\n # print \"DOA = \" + str(math.degrees(theta))\n return theta", "def rotToYawPitchRoll(C):\n i = 2\n j = 1\n k = 0\n 
c_y = np.sqrt(C[i, i]**2 + C[j, i]**2)\n if c_y > 1e-15:\n r = np.arctan2(C[j, i], C[i, i])\n p = np.arctan2(-C[k, i], c_y)\n y = np.arctan2(C[k, j], C[k, k])\n else:\n r = 0\n p = np.arctan2(-C[k, i], c_y)\n y = np.arctan2(-C[j, k], C[j, j])\n return y, p, r", "def yaw_pitch_roll(self):\n \n return self.__tilt", "def yaw_from_quaternion(q: Quaternion) -> float:\n\n (_, _, yaw) = euler_from_quaternion([q.x, q.y, q.z, q.w])\n\n return yaw", "def angles(self):\n\n # in next line replace ???? with the right command and uncomment \n # self.state = ?????\n self.bus.write(self.state)\n\n try:\n # get the 12 bytes of data with bus.read\n # ????\n # and decode it with struct unpack\n # output must be a list with 3 values\n # output = ???\n except:\n output = None\n pass\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Featurize all docked structures.
def featurize_job(docked_compounds):
    # Instantiate copy of binana vector
    binana = Binana()
    feature_len = binana_num_features()
    feature_vectors = {}
    for count, compound in enumerate(docked_compounds):
        print "\nprocessing %d-th docked pdb %s" % (count, compound)
[ "def main():\n print \"***************************************************************************************\"\n print \" This test shows how to add StructureContainer objects together \"\n print \" A special test to double-check re-labeling ... add a big container to a 'small' one \"\n print \"*************************************************************************************** \\n\"\n\n p1 = Particle( [1.1, 1.1, 1.1], \"Si\", 2.0, 1.23)\n p2 = Particle( [2.2, 2.2, 2.2], \"C\", 1.0, 2.34)\n p3 = Particle( [3.3, 3.3, 3.3], \"C\", 1.0, 2.34)\n\n b1 = Bond( 1, 2, 1.111, \"hooke\")\n b2 = Bond( 2, 3, 2.222, \"hooke\")\n\n atoms1 = ParticleContainer()\n atoms1.put(p1)\n atoms1.put(p2)\n atoms1.put(p3)\n\n bonds1 = BondContainer()\n bonds1.put(b1)\n bonds1.put(b2)\n\n polymer1 = StructureContainer(atoms1, bonds1) # Complete structure 1 completely\n\n p1other = Particle( [1.11, 1.11, 1.11], \"C\", 1.0, 2.34)\n p2other = Particle( [2.22, 2.22, 2.22], \"Ar\", 2.0, 2.34)\n\n b1other = Bond( 1, 2, 1.1, \"hooke-2\") # Correct ptclIDs for second structure\n\n atoms2 = ParticleContainer()\n atoms2.put(p1other)\n atoms2.put(p2other)\n\n bonds2 = BondContainer()\n bonds2.put(b1other)\n\n polymer2 = StructureContainer(atoms2, bonds2) # Complete structure 1 completely\n\n del p1, p2, p3, p1other, p2other, b1, b2, b1other, atoms1, atoms2, bonds1, bonds2\n print \"\\n Cleaning memory for initial objects \\n\" \n\n print \"-------------------- Before adding --------------------\"\n print \"polymer1 = \", polymer1\n print \"polymer2 = \", polymer2\n print \" \"\n\n print \"-------------------- After adding --------------------\"\n # polymer1 += polymer2\n polymer2 += polymer1\n print \"polymer2 = \", polymer2", "def featurize_structure(self, df: pd.DataFrame) -> pd.DataFrame:\n\n if not self.structure_featurizers:\n return pd.DataFrame([])\n\n LOG.info(\"Applying structure featurizers...\")\n df = df.copy()\n df = self._fit_apply_featurizers(df, self.structure_featurizers, \"structure\")\n df.columns = df.columns.map('|'.join).str.strip('|')\n\n return df", "def _build_OceanWakeAndFoam_FluidContainers(editor = '', ocean_foam_template= '', ocean_wake_template = '', oceanSHD = CONST.OCEANDISPSHADER,\r\n wakeFluidShapeName = CONST.WAKE_FLUID_SHAPENODE, foamFluidShapeName = CONST.FOAM_FLUID_SHAPENODE):\r\n ## Gget fluid containers already in scene\r\n fluidContainers = cmds.ls(type= 'fluidTexture3D')\r\n debug(None, method = 'oceanBuilder._build_OceanWakeAndFoam_FluidContainers', message = 'fluidContainers: %s' % fluidContainers, verbose = False)\r\n flGrp = 'fluids_hrc'\r\n\r\n ## Create fluid group if it does not already exist\r\n if not cmds.objExists(flGrp):\r\n flGrp = cmds.group(em = True, name = flGrp)\r\n debug(None, method = 'oceanBuilder._build_OceanWakeAndFoam_FluidContainers', message = 'Built grp: %s' % flGrp, verbose = False)\r\n attrs = ['ty', 'sx', 'sy', 'sz', 'rx', 'rz']\r\n ## Lock attrs\r\n for eachAttr in attrs:\r\n cmds.setAttr (\"%s.%s\" % (flGrp, eachAttr), lock = True)\r\n cmds.setAttr (\"%s.%s\" % (flGrp, eachAttr), keyable = False , channelBox = False)\r\n debug(None, method = 'oceanBuilder._build_OceanWakeAndFoam_FluidContainers', message = 'Locked attrs for grp: %s' % flGrp, verbose = False)\r\n\r\n ## Set the last attrs\r\n cmds.setAttr (flGrp + \".ry\", k=False, cb= True)\r\n cmds.setAttr (flGrp + \".tx\", k=False, cb= True)\r\n cmds.setAttr (flGrp + \".tz\", k=False, cb= True)\r\n debug(None, method = 'oceanBuilder._build_OceanWakeAndFoam_FluidContainers', message = 'Hidden 
attrs for grp: %s' % flGrp, verbose = False)\r\n\r\n ## Add the type attr if it doesn't already exits\r\n if not cmds.objExists('%s.type' % flGrp):\r\n cmds.addAttr(flGrp, ln = 'type', dt = 'string')\r\n cmds.setAttr('%s.type' % flGrp, 'fx', type = 'string')\r\n debug(None, method = 'oceanBuilder._build_OceanWakeAndFoam_FluidContainers', message = 'Added attrs to grp: %s' % flGrp, verbose = False)\r\n\r\n ## Add custom attrs for fluid container scales\r\n if cmds.objExists('OCEAN_hrc'):\r\n try:\r\n cmds.addAttr('OCEAN_hrc', longName = 'containerWidth', at = \"long\",defaultValue = 50,)\r\n cmds.setAttr(\"OCEAN_hrc.containerWidth\", k=False, cb= True)\r\n cmds.addAttr('OCEAN_hrc', longName = 'containerLength', at = \"long\",defaultValue = 50)\r\n cmds.setAttr(\"OCEAN_hrc.containerLength\", k=False, cb= True)\r\n debug(None, method = 'oceanBuilder._build_OceanWakeAndFoam_FluidContainers', message = 'Added attrs for grp: OCEAN_hrc', verbose = False)\r\n except: ## already added\r\n pass\r\n\r\n if not cmds.objExists(foamFluidShapeName):\r\n debug(None, method = 'oceanBuilder._build_OceanWakeAndFoam_FluidContainers', message = 'Adding %s...' % foamFluidShapeName, verbose = False)\r\n foamFluid = fluidLib._create_FOAM_FluidTexture(oceanSHD, 20, ocean_foam_template, foamFluidShapeName)\r\n\r\n ## Now parent the fluid to the flGrp\r\n debug(None, method = 'oceanBuilder._build_OceanWakeAndFoam_FluidContainers', message = 'foamFluid: %s' % foamFluid, verbose = False)\r\n cmds.parent('%s' % foamFluid.split('Shape')[0], flGrp)\r\n\r\n ## Now connect the OCEAN_hrc groups attrs to the dimensions\r\n cmds.connectAttr(\"OCEAN_hrc.containerWidth\", '%s.dimensionsW' % foamFluid, f = True)\r\n cmds.connectAttr(\"OCEAN_hrc.containerLength\", '%s.dimensionsH' % foamFluid, f = True)\r\n\r\n if not cmds.objExists(wakeFluidShapeName):\r\n debug(None, method = 'oceanBuilder._build_OceanWakeAndFoam_FluidContainers', message = 'Adding %s...' 
% wakeFluidShapeName, verbose = False)\r\n wakeFluid = fluidLib._create_WAKE_FluidTexture(oceanSHD, 20, ocean_wake_template, wakeFluidShapeName)\r\n\r\n ## Now parent the fluid to the flGrp\r\n cmds.parent('%s' % wakeFluid.split('Shape')[0], flGrp)\r\n\r\n ## Now connect the OCEAN_hrc groups attrs to the dimensions\r\n cmds.connectAttr(\"OCEAN_hrc.containerWidth\", '%s.dimensionsW' % wakeFluid, f = True)\r\n cmds.connectAttr(\"OCEAN_hrc.containerLength\", '%s.dimensionsH' % wakeFluid, f = True)\r\n\r\n ## Move group to persp camera position in x and z\r\n getCam = cmds.modelEditor(editor, query = True, camera = True)\r\n position = cmds.camera(getCam, q=True, worldCenterOfInterest= True)\r\n try:\r\n cmds.setAttr (flGrp + \".tx\", position[0])\r\n cmds.setAttr (flGrp + \".tz\", position[2])\r\n except:\r\n pass\r\n\r\n ## Attrs to manage calculations on the fluid texturesc\r\n if not cmds.objExists(\"OCEAN_hrc.oceanCalcOnOff\"):\r\n cmds.addAttr('OCEAN_hrc', longName = 'oceanCalcOnOff', attributeType = 'float', min = 0, max =1, defaultValue = 1)\r\n cmds.setAttr('OCEAN_hrc.oceanCalcOnOff', keyable = True)\r\n\r\n ## Now build a multidiv to flip the value so it's not confusing to animators...\r\n if not cmds.objExists('wakesOnOffMultiDiv'):\r\n cmds.shadingNode('blendColors', asUtility = True, n = 'wakesOnOffMultiDiv',)\r\n\r\n try:\r\n cmds.connectAttr('OCEAN_hrc.oceanCalcOnOff', 'wakesOnOffMultiDiv.blender', f= True)\r\n except:\r\n pass\r\n\r\n cmds.setAttr('wakesOnOffMultiDiv.color1', 0, 0, 0, type = 'double3')\r\n cmds.setAttr('wakesOnOffMultiDiv.color2', 1, 1, 1, type = 'double3')\r\n\r\n ## Now connect these to the OCEAN_hrc eval calc attrs\r\n try:\r\n cmds.connectAttr(\"wakesOnOffMultiDiv.outputG\", \"%s.disableInteractiveEval\" % wakeFluidShapeName, f = True)\r\n cmds.connectAttr(\"wakesOnOffMultiDiv.outputG\", \"%s.disableInteractiveEval\" % foamFluidShapeName, f = True)\r\n except:\r\n pass", "def write_all_structures(self):\n\n _poscar = Poscar(self.original_substrate_structure)\n _poscar.write_file('bulk_substrate_POSCAR')\n\n\n _poscar = Poscar(self.original_film_structure)\n _poscar.write_file('bulk_film_POSCAR')\n\n _poscar = Poscar(self.strained_substrate)\n _poscar.write_file('strained_substrate_POSCAR')\n\n _poscar = Poscar(self.strained_film)\n _poscar.write_file('strained_film_POSCAR')\n\n for i, interface in enumerate(self.modified_substrate_structures):\n _poscar = Poscar(interface)\n _poscar.write_file('slab_substrate_%d_POSCAR'%i)\n\n for i, interface in enumerate(self.modified_film_structures):\n _poscar = Poscar(interface)\n _poscar.write_file('slab_film_%d_POSCAR' % i)\n \n for i, interface in enumerate(self.film_structures):\n _poscar = Poscar(interface)\n _poscar.write_file('slab_unit_film_%d_POSCAR' % i)\n\n for label, interface in zip(self.interface_labels, self.interfaces):\n _poscar = Poscar(interface)\n _poscar.write_file('interface_%s_POSCAR' % label.replace(\"/\", \"-\"))\n return", "def dock(self, \n tech_prepped_lig_list, \n tech_prepped_receptor_list, \n output_receptor_pdb, \n output_lig_mol, \n targ_info_dict={}):\n ligand_mae = tech_prepped_lig_list[0]\n grid_file = tech_prepped_receptor_list[0]\n allowed_precisions = ['SP','XP']\n precision = 'SP'\n if not(precision in allowed_precisions):\n logging.info('Invalid precision setting %s. Must be one of %r. 
Switching precision to %s' %(precision, allowed_precisions, allowed_precisions[0]))\n precision = allowed_precisions[0]\n dock_lines = []\n dock_lines += [\"GRIDFILE \\t %s \" %(grid_file)]\n dock_lines += [\"LIGANDFILE \\t %s \" %(ligand_mae)]\n dock_lines += [\"POSES_PER_LIG \\t 10 \"]\n if precision == 'SP':\n #dock_lines += [\"PRECISION \\t XP \"]\n dock_lines += [\"PRECISION \\t SP \"]\n elif precision == 'XP':\n dock_lines += [\"PRECISION \\t XP \"]\n dock_lines += [\"POSTDOCK_XP_DELE \\t 0.5 \"]\n dock_lines += [\"EXPANDED_SAMPLING \\t True \"]\n dock_lines += [\"WRITE_XP_DESC \\t False \"]\n with open('dock.in', \"w\") as f:\n f.write('\\n'.join(dock_lines))\n os.system(\"$SCHRODINGER/glide -WAIT dock.in\")\n \n\n\n ## Split into a receptor mae file and one ligand mae for each pose\n os.system('$SCHRODINGER/run split_structure.py -m ligand -many_files dock_pv.maegz split.mae')\n \n\n ## Convert the receptor mae into pdb\n # This pdb is one of the final outputs from docking\n \n os.system('$SCHRODINGER/utilities/structconvert split_receptor1.mae ' + output_receptor_pdb)\n \n ## Convert the ligand maes into mols\n docked_ligand_maes = glob.glob('./split_ligand*.mae')\n #print docked_ligand_maes\n for docked_ligand_mae in docked_ligand_maes:\n docked_ligand_mol = docked_ligand_mae.replace('.mae','.mol') \n os.system('$SCHRODINGER/utilities/structconvert %s %s' %(docked_ligand_mae, docked_ligand_mol))\n \n # Copy the top-ranked ligand mol to be one of the final outputs from this step\n os.system('cp split_ligand1.mol ' + output_lig_mol)\n \n return True", "def _transfer_fixed_struct_disps(self):\n # TODO : set this up for shape change from struct to aero disps\n return", "def _find_all_fs(self) -> Iterable[FeatureStructure]:\n all_fs = {}\n\n openlist = []\n for sofa in self.sofas:\n view = self.get_view(sofa.sofaID)\n openlist.extend(view.select_all())\n\n ts = self.typesystem\n while openlist:\n fs = openlist.pop(0)\n all_fs[fs.xmiID] = fs\n\n t = ts.get_type(fs.type)\n for feature in t.all_features:\n feature_name = feature.name\n\n if feature_name == \"sofa\":\n continue\n\n if (\n ts.is_primitive(feature.rangeTypeName)\n or ts.is_primitive_collection(feature.rangeTypeName)\n or ts.is_primitive_collection(fs.type)\n ):\n continue\n elif ts.is_collection(fs.type, feature):\n lst = getattr(fs, feature_name)\n if lst is None:\n continue\n\n for referenced_fs in lst:\n if referenced_fs.xmiID not in all_fs:\n openlist.append(referenced_fs)\n else:\n referenced_fs = getattr(fs, feature_name)\n if referenced_fs is None:\n continue\n\n if referenced_fs.xmiID not in all_fs:\n openlist.append(referenced_fs)\n\n # We do not want to return cas:NULL here as we handle serializing it later\n all_fs.pop(0, None)\n yield from all_fs.values()", "def cmd_unminimise_all(self):\r\n for w in self.windows:\r\n w.minimised = False\r\n self.layoutAll()", "def do_it_all(self):\n self.label_connections_in_space()\n self.connect_labels_in_time()\n self.relabel()\n labels = np.unique(self.labeled_feature[self.labeled_feature != 0])\n self.construct_feature_tree([(label, ) for label in labels])", "def freeze(self):\n\n for feature in self.features:\n feature.freeze()", "def containers():", "def setup_aerostruct(self):\n\n # Set the problem name if the user doesn't\n if 'prob_name' not in self.prob_dict.keys():\n self.prob_dict['prob_name'] = 'aerostruct'\n\n # Create the base root-level group\n root = Group()\n coupled = Group()\n\n # Create the problem and assign the root group\n self.prob = Problem()\n 
self.prob.root = root\n\n # Loop over each surface in the surfaces list\n for surface in self.surfaces:\n\n # Get the surface name and create a group to contain components\n # only for this surface\n name = surface['name']\n tmp_group = Group()\n\n # Strip the surface names from the desvars list and save this\n # modified list as self.desvars\n desvar_names = []\n for desvar in self.desvars.keys():\n\n # Check to make sure that the surface's name is in the design\n # variable and only add the desvar to the list if it corresponds\n # to this surface.\n if name[:-1] in desvar:\n desvar_names.append(''.join(desvar.split('.')[1:]))\n\n # Add independent variables that do not belong to a specific component\n indep_vars = []\n for var in surface['geo_vars']:\n if var in desvar_names or var in surface['initial_geo'] or 'thickness' in var:\n indep_vars.append((var, surface[var]))\n\n # Add components to include in the surface's group\n tmp_group.add('indep_vars',\n IndepVarComp(indep_vars),\n promotes=['*'])\n tmp_group.add('tube',\n MaterialsTube(surface),\n promotes=['*'])\n tmp_group.add('mesh',\n GeometryMesh(surface, self.desvars),\n promotes=['*'])\n tmp_group.add('struct_setup',\n SpatialBeamSetup(surface),\n promotes=['*'])\n\n # Add bspline components for active bspline geometric variables.\n # We only add the component if the corresponding variable is a desvar,\n # a special parameter (radius), or if the user or geometry provided\n # an initial distribution.\n for var in surface['bsp_vars']:\n if var in desvar_names or var in surface['initial_geo'] or 'thickness' in var:\n n_pts = surface['num_y']\n if var in ['thickness_cp', 'radius_cp']:\n n_pts -= 1\n trunc_var = var.split('_')[0]\n tmp_group.add(trunc_var + '_bsp',\n Bspline(var, trunc_var, surface['num_'+var], n_pts),\n promotes=['*'])\n\n # Add monotonic constraints for selected variables\n if surface['monotonic_con'] is not None:\n if type(surface['monotonic_con']) is not list:\n surface['monotonic_con'] = [surface['monotonic_con']]\n for var in surface['monotonic_con']:\n tmp_group.add('monotonic_' + var,\n MonotonicConstraint(var, surface), promotes=['*'])\n\n # Add tmp_group to the problem with the name of the surface.\n name_orig = name\n name = name[:-1]\n root.add(name, tmp_group, promotes=[])\n\n # Add components to the 'coupled' group for each surface.\n # The 'coupled' group must contain all components and parameters\n # needed to converge the aerostructural system.\n tmp_group = Group()\n tmp_group.add('def_mesh',\n TransferDisplacements(surface),\n promotes=['*'])\n tmp_group.add('aero_geom',\n VLMGeometry(surface),\n promotes=['*'])\n tmp_group.add('struct_states',\n SpatialBeamStates(surface),\n promotes=['*'])\n tmp_group.struct_states.ln_solver = LinearGaussSeidel()\n tmp_group.struct_states.ln_solver.options['atol'] = 1e-20\n\n name = name_orig\n coupled.add(name[:-1], tmp_group, promotes=[])\n\n # Add a loads component to the coupled group\n coupled.add(name_orig + 'loads', TransferLoads(surface), promotes=[])\n\n # Add a performance group which evaluates the data after solving\n # the coupled system\n tmp_group = Group()\n\n tmp_group.add('struct_funcs',\n SpatialBeamFunctionals(surface),\n promotes=['*'])\n tmp_group.add('aero_funcs',\n VLMFunctionals(surface, self.prob_dict),\n promotes=['*'])\n\n root.add(name_orig + 'perf', tmp_group, promotes=[\"rho\", \"v\", \"alpha\", \"re\", \"M\"])\n\n root.add_metadata(surface['name'] + 'yield_stress', surface['yield'])\n root.add_metadata(surface['name'] + 
'fem_origin', surface['fem_origin'])\n\n # Add a single 'aero_states' component for the whole system within the\n # coupled group.\n coupled.add('aero_states',\n VLMStates(self.surfaces),\n promotes=['v', 'alpha', 'rho'])\n\n # Explicitly connect parameters from each surface's group and the common\n # 'aero_states' group.\n for surface in self.surfaces:\n name = surface['name']\n\n root.connect(name[:-1] + '.K', 'coupled.' + name[:-1] + '.K')\n\n # Perform the connections with the modified names within the\n # 'aero_states' group.\n root.connect('coupled.' + name[:-1] + '.def_mesh', 'coupled.aero_states.' + name + 'def_mesh')\n root.connect('coupled.' + name[:-1] + '.b_pts', 'coupled.aero_states.' + name + 'b_pts')\n root.connect('coupled.' + name[:-1] + '.c_pts', 'coupled.aero_states.' + name + 'c_pts')\n root.connect('coupled.' + name[:-1] + '.normals', 'coupled.aero_states.' + name + 'normals')\n\n # Connect the results from 'aero_states' to the performance groups\n root.connect('coupled.aero_states.' + name + 'sec_forces', name + 'perf' + '.sec_forces')\n\n # Connect the results from 'coupled' to the performance groups\n root.connect('coupled.' + name[:-1] + '.def_mesh', 'coupled.' + name + 'loads.def_mesh')\n root.connect('coupled.aero_states.' + name + 'sec_forces', 'coupled.' + name + 'loads.sec_forces')\n\n # Connect the output of the loads component with the FEM\n # displacement parameter. This links the coupling within the coupled\n # group that necessitates the subgroup solver.\n root.connect('coupled.' + name + 'loads.loads', 'coupled.' + name[:-1] + '.loads')\n\n # Connect aerodyamic mesh to coupled group mesh\n root.connect(name[:-1] + '.mesh', 'coupled.' + name[:-1] + '.mesh')\n\n # Connect performance calculation variables\n root.connect(name[:-1] + '.radius', name + 'perf.radius')\n root.connect(name[:-1] + '.A', name + 'perf.A')\n root.connect(name[:-1] + '.thickness', name + 'perf.thickness')\n\n # Connection performance functional variables\n root.connect(name + 'perf.structural_weight', 'total_perf.' + name + 'structural_weight')\n root.connect(name + 'perf.L', 'total_perf.' + name + 'L')\n root.connect(name + 'perf.CL', 'total_perf.' + name + 'CL')\n root.connect(name + 'perf.CD', 'total_perf.' + name + 'CD')\n root.connect('coupled.aero_states.' + name + 'sec_forces', 'total_perf.' + name + 'sec_forces')\n\n # Connect parameters from the 'coupled' group to the performance\n # groups for the individual surfaces.\n root.connect(name[:-1] + '.nodes', name + 'perf.nodes')\n root.connect('coupled.' + name[:-1] + '.disp', name + 'perf.disp')\n root.connect('coupled.' + name[:-1] + '.S_ref', name + 'perf.S_ref')\n root.connect('coupled.' + name[:-1] + '.widths', name + 'perf.widths')\n root.connect('coupled.' + name[:-1] + '.chords', name + 'perf.chords')\n root.connect('coupled.' + name[:-1] + '.lengths', name + 'perf.lengths')\n root.connect('coupled.' + name[:-1] + '.cos_sweep', name + 'perf.cos_sweep')\n\n # Connect parameters from the 'coupled' group to the total performance group.\n root.connect('coupled.' + name[:-1] + '.S_ref', 'total_perf.' + name + 'S_ref')\n root.connect('coupled.' + name[:-1] + '.widths', 'total_perf.' + name + 'widths')\n root.connect('coupled.' + name[:-1] + '.chords', 'total_perf.' + name + 'chords')\n root.connect('coupled.' + name[:-1] + '.b_pts', 'total_perf.' + name + 'b_pts')\n root.connect(name + 'perf.cg_location', 'total_perf.' 
+ name + 'cg_location')\n\n # Set solver properties for the coupled group\n coupled.ln_solver = ScipyGMRES()\n coupled.ln_solver.preconditioner = LinearGaussSeidel()\n coupled.aero_states.ln_solver = LinearGaussSeidel()\n coupled.nl_solver = NLGaussSeidel()\n\n # This is only available in the most recent version of OpenMDAO.\n # It may help converge tightly coupled systems when using NLGS.\n try:\n coupled.nl_solver.options['use_aitken'] = True\n coupled.nl_solver.options['aitken_alpha_min'] = 0.01\n # coupled.nl_solver.options['aitken_alpha_max'] = 0.5\n except:\n pass\n\n if self.prob_dict['print_level'] == 2:\n coupled.ln_solver.options['iprint'] = 1\n if self.prob_dict['print_level']:\n coupled.nl_solver.options['iprint'] = 1\n\n # Add the coupled group to the root problem\n root.add('coupled', coupled, promotes=['v', 'alpha', 'rho'])\n\n # Add problem information as an independent variables component\n prob_vars = [('v', self.prob_dict['v']),\n ('alpha', self.prob_dict['alpha']),\n ('M', self.prob_dict['M']),\n ('re', self.prob_dict['Re']/self.prob_dict['reynolds_length']),\n ('rho', self.prob_dict['rho'])]\n\n root.add('prob_vars',\n IndepVarComp(prob_vars),\n promotes=['*'])\n\n # Add functionals to evaluate performance of the system.\n # Note that only the interesting results are promoted here; not all\n # of the parameters.\n root.add('total_perf',\n TotalPerformance(self.surfaces, self.prob_dict),\n promotes=['L_equals_W', 'fuelburn', 'CM', 'CL', 'CD', 'v', 'rho', 'cg', 'weighted_obj', 'total_weight'])\n\n # Actually set up the system\n self.setup_prob()", "def setup_struct(self):\n\n # Set the problem name if the user doesn't\n if 'prob_name' not in self.prob_dict.keys():\n self.prob_dict['prob_name'] = 'struct'\n\n # Create the base root-level group\n root = Group()\n\n # Create the problem and assign the root group\n self.prob = Problem()\n self.prob.root = root\n\n # Loop over each surface in the surfaces list\n for surface in self.surfaces:\n\n # Get the surface name and create a group to contain components\n # only for this surface.\n # This group's name is whatever the surface's name is.\n # The default is 'wing'.\n name = surface['name']\n tmp_group = Group()\n\n # Strip the surface names from the desvars list and save this\n # modified list as self.desvars\n desvar_names = []\n for desvar in self.desvars.keys():\n\n # Check to make sure that the surface's name is in the design\n # variable and only add the desvar to the list if it corresponds\n # to this surface.\n if name[:-1] in desvar:\n desvar_names.append(''.join(desvar.split('.')[1:]))\n\n # Add independent variables that do not belong to a specific component.\n # Note that these are the only ones necessary for structual-only\n # analysis and optimization.\n # Here we check and only add the variables that are desvars or a\n # special var, radius, which is necessary to compute weight.\n indep_vars = [('loads', surface['loads'])]\n for var in surface['geo_vars']:\n if var in desvar_names or 'thickness' in var or var in surface['initial_geo']:\n indep_vars.append((var, surface[var]))\n\n # Add structural components to the surface-specific group\n tmp_group.add('indep_vars',\n IndepVarComp(indep_vars),\n promotes=['*'])\n tmp_group.add('mesh',\n GeometryMesh(surface, self.desvars),\n promotes=['*'])\n tmp_group.add('tube',\n MaterialsTube(surface),\n promotes=['*'])\n tmp_group.add('struct_setup',\n SpatialBeamSetup(surface),\n promotes=['*'])\n tmp_group.add('struct_states',\n SpatialBeamStates(surface),\n 
promotes=['*'])\n tmp_group.add('struct_funcs',\n SpatialBeamFunctionals(surface),\n promotes=['*'])\n\n # Add bspline components for active bspline geometric variables.\n # We only add the component if the corresponding variable is a desvar\n # or special (radius).\n for var in surface['bsp_vars']:\n if var in desvar_names or var in surface['initial_geo'] or 'thickness' in var:\n n_pts = surface['num_y']\n if var in ['thickness_cp', 'radius_cp']:\n n_pts -= 1\n trunc_var = var.split('_')[0]\n tmp_group.add(trunc_var + '_bsp',\n Bspline(var, trunc_var, surface['num_'+var], n_pts),\n promotes=['*'])\n\n # Add tmp_group to the problem with the name of the surface.\n # The default is 'wing'.\n root.add(name[:-1], tmp_group, promotes=[])\n\n root.add_metadata(surface['name'] + 'yield_stress', surface['yield'])\n root.add_metadata(surface['name'] + 'fem_origin', surface['fem_origin'])\n\n # Actually set up the problem\n self.setup_prob()", "def pool_topological_types(self, re_compute_types=False):\n print(\"Pooling topological types...\")\n\n if re_compute_types:\n for bond in self.bonds:\n bond.compute_type()\n for angle in self.angles:\n angle.compute_type()\n for dihedral in self.dihedrals:\n dihedral.compute_type()\n for improper in self.impropers:\n improper.compute_type()\n\n self.atom_types = []\n for (index, atom) in enumerate(self.atoms, 1):\n atom.index = index\n if atom.type not in self.atom_types:\n self.atom_types.append(atom.type)\n\n self.bond_types = []\n for (index, bond) in enumerate(self.bonds, 1):\n for molecule in self.molecules:\n if any(atom in molecule.atoms for atom in bond.atoms):\n molecule.bonds.append(bond)\n bond.index = index\n if bond.type not in self.bond_types:\n self.bond_types.append(bond.type)\n\n self.angle_types = []\n for (index, angle) in enumerate(self.angles, 1):\n for molecule in self.molecules:\n if any(atom in molecule.atoms for atom in angle.atoms):\n molecule.angles.append(angle)\n angle.index = index\n if angle.type not in self.angle_types:\n self.angle_types.append(angle.type)\n\n self.dihedral_types = []\n for (index, dihedral) in enumerate(self.dihedrals, 1):\n for molecule in self.molecules:\n if any(atom in molecule.atoms for atom in dihedral.atoms):\n molecule.dihedrals.append(dihedral)\n dihedral.index = index\n if dihedral.type not in self.dihedral_types:\n self.dihedral_types.append(dihedral.type)\n\n self.improper_types = []\n for (index, improper) in enumerate(self.impropers, 1):\n for molecule in self.molecules:\n if any(atom in molecule.atoms for atom in improper.atoms):\n molecule.impropers.append(improper)\n improper.index = index\n if improper.type not in self.improper_types:\n self.improper_types.append(improper.type)\n\n for (index, molecule) in enumerate(self.molecules, 1):\n molecule.index = index\n # no type computing for molecules\n\n print(\"Topological types pooled\")", "def sanitize(self):\n\n for i in range(3):\n count_before = len(self.graph.nodes)\n self.graph.cleanup().toposort()\n try:\n for node in self.graph.nodes:\n for o in node.outputs:\n o.shape = None\n model = gs.export_onnx(self.graph)\n model = shape_inference.infer_shapes(model)\n self.graph = gs.import_onnx(model)\n except Exception as e:\n log.info(\"Shape inference could not be performed at this time:\\n{}\".format(e))\n try:\n self.graph.fold_constants(fold_shapes=True)\n except TypeError as e:\n log.error(\"This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your \"\n \"onnx_graphsurgeon module. 
Error:\\n{}\".format(e))\n raise\n\n count_after = len(self.graph.nodes)\n if count_before == count_after:\n # No new folding occurred in this iteration, so we can stop for now.\n break", "def fill_BC_all(self):\n for name in self.vars:\n self.fill_BC(name)", "def elaborate( self ):\n\n # Initialize data structure to hold all model classes in the design\n self._model_classes = set()\n\n # Recursively elaborate each model in the design, starting with top\n self._recurse_elaborate( self, 'top' )\n\n # Visit all connections in the design, set directionality\n self._recurse_connections()", "def initialize_all_widgets(self):\n self.hidden = True\n \n self.directory_view_window = QtWidgets.QDockWidget(\"Directory view\", self.main_form)\n self.main_form.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.directory_view_window)\n self.directory_view_window.visibilityChanged.connect(self.directory_view_window_visibility_callback)\n\n self.current_working_directory = None\n\n self.filesystem = QtWidgets.QFileSystemModel()\n self.filesystem.setIconProvider(IconProviderWidget())\n\n self.directory_view_tree = QtWidgets.QTreeView()\n self.directory_view_tree.setAnimated(False)\n self.directory_view_tree.clicked.connect(self.directory_view_tree_item_clicked_callback)\n self.directory_view_tree.setStyleSheet(\"QTreeView { border: 1px solid lightgrey; }\")\n self.directory_view_window.setWidget(self.directory_view_tree)\n self.directory_view_window.hide()\n\n self.directory_view_tree.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.directory_view_tree.customContextMenuRequested.connect(self.custom_menu_context)", "def _dump_power_straps_for_hardmacros(self) -> None:\n check_abut = self.get_setting(\"par.power_straps_abutment\")\n\n output = [] # type: List[Dict[str, Any]]\n misaligned_insts = {} # type: Dict[str, List[str]]\n\n # Valid orientations based on layer direction\n valid_orients = {\"vertical\": [\"r0\", \"mx\"], \"horizontal\": [\"r0\", \"my\"]}\n\n # Get masters and process all instances of each\n masters = set(map(lambda m: m[\"master\"], self._hardmacro_power_straps))\n\n for master in masters:\n insts = list(filter(lambda m: m[\"master\"] == master, self._hardmacro_power_straps))\n # All instances of this master should specify the same top_layer\n if len(set(map(lambda m: m[\"top_layer\"], insts))) > 1:\n self.logger.error(f\"Some instances of hardmacro {master} have conflicting \\\"top_layer\\\" fields. Check your placement constraints.\")\n\n # Get the parameters of top_layer + 1 first (offset doesn't matter)\n above_insts = list(filter(lambda m: m[\"top_layer\"] != m[\"layer\"], insts))\n copy_fields = [\"layer\", \"direction\", \"net_order\", \"width\", \"spacing\", \"group_pitch\"]\n if len(above_insts) > 0: # in some cases top_layer == top layer in power strap API\n above_desc = {k: above_insts[0][k] for k in copy_fields}\n elif not check_abut:\n self.logger.error(f\"par.power_straps_abutment is False, but you do not have power straps generated on layer {above_insts[0]['layer']} above instances of module {master}! 
Double check that you will supply power to them.\")\n\n # Filter for top_layer == layer and valid/bad orientation\n abut_insts = list(filter(lambda m: m[\"top_layer\"] == m[\"layer\"] and\n m[\"orientation\"] in valid_orients[m[\"direction\"]],\n insts))\n bad_orient_insts = list(filter(lambda m: m[\"top_layer\"] == m[\"layer\"] and\n m[\"orientation\"] not in valid_orients[m[\"direction\"]],\n insts))\n\n variant_cnt = 0\n while len(abut_insts) + len(bad_orient_insts) > 0:\n # Get offset value with most occurrences in abut_insts first, then bad_orient_insts\n if len(abut_insts) > 0:\n max_count_offset = mode(map(lambda m: m[\"offset\"], abut_insts))\n insts = list(filter(lambda m: m[\"offset\"] == max_count_offset, abut_insts))\n abut_insts = list(filter(lambda m: m[\"offset\"] != max_count_offset, abut_insts))\n else:\n max_count_offset = mode(map(lambda m: m[\"offset\"], bad_orient_insts))\n insts = list(filter(lambda m: m[\"offset\"] == max_count_offset, bad_orient_insts))\n bad_orient_insts = list(filter(lambda m: m[\"offset\"] != max_count_offset, bad_orient_insts))\n\n # Generate description\n master_module = master\n if variant_cnt > 0: # bad module placement\n if master not in misaligned_insts:\n misaligned_insts[master] = list(map(lambda m: m[\"path\"], insts))\n else:\n misaligned_insts[master].extend(list(map(lambda m: m[\"path\"], insts)))\n master_module = master_module + \"_\" + str(variant_cnt)\n\n abut_desc = {k: insts[0][k] for k in copy_fields}\n abut_desc[\"offset\"] = max_count_offset\n abut_desc[\"inst_paths\"] = list(map(lambda m: m[\"path\"], insts))\n abut_desc[\"inst_orientations\"] = list(map(lambda m: m[\"orientation\"], insts))\n\n if len(above_insts) > 0:\n above_desc[\"inst_paths\"] = list(map(lambda m: m[\"path\"], insts))\n above_desc[\"inst_orientations\"] = list(map(lambda m: m[\"orientation\"], insts))\n output.append({master_module: [abut_desc, above_desc.copy()]})\n else:\n output.append({master_module: [abut_desc]})\n\n variant_cnt += 1\n\n if check_abut and misaligned_insts:\n self.logger.error(\"par.power_straps_abutment is True, but multiple instances of the same hardmacro \"\n \"are not placed on its \\\"top_layer\\\" power strap pitch or are mirrored across the axis parallel \"\n \"to that layer's direction! Adjust them for proper power strap abutment or generate alternate \"\n \"versions of your hardmacros with different top layer power patterns. Offending masters and \"\n f\"instances are:\\n{json.dumps(misaligned_insts, indent=4)}\")\n\n json_str = json.dumps(output, indent=4)\n with open(os.path.join(self.run_dir, \"power_straps.json\"), 'w') as f:\n f.write(json_str)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a ``networkx.Graph`` from a skeleton DataFrame.
def skeleton_df_to_nx(df, with_attributes=True, directed=True, with_distances=False, virtual_roots=False, root_dist=np.inf):
    if directed:
        g = nx.DiGraph()
    else:
        g = nx.Graph()

    if with_attributes:
        for row in df.itertuples(index=False):
            g.add_node(row.rowId, x=row.x, y=row.y, z=row.z, radius=row.radius)
    else:
        g.add_nodes_from(df['rowId'].sort_values())

    if not virtual_roots:
        # Instead of assuming that the root node refers to a special parent (e.g. -1),
        # we determine the root_parents by inspection.
        root_parents = pd.Index(df['link'].unique()).difference(df['rowId'].unique())
        root_parents

    if with_distances:
        edges_df = df[['rowId', 'link']].copy()
        edges_df['distance'] = calc_segment_distances(df, root_dist)
        if not virtual_roots:
            edges_df = edges_df.query('link not in @root_parents')
        edges_df = edges_df.sort_values(['rowId', 'link'])
        g.add_weighted_edges_from(edges_df.itertuples(index=False), 'distance')
    else:
        if not virtual_roots:
            edges_df = df.query('link not in @root_parents')
            edges_df = edges_df[['rowId', 'link']]
        edges_df = edges_df.sort_values(['rowId', 'link'])
        g.add_edges_from(edges_df.values)

    return g
[ "def _reorient_skeleton(skeleton_df, root, root_parent=-1, g=None):\n g = g or skeleton_df_to_nx(skeleton_df, False, False)\n assert isinstance(g, nx.Graph) and not isinstance(g, nx.DiGraph), \\\n \"skeleton graph must be undirected\"\n\n edges = list(nx.dfs_edges(g, source=root))\n\n # If the graph has more than one connected component,\n # the remaining components have arbitrary roots\n if len(edges) != len(g.edges):\n for cc in nx.connected_components(g):\n if root not in cc:\n edges += list(nx.dfs_edges(g, source=cc.pop()))\n\n edges = pd.DataFrame(edges, columns=['link', 'rowId']) # parent, child\n edges = edges.set_index('rowId')['link']\n\n # Replace 'link' (parent) column using DFS edges\n skeleton_df['link'] = skeleton_df['rowId'].map(edges).fillna(root_parent).astype(int)", "def attach_synapses_to_skeleton(skeleton_df, synapses_df):\n skeleton_df = skeleton_df.copy(deep=False).reset_index(drop=True)\n synapses_df = synapses_df.copy(deep=False).reset_index(drop=True)\n\n skeleton_df['structure'] = 'neurite'\n synapses_df['structure'] = synapses_df['type']\n synapses_df['radius'] = 0.0\n\n kd = cKDTree(skeleton_df[[*'xyz']].values)\n _, indexes = kd.query(synapses_df[[*'xyz']].values)\n\n synapses_df['link'] = skeleton_df.loc[indexes, 'rowId'].values\n synapses_df['rowId'] = synapses_df.index + skeleton_df['rowId'].max() + 1\n\n relevant_cols = ['rowId', *'xyz', 'radius', 'link', 'structure']\n synapses_df = synapses_df[relevant_cols]\n skeleton_df = skeleton_df[relevant_cols]\n\n combined = pd.concat((skeleton_df, synapses_df), ignore_index=True)\n combined['structure'] = pd.Categorical(combined['structure'])\n return combined", "def buildGraph(self, graphDF):\n for index, vertex in graphDF.iterrows():\n key = vertex['v1']\n otherVertex = vertex['v2']\n weight = vertex['cost']\n\n # create new vertex if doesn't exist in graph\n if key not in self.graph:\n node = self.addNode(key)\n if otherVertex not in self.graph:\n otherNode = self.addNode(otherVertex)\n\n node = self.graph[key]\n node.addEdge(otherVertex, weight)\n\n otherNode = self.graph[otherVertex]\n otherNode.addEdge(key, weight)", "def create_nodes_from_data_frame(tx, df, labels=None):\n create_nodes(tx, df.itertuples(index=False, name=None),\n labels=labels, keys=df.keys())", "def construct_graph(self):\r\n\t\tedges = self.generate_edges()\r\n\t\tfor edge in edges:\r\n\t\t\tself.insert_edge(edge[0],edge[1],edge[2]) # adds all the edges to graph\r", "def upsample_skeleton(skeleton_df, max_segment_length):\n if len(skeleton_df) in (0, 1) or (skeleton_df['link'] == -1).all():\n # Can't upsample a skeleton with no child-parent segments\n return skeleton_df\n\n seg_df = _skeleton_segments(skeleton_df)\n seg_df = seg_df.loc[seg_df['length'] > max_segment_length]\n\n if len(seg_df) == 0:\n return skeleton_df\n\n I0 = seg_df['rowId']\n I1 = seg_df['rowId_parent']\n next_id = 1 + skeleton_df['rowId'].max()\n\n # It's best to minimize the number of times we call np.linspace(),\n # so we interpolate points and radii in conjunction with a single array.\n PR0 = seg_df[[*'xyz', 'radius']].values\n PR1 = seg_df[['x_parent', 'y_parent', 'z_parent', 'radius_parent']].values\n\n D = seg_df['length']\n\n segment_nodes = []\n for i0, i1, pr0, pr1, d in zip(I0, I1, PR0, PR1, D):\n # Number of nodes from child (i0) to parent (i1)\n # excluding the parent (which we won't edit).\n n = int(np.ceil(d / max_segment_length))\n\n # IDs of the original child and new intermediates going towards\n # the original parent, but not the parent itself.\n I 
= [i0, *range(next_id, next_id + n - 1)] # noqa\n next_id += n - 1\n\n # 'link' (parent id) for the original child and new intermediates\n L = I[1:] + [i1]\n\n # Interpolate points and radii\n PR = np.linspace(pr0, pr1, n, endpoint=False)\n\n assert len(PR) == len(I) == len(L)\n segment_nodes.append((I, *PR.T, L))\n\n segment_cols = [*zip(*segment_nodes)]\n full_cols = [np.concatenate(a) for a in segment_cols]\n new_df = pd.DataFrame(dict(zip(['rowId', *'xyz', 'radius', 'link'], full_cols)))\n\n # Expand the DataFrame to make room for the new rows,\n # then copy them over.\n all_rowIds = np.sort(pd.concat((skeleton_df['rowId'], new_df['rowId'])).unique())\n dtypes = skeleton_df.dtypes\n skeleton_df = skeleton_df.set_index('rowId').reindex(all_rowIds)\n skeleton_df.update(new_df.set_index('rowId'))\n\n # Restore to standard column form.\n return skeleton_df.reset_index().astype(dtypes)", "def skeleton(DAG):\n return DAG.to_undirected()", "def pull_graph(self):\n res = self.neo4j_run_cypher_query('match (n)-[r]-(m) where m<>n and type(r)<>\"contraindicated_for\" and type(r)<>\"indicated_for\" with distinct n as node1, m as node2 return node1.id as source, node2.id as target')\n df = pd.DataFrame(res.data())\n return df", "def generate_graph(self):\n\n for i, (h1, h2, d) in enumerate(self.helix_graph.graph.edges(data=True)): # noqa: E501\n # It is easier to just add in all the nodes first, and then remove\n # the 'start' and 'end' dummy nodes at the end.\n self.graph.add_node(f'H{h1}',\n nucleotides=self.helix_graph.graph.node[h2]['nucleotides'], # noqa: E501\n bipartite='helix')\n self.graph.add_node(f'H{h2}',\n nucleotides=self.helix_graph.graph.node[h2]['nucleotides'], # noqa: E501\n bipartite='helix')\n\n # Add in the bulge nodes. These are what were the edges in the\n # helix_graph.\n self.graph.add_node(f'B{i}',\n nucleotides=d['nucleotides'],\n bipartite='bulge')\n\n self.graph.add_edge(f'H{h1}', f'B{i}')\n self.graph.add_edge(f'H{h2}', f'B{i}')\n\n self.graph.remove_node(f'Hstart')\n self.graph.remove_node(f'Hend')", "def extract_graph_from_skeleton(sk): \n #used/unsused\n sk_used = np.zeros_like(sk)\n sk_unused = np.copy(sk)\n #root node\n root_position = findroot(sk)\n print('root_position',root_position)\n root = Branch(pixels=[root_position],name='root')\n setvalue(sk_used,root_position,1)\n setvalue(sk_unused,root_position,0)\n #extract rood edge\n edgelist,branchlist,endlist = next_pixels(root_position,sk_used,sk_unused)\n #assert len(edgelist)==1,'root has more than 1 branchedge'################!!!!!!!!\n rootedge = BranchEdge(edgelist[:1])\n while True:\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if edgelist:\n rootedge.add_pixels(edgelist)\n else:\n break\n assert len(branchlist)>=1,'root has no children'\n #first node(perhaps split LM and RM)\n branch1 = Branch(pixels=branchlist)\n root.add_child(branch1,rootedge)\n branch_startpoint_list = [branch1]##BFS\n edge_startpoint_list = []\n while branch_startpoint_list:\n branch1 = branch_startpoint_list.pop(0)\n edgelist,branchlist,endlist = next_pixels(branch1.pixels[0],sk_used,sk_unused)\n edge_startpoint_list = edgelist\n branch_cumulate_list = branchlist\n while branch_cumulate_list:#cumulate all the branch pixels(>3)\n bposition = branch_cumulate_list.pop(0)\n branch1.add_pixel(bposition)\n edgelist,branchlist,endlist = next_pixels(bposition,sk_used,sk_unused)\n edge_startpoint_list += edgelist\n branch_cumulate_list += branchlist\n #for each connected edge start,trace until next node\n for 
edge in edge_startpoint_list:\n branchedge1 = BranchEdge([edge])\n edgelist,branchlist,endlist = next_pixels(edge,sk_used,sk_unused)\n while edgelist:#trace until next node\n #print('edgelist',edgelist)\n branchedge1.add_pixels(edgelist)\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if branchlist:#next branch\n branch2 = Branch(pixels=branchlist)\n ##if branchedge too short, do nothing\n branch1.add_child(branch2,branchedge1)\n branch_startpoint_list.append(branch2)\n elif endlist:#end node\n branch2 = Branch(pixels=endlist)\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n else:#end without endlist (pixel value=3)\n branch2 = Branch(pixels=branchedge1.pixels[-1:])\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n #if this branch has only one edge, merge(may throw assert error)\n if len(branch1.edges) == 1:\n branch1.edges[0].endbracnch.rank-=1\n branch1.parent_edge.endbracnch = branch1.edges[0].endbracnch\n branch1.parent_edge.add_pixels_nocontinious(branch1.pixels)\n branch1.parent_edge.add_pixels(branch1.edges[0].pixels)\n branch1.edges[0].endbracnch.parent_edge = branch1.parent_edge\n return root", "def heal_skeleton(skeleton_df, max_distance=np.inf, root_parent=None):\n if max_distance is True:\n max_distance = np.inf\n\n if not max_distance:\n max_distance = 0.0\n\n if root_parent is None:\n root_parent = -1\n else:\n # Fast path to exit early if we can easily check the number of roots.\n num_roots = (skeleton_df['link'] == root_parent).sum()\n if num_roots == 1:\n # There's only one root and therefore only one component.\n # No healing necessary.\n return skeleton_df\n\n skeleton_df = skeleton_df.sort_values('rowId', ignore_index=True)\n g = skeleton_df_to_nx(skeleton_df, False, False)\n\n # Extract each fragment's rows and construct a KD-Tree\n Fragment = namedtuple('Fragment', ['frag_id', 'df', 'kd'])\n fragments = []\n for frag_id, cc in enumerate(nx.connected_components(g)):\n if len(cc) == len(skeleton_df):\n # There's only one component -- no healing necessary\n return skeleton_df\n df = skeleton_df.query('rowId in @cc')\n kd = cKDTree(df[[*'xyz']].values)\n fragments.append( Fragment(frag_id, df, kd) )\n\n # Sort from big-to-small, so the calculations below use a\n # KD tree for the larger point set in every fragment pair.\n fragments = sorted(fragments, key=lambda frag: -len(frag.df))\n\n # We could use the full graph and connect all\n # fragment pairs at their nearest neighbors,\n # but it's faster to treat each entire fragment as\n # a single node and run MST on that quotient graph,\n # which is tiny.\n frag_graph = nx.Graph()\n for frag_a, frag_b in combinations(fragments, 2):\n coords_b = frag_b.df[[*'xyz']].values\n distances, indexes = frag_a.kd.query(coords_b)\n\n index_b = np.argmin(distances)\n index_a = indexes[index_b]\n\n node_a = frag_a.df['rowId'].iloc[index_a]\n node_b = frag_b.df['rowId'].iloc[index_b]\n dist_ab = distances[index_b]\n\n # Add edge from one fragment to another,\n # but keep track of which fine-grained skeleton\n # nodes were used to calculate distance.\n frag_graph.add_edge( frag_a.frag_id, frag_b.frag_id,\n node_a=node_a, node_b=node_b,\n distance=dist_ab )\n\n # Compute inter-fragment MST edges\n frag_edges = nx.minimum_spanning_edges(frag_graph, weight='distance', data=True)\n\n # For each inter-fragment edge, add the corresponding\n # fine-grained edge between skeleton nodes in the original graph.\n 
omit_edges = []\n for _u, _v, d in frag_edges:\n g.add_edge(d['node_a'], d['node_b'])\n if d['distance'] > max_distance:\n omit_edges.append((d['node_a'], d['node_b']))\n\n # Traverse in depth-first order to compute edges for final tree\n root = skeleton_df['rowId'].iloc[0]\n\n # Replace 'link' (parent) column using MST edges\n _reorient_skeleton(skeleton_df, root, root_parent, g=g)\n assert (skeleton_df['link'] == root_parent).sum() == 1\n assert skeleton_df['link'].iloc[0] == root_parent\n\n # Delete edges that violated max_distance\n if omit_edges:\n # Make sure this is invariant to edge direction (check both directions).\n omit_edges = omit_edges + [(b, a) for (a, b) in omit_edges]\n omit_df = pd.DataFrame(omit_edges, columns=['rowId', 'link'])\n omit_df['omit_link'] = -1\n\n # Remove links for omitted edges (convert child node to a new root).\n skeleton_df = skeleton_df.merge(omit_df, 'left', on=['rowId', 'link'])\n skeleton_df['link'].update(skeleton_df['omit_link'])\n del skeleton_df['omit_link']\n\n return skeleton_df", "def _fetch_hemibrain_skeleton(hemi_body):\n from requests import HTTPError\n from tqdm import tqdm\n from neuclease.dvid import fetch_skeleton\n\n try:\n df = fetch_skeleton(*Hemibrain_v12, 'segmentation_skeletons', hemi_body, 'pandas')\n df['hemibrain_body'] = hemi_body\n df['source'] = 'skeleton'\n return df\n except HTTPError:\n with tqdm.external_write_mode():\n logger.error(f\"Failed to fetch skeleton for body {hemi_body}\")\n return None", "def _skeleton_segments(skeleton_df):\n segment_df = skeleton_df.merge(skeleton_df[['rowId', 'link', *'xyz', 'radius']],\n 'inner',\n left_on='link',\n right_on='rowId',\n suffixes=['', '_parent'])\n\n child_points = segment_df[[*'xyz']].values\n parent_points = segment_df[['x_parent', 'y_parent', 'z_parent']].values\n segment_df['length'] = np.linalg.norm(child_points - parent_points, axis=1)\n return segment_df", "def from_parmed(cls, structure: Structure) -> nx.Graph:\n topology_graph = cls()\n for atom in structure.atoms:\n if atom.name.startswith('_'):\n atomic_number = None\n element = None\n else:\n atomic_number = atom.atomic_number\n element = atom.element_name\n\n topology_graph.add_atom(\n name=atom.name,\n index=atom.idx,\n atomic_number=atomic_number,\n element=element,\n )\n\n for bond in structure.bonds:\n topology_graph.add_bond(bond.atom1.idx, bond.atom2.idx)\n\n return topology_graph", "def makeGraph(self):\n r = self.get_rows()\n c = self.get_cols()\n\n #first of all... 
initializing the knights and storing them as initial nodes of the graph\n for k in self._knights:\n kgt = self.setGraph().insertNode(k.get_position(), k)\n self._knights_nodes.append(kgt) #storing the list of knights' nodes\n #node with a knight: knight_position + knight_weight\n k.completeTour(r, c) #calculating the complete tour for every knight\n for knight in self._knights:\n for step in knight.getMoves():\n move_from = step[0]\n move_to = step[1]\n node = self.setGraph().insertNode(move_from)\n moveNode = self.setGraph().insertNode(move_to)\n self.setGraph().linkNode(node, moveNode)\n knight.refreshBuffer() #just to free some memory...", "def nx_graph_with_data(self) -> nx.DiGraph:\n g = nx.DiGraph()\n g.add_nodes_from([n for n in self.nodes.items()])\n g.add_edges_from(set([(e.source.node_id, e.destination.node_id) for e in self.edges]))\n return g", "def build_graph(self):\n G = nx.Graph()\n for cell in self.board.get_cells():\n G.add_node(cell)\n # Add all edges from cell to its neighbours\n neighbours = [(cell, neighbour[\"cell\"]) for neighbour in cell.get_neighbours()]\n G.add_edges_from(neighbours)\n return G", "def construct_graph(weights, multigraph=True):\n from dvidutils import LabelMapper\n assert weights.index.nlevels == 2, \\\n \"Please pass a series, indexed by e.g. [body_pre, body_post]\"\n weights = weights.astype(np.int32)\n\n body_edges = weights.reset_index().iloc[:, :2].values.astype(np.uint64)\n sorted_bodies = np.sort(pd.unique(body_edges.reshape(-1)))\n\n vertexes = np.arange(len(sorted_bodies), dtype=np.uint32)\n vertex_mapper = LabelMapper(sorted_bodies.astype(np.uint64), vertexes)\n edges = vertex_mapper.apply(body_edges)\n\n g = gt.Graph(directed=True)\n g.add_vertex(np.uint32(len(vertexes)))\n\n if multigraph:\n edges = np.repeat(edges, weights.values, axis=0)\n g.add_edge_list(edges)\n else:\n g.add_edge_list(edges)\n g.ep[\"weight\"] = g.new_edge_property(\"int\")\n g.ep[\"weight\"].a = weights.values\n\n return g, sorted_bodies", "def to_graph(self):\n\n if self.is_single_matrix:\n if self.matrix_type == \"directed\":\n G = nx.DiGraph(self.squareform())\n else:\n G = nx.Graph(self.squareform())\n if self.labels:\n labels = {x: y for x, y in zip(G.nodes, self.labels)}\n nx.relabel_nodes(G, labels, copy=False)\n return G\n else:\n raise NotImplementedError(\n \"This function currently only works on \" \"single matrices.\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each node (row) in the given skeleton DataFrame, compute the Euclidean distance from the node to its parent (link) node.
def calc_segment_distances(df, root_dist=np.inf): # Append parent (link) columns to each row by matching # each row's 'link' ID with the parent's 'rowId'. edges_df = df[['rowId', 'link', *'xyz']].merge( df[['rowId', *'xyz']], 'left', left_on='link', right_on='rowId', suffixes=['', '_link']) diff = edges_df[[*'xyz']] - edges_df[['x_link', 'y_link', 'z_link']].values distances = np.linalg.norm(diff, axis=1).astype(np.float32) distances[np.isnan(distances)] = root_dist return distances
[ "def distances_from_root(df):\n g = skeleton_df_to_nx(df, directed=False, with_distances=True, virtual_roots=True, root_dist=0.0)\n d = nx.shortest_path_length(g, -1, weight='distance')\n d = pd.Series(d, name='distance').rename_axis('rowId')\n df = df.merge(d, 'left', on='rowId')\n return df", "def _skeleton_segments(skeleton_df):\n segment_df = skeleton_df.merge(skeleton_df[['rowId', 'link', *'xyz', 'radius']],\n 'inner',\n left_on='link',\n right_on='rowId',\n suffixes=['', '_parent'])\n\n child_points = segment_df[[*'xyz']].values\n parent_points = segment_df[['x_parent', 'y_parent', 'z_parent']].values\n segment_df['length'] = np.linalg.norm(child_points - parent_points, axis=1)\n return segment_df", "def heal_skeleton(skeleton_df, max_distance=np.inf, root_parent=None):\n if max_distance is True:\n max_distance = np.inf\n\n if not max_distance:\n max_distance = 0.0\n\n if root_parent is None:\n root_parent = -1\n else:\n # Fast path to exit early if we can easily check the number of roots.\n num_roots = (skeleton_df['link'] == root_parent).sum()\n if num_roots == 1:\n # There's only one root and therefore only one component.\n # No healing necessary.\n return skeleton_df\n\n skeleton_df = skeleton_df.sort_values('rowId', ignore_index=True)\n g = skeleton_df_to_nx(skeleton_df, False, False)\n\n # Extract each fragment's rows and construct a KD-Tree\n Fragment = namedtuple('Fragment', ['frag_id', 'df', 'kd'])\n fragments = []\n for frag_id, cc in enumerate(nx.connected_components(g)):\n if len(cc) == len(skeleton_df):\n # There's only one component -- no healing necessary\n return skeleton_df\n df = skeleton_df.query('rowId in @cc')\n kd = cKDTree(df[[*'xyz']].values)\n fragments.append( Fragment(frag_id, df, kd) )\n\n # Sort from big-to-small, so the calculations below use a\n # KD tree for the larger point set in every fragment pair.\n fragments = sorted(fragments, key=lambda frag: -len(frag.df))\n\n # We could use the full graph and connect all\n # fragment pairs at their nearest neighbors,\n # but it's faster to treat each entire fragment as\n # a single node and run MST on that quotient graph,\n # which is tiny.\n frag_graph = nx.Graph()\n for frag_a, frag_b in combinations(fragments, 2):\n coords_b = frag_b.df[[*'xyz']].values\n distances, indexes = frag_a.kd.query(coords_b)\n\n index_b = np.argmin(distances)\n index_a = indexes[index_b]\n\n node_a = frag_a.df['rowId'].iloc[index_a]\n node_b = frag_b.df['rowId'].iloc[index_b]\n dist_ab = distances[index_b]\n\n # Add edge from one fragment to another,\n # but keep track of which fine-grained skeleton\n # nodes were used to calculate distance.\n frag_graph.add_edge( frag_a.frag_id, frag_b.frag_id,\n node_a=node_a, node_b=node_b,\n distance=dist_ab )\n\n # Compute inter-fragment MST edges\n frag_edges = nx.minimum_spanning_edges(frag_graph, weight='distance', data=True)\n\n # For each inter-fragment edge, add the corresponding\n # fine-grained edge between skeleton nodes in the original graph.\n omit_edges = []\n for _u, _v, d in frag_edges:\n g.add_edge(d['node_a'], d['node_b'])\n if d['distance'] > max_distance:\n omit_edges.append((d['node_a'], d['node_b']))\n\n # Traverse in depth-first order to compute edges for final tree\n root = skeleton_df['rowId'].iloc[0]\n\n # Replace 'link' (parent) column using MST edges\n _reorient_skeleton(skeleton_df, root, root_parent, g=g)\n assert (skeleton_df['link'] == root_parent).sum() == 1\n assert skeleton_df['link'].iloc[0] == root_parent\n\n # Delete edges that violated max_distance\n if 
omit_edges:\n # Make sure this is invariant to edge direction (check both directions).\n omit_edges = omit_edges + [(b, a) for (a, b) in omit_edges]\n omit_df = pd.DataFrame(omit_edges, columns=['rowId', 'link'])\n omit_df['omit_link'] = -1\n\n # Remove links for omitted edges (convert child node to a new root).\n skeleton_df = skeleton_df.merge(omit_df, 'left', on=['rowId', 'link'])\n skeleton_df['link'].update(skeleton_df['omit_link'])\n del skeleton_df['omit_link']\n\n return skeleton_df", "def distance_from_node(self,node):\n d, node = self.tree.query(self.nodes[node])\n # distance=pool.map(lambda a: np.linalg.norm(a-self.nodes[node]),self.nodes.values())\n return d", "def row_distance(self, row1, row2):\n diffs = [(x - y) ** 2 for x, y in zip(self.data[row1], self.data[row2])\n if (x is not None) and (y is not None)]\n if len(diffs) > 0:\n return sqrt(sum(diffs) / len(diffs))\n else:\n pass", "def distance_from_point(self,point):\n d,node=self.tree.query(point)\n # distance=pool.map(lambda a: np.linalg.norm(a-point),self.nodes.values())\n return d", "def calc_distance(self):\n total_distance = sum([connection.distance for connection in self.get_true_connections()])\n return total_distance", "def EuclideanDistance(inData):\n nObjs = len(inData)\n res = numpy.zeros((nObjs * (nObjs - 1) / 2), numpy.float)\n nSoFar = 0\n for col in range(1, nObjs):\n for row in range(col):\n t = inData[row] - inData[col]\n res[nSoFar] = sum(t * t)\n nSoFar += 1\n return numpy.sqrt(res)", "def tree_distance(gene, disease, parsed):\n edges = []\n gene_mentions = []\n disease_mentions = []\n for token in parsed:\n token_format = '{0}-{1}'.format(token.text, token.i)\n if gene in token.text:\n gene_mentions.append(token_format)\n if disease in token.text:\n disease_mentions.append(token_format)\n for child in token.children:\n edges.append((token_format, '{0}-{1}'.format(child.text, child.i)))\n graph = nx.Graph(edges)\n pairs = [(g, d) for g in gene_mentions for d in disease_mentions]\n min_dists = get_shortest_path(graph, pairs)\n if len(min_dists) == 0:\n min_dists = [-1]\n word_dists = [abs(int(p[0].rsplit('-', 1)[1]) - int(p[1].rsplit('-', 1)[1])) for p in pairs]\n try:\n return (max(min_dists), min(min_dists), sum(min_dists) / len(min_dists),\n min(word_dists), max(word_dists), sum(word_dists) / len(word_dists))\n except:\n print(gene, disease, [t.text for t in parsed])", "def compute_distances(model, prototypes, batch):\n inputs, targets = batch\n\n outputs = model(inputs)\n\n # Calculate euclidean distance in a vectorized way\n diffs = outputs.unsqueeze(1) - prototypes.unsqueeze(0)\n distances = torch.sum(diffs*diffs, -1) * -1 # get negative distances\n\n return distances", "def _compute_distance(self) -> np.ndarray:\n loc = np.expand_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n self.loc_diff = m-m.transpose(0, 2, 1)\n return np.linalg.norm(self.loc_diff, axis=0)", "def calculate_distance_to(self, node):\n x1, y1 = self.position\n x2, y2 = node.position\n\n distance = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** .5\n\n return distance", "def findEclidDist(self, row, col):\n dist = np.sqrt( (row**2 + col**2 ))\n dist = np.round( dist )\n return dist", "def get_adjacency(dataframe):\n \n # Number of nodes in the graph\n n_nodes = dataframe.shape[0]\n\n # Calculate distances. 
Due to the high dimensional data (> 1300 dimensions) the cosine distance is chosen\n distances = np.zeros((n_nodes, n_nodes))\n \n for i, a in dataframe.iterrows():\n for j, b in dataframe.iterrows():\n dot_product = np.dot(a,b)\n distances[i,j] = 1 - dot_product/(np.linalg.norm(a,2)*np.linalg.norm(b,2))\n\n # Weights (gaussian) are assigned to each link based on the distance \n kernel_width = distances.std()\n weights = np.exp(-distances**2 / (2*kernel_width**2))\n\n # Set main diagonal to zero (No self-loops)\n np.fill_diagonal(weights,0)\n adjacency = weights.copy()\n return adjacency", "def calcDist(self, elec_node_idx, elec_dist_um_y, offset1, offset2):\n internodal_len = self.params.internodal_len\n node_dist = np.arange(self.params.num_nodes)*internodal_len # dist of node from head\n e1_dist = elec_node_idx*internodal_len-offset1\n e2_dist = e1_dist + offset2\n lateral_dist1 = np.abs(node_dist-e1_dist)\n lateral_dist2 = np.abs(node_dist-e2_dist)\n return np.sqrt((lateral_dist1**2 + elec_dist_um_y**2)), \\\n np.sqrt((lateral_dist2**2 + elec_dist_um_y**2))", "def evaluate_euclidean_cell_utilities(self):\n for row in self.grid:\n for cell in row:\n cell.distance_utility = get_euclidean_distance(cell, self.target)", "def computeDistances(self, docs, centroids):\n\n prod = scipy.sparse.csr_matrix(centroids.transpose() * docs)\n ones = numpy.ones(prod.get_shape())\n diff = scipy.sparse.csr_matrix(ones - prod)\n return diff.multiply(2).sqrt()", "def get_Euclidean_length_to_root(self,from_node) :\n n = from_node.content['p3d']\n p = self._tree.root.content['p3d']\n d = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n return d", "def _calculate_sd(self):\n cost = 0\n for k in range(self.k):\n cost += \\\n distance.cdist(np.array([self.centroids[k]]), np.array([self.previous_centroids[k]]),\n metric=self.metric)[\n 0][0]\n return cost" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the distance from the root node(s) to all nodes in the skeleton. Return those distances as a new column in the skeleton DataFrame. All root nodes will be used, as long as they all share the same virtual root parent (-1).
def distances_from_root(df): g = skeleton_df_to_nx(df, directed=False, with_distances=True, virtual_roots=True, root_dist=0.0) d = nx.shortest_path_length(g, -1, weight='distance') d = pd.Series(d, name='distance').rename_axis('rowId') df = df.merge(d, 'left', on='rowId') return df
[ "def calc_segment_distances(df, root_dist=np.inf):\n # Append parent (link) columns to each row by matching\n # each row's 'link' ID with the parent's 'rowId'.\n edges_df = df[['rowId', 'link', *'xyz']].merge(\n df[['rowId', *'xyz']], 'left',\n left_on='link', right_on='rowId', suffixes=['', '_link'])\n\n diff = edges_df[[*'xyz']] - edges_df[['x_link', 'y_link', 'z_link']].values\n distances = np.linalg.norm(diff, axis=1).astype(np.float32)\n distances[np.isnan(distances)] = root_dist\n return distances", "def _skeleton_segments(skeleton_df):\n segment_df = skeleton_df.merge(skeleton_df[['rowId', 'link', *'xyz', 'radius']],\n 'inner',\n left_on='link',\n right_on='rowId',\n suffixes=['', '_parent'])\n\n child_points = segment_df[[*'xyz']].values\n parent_points = segment_df[['x_parent', 'y_parent', 'z_parent']].values\n segment_df['length'] = np.linalg.norm(child_points - parent_points, axis=1)\n return segment_df", "def heal_skeleton(skeleton_df, max_distance=np.inf, root_parent=None):\n if max_distance is True:\n max_distance = np.inf\n\n if not max_distance:\n max_distance = 0.0\n\n if root_parent is None:\n root_parent = -1\n else:\n # Fast path to exit early if we can easily check the number of roots.\n num_roots = (skeleton_df['link'] == root_parent).sum()\n if num_roots == 1:\n # There's only one root and therefore only one component.\n # No healing necessary.\n return skeleton_df\n\n skeleton_df = skeleton_df.sort_values('rowId', ignore_index=True)\n g = skeleton_df_to_nx(skeleton_df, False, False)\n\n # Extract each fragment's rows and construct a KD-Tree\n Fragment = namedtuple('Fragment', ['frag_id', 'df', 'kd'])\n fragments = []\n for frag_id, cc in enumerate(nx.connected_components(g)):\n if len(cc) == len(skeleton_df):\n # There's only one component -- no healing necessary\n return skeleton_df\n df = skeleton_df.query('rowId in @cc')\n kd = cKDTree(df[[*'xyz']].values)\n fragments.append( Fragment(frag_id, df, kd) )\n\n # Sort from big-to-small, so the calculations below use a\n # KD tree for the larger point set in every fragment pair.\n fragments = sorted(fragments, key=lambda frag: -len(frag.df))\n\n # We could use the full graph and connect all\n # fragment pairs at their nearest neighbors,\n # but it's faster to treat each entire fragment as\n # a single node and run MST on that quotient graph,\n # which is tiny.\n frag_graph = nx.Graph()\n for frag_a, frag_b in combinations(fragments, 2):\n coords_b = frag_b.df[[*'xyz']].values\n distances, indexes = frag_a.kd.query(coords_b)\n\n index_b = np.argmin(distances)\n index_a = indexes[index_b]\n\n node_a = frag_a.df['rowId'].iloc[index_a]\n node_b = frag_b.df['rowId'].iloc[index_b]\n dist_ab = distances[index_b]\n\n # Add edge from one fragment to another,\n # but keep track of which fine-grained skeleton\n # nodes were used to calculate distance.\n frag_graph.add_edge( frag_a.frag_id, frag_b.frag_id,\n node_a=node_a, node_b=node_b,\n distance=dist_ab )\n\n # Compute inter-fragment MST edges\n frag_edges = nx.minimum_spanning_edges(frag_graph, weight='distance', data=True)\n\n # For each inter-fragment edge, add the corresponding\n # fine-grained edge between skeleton nodes in the original graph.\n omit_edges = []\n for _u, _v, d in frag_edges:\n g.add_edge(d['node_a'], d['node_b'])\n if d['distance'] > max_distance:\n omit_edges.append((d['node_a'], d['node_b']))\n\n # Traverse in depth-first order to compute edges for final tree\n root = skeleton_df['rowId'].iloc[0]\n\n # Replace 'link' (parent) column using MST edges\n 
_reorient_skeleton(skeleton_df, root, root_parent, g=g)\n assert (skeleton_df['link'] == root_parent).sum() == 1\n assert skeleton_df['link'].iloc[0] == root_parent\n\n # Delete edges that violated max_distance\n if omit_edges:\n # Make sure this is invariant to edge direction (check both directions).\n omit_edges = omit_edges + [(b, a) for (a, b) in omit_edges]\n omit_df = pd.DataFrame(omit_edges, columns=['rowId', 'link'])\n omit_df['omit_link'] = -1\n\n # Remove links for omitted edges (convert child node to a new root).\n skeleton_df = skeleton_df.merge(omit_df, 'left', on=['rowId', 'link'])\n skeleton_df['link'].update(skeleton_df['omit_link'])\n del skeleton_df['omit_link']\n\n return skeleton_df", "def distances(self) -> OctreeVolume:\n if self._distances is None:\n self._distances = OctreeVolume(\n self.leaf_shape_voxels,\n self.seg_voxel_bounds,\n float,\n self._data_populator_factory(float(\"inf\"), float),\n )\n return self._distances", "def get_distances(self):\n return np.sqrt(np.diff(self.x)**2+np.diff(self.y)**2)", "def skeleton_df_to_nx(df, with_attributes=True, directed=True, with_distances=False, virtual_roots=False, root_dist=np.inf):\n if directed:\n g = nx.DiGraph()\n else:\n g = nx.Graph()\n\n if with_attributes:\n for row in df.itertuples(index=False):\n g.add_node(row.rowId, x=row.x, y=row.y, z=row.z, radius=row.radius)\n else:\n g.add_nodes_from(df['rowId'].sort_values())\n\n if not virtual_roots:\n # Instead of assuming that the root node refers to a special parent (e.g. -1),\n # we determine the root_parents by inspection.\n root_parents = pd.Index(df['link'].unique()).difference(df['rowId'].unique())\n root_parents\n\n if with_distances:\n edges_df = df[['rowId', 'link']].copy()\n edges_df['distance'] = calc_segment_distances(df, root_dist)\n if not virtual_roots:\n edges_df = edges_df.query('link not in @root_parents')\n edges_df = edges_df.sort_values(['rowId', 'link'])\n g.add_weighted_edges_from(edges_df.itertuples(index=False), 'distance')\n else:\n if not virtual_roots:\n edges_df = df.query('link not in @root_parents')\n edges_df = edges_df[['rowId', 'link']]\n edges_df = edges_df.sort_values(['rowId', 'link'])\n g.add_edges_from(edges_df.values)\n\n return g", "def distmatrix(self):\n import numpy as np\n\n self.nodexnormal, self.nodeynormal = sf.FeatureScaling(self.nodex), sf.FeatureScaling(self.nodey)\n \n self.dmatrix = np.empty((self.nodenum, self.nodenum), dtype = float)\n self.dnormalmatrix = np.empty((self.nodenum, self.nodenum), dtype = float)\n \n for i in range(self.nodenum):\n for j in range(i, self.nodenum):\n self.dmatrix[i, j] = sf.dist(self.nodey[i], self.nodex[i], self.nodey[j], self.nodex[j])\n self.dmatrix[j, i] = self.dmatrix[i, j]\n \n self.dnormalmatrix[i, j] = sf.dist(self.nodeynormal[i], self.nodexnormal[i], self.nodeynormal[j], self.nodexnormal[j])\n self.dnormalmatrix[j, i] = self.dnormalmatrix[i, j]", "def __skeleton_nodes(self, data3d_skel, kernel=None):\n\n if kernel is None:\n kernel = np.ones([3, 3, 3])\n\n mocnost = scipy.ndimage.filters.convolve(data3d_skel, kernel) * data3d_skel\n\n nodes = (mocnost > 3).astype(np.int8)\n terminals = ((mocnost == 2) | (mocnost == 1)).astype(np.int8)\n\n data3d_skel[nodes == 1] = 2\n data3d_skel[terminals == 1] = 3\n # maybe swap next two lines\n data3d_skel = self.__skeleton_nodes_aggregation(data3d_skel)\n data3d_skel = self.__remove_terminal_nodes_in_neghborhood_of_the_branching_node(data3d_skel)\n\n return data3d_skel", "def calculate_distances(self):\n\n # Matrices with reports 
vectors and abstracts vectors\n reports = self.model.doc_vecs.loc[self.data.report_ids]\n abstracts = self.model.doc_vecs.loc[self.data.abstract_ids]\n\n # Calculates the distance between each pairs of the matrices\n distances = cdist(reports, abstracts, self.distance_measure)\n distances = np.nan_to_num(distances, nan=np.inf)\n\n distances = pd.DataFrame(distances, index=self.data.report_ids, columns=self.data.abstract_ids)\n\n return distances", "def get_distance_to_root(token, parser):\r\n dist = 0\r\n while parser.get_head(token) != token:\r\n token = parser.get_head(token)\r\n dist += 1\r\n return dist", "def mean_distances(self):\n num_nodes = self.size()[0]\n\n return sum([self.distance(i, j)\n for j in self.get_nodes()\n for i in self.get_nodes()\n if j > i and self.distance(i, j) != None]) / num_nodes", "def findSigma(self):\n\t\tdistance = []\n\t\tfor index, row in self.prototypes.iterrows():\n\t\t\tmodified_prototype_set = self.prototypes.drop([index]) # Remove current point from data set\n\t\t\tdistance.append(NearestNeighbor.oneNearestNeighbor(row, modified_prototype_set, return_distance=True, class_header=self.class_header))\n\n\t\treturn distance", "def calc_distance(self):\n total_distance = sum([connection.distance for connection in self.get_true_connections()])\n return total_distance", "def calculate_distances(self):\n\n # Initialize container.\n distances = np.zeros((len(self.data.stem_ids), 2))\n\n # For each report-abstract pairs\n for i in tqdm(range(len(self.data.stem_ids))):\n\n # Get report, abstract and random other abstract\n report = self.model.doc_vecs.loc['%s_report' % self.data.stem_ids[i]]\n summary = self.model.doc_vecs.loc['%s_abstract' % self.data.stem_ids[i]]\n other = self.model.doc_vecs.loc[self.data.abstract_ids[random.randint(0, len(self.data.abstract_ids)-1)]]\n\n # self.distance_measure is always cosine. 
Calculate distance.\n if self.distance_measure == 'cosine':\n distances[i][0] = cosine(report, summary)\n distances[i][1] = cosine(report, other)\n\n # Make pandas dataframe, save and return.\n distances = pd.DataFrame(distances, index=self.data.stem_ids, columns=['own', 'other'])\n distances.to_csv(self.model.path / str('distances_%s_%s.csv' % (self.data.name, self.distance_measure)))\n\n return distances", "def _init_distance_vector(self):\r\n for router in [self.sourceRouter]+list(self.neighbours.keys()):\r\n self.routingTable[router] = {}\r\n self.routingTable[router][router] = {}\r\n self.routingTable[router][router]['distance'] = 0\r\n self.routingTable[router][router]['nextHopRouter'] = router\r\n\r\n for neighbourRouter, routerAddress in self.neighbours.items():\r\n sourceDV = self.routingTable[self.sourceRouter]\r\n neighbourDV = self.routingTable[neighbourRouter]\r\n\r\n sourceDV[neighbourRouter] = {}\r\n sourceDV[neighbourRouter]['distance'] = routerAddress['link_cost']\r\n sourceDV[neighbourRouter]['nextHopRouter'] = neighbourRouter\r\n\r\n neighbourDV[self.sourceRouter] = {}\r\n neighbourDV[self.sourceRouter]['distance'] = routerAddress['link_cost']\r\n neighbourDV[self.sourceRouter]['nextHopRouter'] = self.sourceRouter", "def distances(self):\n sequence_count = self.sequence_count()\n dm = np.zeros((sequence_count, sequence_count))\n identifiers = []\n for i in xrange(sequence_count):\n self_i = self[i]\n identifiers.append(self_i.identifier)\n for j in xrange(i):\n dm[i, j] = dm[j, i] = self_i.distance(self[j])\n return DistanceMatrix(dm, identifiers)", "def upsample_skeleton(skeleton_df, max_segment_length):\n if len(skeleton_df) in (0, 1) or (skeleton_df['link'] == -1).all():\n # Can't upsample a skeleton with no child-parent segments\n return skeleton_df\n\n seg_df = _skeleton_segments(skeleton_df)\n seg_df = seg_df.loc[seg_df['length'] > max_segment_length]\n\n if len(seg_df) == 0:\n return skeleton_df\n\n I0 = seg_df['rowId']\n I1 = seg_df['rowId_parent']\n next_id = 1 + skeleton_df['rowId'].max()\n\n # It's best to minimize the number of times we call np.linspace(),\n # so we interpolate points and radii in conjunction with a single array.\n PR0 = seg_df[[*'xyz', 'radius']].values\n PR1 = seg_df[['x_parent', 'y_parent', 'z_parent', 'radius_parent']].values\n\n D = seg_df['length']\n\n segment_nodes = []\n for i0, i1, pr0, pr1, d in zip(I0, I1, PR0, PR1, D):\n # Number of nodes from child (i0) to parent (i1)\n # excluding the parent (which we won't edit).\n n = int(np.ceil(d / max_segment_length))\n\n # IDs of the original child and new intermediates going towards\n # the original parent, but not the parent itself.\n I = [i0, *range(next_id, next_id + n - 1)] # noqa\n next_id += n - 1\n\n # 'link' (parent id) for the original child and new intermediates\n L = I[1:] + [i1]\n\n # Interpolate points and radii\n PR = np.linspace(pr0, pr1, n, endpoint=False)\n\n assert len(PR) == len(I) == len(L)\n segment_nodes.append((I, *PR.T, L))\n\n segment_cols = [*zip(*segment_nodes)]\n full_cols = [np.concatenate(a) for a in segment_cols]\n new_df = pd.DataFrame(dict(zip(['rowId', *'xyz', 'radius', 'link'], full_cols)))\n\n # Expand the DataFrame to make room for the new rows,\n # then copy them over.\n all_rowIds = np.sort(pd.concat((skeleton_df['rowId'], new_df['rowId'])).unique())\n dtypes = skeleton_df.dtypes\n skeleton_df = skeleton_df.set_index('rowId').reindex(all_rowIds)\n skeleton_df.update(new_df.set_index('rowId'))\n\n # Restore to standard column form.\n return 
skeleton_df.reset_index().astype(dtypes)", "def attach_synapses_to_skeleton(skeleton_df, synapses_df):\n skeleton_df = skeleton_df.copy(deep=False).reset_index(drop=True)\n synapses_df = synapses_df.copy(deep=False).reset_index(drop=True)\n\n skeleton_df['structure'] = 'neurite'\n synapses_df['structure'] = synapses_df['type']\n synapses_df['radius'] = 0.0\n\n kd = cKDTree(skeleton_df[[*'xyz']].values)\n _, indexes = kd.query(synapses_df[[*'xyz']].values)\n\n synapses_df['link'] = skeleton_df.loc[indexes, 'rowId'].values\n synapses_df['rowId'] = synapses_df.index + skeleton_df['rowId'].max() + 1\n\n relevant_cols = ['rowId', *'xyz', 'radius', 'link', 'structure']\n synapses_df = synapses_df[relevant_cols]\n skeleton_df = skeleton_df[relevant_cols]\n\n combined = pd.concat((skeleton_df, synapses_df), ignore_index=True)\n combined['structure'] = pd.Categorical(combined['structure'])\n return combined", "def distance_from_node(self,node):\n d, node = self.tree.query(self.nodes[node])\n # distance=pool.map(lambda a: np.linalg.norm(a-self.nodes[node]),self.nodes.values())\n return d" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a DataFrame from an SWC file. The 'node_type' column is discarded.
def skeleton_swc_to_df(swc): if hasattr(swc, 'read'): swc = swc.read() else: assert isinstance(swc, str) if swc.endswith('.swc'): with open(swc, 'r') as f: swc = f.read() cols = ['rowId', 'node_type', 'x', 'y', 'z', 'radius', 'link'] lines = swc.split('\n') lines = filter(lambda line: '#' not in line, lines) swc_csv = '\n'.join(lines) # Compact dtypes save RAM when loading lots of skeletons dtypes = { 'rowId': np.int32, 'node_type': np.int8, 'x': np.float32, 'y': np.float32, 'z': np.float32, 'radius': np.float32, 'link': np.int32, } df = pd.read_csv(StringIO(swc_csv), delimiter=' ', engine='c', names=cols, dtype=dtypes, header=None) df = df.drop(columns=['node_type']) return df
[ "def read_feat(file):\n df = pd.read_csv(file, sep=\" \", names=[\"node_id\"] + list(range(0, 1364)))\n return df", "def create_df(file, df_type):\n try:\n date_id = file.split(\"/\")[-1].split(\".\")[0]\n report_timestamp = datetime.strptime(date_id, \"%m-%d-%y\").strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n df = pd.read_csv(file)\n columns = df.columns.tolist()\n\n df[\"reportTimestamp\"] = df.apply(lambda row: report_timestamp, axis=1)\n df[\"dateId\"] = df.apply(lambda row: date_id, axis=1)\n\n if df_type == \"confirmed\":\n df[\"confirmedCases\"] = df.apply(lambda row: row[columns[-1]], axis=1)\n else:\n df[\"deaths\"] = df.apply(lambda row: row[columns[-1]], axis=1)\n\n df.drop(columns[-1], axis=1, inplace=True)\n\n return df\n\n except Exception as exception:\n logger.error(\"Received Exception in create_df function \"\n \"in covid_cases_usa.py - {}\".format(exception))\n raise exception", "def create_df(path_or_buffer, v='2'):\r\n column_names = load_column_names(v=v)\r\n return pd.read_csv(\r\n path_or_buffer, sep=\"\\t\", header=None, usecols=range(len(column_names)),\r\n names=column_names, index_col=0, dtype={'EventCode': 'object'}, encoding='utf-8'\r\n )", "def epw_to_data_frame(file_):\n return pandas.read_csv(\n file_,\n header=8,\n names=field_names,\n index_col=False,\n na_values=missing_values,\n parse_dates={'datetime': [0, 1, 2, 3, 4]},\n date_parser=date_converter\n )", "def _create_nodes_df(filename_dict):\n node_file_keys = ['labels', 'sizes', 'colors']\n series_dict = {k: f if isinstance(f, pd.core.frame.DataFrame) else _prep_node_data(f)\n for k, f in filename_dict.items()\n if f is not None and k in node_file_keys}\n return pd.concat(series_dict.values(), axis=1, keys=series_dict.keys())", "def load_data_impl() -> pd.DataFrame:\n # The source for this file is at https://ssd.jpl.nasa.gov/?sb_elem\n fname: str = '../jpl/orb_elements_asteroid.txt'\n\n # The field names in the JPL file and their column positions\n names: List[str] = ['Num', 'Name', 'Epoch', 'a', 'e', 'i', 'w', 'Node', 'M', 'H', 'G', 'Ref']\n colspec_tbl: Dict[str, Tuple[int, int]] = {\n 'Num': (0,6), \n 'Name': (7, 25), \n 'Epoch': (25, 30), \n 'a': (31, 41), \n 'e': (42, 52), \n 'i': (54, 62), \n 'w': (63, 72),\n 'Node': (73, 82),\n 'M': (83, 94),\n 'H': (95, 100),\n 'G': (101, 105),\n 'Ref': (106, 113),\n }\n \n # Other arguments for Pandas file import\n colspecs: List[Tuple[int, int]] = [colspec_tbl[nm] for nm in names]\n header: int = 0\n skiprows: List[int] = [1]\n dtype: Dict[str, int] = {\n 'Num': int,\n 'Name': str,\n 'Epoch': float,\n 'a': float,\n 'e': float,\n 'i': float,\n 'w': float,\n 'Node': float,\n 'M': float,\n 'H': float,\n 'G': float,\n 'Ref': str,\n }\n\n # Read the DataFrame\n df: pd.DataFrame = pd.read_fwf(fname, colspecs=colspecs, header=header, names=names, skiprows=skiprows, dtype=dtype)\n # Set the asteroid number field to be the index\n df.set_index(keys=['Num'], drop=False, inplace=True)\n return df", "def to_dataframe(fp):\n # Avoid circular import problems.\n # Xport Modules\n from xport.v56 import load\n warnings.warn('Please use ``xport.v56.load`` in the future', DeprecationWarning)\n library = load(fp)\n dataset = next(iter(library.values()))\n return dataset", "def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n file_extension = os.path.splitext(file_path)[-1].lower()\n if file_extension == \".json\":\n return pd.read_json(file_path)\n elif file_extension == \".jsonl\":\n return pd.read_json(file_path, lines=True)\n elif file_extension == \".tsv\":\n return 
pd.read_table(file_path)\n elif file_extension in {\".csv\", \".data\"}:\n return pd.read_csv(file_path)\n elif file_extension in {\".parquet\", \".pq\", \".pqt\"}:\n return pd.read_parquet(file_path)\n else:\n raise ValueError(f\"Unsupported dataset file type: {file_extension}\")", "def read_spss(\n path: str | Path,\n usecols: Sequence[str] | None = None,\n convert_categoricals: bool = True,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n) -> DataFrame:\n pyreadstat = import_optional_dependency(\"pyreadstat\")\n check_dtype_backend(dtype_backend)\n\n if usecols is not None:\n if not is_list_like(usecols):\n raise TypeError(\"usecols must be list-like.\")\n usecols = list(usecols) # pyreadstat requires a list\n\n df, _ = pyreadstat.read_sav(\n stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals\n )\n if dtype_backend is not lib.no_default:\n df = df.convert_dtypes(dtype_backend=dtype_backend)\n return df", "def from_tcx(cls, file_obj):\n activereader = import_optional_dependency('activereader')\n\n reader = activereader.Tcx.from_file(file_obj)\n\n activities = pd.DataFrame.from_records([\n {\n 'sport': act.sport,\n 'device': act.device,\n 'unit_id': act.device_id,\n 'product_id': act.product_id,\n } for act in reader.activities\n ])\n\n if len(activities) > 1:\n raise ValueError('multi-activity files not supported')\n elif len(activities) == 1:\n summary = activities.iloc[0]\n else:\n summary = None\n\n laps = pd.DataFrame.from_records([\n # lap.to_dict()\n {\n f'{TIMESTAMP}_start': lap.start_time,\n f'{TIME}_timer': lap.total_time_s,\n f'{DISTANCE}_total': lap.distance_m,\n f'{SPEED}_max': lap.max_speed_ms,\n f'{SPEED}_avg': lap.avg_speed_ms,\n 'calories': lap.calories,\n f'{HEARTRATE}_avg': lap.hr_avg,\n f'{HEARTRATE}_max': lap.hr_max,\n f'{CADENCE}_avg': lap.cadence_avg,\n f'{CADENCE}_max': lap.cadence_max,\n 'intensity': lap.intensity,\n 'trigger_method': lap.trigger_method,\n }\n for lap in reader.laps\n ])\n\n # Build a DataFrame using only trackpoints (as records).\n records = pd.DataFrame.from_records([\n {\n TIMESTAMP: tp.time,\n LAT: tp.lat,\n LON: tp.lon,\n DISTANCE: tp.distance_m,\n ELEVATION: tp.altitude_m,\n HEARTRATE: tp.hr,\n SPEED: tp.speed_ms,\n CADENCE: tp.cadence_rpm,\n }\n for tp in reader.trackpoints\n ])\n\n # TODO: Rethink how I want to use this lap column.\n # records['lap'] = [\n # i for i, l in enumerate(reader.laps) for t in l.trackpoints\n # ]\n\n # Make the lap column into an additional index level.\n # TODO: Consider if 'time' or 'timestamp' might make a good\n # additional index. 
Or whether we need these as indexes at all.\n # records.index.name = 'record'\n # records = records.set_index('lap', append=True)\n\n activity = cls(records, laps, summary)\n\n # Convert cadence from RPM to strides per minute.\n activity.cadence._convert_units()\n\n return activity", "def _load_btl_data(btl_file, cols=None):\n btl_data = dataToNDarray(btl_file,float,True,',',0)\n btl_data = pd.DataFrame.from_records(btl_data)\n if cols != None:\n btl_data = btl_data[cols]\n btl_data[\"SSSCC\"] = Path(btl_file).stem.split(\"_\")[0]\n\n return btl_data", "def parse_gff3_to_dataframe( file ):\n\n # These are two helper functions to extract ID and Parent fields:\n def getID( attributes ):\n return parse_attributes( attributes ).get( 'ID', None )\n def getParent( attributes ):\n return parse_attributes( attributes ).get( 'Parent', None )\n\n result = read_gff3_using_pandas( file ) # this is defined below\n\n # Extract ID and Parent columns using the `apply()` dataframe method.\n result['ID'] = result['attributes'].apply( getID )\n result['Parent'] = result['attributes'].apply( getParent )\n\n # reorder columns, because I want ID and Parent first\n result = result[ ['ID', 'Parent', 'seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes'] ]\n\n return result", "def read_swpc_reports(file):\n\n with open(file, \"r\") as f:\n flare_list = []\n for line in f.readlines():\n if \"Date:\" in line:\n date = line[7:17].replace(\" \", \"\")\n elif \"EDITED EVENTS for\" in line:\n date = pd.to_datetime(line[18:29]).strftime(\"%Y%m%d\")\n\n if \"XRA\" in line:\n event_list = {}\n event_list[\"date\"] = date\n event_list[\"event_no\"] = line[0:4]\n event_list[\"start_time\"] = line[11:15]\n event_list[\"max_time\"] = line[18:22]\n event_list[\"end_time\"] = line[28:32]\n event_list[\"goes_sat\"] = line[34:37]\n event_list[\"goes_channel\"] = line[48:52]\n event_list[\"goes_class_ind\"] = line[58]\n event_list[\"goes_class\"] = line[58:62]\n event_list[\"integrated_flux\"] = line[66:73]\n # to adjust for cases when no active region number\n # and when the NOAA ar numbering passed 9000.\n if len(line)>75:\n ar = int(line[76:80]) if (line[76:80]!= \" \" and '\\n' not in line[76:80]) else 0\n if (ar < 4000 and ar!=0):\n ar = ar + 10000\n else:\n ar = 0\n event_list[\"noaa_ar\"] = ar\n flare_list.append(event_list)\n\n return pd.DataFrame(flare_list)", "def read_SWC_tree_from_file(self,file_n,types=range(1,10)) :\n # check soma-representation: 3-point soma or a non-standard representation\n soma_type = self._determine_soma_type(file_n)\n #print \"STree2::read_SWC_tree_from_file found soma_type=%i\" % soma_type\n \n file = open(file_n,'r')\n all_nodes = dict()\n for line in file :\n if not line.startswith('#') :\n split = line.split()\n index = int(split[0].rstrip())\n swc_type = int(split[1].rstrip())\n x = float(split[2].rstrip())\n y = float(split[3].rstrip())\n z = float(split[4].rstrip())\n radius = float(split[5].rstrip())\n parent_index = int(split[6].rstrip())\n\n if swc_type in types:\n tP3D = P3D2(np.array([x,y,z]),radius,swc_type)\n t_node = SNode2(index)\n t_node.content = {'p3d': tP3D}\n all_nodes[index] = (swc_type,t_node,parent_index)\n else:\n print type,index\n\n #print \"len(all_nodes): \", len(all_nodes)\n\n # IF 3-point soma representation\n if soma_type == 1:\n for index,(swc_type,node,parent_index) in all_nodes.items() :\n if index == 1:\n self.root = node\n elif index in (2,3):\n # the 3-point soma representation 
(http://neuromorpho.org/neuroMorpho/SomaFormat.html)\n self.add_node_with_parent(node,self.root)\n else:\n parent_node = all_nodes[parent_index][1]\n self.add_node_with_parent(node,parent_node)\n # IF multiple cylinder soma representation\n elif soma_type ==2:\n self.root = all_nodes[1][1]\n \n # get all some info\n soma_cylinders = []\n connected_to_root = []\n for index,(swc_type,node,parent_index) in all_nodes.items() :\n if swc_type == 1 and not index == 1:\n soma_cylinders.append((node,parent_index))\n if index > 1 :\n connected_to_root.append(index)\n\n # make soma\n s_node_1, s_node_2 = self._make_soma_from_cylinders(soma_cylinders,all_nodes)\n \n # add soma\n self.root = all_nodes[1][1]\n self.root.content[\"p3d\"].radius = s_node_1.content[\"p3d\"].radius\n self.add_node_with_parent(s_node_1,self.root)\n self.add_node_with_parent(s_node_2,self.root)\n\n # add the other points \n for index,(swc_type,node,parent_index) in all_nodes.items() :\n if swc_type == 1:\n pass\n else:\n parent_node = all_nodes[parent_index][1]\n if parent_node.index in connected_to_root:\n self.add_node_with_parent(node,self.root)\n else:\n self.add_node_with_parent(node,parent_node)\n \n return self", "def load_node_meta(file_path):\n nmeta = pd.read_csv(file_path, sep=\"\\t\")\n nmeta.columns = ['Node', 'Term', 'Definition', 'Vocabulary']\n nmeta.index = nmeta['Node']\n return nmeta", "def wtsv3_to_token_df(file: str)-> pd.DataFrame:\r\n list_par = []\r\n list_line_tsv3 = []\r\n dict_features = {}\r\n dict_features_by_type = {\"span_variables\": {}, \r\n \"relation_variables\": {}}\r\n\r\n list_df = []\r\n with open(file, encoding = \"utf-8\") as tsvfile:\r\n tsvreader = csv.reader(tsvfile, delimiter=\"\\t\")\r\n for line in tsvreader:\r\n list_line_tsv3.append(line)\r\n if len(line)==1:\r\n if line[0][:6] == \"#Text=\":\r\n list_par.append(line[0][6:])\r\n if (len(line)==1) and (line[0][5:20]==\"=webanno.custom\"):\r\n list_per_feature = line[0].split(\"|\")[1:]\r\n layer_name = line[0].split(\"|\")[0][21:]\r\n \r\n if line[0][0:5]==\"#T_SP\": \r\n dict_features_by_type[\"span_variables\"][layer_name] = [(layer_name, x) for x in list_per_feature]\r\n if line[0][0:5]==\"#T_RL\": \r\n #list_per_feature = [list_per_feature[ind-1] + \"_id\" if \"BT_webanno.custom.\" in x else x for ind, x in enumerate(list_per_feature)]\r\n dict_features_by_type[\"relation_variables\"][layer_name] = [(layer_name, x) for x in list_per_feature]\r\n dict_features[layer_name] = list_per_feature\r\n if (len(line) not in [0, 1]):\r\n list_df.append(line[:-1])\r\n list_layers = [\"\".join([x[0].upper() for x in mot.split(\"_\")]) for mot in list(dict_features.keys())]\r\n\r\n \r\n tuples_index_features = [[(layer, x) for x in dict_features[layer]] for layer in dict_features.keys()]\r\n tuples_index_features = list(itertools.chain.from_iterable(tuples_index_features))\r\n \r\n id_features = [(\"id_features\", \"token_par_id\"), (\"id_features\", 'id_characters'), (\"id_features\", \"token\")]\r\n tuples_index_features = id_features + tuples_index_features\r\n multi_index = pd.MultiIndex.from_tuples(tuples_index_features, names=['layer', 'feature'])\r\n \r\n \r\n df= pd.DataFrame.from_records(list_df, columns = multi_index)\r\n df = df.replace(to_replace=r'\\*\\[\\d+\\]', value='_', regex=True)\r\n df = df.replace(to_replace=r'\\*', value='_', regex=True)\r\n df[[x for x in df.columns if x not in [\"token\"]]] = df[[x for x in df.columns if x not in [\"token\"]]].replace(to_replace=r'\\\\', value='', regex=True)\r\n 
df[(\"id_features\", \"par_id\")] = df[(\"id_features\", \"token_par_id\")].apply(lambda x: int(x.split(\"-\")[0])-1)\r\n df[(\"id_features\", \"token_id\")] = df[(\"id_features\", \"token_par_id\")].apply(lambda x: int(x.split(\"-\")[1])-1)\r\n \r\n df[(\"id_features\", \"start_index\")] = df[(\"id_features\", \"id_characters\")].apply(lambda x: int(x.split(\"-\")[0]))\r\n df[(\"id_features\", \"end_index\")] = df[(\"id_features\", \"id_characters\")].apply(lambda x: int(x.split(\"-\")[1]))\r\n \r\n return df, dict_features_by_type, list_layers, dict_features, list_line_tsv3, list_par", "def create_dataframe(filename):\r\n\r\n df = pd.read_csv(filename)\r\n \r\n # strips whitespace\r\n df = df.rename(columns = lambda x: x.strip())\r\n return df", "def create_dataframe():\n # Import Libraries\n import pandas as pd\n # Function\n df_cols = [\n 'sequence', # STR\n 'on_site_score' # FLOAT\n ]\n df = pd.DataFrame(columns=df_cols)\n \"\"\"\n implement memory optimization by assigning appropriate dtype\n \"\"\"\n return df", "def GSLIB2Dataframe(data_file):\n\n columns = []\n with open(data_file) as f:\n head = [next(f) for _ in range(2)] # read first two lines\n line2 = head[1].split()\n ncol = int(line2[0]) # get the number of columns\n\n for icol in range(ncol): # read over the column names\n head = next(f)\n columns.append(head.split()[0])\n\n data = np.loadtxt(f, skiprows=0)\n df = pd.DataFrame(data)\n df.columns = columns\n return df", "def read_visa(path: str) -> pd.DataFrame:\n return read_aux_table(\n path,\n 'visa'\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an SWC file from a skeleton DataFrame.
def skeleton_df_to_swc(df, export_path=None): df = df.copy() df['node_type'] = 0 df = df[['rowId', 'node_type', 'x', 'y', 'z', 'radius', 'link']] swc = "# " swc += df.to_csv(sep=' ', header=True, index=False) if export_path: with open(export_path, 'w') as f: f.write(swc) return swc
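The reverse direction of the `skeleton_df_to_swc` document just above: a sketch that serializes an invented two-node skeleton DataFrame into SWC text with the same constant node_type, column order, and '# '-prefixed header line:

```python
import pandas as pd

# Invented two-node skeleton in the column layout used throughout these records.
df = pd.DataFrame({
    'rowId': [1, 2],
    'x': [0.0, 3.0], 'y': [0.0, 4.0], 'z': [0.0, 0.0],
    'radius': [1.0, 0.5],
    'link': [-1, 1],
})

# Same steps as skeleton_df_to_swc(): add the constant node_type column,
# reorder to SWC column order, and emit space-delimited rows after a '# ' header.
out = df.copy()
out['node_type'] = 0
out = out[['rowId', 'node_type', 'x', 'y', 'z', 'radius', 'link']]
swc = '# ' + out.to_csv(sep=' ', header=True, index=False)
print(swc)
# # rowId node_type x y z radius link
# 1 0 0.0 0.0 0.0 1.0 -1
# 2 0 3.0 4.0 0.0 0.5 1
```

Feeding this string back through the parsing sketch in the previous record should recover the same rows (minus the dropped node_type column).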
[ "def create_sjr_sframe():\n sjr_sf = tc.SFrame()\n for p in os.listdir(DATASETS_SJR_DIR):\n if not p.endswith(\".csv\"):\n continue\n y = int(re.match(r'.*([1-3][0-9]{3})', p.split(os.path.sep)[-1]).group(1))\n sf = tc.SFrame.read_csv(\"%s/%s\" % (DATASETS_SJR_DIR, p))\n sf['Year'] = y\n sf = sf.rename({\"Total Docs. (%s)\" % y: \"Total Docs.\"})\n extra_cols = [\"Categories\"]\n for c in extra_cols:\n if c not in sf.column_names():\n sf[c] = ''\n sjr_sf = sjr_sf.append(sf)\n\n r_issn = re.compile('(\\\\d{8})')\n sjr_sf['Issn'] = sjr_sf['Issn'].apply(lambda i: r_issn.findall(i))\n sjr_sf = sjr_sf.stack('Issn', new_column_name='ISSN')\n sjr_sf.save(SJR_SFRAME)", "def format_cpc_soi(src_file, target_file):\n\n index_table_start = None\n with open(src_file, 'r') as ifs:\n for i, line in enumerate(ifs):\n if 'STANDARDIZED' in line:\n index_table_start = i + 2\n\n if index_table_start is None:\n raise RuntimeError('Start of standardized data not found')\n\n logging.info('Reading source file: %s', src_file)\n widths = [4, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6]\n input_data = pd.read_fwf(\n src_file, header=0, widths=widths, na_values='-999.9', index_col=0,\n skiprows=index_table_start)\n\n stacked_data = input_data.stack()\n times = np.array([pd.to_datetime('{} {}'.format(i[0], i[1]))\n for i in stacked_data.index])\n values = stacked_data.values\n\n n_samples = times.shape[0]\n logging.info('Number of samples: %d', n_samples)\n\n data = np.ones((n_samples, 4))\n data[:, 0] = np.array([dt.year for dt in times])\n data[:, 1] = np.array([dt.month for dt in times])\n data[:, 3] = values\n\n header = 'year,month,day,value'\n fmt = '%d,%d,%d,%16.8e'\n\n logging.info('Writing formatted data to: %s', target_file)\n np.savetxt(target_file, data, header=header, fmt=fmt)", "def write_cif(struc, filename=None, header=\"\", permission='w', sym_num=None, style='mp'):\n if sym_num is None:\n l_type = struc.group.lattice_type\n symbol = struc.group.symbol\n number = struc.group.number\n G1 = struc.group.Wyckoff_positions[0]\n else: #P1 symmetry\n l_type = 'triclinic'\n symbol = 'P1'\n number = 1\n G1 = Group(1).Wyckoff_positions[0]\n\n if hasattr(struc, 'mol_sites'):\n sites = struc.mol_sites\n molecule = True\n else:\n sites = struc.atom_sites\n molecule = False\n\n change_set = False\n if number in [7, 14, 15]:\n if hasattr(struc, 'diag') and struc.diag:\n symbol = struc.group.alias \n G1.diagonalize_symops()\n change_set = True\n \n lines = logo\n lines += 'data_' + header + '\\n'\n if hasattr(struc, \"energy\"):\n if struc.molecular:\n eng = struc.energy/sum(struc.numMols)\n else:\n eng = struc.energy/sum(struc.numIons)\n lines += '#Energy: {:} eV/cell\\n'.format(eng)\n\n lines += \"\\n_symmetry_space_group_name_H-M '{:s}'\\n\".format(symbol)\n lines += '_symmetry_Int_Tables_number {:>15d}\\n'.format(number)\n lines += '_symmetry_cell_setting {:>15s}\\n'.format(l_type)\n\n a, b, c, alpha, beta, gamma = struc.lattice.get_para(degree=True)\n lines += '_cell_length_a {:12.6f}\\n'.format(a)\n lines += '_cell_length_b {:12.6f}\\n'.format(b)\n lines += '_cell_length_c {:12.6f}\\n'.format(c)\n lines += '_cell_angle_alpha {:12.6f}\\n'.format(alpha)\n lines += '_cell_angle_beta {:12.6f}\\n'.format(beta)\n lines += '_cell_angle_gamma {:12.6f}\\n'.format(gamma)\n\n lines += '\\nloop_\\n'\n lines += ' _symmetry_equiv_pos_site_id\\n'\n lines += ' _symmetry_equiv_pos_as_xyz\\n'\n\n if not change_set:\n #if change_set:\n wps = G1\n else:\n wps = sites[0].wp.ops\n for i, op in enumerate(wps):\n lines += \"{:d} 
'{:s}'\\n\".format(i+1, op.as_xyz_string())\n\n lines += '\\nloop_\\n'\n lines += ' _atom_site_label\\n'\n lines += ' _atom_site_type_symbol\\n'\n lines += ' _atom_site_symmetry_multiplicity\\n'\n if style == 'icsd':\n lines += ' _atom_site_Wyckoff_symbol\\n'\n lines += ' _atom_site_fract_x\\n'\n lines += ' _atom_site_fract_y\\n'\n lines += ' _atom_site_fract_z\\n'\n lines += ' _atom_site_occupancy\\n'\n\n for site in sites:\n mul = site.wp.multiplicity\n letter = site.wp.letter\n if molecule:\n if sym_num is None:\n coords, species = site._get_coords_and_species(first=True)\n else:\n coords = None\n species = []\n for id in range(sym_num):\n mol = site.get_mol_object(id)\n tmp = mol.cart_coords.dot(site.lattice.inv_matrix)\n if coords is None:\n coords = tmp\n else:\n coords = np.append(coords, tmp, axis=0)\n species.extend([s.value for s in mol.species])\n #coords, species = site._get_coords_and_species(ids=sym_num)\n else:\n coords, species = [site.position], [site.specie]\n for specie, coord in zip(species, coords):\n lines += '{:6s} {:6s} {:3d} '.format(specie, specie, mul)\n if style != 'mp':\n lines += '{:s} '.format(letter)\n lines += '{:12.6f}{:12.6f}{:12.6f} 1\\n'.format(*coord)\n lines +='#END\\n\\n'\n\n if filename is None:\n return lines\n else:\n with open(filename, permission) as f:\n f.write(lines)\n return", "def _write_files(args, body_df, output_df):\n from neuclease.util import skeleton_to_neuroglancer\n body_df = body_df.set_index('hemibrain_body')\n\n if args.skeleton:\n os.makedirs(f\"{args.output_dir}/skeleton\", exist_ok=True)\n if args.mesh:\n os.makedirs(f\"{args.output_dir}/mesh\", exist_ok=True)\n\n for (source, hemi_body), df in output_df.groupby(['source', 'hemibrain_body'], sort=False):\n assert source in ('skeleton', 'mesh')\n object_id = body_df.loc[hemi_body, 'object_id']\n if source == 'skeleton':\n try:\n skeleton_to_neuroglancer(df, 8, f\"{args.output_dir}/skeleton/{object_id}\")\n except Exception as ex:\n logger.error(f\"Failed to write skeleton for hemibrain body {hemi_body}: {ex}\")\n if source == 'mesh':\n mesh = body_df.loc[hemi_body, 'mesh']\n if mesh:\n mesh_to_neuroglancer(object_id, df, mesh, 8, args.output_dir)", "def writeSegyStructure(filename, Data, SH, STH, endian='>'): # modified by A Squelch\n\n #printverbose(\"writeSegyStructure : Trying to write \" + filename, 0)\n\n f = open(filename, 'wb')\n\n # VERBOSE INF\n revision = SH[\"SegyFormatRevisionNumber\"]\n dsf = SH[\"DataSampleFormat\"]\n if (revision == 100):\n revision = 1\n if (revision == 256): # added by A Squelch\n revision = 1\n\n # try: # block added by A Squelch\n # DataDescr = SH_def[\"DataSampleFormat\"][\"descr\"][str(revision)][str(dsf)]\n # except KeyError:\n # print(\"\")\n # print(\" An error has ocurred interpreting a SEGY binary header key\")\n # print(\" Please check the Endian setting for this file: \", SH[\"filename\"])\n # sys.exit()\n\n #printverbose(\"writeSegyStructure : SEG-Y revision = \" + str(revision), 1)\n #printverbose(\"writeSegyStructure : DataSampleFormat=\" + str(dsf) + \"(\" + DataDescr + \")\", 1)\n\n # WRITE SEGY HEADER\n\n for key in SH_def.keys():\n pos = SH_def[key][\"pos\"]\n format = SH_def[key][\"type\"]\n value = SH[key]\n\n # SegyHeader[key],index = putValue(value,f,pos,format,endian);\n putValue(value, f, pos, format, endian)\n\n txt = str(pos) + \" \" + str(format) + \" Reading \" + key + \"=\" + str(value)\n # +\"=\"+str(SegyHeader[key])\n # printverbose(txt,-1)\n\n # SEGY TRACES\n\n ctype = 
SH_def['DataSampleFormat']['datatype'][revision][dsf]\n bps = SH_def['DataSampleFormat']['bps'][revision][dsf]\n\n sizeT = 240 + SH['ns'] * bps\n\n for itrace in range(SH['ntraces']):\n index = 3600 + itrace * sizeT\n #printverbose('Writing Trace #' + str(itrace + 1) + '/' + str(SH['ntraces']), 10)\n # WRITE SEGY TRACE HEADER\n for key in STH_def.keys():\n pos = index + STH_def[key][\"pos\"]\n format = STH_def[key][\"type\"]\n value = STH[key][itrace]\n txt = str(pos) + \" \" + str(format) + \" Writing \" + key + \"=\" + str(value)\n\n #printverbose(txt, 40)\n putValue(value, f, pos, format, endian)\n\n # Write Data\n cformat = endian + ctype\n for s in range(SH['ns']):\n strVal = struct.pack(cformat, Data[s, itrace])\n f.seek(index + 240 + s * struct.calcsize(cformat))\n f.write(strVal)\n\n f.close\n\n # return segybuffer", "def gen_seg_table(frame_filepath, per_args):\n threshold = per_args['threshold']\n shift_len = per_args['shift_len']\n out_dir = per_args['out_dir']\n\n print(f\"process {frame_filepath}\")\n name = frame_filepath.split(\"/\")[-1].rsplit(\".\", 1)[0]\n\n sequence = np.loadtxt(frame_filepath)\n start = 0\n end = 0\n start_list = [0]\n end_list = []\n state_list = []\n\n for i in range(len(sequence) - 1):\n current_sate = \"non-speech\" if sequence[i] <= threshold else \"speech\"\n next_state = \"non-speech\" if sequence[i + 1] <= threshold else \"speech\"\n if next_state != current_sate:\n end = i * shift_len + shift_len # shift_len for handling joint\n state_list.append(current_sate)\n end_list.append(end)\n\n start = (i + 1) * shift_len\n start_list.append(start)\n\n end_list.append((i + 1) * shift_len + shift_len)\n state_list.append(current_sate)\n\n seg_table = pd.DataFrame({'start': start_list, 'end': end_list, 'vad': state_list})\n\n save_name = name + \".txt\"\n save_path = os.path.join(out_dir, save_name)\n seg_table.to_csv(save_path, sep='\\t', index=False, header=False)", "def AppendSAVerticalCSVs(DataDirectory, fname_prefix):\n\n # get the csv filename\n csv_suffix = '_SAvertical.csv'\n\n MasterDF = pd.DataFrame()\n basin_dict = MapBasinsToKeysFromJunctionList(DataDirectory, fname_prefix)\n\n # loop through and get each basin csv\n for outlet_jn, basin_key in basin_dict.iteritems():\n this_fname = \"basin\"+str(outlet_jn)+csv_suffix\n # append to master DF and change the basin key and the junction\n df = pd.read_csv(DataDirectory+this_fname)\n df = df[df['basin_key'] == 0]\n df['basin_key'] = basin_key\n MasterDF = MasterDF.append(df, ignore_index = True)\n\n # write to a new csv\n MasterDF.to_csv(DataDirectory+fname_prefix+csv_suffix)\n\n return MasterDF", "def create_and_save_visa_cat():\n # visa category dataframe\n visa_category = pd.DataFrame({\n 'id': [1,2,3],\n 'category': ['Business', 'Pleasure', 'Student']\n })\n visa_category = visa_category.set_index('id')\n \n # set the path according to config file\n visa_category_path = os.path.join(output_dir,'visa_category.csv')\n \n if save_on_s3:\n save_df_on_s3(visa_category,visa_category_path)\n else:\n visa_category.to_csv(visa_category_path)", "def create_output_data_file():\n logging.info(cs_ref, 'create Output Data File')\n current_date = '%Y%m%d-%H%M%S'\n head, tail = osp.split(src_file)\n first_data = \"\\nNX-COMPUTATIONS : OUTPUT DATA FILE for \" + src_file\n df = 'data/%s_%s' % (datetime.now().strftime(current_date), tail)\n open(df, 'w').write(first_data)\n return df", "def stretch_skeleton(skeleton):\n total_length = 100\n\n s = pd.Series(index=np.arange(0, total_length))\n i = 0\n num_moves = 
len(skeleton)\n # Stretch the TS\n for n, v in skeleton.iteritems():\n s.ix[i] = v\n i += int(total_length/num_moves+1)\n\n return s", "def _fetch_hemibrain_skeleton(hemi_body):\n from requests import HTTPError\n from tqdm import tqdm\n from neuclease.dvid import fetch_skeleton\n\n try:\n df = fetch_skeleton(*Hemibrain_v12, 'segmentation_skeletons', hemi_body, 'pandas')\n df['hemibrain_body'] = hemi_body\n df['source'] = 'skeleton'\n return df\n except HTTPError:\n with tqdm.external_write_mode():\n logger.error(f\"Failed to fetch skeleton for body {hemi_body}\")\n return None", "def prepare_bsf_voronoi(redo=True):\n ############################################################################\n # Input parameters\n w1 = context.w1\n w2 = context.w2\n velscale = context.velscale\n sample = \"bsf\"\n targetSN = 250\n dataset = \"MUSE\"\n ############################################################################\n # BSF parameters\n outw1 = 4800\n outw2 = 9100\n dw = 4\n wfit = np.arange(outw1, outw2, dw)\n sigma = 350 # km / s\n outroot = os.path.join(context.data_dir, dataset, \"bsf\")\n if not os.path.exists(outroot):\n os.mkdir(outroot)\n # Preparing the data\n outdir_data = os.path.join(outroot, \"data\")\n if not os.path.exists(outdir_data):\n os.mkdir(outdir_data)\n for field in context.fields:\n data_dir = os.path.join(context.data_dir, dataset, \"combined\", field,\n \"spec1d_FWHM2.95_sn{}\".format(targetSN),\n \"ppxf_vel{}_w{}_{}_{}\".format(int(velscale), w1, w2, sample))\n if not os.path.exists(data_dir):\n continue\n tables = sorted([_ for _ in os.listdir(data_dir) if _.endswith(\n \"bestfit.fits\")])\n for table in tables:\n print(\"Processing file {}\".format(table))\n prepare_spectra(os.path.join(data_dir, table), wfit, outdir_data)\n input(404)\n\n # Setting unique name for particular modeling\n fitname = \"ngc3311_w{}_{}_dw{}_sigma{}_sn{}\".format(outw1, outw2, dw, sigma,\n targetSN)\n outroot = os.path.join(context.home, \"bsf\", fitname)\n if not os.path.exists(outroot):\n os.mkdir(outroot)\n # Setting the directory where the data is going to be saved\n data_dir = os.path.join(outroot, \"data\")\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n prepare_spectra(outw1, outw2, dw, data_dir, redo=redo, sigma=sigma,\n targetSN=targetSN)\n # Setting templates\n templates_dir = os.path.join(outroot, \"templates\")\n if not os.path.exists(templates_dir):\n os.mkdir(templates_dir)\n wave, params, templates = prepare_templates(outw1, outw2, dw,\n templates_dir, redo=redo,\n sample=sample, sigma=sigma)", "def createSaveFrame(self):\n\n Length_for_array = LoopParams['Loops']*(len(self.stageVector_mm))*2*2\n zero_data = np.zeros(shape=(int(Length_for_array), 1))\n self.AllData_Reduced = pd.DataFrame(zero_data, columns=['Diodesignal'])", "def data_frame_creator(self):\n\n rgb_dir = self.dataset_dir + \"/color/\"\n rgb_data = [\n rgb_dir + rgb for rgb in os.listdir(rgb_dir)\n ]\n\n segmentation_dir = self.dataset_dir + \"/mask/\"\n segmentation_data = [\n segmentation_dir + segmentation\n for segmentation in os.listdir(segmentation_dir)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1, random_state=123)\n\n return pd.DataFrame(dataset)", "def _write_header(self, block, fi):\n if len(block.segments) > 0:\n channel_indexes = channel_indexes_in_segment(block.segments[0])\n else:\n channel_indexes = []\n\n # type of file\n fi.write('NEURALSG')\n\n # sampling rate, in text and integer\n 
fi.write('30 kS/s\\0')\n for _ in range(8): fi.write('\\0')\n fi.write(struct.pack('<I', 1))\n\n # channel count: one for each analogsignal, and then also for\n # each column in each analogsignalarray\n fi.write(struct.pack('<I', len(channel_indexes)))\n for chidx in channel_indexes:\n fi.write(struct.pack('<I', chidx))", "def synth_header(self):\n\n header = \"n,imbalanced,num_c,internoiselvl,intranoiselvl,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,kvs_sze,kvs_fsze,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_sze_GT,l2_fsze_GT,l1_sze_GT,l1_fsze_GT,l2_usze_G, th_usze_G,l2_ufsze_G, th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)", "def writeSegy(filename, Data, dt=1000, STHin={}, SHin={}):\n\n #printverbose(\"writeSegy : Trying to write \" + filename, 0)\n\n N = Data.shape\n ns = N[0]\n ntraces = N[1]\n # print(ntraces)\n # print(ns)\n\n if not len(SHin):\n SH = getDefaultSegyHeader(ntraces, ns, dt)\n else:\n SH = SHin\n if not len(STHin):\n STH = getDefaultSegyTraceHeaders(ntraces, ns, dt)\n else: \n STH = STHin \n \n\n writeSegyStructure(filename, Data, SH, STH)", "def create_source(\n source_id,\n source_table,\n source_lines,\n origin_params,\n cube_cor_filename,\n cube_std_filename,\n mask_filename,\n skymask_filename,\n spectra_fits_filename,\n segmaps,\n version,\n source_ts,\n profile_fwhm,\n *,\n author=\"\",\n nb_fwhm=2,\n expmap_filename=None,\n save_to=None,\n):\n logger = logging.getLogger(__name__)\n\n # [0] is to get a Row not a table.\n source_table = source_table.filled()\n source_info = source_table[source_table[\"ID\"] == source_id][0]\n\n # The mask size is used for the cut-out size.\n mask = Image(mask_filename)\n mask_size = mask.shape[0]\n\n data_cube = Cube(origin_params[\"cubename\"], convert_float64=False)\n\n origin = (\n \"ORIGIN\",\n origin_version,\n os.path.basename(origin_params[\"cubename\"]),\n data_cube.primary_header.get(\"CUBE_V\", \"\"),\n )\n source = Source.from_data(\n source_info[\"ID\"], source_info[\"ra\"], source_info[\"dec\"], origin\n )\n\n # Information about the source in the headers\n source.header[\"SRC_V\"] = version, \"Source version\"\n source.header[\"SRC_TS\"] = source_ts, \"Timestamp of the source creation\"\n source.header[\"CAT3_TS\"] = (\n source_table.meta[\"CAT3_TS\"],\n \"Timestamp of the catalog creation\",\n )\n source.add_history(\"Source created with ORIGIN\", author)\n\n source.header[\"OR_X\"] = source_info[\"x\"], \"x position in pixels\"\n source.header[\"OR_Y\"] = source_info[\"y\"], \"y position in pixels\"\n source.header[\"OR_SEG\"] = (\n source_info[\"seg_label\"],\n \"Label in the segmentation map\",\n )\n source.header[\"OR_V\"] = origin_version, \"ORIGIN version\"\n source.header[\"OR_FLUX\"] = source_info[\"flux\"], \"flux maximum in all lines\"\n source.header[\"OR_PMAX\"] = (source_info[\"purity\"], \"maximum purity in all lines\")\n\n if not np.isnan(source_info[\"STD\"]):\n source.header[\"OR_STD\"] = (source_info[\"STD\"], \"STD max value in all lines\")\n\n if not np.isnan(source_info[\"nsigSTD\"]):\n source.header[\"OR_nSTD\"] = (\n source_info[\"nsigSTD\"],\n \"max of STD/std(STD) in all lines\",\n )\n\n if not np.isnan(source_info[\"T_GLR\"]):\n source.header[\"OR_TGLR\"] = (\n source_info[\"T_GLR\"],\n \"T_GLR max value in all lines\",\n )\n if not np.isnan(source_info[\"nsigTGLR\"]):\n source.header[\"OR_nTGLR\"] = (\n 
source_info[\"nsigTGLR\"],\n \"max of T_GLR/std(T_GLR) in all lines\",\n )\n\n # source_header_keyword: (key_in_origin_param, description)\n parameters_to_add = {\n \"OR_PROF\": (\"profiles\", \"OR input, spectral profiles\"),\n \"OR_FSF\": (\"PSF\", \"OR input, FSF cube\"),\n \"OR_THL%02d\": (\"threshold_list\", \"OR input threshold per area\"),\n \"OR_NA\": (\"nbareas\", \"OR number of areas\"),\n \"preprocessing\": {\"OR_DCT\": (\"dct_order\", \"OR input, DCT order\")},\n \"areas\": {\n \"OR_PFAA\": (\"pfa\", \"OR input, PFA used to create the area map\"),\n \"OR_SIZA\": (\"maxsize\", \"OR input, maximum area size in pixels\"),\n \"OR_MSIZA\": (\"minsize\", \"OR input, minimum area size in pixels\"),\n },\n \"compute_PCA_threshold\": {\"OR_PFAT\": (\"pfa_test\", \"OR input, PFA test\")},\n \"compute_greedy_PCA\": {\n \"OR_FBG\": (\"Noise_population\", \"OR input: fraction of spectra estimated\"),\n \"OR_ITMAX\": (\"itermax\", \"OR input, maximum number of iterations\"),\n },\n \"compute_TGLR\": {\"OR_NG\": (\"size\", \"OR input, connectivity size\")},\n \"detection\": {\n \"OR_DXY\": (\"tol_spat\", \"OR input, spatial tolerance for merging (pix)\"),\n \"OR_DZ\": (\"tol_spec\", \"OR input, spectral tolerance for merging (pix)\"),\n },\n \"compute_spectra\": {\"OR_NXZ\": (\"grid_dxy\", \"OR input, grid Nxy\")},\n }\n\n def add_keyword(keyword, param, description, params):\n if param == \"threshold_list\" and param in params:\n for idx, threshold in enumerate(params[\"threshold_list\"]):\n source.header[keyword % idx] = (float(\"%0.2f\" % threshold), description)\n elif param in params:\n if params[param] is None:\n source.header[keyword] = \"\", description\n else:\n source.header[keyword] = params[param], description\n else:\n logger.debug(\"Parameter %s absent of the parameter list.\", param)\n\n for keyword, val in parameters_to_add.items():\n if isinstance(val, dict) and keyword in origin_params:\n for key, val2 in val.items():\n add_keyword(key, *val2, origin_params[keyword][\"params\"])\n else:\n add_keyword(keyword, *val, origin_params)\n\n source.header[\"COMP_CAT\"] = (\n source_info[\"comp\"],\n \"1/0 (1=Pre-detected in STD, 0=detected in CORREL)\",\n )\n\n if source.COMP_CAT:\n threshold_keyword, purity_keyword = \"threshold_std\", \"purity_std\"\n else:\n threshold_keyword, purity_keyword = \"threshold\", \"purity\"\n source.header[\"OR_TH\"] = (\n float(\"%0.2f\" % origin_params[threshold_keyword]),\n \"OR input, threshold\",\n )\n source.header[\"OR_PURI\"] = (\n float(\"%0.2f\" % origin_params[purity_keyword]),\n \"OR input, purity\",\n )\n\n # Mini-cubes\n source.add_cube(\n data_cube, \"MUSE_CUBE\", size=mask_size, unit_size=None, add_white=True\n )\n # Add FSF with the full cube, to have the same shape as fieldmap, then we\n # can work directly with the subcube\n has_fsf = True\n try:\n source.add_FSF(data_cube, fieldmap=origin_params[\"fieldmap\"])\n except:\n logger.debug('No FSF information found in the cube')\n has_fsf = False\n data_cube = source.cubes[\"MUSE_CUBE\"]\n\n if source.COMP_CAT:\n cube_ori = Cube(cube_std_filename, convert_float64=False)\n source.add_cube(cube_ori, \"ORI_SNCUBE\", size=mask_size, unit_size=None)\n cube_ori = source.cubes[\"ORI_SNCUBE\"]\n else:\n cube_ori = Cube(cube_cor_filename, convert_float64=False)\n source.add_cube(cube_ori, \"ORI_CORREL\", size=mask_size, unit_size=None)\n cube_ori = source.cubes[\"ORI_CORREL\"]\n\n # Table of sources around the exported sources.\n radius = mask_size / 2\n x_min, x_max = source_info[\"x\"] - 
radius, source_info[\"x\"] + radius\n y_min, y_max = source_info[\"y\"] - radius, source_info[\"y\"] + radius\n nearby_sources = (\n (source_table[\"x\"] >= x_min)\n & (source_table[\"x\"] <= x_max)\n & (source_table[\"y\"] >= y_min)\n & (source_table[\"y\"] <= y_max)\n )\n source.tables[\"ORI_CAT\"] = source_table[\"ID\", \"ra\", \"dec\"][nearby_sources]\n\n # Maps\n # The white map was added when adding the MUSE cube.\n source.images[\"ORI_MAXMAP\"] = cube_ori.max(axis=0)\n # Using add_image, the image size is taken from the white map.\n source.add_image(mask, \"ORI_MASK_OBJ\")\n source.add_image(Image(skymask_filename), \"ORI_MASK_SKY\")\n for segmap_type, segmap_filename in segmaps.items():\n source.add_image(Image(segmap_filename), \"ORI_SEGMAP_%s\" % segmap_type)\n if expmap_filename is not None:\n source.add_image(Image(expmap_filename), \"EXPMAP\")\n\n # Full source spectra\n source.extract_spectra(\n data_cube, obj_mask=\"ORI_MASK_OBJ\", sky_mask=\"ORI_MASK_SKY\", skysub=True\n )\n source.extract_spectra(\n data_cube, obj_mask=\"ORI_MASK_OBJ\", sky_mask=\"ORI_MASK_SKY\", skysub=False\n )\n if source.COMP_CAT:\n source.spectra[\"ORI_CORR\"] = (\n source.cubes[\"ORI_SNCUBE\"] * source.images[\"ORI_MASK_OBJ\"]\n ).mean(axis=(1, 2))\n else:\n source.spectra[\"ORI_CORR\"] = (\n source.cubes[\"ORI_CORREL\"] * source.images[\"ORI_MASK_OBJ\"]\n ).mean(axis=(1, 2))\n\n # Add the FSF information to the source and use this information to compute\n # the PSF weighted spectra.\n if has_fsf:\n try:\n fsfmodel = source.get_FSF()\n fwhm_fsf = fsfmodel.get_fwhm(data_cube.wave.coord())\n beta_fsf = fsfmodel.get_beta(data_cube.wave.coord())\n source.extract_spectra(\n data_cube,\n obj_mask=\"ORI_MASK_OBJ\",\n sky_mask=\"ORI_MASK_SKY\",\n skysub=True,\n psf=fwhm_fsf,\n beta=beta_fsf,\n )\n source.extract_spectra(\n data_cube,\n obj_mask=\"ORI_MASK_OBJ\",\n sky_mask=\"ORI_MASK_SKY\",\n skysub=False,\n psf=fwhm_fsf,\n beta=beta_fsf,\n )\n except:\n # WIP to work with the new FSF model\n has_fsf = False\n\n # Per line data: the line table, the spectrum of each line, the narrow band\n # map from the data and from the correlation cube.\n # Content of the line table in the source\n line_columns, line_units, line_fmt = zip(\n *[\n (\"NUM_LINE\", None, None),\n (\"RA_LINE\", u.deg, \".2f\"),\n (\"DEC_LINE\", u.deg, \".2f\"),\n (\"LBDA_OBS\", u.Angstrom, \".2f\"),\n (\"FWHM\", u.Angstrom, \".2f\"),\n (\"FLUX\", u.erg / (u.s * u.cm ** 2), \".1f\"),\n (\"GLR\", None, \".1f\"),\n (\"nGLR\", None, \".1f\"),\n (\"PROF\", None, None),\n (\"PURITY\", None, \".2f\"),\n ]\n )\n\n # If the line is a complementary one, the GLR column is replace by STD\n if source.COMP_CAT:\n line_columns = list(line_columns)\n line_columns[6] = \"STD\"\n line_columns[7] = \"nSTD\"\n\n # We put all the ORIGIN lines in an ORI_LINES tables but keep only the\n # unique lines in the LINES tables.\n source.add_table(source_lines, \"ORI_LINES\", select_in=None, col_dist=None)\n\n # Table containing the information on the narrow band images.\n nb_par_rows = []\n\n hdulist = fits.open(spectra_fits_filename)\n\n for line in source_lines[source_lines[\"merged_in\"] == -9999]:\n num_line, lbda_ori, prof = line[[\"num_line\", \"lbda\", \"profile\"]]\n fwhm_ori = profile_fwhm[prof] * data_cube.wave.get_step(unit=u.Angstrom)\n if source.COMP_CAT:\n glr_std = line[\"STD\"]\n nglr_std = line[\"nsigSTD\"]\n else:\n glr_std = line[\"T_GLR\"]\n nglr_std = line[\"nsigTGLR\"]\n\n source.add_line(\n cols=line_columns,\n values=[\n num_line,\n line[\"ra\"],\n 
line[\"dec\"],\n lbda_ori,\n fwhm_ori,\n line[\"flux\"],\n glr_std,\n nglr_std,\n prof,\n line[\"purity\"],\n ],\n units=line_units,\n fmt=line_fmt,\n desc=None,\n )\n\n if f\"DATA{num_line}\" in hdulist: # RB add test\n source.spectra[f\"ORI_SPEC_{num_line}\"] = Spectrum(\n hdulist=hdulist,\n ext=(f\"DATA{num_line}\", f\"STAT{num_line}\"),\n convert_float64=False,\n )\n\n source.add_narrow_band_image_lbdaobs(\n data_cube,\n f\"NB_LINE_{num_line}\",\n lbda=lbda_ori,\n width=nb_fwhm * fwhm_ori,\n method=\"sum\",\n subtract_off=True,\n margin=10.0,\n fband=3.0,\n )\n\n nb_par_rows.append(\n [f\"NB_LINE_{num_line}\", lbda_ori, nb_fwhm * fwhm_ori, 10.0, 3.0]\n )\n\n source.add_narrow_band_image_lbdaobs(\n cube_ori,\n f\"ORI_CORR_{num_line}\",\n lbda=lbda_ori,\n width=nb_fwhm * fwhm_ori,\n method=\"max\",\n subtract_off=False,\n )\n\n # Compute the spectra weighted by the correlation map for the\n # current line\n tags = [f\"ORI_CORR_{num_line}\"]\n source.extract_spectra(\n data_cube,\n obj_mask=\"ORI_MASK_OBJ\",\n sky_mask=\"ORI_MASK_SKY\",\n skysub=True,\n tags_to_try=tags,\n )\n source.extract_spectra(\n data_cube,\n obj_mask=\"ORI_MASK_OBJ\",\n sky_mask=\"ORI_MASK_SKY\",\n skysub=False,\n tags_to_try=tags,\n )\n\n # set REFSPEC to the spectrum weighted by the correlation map of the\n # brightest line\n num_max = source.lines[\"NUM_LINE\"][np.argmax(source.lines[\"FLUX\"])]\n source.header[\"REFSPEC\"] = f\"ORI_CORR_{num_max}_SKYSUB\"\n\n hdulist.close()\n\n nb_par = Table(\n names=[\"LINE\", \"LBDA\", \"WIDTH\", \"MARGIN\", \"FBAND\"],\n dtype=[\"U20\", float, float, float, float],\n rows=nb_par_rows,\n )\n source.add_table(nb_par, \"NB_PAR\", select_in=None, col_dist=None)\n\n if save_to is not None:\n source.write(save_to)\n else:\n return source", "def generate_stl(idx=None): \n run_cmd(\"vsp -script scripts/exportstl.vscript\")\n offset_zaxis(15.0)\n\n if idx == None:\n planename = \"planes/plane.png\"\n else:\n planename = \"planes/plane\"+str(idx)+\".png\"\n\n run_cmd(\"openscad scripts/genpng.scad --imgsize=500,500 -o \"+planename)", "def create_datafile(datasource, ticlist, dest_basename):\n def get_gvkeys_from_ticlist(ticlist): #TODO: use actual gvkeys\n \"\"\"\n Returns 'gvkeys' from ticlist.dat as a sorted list.\n\n NOTE: Right now, 'gvkeys' are not the actual gvkeys that you'd see in\n Compustat. 
Instead, they're unique identifiers constructed by concatenating\n a numeric id for the exchange (1 for Nasdaq, 2 for NYSE) with the ticker\n name.\n \"\"\"\n ticlist_filepath = os.path.join(DATASETS_PATH, ticlist)\n\n if os.path.isfile(ticlist_filepath):\n ticlist_df = pd.read_csv(ticlist_filepath, sep=' ', header=None)\n gvkeys = list()\n for line in ticlist_df.values:\n if line[1] == 'Nasdaq':\n gvkeys.append('1'+line[0])\n elif line[1] == 'NYSE':\n gvkeys.append('2'+line[0])\n else:\n gvkeys.append('9'+line[0]) # TODO: is that best way to handle\n # unrecognized market?\n else:\n gvkeys = list()\n \n return gvkeys\n\n def shave_open_dataset(ticlist, dest):\n \"\"\"\n Shaves wanted data (in terms of tics and features only; the shaving by\n dates is done by BatchGenerator's constructor), stores shaved .dat file\n at dest.\n\n NOTE: shaving by features not implemented yet, will rely on a\n feat_map.txt file.\n \"\"\"\n gvkeys = get_gvkeys_from_ticlist(ticlist)\n open_df = pd.read_csv(OPEN_DF_PATH, sep=' ', dtype={'gvkey': str})\n shaved_df = open_df[open_df.gvkey.isin(gvkeys)]\n shaved_df.to_csv(dest, sep=' ', index=False)\n\n def write_WRDS_data(dest):\n \"\"\"\n Writes .dat file using data from WRDS.\n \"\"\"\n raise NotImplementedError(\"Sorry! WRDS integration not ready.\") # TODO\n\n dest = get_data_path(DATASETS_PATH, dest_basename)\n\n if datasource == \"open_dataset\":\n shave_open_dataset(ticlist, dest)\n elif datasource == \"WRDS\":\n write_WRDS_data(ticlist, dest)\n else:\n raise Exception(\"Unknown datasource.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempt to repair a fragmented skeleton into a single connected component. Rather than a single tree, skeletons from neuprint sometimes consist of multiple fragments, i.e. multiple connected components. That's due to artifacts in the underlying segmentation from which the skeletons were generated. In such skeletons, there will be multiple 'root' nodes (SWC rows where ``link == -1``). This function 'heals' a fragmented skeleton by joining its fragments into a single tree. First, each fragment is joined to every other fragment at their nearest points. The resulting graph has unnecessary edges, which are then removed by extracting the minimum spanning tree. The MST is returned as the healed skeleton.
def heal_skeleton(skeleton_df, max_distance=np.inf, root_parent=None): if max_distance is True: max_distance = np.inf if not max_distance: max_distance = 0.0 if root_parent is None: root_parent = -1 else: # Fast path to exit early if we can easily check the number of roots. num_roots = (skeleton_df['link'] == root_parent).sum() if num_roots == 1: # There's only one root and therefore only one component. # No healing necessary. return skeleton_df skeleton_df = skeleton_df.sort_values('rowId', ignore_index=True) g = skeleton_df_to_nx(skeleton_df, False, False) # Extract each fragment's rows and construct a KD-Tree Fragment = namedtuple('Fragment', ['frag_id', 'df', 'kd']) fragments = [] for frag_id, cc in enumerate(nx.connected_components(g)): if len(cc) == len(skeleton_df): # There's only one component -- no healing necessary return skeleton_df df = skeleton_df.query('rowId in @cc') kd = cKDTree(df[[*'xyz']].values) fragments.append( Fragment(frag_id, df, kd) ) # Sort from big-to-small, so the calculations below use a # KD tree for the larger point set in every fragment pair. fragments = sorted(fragments, key=lambda frag: -len(frag.df)) # We could use the full graph and connect all # fragment pairs at their nearest neighbors, # but it's faster to treat each entire fragment as # a single node and run MST on that quotient graph, # which is tiny. frag_graph = nx.Graph() for frag_a, frag_b in combinations(fragments, 2): coords_b = frag_b.df[[*'xyz']].values distances, indexes = frag_a.kd.query(coords_b) index_b = np.argmin(distances) index_a = indexes[index_b] node_a = frag_a.df['rowId'].iloc[index_a] node_b = frag_b.df['rowId'].iloc[index_b] dist_ab = distances[index_b] # Add edge from one fragment to another, # but keep track of which fine-grained skeleton # nodes were used to calculate distance. frag_graph.add_edge( frag_a.frag_id, frag_b.frag_id, node_a=node_a, node_b=node_b, distance=dist_ab ) # Compute inter-fragment MST edges frag_edges = nx.minimum_spanning_edges(frag_graph, weight='distance', data=True) # For each inter-fragment edge, add the corresponding # fine-grained edge between skeleton nodes in the original graph. omit_edges = [] for _u, _v, d in frag_edges: g.add_edge(d['node_a'], d['node_b']) if d['distance'] > max_distance: omit_edges.append((d['node_a'], d['node_b'])) # Traverse in depth-first order to compute edges for final tree root = skeleton_df['rowId'].iloc[0] # Replace 'link' (parent) column using MST edges _reorient_skeleton(skeleton_df, root, root_parent, g=g) assert (skeleton_df['link'] == root_parent).sum() == 1 assert skeleton_df['link'].iloc[0] == root_parent # Delete edges that violated max_distance if omit_edges: # Make sure this is invariant to edge direction (check both directions). omit_edges = omit_edges + [(b, a) for (a, b) in omit_edges] omit_df = pd.DataFrame(omit_edges, columns=['rowId', 'link']) omit_df['omit_link'] = -1 # Remove links for omitted edges (convert child node to a new root). skeleton_df = skeleton_df.merge(omit_df, 'left', on=['rowId', 'link']) skeleton_df['link'].update(skeleton_df['omit_link']) del skeleton_df['omit_link'] return skeleton_df
[ "def extract_graph_from_skeleton(sk): \n #used/unsused\n sk_used = np.zeros_like(sk)\n sk_unused = np.copy(sk)\n #root node\n root_position = findroot(sk)\n print('root_position',root_position)\n root = Branch(pixels=[root_position],name='root')\n setvalue(sk_used,root_position,1)\n setvalue(sk_unused,root_position,0)\n #extract rood edge\n edgelist,branchlist,endlist = next_pixels(root_position,sk_used,sk_unused)\n #assert len(edgelist)==1,'root has more than 1 branchedge'################!!!!!!!!\n rootedge = BranchEdge(edgelist[:1])\n while True:\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if edgelist:\n rootedge.add_pixels(edgelist)\n else:\n break\n assert len(branchlist)>=1,'root has no children'\n #first node(perhaps split LM and RM)\n branch1 = Branch(pixels=branchlist)\n root.add_child(branch1,rootedge)\n branch_startpoint_list = [branch1]##BFS\n edge_startpoint_list = []\n while branch_startpoint_list:\n branch1 = branch_startpoint_list.pop(0)\n edgelist,branchlist,endlist = next_pixels(branch1.pixels[0],sk_used,sk_unused)\n edge_startpoint_list = edgelist\n branch_cumulate_list = branchlist\n while branch_cumulate_list:#cumulate all the branch pixels(>3)\n bposition = branch_cumulate_list.pop(0)\n branch1.add_pixel(bposition)\n edgelist,branchlist,endlist = next_pixels(bposition,sk_used,sk_unused)\n edge_startpoint_list += edgelist\n branch_cumulate_list += branchlist\n #for each connected edge start,trace until next node\n for edge in edge_startpoint_list:\n branchedge1 = BranchEdge([edge])\n edgelist,branchlist,endlist = next_pixels(edge,sk_used,sk_unused)\n while edgelist:#trace until next node\n #print('edgelist',edgelist)\n branchedge1.add_pixels(edgelist)\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if branchlist:#next branch\n branch2 = Branch(pixels=branchlist)\n ##if branchedge too short, do nothing\n branch1.add_child(branch2,branchedge1)\n branch_startpoint_list.append(branch2)\n elif endlist:#end node\n branch2 = Branch(pixels=endlist)\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n else:#end without endlist (pixel value=3)\n branch2 = Branch(pixels=branchedge1.pixels[-1:])\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n #if this branch has only one edge, merge(may throw assert error)\n if len(branch1.edges) == 1:\n branch1.edges[0].endbracnch.rank-=1\n branch1.parent_edge.endbracnch = branch1.edges[0].endbracnch\n branch1.parent_edge.add_pixels_nocontinious(branch1.pixels)\n branch1.parent_edge.add_pixels(branch1.edges[0].pixels)\n branch1.edges[0].endbracnch.parent_edge = branch1.parent_edge\n return root", "def __cut_short_skeleton_terminal_edges(self, cut_ratio=2.0):\n\n def remove_elm(elm_id, elm_neigh, elm_box, sklabel):\n sklabel[sklabel == elm_id] = 0\n del elm_neigh[elm_id]\n del elm_box[elm_id]\n for elm in elm_neigh:\n elm_neigh[elm] = [x for x in elm_neigh[elm] if x != elm]\n return elm_neigh, elm_box, sklabel\n\n len_edg = np.max(self.sklabel)\n len_node = np.min(self.sklabel)\n logger.debug(\"len_edg: \" + str(len_edg) + \" len_node: \" + str(len_node))\n\n # get edges and nodes that are near the edge. 
(+bounding box)\n logger.debug(\"skeleton_analysis: starting element_neighbors processing\")\n self.elm_neigh = {}\n self.elm_box = {}\n for edg_number in list(range(len_node, 0)) + list(range(1, len_edg + 1)):\n self.elm_neigh[edg_number], self.elm_box[\n edg_number\n ] = self.__element_neighbors(edg_number)\n logger.debug(\"skeleton_analysis: finished element_neighbors processing\")\n # clear unneeded data. IMPORTANT!!\n\n self.__clean_shifted()\n # remove edges+nodes that are not connected to rest of the skeleton\n logger.debug(\n \"skeleton_analysis: Cut - Removing edges that are not\"\n + \" connected to rest of the skeleton (not counting its nodes)\"\n )\n cut_elm_neigh = dict(self.elm_neigh)\n cut_elm_box = dict(self.elm_box)\n for elm in self.elm_neigh:\n elm = int(elm)\n if elm > 0: # if edge\n conn_nodes = [i for i in self.elm_neigh[elm] if i < 0]\n conn_edges = []\n for n in conn_nodes:\n if n in self.elm_neigh:\n nn = self.elm_neigh[n] # get neighbours elements of node\n else:\n logger.debug(f\"Node {str(n)} not found! May be already deleted.\")\n continue\n\n for (e) in (nn): # if there are other edges connected to node add them to conn_edges\n if e > 0 and e not in conn_edges and e != elm:\n conn_edges.append(e)\n\n if (len(conn_edges) == 0): # if no other edges are connected to nodes, remove from skeleton\n logger.debug(f\"removing edge {str(elm)} with its nodes {str(self.elm_neigh[elm])}\")\n for night in self.elm_neigh[elm]:\n remove_elm(night, cut_elm_neigh, cut_elm_box, self.sklabel)\n self.elm_neigh = cut_elm_neigh\n self.elm_box = cut_elm_box\n\n # remove elements that are not connected to the rest of skeleton\n logger.debug(\"skeleton_analysis: Cut - Removing elements that are not connected to rest of the skeleton\")\n cut_elm_neigh = dict(self.elm_neigh)\n cut_elm_box = dict(self.elm_box)\n for elm in self.elm_neigh:\n elm = int(elm)\n if len(self.elm_neigh[elm]) == 0:\n logger.debug(f\"removing element {str(elm)}\")\n remove_elm(elm, cut_elm_neigh, cut_elm_box, self.sklabel)\n self.elm_neigh = cut_elm_neigh\n self.elm_box = cut_elm_box\n\n # get list of terminal nodes\n logger.debug(\"skeleton_analysis: Cut - get list of terminal nodes\")\n terminal_nodes = []\n for elm in self.elm_neigh:\n if elm < 0: # if node\n conn_edges = [i for i in self.elm_neigh[elm] if i > 0]\n if len(conn_edges) == 1: # if only one edge is connected\n terminal_nodes.append(elm)\n\n # init radius analysis\n logger.debug(\"__radius_analysis_init\")\n if self.volume_data is not None:\n skdst = self.__radius_analysis_init()\n\n # removes end terminal edges based on radius/length ratio\n logger.debug(\n \"skeleton_analysis: Cut - Removing bad terminal edges based on\"\n + \" radius/length ratio\"\n )\n cut_elm_neigh = dict(self.elm_neigh)\n cut_elm_box = dict(self.elm_box)\n for tn in terminal_nodes:\n te = [i for i in self.elm_neigh[tn] if i > 0][0] # terminal edge\n radius = float(self.__radius_analysis(te, skdst))\n edgst = self.__connection_analysis(int(te))\n edgst = self.__ordered_points_with_pixel_length(edg_number, edg_stats=edgst)\n edgst.update(self.__edge_length(edg_number, edgst))\n length = edgst[\"lengthEstimation\"]\n\n # logger.debug(str(radius / float(length))+\" \"+str(radius)+\" \"+str(length))\n if (radius / float(length)) > cut_ratio:\n logger.debug(f\"removing edge {str(te)} with its terminal node.\")\n remove_elm(elm, cut_elm_neigh, cut_elm_box, self.sklabel)\n self.elm_neigh = cut_elm_neigh\n self.elm_box = cut_elm_box\n\n 
self.__check_nodes_to_be_just_curve_from_elm_neig()\n\n # regenerate new nodes and edges from cut skeleton (sklabel)\n logger.debug(\"Regenerate new nodes and edges from cut skeleton\")\n self.sklabel[self.sklabel != 0] = 1\n skelet_nodes = self.__skeleton_nodes(self.sklabel)\n self.sklabel = self.__generate_sklabel(skelet_nodes)", "def _process_mst(min_spanning_tree):\n # Sort edges of the min_spanning_tree by weight\n row_order = np.argsort(min_spanning_tree[\"distance\"])\n min_spanning_tree = min_spanning_tree[row_order]\n # Convert edge list into standard hierarchical clustering format\n return make_single_linkage(min_spanning_tree)", "def reassemble(self, head=True):\n if not head:\n self.path.reverse()\n\n head_neighbors = list(self.graph.neighbors(self.path[0]))\n np.random.shuffle(head_neighbors)\n for q in head_neighbors:\n if q != self.path[1] and q in self.path:\n new_path = self.path.copy()\n q_index = new_path.index(q)\n new_path[:q_index] = reversed(new_path[:q_index])\n return Snake(self.graph, new_path, self.weight_fun)\n\n if not head:\n # reverse back so you can try other moves in same orientation\n self.path.reverse()\n\n return self # wasn't able to reassemble", "def make_minimal_spanning_tree(self, root):\r\n # Reset the network.\r\n self.reset_network()\r\n\r\n # The total cost of the links in the spanning tree.\r\n total_cost = 0\r\n\r\n # Add the root node's links to the link candidate list.\r\n candidate_links = []\r\n for link in root.links:\r\n candidate_links.append(link)\r\n\r\n # Visit the root node.\r\n root.visited = True\r\n\r\n # Process the list until it's empty.\r\n while len(candidate_links) > 0:\r\n # Find the link with the lowest cost.\r\n best_link = candidate_links[0]\r\n best_cost = best_link.cost\r\n for i in range(1, len(candidate_links)):\r\n if candidate_links[i].cost < best_cost:\r\n # Save this improvement.\r\n best_link = candidate_links[i]\r\n best_cost = best_link.cost\r\n\r\n # Remove the link from the list.\r\n candidate_links.remove(best_link)\r\n\r\n # Get the node at the other end of the link.\r\n to_node = best_link.node1\r\n\r\n # See if the link's node is still unmarked.\r\n if not to_node.visited:\r\n # Use the link.\r\n best_link.visited = True\r\n total_cost += best_link.cost\r\n to_node.visited = True\r\n\r\n # Record the node that got us here.\r\n to_node.from_node = best_link.node0\r\n\r\n # Process to_node's links.\r\n for new_link in to_node.links:\r\n # If the node hasn't been visited,\r\n # add the link to the list.\r\n if not new_link.node1.visited:\r\n candidate_links.append(new_link)\r\n\r\n # See if the network is connected.\r\n is_connected = True\r\n for node in self.all_nodes:\r\n if not node.visited:\r\n is_connected = False\r\n break\r\n\r\n return total_cost, is_connected", "def _fetch_hemibrain_skeleton(hemi_body):\n from requests import HTTPError\n from tqdm import tqdm\n from neuclease.dvid import fetch_skeleton\n\n try:\n df = fetch_skeleton(*Hemibrain_v12, 'segmentation_skeletons', hemi_body, 'pandas')\n df['hemibrain_body'] = hemi_body\n df['source'] = 'skeleton'\n return df\n except HTTPError:\n with tqdm.external_write_mode():\n logger.error(f\"Failed to fetch skeleton for body {hemi_body}\")\n return None", "def prune_branches(skeleton):\n for he in skeleton.half_edges.values():\n assert he.face.id is not None, he.id\n assert he.twin.face.id is not None, he.id\n # remove edges that have the same face on both sides\n remove = set()\n for he in skeleton.half_edges.values():\n if he.face is 
he.twin.face:\n remove.add(he.id)\n for edge_id in remove:\n skeleton.remove_edge(edge_id, remove_nodes=True)", "def _brute_mst(mutual_reachability, min_samples):\n if not issparse(mutual_reachability):\n return mst_from_mutual_reachability(mutual_reachability)\n\n # Check connected component on mutual reachability\n # If more than one component, it means that even if the distance matrix X\n # has one component, there exists with less than `min_samples` neighbors\n if (\n csgraph.connected_components(\n mutual_reachability, directed=False, return_labels=False\n )\n > 1\n ):\n raise ValueError(\n f\"There exists points with fewer than {min_samples} neighbors. Ensure\"\n \" your distance matrix has non-zero values for at least\"\n f\" `min_sample`={min_samples} neighbors for each points (i.e. K-nn\"\n \" graph), or specify a `max_distance` in `metric_params` to use when\"\n \" distances are missing.\"\n )\n\n # Compute the minimum spanning tree for the sparse graph\n sparse_min_spanning_tree = csgraph.minimum_spanning_tree(mutual_reachability)\n rows, cols = sparse_min_spanning_tree.nonzero()\n mst = np.core.records.fromarrays(\n [rows, cols, sparse_min_spanning_tree.data],\n dtype=MST_edge_dtype,\n )\n return mst", "def upsample_skeleton(skeleton_df, max_segment_length):\n if len(skeleton_df) in (0, 1) or (skeleton_df['link'] == -1).all():\n # Can't upsample a skeleton with no child-parent segments\n return skeleton_df\n\n seg_df = _skeleton_segments(skeleton_df)\n seg_df = seg_df.loc[seg_df['length'] > max_segment_length]\n\n if len(seg_df) == 0:\n return skeleton_df\n\n I0 = seg_df['rowId']\n I1 = seg_df['rowId_parent']\n next_id = 1 + skeleton_df['rowId'].max()\n\n # It's best to minimize the number of times we call np.linspace(),\n # so we interpolate points and radii in conjunction with a single array.\n PR0 = seg_df[[*'xyz', 'radius']].values\n PR1 = seg_df[['x_parent', 'y_parent', 'z_parent', 'radius_parent']].values\n\n D = seg_df['length']\n\n segment_nodes = []\n for i0, i1, pr0, pr1, d in zip(I0, I1, PR0, PR1, D):\n # Number of nodes from child (i0) to parent (i1)\n # excluding the parent (which we won't edit).\n n = int(np.ceil(d / max_segment_length))\n\n # IDs of the original child and new intermediates going towards\n # the original parent, but not the parent itself.\n I = [i0, *range(next_id, next_id + n - 1)] # noqa\n next_id += n - 1\n\n # 'link' (parent id) for the original child and new intermediates\n L = I[1:] + [i1]\n\n # Interpolate points and radii\n PR = np.linspace(pr0, pr1, n, endpoint=False)\n\n assert len(PR) == len(I) == len(L)\n segment_nodes.append((I, *PR.T, L))\n\n segment_cols = [*zip(*segment_nodes)]\n full_cols = [np.concatenate(a) for a in segment_cols]\n new_df = pd.DataFrame(dict(zip(['rowId', *'xyz', 'radius', 'link'], full_cols)))\n\n # Expand the DataFrame to make room for the new rows,\n # then copy them over.\n all_rowIds = np.sort(pd.concat((skeleton_df['rowId'], new_df['rowId'])).unique())\n dtypes = skeleton_df.dtypes\n skeleton_df = skeleton_df.set_index('rowId').reindex(all_rowIds)\n skeleton_df.update(new_df.set_index('rowId'))\n\n # Restore to standard column form.\n return skeleton_df.reset_index().astype(dtypes)", "def gen_maze_min_spanning_tree(algorithm='kruskal'):\n G = nx.grid_graph(DIM)\n for (u, v) in G.edges():\n G.edges[u, v]['weight'] = np.random.random() # Change distributions?\n return nx.algorithms.minimum_spanning_tree(G, weight='weight', \\\n algorithm=algorithm)", "def check_skeleton(self):\n\n 
assert_equal(simplicial_mesh(array([[0]]),array([[0]])).skeleton(0), \\\n set([simplex([0])])) \n assert_equal(simplicial_mesh(array([[0],[1]]),array([[0,1]])).skeleton(0), \\\n set([simplex([0]),simplex([1])])) \n assert_equal(simplicial_mesh(array([[0],[1]]),array([[0,1]])).skeleton(1), \\\n set([simplex([0,1])]))\n assert_equal(simplicial_mesh(array([[0],[1],[2]]),array([[0,1],[1,2]])).skeleton(0),\\\n set([simplex([0]),simplex([1]),simplex([2])]))\n assert_equal(simplicial_mesh(array([[0],[1],[2]]),array([[0,1],[1,2]])).skeleton(1),\\\n set([simplex([0,1]),simplex([1,2])]))\n assert_equal(simplicial_mesh(array([[0,0],[1,0],[0,1]]),array([[0,1,2]])).skeleton(1),\\\n set([simplex([0,1]),simplex([1,2]),simplex([2,0])]))", "def remove_single_nodes(treenodes: pd.DataFrame):\n skids, counts = np.unique(treenodes[\"skeleton_id\"], return_counts=True)\n single_tns = skids[counts == 1]\n to_drop = np.zeros(len(treenodes), bool)\n for skid in single_tns:\n to_drop |= treenodes[\"skeleton_id\"] == skid\n return treenodes.loc[~to_drop].copy()", "def truncate_graph(self) -> None:\n last_atom_idx = self.n_nodes - 1\n\n if self.n_nodes == 1:\n # remove the last atom\n self.node_features[last_atom_idx, :] = 0\n self.n_nodes -= 1\n else:\n # determine how many bonds on the least important atom\n bond_idc = []\n for bond_type in range(self.constants.n_edge_features):\n bond_idc.extend(\n list(\n np.nonzero(self.edge_features[:, last_atom_idx, bond_type])[0]\n )\n )\n\n degree = len(bond_idc)\n\n if degree == 1:\n # delete atom from node features\n self.node_features[last_atom_idx, :] = 0\n self.n_nodes -= 1\n else: # if degree > 1\n # if the last atom is bound to multiple atoms, only delete the\n # least important bond, but leave the atom and remaining bonds\n bond_idc = bond_idc[-1] # mark bond for deletion (below)\n\n # delete bond from row feature tensor (first row, then column)\n self.edge_features[bond_idc, last_atom_idx, :] = 0\n self.edge_features[last_atom_idx, bond_idc, :] = 0", "def _minimum_rooted_branching(D, root):\n rooted = D.copy()\n # root the graph by removing all predecessors to `root`.\n rooted.remove_edges_from([(u, root) for u in D.predecessors(root)])\n # Then compute the branching / arborescence.\n A = nx.minimum_spanning_arborescence(rooted)\n return A", "def skeleton_image(folder, image_file, threshold=50, area_thresh=50, figsize=(10, 10), show=False):\n # Median filtered image.\n fname = '{}/{}'.format(folder, image_file)\n image0 = sio.imread(fname)\n image0 = np.ceil(255* (image0[:, :, 1] / image0[:, :, 1].max())).astype(int)\n image0 = skimage.filters.median(image0)\n filt = 'filt_{}.png'.format(image_file.split('.')[0])\n sio.imsave(folder+'/'+filt, image0)\n\n #threshold the image\n binary0 = binary_image(folder, filt, threshold=threshold, close=True, show=False)\n clean = 'clean_{}'.format(filt)\n\n #label image\n short_image, props = label_image(folder, clean, area_thresh=area_thresh, show=False)\n short = 'short_{}'.format(clean)\n short_image = short_image > 1\n # Skeletonize\n skeleton0 = skeletonize(short_image)\n\n branch_data = csr.summarise(skeleton0)\n branch_data_short = branch_data\n\n #Remove small branches\n mglia = branch_data['skeleton-id'].max()\n nbranches = []\n\n ncount = 0\n for i in range(1, mglia+1):\n bcount = branch_data[branch_data['skeleton-id']==i]['skeleton-id'].count()\n if bcount > 0:\n ids = branch_data.index[branch_data['skeleton-id']==i].tolist()\n nbranches.append(bcount)\n for j in range(0, len(ids)):\n 
branch_data_short.drop([ids[j]])\n\n ncount = ncount + 1\n if show:\n fig, ax = plt.subplots(figsize=(10, 10))\n draw.overlay_euclidean_skeleton_2d(image0, branch_data_short,\n skeleton_color_source='branch-type', axes=ax)\n plt.savefig('{}/skel_{}'.format(folder, short))\n\n return skeleton0, branch_data_short, nbranches, short_image, props", "def prune_skeleton(skel, skel_ep, prune_length=0.15):\n pruned_skel = np.copy(skel)\n prune_indices = np.transpose(np.nonzero(skel)).tolist()\n \n # Set pruning length\n length_of_trace = len(prune_indices)\n max_branch_length = int(length_of_trace * prune_length) # short branch limit\n \n # Identify all end points of all branches\n branch_ends = np.transpose(np.nonzero(skel_ep)).tolist()\n \n # Check for branch - and if it is delete it\n for x_b, y_b in branch_ends:\n branch_coordinates = [[x_b,y_b]]\n branch_continues = True\n temp_coordinates = prune_indices[:]\n temp_coordinates.pop(temp_coordinates.index([x_b,y_b])) # remove end point\n\n while branch_continues:\n tree = cKDTree(temp_coordinates)\n query_point = [x_b, y_b]\n no_of_neighbours = len(tree.query_ball_point(query_point, r=1.5))\n \n # If branch continues\n if no_of_neighbours == 1:\n _, ind = tree.query([x_b, y_b], distance_upper_bound=1.5)\n x_b, y_b = tree.data[ind].astype(int) # move one pixel\n branch_coordinates.append([x_b,y_b])\n temp_coordinates.pop(temp_coordinates.index([x_b,y_b]))\n\n # If the branch reaches the edge of the main trace\n elif no_of_neighbours > 1:\n branch_coordinates.pop(branch_coordinates.index([x_b,y_b]))\n branch_continues = False\n is_branch = True\n # Weird case that happens sometimes\n elif no_of_neighbours == 0:\n is_branch = True\n branch_continues = False\n\n if len(branch_coordinates) > max_branch_length:\n branch_continues = False\n is_branch = False\n\n if is_branch:\n for x, y in branch_coordinates:\n pruned_skel[x,y] = False\n return pruned_skel", "def shallowest_spanning_tree(self):\r\n min = inf\r\n node = \"unassigned\"\r\n for i in range(self.size): # checks each node\r\n temp = self.nodes.copy()\r\n temp.remove(i)\r\n res = self.aux_sst(i) # calls auxiliary function\r\n if res < min: # returns node with min depth\r\n min = res\r\n node = i\r\n return node, min", "def find_mid_node(self, head: TreeNode) -> TreeNode:\n # pointer used to disconnect left half from the mid node.\n prev_pointer = None\n slow_pointer = head\n fast_pointer = head\n\n # Iterate until fast pointer doesn't reach the end of linked list.\n while fast_pointer and fast_pointer.next:\n prev_pointer = slow_pointer\n slow_pointer = slow_pointer.next\n fast_pointer = fast_pointer.next.next\n\n # handling the case when slow pointer was equal to head.\n if prev_pointer:\n prev_pointer.next = None\n\n return slow_pointer", "def generate_random_ham_cycle(self):\n empty_graph = self.create_empty_graph() # empty graph with weighted vertices\n\n min_span_tree = self.make_min_tree(empty_graph) # min span tree to draw around\n\n self.draw_around_span_tree(min_span_tree)\n\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
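A minimal usage sketch for the heal_skeleton routine shown in the record above. It assumes heal_skeleton (and the skeleton_df_to_nx / _reorient_skeleton helpers it relies on) can be imported from the module that defines it, and that the skeleton is an SWC-style pandas DataFrame with columns rowId, x, y, z, radius, link, where fragment roots are marked by link == -1. The toy data below is illustrative and not taken from the record.

import pandas as pd
# from neuprint.skeleton import heal_skeleton   # assumed import path

# Two disconnected 2-node fragments: rows 1-2 near the origin, rows 3-4 about 10 units away.
skeleton_df = pd.DataFrame({
    'rowId':  [1,   2,   3,    4],
    'x':      [0.0, 1.0, 10.0, 11.0],
    'y':      [0.0, 0.0, 0.0,  0.0],
    'z':      [0.0, 0.0, 0.0,  0.0],
    'radius': [1.0, 1.0, 1.0,  1.0],
    'link':   [-1,  1,   -1,   3],   # two roots -> two fragments
})

# Join all fragments at their nearest points (MST over fragment pairs).
healed = heal_skeleton(skeleton_df)
assert (healed['link'] == -1).sum() == 1        # a single root remains

# With a distance limit, gaps longer than the limit are left unhealed.
partial = heal_skeleton(skeleton_df, max_distance=5.0)
assert (partial['link'] == -1).sum() == 2       # the 9-unit gap was not bridged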
Replace the 'link' column in each row of the skeleton dataframe so that its parent corresponds to a depth-first traversal from the given root node.
def _reorient_skeleton(skeleton_df, root, root_parent=-1, g=None): g = g or skeleton_df_to_nx(skeleton_df, False, False) assert isinstance(g, nx.Graph) and not isinstance(g, nx.DiGraph), \ "skeleton graph must be undirected" edges = list(nx.dfs_edges(g, source=root)) # If the graph has more than one connected component, # the remaining components have arbitrary roots if len(edges) != len(g.edges): for cc in nx.connected_components(g): if root not in cc: edges += list(nx.dfs_edges(g, source=cc.pop())) edges = pd.DataFrame(edges, columns=['link', 'rowId']) # parent, child edges = edges.set_index('rowId')['link'] # Replace 'link' (parent) column using DFS edges skeleton_df['link'] = skeleton_df['rowId'].map(edges).fillna(root_parent).astype(int)
[ "def upsample_skeleton(skeleton_df, max_segment_length):\n if len(skeleton_df) in (0, 1) or (skeleton_df['link'] == -1).all():\n # Can't upsample a skeleton with no child-parent segments\n return skeleton_df\n\n seg_df = _skeleton_segments(skeleton_df)\n seg_df = seg_df.loc[seg_df['length'] > max_segment_length]\n\n if len(seg_df) == 0:\n return skeleton_df\n\n I0 = seg_df['rowId']\n I1 = seg_df['rowId_parent']\n next_id = 1 + skeleton_df['rowId'].max()\n\n # It's best to minimize the number of times we call np.linspace(),\n # so we interpolate points and radii in conjunction with a single array.\n PR0 = seg_df[[*'xyz', 'radius']].values\n PR1 = seg_df[['x_parent', 'y_parent', 'z_parent', 'radius_parent']].values\n\n D = seg_df['length']\n\n segment_nodes = []\n for i0, i1, pr0, pr1, d in zip(I0, I1, PR0, PR1, D):\n # Number of nodes from child (i0) to parent (i1)\n # excluding the parent (which we won't edit).\n n = int(np.ceil(d / max_segment_length))\n\n # IDs of the original child and new intermediates going towards\n # the original parent, but not the parent itself.\n I = [i0, *range(next_id, next_id + n - 1)] # noqa\n next_id += n - 1\n\n # 'link' (parent id) for the original child and new intermediates\n L = I[1:] + [i1]\n\n # Interpolate points and radii\n PR = np.linspace(pr0, pr1, n, endpoint=False)\n\n assert len(PR) == len(I) == len(L)\n segment_nodes.append((I, *PR.T, L))\n\n segment_cols = [*zip(*segment_nodes)]\n full_cols = [np.concatenate(a) for a in segment_cols]\n new_df = pd.DataFrame(dict(zip(['rowId', *'xyz', 'radius', 'link'], full_cols)))\n\n # Expand the DataFrame to make room for the new rows,\n # then copy them over.\n all_rowIds = np.sort(pd.concat((skeleton_df['rowId'], new_df['rowId'])).unique())\n dtypes = skeleton_df.dtypes\n skeleton_df = skeleton_df.set_index('rowId').reindex(all_rowIds)\n skeleton_df.update(new_df.set_index('rowId'))\n\n # Restore to standard column form.\n return skeleton_df.reset_index().astype(dtypes)", "def heal_skeleton(skeleton_df, max_distance=np.inf, root_parent=None):\n if max_distance is True:\n max_distance = np.inf\n\n if not max_distance:\n max_distance = 0.0\n\n if root_parent is None:\n root_parent = -1\n else:\n # Fast path to exit early if we can easily check the number of roots.\n num_roots = (skeleton_df['link'] == root_parent).sum()\n if num_roots == 1:\n # There's only one root and therefore only one component.\n # No healing necessary.\n return skeleton_df\n\n skeleton_df = skeleton_df.sort_values('rowId', ignore_index=True)\n g = skeleton_df_to_nx(skeleton_df, False, False)\n\n # Extract each fragment's rows and construct a KD-Tree\n Fragment = namedtuple('Fragment', ['frag_id', 'df', 'kd'])\n fragments = []\n for frag_id, cc in enumerate(nx.connected_components(g)):\n if len(cc) == len(skeleton_df):\n # There's only one component -- no healing necessary\n return skeleton_df\n df = skeleton_df.query('rowId in @cc')\n kd = cKDTree(df[[*'xyz']].values)\n fragments.append( Fragment(frag_id, df, kd) )\n\n # Sort from big-to-small, so the calculations below use a\n # KD tree for the larger point set in every fragment pair.\n fragments = sorted(fragments, key=lambda frag: -len(frag.df))\n\n # We could use the full graph and connect all\n # fragment pairs at their nearest neighbors,\n # but it's faster to treat each entire fragment as\n # a single node and run MST on that quotient graph,\n # which is tiny.\n frag_graph = nx.Graph()\n for frag_a, frag_b in combinations(fragments, 2):\n coords_b = 
frag_b.df[[*'xyz']].values\n distances, indexes = frag_a.kd.query(coords_b)\n\n index_b = np.argmin(distances)\n index_a = indexes[index_b]\n\n node_a = frag_a.df['rowId'].iloc[index_a]\n node_b = frag_b.df['rowId'].iloc[index_b]\n dist_ab = distances[index_b]\n\n # Add edge from one fragment to another,\n # but keep track of which fine-grained skeleton\n # nodes were used to calculate distance.\n frag_graph.add_edge( frag_a.frag_id, frag_b.frag_id,\n node_a=node_a, node_b=node_b,\n distance=dist_ab )\n\n # Compute inter-fragment MST edges\n frag_edges = nx.minimum_spanning_edges(frag_graph, weight='distance', data=True)\n\n # For each inter-fragment edge, add the corresponding\n # fine-grained edge between skeleton nodes in the original graph.\n omit_edges = []\n for _u, _v, d in frag_edges:\n g.add_edge(d['node_a'], d['node_b'])\n if d['distance'] > max_distance:\n omit_edges.append((d['node_a'], d['node_b']))\n\n # Traverse in depth-first order to compute edges for final tree\n root = skeleton_df['rowId'].iloc[0]\n\n # Replace 'link' (parent) column using MST edges\n _reorient_skeleton(skeleton_df, root, root_parent, g=g)\n assert (skeleton_df['link'] == root_parent).sum() == 1\n assert skeleton_df['link'].iloc[0] == root_parent\n\n # Delete edges that violated max_distance\n if omit_edges:\n # Make sure this is invariant to edge direction (check both directions).\n omit_edges = omit_edges + [(b, a) for (a, b) in omit_edges]\n omit_df = pd.DataFrame(omit_edges, columns=['rowId', 'link'])\n omit_df['omit_link'] = -1\n\n # Remove links for omitted edges (convert child node to a new root).\n skeleton_df = skeleton_df.merge(omit_df, 'left', on=['rowId', 'link'])\n skeleton_df['link'].update(skeleton_df['omit_link'])\n del skeleton_df['omit_link']\n\n return skeleton_df", "def create_indirect_links_recursive(df: pd.DataFrame) -> pd.DataFrame:\n\n df_copy = df.copy()\n\n # As long as new lines are added to the Dataframe continue looking for indirect links\n while True:\n old_len = len(df_copy)\n df_copy = create_indirect_links_once(df_copy)\n new_len = len(df_copy)\n if old_len == new_len:\n break\n\n SORT_COLUMNS = [\"input_study\", \"input_dataset\", \"input_version\", \"input_variable\"]\n return df_copy.sort_values(by=SORT_COLUMNS).reset_index(drop=True)", "def populate_siblings_table(conn, family_tree):\n log_info('. 
adding \"siblings\" links')\n parent_attributes = commize(PARENT_ATTRIBUTES)\n sql_stmt = \"\"\"\n select A.node_id, A.text_value\n from nodes_tab P\n join nodes_tab O\n on O.link_id = P.link_id\n and O.node_id <> P.node_id\n and O.attr_id in ( %s )\n join nodes_tab A\n on A.node_id = O.node_id\n and A.attr_id = %d\n where P.node_id = ?\n and P.attr_id in ( %s );\n \"\"\" % (parent_attributes, NAME_ATTRIBUTE, parent_attributes)\n populate_materialized_view(conn, family_tree, sql_stmt, SIBLING_ATTRIBUTE)", "def rebuildtable(cls):\n cls._closure_model.objects.all().delete()\n cls._closure_model.objects.bulk_create([cls._closure_model(\n parent_id=x['pk'],\n child_id=x['pk'],\n depth=0\n ) for x in cls.objects.values(\"pk\")])\n for node in cls.objects.all():\n node._closure_createlink()", "def skeleton_df_to_nx(df, with_attributes=True, directed=True, with_distances=False, virtual_roots=False, root_dist=np.inf):\n if directed:\n g = nx.DiGraph()\n else:\n g = nx.Graph()\n\n if with_attributes:\n for row in df.itertuples(index=False):\n g.add_node(row.rowId, x=row.x, y=row.y, z=row.z, radius=row.radius)\n else:\n g.add_nodes_from(df['rowId'].sort_values())\n\n if not virtual_roots:\n # Instead of assuming that the root node refers to a special parent (e.g. -1),\n # we determine the root_parents by inspection.\n root_parents = pd.Index(df['link'].unique()).difference(df['rowId'].unique())\n root_parents\n\n if with_distances:\n edges_df = df[['rowId', 'link']].copy()\n edges_df['distance'] = calc_segment_distances(df, root_dist)\n if not virtual_roots:\n edges_df = edges_df.query('link not in @root_parents')\n edges_df = edges_df.sort_values(['rowId', 'link'])\n g.add_weighted_edges_from(edges_df.itertuples(index=False), 'distance')\n else:\n if not virtual_roots:\n edges_df = df.query('link not in @root_parents')\n edges_df = edges_df[['rowId', 'link']]\n edges_df = edges_df.sort_values(['rowId', 'link'])\n g.add_edges_from(edges_df.values)\n\n return g", "def attach_synapses_to_skeleton(skeleton_df, synapses_df):\n skeleton_df = skeleton_df.copy(deep=False).reset_index(drop=True)\n synapses_df = synapses_df.copy(deep=False).reset_index(drop=True)\n\n skeleton_df['structure'] = 'neurite'\n synapses_df['structure'] = synapses_df['type']\n synapses_df['radius'] = 0.0\n\n kd = cKDTree(skeleton_df[[*'xyz']].values)\n _, indexes = kd.query(synapses_df[[*'xyz']].values)\n\n synapses_df['link'] = skeleton_df.loc[indexes, 'rowId'].values\n synapses_df['rowId'] = synapses_df.index + skeleton_df['rowId'].max() + 1\n\n relevant_cols = ['rowId', *'xyz', 'radius', 'link', 'structure']\n synapses_df = synapses_df[relevant_cols]\n skeleton_df = skeleton_df[relevant_cols]\n\n combined = pd.concat((skeleton_df, synapses_df), ignore_index=True)\n combined['structure'] = pd.Categorical(combined['structure'])\n return combined", "def populate_children_table(conn, family_tree):\n log_info('. 
adding \"children\" links')\n parent_attributes = commize(PARENT_ATTRIBUTES)\n sql_stmt = \"\"\"\n select A.node_id, A.text_value\n from nodes_tab P\n join nodes_tab A\n on A.node_id = P.node_id\n and A.attr_id = %d\n where P.link_id = ?\n and P.attr_id in ( %s );\n \"\"\" % (NAME_ATTRIBUTE, parent_attributes)\n populate_materialized_view(conn, family_tree, sql_stmt, CHILD_ATTRIBUTE)", "def _relink(self, parent, child, is_child_left):\n if is_child_left:\n parent._left = child\n else:\n parent._right = child\n if child is not None:\n child._parent = parent", "def _prep_node_data(node_data):\n data = node_data \\\n if isinstance(node_data, pd.core.frame.DataFrame) \\\n else pd.read_csv(node_data)\n\n (left, right) = data.columns\n return pd.concat([data[left], data[right]], keys=['left', 'right'])", "def _skeleton_segments(skeleton_df):\n segment_df = skeleton_df.merge(skeleton_df[['rowId', 'link', *'xyz', 'radius']],\n 'inner',\n left_on='link',\n right_on='rowId',\n suffixes=['', '_parent'])\n\n child_points = segment_df[[*'xyz']].values\n parent_points = segment_df[['x_parent', 'y_parent', 'z_parent']].values\n segment_df['length'] = np.linalg.norm(child_points - parent_points, axis=1)\n return segment_df", "def _create_links_between_nodes(self, nodes):\n for node in nodes:\n node.left = self._get_left(node.row_id, node.column_id)\n node.right = self._get_right(node.row_id, node.column_id)\n\n # header node does not need up or down links\n if node.value != 'H':\n node.up = self._get_up(node.row_id, node.column_id)\n node.down = self._get_down(node.row_id, node.column_id)\n\n # create reference to column header\n if node.value == 1:\n node.column_header = self._get_column_header(node.column_id)\n node.column_header.size += 1", "def extract_graph_from_skeleton(sk): \n #used/unsused\n sk_used = np.zeros_like(sk)\n sk_unused = np.copy(sk)\n #root node\n root_position = findroot(sk)\n print('root_position',root_position)\n root = Branch(pixels=[root_position],name='root')\n setvalue(sk_used,root_position,1)\n setvalue(sk_unused,root_position,0)\n #extract rood edge\n edgelist,branchlist,endlist = next_pixels(root_position,sk_used,sk_unused)\n #assert len(edgelist)==1,'root has more than 1 branchedge'################!!!!!!!!\n rootedge = BranchEdge(edgelist[:1])\n while True:\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if edgelist:\n rootedge.add_pixels(edgelist)\n else:\n break\n assert len(branchlist)>=1,'root has no children'\n #first node(perhaps split LM and RM)\n branch1 = Branch(pixels=branchlist)\n root.add_child(branch1,rootedge)\n branch_startpoint_list = [branch1]##BFS\n edge_startpoint_list = []\n while branch_startpoint_list:\n branch1 = branch_startpoint_list.pop(0)\n edgelist,branchlist,endlist = next_pixels(branch1.pixels[0],sk_used,sk_unused)\n edge_startpoint_list = edgelist\n branch_cumulate_list = branchlist\n while branch_cumulate_list:#cumulate all the branch pixels(>3)\n bposition = branch_cumulate_list.pop(0)\n branch1.add_pixel(bposition)\n edgelist,branchlist,endlist = next_pixels(bposition,sk_used,sk_unused)\n edge_startpoint_list += edgelist\n branch_cumulate_list += branchlist\n #for each connected edge start,trace until next node\n for edge in edge_startpoint_list:\n branchedge1 = BranchEdge([edge])\n edgelist,branchlist,endlist = next_pixels(edge,sk_used,sk_unused)\n while edgelist:#trace until next node\n #print('edgelist',edgelist)\n branchedge1.add_pixels(edgelist)\n edgelist,branchlist,endlist = 
next_pixels(edgelist[0],sk_used,sk_unused)\n if branchlist:#next branch\n branch2 = Branch(pixels=branchlist)\n ##if branchedge too short, do nothing\n branch1.add_child(branch2,branchedge1)\n branch_startpoint_list.append(branch2)\n elif endlist:#end node\n branch2 = Branch(pixels=endlist)\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n else:#end without endlist (pixel value=3)\n branch2 = Branch(pixels=branchedge1.pixels[-1:])\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n #if this branch has only one edge, merge(may throw assert error)\n if len(branch1.edges) == 1:\n branch1.edges[0].endbracnch.rank-=1\n branch1.parent_edge.endbracnch = branch1.edges[0].endbracnch\n branch1.parent_edge.add_pixels_nocontinious(branch1.pixels)\n branch1.parent_edge.add_pixels(branch1.edges[0].pixels)\n branch1.edges[0].endbracnch.parent_edge = branch1.parent_edge\n return root", "def dataArrangeLink(self):\n selectList = self.currentSelectionModel().uniqueBranches()\n children = []\n for node in selectList:\n children.extend(node.childList)\n fieldList = self.model.formats.commonFields(children)\n if not fieldList:\n QtGui.QMessageBox.warning(self.activeWindow, 'TreeLine',\n _('Cannot expand without common fields'))\n return\n linkField, ok = QtGui.QInputDialog.getItem(self.activeWindow,\n _('Link Field'),\n _('Select field with links '\n 'to parents'), fieldList,\n 0, False)\n if not ok:\n return\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n undo.BranchUndo(self.model.undoList, selectList)\n for node in selectList:\n node.arrangeByLink(linkField)\n self.updateAll()\n QtGui.QApplication.restoreOverrideCursor()", "def _closure_createlink(self):\n linkparents = self._closure_model.objects.filter(\n child__pk=self._closure_parent_pk\n ).values(\"parent\", \"depth\")\n linkchildren = self._closure_model.objects.filter(\n parent__pk=self.pk\n ).values(\"child\", \"depth\")\n newlinks = [self._closure_model(\n parent_id=p['parent'],\n child_id=c['child'],\n depth=p['depth']+c['depth']+1\n ) for p in linkparents for c in linkchildren]\n self._closure_model.objects.bulk_create(newlinks)", "def children(data, source):\n # get rows with specified utility as the source\n rows = data[data.source == source]\n \n # if end is reached, return nothing\n if rows.shape[0] == 0:\n return \n \n # create starting node\n down_nodes = [source] \n \n #iterate through each row in sorted rows\n for i, row in rows.iterrows():\n r = []\n target = row.source\n next_target = row.target\n \n if data[data.source == next_target].shape[0] == 0:\n r.append([next_target])\n else:\n r.append(children(data, next_target))\n \n down_nodes.extend(r)\n \n return down_nodes", "def distances_from_root(df):\n g = skeleton_df_to_nx(df, directed=False, with_distances=True, virtual_roots=True, root_dist=0.0)\n d = nx.shortest_path_length(g, -1, weight='distance')\n d = pd.Series(d, name='distance').rename_axis('rowId')\n df = df.merge(d, 'left', on='rowId')\n return df", "def replace_with_node(self,node):\n\n self.set_for_parents(node) # connect new to parent on proper locations\n node.parent= self.parent # set node paent correctly\n self.parent = None # disconnect self from the parent\n return node.find_root() # find root again", "def alifestd_assign_root_ancestor_token(\n phylogeny_df: pd.DataFrame,\n root_ancestor_token: str,\n mutate: bool = False,\n) -> pd.DataFrame:\n\n if not mutate:\n phylogeny_df = 
phylogeny_df.copy()\n\n phylogeny_df[\"ancestor_list\"] = alifestd_convert_root_ancestor_token(\n phylogeny_df[\"ancestor_list\"],\n root_ancestor_token,\n mutate=False, # prevent assign to slice warning\n )\n\n return phylogeny_df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
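A small round-trip check of the depth-first re-linking described in the record above. This is an illustrative sketch only: it assumes the private _reorient_skeleton helper and its skeleton_df_to_nx dependency are importable, which may not match the public API, and it uses made-up toy data.

import pandas as pd
# from neuprint.skeleton import _reorient_skeleton   # assumed import path

# A simple 4-node path 1-2-3-4, currently rooted at node 1 (link == -1).
df = pd.DataFrame({
    'rowId':  [1, 2, 3, 4],
    'x':      [0.0, 1.0, 2.0, 3.0],
    'y':      [0.0, 0.0, 0.0, 0.0],
    'z':      [0.0, 0.0, 0.0, 0.0],
    'radius': [1.0, 1.0, 1.0, 1.0],
    'link':   [-1, 1, 2, 3],
})

# Re-root the tree at node 4; only the 'link' column is rewritten, in place.
_reorient_skeleton(df, root=4)
assert df.set_index('rowId')['link'].to_dict() == {1: 2, 2: 3, 3: 4, 4: -1}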
Change the root node of a skeleton. In general, the root node of the skeletons stored in neuprint is not particularly significant, so the directionality of the nodes (parent to child or vice-versa) on any given neuron branch is arbitrary. This function allows you to pick a different root node and reorient the tree with respect to that node. Replaces the 'link' column in each row of the skeleton dataframe so that its parent corresponds to a depth-first traversal from the new root node. You can specify the new root node either by its row, or by a coordinate (the closest node to that coordinate will be selected), or by size (the node with the largest radius will be selected). Works in-place. Only the 'link' column is changed. If the given skeleton has more than one connected component (and thus more than one root node), the orientation of the edges in other components will be arbitrary.
def reorient_skeleton(skeleton_df, rowId=None, xyz=None, use_max_radius=False): assert rowId != 0, \ "rowId is never 0 in NeuTu skeletons" assert bool(rowId) + (xyz is not None) + use_max_radius == 1, \ "Select either a rowId to use as the new root, or a coordinate, or use_max_radius=True" if xyz is not None: # Find closest node to the given coordinate distances = np.linalg.norm(skeleton_df[[*'xyz']] - xyz, axis=1) rowId = skeleton_df['rowId'].iloc[np.argmin(distances)] elif use_max_radius: # Find the node with the largest radius idx = skeleton_df['radius'].idxmax() rowId = skeleton_df.loc[idx, 'rowId'] assert rowId is not None, "You must specify a new root node" _reorient_skeleton(skeleton_df, rowId)
[ "def _reorient_skeleton(skeleton_df, root, root_parent=-1, g=None):\n g = g or skeleton_df_to_nx(skeleton_df, False, False)\n assert isinstance(g, nx.Graph) and not isinstance(g, nx.DiGraph), \\\n \"skeleton graph must be undirected\"\n\n edges = list(nx.dfs_edges(g, source=root))\n\n # If the graph has more than one connected component,\n # the remaining components have arbitrary roots\n if len(edges) != len(g.edges):\n for cc in nx.connected_components(g):\n if root not in cc:\n edges += list(nx.dfs_edges(g, source=cc.pop()))\n\n edges = pd.DataFrame(edges, columns=['link', 'rowId']) # parent, child\n edges = edges.set_index('rowId')['link']\n\n # Replace 'link' (parent) column using DFS edges\n skeleton_df['link'] = skeleton_df['rowId'].map(edges).fillna(root_parent).astype(int)", "def upsample_skeleton(skeleton_df, max_segment_length):\n if len(skeleton_df) in (0, 1) or (skeleton_df['link'] == -1).all():\n # Can't upsample a skeleton with no child-parent segments\n return skeleton_df\n\n seg_df = _skeleton_segments(skeleton_df)\n seg_df = seg_df.loc[seg_df['length'] > max_segment_length]\n\n if len(seg_df) == 0:\n return skeleton_df\n\n I0 = seg_df['rowId']\n I1 = seg_df['rowId_parent']\n next_id = 1 + skeleton_df['rowId'].max()\n\n # It's best to minimize the number of times we call np.linspace(),\n # so we interpolate points and radii in conjunction with a single array.\n PR0 = seg_df[[*'xyz', 'radius']].values\n PR1 = seg_df[['x_parent', 'y_parent', 'z_parent', 'radius_parent']].values\n\n D = seg_df['length']\n\n segment_nodes = []\n for i0, i1, pr0, pr1, d in zip(I0, I1, PR0, PR1, D):\n # Number of nodes from child (i0) to parent (i1)\n # excluding the parent (which we won't edit).\n n = int(np.ceil(d / max_segment_length))\n\n # IDs of the original child and new intermediates going towards\n # the original parent, but not the parent itself.\n I = [i0, *range(next_id, next_id + n - 1)] # noqa\n next_id += n - 1\n\n # 'link' (parent id) for the original child and new intermediates\n L = I[1:] + [i1]\n\n # Interpolate points and radii\n PR = np.linspace(pr0, pr1, n, endpoint=False)\n\n assert len(PR) == len(I) == len(L)\n segment_nodes.append((I, *PR.T, L))\n\n segment_cols = [*zip(*segment_nodes)]\n full_cols = [np.concatenate(a) for a in segment_cols]\n new_df = pd.DataFrame(dict(zip(['rowId', *'xyz', 'radius', 'link'], full_cols)))\n\n # Expand the DataFrame to make room for the new rows,\n # then copy them over.\n all_rowIds = np.sort(pd.concat((skeleton_df['rowId'], new_df['rowId'])).unique())\n dtypes = skeleton_df.dtypes\n skeleton_df = skeleton_df.set_index('rowId').reindex(all_rowIds)\n skeleton_df.update(new_df.set_index('rowId'))\n\n # Restore to standard column form.\n return skeleton_df.reset_index().astype(dtypes)", "def set_root(self,node) :\n if not node is None:\n node.parent = None\n self.__root = node", "def heal_skeleton(skeleton_df, max_distance=np.inf, root_parent=None):\n if max_distance is True:\n max_distance = np.inf\n\n if not max_distance:\n max_distance = 0.0\n\n if root_parent is None:\n root_parent = -1\n else:\n # Fast path to exit early if we can easily check the number of roots.\n num_roots = (skeleton_df['link'] == root_parent).sum()\n if num_roots == 1:\n # There's only one root and therefore only one component.\n # No healing necessary.\n return skeleton_df\n\n skeleton_df = skeleton_df.sort_values('rowId', ignore_index=True)\n g = skeleton_df_to_nx(skeleton_df, False, False)\n\n # Extract each fragment's rows and construct a KD-Tree\n Fragment 
= namedtuple('Fragment', ['frag_id', 'df', 'kd'])\n fragments = []\n for frag_id, cc in enumerate(nx.connected_components(g)):\n if len(cc) == len(skeleton_df):\n # There's only one component -- no healing necessary\n return skeleton_df\n df = skeleton_df.query('rowId in @cc')\n kd = cKDTree(df[[*'xyz']].values)\n fragments.append( Fragment(frag_id, df, kd) )\n\n # Sort from big-to-small, so the calculations below use a\n # KD tree for the larger point set in every fragment pair.\n fragments = sorted(fragments, key=lambda frag: -len(frag.df))\n\n # We could use the full graph and connect all\n # fragment pairs at their nearest neighbors,\n # but it's faster to treat each entire fragment as\n # a single node and run MST on that quotient graph,\n # which is tiny.\n frag_graph = nx.Graph()\n for frag_a, frag_b in combinations(fragments, 2):\n coords_b = frag_b.df[[*'xyz']].values\n distances, indexes = frag_a.kd.query(coords_b)\n\n index_b = np.argmin(distances)\n index_a = indexes[index_b]\n\n node_a = frag_a.df['rowId'].iloc[index_a]\n node_b = frag_b.df['rowId'].iloc[index_b]\n dist_ab = distances[index_b]\n\n # Add edge from one fragment to another,\n # but keep track of which fine-grained skeleton\n # nodes were used to calculate distance.\n frag_graph.add_edge( frag_a.frag_id, frag_b.frag_id,\n node_a=node_a, node_b=node_b,\n distance=dist_ab )\n\n # Compute inter-fragment MST edges\n frag_edges = nx.minimum_spanning_edges(frag_graph, weight='distance', data=True)\n\n # For each inter-fragment edge, add the corresponding\n # fine-grained edge between skeleton nodes in the original graph.\n omit_edges = []\n for _u, _v, d in frag_edges:\n g.add_edge(d['node_a'], d['node_b'])\n if d['distance'] > max_distance:\n omit_edges.append((d['node_a'], d['node_b']))\n\n # Traverse in depth-first order to compute edges for final tree\n root = skeleton_df['rowId'].iloc[0]\n\n # Replace 'link' (parent) column using MST edges\n _reorient_skeleton(skeleton_df, root, root_parent, g=g)\n assert (skeleton_df['link'] == root_parent).sum() == 1\n assert skeleton_df['link'].iloc[0] == root_parent\n\n # Delete edges that violated max_distance\n if omit_edges:\n # Make sure this is invariant to edge direction (check both directions).\n omit_edges = omit_edges + [(b, a) for (a, b) in omit_edges]\n omit_df = pd.DataFrame(omit_edges, columns=['rowId', 'link'])\n omit_df['omit_link'] = -1\n\n # Remove links for omitted edges (convert child node to a new root).\n skeleton_df = skeleton_df.merge(omit_df, 'left', on=['rowId', 'link'])\n skeleton_df['link'].update(skeleton_df['omit_link'])\n del skeleton_df['omit_link']\n\n return skeleton_df", "def update_root(self, new_root_id):", "def _setRoot(self, newRoot: HuffNode) -> None:\n if not isinstance(newRoot, HuffNode):\n raise TypeError('not an instance of HuffNode')\n\n self._root = newRoot", "def setRoot(G, root, size=50, radius=200):\n if len(root) == 1: #if steady state set as root in (0,0)\n # Set position of root\n G.node[root[0]]['x'] = 0\n G.node[root[0]]['y'] = 0\n G.node[root[0]]['angle'] = 0 \n # Set color and size\n # Note: the color here is a number that will be later transformed to rgb using cmap\n if 'color' not in G.node[root[0]]: #if no defined color assign a random one\n G.node[root[0]]['color'] = random()\n if 'size' not in G.node[root[0]]: #if no defined size assign a default\n G.node[root[0]]['size'] = size\n\n else: # if cycle create fan\n # Determine position of root\n pos = createFanPoints(len(root), 0, 0, 0, 360, radius)\n for n in zip(root, 
pos):\n # Set position of root\n log.info(\"node, x, y, angle={}\".format(n))\n G.node[n[0]]['x'] = n[1][0]\n G.node[n[0]]['y'] = n[1][1]\n G.node[n[0]]['angle'] = n[1][2] \n # Set color and size\n if 'color' not in G.node[n[0]]: #if no defined color assign a random one\n G.node[n[0]]['color'] = random()\n if 'size' not in G.node[n[0]]: #if no defined size assign a default\n G.node[n[0]]['size'] = size\n\n return G", "def setRootNode(self, root: 'SoNode') -> \"void\":\n return _coin.SoProtoInstance_setRootNode(self, root)", "def attach_synapses_to_skeleton(skeleton_df, synapses_df):\n skeleton_df = skeleton_df.copy(deep=False).reset_index(drop=True)\n synapses_df = synapses_df.copy(deep=False).reset_index(drop=True)\n\n skeleton_df['structure'] = 'neurite'\n synapses_df['structure'] = synapses_df['type']\n synapses_df['radius'] = 0.0\n\n kd = cKDTree(skeleton_df[[*'xyz']].values)\n _, indexes = kd.query(synapses_df[[*'xyz']].values)\n\n synapses_df['link'] = skeleton_df.loc[indexes, 'rowId'].values\n synapses_df['rowId'] = synapses_df.index + skeleton_df['rowId'].max() + 1\n\n relevant_cols = ['rowId', *'xyz', 'radius', 'link', 'structure']\n synapses_df = synapses_df[relevant_cols]\n skeleton_df = skeleton_df[relevant_cols]\n\n combined = pd.concat((skeleton_df, synapses_df), ignore_index=True)\n combined['structure'] = pd.Categorical(combined['structure'])\n return combined", "def resetRoot(self):\n self._rootID = self.model.changes.get(self._leafID).getRoot()\n pass", "def set_root(self, root):\n self.clear()\n self._root = root\n if root is not None:\n pen = make_pen(Qt.blue, width=1, cosmetic=True,\n join_style=Qt.MiterJoin)\n for node in postorder(root):\n item = DendrogramWidget.ClusterGraphicsItem(self._itemgroup)\n item.setAcceptHoverEvents(True)\n item.setPen(pen)\n item.node = node\n item.installSceneEventFilter(self)\n for branch in node.branches:\n assert branch in self._items\n self._cluster_parent[branch] = node\n self._items[node] = item\n\n self._relayout()\n self._rescale()\n self.updateGeometry()", "def setSceneGraphRoot(self, root: 'SoNode') -> \"void\":\n return _coin.SoScXMLStateMachine_setSceneGraphRoot(self, root)", "def skeleton_df_to_nx(df, with_attributes=True, directed=True, with_distances=False, virtual_roots=False, root_dist=np.inf):\n if directed:\n g = nx.DiGraph()\n else:\n g = nx.Graph()\n\n if with_attributes:\n for row in df.itertuples(index=False):\n g.add_node(row.rowId, x=row.x, y=row.y, z=row.z, radius=row.radius)\n else:\n g.add_nodes_from(df['rowId'].sort_values())\n\n if not virtual_roots:\n # Instead of assuming that the root node refers to a special parent (e.g. 
-1),\n # we determine the root_parents by inspection.\n root_parents = pd.Index(df['link'].unique()).difference(df['rowId'].unique())\n root_parents\n\n if with_distances:\n edges_df = df[['rowId', 'link']].copy()\n edges_df['distance'] = calc_segment_distances(df, root_dist)\n if not virtual_roots:\n edges_df = edges_df.query('link not in @root_parents')\n edges_df = edges_df.sort_values(['rowId', 'link'])\n g.add_weighted_edges_from(edges_df.itertuples(index=False), 'distance')\n else:\n if not virtual_roots:\n edges_df = df.query('link not in @root_parents')\n edges_df = edges_df[['rowId', 'link']]\n edges_df = edges_df.sort_values(['rowId', 'link'])\n g.add_edges_from(edges_df.values)\n\n return g", "def extract_graph_from_skeleton(sk): \n #used/unsused\n sk_used = np.zeros_like(sk)\n sk_unused = np.copy(sk)\n #root node\n root_position = findroot(sk)\n print('root_position',root_position)\n root = Branch(pixels=[root_position],name='root')\n setvalue(sk_used,root_position,1)\n setvalue(sk_unused,root_position,0)\n #extract rood edge\n edgelist,branchlist,endlist = next_pixels(root_position,sk_used,sk_unused)\n #assert len(edgelist)==1,'root has more than 1 branchedge'################!!!!!!!!\n rootedge = BranchEdge(edgelist[:1])\n while True:\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if edgelist:\n rootedge.add_pixels(edgelist)\n else:\n break\n assert len(branchlist)>=1,'root has no children'\n #first node(perhaps split LM and RM)\n branch1 = Branch(pixels=branchlist)\n root.add_child(branch1,rootedge)\n branch_startpoint_list = [branch1]##BFS\n edge_startpoint_list = []\n while branch_startpoint_list:\n branch1 = branch_startpoint_list.pop(0)\n edgelist,branchlist,endlist = next_pixels(branch1.pixels[0],sk_used,sk_unused)\n edge_startpoint_list = edgelist\n branch_cumulate_list = branchlist\n while branch_cumulate_list:#cumulate all the branch pixels(>3)\n bposition = branch_cumulate_list.pop(0)\n branch1.add_pixel(bposition)\n edgelist,branchlist,endlist = next_pixels(bposition,sk_used,sk_unused)\n edge_startpoint_list += edgelist\n branch_cumulate_list += branchlist\n #for each connected edge start,trace until next node\n for edge in edge_startpoint_list:\n branchedge1 = BranchEdge([edge])\n edgelist,branchlist,endlist = next_pixels(edge,sk_used,sk_unused)\n while edgelist:#trace until next node\n #print('edgelist',edgelist)\n branchedge1.add_pixels(edgelist)\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if branchlist:#next branch\n branch2 = Branch(pixels=branchlist)\n ##if branchedge too short, do nothing\n branch1.add_child(branch2,branchedge1)\n branch_startpoint_list.append(branch2)\n elif endlist:#end node\n branch2 = Branch(pixels=endlist)\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n else:#end without endlist (pixel value=3)\n branch2 = Branch(pixels=branchedge1.pixels[-1:])\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n #if this branch has only one edge, merge(may throw assert error)\n if len(branch1.edges) == 1:\n branch1.edges[0].endbracnch.rank-=1\n branch1.parent_edge.endbracnch = branch1.edges[0].endbracnch\n branch1.parent_edge.add_pixels_nocontinious(branch1.pixels)\n branch1.parent_edge.add_pixels(branch1.edges[0].pixels)\n branch1.edges[0].endbracnch.parent_edge = branch1.parent_edge\n return root", "def setHead(self, node: 'SoNode') -> \"void\":\n return _coin.SoLightPath_setHead(self, 
node)", "def stretch_skeleton(skeleton):\n total_length = 100\n\n s = pd.Series(index=np.arange(0, total_length))\n i = 0\n num_moves = len(skeleton)\n # Stretch the TS\n for n, v in skeleton.iteritems():\n s.ix[i] = v\n i += int(total_length/num_moves+1)\n\n return s", "def replace_with_node(self,node):\n\n self.set_for_parents(node) # connect new to parent on proper locations\n node.parent= self.parent # set node paent correctly\n self.parent = None # disconnect self from the parent\n return node.find_root() # find root again", "def set_root_as_prefix(self) -> None:\n self.root_node.prefix_flag = True\n self._prefix_nodes[0] += 1", "def set_root(self, root):\n if root is None:\n return\n for plot in self.traverse(lambda x: x):\n plot._root = root", "def transform_one_root():\n global tot_block_len, GRAPH, NODE_OPS, tot_block_len, OP_PARENTS_NUM, \\\n priorities, predecessor_count, OP_CHILDREN_NUM, successor_count\n roots = find_roots()\n if len(roots) == 1:\n return roots.pop()\n if len(roots) < 1:\n print(\"ERROR: graph doesn't have any roots\")\n return None\n\n root_op = IRArray(9, None, None, None)\n setattr(root_op, \"line_num\", tot_block_len + 1)\n # print(\"length: %d. blcok len %d\" % (len(NODE_OPS), tot_block_len))\n NODE_OPS[tot_block_len + 1] = root_op\n\n new_root = tot_block_len + 1\n # TODO: map the new root to its operation (a NOP) in here\n for root in roots:\n REV_GRAPH[root][new_root] = False\n GRAPH[new_root][root] = False\n # OP_PARENTS_NUM[root] += 1 # PROBABLY DON'T NEED TO DO THIS\n # # increment its parents count\n # OP_CHILDREN_NUM[new_root] += 1\n # # increment its children count\n\n priorities.append(0)\n OP_CHILDREN_NUM.append(0)\n OP_PARENTS_NUM.append(0)\n predecessor_count.append(0)\n successor_count.append(0)\n\n return new_root" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
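The reorient_skeleton entry above re-roots a skeleton in place. As a rough illustration of how it might be called, here is a sketch on an invented four-node skeleton; the column layout (rowId, x/y/z, radius, link) matches the DataFrames used throughout these snippets, and the function is assumed to be importable together with its helpers (e.g. from the neuprint skeleton utilities). All values below are made up for the example.

```python
import pandas as pd

# Toy skeleton: a simple chain 1 <- 2 <- 3 <- 4, rooted at node 1 (link == -1).
skeleton_df = pd.DataFrame({
    'rowId':  [1, 2, 3, 4],
    'x':      [0.0, 1.0, 2.0, 3.0],
    'y':      [0.0, 0.0, 0.0, 0.0],
    'z':      [0.0, 0.0, 0.0, 0.0],
    'radius': [5.0, 4.0, 3.0, 2.0],
    'link':   [-1, 1, 2, 3],
})

# Re-root at an explicit node id...
reorient_skeleton(skeleton_df, rowId=4)

# ...or at the node closest to a coordinate...
reorient_skeleton(skeleton_df, xyz=(2.0, 0.0, 0.0))

# ...or at the node with the largest radius (a common soma heuristic).
reorient_skeleton(skeleton_df, use_max_radius=True)

# Only the 'link' column changes; after the last call node 1 (radius 5.0)
# is the root again and its link is -1.
print(skeleton_df)
```

Each call simply rebuilds the parent links via a depth-first traversal from the chosen root, which is why repeating the operation is cheap.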
Compute the table of child-to-parent points and segment lengths.
def _skeleton_segments(skeleton_df): segment_df = skeleton_df.merge(skeleton_df[['rowId', 'link', *'xyz', 'radius']], 'inner', left_on='link', right_on='rowId', suffixes=['', '_parent']) child_points = segment_df[[*'xyz']].values parent_points = segment_df[['x_parent', 'y_parent', 'z_parent']].values segment_df['length'] = np.linalg.norm(child_points - parent_points, axis=1) return segment_df
[ "def n_segments(tree):\n return sum(1 for _ in tr.isegment(tree))", "def get_distances_section(self):\n table = {}\n\n row_length = (self.height // MyCommon.Constants.NUM_SECTIONS) + 1 ## + 1 TO COUNT LAST ITEM FOR RANGE\n col_length = (self.width // MyCommon.Constants.NUM_SECTIONS) + 1\n\n for r in range(row_length):\n for c in range(col_length):\n curr_section = (r,c)\n table[curr_section] = self.calculate_distance_sections(curr_section, row_length, col_length)\n\n return table", "def _ragged_size(table: ArrayLike) -> list:\n\n R, G, B = tsplit(table)\n\n R_len = R.shape[-1] - np.sum(np.isnan(R))\n G_len = G.shape[-1] - np.sum(np.isnan(G))\n B_len = B.shape[-1] - np.sum(np.isnan(B))\n\n return [R_len, G_len, B_len]", "def test_05_01_relate_zero_parents_and_children(self):\n x = cellprofiler_core.object.Objects()\n x.segmented = numpy.zeros((10, 10), int)\n y = cellprofiler_core.object.Objects()\n y.segmented = numpy.zeros((10, 10), int)\n children_per_parent, parents_of_children = x.relate_children(y)\n assert numpy.product(children_per_parent.shape) == 0\n assert numpy.product(parents_of_children.shape) == 0", "def draw_pine_tree():\r\n height = random.randint(50, 200)\r\n draw_trunk(height)\r\n branch_height = draw_triangle(height)\r\n return height, branch_height", "def side_lengths(self):\n side1 = abs(self.vertices[1].x - self.vertices[0].x)\n side2 = abs(self.vertices[0].y - self.vertices[3].y)\n side3 = abs(self.vertices[3].x - self.vertices[2].x)\n side4 = abs(self.vertices[2].y - self.vertices[1].y)\n return side1, side2, side3, side4 # was TODO", "def get_segment_pathlength(self,to_node) :\n # upated 2014-01-21 for compatibility with new btstructs2\n L = 0\n if self._tree.is_leaf(to_node) :\n path = self._tree.path_to_root(to_node)\n L = 0\n else :\n path = self._tree.path_to_root(to_node)[1:]\n p = to_node.parent.content['p3d']\n n = to_node.content['p3d']\n d = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n L = L + d\n \n for node in path :\n # print 'going along the path'\n n = node.content['p3d']\n if len(node.children) >= 2 : # I arrive at either the soma or a branchpoint close to the soma\n return L\n else :\n p = node.parent.content['p3d']\n d = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n L = L + d", "def contingency_table(left_vol, right_vol):\n assert left_vol.shape == right_vol.shape\n df = pd.DataFrame( {\"left\": left_vol.reshape(-1),\n \"right\": right_vol.reshape(-1)},\n dtype=left_vol.dtype )\n sizes = df.groupby(['left', 'right']).size()\n sizes.name = 'voxel_count'\n return sizes", "def verticalPoints(self, row, col, rack, pid):\n \n if(pid == 1): # determine players\n enemypid = 2\n else:\n enemypid = 1\n \n \n consecutive = 0\n \n if ((row-3) < 0): # if there's not enough space\n return 0 # it's worth nothing \n \n for i in range(row, (row-4), -1):\n if rack[col][i] == pid:\n consecutive += 1 # calculate the number of consecutive pieces\n elif(rack[col][i] == enemypid): # if there's an enemy piece there, it's useless so return 0\n return 0\n \n if consecutive == 0: # return a point value based on the number of consecutive pieces\n return 0\n elif consecutive == 1:\n return 1\n elif consecutive == 2:\n return 10\n elif consecutive == 3:\n return 100\n elif consecutive == 4:\n return 100000", "def edge_lengths(self):\n points = list(self.base_piece.polygon.points())\n NUM = 4\n assert len(points) == NUM\n return [(points[i] - points[(i+1) % NUM]).norm() for i in range(NUM)]", "def _makebaselines(self):\n nholes = self.ctrs_eqt.shape[0]\n blist = []\n for i in range(nholes):\n 
for j in range(nholes):\n if i < j:\n blist.append((i, j))\n barray = np.array(blist).astype(int)\n # blname = []\n bllist = []\n for basepair in blist:\n # blname.append(\"{0:d}_{1:d}\".format(basepair[0],basepair[1]))\n baseline = self.ctrs_eqt[basepair[0]] - self.ctrs_eqt[basepair[1]]\n bllist.append(baseline)\n return barray, np.array(bllist)", "def length(self):\n act_loc = self.thin_face.parent_thin.parent_lattice.z_line\n myo_loc = self.thick_face.get_axial_location(-1)\n ls = self.parent_lattice.lattice_spacing\n length = np.sqrt( (act_loc-myo_loc)**2 + ls**2 )\n return length", "def calc_segment_distances(df, root_dist=np.inf):\n # Append parent (link) columns to each row by matching\n # each row's 'link' ID with the parent's 'rowId'.\n edges_df = df[['rowId', 'link', *'xyz']].merge(\n df[['rowId', *'xyz']], 'left',\n left_on='link', right_on='rowId', suffixes=['', '_link'])\n\n diff = edges_df[[*'xyz']] - edges_df[['x_link', 'y_link', 'z_link']].values\n distances = np.linalg.norm(diff, axis=1).astype(np.float32)\n distances[np.isnan(distances)] = root_dist\n return distances", "def compute_n_points(self):\n\n # initiate the counter to be 0\n self.n_points = 0\n\n # loop over all subspaces\n for s in range(self.nSubspaces):\n # get current subspace\n current_subspace = self.subspace_list[s]\n # add the number of points in the current subspace\n self.n_points += current_subspace.n_points", "def compute_probas_from_table(table, joint_xy):\n len_y = table.shape[0]\n len_x = table.shape[1]\n len_z = 2\n probas_Z = np.zeros(len_z)\n joint_xz = np.zeros((len_z, len_x))\n joint_yz = np.zeros((len_z, len_y))\n joint_xyz = np.zeros((len_y, len_x, len_z))\n\n # Get P(x) and P(Y)\n probas_X, probas_Y = compute_probas_from_joint(joint_xy)\n \n # Compute P(Z)\n sum_pxy = 0\n for y in range(len_y):\n for x in range(len_x):\n if table[y,x] == 1:\n sum_pxy += joint_xy[y,x]\n probas_Z[0] = sum_pxy\n probas_Z[1] = 1 - probas_Z[0]\n\n # Compute joint between X and Z\n for x in range(len_x):\n sum_y0 = 0\n sum_y1 = 0\n for y in range(len_y):\n if table[y,x] == 1: # Z = 1\n sum_y1 += joint_xy[y][x]\n if table[y,x] == 0: # Z = 0\n sum_y0 += joint_xy[y][x] \n joint_xz[0,x] = sum_y0\n joint_xz[1,x] = sum_y1\n\n # Compute joint between Y and Z\n for y in range(len_y):\n sum_x0 = 0\n sum_x1 = 0\n for x in range(len_x):\n if table[y,x] == 1: # Z = 1\n sum_x1 += joint_xy[y][x]\n if table[y,x] == 0: # Z = 0\n sum_x0 += joint_xy[y][x] \n joint_yz[0,y] = sum_x0\n joint_yz[1,y] = sum_x1\n\n\n # Compute joint P(X,Y,Z)\n for y in range(len_y):\n for x in range(len_x):\n if table[y,x] == 0:\n joint_xyz[y,x,0] = joint_xy[y,x]\n else:\n joint_xyz[y,x,1] = joint_xy[y,x]\n\n return (probas_Z, joint_xz, joint_yz, joint_xyz)", "def calc_overlap_matrix(self, points=None):", "def find_track_lengths(self):\n # find track lengths of paths with multiple cutouts\n path_lengths = Track_Length_Helper.find_track_lengths_path(self)\n # construct present_points from the cutouts in the path array, to focus on the cutouts that do not belong to\n # any path\n if self.path_arr:\n present_points = np.concatenate(self.path_arr)\n else:\n present_points = []\n self.path_orientations = np.zeros((len(self.satellite_centers), 2))\n cutout_no = len(self.satellite_centers)\n count = len(self.path_arr)\n # for each cutout,\n for ind in range(0, cutout_no):\n # if it is not included in any other path,\n if ind not in present_points:\n self.path_arr.append([ind])\n # estimate its length\n path_orientation, path_length = 
Track_Length_Helper.find_individual_track(self,\n self.satellite_centers[ind],\n self.all_thetas[ind])\n # add its orientation to path_orientations\n self.path_orientations[count] = path_orientation\n path_lengths.append(path_length)\n count += 1\n return self.path_arr, self.path_orientations, path_lengths", "def contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0], norm=True):\n gtr = gt.ravel()\n segr = seg.ravel() \n ij = numpy.zeros((2,len(gtr)))\n ij[0,:] = segr\n ij[1,:] = gtr\n cont = coo_matrix((numpy.ones((len(gtr))), ij)).toarray()\n cont[:, ignore_gt] = 0\n cont[ignore_seg,:] = 0\n if norm:\n cont /= float(cont.sum())\n return cont", "def hier_to_table(hier):\n curr_id = hier[0]\n children = hier[-1]\n\n if children:\n subtables = []\n subtable_widths = []\n for i, child in enumerate(children):\n subtable = hier_to_table(child)\n subtable_widths += [len(subtable[0])]\n subtables += [subtable]\n max_subtable_width = max(subtable_widths) \n \n table = []\n for subtable, subtable_width in zip(subtables, subtable_widths):\n left_fill = (max_subtable_width - subtable_width)*['?']\n for row in subtable:\n table_row = [[curr_id] + row + left_fill]\n table += table_row\n else:\n table = [[curr_id]]\n return table" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
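_skeleton_segments in the record above is a small private helper, but its output is easy to sanity-check by hand. The sketch below runs it on an invented three-node skeleton (assuming the helper is in scope, e.g. imported directly from the same module); the root row has no parent, so the inner merge drops it and only the two real child-parent segments remain.

```python
import pandas as pd

df = pd.DataFrame({
    'rowId':  [1, 2, 3],
    'x':      [0.0, 3.0, 3.0],
    'y':      [0.0, 4.0, 4.0],
    'z':      [0.0, 0.0, 12.0],
    'radius': [1.0, 1.0, 1.0],
    'link':   [-1, 1, 2],   # segments: 2 -> 1 and 3 -> 2
})

seg_df = _skeleton_segments(df)

# Expected lengths: |(3,4,0)-(0,0,0)| = 5.0 and |(3,4,12)-(3,4,0)| = 12.0
print(seg_df[['rowId', 'rowId_parent', 'length']])
```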
Insert new nodes into a skeleton to make it "higher resolution". For all child-parent segments with length greater than the given maximum length, subdivide each segment into N smaller equal-length segments, such that all of the new segments are (ideally) no longer than the given maximum. The 'radius' column is interpolated between the child and parent radius values.
def upsample_skeleton(skeleton_df, max_segment_length): if len(skeleton_df) in (0, 1) or (skeleton_df['link'] == -1).all(): # Can't upsample a skeleton with no child-parent segments return skeleton_df seg_df = _skeleton_segments(skeleton_df) seg_df = seg_df.loc[seg_df['length'] > max_segment_length] if len(seg_df) == 0: return skeleton_df I0 = seg_df['rowId'] I1 = seg_df['rowId_parent'] next_id = 1 + skeleton_df['rowId'].max() # It's best to minimize the number of times we call np.linspace(), # so we interpolate points and radii in conjunction with a single array. PR0 = seg_df[[*'xyz', 'radius']].values PR1 = seg_df[['x_parent', 'y_parent', 'z_parent', 'radius_parent']].values D = seg_df['length'] segment_nodes = [] for i0, i1, pr0, pr1, d in zip(I0, I1, PR0, PR1, D): # Number of nodes from child (i0) to parent (i1) # excluding the parent (which we won't edit). n = int(np.ceil(d / max_segment_length)) # IDs of the original child and new intermediates going towards # the original parent, but not the parent itself. I = [i0, *range(next_id, next_id + n - 1)] # noqa next_id += n - 1 # 'link' (parent id) for the original child and new intermediates L = I[1:] + [i1] # Interpolate points and radii PR = np.linspace(pr0, pr1, n, endpoint=False) assert len(PR) == len(I) == len(L) segment_nodes.append((I, *PR.T, L)) segment_cols = [*zip(*segment_nodes)] full_cols = [np.concatenate(a) for a in segment_cols] new_df = pd.DataFrame(dict(zip(['rowId', *'xyz', 'radius', 'link'], full_cols))) # Expand the DataFrame to make room for the new rows, # then copy them over. all_rowIds = np.sort(pd.concat((skeleton_df['rowId'], new_df['rowId'])).unique()) dtypes = skeleton_df.dtypes skeleton_df = skeleton_df.set_index('rowId').reindex(all_rowIds) skeleton_df.update(new_df.set_index('rowId')) # Restore to standard column form. return skeleton_df.reset_index().astype(dtypes)
[ "def heal_skeleton(skeleton_df, max_distance=np.inf, root_parent=None):\n if max_distance is True:\n max_distance = np.inf\n\n if not max_distance:\n max_distance = 0.0\n\n if root_parent is None:\n root_parent = -1\n else:\n # Fast path to exit early if we can easily check the number of roots.\n num_roots = (skeleton_df['link'] == root_parent).sum()\n if num_roots == 1:\n # There's only one root and therefore only one component.\n # No healing necessary.\n return skeleton_df\n\n skeleton_df = skeleton_df.sort_values('rowId', ignore_index=True)\n g = skeleton_df_to_nx(skeleton_df, False, False)\n\n # Extract each fragment's rows and construct a KD-Tree\n Fragment = namedtuple('Fragment', ['frag_id', 'df', 'kd'])\n fragments = []\n for frag_id, cc in enumerate(nx.connected_components(g)):\n if len(cc) == len(skeleton_df):\n # There's only one component -- no healing necessary\n return skeleton_df\n df = skeleton_df.query('rowId in @cc')\n kd = cKDTree(df[[*'xyz']].values)\n fragments.append( Fragment(frag_id, df, kd) )\n\n # Sort from big-to-small, so the calculations below use a\n # KD tree for the larger point set in every fragment pair.\n fragments = sorted(fragments, key=lambda frag: -len(frag.df))\n\n # We could use the full graph and connect all\n # fragment pairs at their nearest neighbors,\n # but it's faster to treat each entire fragment as\n # a single node and run MST on that quotient graph,\n # which is tiny.\n frag_graph = nx.Graph()\n for frag_a, frag_b in combinations(fragments, 2):\n coords_b = frag_b.df[[*'xyz']].values\n distances, indexes = frag_a.kd.query(coords_b)\n\n index_b = np.argmin(distances)\n index_a = indexes[index_b]\n\n node_a = frag_a.df['rowId'].iloc[index_a]\n node_b = frag_b.df['rowId'].iloc[index_b]\n dist_ab = distances[index_b]\n\n # Add edge from one fragment to another,\n # but keep track of which fine-grained skeleton\n # nodes were used to calculate distance.\n frag_graph.add_edge( frag_a.frag_id, frag_b.frag_id,\n node_a=node_a, node_b=node_b,\n distance=dist_ab )\n\n # Compute inter-fragment MST edges\n frag_edges = nx.minimum_spanning_edges(frag_graph, weight='distance', data=True)\n\n # For each inter-fragment edge, add the corresponding\n # fine-grained edge between skeleton nodes in the original graph.\n omit_edges = []\n for _u, _v, d in frag_edges:\n g.add_edge(d['node_a'], d['node_b'])\n if d['distance'] > max_distance:\n omit_edges.append((d['node_a'], d['node_b']))\n\n # Traverse in depth-first order to compute edges for final tree\n root = skeleton_df['rowId'].iloc[0]\n\n # Replace 'link' (parent) column using MST edges\n _reorient_skeleton(skeleton_df, root, root_parent, g=g)\n assert (skeleton_df['link'] == root_parent).sum() == 1\n assert skeleton_df['link'].iloc[0] == root_parent\n\n # Delete edges that violated max_distance\n if omit_edges:\n # Make sure this is invariant to edge direction (check both directions).\n omit_edges = omit_edges + [(b, a) for (a, b) in omit_edges]\n omit_df = pd.DataFrame(omit_edges, columns=['rowId', 'link'])\n omit_df['omit_link'] = -1\n\n # Remove links for omitted edges (convert child node to a new root).\n skeleton_df = skeleton_df.merge(omit_df, 'left', on=['rowId', 'link'])\n skeleton_df['link'].update(skeleton_df['omit_link'])\n del skeleton_df['omit_link']\n\n return skeleton_df", "def _skeleton_segments(skeleton_df):\n segment_df = skeleton_df.merge(skeleton_df[['rowId', 'link', *'xyz', 'radius']],\n 'inner',\n left_on='link',\n right_on='rowId',\n suffixes=['', '_parent'])\n\n child_points = 
segment_df[[*'xyz']].values\n parent_points = segment_df[['x_parent', 'y_parent', 'z_parent']].values\n segment_df['length'] = np.linalg.norm(child_points - parent_points, axis=1)\n return segment_df", "def __init__(self, maxnodepts = 64, initsize = 4):\n this = _coin.new_SbBSPTree(maxnodepts, initsize)\n try: self.this.append(this)\n except: self.this = this", "def _make_supercell(atoms, cutoff):\n # when the cell lengths are smaller than radius, make supercell to be longer than the radius\n scale_abc = []\n for l in atoms.cell.cellpar()[:3]:\n if l < cutoff:\n scale_abc.append(math.ceil(cutoff / l))\n else:\n scale_abc.append(1)\n\n # make supercell\n m = np.zeros([3, 3])\n np.fill_diagonal(m, scale_abc)\n atoms = make_supercell(atoms, m)\n return atoms", "def __init__(self, maxnodepts: 'int const'=64, initsize: 'int const'=4):\n this = _coin.new_SbBSPTree(maxnodepts, initsize)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def steinerTree(nodes):\n #works in 2 or 3 dimensions\n R = len(nodes[0]) # either 2 or 3 -- this is the dimension we're working in\n n = len(nodes)\n steinerPoints = []\n for i in range(n - 2):\n steinerPoints.append([random.uniform(min([i[dim] for i in nodes]), max([i[dim] for i in nodes])) for dim in\n range(R)])\n jump = 0\n for i in steinerPoints:\n for j in nodes:\n jump += dist(i, j)\n jump /= (len(steinerPoints) * len(nodes))\n #now the initial topology must be created\n snLocs = [i for i in range(n - 2)]\n snConnections = [random.choice(snLocs) for i in range(len(nodes))] #connections between steiner points and nodes\n ssLocs = [i for i in range(int(nCr(len(steinerPoints), 2)))]\n ssConnections = [] #connections between steiner points and other steiner points\n for i in range(n - 3):\n ssConnections.append(random.choice(ssLocs))\n ssLocs.remove(ssConnections[-1])\n print(createTree(snConnections, ssConnections)) #this is the structure of the initial tree\n iterations = 0\n while iterations < 25000:\n oldConnections = (snConnections[:],\n ssConnections[:]) #these fucking colons needing to be here cost me hours of time\n\n vec = [random.random() for dim in range(R)]\n negaters = [random.randint(0, 1) for dim in range(R)]\n for dim in range(R):\n if negaters[dim]:\n vec[dim] *= -1\n vec = normalize(vec)\n #multiply each component by the jump size\n for j in range(R):\n vec[j] *= jump\n r = random.randint(0, len(steinerPoints) - 1)\n newsol = [steinerPoints[r][dim] + vec[dim] for dim in range(R)]\n newsteinerPoints = steinerPoints[:r] + [newsol] + steinerPoints[r + 1:]\n if pathLength(nodes, newsteinerPoints, ssConnections, snConnections) < \\\n pathLength(nodes, steinerPoints, ssConnections, snConnections):\n steinerPoints = newsteinerPoints\n\n r1 = random.randint(0, len(snConnections) - 1)\n r2 = random.randint(0, len(snConnections) - 1)\n newSnConnections = snConnections[:]\n newSnConnections[r1], newSnConnections[r2] = newSnConnections[r2], newSnConnections[r1]\n if pathLength(nodes, steinerPoints, ssConnections, newSnConnections) < \\\n pathLength(nodes, steinerPoints, ssConnections,snConnections):\n snConnections = newSnConnections[:]\n r = random.randint(0, len(ssConnections) - 1)\n newSsConnection = random.randint(0, nCr(len(steinerPoints), 2) - 1)\n if pathLength(nodes, steinerPoints, ssConnections[:r] + [newSsConnection] + ssConnections[r + 1:], snConnections) < \\\n pathLength(nodes, steinerPoints, ssConnections, snConnections) and unique(\n ssConnections[:r] + [newSsConnection] + ssConnections[r + 1:]):\n 
ssConnections[r] = newSsConnection\n allssConnections = [i for i in combinations([i for i in range(n - 2)], 2)]\n steinerPointsCounts = [3 for i in range(len(steinerPoints))]\n for i in ssConnections:\n for j in allssConnections[i]:\n steinerPointsCounts[j] -= 1\n snConnections = []\n for i in range(len(steinerPointsCounts)):\n for j in range(steinerPointsCounts[i]):\n snConnections.append(i)\n random.shuffle(snConnections)\n if not isValid(snConnections, ssConnections, steinerPoints):\n snConnections, ssConnections = oldConnections\n jump *= .9995\n iterations += 1\n if iterations == 25000 and not isValid(snConnections, ssConnections, steinerPoints):\n # restarts if we've failed\n print(\"Starting over...\")\n steinerPoints = []\n for i in range(n - 2):\n steinerPoints.append([random.uniform(min([i[dim] for i in nodes]), max([i[dim] for i in nodes])) for dim in\n range(R)])\n jump = 0\n for i in steinerPoints:\n for j in nodes:\n jump += dist(i, j)\n jump /= (len(steinerPoints) * len(nodes))\n #now the initial topology must be created\n snLocs = [i for i in range(n - 2)]\n snConnections = [random.choice(snLocs) for i in range(len(nodes))] #connections between steiner points and nodes\n ssLocs = [i for i in range(int(nCr(len(steinerPoints), 2)))]\n ssConnections = [] #connections between steiner points and other steiner points\n for i in range(n - 3):\n ssConnections.append(random.choice(ssLocs))\n ssLocs.remove(ssConnections[-1])\n iterations = 0\n\n #wrap up program\n\n print(\"steinerPoints:\")\n for sol in steinerPoints:\n print(sol)\n print(\"ssConnections: \", ssConnections)\n print(\"snConnections: \", snConnections)\n print(\"tree: \", createTree(snConnections, ssConnections))\n print(pathLength(nodes, steinerPoints, ssConnections, snConnections))\n # if not isValid(snConnections, ssConnections):\n # print(\"I have not generated a valid Steiner tree for you. 
I am very sorry.\")\n # return\n\n #for 3D plots\n if R == 3:\n lines = []\n for i in range(n):\n lines.append([nodes[i], steinerPoints[snConnections[i]]])\n allssConnections = []\n for i in combinations([i for i in range(n - 2)], 2):\n allssConnections.append(i)\n for i in ssConnections:\n lines.append([steinerPoints[allssConnections[i][0]], steinerPoints[allssConnections[i][1]]])\n VecStart_x = []\n VecStart_y = []\n VecStart_z = []\n VecEnd_x = []\n VecEnd_y = []\n VecEnd_z = []\n for line in lines:\n VecStart_x.append(line[0][0])\n VecEnd_x.append(line[1][0])\n VecStart_y.append(line[0][1])\n VecEnd_y.append(line[1][1])\n VecStart_z.append(line[0][2])\n VecEnd_z.append(line[1][2])\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for i in range(len(VecStart_x)):\n ax.plot([VecStart_x[i], VecEnd_x[i]], [VecStart_y[i], VecEnd_y[i]], zs=[VecStart_z[i], VecEnd_z[i]])\n pl.plot([i[0] for i in steinerPoints], [i[1] for i in steinerPoints], [i[2] for i in steinerPoints], 'bo')\n pl.plot([i[0] for i in nodes], [i[1] for i in nodes], [i[2] for i in nodes], 'ro')\n # ax.text(min([i[0] for i in nodes])-1, min(i[1] for i in nodes)-1, min(i[2] for i in nodes)-1,\n # \"Total distance: \"+str(pathLength(nodes, steinerPoints, ssConnections, snConnections)), fontsize=15)\n ax.set_title(\"Total Distance: \" + str(pathLength(nodes, steinerPoints, ssConnections, snConnections)))\n\n ## draw sphere\n# u = np.linspace(0, 2 * np.pi, 100)\n# v = np.linspace(0, np.pi, 100)\n#\n# x = 1 * np.outer(np.cos(u), np.sin(v))\n# y = 1 * np.outer(np.sin(u), np.sin(v))\n# z = 1 * np.outer(np.ones(np.size(u)), np.cos(v))\n# elev = 10.0\n# rot = 80.0 / 180 * np.pi\n# ax.plot_surface(x, y, z, rstride=4, cstride=4, color='b', linewidth=0, alpha=0.5)\n# pl.show()\n # Create a sphere\n pi = np.pi\n cos = np.cos\n sin = np.sin\n phi, theta = np.mgrid[0.0:pi:100j, 0.0:2.0*pi:100j]\n x = radius*sin(phi)*cos(theta)\n y = radius*sin(phi)*sin(theta)\n z = radius*cos(phi)\n\n\n def slerp(p1, p2, t):\n omega = np.arccos( p1.dot(p2) )\n sin_omega = np.sin(omega)\n t = t[:, np.newaxis]\n return ( np.sin( (1-t)*omega )*p1 + np.sin( t*omega )*p2 )/sin_omega\n\n p1 = np.array([1, 0, 0])\n p2 = np.array([0, 1, 0])\n t = np.linspace(0, 1, 30)\n\n arc = slerp(p1, p2, t)\n\n #Import data\n# data = np.genfromtxt('leb.txt')\n# theta, phi, r = np.hsplit(data, 3)\n# theta = theta * pi / 180.0\n# phi = phi * pi / 180.0\n# xx = sin(phi)*cos(theta)\n# yy = sin(phi)*sin(theta)\n# zz = cos(phi)\n\n #Set colours and render\n# ax = fig.add_subplot(111, projection='3d')\n\n ax.plot_surface(\n x, y, z, rstride=1, cstride=1, color='c', alpha=0.3, linewidth=0)\n\n pl.plot( arc[:, 0], arc[:, 1] )\n ax.set_xlim([-1,1])\n ax.set_ylim([-1,1])\n ax.set_zlim([-1,1])\n# ax.set_aspect(\"equal\")\n pl.tight_layout()\n manager = plt.get_current_fig_manager()\n manager.window.showMaximized()\n plt.savefig('Steiner_tree.png')\n pl.show()", "def _learn_on_new_segments(self, connections: Connections, new_segment_cells, growth_candidates, sample_size,\n max_synapses_per_segment,\n initial_permanence, max_segments_per_cell):\n num_new_synapses = len(growth_candidates)\n\n if sample_size != -1:\n num_new_synapses = min(num_new_synapses, sample_size)\n\n if max_synapses_per_segment != -1:\n num_new_synapses = min(num_new_synapses, max_synapses_per_segment)\n\n for cell in new_segment_cells:\n new_segment = connections.createSegment(cell, max_segments_per_cell)\n connections.growSynapses(new_segment, growth_candidates, initial_permanence, self.rng,\n 
maxNew=num_new_synapses)", "def remesh(self,max_length = None):\n dz = self.dz\n mesh = self.mesh\n bmesh = BoundaryMesh(mesh,'exterior',order=True)\n x = bmesh.coordinates()[:,0]\n\n if max_length == None:\n max_length = np.max(x)*10\n\n pts = sort_boundary_nodes(bmesh)\n\n # Now remove nodes that are plast the cutoff length and\n pt_new = []\n pt_flag = None\n length_flag = True\n xcliff = max_length\n for n in range(len(pts)):\n pt = pts[n]\n # We will stack x points along the calving front if they exceed the distance\n if near(pt[0],0) and pt[1]<self.bed_fun(0.0):\n pt_new.append(pt)\n else:\n if pt[0]<=xcliff:\n if len(pt_new)==0:\n pt_new.append(pt)\n else:\n # If there is at least one point, we calculate the distance\n # between the new and old point\n dist = np.sqrt((pt[0]-pt_new[-1][0])**2+(pt[1]-pt_new[-1][1])**2)\n pt_new.append(pt)\n\n pt_new = np.array(pt_new)\n # The characteristic length is the radius so twice the mesh size\n new_mesh = meshGmsh(pt_new.transpose(),dz*2)\n\n\n #mesh = Mesh()\n #with XDMFFile(\"tmp.xdmf\") as infile:\n # infile.read(mesh)\n self.mesh=new_mesh\n self.mesh.bounding_box_tree().build(self.mesh)\n self.generate_function_spaces()\n return self", "def __cut_short_skeleton_terminal_edges(self, cut_ratio=2.0):\n\n def remove_elm(elm_id, elm_neigh, elm_box, sklabel):\n sklabel[sklabel == elm_id] = 0\n del elm_neigh[elm_id]\n del elm_box[elm_id]\n for elm in elm_neigh:\n elm_neigh[elm] = [x for x in elm_neigh[elm] if x != elm]\n return elm_neigh, elm_box, sklabel\n\n len_edg = np.max(self.sklabel)\n len_node = np.min(self.sklabel)\n logger.debug(\"len_edg: \" + str(len_edg) + \" len_node: \" + str(len_node))\n\n # get edges and nodes that are near the edge. (+bounding box)\n logger.debug(\"skeleton_analysis: starting element_neighbors processing\")\n self.elm_neigh = {}\n self.elm_box = {}\n for edg_number in list(range(len_node, 0)) + list(range(1, len_edg + 1)):\n self.elm_neigh[edg_number], self.elm_box[\n edg_number\n ] = self.__element_neighbors(edg_number)\n logger.debug(\"skeleton_analysis: finished element_neighbors processing\")\n # clear unneeded data. IMPORTANT!!\n\n self.__clean_shifted()\n # remove edges+nodes that are not connected to rest of the skeleton\n logger.debug(\n \"skeleton_analysis: Cut - Removing edges that are not\"\n + \" connected to rest of the skeleton (not counting its nodes)\"\n )\n cut_elm_neigh = dict(self.elm_neigh)\n cut_elm_box = dict(self.elm_box)\n for elm in self.elm_neigh:\n elm = int(elm)\n if elm > 0: # if edge\n conn_nodes = [i for i in self.elm_neigh[elm] if i < 0]\n conn_edges = []\n for n in conn_nodes:\n if n in self.elm_neigh:\n nn = self.elm_neigh[n] # get neighbours elements of node\n else:\n logger.debug(f\"Node {str(n)} not found! 
May be already deleted.\")\n continue\n\n for (e) in (nn): # if there are other edges connected to node add them to conn_edges\n if e > 0 and e not in conn_edges and e != elm:\n conn_edges.append(e)\n\n if (len(conn_edges) == 0): # if no other edges are connected to nodes, remove from skeleton\n logger.debug(f\"removing edge {str(elm)} with its nodes {str(self.elm_neigh[elm])}\")\n for night in self.elm_neigh[elm]:\n remove_elm(night, cut_elm_neigh, cut_elm_box, self.sklabel)\n self.elm_neigh = cut_elm_neigh\n self.elm_box = cut_elm_box\n\n # remove elements that are not connected to the rest of skeleton\n logger.debug(\"skeleton_analysis: Cut - Removing elements that are not connected to rest of the skeleton\")\n cut_elm_neigh = dict(self.elm_neigh)\n cut_elm_box = dict(self.elm_box)\n for elm in self.elm_neigh:\n elm = int(elm)\n if len(self.elm_neigh[elm]) == 0:\n logger.debug(f\"removing element {str(elm)}\")\n remove_elm(elm, cut_elm_neigh, cut_elm_box, self.sklabel)\n self.elm_neigh = cut_elm_neigh\n self.elm_box = cut_elm_box\n\n # get list of terminal nodes\n logger.debug(\"skeleton_analysis: Cut - get list of terminal nodes\")\n terminal_nodes = []\n for elm in self.elm_neigh:\n if elm < 0: # if node\n conn_edges = [i for i in self.elm_neigh[elm] if i > 0]\n if len(conn_edges) == 1: # if only one edge is connected\n terminal_nodes.append(elm)\n\n # init radius analysis\n logger.debug(\"__radius_analysis_init\")\n if self.volume_data is not None:\n skdst = self.__radius_analysis_init()\n\n # removes end terminal edges based on radius/length ratio\n logger.debug(\n \"skeleton_analysis: Cut - Removing bad terminal edges based on\"\n + \" radius/length ratio\"\n )\n cut_elm_neigh = dict(self.elm_neigh)\n cut_elm_box = dict(self.elm_box)\n for tn in terminal_nodes:\n te = [i for i in self.elm_neigh[tn] if i > 0][0] # terminal edge\n radius = float(self.__radius_analysis(te, skdst))\n edgst = self.__connection_analysis(int(te))\n edgst = self.__ordered_points_with_pixel_length(edg_number, edg_stats=edgst)\n edgst.update(self.__edge_length(edg_number, edgst))\n length = edgst[\"lengthEstimation\"]\n\n # logger.debug(str(radius / float(length))+\" \"+str(radius)+\" \"+str(length))\n if (radius / float(length)) > cut_ratio:\n logger.debug(f\"removing edge {str(te)} with its terminal node.\")\n remove_elm(elm, cut_elm_neigh, cut_elm_box, self.sklabel)\n self.elm_neigh = cut_elm_neigh\n self.elm_box = cut_elm_box\n\n self.__check_nodes_to_be_just_curve_from_elm_neig()\n\n # regenerate new nodes and edges from cut skeleton (sklabel)\n logger.debug(\"Regenerate new nodes and edges from cut skeleton\")\n self.sklabel[self.sklabel != 0] = 1\n skelet_nodes = self.__skeleton_nodes(self.sklabel)\n self.sklabel = self.__generate_sklabel(skelet_nodes)", "def update_seams_max_error(self, radius=0):\n self.maxErrNodeNbGlobal = -1\n maxErr = -1.0\n bs = self.borderSize\n for j in range(bs, self.output_height - bs):\n for i in range(bs, self.output_width - bs):\n err_sum = 0.0\n nodeNbGlobal = self.get_node_number_global(0, 0, i, j)\n\n # all the error nearby\n for jj in range(-radius, radius + 1):\n for ii in range(-radius, radius + 1):\n node_neighbor_global = self.get_node_number_global(0, 0, i + ii, j + jj)\n neighbor_node = self.global_nodes[node_neighbor_global]\n if not neighbor_node.empty:\n if neighbor_node.seamRight:\n err_sum += neighbor_node.rightCost\n if neighbor_node.seamBottom:\n err_sum += neighbor_node.bottomCost\n if err_sum > maxErr:\n maxErr = err_sum\n self.maxErrNodeNbGlobal = 
nodeNbGlobal\n\n return maxErr", "def straigh_subsample(neuron, num, distance):\n\n # Selecting the main points: branching nodes and end nodes\n selected_index = get_main_points(neuron)\n\n #print(num)\n\n\n # for each segment between two consecuative main points, a few nodes from the segment will be added to the selected node.\n # These new nodes will be selected base on the fact that neural distance of two consecuative nodes is around 'distance'.\n # Specifically, it starts from the far main point, and goes on the segment toward the near main point. Then the first node which is\n # going to add has the property that it is the farest node from begining on the segment such that its distance from begining is\n # less than 'distance'. The next nodes will be selected similarly.\n\n for i in selected_index:\n upList = np.array([i], dtype = int)\n index = neuron.parent_index[i]\n dist = neuron.distance_from_parent[i]\n while(~np.any(selected_index == index)): # find selected_index & it's parent index in selected_index\n upList = np.append(upList, index)\n index = neuron.parent_index[index]\n dist = np.append(dist, sum(neuron.distance_from_parent[upList]))\n dist = np.append(0, dist)\n (I,) = np.where(np.diff(np.floor(dist/distance)) > 0) # select node be added\n I = upList[I]\n selected_index = np.append(selected_index, I)\n selected_index = np.unique(selected_index)\n print(\"main_node_index\", selected_index.shape[0])\n if (selected_index.shape[0] < num):\n print('exit to straight')\n return neuron\n neuron = neuron_with_selected_nodes(neuron, selected_index)\n return neuron", "def _rescale_children(self):\n n_children = len(self.children)\n self_scale = self.hyperparam('spatial_scale')\n\n # Update children\n\n for i, child in enumerate(self.children):\n child: ConvBlockGene\n\n new_scale = self_scale + n_children - 1 - i\n\n # Update spatial scale\n self._update_scale(child, new_scale, self_scale)\n\n pass", "def expand_leaf(self, leaf):\n for omp in self.omp:\n if leaf is None:\n if self.scaling_type == 'strong':\n max_ranks = int(self.max_cores / omp)\n self.leaf[omp] = max_ranks * np.array(self.leaf_per_max_rank)\n\n elif self.scaling_type == 'weak':\n self.leaf[omp] = np.array(self.leaf_per_rank)\n else:\n self.leaf[omp] = tools.ensure_sequence(leaf)", "def set_children_bounds(self, lower, upper):\n ...", "def smooth(self, maxCollectionSize = 30, lengthLimit = 20, cornerTolerance = 10):\n smallLineLength = lengthLimit\n segs = self.asSegments()\n i = 0\n collection = []\n while i < len(segs):\n s = segs[i]\n if s.length < smallLineLength and len(collection) <= maxCollectionSize:\n collection.append(s)\n else:\n corner = False\n if len(collection) > 1:\n last = collection[-1]\n if abs(last.tangentAtTime(1).angle - s.tangentAtTime(0).angle) > math.radians(cornerTolerance):\n corner = True\n if len(collection) > maxCollectionSize or corner or i == len(segs)-2:\n points = [x.start for x in collection]\n bp = BezierPath.fromPoints(points)\n if len(bp.asSegments()) > 0:\n segs[i-len(collection):i] = bp.asSegments()\n i -= len(collection)\n collection = []\n i += 1\n if len(collection) > 0:\n points = [x.start for x in collection]\n bp = BezierPath.fromPoints(points)\n if len(bp.asSegments()) > 0:\n segs[i-(1+len(collection)):i-1] = bp.asSegments()\n\n self.activeRepresentation = SegmentRepresentation(self, segs)\n return self", "def generate_child(self):\r\n x1,y1,x2,y2 = self.find(self.data,'-')\r\n \"\"\" val_list contains position values for moving the blank space in either of\r\n the 4 
directions [up,down,left,right] respectively. \"\"\"\r\n val_list = [[x1,y1-1],[x1,y1+1],[x1-1,y1],[x1+1,y1],[x2,y2-1],[x2,y2+1],[x2-1,y2],[x2+1,y2]]\r\n children = []\r\n cntt=0\r\n for i in val_list:\r\n cntt+=1\r\n if(cntt==1):\r\n child = self.shuffle(self.data,x1,y1,i[0],i[1])\r\n if child is not None:\r\n child_node = Node(child,self.level+1,0,self,[self.data[x1][y1-1],\"right\"])\r\n children.append(child_node)\r\n elif(cntt==2):\r\n child = self.shuffle(self.data,x1,y1,i[0],i[1])\r\n if child is not None:\r\n child_node = Node(child,self.level+1,0,self,[self.data[x1][y1+1],\"left\"])\r\n children.append(child_node)\r\n elif(cntt==3):\r\n child = self.shuffle(self.data,x1,y1,i[0],i[1])\r\n if child is not None:\r\n child_node = Node(child,self.level+1,0,self,[self.data[x1-1][y1],\"down\"])\r\n children.append(child_node)\r\n elif(cntt==4):\r\n child = self.shuffle(self.data,x1,y1,i[0],i[1])\r\n if child is not None:\r\n child_node = Node(child,self.level+1,0,self,[self.data[x1+1][y1],\"up\"])\r\n children.append(child_node)\r\n elif(cntt==5):\r\n child = self.shuffle(self.data,x2,y2,i[0],i[1])\r\n if child is not None:\r\n child_node = Node(child,self.level+1,0,self,[self.data[x2][y2-1],\"right\"])\r\n children.append(child_node)\r\n elif(cntt==6):\r\n child = self.shuffle(self.data,x2,y2,i[0],i[1])\r\n if child is not None:\r\n child_node = Node(child,self.level+1,0,self,[self.data[x2][y2+1],\"left\"])\r\n children.append(child_node)\r\n elif(cntt==7):\r\n child = self.shuffle(self.data,x2,y2,i[0],i[1])\r\n if child is not None:\r\n child_node = Node(child,self.level+1,0,self,[self.data[x2-1][y2],\"down\"])\r\n children.append(child_node)\r\n elif(cntt==8):\r\n child = self.shuffle(self.data,x2,y2,i[0],i[1])\r\n if child is not None:\r\n child_node = Node(child,self.level+1,0,self,[self.data[x2+1][y2],\"up\"])\r\n children.append(child_node)\r\n ##print (child)\r\n return children", "def setRoot(G, root, size=50, radius=200):\n if len(root) == 1: #if steady state set as root in (0,0)\n # Set position of root\n G.node[root[0]]['x'] = 0\n G.node[root[0]]['y'] = 0\n G.node[root[0]]['angle'] = 0 \n # Set color and size\n # Note: the color here is a number that will be later transformed to rgb using cmap\n if 'color' not in G.node[root[0]]: #if no defined color assign a random one\n G.node[root[0]]['color'] = random()\n if 'size' not in G.node[root[0]]: #if no defined size assign a default\n G.node[root[0]]['size'] = size\n\n else: # if cycle create fan\n # Determine position of root\n pos = createFanPoints(len(root), 0, 0, 0, 360, radius)\n for n in zip(root, pos):\n # Set position of root\n log.info(\"node, x, y, angle={}\".format(n))\n G.node[n[0]]['x'] = n[1][0]\n G.node[n[0]]['y'] = n[1][1]\n G.node[n[0]]['angle'] = n[1][2] \n # Set color and size\n if 'color' not in G.node[n[0]]: #if no defined color assign a random one\n G.node[n[0]]['color'] = random()\n if 'size' not in G.node[n[0]]: #if no defined size assign a default\n G.node[n[0]]['size'] = size\n\n return G", "def max_children(self, num_children):\n nq = self._clone()\n try:\n num_children = int(num_children)\n except ValueError:\n num_children = 15\n\n nq.num_children = num_children\n return nq", "def create_children(parent_snakes, n_children=None):\n # Make sure the random generator is sed randomly. There are following\n # reasons:\n # 1. We may need to create a group of randomly generated children,\n # according to `random_children_percentage`, to improve the group diversity.\n # 2. 
We need to randomly select pairs of parents to mate.\n np.random.seed()\n n_parents = len(parent_snakes)\n\n def select_random_parent():\n idx = np.random.randint(n_parents)\n return parent_snakes[idx]\n\n def create_child_from_mating():\n p1 = select_random_parent()\n p2 = select_random_parent()\n game_snake = create_snake()\n l = len(p1.brain.W)\n for i in range(l):\n # crossover\n w1, b1 = p1.brain.W[i], p1.brain.b[i]\n w2, b2 = p2.brain.W[i], p2.brain.b[i]\n # for each coefficient element, choose it from p1 or p2.\n fw = np.random.randint(0, 2, size=w1.shape)\n fb = np.random.randint(0, 2, size=b1.shape)\n fw_neg = 1 - fw\n fb_neg = 1 - fb\n game_snake.brain.W[i] = fw * w1 + fw_neg * w2\n game_snake.brain.b[i] = fb * b1 + fb_neg * b2\n return game_snake\n\n # create children with mating.\n children = []\n if not n_children:\n n_children = population - n_parents\n for _ in range(n_children):\n c = create_child_from_mating()\n children.append(c)\n\n # create children randomly\n n_new_children = int(population * random_children_percentage)\n for _ in range(n_new_children):\n c = create_snake()\n children.append(c)\n\n return children" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
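To make the upsampling behaviour above concrete, here is a minimal sketch on an invented two-node skeleton (again assuming upsample_skeleton and its helper are importable). A single 10-unit segment with max_segment_length=2.5 gets split into four pieces, so three interpolated nodes are inserted between the original child and its parent.

```python
import pandas as pd

df = pd.DataFrame({
    'rowId':  [1, 2],
    'x':      [0.0, 10.0],
    'y':      [0.0, 0.0],
    'z':      [0.0, 0.0],
    'radius': [1.0, 3.0],
    'link':   [-1, 1],   # one segment of length 10 from node 2 to node 1
})

up_df = upsample_skeleton(df, max_segment_length=2.5)

# 2 original nodes + 3 new intermediates = 5 rows; the new radii are
# interpolated between the child (3.0) and parent (1.0) values.
print(len(up_df))                        # 5
print(up_df[['rowId', 'radius', 'link']])
```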
Attach a neuron's synapses to its skeleton as new skeleton nodes. Synapses are attached to their nearest skeleton node (by Euclidean distance).
def attach_synapses_to_skeleton(skeleton_df, synapses_df): skeleton_df = skeleton_df.copy(deep=False).reset_index(drop=True) synapses_df = synapses_df.copy(deep=False).reset_index(drop=True) skeleton_df['structure'] = 'neurite' synapses_df['structure'] = synapses_df['type'] synapses_df['radius'] = 0.0 kd = cKDTree(skeleton_df[[*'xyz']].values) _, indexes = kd.query(synapses_df[[*'xyz']].values) synapses_df['link'] = skeleton_df.loc[indexes, 'rowId'].values synapses_df['rowId'] = synapses_df.index + skeleton_df['rowId'].max() + 1 relevant_cols = ['rowId', *'xyz', 'radius', 'link', 'structure'] synapses_df = synapses_df[relevant_cols] skeleton_df = skeleton_df[relevant_cols] combined = pd.concat((skeleton_df, synapses_df), ignore_index=True) combined['structure'] = pd.Categorical(combined['structure']) return combined
[ "def __connect(self):\n #self.outputNeuron = outputNeuron\n #self.inputNeuron = inputNeuron\n self.outputNeuron.addInputSynapse(self)\n self.inputNeuron.addOutputSynapse(self)", "def star_neuron(wing_number=3,\n node_on_each_wings=4,\n spherical=False,\n length=10):\n nodes_list = []\n root = Node()\n root.r = 1.\n root.node_type = 1\n root.xyz = np.array([0, 0, 0], dtype=float)\n nodes_list.append(root)\n\n for i in range(0):\n soma = Node()\n soma.r = .2\n soma.node_type = 1\n soma.xyz = np.array([0, 0, 0], dtype=float)\n nodes_list.append(soma)\n root.add_child(soma)\n soma.parent = root\n\n angle = 2 * np.pi/wing_number\n for j in range(wing_number):\n rand_vec = np.random.randn(3)\n rand_vec = rand_vec/np.sqrt(sum(rand_vec**2))\n for i in range(node_on_each_wings):\n node = Node()\n node.r = .2\n node.node_type = 2\n if spherical:\n x = rand_vec[0] * length * (i+1)\n y = rand_vec[1] * length * (i+1)\n z = rand_vec[2] * length * (i+1)\n else:\n x = np.sin(j*angle) * length * (i+1)\n y = np.cos(j*angle) * length * (i+1)\n z = 0.\n node.xyz = np.array([x, y, z], dtype=float) # +0*np.random.rand(3)\n if i == 0:\n root.add_child(node)\n node.parent = root\n nodes_list.append(node)\n else:\n nodes_list[-1:][0].add_child(node)\n node.parent = nodes_list[-1:][0]\n nodes_list.append(node)\n neuron = Neuron(input_format='only list of nodes', input_file=nodes_list)\n return neuron", "def insert_synapses(params_dict, syn_params_dict, sections_dict, logger, syn_loc_seed):\n \n section_list = sections_dict['section_list']\n \n # defining a dictionary for the inserted synapse. \n syndict={ \n 'x': [], 'y': [], 'weight': [], 'BC_syn': [], 'dist': [] \n }\n \n count = 0 # counts total number of introduced synapses\n tot_segs = 0 # counts total number of segments in all sections\n syn_loc_rnd = np.random.RandomState(syn_loc_seed)\n for k, sec in enumerate(section_list):\n \n n3d = int(NEURON.n3d(sec = sec)) - 1\n tot_segs += sec.nseg\n\n # Interpolating the arc length position of the i'th point in the 3d list to the i'th 3d point\n f2 = interp1d([NEURON.arc3d(i, sec = sec) for i in range(n3d + 1)], np.array(range(n3d + 1)))\n\n for l in range(int(sec.L)):\n \n frac = (l + 0.5) / sec.L\n dist = path_distance(frac, sec, sections_dict)\n \n ################## This is a manually defined density function ##################\n # In order to use the XML-defined density function use the line: ##\n # --> synape = (np.random.rand() < syn_params_dict['density_function'](dist)) ##\n #################################################################################\n \n def synapse_sigmoid_distribution_function(x, scale_factor, offset, transition_placement):\n return 1 - (scale_factor * (0.5 * (1 + math.tanh(x - transition_placement))) + offset)\n \n # (distance, scale_factor, offset, transition_placement)\n scale_factor = params_dict['scale_factor'] \n offset = params_dict['offset'] \n transition_placement = params_dict['transition_placement']\n synape = (syn_loc_rnd.rand() < synapse_sigmoid_distribution_function(dist, scale_factor, \n offset, transition_placement))\n \n #################################################################################\n \n # transforming loaction on the segment to the segment number\n seg_no = int(f2(l + 0.5))\n \n x = (NEURON.x3d(seg_no, sec=sec) + NEURON.x3d(seg_no + 1, sec = sec)) / 2\n y = (NEURON.y3d(seg_no, sec=sec) + NEURON.y3d(seg_no + 1, sec = sec)) / 2\n \n if synape:\n count += 1\n \n BC_syn = NEURON.Exp2Syn(sec(frac))\n BC_syn.tau1 = 0.89\n BC_syn.tau2 = 1.84\n 
BC_syn.e = 0\n \n syndict['weight'].append(params_dict['syn_weight'])\n syndict['x'].append(x)\n syndict['y'].append(y)\n syndict['BC_syn'].append(BC_syn)\n syndict['dist'].append(dist)\n \n logger.info('{} synapses were defined in {} segments'.format(count, tot_segs)) \n return syndict", "def link_nodes(self, node_list):\n for nd in node_list:\n if nd.name_ == \"SplitterNode\":\n self.sp_node_ = nd", "def skeletonEmbed(segmentationResolution=int, segmentationMethod=int, mergedMesh=bool):\n pass", "def addStones(self, stones):\n self.rack = np.append(self.rack, stones, axis=0)", "def __skeleton_nodes(self, data3d_skel, kernel=None):\n\n if kernel is None:\n kernel = np.ones([3, 3, 3])\n\n mocnost = scipy.ndimage.filters.convolve(data3d_skel, kernel) * data3d_skel\n\n nodes = (mocnost > 3).astype(np.int8)\n terminals = ((mocnost == 2) | (mocnost == 1)).astype(np.int8)\n\n data3d_skel[nodes == 1] = 2\n data3d_skel[terminals == 1] = 3\n # maybe swap next two lines\n data3d_skel = self.__skeleton_nodes_aggregation(data3d_skel)\n data3d_skel = self.__remove_terminal_nodes_in_neghborhood_of_the_branching_node(data3d_skel)\n\n return data3d_skel", "def Nu_connectDualSkeleton() :\n\tsysPath = 'O:/studioTools/maya/python/tool/rig/nuTools/pipeline'\n\tif not sysPath in sys.path : \n\t\tsys.path.append(sysPath)\n\n\timport pipeTools\n\treload(pipeTools)\n\t\n\n\tpipeTools.connectDualSkeleton(False, False)", "def add_synapse(self, synapse_name):\n self.synapses['%s' % synapse_name] = SynapseModel(synapse_name, self)", "def setConnectionsWeights(self, newWeights):\n matrix = self.connectionsMatrix\n assert len(matrix) == len(newWeights), 'Dimension missmatch in setConnectionsWeights! %s | %s' % (len(matrix), len(newWeights))\n\n for line, lineWeights in zip(matrix, newWeights):\n assert len(line) == len(lineWeights)-1, 'Dimension missmatch in setConnectionsWeights!'\n\n # set bias\n line[0].getOutputNeuron().setBias(lineWeights[0])\n lineWeights = lineWeights[1:]\n\n # set synapses weights\n for syn, synWeight in zip(line, lineWeights):\n syn.setWeight(synWeight)", "def _create_skeleton(self, width, height, bb_thickness=0.05):\n base_link = urdf.Link('base_link',\n urdf.Inertial(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Mass(value=0),\n urdf.Inertia(ixx=0.001, ixy=0, ixz=0, iyy=0.001, iyz=0, izz=0.001)\n ),\n urdf.Collision(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, 0.3, 0.1))\n )\n ),\n urdf.Visual(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, 0.3, 0.1))\n ),\n urdf.Material('brown',\n urdf.Color(rgba=(0.82, 0.71, 0.55, 1.0))\n )\n ))\n\n back_link = urdf.Link('back_link',\n urdf.Inertial(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Mass(value=0.5),\n urdf.Inertia(ixx=0.001, ixy=0, ixz=0, iyy=0.001, iyz=0, izz=0.001)\n ),\n urdf.Collision(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, bb_thickness, height))\n )\n ),\n urdf.Visual(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, bb_thickness, height))\n ),\n urdf.Material('brown',\n urdf.Color(rgba=(0.82, 0.71, 0.55, 1.0))\n )\n ))\n\n fixed_joint = urdf.Joint('fixed_backboard',\n urdf.Parent('base_link'),\n urdf.Child('back_link'),\n urdf.Origin(xyz=(0, 0, height/2.0 + 0.05), rpy=(0, 0, 0)),\n type='fixed')\n\n self._links.append(base_link)\n self._links.append(back_link)\n self._joints.append(fixed_joint)", "def petri_to_skeleton(net: PetriNet) -> DiGraph:\n skeleton = 
DiGraph()\n inserted_transitions = set()\n for place in net.places:\n for arcIn in place.in_arcs:\n for arcOut in place.out_arcs:\n skeleton.add_edge(arcIn.source, arcOut.target)\n inserted_transitions.add(arcIn.source.label)\n inserted_transitions.add(arcOut.target.label)\n\n # add all transitions that have no places attached to it\n # (floating transitions)\n for t in net.transitions:\n if t.label not in inserted_transitions:\n skeleton.add_edge(t, t)\n\n return skeleton", "def createnodes(self):\n i = 0\n for j in range(0, self.width):\n for k in range(0, self.height):\n n = node.Node(j, k, i)\n self.nodes[int(n.guid)] = n\n i += 1\n for nds in self.nodes:\n self.nodes[nds].walkable = True\n self.nodes[nds].neighbors = helpers.get_neighbors(self.nodes[nds], self)", "def __link_nodes(self):\n def __link_north(node):\n if node.x is 0:\n return\n\n pos = (node.x - 1, node.y)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0] - 1, pos[1])\n\n def __link_south(node):\n if node.x is self.maze.height - 1:\n return\n\n try:\n pos = (node.x + 1, node.y)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0] + 1, pos[1])\n except IndexError:\n return\n\n def __link_east(node):\n pos = (node.x, node.y + 1)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0], pos[1] + 1)\n\n def __link_west(node):\n pos = (node.x, node.y - 1)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0], pos[1] - 1)\n\n for node in self.graph.values():\n __link_south(node)\n __link_north(node)\n __link_east(node)\n __link_west(node)", "def connect_stim_syn_one_one(spiketrains, synlist, threshold=-20, delay=0, weight=1):\n netcons = []\n stimtimes = []\n stimlist = []\n if not isinstance(threshold, collections.Iterable):\n threhsold = [threshold] * len(spiketrains)\n if not isinstance(delay, collections.Iterable):\n delay = [delay] * len(spiketrains)\n if not isinstance(weight, collections.Iterable):\n weight = [weight] * len(spiketrains)\n \n for st, syn, th, d, wt in zip(spiketrains, synlist, threhsold,\n delay, weight):\n stimvec = h.Vector()\n # stimvec.append(*st)\n stimvec = stimvec.from_python(st)\n stimtimes.append(stimvec)\n vecstim = h.VecStim()\n vecstim.play(stimvec)\n stimlist.append(vecstim)\n netcons.append(h.NetCon(vecstim, syn, th, d, wt))\n return netcons, stimlist, stimtimes", "def draw_skeleton(self):\n raise NotImplementedError", "def attach_nodes(self):\n\n # Define variables:\n attached_node = self.attached_node.text()\n clone_node = self.clone_node.isChecked()\n\n # Attach node(s):\n for node in self.selected:\n node.setSelected(False)\n\n try:\n if clone_node is False:\n for node in self.selected:\n node.setSelected(True)\n created_node = nuke.createNode(attached_node.title())\n self.created_nodes.append(created_node) # add to list\n node.setSelected(False)\n created_node.setSelected(False)\n\n # Clone node:\n if clone_node is True:\n if self.selected[0]:\n 
self.selected[0].setSelected(True)\n created_node = nuke.createNode(attached_node.title())\n self.created_nodes.append(created_node) # add to list\n created_node.setSelected(False)\n\n del self.selected[0]\n\n for node in self.selected:\n node.setSelected(True)\n nuke.clone(created_node)\n node.setSelected(False)\n created_node.setSelected(False)\n\n self.created_nodes[0].setSelected(True)\n\n except RuntimeError:\n nuke.message('The name you entered was not a node name.'\n ' Please enter a node name.')\n\n # Show Panel 2 if Panel 1 is successful:\n if self.created_nodes:\n self.panel_two = ANSetKnobs()\n self.panel_two.show()", "def initNetworkWeights(self, sigma0=None, init_input_layer=True):\n from scipy.cluster.vq import kmeans2\n from scipy.spatial.distance import cdist\n \n layers = [i for i in self.module.modulesSorted \n if not isinstance(i, BiasUnit)]\n\n t = []\n for k in range(self.ds.getLength()):\n t.append(self.ds.getSample(k)[1])\n t = np.array(t)\n\n if not sigma0:\n sigma1 = 1.0/np.sqrt(self.module.indim+1)\n sigma2 = 1.0/np.sqrt(layers[-2].indim+1)\n else:\n sigma1 = sigma0\n sigma2 = sigma0\n\n # init connection weights\n if init_input_layer:\n conn_i_h = self.module.connections[layers[0]][0]\n size_i = conn_i_h.paramdim\n conn_i_h.params[:] = np.random.normal(loc=0.0, scale = sigma1, \n size=size_i)\n conn_h_o = self.module.connections[layers[-2]][0] \n size_h = conn_h_o.paramdim\n conn_h_o.params[:] = np.random.normal(loc=0.0, scale = sigma2,\n size=size_h)\n \n ##############################################\n # init biases (adapted from netlab, gmminit.m)\n ##############################################\n # sort bias connections\n bias_unit = [i for i in self.module.modulesSorted \n if isinstance(i, BiasUnit)][0]\n biascons = [c for c in self.module.connections[bias_unit]] \n biascons.sort(key=lambda c: layers.index(c.outmod))\n \n if init_input_layer:\n # first layer biases are only initialized if input layer weights are\n # initialized as well\n biascons[0].params[:] = np.random.normal(loc=0.0, scale = sigma1,\n size=biascons[0].paramdim)\n\n # init output layer biases using the kmeans clustering algorithm\n conn_b_o = biascons[-1]\n \n # added minit=\"points\", since this seems to give better results, \n # since minit=\"random\" sometimes gives centroids outside the \n # target range\n [centroid, label] = kmeans2(t, self.module.M, minit='points')\n cluster_sizes = np.maximum(np.bincount(label, minlength=self.module.M), 1) # avoid empty clusters\n alpha = cluster_sizes.astype('float64') / np.sum(cluster_sizes)\n if (self.module.M > 1):\n # estimate variance from the distance to the nearest centre\n sigma = cdist(centroid, centroid)\n sigma = np.min(sigma + np.diag(np.diag(np.ones(sigma.shape))) * 1000, 1)\n sigma = np.maximum(sigma, np.finfo(float).eps) # avoid underflow\n else:\n # only one centre: take average variance\n sigma = [np.mean(np.diag([np.var(t)]))]\n # set biases (adapted from netlab, mdninit.m)\n print \"Initial target value distribution\"\n print \"Alpha:\"\n print alpha\n print \"Sigma:\"\n print sigma\n print \"Centers:\"\n print centroid\n conn_b_o.params[:] = np.reshape([alpha, np.log(sigma), centroid.flatten()], \n conn_b_o.params.shape)", "def apply_stimulation (synapse_dict, simulation_parameters, logger):\n \n if simulation_parameters['print_mode']:\n print(\"Applying stimulation\")\n logger.info('Applying stimulation')\n \n stimulation_spikes = calculate_spike_times (synapse_dict, simulation_parameters, logger)\n 
simulation_parameters['spikes'] = stimulation_spikes \n\n ncons = {}\n for cell_type in synapse_dict['light']:\n ncons[cell_type] = {}\n for cell in synapse_dict['light'][cell_type]:\n ncons[cell_type][cell] =[NEURON.NetCon(None,syn) for syn in synapse_dict['light'][cell_type][cell]['BC_syn']]\n for ncon in ncons[cell_type][cell]: \n ncon.weight[0] = simulation_parameters['syn_weight'] \n\n def set_sp_times():\n for cell_type in synapse_dict['light']:\n for cell in synapse_dict['light'][cell_type]:\n for i, _ in enumerate(synapse_dict['light'][cell_type][cell]['BC_syn']):\n for sp_time in stimulation_spikes[cell_type][cell][i]:\n ncons[cell_type][cell][i].event(sp_time)\n\n fh = NEURON.FInitializeHandler(set_sp_times) #<- Here I tell neuron to run the function above before it start the simulation \n # All the spike times of the netcons are deleted before the simulation start\n # so I need to tell neuron to add these times after all the netcons are reseted \n \n simulation_parameters['FinalizeHandler'] = fh\n\n if simulation_parameters['print_mode']:\n print(\"Stimulation applied\")\n logger.info('Stimulation applied')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calls coach user api
def http_request_coachuser(self, data):
    response_data_json = self._http_request(
        method='POST',
        url_suffix=URL_SUFFIX_COACH_USER,
        json_data=data,
        data=data,
    )
    return response_data_json
[ "def yo_user(self, username, **kwargs):\n username = username.upper()\n youser_data = {\"api_token\": self.token, \"username\": username}\n for kw in kwargs:\n youser_data.update( { kw:kwargs[kw] } )\n youser_url = \"https://api.justyo.co/yo/\"\n youser = urlfetch.fetch(url=youser_url, \n payload=json.dumps(youser_data), \n method=urlfetch.POST,\n headers={'content-type': 'application/json'})\n if youser.status_code == 200:\n return True\n else:\n raise Exception(youser.status_code)", "def _do_api_call(self, session, url):\n headers = {\n \"X-OESP-Token\": session.oespToken,\n \"X-OESP-Username\": self.username,\n }\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n raise ZiggoNextConnectionError(\"API call failed: \" + str(response.status_code))", "def test_api_v1_users_get(self):\n pass", "def user_data(username: str):\n url = f\"https://cas.gmri.org/api/cas/v1/users/?username={username}\"\n\n response = session.get(url)\n\n if response.status_code != 200:\n raise Error404\n return response.json()[0]", "def get_user_info(self):\n url = self.BASE_URL +'users/1?api_key=' + self.api_key\n req = requests.get(url)\n return req", "def get_me(self):\n\n # Informationen abholen\n url = \"{}{}/getMe\".format(API_URL, self.token)\n r = requests.get(url)\n result = r.json()\n\n # Abfrage auswerten\n self.result = result\n if result[\"ok\"]:\n self.botuser = result[\"result\"]\n return True\n else:\n print(\"Abfrage fehlgeschlagen, Token ok?\")\n return False", "def apic_login(self):\n pass", "def user(ctx):\n pass", "def _call_ambari_api(self, url):\n\n ambari_info = self.ambari_info\n\n response = requests.get(\n url\n , auth=(ambari_info['AMBARI_USER'], ambari_info['AMBARI_PASS'])\n , headers={'X-Requested-By': 'ambari'}\n , verify=False\n )\n\n if response.status_code != requests.codes.ok:\n return {}\n return response.json()", "def test_search_users(self):\n i = self.instance.search_users(\"tom repos:>42 followers:>1000\")\n self.get_next(i)\n\n self.session.get.assert_called_once_with(\n url_for(\"search/users\"),\n params={\"per_page\": 100, \"q\": \"tom repos:>42 followers:>1000\"},\n headers={},\n )", "def perform(self, client, url):\n (resp, content) = client.request(url, 'GET')\n if resp['status'] != '200':\n self.publish('oauth_error', url, resp['status'], content)\n else:\n self.publish('oauth_success', url, content)", "def view_users():\n\n users = []\n for user in crud.get_users():\n print(user.user_id)\n lessons = []\n for lesson in user.lessons:\n lessons.append(lesson.as_dict()) # lessons = dictionary of each lesson\n user_lessons = user.as_dict()\n user_lessons['lessons'] = lessons\n users.append(user_lessons)\n print(f'{users} from server.py /api/users endpoint')\n return {'users': users}", "async def get_user(request, next_id):\n log_request(request)\n head_block = await get_request_block(request)\n conn = await create_connection()\n user_resource = await users_query.fetch_user_resource(\n conn, escape_user_input(next_id)\n )\n conn.close()\n\n return await create_response(conn, request.url, user_resource, head_block)", "def rest_team_members(request, league_url, team_url):\n\n # Check for valid data \n try:\n league_name = decode_url(league_url)\n league = League.objects.get(name=league_name)\n\n team_name = decode_url(team_url)\n team = league.team_set.get(name=team_name)\n\n players = team.player_set.all()\n\n data = []\n for player in players:\n data.append(extract_player(player.user))\n except:\n data = 
None\n\n return HttpResponse(data, mimetype='application/json')", "def user():\n username = request.args.get('username')\n\n if username is None or username == '':\n username = auth_username()\n\n if username is None:\n return bad_json_response(\"Bad request: Missing parameter 'username'.\")\n\n # Export used details of the user.\n user_details = users.export(\n 'username', 'firstname', 'lastname', 'uploads_id',\n 'location', 'study', 'bio', 'creation_date',\n 'last_edit_date', 'relationship_status', 'phone_number',\n username=username\n )\n\n if not user_details:\n return bad_json_response('User not found')\n\n # Check what the status of the friendship is between the users.\n friend_status = is_friend(username)\n if username == get_jwt_identity():\n friend_status = 1\n\n # Get image.\n up_id = user_details[0][3]\n imageurl = '../static/images/default.jpg'\n if friend_status == 1 and uploads.exists(id=up_id):\n filename = uploads.export_one('filename', id=up_id)\n imageurl = get_own_ip() + 'file/{}/{}'.format(up_id, filename)\n\n # Basic information visible if not friends.\n basic_info = {\n 'username': user_details[0][0],\n 'friend': friend_status,\n 'image_url': imageurl\n }\n\n if friend_status != 1:\n return good_json_response(basic_info)\n\n # All information visible if friends.\n sensitive_info = {\n 'firstname': user_details[0][1],\n 'lastname': user_details[0][2],\n 'location': user_details[0][4],\n 'study': user_details[0][5],\n 'bio': user_details[0][6],\n 'creation_date': str(user_details[0][7]),\n 'last_edit_date': str(user_details[0][8]),\n 'relationship_status': user_details[0][9],\n 'phone_number': user_details[0][10]\n\n }\n\n return good_json_response({**basic_info, **sensitive_info})", "async def async_call_homegraph_api(self, url, data):\n session = async_get_clientsession(self.hass)\n\n async def _call():\n headers = {\n \"Authorization\": f\"Bearer {self._access_token}\",\n \"X-GFE-SSL\": \"yes\",\n }\n async with session.post(url, headers=headers, json=data) as res:\n _LOGGER.debug(\n \"Response on %s with data %s was %s\", url, data, await res.text()\n )\n res.raise_for_status()\n return res.status\n\n try:\n await self._async_update_token()\n try:\n return await _call()\n except ClientResponseError as error:\n if error.status == HTTPStatus.UNAUTHORIZED:\n _LOGGER.warning(\n \"Request for %s unauthorized, renewing token and retrying\", url\n )\n await self._async_update_token(True)\n return await _call()\n raise\n except ClientResponseError as error:\n _LOGGER.error(\"Request for %s failed: %d\", url, error.status)\n return error.status\n except (asyncio.TimeoutError, ClientError):\n _LOGGER.error(\"Could not contact %s\", url)\n return HTTPStatus.INTERNAL_SERVER_ERROR", "def find_all_users(self):\n token = self.config.get('PEOPLE_GATEWAY_APP_TOKEN')\n headers = {'app_token': token}\n\n url = '%s/cit/api/v2/people' % self.config.get('PEOPLE_GATEWAY_HOST')\n\n logger.debug('Retreive all users')\n logger.debug('url = %s' %url)\n response = requests.get(url=url, headers=headers)\n\n logger.info('status %s' % response.status_code)\n\n return response.json()", "async def osu(self, ctx, player_id):\r\n osu_api_key = self.config.osukey\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(\"https://osu.ppy.sh/api/get_user\", params={\"k\": osu_api_key, \"u\": player_id}) as resp:\r\n resp.raise_for_status()\r\n payload = await resp.json()\r\n if len(payload) == 0:\r\n await ctx.send(f'cyka blyat! 
couldn\\'t find any user matching this name / ID.')\r\n else:\r\n user = payload[0]\r\n\r\n USERNAME = user[\"username\"]\r\n USERID = user[\"user_id\"]\r\n JOINDATE = user[\"join_date\"]\r\n PLAYCOUNT = user[\"playcount\"]\r\n PPRAW = user[\"pp_raw\"]\r\n PPRANK = user[\"pp_rank\"]\r\n COUNTRYRANK = user[\"pp_country_rank\"]\r\n LEVEL = user[\"level\"]\r\n COUNTRY = user[\"country\"]\r\n\r\n country = COUNTRY.lower()\r\n a0 = float(LEVEL)\r\n a1 = int(a0)\r\n b0 = float(PLAYCOUNT)\r\n b1 = int(b0)\r\n c0 = float(PPRAW)\r\n c1 = int(c0)\r\n\r\n userinfo=f\"\"\"Joined {JOINDATE}\r\nLevel: `{a1}`\r\nRanked games count: `{b1}`\r\nCountry: :flag_{country}:\"\"\"\r\n \r\n ranking=f\"\"\"Total performance points : `{c1}`\r\nGlobal ranking: `#{PPRANK}`\r\nCountry ranking: `#{COUNTRYRANK}`\"\"\"\r\n \r\n e = discord.Embed(title = f'{USERNAME}', description=f'ID: {USERID}', url = f'https://osu.ppy.sh/users/{USERID}', color = 0xFF69B4)\r\n e.set_author(name=\"osu! profile info\", icon_url='https://up.ppy.sh/files/osu-36.png')\r\n e.add_field(name='User info', value=userinfo)\r\n e.add_field(name='Ranking', value=ranking)\r\n e.set_thumbnail(url=f'https://a.ppy.sh/{USERID}')\r\n await ctx.send(embed = e)", "def test_get_participants_when_logged_in(\n client, access_token, add_participants\n):\n rv = client.get(\n '/participants/',\n headers={'Authorization': 'Bearer {}'.format(access_token)}\n )\n response = rv.get_json()\n assert rv.status_code == HTTPStatus.OK\n assert len(response[\"participants\"]) == 10" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sigmoid function for use with Numpy for CPU evaluation.
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
[ "def sigmoid(X):\n\n pass", "def sigmoid(x):\n\t\n\t# Returning sigmoided array.\n\treturn 1 / (1 + np.exp(-x))", "def sigmoid_array(x): \n\treturn 1 / (1 + np.exp(-x))", "def sigmoid_numpy(x):\n\n s = 1/(1+np.exp(-x))\n\n return s", "def sigmoid(t):\n return 1.0 / (1.0 + np.exp(-t))", "def sigmoid(t):\n return (np.exp(t))/(1+(np.exp(t)))", "def sigmoid(t):\n def sig_elem(z):\n if z <= 0:\n return np.exp(z) / (np.exp(z) + 1)\n else:\n return 1 / (1 + np.exp(-z))\n return np.vectorize(sig_elem)(t)", "def ultra_fast_sigmoid(x):\n return T.nnet.ultra_fast_sigmoid(x)", "def inplace_sigmoid(X): \n \n Y = X.copy()\n X *= 0\n X += 1 / (1 + np.exp(1) ** -Y)", "def sigmoid(z):\n return ss.expit(z)", "def sigmoid(z):\r\n \r\n return 1.0 / (1.0 + np.exp(-1.0 * z))", "def inverse_sigmoid_numpy(x):\n return np.log(x / (1. - x))", "def sigmoid(data):\n for i in range(len(data)):\n data[i] = 1 / (1 + np.exp(-data[i]))", "def sigmoid(x, lb=0, ub=1):\n\n # since image data is too large, use approximate value to speed up program\n if isinstance(x, Matrix):\n arr = [[0 for col in range(x.shape[1])] for row in range(x.shape[0])]\n for row in range(x.shape[0]):\n for col in range(x.shape[1]):\n if x[row][col] > 5:\n arr[row][col] = ub\n elif x[row][col] < -5:\n arr[row][col] = lb\n else:\n arr[row][col] = (1. / (1. + exp(-x[row][col]))) * (ub - lb) + lb\n return Matrix(arr)\n return (1. / (1. + exp(-x))) * (ub - lb) + lb", "def sigmoid(input: Matrix):\n\n # Instantiate output as a matrix same dimensions as input\n # output = [ [0 for i in range(len(input))] for j in range(len(input[0])) ] \n output = Matrix(input.size())\n\n # Perform sigmoid on all elements in input matrix\n for x in range(input.height()):\n for y in range(input.width()):\n output[x][y] = 1 / (1 + math.exp(-1 * input[x][y])) \n\n return output", "def sigmoid(z_values):\n return 1 / (1 + np.exp(-z_values))", "def perf_sigmoid_derivative(x):\n # result = perf_sigmoid(x)\n # return result * (1 - result)\n return x * (1 - x)", "def sigmoid(self,z): # will be used for computing the Activation given by A=sigmoid(z)\n result = 1/(1+np.exp(-z))\n return result", "def hardsigmoid__default(g, self):\n return g.op('HardSigmoid', self, alpha_f=1 / 6)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DIoU non-maximum suppression. diou = iou - (squared Euclidean distance of box centers) / (squared diagonal of the smallest enclosing bounding box)
def diou_nms(dets, iou_thresh=None):
    iou_thresh = iou_thresh or 0.5

    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    center_x = (x1 + x2) / 2
    center_y = (y1 + y2) / 2

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        intersection = w * h
        iou = intersection / (areas[i] + areas[order[1:]] - intersection)

        smallest_enclosing_box_x1 = np.minimum(x1[i], x1[order[1:]])
        smallest_enclosing_box_x2 = np.maximum(x2[i], x2[order[1:]])
        smallest_enclosing_box_y1 = np.minimum(y1[i], y1[order[1:]])
        smallest_enclosing_box_y2 = np.maximum(y2[i], y2[order[1:]])

        square_of_the_diagonal = (
            (smallest_enclosing_box_x2 - smallest_enclosing_box_x1)**2 +
            (smallest_enclosing_box_y2 - smallest_enclosing_box_y1)**2)

        square_of_center_distance = ((center_x[i] - center_x[order[1:]])**2 +
                                     (center_y[i] - center_y[order[1:]])**2)

        # Add 1e-10 for numerical stability.
        diou = iou - square_of_center_distance / (square_of_the_diagonal + 1e-10)

        inds = np.where(diou <= iou_thresh)[0]
        order = order[inds + 1]
    return dets[keep]
[ "def box_diou(boxes):\n # get box coordinate and area\n x = boxes[:, 0]\n y = boxes[:, 1]\n w = boxes[:, 2]\n h = boxes[:, 3]\n areas = w * h\n\n # check IoU\n inter_xmin = np.maximum(x[:-1], x[-1])\n inter_ymin = np.maximum(y[:-1], y[-1])\n inter_xmax = np.minimum(x[:-1] + w[:-1], x[-1] + w[-1])\n inter_ymax = np.minimum(y[:-1] + h[:-1], y[-1] + h[-1])\n\n inter_w = np.maximum(0.0, inter_xmax - inter_xmin + 1)\n inter_h = np.maximum(0.0, inter_ymax - inter_ymin + 1)\n\n inter = inter_w * inter_h\n iou = inter / (areas[:-1] + areas[-1] - inter)\n\n # box center distance\n x_center = x + w / 2\n y_center = y + h / 2\n center_distance = np.power(x_center[:-1] - x_center[-1], 2) + np.power(\n y_center[:-1] - y_center[-1], 2)\n\n # get enclosed area\n enclose_xmin = np.minimum(x[:-1], x[-1])\n enclose_ymin = np.minimum(y[:-1], y[-1])\n enclose_xmax = np.maximum(x[:-1] + w[:-1], x[-1] + w[-1])\n enclose_ymax = np.maximum(x[:-1] + w[:-1], x[-1] + w[-1])\n enclose_w = np.maximum(0.0, enclose_xmax - enclose_xmin + 1)\n enclose_h = np.maximum(0.0, enclose_ymax - enclose_ymin + 1)\n # get enclosed diagonal distance\n enclose_diagonal = np.power(enclose_w, 2) + np.power(enclose_h, 2)\n # calculate DIoU, add epsilon in denominator to avoid dividing by 0\n diou = iou - 1.0 * (center_distance) / (\n enclose_diagonal + np.finfo(float).eps)\n\n return diou", "def bbox_iou(main_box, aux_boxes):\n\n max_Xmin = np.maximum(main_box[0], aux_boxes[0,:]) # Valor máximo de los \"X min\"\n max_Ymin = np.maximum(main_box[1], aux_boxes[1,:]) # Valor máximo de los \"Y min\"\n min_Xmax = np.minimum(main_box[2], aux_boxes[2,:]) # Valor mínimo de los \"X max\"\n min_Ymax = np.minimum(main_box[3], aux_boxes[3,:]) # Valor mínimo de los \"Y max\"\n\n X_overlap = np.maximum(0, min_Xmax - max_Xmin) # Overlap de las cajas sobre el eje X. Valor mínimo de los \"X max\" - Valor máximo de los \"X min\". Si la resta < 0, el valor se trunca en 0\n Y_overlap = np.maximum(0, min_Ymax - max_Ymin) # Overlap de las cajas sobre el eje Y. Valor mínimo de los \"Y max\" - Valor máximo de los \"Y min\". 
Si la resta < 0, el valor se trunca en 0\n Intersection = X_overlap * Y_overlap # Intersection = Multiplicación de los overlaps en ambos ejes.\n\n area_main = (main_box[2] - main_box[0]) * (main_box[3] - main_box[1]) # Area de la caja principal\n area_aux = (aux_boxes[2,:] - aux_boxes[0,:]) * (aux_boxes[3,:] - aux_boxes[1,:]) # Area de las cajas auxiliares\n Union = area_main + area_aux - Intersection # Union = La suma de las areas de cada caja - la intersección (Ya que de lo contrario la región de la intersección se incluiría dos veces.)\n\n return (Intersection/Union).astype(float) # Se retorna el IOU (Intersection/Union) en tipo float", "def iou(box, clusters):\r\n x = np.minimum(clusters[:, 0], box[0])\r\n y = np.minimum(clusters[:, 1], box[1])\r\n if np.count_nonzero(x == 0) > 0 or np.count_nonzero(y == 0) > 0:\r\n raise ValueError(\"Box has no area\")\r\n\r\n intersection = x * y\r\n box_area = box[0] * box[1]\r\n cluster_area = clusters[:, 0] * clusters[:, 1]\r\n\r\n iou_ = np.true_divide(intersection, box_area + cluster_area - intersection + 1e-10)\r\n # iou_ = intersection / (box_area + cluster_area - intersection + 1e-10)\r\n\r\n return iou_", "def test_disjoint_iou(self):\n a = bounding_box.BoundingBox(\n bounding_box.Point(5, 10), bounding_box.Size(5, 20)\n )\n b = bounding_box.BoundingBox(\n bounding_box.Point(15, 5), bounding_box.Size(15, 20)\n )\n self.assertAlmostEqual(bounding_box.calculate_iou(a, b), 0.0)", "def marginal_iou(boxes):\n inter = intersection(boxes, boxes)\n mask = 1 - tf.eye(tf.shape(inter)[0], dtype=tf.float32)\n inter = inter * mask\n are = area(boxes)\n # Fix bug of remove both boxes when two boxes has high intersection with each other\n # Fix this bug is under the help of Wu Jiahong.\n # max_inter = tf.reduce_max(inter, axis=0)\n # margin_iou = max_inter / (1e-8 + are)\n # keep smaller box and remove bigger one. 
Be careful about same boxes,\n # because both boxes will be kept when they exactly same.\n margin_iou = inter / (1e-8 + are)\n margin_iou_tran = tf.transpose(margin_iou)\n mk = tf.greater(margin_iou, margin_iou_tran)\n margin_iou = margin_iou * tf.cast(mk, tf.float32)\n margin_iou = tf.reduce_max(margin_iou, axis=0)\n return margin_iou", "def calc_iou(prediction_bbox, gt_bbox):\n iou = 0\n #np.maxmium(prediction_bbox[:,0],gt_bbox[:,0])\n if prediction_bbox[0]>gt_bbox[0]:\n xmax = gt_bbox[0]\n else:\n xmax = prediction_bbox[0]\n if prediction_bbox[2]>gt_bbox[2]:\n xmin = gt_bbox[2]\n else:\n xmin = prediction_bbox[2]\n if prediction_bbox[1]<gt_bbox[1]:\n xmax = gt_bbox[1]\n else:\n xmax = prediction_bbox[1]\n if prediction_bbox[3]>gt_bbox[3]:\n xmin = gt_bbox[3]\n else:\n xmin = prediction_bbox[3]\n \n if xmax>xmin:\n return 0\n if ymax>ymin:\n return 0\n prebboxarea = (prediction_bbox[2] - prediction_bbox[0])*(prediction_bbox[3] - prediction_bbox[1])\n gtbboxarea = (gt_bbox[2] - gt_bbox[0])*(gt_bbox[3] - gt_bbox[1])\n #print(\"prebbox:(%d,%d)to(%d,%d), area:%d\" %(prediction_bbox[0],prediction_bbox[1],prediction_bbox[2],prediction_bbox[3],prebboxarea))\n #print(\"gtbbox :(%d,%d)to(%d,%d), area:%d\" %(gt_bbox[0],gt_bbox[1],gt_bbox[2],gt_bbox[3],gtbboxarea))\n\n iou = (xmin-xmax)*(ymin-ymax)\n iou = iou/(prebboxarea+gtbboxarea-iou)\n if iou < 0:\n raise Exception(\"iou is Negtive\")\n iou = 0\n #print(\"iou:\",iou)\n return iou", "def compute_iou(box, boxes, box_area, boxes_area):\n # Calculate intersection areas\n y1 = np.maximum(box[0], boxes[:, 0])\n y2 = np.minimum(box[2], boxes[:, 2])\n x1 = np.maximum(box[1], boxes[:, 1])\n x2 = np.minimum(box[3], boxes[:, 3])\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n union = box_area + boxes_area[:] - intersection[:]\n iou = intersection / union\n return iou", "def DIoU(y_pred_box, y_true_box):\n y_pred_box_min = y_pred_box[..., :2]\n y_pred_box_max = y_pred_box[..., 2:4]\n y_pred_box_wh = y_pred_box_max - y_pred_box_min\n y_pred_box_center = (y_pred_box_min + y_pred_box_max) / 2.\n\n y_true_box_min = y_true_box[..., :2]\n y_true_box_max = y_true_box[..., 2:4]\n y_true_box_wh = y_true_box_max - y_true_box_min\n y_true_box_center = (y_true_box_min + y_true_box_max) / 2.\n\n intersect_min = tf.maximum(y_pred_box_min, y_true_box_min)\n intersect_max = tf.minimum(y_pred_box_max, y_true_box_max)\n intersect_wh = tf.maximum(intersect_max - intersect_min, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n y_true_box_area = y_true_box_wh[..., 0] * y_true_box_wh[..., 1]\n y_pred_box_area = y_pred_box_wh[..., 0] * y_pred_box_wh[..., 1]\n union_area = y_true_box_area + y_pred_box_area - intersect_area\n # calculate IoU, add epsilon in denominator to avoid dividing by 0\n iou = intersect_area / tf.maximum(union_area, EPS)\n\n # box center distance\n center_distance = tf.reduce_sum(tf.square(y_pred_box_center - y_true_box_center), axis=-1)\n # get enclosed area\n enclose_min = tf.minimum(y_pred_box_min, y_true_box_min)\n enclose_max = tf.maximum(y_pred_box_max, y_true_box_max)\n enclose_wh = tf.maximum(enclose_max - enclose_min, 0.0)\n # get enclosed diagonal distance\n enclose_diagonal = tf.reduce_sum(tf.square(enclose_wh), axis=-1)\n # calculate DIoU, add epsilon in denominator to avoid dividing by 0\n diou = iou - center_distance / tf.maximum(enclose_diagonal, EPS)\n\n return diou", "def shortdEdx( xzye ):\n \n upsPointIndex = np.argmin( xzye[:,1] )\n\n xzye5cm = []\n for i in range(len(xzye)):\n if np.linalg.norm( 
xzye[i][:3] - xzye[upsPointIndex][:3] ) <= 5:\n xzye5cm.append( xzye[i] )\n\n try:\n xzye5cm = np.asarray(xzye5cm)\n dEdx = sum(xzye5cm[:,3])*1000/5\n return dEdx\n except:\n return 0", "def _virial_overdensity(self):\n Om_mz = self.cosmology._Omega_m()\n x = Om_mz-1.;\n Dv0 = 18.*pow(np.pi,2);\n Dv = (Dv0+82.*x-39.*pow(x,2))/Om_mz;\n\n return Dv;", "def consistance_noeuds(self):\n\n for c in self.contraintes:\n if c.dimension() == 1:\n # /!\\ iterer sur domaine[:], sinon on ne peut pas supprimer d'elements\n for v in c.variables[0].domaine[:]:\n if not c.est_valide(v):\n c.variables[0].domaine.remove(v)\n c.variables[0].label.remove(v)", "def get_Hu():\n \n ue = np.zeros((nx+1,ny)) \n uw = np.zeros((nx+1,ny))\n un = np.zeros((nx+1,ny))\n us = np.zeros((nx+1,ny))\n vn = np.zeros((nx+1,ny))\n vs = np.zeros((nx+1,ny))\n τxxe = np.zeros((nx+1,ny))\n τxxw = np.zeros((nx+1,ny))\n τxyn = np.zeros((nx+1,ny))\n τxys = np.zeros((nx+1,ny))\n Hu = np.zeros((nx+1,ny))\n \n i = np.arange(1,nx) # u-cell centers in domain interior\n \n ue[i,:] = (u[i+1,:] + u[i,:])/2\n uw[i,:] = (u[i,:] + u[i-1,:])/2\n \n j = np.arange(0,ny-1)\n un[IJ(i,j)] = (u[IJ(i,j+1)] + u[IJ(i,j)])/2\n un[i,ny-1] = ubc_t\n j = np.arange(1,ny)\n us[IJ(i,j)] = (u[IJ(i,j)] + u[IJ(i,j-1)])/2\n us[i,0] = ubc_b\n \n j = np.arange(0,ny)\n vn[IJ(i,j)] = (v[IJ(i-1,j+1)]+v[IJ(i,j+1)])/2\n vs[IJ(i,j)] = (v[IJ(i-1,j)] +v[IJ(i,j)]) /2\n \n τxxe[i,:] = -2*ν*(u[i+1,:] - u[i,:]) /Δx\n τxxw[i,:] = -2*ν*(u[i,:] - u[i-1,:])/Δx\n \n j = np.arange(0,ny-1)\n τxyn[IJ(i,j)] = -ν*(u[IJ(i,j+1)]-u[IJ(i,j)])/Δy - ν*(v[IJ(i,j+1)]-v[IJ(i-1,j+1)])/Δx\n τxyn[i,ny-1] = -ν*(ubc_t-u[i,ny-1])/(Δy/2) - ν*(v[i,ny]-v[i-1,ny])/Δx \n \n j = np.arange(1,ny)\n τxys[IJ(i,j)] = -ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy - ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx\n τxys[i,0] = -ν*(u[i,0]-ubc_b)/(Δy/2) - ν*(v[i,0]-v[i-1,0])/Δx\n \n Hu[i,:] = -((ue[i,:]*ue[i,:] - uw[i,:]*uw[i,:])/Δx + (un[i,:]*vn[i,:] - us[i,:]*vs[i,:])/Δy) \\\n -((τxxe[i,:] - τxxw[i,:])/Δx + (τxyn[i,:] - τxys[i,:])/Δy)\n \n return Hu", "def test_iou(self):\n a = bounding_box.BoundingBox(\n bounding_box.Point(5, 10), bounding_box.Size(5, 20)\n )\n b = bounding_box.BoundingBox(\n bounding_box.Point(7, 5), bounding_box.Size(15, 20)\n )\n self.assertAlmostEqual(bounding_box.calculate_iou(a, b), 0.12676056338)", "def test502(self):\n npix=17\n res=sdgrid(infiles=self.rawfile,gridfunction='GAUSS',npix=npix,cell='20arcsec',outfile=self.outfile,plot=False)\n self.assertEqual(res,None,\n msg='Any error occurred during gridding')\n self.getdata()\n \n # default width for GAUSS is 4\n width=3\n npol=2\n nonzeropix=self.data.nonzero()[1]\n nonzeropix_ref = numpy.array([218, 219, 220, 221, 222, 223, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 354, 355, 356, 357, 358, 359])\n #nonzeropix_ref=self.generateNonzeroPix(npol,npix,width)\n self.nonzero(nonzeropix_ref,nonzeropix)\n\n refdata = [1.37290766e-03, 1.37290757e-04, 3.63217224e-03,\n 3.63217230e-04, 1.37290766e-03, 1.37290757e-04,\n 1.37290766e-03, 1.37290757e-04, 2.71596070e-02,\n 2.71596084e-03, 7.29541257e-02, 7.29541294e-03,\n 2.71596070e-02, 2.71596084e-03, 1.37290766e-03,\n 1.37290757e-04, 3.63217224e-03, 3.63217230e-04,\n 7.29541257e-02, 7.29541294e-03, 1.98309869e-01,\n 1.98309869e-02, 7.29541257e-02, 7.29541294e-03,\n 3.63217224e-03, 3.63217230e-04, 1.37290766e-03,\n 1.37290757e-04, 2.71596070e-02, 2.71596084e-03,\n 7.29541257e-02, 7.29541294e-03, 2.71596070e-02,\n 2.71596084e-03, 
1.37290766e-03, 1.37290757e-04,\n 1.37290766e-03, 1.37290757e-04, 3.63217224e-03,\n 3.63217230e-04, 1.37290766e-03, 1.37290757e-04]\n nonzerodata=numpy.take(self.data,nonzeropix,axis=1).squeeze()\n for i in xrange(len(nonzerodata)):\n self.check(refdata[i],nonzerodata[i])", "def non_max_suppression(image, direction):\n\n # get the height and width of the image\n height, width = image.shape[:2]\n\n # generate the output matrix of zeros\n output = np.zeros((height, width))\n\n # iterate through the rows and cols of the edge matrix and\n # compare to all neighboring pixels to determine if the value\n # will be preserved or suppressed, if not set in loop, will \n # be 0\n for row in xrange(1,height-1):\n for col in xrange(1,width-1):\n # get the direction value at the edge position\n theta = angle_buckets(direction[row, col])\n\n # check if 0 degree bucket\n if theta == 0:\n # for 0 degrees the point will be considered to be on the edge \n # if its gradient magnitude is greater than the magnitudes at pixels \n # in the east and west directions\n if (image[row,col] >= image[row, col-1]):\n if (image[row,col] >= image[row, col+1]):\n output[row,col] = image[row,col]\n \n # check if 90 degree bucket\n elif theta == 90:\n # for 90 degrees the point will be considered to be on the edge if its \n # gradient magnitude is greater than the magnitudes at pixels in the \n # north and south directions\n if (image[row,col] >= image[row-1, col]):\n if (image[row,col] >= image[row+1, col]):\n output[row,col] = image[row,col]\n\n # check if 135 degree bucket \n elif theta == 135:\n # for 135 degrees the point will be considered to be on the edge if its \n # gradient magnitude is greater than the magnitudes at pixels in the \n # north west and south-east directions\n if (image[row,col] >= image[row-1, col-1]):\n if (image[row,col] >= image[row+1, col+1]):\n output[row,col] = image[row,col]\n\n # check if 45 degree bucket \n elif theta == 45:\n # for 45 degrees the point will be considered to be on the edge if its \n # gradient magnitude is greater than the magnitudes at pixels in the \n # north east and south west directions\n if (image[row,col] >= image[row-1, col+1]):\n if (image[row,col] >= image[row+1, col-1]):\n output[row,col] = image[row,col]\n \n # write the output to file\n out = OUT_FOLDER+\"/suppressed.jpg\"\n cv2.imwrite(out, output)\n\n # return the edge matrix\n return output", "def test500(self):\n npix=17\n res=sdgrid(infiles=self.rawfile,gridfunction='BOX',npix=npix,cell='20arcsec',outfile=self.outfile,plot=False)\n self.assertEqual(res,None,\n msg='Any error occurred during gridding')\n self.getdata()\n\n # center is only nonzero pixel\n npol=2\n width=1\n nonzeropix_ref=self.generateNonzeroPix(npol,npix,width)\n nonzeropix=self.data.nonzero()[1]\n self.nonzero(nonzeropix_ref,nonzeropix)\n\n pol0=self.data[0,nonzeropix[0]]\n #self.check(0.625,pol0)\n #self.check(0.5,pol0)\n self.check(0.6666666667,pol0)\n \n pol1=self.data[0,nonzeropix[1]]\n #self.check(0.0625,pol1)\n #self.check(0.05,pol1)\n self.check(0.06666666667,pol1)", "def test_diagonalizing_gates_non_overlapping(self):\n diag_op = ValidOp(qml.PauliZ(wires=0), qml.Identity(wires=1))\n assert diag_op.diagonalizing_gates() == []", "def area_superfecie_cubo(c):\n return 6*(c**2)", "def get_iou(ref_rect, rects):\n\n x1 = np.maximum(ref_rect[0], rects[:, 0])\n y1 = np.maximum(ref_rect[1], rects[:, 1])\n x2 = np.minimum(ref_rect[2], rects[:, 2])\n y2 = np.minimum(ref_rect[3], rects[:, 3])\n\n w = np.maximum(0, x2 - x1 + 1)\n h = 
np.maximum(0, y2 - y1 + 1)\n inter_area = w*h\n rects_area = (rects[:,2] - rects[:, 0] + 1)*(rects[:,3] - rects[:, 1] + 1)\n ref_rect_area = (ref_rect[2] - ref_rect[0] + 1)*(ref_rect[3] - ref_rect[1] + 1)\n iou = inter_area/(rects_area + ref_rect_area - inter_area)\n\n return iou", "def compute_iou(box, proposal):\r\n\tlen_proposal = np.shape(proposal)[0]\r\n\tIoU = np.empty([len_proposal,1])\r\n\tfor i in range(len_proposal):\r\n\t\txA = max(box[0], proposal[i,0])\r\n\t\tyA = max(box[1], proposal[i,1])\r\n\t\txB = min(box[2], proposal[i,2])\r\n\t\tyB = min(box[3], proposal[i,3])\r\n\r\n\t\tif xB<xA or yB<yA:\r\n\t\t\tIoU[i,0]=0\r\n\t\telse:\r\n\t\t\tarea_I = (xB - xA) * (yB - yA)\r\n\t\t\tarea1 = (box[2] - box[0])*(box[3] - box[1])\r\n\t\t\tarea2 = (proposal[i,2] - proposal[i,0])*(proposal[i,3] - proposal[i,1])\r\n\t\t\tIoU[i,0] = area_I/float(area1 + area2 - area_I)\r\n\treturn IoU" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates detections with model outputs and anchors.
def _generate_detections(cls_outputs, box_outputs, anchor_boxes, indices, classes,
                         image_id, image_scale, num_classes, max_boxes_to_draw,
                         nms_configs):
    anchor_boxes = anchor_boxes[indices, :]
    scores = sigmoid(cls_outputs)
    # apply bounding box regression to anchors
    boxes = decode_box_outputs_np(
        box_outputs.swapaxes(0, 1), anchor_boxes.swapaxes(0, 1))
    # run class-wise nms
    return per_class_nms(boxes, scores, classes, image_id, image_scale,
                         num_classes, max_boxes_to_draw, nms_configs)
[ "def generate_detections(det_model,seq_dir,conf_thresh,bs,imdim):\n\n # get model predictor object \n model,predictor = load_model(float(conf_thresh),det_model)\n detector = Detector(model,predictor)\n\n # detection list\n det_list = []\n #print(\"Processing %s\" % sequence)\n image_filenames = sorted(glob.glob(seq_dir+\"/*.jpg\"))\n\n # frame pointer\n pointer = 0\n\n while pointer <len(image_filenames):\n if pointer+bs>len(image_filenames):\n bs = len(image_filenames)-pointer\n\n #slice image filenames to batch\n batch = image_filenames[pointer:pointer+bs]\n #get system time before prediction\n starttime = datetime.datetime.now()\n #predict on batch\n detector.predict_on_batch(batch,imdim)\n #compute frames / seconds fp/s\n sec = (datetime.datetime.now()-starttime).total_seconds()\n fps = len(batch) / sec\n\n print(\"generate detections in frame %05d/%05d \\\n %01f [fp/s]\" % (pointer,len(image_filenames),\n fps),end=\"\\r\")\n pointer+=bs\n detector.outputs_instances_to_cpu()\n '''\n for frame_idx,output in enumerate(detector.outputs_cpu):\n for box_pred,score_pred,classes_pred in \\\n zip(output[\"pred_boxes\"],output[\"scores\"],output[\"pred_classes\"]):\n det_list.append([frame_idx,-1,round(box_pred[0]),round(box_pred[1]),\n round(box_pred[2]),round(box_pred[3]),1])\n '''\n return detector.outputs_cpu", "def main():\n MODEL_URL = \"https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.caffemodel\" # noqa: E501\n MODEL_LOCAL_PATH = HERE / \"./models/MobileNetSSD_deploy.caffemodel\"\n PROTOTXT_URL = \"https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.prototxt.txt\" # noqa: E501\n PROTOTXT_LOCAL_PATH = HERE / \"./models/MobileNetSSD_deploy.prototxt.txt\"\n CLASSES_FILE = HERE / \"./info/classes.txt\" \n \n with open(CLASSES_FILE) as f:\n CLASSES = [line.rstrip() for line in f]\n \n COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\n download_file(MODEL_URL, MODEL_LOCAL_PATH, expected_size=23147564)\n download_file(PROTOTXT_URL, PROTOTXT_LOCAL_PATH, expected_size=29353)\n\n DEFAULT_CONFIDENCE_THRESHOLD = 0.5\n\n class Detection(NamedTuple):\n name: str\n prob: float\n\n class MobileNetSSDVideoTransformer(VideoTransformerBase):\n confidence_threshold: float\n result_queue: \"queue.Queue[List[Detection]]\"\n\n def __init__(self) -> None:\n self._net = cv2.dnn.readNetFromCaffe(\n str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH)\n )\n self.confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD\n self.result_queue = queue.Queue()\n\n def _annotate_image(self, image, detections):\n # loop over the detections\n (h, w) = image.shape[:2]\n result: List[Detection] = []\n for i in np.arange(0, detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n\n if confidence > self.confidence_threshold:\n # extract the index of the class label from the `detections`,\n # then compute the (x, y)-coordinates of the bounding box for\n # the object\n idx = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n name = CLASSES[idx]\n result.append(Detection(name=name, prob=float(confidence)))\n\n # display the prediction\n label = f\"{name}: {round(confidence * 100, 2)}%\"\n cv2.rectangle(image, (startX, startY), (endX, endY), COLORS[idx], 2)\n y = startY - 15 if startY - 15 > 15 else startY + 15\n cv2.putText(\n image,\n label,\n (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n COLORS[idx],\n 2,\n )\n return image, result\n\n def 
transform(self, frame: av.VideoFrame) -> np.ndarray:\n image = frame.to_ndarray(format=\"bgr24\")\n blob = cv2.dnn.blobFromImage(\n cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5\n )\n self._net.setInput(blob)\n detections = self._net.forward()\n annotated_image, result = self._annotate_image(image, detections)\n\n # NOTE: This `transform` method is called in another thread,\n # so it must be thread-safe.\n self.result_queue.put(result)\n\n return annotated_image\n\n webrtc_ctx = webrtc_streamer(\n key=\"object-detection\",\n mode=WebRtcMode.SENDRECV,\n client_settings=WEBRTC_CLIENT_SETTINGS,\n video_transformer_factory=MobileNetSSDVideoTransformer,\n async_transform=True,\n )\n\n confidence_threshold = st.slider(\n \"Confidence threshold\", 0.0, 1.0, DEFAULT_CONFIDENCE_THRESHOLD, 0.05\n )\n if webrtc_ctx.video_transformer:\n webrtc_ctx.video_transformer.confidence_threshold = confidence_threshold\n\n if st.checkbox(\"Show the detected labels\", value=True):\n if webrtc_ctx.state.playing:\n labels_placeholder = st.empty()\n # NOTE: The video transformation with object detection and\n # this loop displaying the result labels are running\n # in different threads asynchronously.\n # Then the rendered video frames and the labels displayed here\n # are not strictly synchronized.\n while True:\n if webrtc_ctx.video_transformer:\n try:\n result = webrtc_ctx.video_transformer.result_queue.get(\n timeout=1.0\n )\n except queue.Empty:\n result = None\n labels_placeholder.table(result)\n else:\n break\n\n st.markdown(\n \"This Template uses a model and code from \"\n \"https://github.com/robmarkcole/object-detection-app. and https://github.com/whitphx/streamlit-webrtc-example \"\n \"Many thanks to these projects.\"\n )", "def _get_detections(dataset, retinanet, image_size, score_threshold=0.05, max_detections=100, save_path=None, use_gpu=True):\n all_detections = [[None for i in range(dataset.num_classes())] for j in range(len(dataset))]\n\n retinanet.eval()\n regressBoxes = BBoxTransform()\n clipBoxes = ClipBoxes()\n pred_version = 'v1'\n with torch.no_grad():\n for index, imgid in enumerate(tqdm(dataset.image_ids)):\n if pred_version == 'v1':\n iter = imgid\n else:\n iter = index\n scores, labels, boxes = predict(dataset, model, image_size, iter, regressBoxes, clipBoxes,\n score_threshold, 0.5, pred_version)\n\n # select indices which have a score above the threshold\n indices = np.where(scores > score_threshold)[0]\n if indices.shape[0] > 0:\n # select those scores\n scores = scores[indices]\n\n # find the order with which to sort the scores\n scores_sort = np.argsort(-scores)[:max_detections]\n\n # select detections\n image_boxes = boxes[indices[scores_sort], :]\n image_scores = scores[scores_sort]\n image_labels = labels[indices[scores_sort]]\n image_detections = np.concatenate(\n [image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)\n\n # copy detections to all_detections\n for label in range(dataset.num_classes()):\n all_detections[index][label] = image_detections[image_detections[:, -1] == label, :-1]\n else:\n # copy detections to all_detections\n for label in range(dataset.num_classes()):\n all_detections[index][label] = np.zeros((0, 5))\n\n print('{}/{}'.format(index + 1, len(dataset)), end='\\r')\n\n return all_detections", "def run_detection(self):\n self.rows = self.result_image.shape[0]\n self.cols = self.result_image.shape[1]\n self.cvNet.setInput(cv2.dnn.blobFromImage(self.input_image, size=self.rsize,\n swapRB=True, 
crop=False))\n self.cvOut = self.cvNet.forward()\n print(\"[INFO] Inference completed successfully.\")", "def process_outputs(self, outputs, image_size):\n boxes = []\n box_class = []\n box_confidences = []\n i = 0\n for output in outputs:\n boxes.append(output[:, :, :, 0:4])\n box_class.append(self.sigmoid(output[:, :, :, 5:]))\n box_confidences.append(self.sigmoid(output[:, :, :, 4:5]))\n\n for box in boxes:\n H_box = box.shape[0]\n W_box = box.shape[1]\n anchor_box = box.shape[2]\n\n the_box = np.zeros((H_box, W_box, anchor_box))\n\n ind_x = np.arange(W_box)\n ind_y = np.arange(H_box)\n ind_x = ind_x.reshape(1, W_box, 1)\n ind_y = ind_y.reshape(H_box, 1, 1)\n\n box_x = the_box + ind_x\n box_y = the_box + ind_y\n\n tx = box[..., 0]\n ty = box[..., 1]\n tw = box[..., 2]\n th = box[..., 3]\n\n sig_tx = self.sigmoid(tx)\n sig_ty = self.sigmoid(ty)\n\n bx = sig_tx + box_x\n by = sig_ty + box_y\n bx = bx / W_box\n by = by / H_box\n\n pw = self.anchors[i, :, 0]\n ph = self.anchors[i, :, 1]\n\n bw = pw * np.exp(tw)\n bh = ph * np.exp(th)\n\n inp_w = self.model.input.shape[1].value\n inp_h = self.model.input.shape[2].value\n\n bw = bw / inp_w\n bh = bh / inp_h\n\n x1 = bx - bw / 2\n y1 = by - bh / 2\n x2 = x1 + bw\n y2 = y1 + bh\n\n box[..., 0] = x1 * image_size[1]\n box[..., 1] = y1 * image_size[0]\n box[..., 2] = x2 * image_size[1]\n box[..., 3] = y2 * image_size[0]\n i = i + 1\n\n return (boxes, box_confidences, box_class)", "def get_predictions(image_paths, CAFFE_HOME, DATA_HOME, MODELS_HOME):\n\n # Model creation\n # Using bvlc_reference_caffenet model for training\n import os\n if os.path.isfile(CAFFE_HOME + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):\n print('CaffeNet found.')\n\n model_def = CAFFE_HOME + 'models/bvlc_reference_caffenet/deploy.prototxt'\n model_weights = CAFFE_HOME + \\\n 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'\n\n # Create a net object\n model = caffe.Net(model_def, # defines the structure of the model\n model_weights, # contains the trained weights\n caffe.TEST) # use test mode (e.g., don't perform dropout)\n\n # set up transformer - creates transformer object\n transformer = caffe.io.Transformer(\n {'data': model.blobs['data'].data.shape})\n # transpose image from HxWxC to CxHxW\n transformer.set_transpose('data', (2, 0, 1))\n transformer.set_mean('data', np.load(\n CAFFE_HOME + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))\n # set raw_scale = 255 to multiply with the values loaded with caffe.io.load_image\n transformer.set_raw_scale('data', 255)\n # swap image channels from RGB to BGR\n transformer.set_channel_swap('data', (2, 1, 0))\n\n def extract_features(image_paths):\n \"\"\"\n This function is used to extract feature from the current batch of photos.\n Features are extracted using the pretrained bvlc_reference_caffenet\n Instead of returning 1000-dim vector from SoftMax layer, using fc7 as the final layer to get 4096-dim vector\n \"\"\"\n test_size = len(image_paths)\n model.blobs['data'].reshape(test_size, 3, 227, 227)\n model.blobs['data'].data[...] 
= list(map(lambda x: transformer.preprocess(\n 'data', skimage.img_as_float(skimage.io.imread(x)).astype(np.float32)), image_paths))\n out = model.forward()\n return model.blobs['fc7'].data\n\n features = extract_features(image_paths)\n\n mlb, clf = joblib.load(MODELS_HOME + \"KNeighborsClassifier.pkl\")\n\n # Predict the labels for the validation data\n preds_binary = clf.predict(features)\n\n # Predicted labels are converted back\n # (1, 0, 1, 0, 1, 0, 0, 0, 1) -> (1, 3, 5, 9)\n predicted_labels = mlb.inverse_transform(preds_binary)\n return predicted_labels", "def detect_faces(input_dir, output_dir, multiple_inputs_flilepath=None, save_images=True):\n\n # create paths\n root_folder = os.path.dirname(os.path.abspath(__file__))\n data_folder = os.path.join(root_folder, \"Data\")\n model_folder = os.path.join(data_folder, \"Model_Weights\")\n\n # Now run the cat face detector\n detector_script = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"3_Inference\", \"Detector.py\"\n )\n result_file = os.path.join(output_dir, \"Detection_Results.csv\")\n model_weights = os.path.join(model_folder, \"trained_weights_final.h5\")\n classes_file = os.path.join(model_folder, \"data_classes.txt\")\n anchors = os.path.join(\n root_folder, \"2_Training\", \"src\", \"keras_yolo3\", \"model_data\", \"yolo_anchors.txt\"\n )\n\n arglist = [\n [\"input_path\", input_dir],\n [\"classes\", classes_file],\n [\"output\", output_dir],\n [\"yolo_model\", model_weights],\n [\"box_file\", result_file],\n [\"anchors\", anchors],\n ]\n\n # check for multiple inputs\n if multiple_inputs_flilepath:\n arglist.append([\"multiple_inputs_filepath\", multiple_inputs_flilepath])\n\n # check whether to save detected images\n if not save_images:\n arglist.append(['no_save_img', ' '])\n\n call_string = \" \".join([\"python\", detector_script, make_call_string(arglist)])\n print(\"Detecting Cat Faces by calling: \\n\\n\", call_string, \"\\n\")\n start = time.time()\n subprocess.call(call_string, shell=True)\n end = time.time()\n print(\"Detected Cat Faces in {0:.1f} seconds\".format(end - start))", "def main(batch_size=8, epochs=300, images_per_epoch=8192, validation_images=1024, image_size=224, color_space='yuv',\n train_data_dir='/mnt/bolbol/raw-data/train', valid_data_dir='/mnt/bolbol/raw-data/validation',\n model_save_dir='finetune_models'):\n data_mapper = get_mapper(color_space=color_space, classifier=False)\n\n ''' Modify VGG16 to work with greyscale images '''\n vgg = VGG16()\n for layer in vgg.layers:\n layer.trainable = False\n vgg.get_layer(name='block1_conv1').trainable = True\n vgg.get_layer(name='block1_conv2').trainable = True\n vgg.get_layer(name='block2_conv1').trainable = True\n vgg.get_layer(name='block2_conv2').trainable = True\n\n needed_layers = vgg.layers[2:]\n model = Sequential()\n model.add(InputLayer(input_shape=(image_size, image_size, 1), name='gray'))\n model.add(Conv2D(filters=64, kernel_size=3, padding='same'))\n for layer in needed_layers:\n model.add(layer)\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n train_generator = ImageDataGenerator().flow_from_directory(directory=train_data_dir,\n interpolation='bilinear',\n target_size=(image_size, image_size),\n batch_size=batch_size,\n color_mode='rgb', class_mode='sparse')\n valid_generator = ImageDataGenerator().flow_from_directory(directory=valid_data_dir,\n interpolation='bilinear',\n target_size=(image_size, image_size),\n batch_size=batch_size,\n color_mode='rgb', 
class_mode='sparse')\n train_generator = ImageGenerator(rgb_generator=train_generator, workers=4, input_processing_function=data_mapper.rgb_to_colorizer_input)\n valid_generator = ImageGenerator(rgb_generator=valid_generator, workers=4, input_processing_function=data_mapper.rgb_to_colorizer_input)\n\n # Configure model checkpoints\n model_save_path = os.path.join(model_save_dir, 'vgg-{epoch:02d}-{val_acc:.2f}-{val_loss:.2f}.hdf5')\n if not os.path.exists(model_save_dir):\n os.mkdir(model_save_dir)\n\n ''' FineTune VGG '''\n model.fit_generator(generator=train_generator,\n steps_per_epoch=images_per_epoch // batch_size,\n epochs=epochs,\n validation_data=valid_generator,\n validation_steps=validation_images // batch_size,\n callbacks=[EarlyStopping(patience=5),\n ModelCheckpoint(filepath=model_save_path, monitor='val_acc', save_best_only=True)])", "def _get_detections(dataset, retinanet, score_threshold=0.05, max_detections=100, save_path=None):\n all_detections = [[None for i in range(dataset.num_classes())] for j in range(len(dataset))]\n\n retinanet.eval()\n \n with torch.no_grad():\n\n for index in range(len(dataset)):\n data = dataset[index]\n scale = data['scale']\n\n # run network\n scores, labels, boxes = retinanet(data['img'].permute(2, 0, 1).cuda().float().unsqueeze(dim=0))\n scores = scores.cpu().numpy()\n labels = labels.cpu().numpy()\n boxes = boxes.cpu().numpy()\n\n # correct boxes for image scale\n boxes /= scale\n\n # select indices which have a score above the threshold\n indices = np.where(scores > score_threshold)[0]\n if indices.shape[0] > 0:\n # select those scores\n scores = scores[indices]\n\n # find the order with which to sort the scores\n scores_sort = np.argsort(-scores)[:max_detections]\n\n # select detections\n image_boxes = boxes[indices[scores_sort], :]\n image_scores = scores[scores_sort]\n image_labels = labels[indices[scores_sort]]\n image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)\n\n # copy detections to all_detections\n for label in range(dataset.num_classes()):\n all_detections[index][label] = image_detections[image_detections[:, -1] == label, :-1]\n else:\n # copy detections to all_detections\n for label in range(dataset.num_classes()):\n all_detections[index][label] = np.zeros((0, 5))\n\n print('{}/{}'.format(index + 1, len(dataset)), end='\\r')\n\n return all_detections", "def preprocess_output(self, results, image, draw_boxes):\n # Check the model output blob shape\n if not (\"detection_out\" in results.keys()):\n log.error(f\"Incorrect model output dictonary in \\\"{self.__class__.__name__}\\\": {results.keys()}\")\n sys.exit(1)\n output_dims = results[\"detection_out\"].shape\n if (len(output_dims) != 4) or (output_dims[3] != 7):\n log.error(f\"Incorrect output dimensions in \\\"{self.__class__.__name__}\\\": {output_dims}\")\n sys.exit(1)\n\n # Look up frame shapes\n width = image.shape[1]\n height = image.shape[0]\n\n # Parse detection results\n coords = []\n for box in results[\"detection_out\"][0][0]: # 1x1xNx7\n conf = box[2]\n if conf >= self.probability_threshold:\n xmin = int(box[3] * width)\n ymin = int(box[4] * height)\n xmax = int(box[5] * width)\n ymax = int(box[6] * height)\n coords.append((xmin, ymin, xmax, ymax))\n\n # Draw detection box, if applicable\n if draw_boxes:\n cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)\n return image, coords", "def get_anchors(self, sample_image):\n # Get Detectron 2 model config and build it.\n 
predictor = DefaultPredictor(self.det2_cfg)\n model = build_model(self.det2_cfg)\n\n # Image preprocessing.\n input_im = cv2.imread(sample_image)\n raw_height, raw_width = input_im.shape[:2]\n image = predictor.aug.get_transform(input_im).apply_image(input_im)\n image = torch.as_tensor(image.astype(\"float32\").transpose(2, 0, 1))\n\n # Model preprocessing.\n inputs = [{\"image\": image, \"height\": raw_height, \"width\": raw_width}]\n images = [x[\"image\"].to(model.device) for x in inputs]\n images = [(x - model.pixel_mean) / model.pixel_std for x in images]\n imagelist_images = ImageList.from_tensors(images, 1344)\n\n # Get feature maps from backbone.\n features = predictor.model.backbone(imagelist_images.tensor)\n\n # Get proposals from Region Proposal Network and obtain anchors from anchor generator.\n features = [features[f] for f in predictor.model.proposal_generator.in_features]\n det2_anchors = predictor.model.proposal_generator.anchor_generator(features)\n\n # Extract anchors based on feature maps in ascending order (P2->P6).\n p2_anchors = det2_anchors[0].tensor.detach().cpu().numpy()\n p3_anchors = det2_anchors[1].tensor.detach().cpu().numpy()\n p4_anchors = det2_anchors[2].tensor.detach().cpu().numpy()\n p5_anchors = det2_anchors[3].tensor.detach().cpu().numpy()\n p6_anchors = det2_anchors[4].tensor.detach().cpu().numpy()\n final_anchors = np.concatenate((p2_anchors,p3_anchors,p4_anchors,p5_anchors,p6_anchors))\n\n return final_anchors", "def predict_voting(configs, datasets, model_names, epochs = None, \r\n augment_flips = False, augment_scale = False, \r\n param_dict = {},\r\n use_semantic = False,\r\n nms_threshold = 0.3, voting_threshold = 0.5,\r\n img_pad = 0, dilate = False, \r\n save_predictions = False, create_submission = True):\r\n\r\n # Generalise the format of configs and datasets to cater for cases where a single model set may be\r\n # made up of multiple models/datasets\r\n configs = [_config if isinstance(_config, list) else [_config] for _config in configs]\r\n datasets = [dataset if isinstance(dataset, list) else [dataset] for dataset in datasets]\r\n model_names = [model_name if isinstance(model_name, list) else [model_name] for model_name in model_names]\r\n epochs = [epoch if isinstance(epoch, list) else [epoch] for epoch in epochs] if epochs is not None else [[None for d in dataset] for dataset in datasets]\r\n config_batch_sizes = [[c.BATCH_SIZE for c in _config] for _config in configs]\r\n batch_size = max([max([b for b in _config_batch_size]) for _config_batch_size in config_batch_sizes])\r\n\r\n # Create the models\r\n models = [[create_model(c, m, e) for c, e, m in zip(_config, epoch, model_name)] for _config, epoch, model_name in zip(configs, epochs, model_names)]\r\n\r\n # Create a mapping for each model of image_path: model index\r\n model_infos = merge_model_info(datasets)\r\n\r\n # Make sure that you have a full set of model mappings for each model set\r\n assert np.all([len(m) == len(model_infos[0]) for m in model_infos[1:]])\r\n\r\n img_paths = list(model_infos[0].keys())\r\n img_paths.sort()\r\n img_paths = np.array(img_paths)\r\n n_images = len(img_paths)\r\n\r\n # Set up holders for the submission rles which you will accumulate\r\n ImageId = []\r\n EncodedPixels = []\r\n\r\n list_fn_apply = [] + (['apply_flips_rotations'] if augment_flips else []) + (['apply_scaling'] if augment_scale else [])\r\n \r\n # NB: we need to predict in batches of _config.BATCH_SIZE\r\n # as there are layers within the model that have strides dependent on this.\r\n 
for i in tqdm(range(0, n_images, batch_size)):\r\n\r\n batch_img_paths = img_paths[i : (i + batch_size)]\r\n\r\n if len(batch_img_paths) != batch_size:\r\n batch_img_paths = np.append(batch_img_paths, batch_img_paths[:(i + batch_size - len(img_paths))])\r\n\r\n images, images_idx = gather_images(datasets, batch_img_paths)\r\n\r\n images_model_set = [[model[_idx] for _idx in idx] for model, idx in zip(models, images_idx)]\r\n configs_model_set = [[_config[_idx] for _idx in idx] for _config, idx in zip(configs, images_idx)]\r\n identical_idx = [np.all([id == _idx[0] for id in _idx]) for _idx in images_idx]\r\n\r\n # Run detection\r\n res = []\r\n for model, _images, _config, same_model in zip(images_model_set, images, configs_model_set, identical_idx):\r\n\r\n # Check if we can run the whole batch through with one model\r\n if same_model and _config[0].BATCH_SIZE == batch_size:\r\n\r\n # Run detection\r\n if len(list_fn_apply) > 0:\r\n r = maskrcnn_detect_augmentations(_config[0], model[0], _images, list_fn_apply, \r\n threshold = nms_threshold, voting_threshold = voting_threshold, \r\n param_dict = param_dict, \r\n use_nms = False, use_semantic = use_semantic)\r\n else:\r\n r = maskrcnn_detect(_config[0], model[0], _images, param_dict = param_dict, use_semantic = use_semantic) \r\n\r\n else:\r\n\r\n # The batch needs to be split into individual models\r\n r = []\r\n for _model, c, img in zip(model, _config, _images):\r\n\r\n # Artifically expand the batch if required by batch_size\r\n batch_img = [img] if c.BATCH_SIZE == 1 else [img] * c.BATCH_SIZE\r\n\r\n # Run detection\r\n if len(list_fn_apply) > 0:\r\n prediction = maskrcnn_detect_augmentations(c, _model, batch_img, list_fn_apply, \r\n threshold = nms_threshold, voting_threshold = voting_threshold, \r\n param_dict = param_dict, \r\n use_nms = False, use_semantic = use_semantic)\r\n else:\r\n prediction = maskrcnn_detect(c, _model, batch_img, param_dict = param_dict, use_semantic = use_semantic)\r\n\r\n prediction = prediction[0] \r\n\r\n r.append(prediction)\r\n\r\n # r now contains the results for the images in the batch\r\n res.append(r)\r\n # Reduce to N images\r\n for j, idx in enumerate(range(i, i + batch_size)): \r\n\r\n if idx < n_images: \r\n\r\n # Get masks via voting\r\n \r\n # First reshape masks so that they can be concatenated:\r\n for r in res:\r\n r[j]['masks'] = np.moveaxis(r[j]['masks'], -1, 0)\r\n if use_semantic:\r\n # semantic_masks is flat. 
We need to expand to the r[j]['masks'] dimensions\r\n r[j]['semantic_masks'] = np.stack([r[j]['semantic_masks']] * max(1, r[j]['masks'].shape[0]), axis = 0)\r\n \r\n # Concatenate\r\n img_results = du.concatenate_list_of_dicts([r[j] for r in res])\r\n\r\n # Reduce via voting\r\n img_results = reduce_via_voting(img_results, nms_threshold, voting_threshold, param_dict, use_semantic = use_semantic, n_votes = len(models))\r\n\r\n # Reshape \r\n img_results['masks'] = np.moveaxis(img_results['masks'], 0, -1)\r\n img_results['class_ids'] = img_results['class_ids'].reshape(-1, )\r\n img_results['scores'] = img_results['scores'].reshape(-1, )\r\n\r\n img_name = os.path.splitext(os.path.split(batch_img_paths[j])[-1])[0]\r\n \r\n # Create submission rle entry\r\n ImageId_batch, EncodedPixels_batch = f.numpy2encoding_no_overlap_threshold(img_results['masks'], img_name, img_results['scores'])\r\n ImageId += ImageId_batch\r\n EncodedPixels += EncodedPixels_batch\r\n # Print interim update\r\n f.write2csv(os.path.join(submissions_dir, '_'.join(('submission_ensemble_interim', '.csv'))), ImageId, EncodedPixels)\r\n \r\n if create_submission:\r\n submission_filename = os.path.join(\r\n submissions_dir, \r\n '_'.join(\r\n ('submission_ensemble', datetime.datetime.now().strftime('%Y%m%d%H%M%S'), '.csv')))\r\n\r\n f.write2csv(submission_filename, ImageId, EncodedPixels)", "def test_batch_detect_of_multiple_images(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detect(images=[VLIMAGE_SEVERAL_FACE, VLIMAGE_ONE_FACE])\n self.assertFaceDetection(detection[0], VLIMAGE_SEVERAL_FACE)\n self.assertFaceDetection(detection[1], VLIMAGE_ONE_FACE)\n assert 2 == len(detection)\n assert 5 == len(detection[0])\n assert 1 == len(detection[1])", "def get_predictions(args):\n ## List of .json file inside the args.path (waiting to be predicted by the testing model)\n datasets = sorted([f.split('.')[-2] for f in os.listdir(args.path.replace('_pred', '')) if not f.startswith('.') and f.endswith('.ndjson')])\n\n ## Extract Model names from arguments and create its own folder in 'test_pred' for storing predictions\n ## WARNING: If Model predictions already exist from previous run, this process SKIPS WRITING\n for model in args.output:\n model_name = model.split('/')[-1].replace('.pkl', '')\n model_name = model_name + '_modes' + str(args.modes)\n\n ## Check if model predictions already exist\n if not os.path.exists(args.path):\n os.makedirs(args.path)\n if not os.path.exists(args.path + model_name):\n os.makedirs(args.path + model_name)\n else:\n print('Predictions corresponding to {} already exist.'.format(model_name))\n print('Loading the saved predictions')\n continue\n\n print(\"Model Name: \", model_name)\n predictor = load_predictor(model)\n goal_flag = predictor.model.generator.goal_flag\n\n # Iterate over test datasets\n for dataset in datasets:\n # Load dataset\n dataset_name, scenes, scene_goals = load_test_datasets(dataset, goal_flag, args)\n\n # Get all predictions in parallel. 
Faster!\n scenes = tqdm(scenes)\n pred_list = Parallel(n_jobs=12)(delayed(predict_scene)(predictor, model_name, paths, scene_goal, args)\n for (_, _, paths), scene_goal in zip(scenes, scene_goals))\n \n # Write all predictions\n write_predictions(pred_list, scenes, model_name, dataset_name, args)", "def save_detections(self, video_meta):\n\n # Save detections\n output_dir = os.path.join('./output', video_meta['filename'])\n create_dir(output_dir)\n\n # Save all_recognitions dict\n logger.info('Saving recognitions per class')\n\n # Interpolate and save recognitions\n self.interpolate_recognition_dict(output_dir, video_meta)\n\n # Save all_detections dict\n logger.info('Saving all detections in one file')\n\n output_path = os.path.join(output_dir, 'detect_00_all.csv')\n\n all_detections_df = pd.DataFrame(data=self.all_detections)\n all_detections_df.to_csv(output_path, index=None)\n\n # Save per_class_detections dict\n logger.info('Saving detections per class')\n\n # Fill list with all args to run through save_dicts_for_classes()\n arg_list = []\n for class_id in self.per_class_detections:\n output_path = os.path.join(output_dir, 'detect_{}_{}.csv'.format(class_id, self.id2cat[class_id]))\n arg_list.append(((class_id, video_meta), output_path))\n\n with Pool(processes=self.config['settings']['num_workers']) as pool:\n pool.starmap(self.save_dicts_for_classes, arg_list)\n\n # Clear dicts for next video file\n self.prepare_dicts()", "def get_inference_image(self):\n for detection in self.cvOut[0,0,:,:]:\n score = float(detection[2])\n if score > self.Threshold:\n left = int(detection[3] * self.cols)\n top = int(detection[4] * self.rows)\n right = int(detection[5] * self.cols)\n bottom = int(detection[6] * self.rows)\n\n # Draw the bounding-box on the image\n cv2.rectangle(self.result_image,(left, top),(right, bottom), (23, 230, 210), thickness=2)\n cv2.drawMarker(self.result_image,get_rect_centre(left, top,right, bottom),(255,0,0))\n cv2.putText(self.result_image, self.label_dict[int(detection[1])] + \" : \" + str(round(score,4)),\\\n (int(left-10),int(top-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 2)\n\n print(\"[INFO] Result image generated successfully.\")\n return self.result_image", "def run_detect(gpus=None, caffenet=None, caffemodel=None,\n exp=None, thresh=None, class_thresh=None, obj_thresh=None,\n test_data=None,\n logger=None, interval=1, iterations=0, input_range=None, root=None, expid=None):\n if gpus is None:\n gpus = list(gpu_indices())\n num_gpu = len(gpus)\n logging.info(\"Detecting {} for {} on {} GPUs\".format(caffemodel, test_data, num_gpu))\n\n if exp is not None:\n imdb = exp.imdb\n else:\n assert test_data\n assert caffenet and caffemodel and op.isfile(caffenet) and op.isfile(caffemodel)\n name = None\n if op.isabs(test_data) and (op.isdir(test_data) or op.isfile(test_data)):\n imdb = ImageDatabase(test_data)\n else:\n # when test_data is a subdirectory inside 'data', quickdetection generated\n for fname in ['testX.tsv', 'test.tsv']:\n intsv_file = op.join('data', test_data, fname)\n if op.isfile(intsv_file):\n break\n name = op.basename(caffemodel) + \".\" + test_data\n imdb = ImageDatabase(intsv_file, name=test_data)\n if expid:\n name += \".{}\".format(expid)\n\n caffemodel_clone = None\n if op.isdir(\"/tmp\"):\n caffemodel_clone = op.join(\"/tmp\", \"{}.caffemodel\".format(ompi_rank()))\n shutil.copy(caffemodel, caffemodel_clone)\n if os.stat(caffemodel_clone).st_size < 1:\n logging.error(\"caffemodel: {} is not ready yet\".format(caffemodel))\n return\n exp = 
Experiment(imdb, caffenet, caffemodel, caffemodel_clone=caffemodel_clone,\n input_range=input_range, name=name, root=root, expid=expid)\n\n outtsv_file = exp.predict_path\n if op.isfile(outtsv_file):\n logging.info(\"Ignore already computed prediction: {} Experiment: {}\".format(outtsv_file, exp))\n return exp\n\n # create one detector for each GPU\n detectors = [\n Detector(exp, num_gpu=num_gpu, gpu=gpu) for gpu in gpus\n ]\n\n logging.info(\"Detection Experiment {}\".format(exp))\n\n if input_range is None:\n input_range = six.moves.range(len(imdb))\n else:\n assert input_range[0] >= 0, \"Invalid range: {} in {}\".format(input_range, imdb)\n if input_range[-1] >= len(imdb):\n logging.info(\"Last range corrected: {} in {}\".format(input_range, imdb))\n input_range = range(input_range[0], len(imdb))\n if len(input_range) == 0:\n logging.warning(\"Empty range: {} Experiment: {}\".format(input_range, exp))\n return exp\n total_count = len(input_range)\n assert total_count, \"No data to evaluate in experiment: {}\".format(exp)\n assert total_count < 0xFFFFFFFF, \"Too many images to evaluate\"\n processed = 0\n in_queue = Queue(2000 * len(gpus))\n\n def result_done(res):\n in_queue.put(res.result())\n\n writer = None\n reader = None\n try:\n # noinspection PyBroadException\n try:\n writer = Process(name=\"writer\", target=write_predict, args=(outtsv_file, in_queue,))\n writer.daemon = True\n writer.start()\n\n out_queue = Queue(400 * len(gpus))\n reader = Process(name=\"reader\", target=read_image, args=(imdb, input_range, out_queue,))\n reader.daemon = True\n reader.start()\n\n idx = 0\n while True:\n idx += 1\n out = out_queue.get()\n if not out:\n break\n key, im = out\n det_idx = idx % len(detectors)\n detector = detectors[det_idx]\n result = detector.detect_async(\n key, im=im,\n thresh=thresh, class_thresh=class_thresh, obj_thresh=obj_thresh\n )\n result.add_done_callback(result_done) # call when future is done to averlap\n processed += 1\n if logger and processed % 100 == 0:\n logger.set_iterations(iterations + interval * float(processed) / total_count)\n except Exception as e:\n logging.error(\"Exception thrown: {}\".format(e))\n raise\n finally:\n logging.info(\"Joining reader\")\n if reader:\n reader.join()\n logging.info(\"Shutting down the detectors\")\n for detector in detectors:\n detector.shutdown()\n if writer:\n in_queue.put(None)\n writer.join()\n return exp", "def create_detection_msg(im, output_dict, category_index, bridge):\n\n boxes = output_dict[\"detection_boxes\"]\n scores = output_dict[\"detection_scores\"]\n classes = output_dict[\"detection_classes\"]\n masks = None\n\n if 'detection_masks' in output_dict:\n masks = output_dict[\"detection_masks\"]\n\n msg = DetectionArray()\n\n msg.header = im.header\n\n scores_above_threshold = np.where(scores > 0.5)[0]\n\n for s in scores_above_threshold:\n # Get the properties\n\n bb = boxes[s,:]\n sc = scores[s]\n cl = classes[s]\n print('box::::::::::::' + str(im.width) +'|'+ str(im.height))\n\n # Create the detection message\n detection = Detection()\n detection.header = im.header\n detection.label = category_index[int(cl)]['name']\n detection.id = cl\n detection.score = sc\n detection.detector = 'Tensorflow object detector'\n detection.mask.roi.x = int((im.width-1) * bb[1])\n detection.mask.roi.y = int((im.height-1) * bb[0])\n detection.mask.roi.width = int((im.width-1) * (bb[3]-bb[1]))\n detection.mask.roi.height = int((im.height-1) * (bb[2]-bb[0]))\n\n if 'detection_masks' in output_dict:\n detection.mask.mask = \\\n 
bridge.cv2_to_imgmsg(masks[s], \"mono8\")\n\n print detection.mask.mask.width\n\n\n msg.detections.append(detection)\n\n return msg", "def generate_images_pred(self, inputs, outputs):\n for scale in self.opt.scales:\n disp = outputs[(\"disp\", scale)]\n if self.opt.v1_multiscale:\n source_scale = scale\n else:\n # without interpolate\n if self.opt.using_v not in [3,4]:\n disp = F.interpolate(\n disp, [self.opt.height, self.opt.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth)#disp_to_depth function is in layers.py\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.opt.frame_ids[1:]):\n\n if frame_id == \"s\":\n T = inputs[\"stereo_T\"]\n else:\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n if self.opt.pose_model_type == \"posecnn\":\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\")\n\n if not self.opt.disable_automasking:\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns trend objects. Expects the geographical area for which the trends are to be fetched
def get_trends(self, geo_location): api_url = t_const.API_TREND + "?id=%s" json_str = self.__https_obj.make_req(api_url % (geo_location), "GET", "", self.__token) res_data = json.loads(json_str.decode('UTF-8')) trends = res_data[0]['trends'] # return a list of trend objects return [Trend(t) for t in trends]
[ "def getTrends(self, geoID=23424977):\n trends = self.api.trends_place(geoID)\n self.helper.dumpJson(\"trends\", str(datetime.date.today()) + \".json\",\n trends)", "def google_trends(term: str) -> dict:\n pytrend = TrendReq()\n pytrend.build_payload(kw_list=[term])\n region_wise = pytrend.interest_by_region()\n top10 = region_wise.sort_values(by=term, ascending=False).head(10)\n# top10 = pd.DataFrame.to_dict(top10)\n top10 = st.bar_chart(top10)\n related_queries = pytrend.related_queries()\n related_queries = pd.DataFrame(related_queries[term]['rising'].sort_values(by=\"value\", ascending=False))\n# related_queries = st.bar_chart(related_queries)\n# stopwords = stoplists.gtrends_stop_words\n# remove_words = [word for word in related_queries['query'] if word in stopwords]\n# related_queries = related_queries[~related_queries['query'].isin(remove_words)]\n# related_queries = pd.DataFrame.to_dict(related_queries)\n return (top10,related_queries)", "def get_trending_tracks(args, strategy):\n db = get_db_read_replica()\n with db.scoped_session() as session:\n current_user_id, genre, time = (\n args.get(\"current_user_id\"),\n args.get(\"genre\"),\n args.get(\"time\", \"week\"),\n )\n time_range = \"week\" if time not in [\"week\", \"month\", \"year\"] else time\n key = make_trending_cache_key(time_range, genre, strategy.version)\n\n # Will try to hit cached trending from task, falling back\n # to generating it here if necessary and storing it with no TTL\n (tracks, track_ids) = use_redis_cache(\n key,\n None,\n make_generate_unpopulated_trending(session, genre, time_range, strategy),\n )\n\n # populate track metadata\n tracks = populate_track_metadata(session, track_ids, tracks, current_user_id)\n tracks_map = {track[\"track_id\"]: track for track in tracks}\n\n # Re-sort the populated tracks b/c it loses sort order in sql query\n sorted_tracks = [tracks_map[track_id] for track_id in track_ids]\n\n if args.get(\"with_users\", False):\n user_id_list = get_users_ids(sorted_tracks)\n users = get_users_by_id(session, user_id_list, current_user_id)\n for track in sorted_tracks:\n user = users[track[\"owner_id\"]]\n if user:\n track[\"user\"] = user\n return sorted_tracks", "def get_trends(access_token, num_buckets, bucket_size=None, end_date=None):\n url = UP_API_HOST + '/users/@me/trends' + '?' 
+ urllib.urlencode(_build_param_dict(locals()))\n return _url_handler(url, access_token)", "def get_google_trends_palestine_between_dates(self, start_date, end_date):", "def get_google_trends_israel_between_dates(self, start_date, end_date):", "def fetchTrendsByQuery(query):\n cur = connection.cursor()\n cur.execute(query)\n return cur.fetchall()", "def display_trends():\n #setting the input to the list returned from GetTrendsCurrent()\n trends = api.GetTrendsWoeid(woeid=23424977, exclude=None)\n #for the list of objects trends, provide the name and url attribute to the\n top_tweets = []\n for trend in trends:\n top_tweets.append((trend.name, trend.url))\n top_tweets = top_tweets[:5]\n return top_tweets", "def _calculate_trends(\n history: list[OrderedDict], measurements_to_use: int\n) -> dict[str, Any]:\n if measurements_to_use == -1:\n index_range = np.arange(0, len(history))\n else:\n index_range = np.arange(0, measurements_to_use)\n\n measured_attributes = set().union(*(d.keys() for d in history))\n metrics_to_trend = measured_attributes.intersection(list(METRICS_TO_TREND))\n\n trends = {}\n for attribute in metrics_to_trend:\n values = [\n float(value)\n for measurement in history\n for attr, value in measurement.items()\n if attr == attribute\n ]\n\n if measurements_to_use != -1:\n values = values[-measurements_to_use:]\n\n index_array = np.array(values)\n linear_fit = np.polyfit(\n index_range,\n index_array,\n 1,\n )\n slope = round(linear_fit[0], 2)\n\n metric = _get_normalized_metric_name(attribute)\n\n if slope > 0:\n trends[metric] = TREND_INCREASING\n elif slope < 0:\n trends[metric] = TREND_DECREASING\n else:\n trends[metric] = TREND_FLAT\n\n return trends", "def _collect_price_time_series(self):\n r = requests.get(self.GRAPH_URL)\n #dictionary of 2 dictionaries, \"daily\" and \"average\"\n response = r.json()\n daily_series = TimeSeries.from_dictionary(response[\"daily\"])\n average_series = TimeSeries.from_dictionary(response[\"average\"])\n return (daily_series, average_series)", "def fetchTrendsAsList(queryCategory, queryId, startDt, endDt):\n result = fetchTrends(queryCategory, queryId, startDt, endDt)\n trends = result[\"trends\"]\n\n ntrends = []\n for trend in trends:\n ntrend = map(str, trend)\n ntrends.append(ntrend)\n\n result[\"trends\"] = ntrends\n return result", "def trendingTweets():\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)\n for tweet in trending_topics:\n util.safe_print(tweet.GetText())", "def trending():\n\tstocks = []\n\ttrendingList = helpers.getTrending()\n\tfor symbol in trendingList:\n\t\tstocks = stocks + db.execute(\"SELECT * FROM companylist WHERE Symbol = :symbol\", symbol = symbol)\n\treturn render_template(\"index.html\", stocks = stocks)", "def getTwitterTrendsDataFrame(credentialsFilePath, woeid):\n # Twitter class to get data from Twitter, format is json by default.\n t = createTwitterObject(credentialsFilePath)\n\n # Use _id instead of id, because id would end up been appended to the base URL.\n # See source code api.py line 169.\n\n\n jsonTrends = t.trends.place(_id=woeid) # Response is python object json representation\n #print(jsonTrends);\n\n fs = createTrendsDataFrameFromJson(jsonTrends)\n return fs\n # Process into data frame\n frame = createPlacesDataFrameFromJson(jsonData)", "def hot():\n try:\n listing = feedparser.parse(TRENDING_URL)['entries']\n trends = [item['title'] for item in listing]\n return 
trends\n except Exception as e:\n print('ERR hot terms failed!', str(e))\n return None", "def trendingTopics():\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)\n for topic in trending_topics:\n util.safe_print(topic.name)", "def load_trend(query_api, measurements, trend_window=3, bucket=\"sdd\"):\n print(f\"load_trend... (trend_window={trend_window})\")\n logging.debug(f\"Influx DB query for load_trend() with trend_window={trend_window}\")\n filterstring = \" or \".join([f'r[\"_field\"] == \"{helpers.fieldnames[x]}\"' for x in measurements])\n query = f'''\n from(bucket: \"{bucket}\")\n |> range(start: -{trend_window + 2}d)\n |> filter(fn: (r) => {filterstring})\n |> filter(fn: (r) => r[\"unverified\"] != \"True\")\n '''\n tables = query_api.query_data_frame(query)\n print(\"query executed\")\n if isinstance(tables, list):\n df = pd.concat(tables)\n else:\n df = tables\n df[\"c_id\"] = compound_index(df)\n\n output = {\n \"model\": {},\n \"trend\": {},\n \"last_value\": {},\n \"last_time\": {}\n }\n df[\"_time\"] = df[\"_time\"].apply(helpers.utc_to_local, 1)\n df[\"unixtime\"] = df[\"_time\"].apply(lambda x: int(x.timestamp()), 1) # unixtime in s\n for cid in set(df[\"c_id\"]):\n # get sub-dataframe for this id\n\n tmpdf = df[df[\"c_id\"] == cid].sort_values(by=[\"unixtime\"])\n output[\"last_value\"][cid] = tmpdf[\"_value\"].iloc[-1]\n output[\"last_time\"][cid] = tmpdf[\"_time\"].iloc[-1]\n\n lastday = max(tmpdf[\"_time\"])\n firstday = min(tmpdf[\"_time\"])\n\n if (lastday - firstday).days < trend_window - 1:\n # not enough data for this station, trend window not covered\n output[\"model\"][cid] = (np.nan, np.nan)\n output[\"trend\"][cid] = np.nan\n continue\n\n day0 = lastday - timedelta(days=trend_window - 1)\n tmpdf = tmpdf[tmpdf[\"_time\"] >= day0]\n tmpdf = tmpdf.reset_index(drop=True)\n\n values = pd.to_numeric(tmpdf[\"_value\"])\n\n COUNT_LOW_THRESHOLD = 3\n PERCENT_NONZEROS_THRESHOLD = 0.75\n # perform linear regression only when the mean is above COUNT_LOW_THRESHOLD\n # or if the fraction of non-zero numbers exceeds PERCENT_NONZEROS_THRESHOLD.\n # This is to suppress unhelpful fits for low-value data sources\n if np.mean(values) > COUNT_LOW_THRESHOLD or \\\n np.count_nonzero(values) / len(values) > PERCENT_NONZEROS_THRESHOLD:\n # linear regression y = a*x +b\n model = np.polyfit(tmpdf[\"unixtime\"], values, 1)\n output[\"model\"][cid] = model\n\n # calculate trend\n a, b = model[:2]\n t1 = day0.timestamp()\n t2 = lastday.timestamp()\n y1 = (a * t1 + b)\n y2 = (a * t2 + b)\n if y1 > 0:\n output[\"trend\"][cid] = y2 / y1 - 1\n else:\n output[\"trend\"][cid] = np.nan\n else:\n # counts too low for reliable regression\n output[\"model\"][cid] = (np.nan, np.nan)\n output[\"trend\"][cid] = np.nan\n\n return output # dicts", "def get_occupations_trend(input_file_name, output_file_name):\n\n field_names = [\"SOC_NAME\"]\n output_header = [['TOP_OCCUPATIONS', 'NUMBER_CERTIFIED_APPLICATIONS',\n 'PERCENTAGE']]\n\n trend = FindTrends(field_names, output_header)\n\n datagatherer = DataGatherer(input_file_name)\n certified_cases = datagatherer.get_status_data()\n trend.fieldname_index_finder(datagatherer, certified_cases[0])\n\n all_trend_counts = trend.collect_trend(certified_cases[1:])\n script_output = trend.generate_output_data(all_trend_counts,\n len(certified_cases)-1)\n\n trend.generate_output_file(output_file_name, script_output)", "def trend(obj, dim=None, 
type='linear'):\n\tcoord = obj[dim]\n\tif _utils.is_datetime(coord.data):\n\t\t# Use the 1e-9 to scale nanoseconds to seconds (by default, xarray use\n\t\t# datetime in nanoseconds\n\t\tt = pd.to_numeric(coord) * 1e-9\n\telse:\n\t\tt = coord \t\n\tif type is 'constant':\n\t\tobj_trend = obj.mean(dim=dim)\n\t\t_, obj_trend = xr.broadcast(obj, obj_trend)\n\telif type is 'linear':\n\t\tslope, offset = linreg(obj, dim=dim)\n\t\tobj_trend = t * slope + offset\n\telif type is 'quadratic':\n\t\traise NotImplementedError\n\telse:\n\t\traise ValueError('This type of trend is not supported')\n\treturn obj_trend" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize object with a trend
def __init__(self, json_data): self._trend = json_data
[ "def _init_trend_array(self):\n\t\tself.T = [sum([self.X[i + self.q] - self.X[i]\n\t\t for i in range(self.q)]) / (self.q ** 2)]", "def __init__(self):\n self.min_time = 6.0*60.0*60.0\n self.min_temp = -10.0\n self.max_temp = 10.0\n self.period = 60.0*60.0*24.0", "def __init__(self, T, start=None, end=None, step=None):\n # Period\n self.__T = T\n \n # Angular frecuency\n self.__w = (2*np.pi)/self.__T\n \n # Time\n if (start == None):\n start = -2*T\n if (end == None):\n end = 2*T\n if (step == None):\n step = 0.01\n \n N = (end - start)/step\n self.__t = np.linspace(start, end, N)", "def __init__(self, ax):\n super(TimeSeriesTester, self).__init__(ax)", "def __init__(self, ticker, date_to_performance):\n \n # String for the ticker\n self.ticker = ticker\n \n # instance variable for dictionary from date to performance\n # !!!IMPORTANT!!!\n # This dictionary is not sorted. To sort, use sorted(date_to_performance.items(), key=operator.itemgetter(0))\n self.date_to_performance = date_to_performance\n\n # Since date_to_performance is a dictionary of datetime objects to floats,\n # sort the dictionary by the datetime value \n # (note that the key=operator.itemgetter(0) bit tells the sorted() method to sort based on the dictionary's key,\n # in this case datetime)\n # The sorted method creates a list of tuples in the form: [(datetime1, performance1), (datetime2, performance2)...]\n # For sorted(date_to_performance.items(), key=operator.itemgetter(0))[1][0], the [1] part grabs the second tuple\n # (i.e. the tuple at the first index) and the [0] part grabs the first part of that tuple (i.e. the datetime value)\n # whew...\n self.earliest_date = sorted(date_to_performance.items(), key=operator.itemgetter(0))[1][0]\n\n self.raw_data = sorted(self.date_to_performance.items(), key=operator.itemgetter(0))", "def __init__(self, temps, taxi, traiter=None):\n Evenement.__init__(self, temps, traiter)\n self._taxi = taxi", "def __init__(self, traj, dyn, pg, Kp=1.0, dt=0.005):\n self.Kp = Kp\n self.trajfunction = traj\n self.pg = pg\n self.rate = 200\n self.dyn = dyn\n self.dt = dt", "def __init__(self, memoryCapacity, clock, energyModel=None, forecastEnergyModel = None):\n self.clock= clock;\n self.history = DataSet(memoryCapacity, clock); \n self.energyModel = energyModel;\n self.forecastEnergyModel = None;", "def __init__(\n self,\n baseline: BaselineLike = None,\n resolution: int = 10,\n *,\n cut: Cut = None,\n ):\n super(LinearDoi, self).__init__(cut)\n self._baseline = baseline\n self._resolution = resolution", "def __init__(self, target_temp, building, furnace, climate):\n self.building = building\n self.furnace = furnace\n self.climate = climate\n self.time = 0\n self.ticksize_s = 60.0", "def __init__(self):\n self.stat = Statistics()", "def __init__(self, base=2, *percentages):\n\n self.values = range(base)\n self.percentages = percentages\n self.make_percentages_cumulative(percentages)", "def __init__(self, feed, target, turbine, num):\n\n super().__init__(f\"Turbine Assembly Line {num}\")\n\n self.feed = feed\n self.target = target\n self.turbine = turbine", "def __init__(self):\n self.count = 0\n self.weather = 1", "def __init__(self, load, temp, solar, holiday, lockdown):\n\t\t\t#Data\n\t\t\tself._load = load.values #Electric load\n\t\t\tself._temp = temp.values #Forecasted temperatures\n\t\t\tself._solar = solar.values #Forecast solar production\n\t\t\tself._holiday = holiday.values #Holiday score\n\t\t\tself._lockdown = lockdown.values #Lockdown score\n\t\t\t#Metadata\n\t\t\tself._date = 
load.index[0].date() #Date\n\t\t\tself._dayofweek = load.index[0].dayofweek #Day of week\n\t\t\tself._dayofyear = load.index[0].dayofyear #Day of year - used to compute period vector\n\t\t\tself._month = load.index[0].month #Month number\n\t\t\tself._period = self._compute_period() #2-D array representation of period of year\n\t\t\t#Predictor array\n\t\t\tself._predictor = None #Computed only on call\n\t\t\t#Links\n\t\t\tself._prev = None\n\t\t\tself._next = None", "def __init__(self, temps, taxi, station):\n EvTaxi.__init__(self, temps, taxi, self.__traitementStation)\n self._station = station", "def __init__(self, ranker=None):\n super().__init__(Tautology() if ranker is None else ranker)", "def __init__(self, config):\n super(ConstantLearningRate, self).__init__(\n update_granularity='none', config=config)", "def __init__(self,df, init_pars, var='dep_var', var_name='Volume of Nile'):\n self.df = df\n self.var = var\n self.var_name = var_name\n self.y = np.array(df[var].values.flatten())\n self.times = df.index\n self.pardict = init_pars" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get name of the trend.
def _get_name(self): return self._trend['name']
[ "def name(self):\n return super(Treant, self).name", "def _get_name(self) -> \"std::string\" :\n return _core.DataHub__get_name(self)", "def get_name(self):\n return self.__name_army", "def trend_description(self) -> Optional[str]:\n return TREND_DESCRIPTIONS[self._trend]", "def name(self):\n if self.yticker.info.get('longName') is None:\n raise NotFoundError(\n f'Cannot retrieve ticker (\"{self._ticker}\") '\n 'from Yahoo Finance')\n return self.yticker.info['longName']", "def get_name(self):\r\n return self.__nombre", "def cometname(self):\n return _measures.measures_cometname(self)", "def getName(self):\n\t\treturn self.dataUnitName", "def name(self):\n return self._pr.title", "def get_name():\n return _(strings.bot_title)", "def name(self):\n return self.transcript_name", "def _get_name(self) -> \"std::string\" :\n return _core.Workspace__get_name(self)", "def get_station_name(self):\n pass", "def getName(self):\n return HopperLowLevel.getTagName(self.__tag_internal__)", "def name(self):\n return self.solv_dict['name']", "def _get_name(self, n):\n return ''", "def get_name_item(self):\n return self.name_item", "def get_plant_name(self):\n if not self.plant_name:\n self.plant_name = self._search('botanische naam')\n return self.plant_name", "def getName(self,system):\n return system.findAdjectiveName(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the object with the tweet data (json)
def __init__(self, json_data): self._tweet = json_data
[ "def initWithRawData(self, tweet):\n\n for attr in self.desired_features['tweet']:\n if attr in tweet.keys():\n setattr(self, attr, tweet[attr])\n\n if 'preprocessed_text' in self.desired_features['tweet']:\n self.preprocessText(tweet['text'])", "def parse_tweet(data):\r\n return Tweet01(\r\n id=data.get('id', None),\r\n created_at=data.get('created_at', None),\r\n user_id=data.get('user_id', None),\r\n user_name=data.get('user_name', None),\r\n tweet_text=data.get('tweet_text', None),\r\n url=data.get('url')\r\n )", "def __init__(self, tweets=None):\n self.tweets = {\n id_str: PrepTweet.from_dict(pt_dict)\n for id_str, pt_dict\n in tweets.items()\n } if tweets is not None else {}", "def __init__(self, json_data):\n self._trend = json_data", "def __init__(self, tweet_text):\n self.tweet_text = tweet_text\n self.sentiment_results = self._total_results()", "def __init__(self, file_loc):\n\t\tself.tweets = []\n\t\tself.file_loc = file_loc\n\t\ttry:\n\t\t\tsys.stderr.write(\"Attempting to read stored tweets from '{}'... \".format(file_loc))\n\t\t\tsys.stderr.flush()\n\t\t\twith open(file_loc, 'r') as f:\n\t\t\t\tself.tweets = json.loads(f.read())\n\t\t\t\tsys.stderr.write(\"Read {} tweets.\\n\".format(len(self.tweets)))\n\t\texcept:\n\t\t\tsys.stderr.write(\"\\n --> File not found or file not in json format -- using empty dataset\\n\")", "def __init__(self):\n super().__init__()\n \n # Count number of tweets processed\n preparation.__total_tweets = 0\n preparation.__count_tweets = 0\n \n # Configurations\n myy = my_yaml.my_yaml_tweet()\n preparation.__usernames = [u.lower() for u in sum(myy.get_username_covid_vaccine().values(), [])] # return the list and change to lowercase\n\n default_config = myy.get_default_prep_config() \n preparation.__is_insert_data_after = default_config[\"is_insert_data_after\"]\n preparation.__date_insert_data_after = datetime.strptime(default_config[\"date_insert_data_after\"], '%Y,%m,%d').date()\n\n del myy \n \n # Cleaning tweeter\n preparation.__my_preprocessor = my_tweet.my_preprocessor()\n\n # Datetime convertor\n preparation.__convert_date = lambda dstr: datetime.strptime(dstr, \"%Y-%m-%dT%H:%M:%S.%fZ\")", "def on_data(\n self, data\n ): # this is called whenever new tweet arives/ data is the tweet itself, as json-file\n t = json.loads(\n data\n )\n text = t[\"text\"]\n if \"extended_tweet\" in t:\n text = t[\"extended_tweet\"][\"full_text\"]\n\n keyword = None\n for key in [\n \"covid-19\",\n \"vaccine\",\n \"pandemic\",\n \"Pfizer\",\n \"Biontech\",\n \"AstraZeneca\",\n \"Moderna\",\n ]:\n if (key in text) or (key in t[\"entities\"][\"hashtags\"]):\n keyword = key\n tweet = {\n \"text\": text,\n \"user_name\": t[\"user\"][\"screen_name\"],\n \"followers_count\": t[\"user\"][\"followers_count\"],\n \"location\": t[\"user\"][\"location\"],\n \"reply_count\": t[\"reply_count\"],\n \"retweet_count\": t[\"retweet_count\"],\n \"keyword\": keyword,\n \"timestamp\": datetime.strptime(\n t[\"created_at\"], \"%a %b %d %H:%M:%S +0000 %Y\"\n ),\n }\n #\n # print(text + '\\n\\n') # instead of the logging.critical below\n db_mongo.twitter.insert(tweet)\n logging.critical(\"tweet added to mondoDB\")\n logging.critical(f'\\n\\n\\nTWEET INCOMING: {tweet[\"text\"]}\\n\\n\\n')", "def init_get_data(self):\n self.mock_tweet = MockTweetData()", "def fromJSON(json_in: str):\n obj = json.loads(json_in)\n processed_tweet = ProcessedTweet(\n obj.get(\"id\"),\n obj.get(\"user_id\"),\n obj.get(\"text\")\n )\n\n return processed_tweet", "def insert_tweet_data(tweet_data):\n if not 
isinstance(tweet_data, dict):\n tweet_data = json.loads(tweet_data)\n if tweet_data['is_retweet']:\n insert_retweet_data(tweet_data)\n else:\n insert_non_retweet_data(tweet_data)", "def load(self, input_file):\n self.tweets = Tweets(input_file)", "def create_an_instance_from_json(json_str, dataset_obj):\n tweet_data = json.loads(json_str)\n if tweet_data.get('lang'):\n lang = tweet_data.get('lang')\n if lang != \"en\":\n return False\n return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj)", "def createTwitterObject(credentialsFilePath):\n configFP = open(credentialsFilePath)\n mydict = json.load(configFP)\n\n oauth_consumerKey = mydict['consumerKey']\n oauth_consumerSecret = mydict['consumerSecret']\n oauth_token = mydict['token']\n oauth_tokenSecret = mydict['tokenSecret']\n\t \n # Twitter class to get data from Twitter, format is json by default.\n t = twitter.Twitter(\n auth=twitter.OAuth(oauth_token, oauth_tokenSecret,\n oauth_consumerKey, oauth_consumerSecret),\n api_version='1.1'\n )\n return t", "def __init__(self, user_data):\n\n self._data = user_data\n self._timestamp = datetime.now()", "def __init__(self, data):\n super(GeoJson, self).__init__()\n self.plugin_name = 'GeoJson'\n if 'read' in dir(data):\n self.data = data.read()\n elif type(data) is dict:\n self.data = json.dumps(data)\n else:\n self.data = data", "def __init__(self):\n with open('app_pybot/request_tools/parsing_words.json', encoding='utf-8') as f:\n parsing_words = json.loads(f.read())\n self.punctuation = parsing_words[\"punctuation\"]\n self.stopwords = parsing_words[\"stopwords\"]", "def add_tweet_data(tweet):\n req_url = '{0}{1}'.format(eleanor_url, 'add-tweet-data')\n headers = {'content-type': 'application/json'}\n payload = json.dumps(tweet)\n requests.post(req_url, headers=headers, data=payload)", "def __init__(self, data: dict):\n self._tickets = defaultdict(Ticket)\n\n for i in range(len(data['tickets'])):\n ticket_object = data['tickets'][i]\n ticket = Ticket()\n\n if ticket_object['id']:\n ticket.set_id(ticket_object['id'])\n\n if ticket_object['assignee_id']:\n ticket.set_assignee_id(ticket_object['assignee_id'])\n\n if ticket_object['created_at']:\n ticket.set_created_date(ticket_object['created_at'])\n\n if ticket_object['description']:\n ticket.set_description(ticket_object['description'])\n\n if ticket_object['due_at']:\n ticket.set_due_date(ticket_object['due_at'])\n\n if ticket_object['priority']:\n ticket.set_priority(ticket_object['priority'])\n\n if ticket_object['requester_id']:\n ticket.set_requester(ticket_object['requester_id'])\n\n if ticket_object['subject']:\n ticket.set_subject(ticket_object['subject'])\n\n if ticket_object['status']:\n ticket.set_status(ticket_object['status'])\n\n if ticket_object['type']:\n ticket.set_type(ticket_object['type'])\n\n if ticket_object['url']:\n ticket.set_url(ticket_object['url'])\n\n self._tickets[i + 1] = ticket", "def _load(self):\n if self._data is None:\n self._data = json.loads(self._json)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns location of the user
def _get_location(self): return self._get_user()['location']
[ "def get_user_location(user):\n if user and user.is_authenticated(): \n prof = user.get_profile()\n if prof:\n return prof.location if prof.location else \\\n prof.supply_point.location if prof.supply_point \\\n else None", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def Location(self) -> str:", "def location(self):\n return self.patient.get('location', None)", "def location(self):\n return self._redunda.location", "def getFriendsLocation(self):\n return self.base.get(\"friends_location\", [])", "def get_user_location(user_id):\n stored_locations = read_locations()\n if stored_locations:\n for stored_location in stored_locations:\n if stored_location[\"user_id\"] == user_id:\n return (\n stored_location[\"city\"].strip(),\n stored_location[\"country\"].strip(),\n )\n break\n else:\n return None\n else:\n return None", "def _home_location(self, user):\n\n try:\n home = user.get('home')\n return self.geocoder[home]\n except (AstralError, KeyError) as e:\n raise DataError(e)", "def user_geocode(geocode_dict):\n\tuser_data = geocode_dict['results'][0]['geometry']['location']\n\tuser_loc = (user_data['lat'], user_data['lng'])\n\n\treturn user_loc", "def test_user_location(self):\n assert self.user.location == 'Seattle, WA'", "def user(self):\n return self.parsed_prefix.user", "def getUserHometown(self):\n return self.base.get(\"user_hometown\", [])", "def current_location(self):\n headers, content = self.api('currentLocation')\n return content", "def display_location(self):\n self.get()", "def location(self):\n return (self._moment.get(\"latitude\"), self._moment.get(\"longitude\"))", "def get_ip(self, user: str) -> str:\n\t\treturn self.__user[user]", "def show_user_locations():\n\n if g.user:\n\n locations = Location.query.filter_by(user_id=g.user.id).all()\n\n return render_template(\"user-location-list.html\", locations=locations)\n\n flash(\"You must be logged in to your account to create/view user locations\", 'danger')\n return redirect('/', code=302)", "def homepage(self):\r\n return \"{}{}{}\".format(self._gis.url,\r\n \"/home/user.html?user=\",\r\n self._user_id)", "def tweet_location(tweet):\n # Create a tupple of the latitude and longitude\n location = (float(tweet[\"lat\"]), float(tweet[\"lon\"]))\n return location", "def resolve_me(root, info, **kwargs):\n return info.context.user" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gives the count of retweets
def _get_retweets(self): return int(self._tweet['retweet_count'])
[ "def _setRetweetCount(self):\n retweetCount = 0\n if \"retweeted_status\" in self._tweet:\n retweetCount = self._tweet[\"retweeted_status\"][\"retweet_count\"]\n return retweetCount", "def retweetCount(self):\n return self._retweetCount", "def __get_count_tweets(data, batch_name, filename):\n if 'meta' in data.keys():\n return int(data['meta']['result_count'])\n else:\n if 'error' in data.keys():\n print(data, batch_name, filename)\n input(\"Click ENTER to conitnue\")\n return 0", "def number_of(tweets, condition={\"$exists\": True}):\n\n total = tweets.count_documents({\"sentiment\": condition})\n rt = tweets.count_documents({\"retweeted_status\": {\"$exists\": True}, \"sentiment\": condition})\n quotes = tweets.count_documents({\"is_quote_status\": True, \"sentiment\": condition})\n replies = tweets.count_documents({\"in_reply_to_status_id\":{\"$ne\":None}, \"is_quote_status\": False, \"sentiment\": condition})\n return total, rt, quotes, replies", "def scrape_tweet(tweet):\n\n\n dateUntil = tweet.created_at + timedelta(1)\n tweetCriteria = got.manager.TweetCriteria().setUsername(tweet.author.screen_name).setSince(\n tweet.created_at.strftime(\"%Y-%m-%d\")).setUntil(dateUntil.strftime(\"%Y-%m-%d\")).setMaxTweets(-1)\n found = False\n tweets = got.manager.TweetManager.getTweets(tweetCriteria)\n for tw in tweets:\n if tw.id == tweet.id_str:\n tweet.reply_count = tw.replies\n break;\n return tweet", "def get_num_of_tweets(screen_name, num_of_tweets=20):\n if num_of_tweets < 201:\n return api.user_timeline(screen_name=screen_name, count=num_of_tweets, include_rts=False)\n else:\n tweets_to_return = []\n while len(tweets_to_return) < num_of_tweets:\n if len(tweets_to_return) == 0:\n tweets = api.user_timeline(screen_name=screen_name, count=200, include_rts=False)\n tweets_to_return.extend(tweets)\n else:\n oldest_tweet = tweets_to_return[-1].id - 1\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, include_rts=False,\n max_id=oldest_tweet)\n # If the request for more tweets yielded 0 tweets, we must be at the end & its time to return...\n if new_tweets == 0:\n return tweets_to_return\n tweets_to_return.extend(new_tweets)\n\n return tweets_to_return", "def count_publishers(url):\n params = {'rows': 0}\n resp = requests.get(url=url, params=params)\n data = json.loads(resp.text)\n return data['message']['total-results']", "def get_n_tweets(self, username, last_n_tweets=1):\n req = requests.get(url=\"https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s&count=%d\" % (username, last_n_tweets), auth=self.twitter_oauth)\n return [tweet['text'] for tweet in req.json()]", "def total_number_of_tweets(input_file=sample_file):\n\n return input_file.count()", "def followers_and_retweets_of_tweet(tweet):\n\n # search tweet dictionary for follower count\n followers = 0\n if 'user' in str(tweet):\n if 'followers_count' in str(tweet['user']):\n followers = tweet['user']['followers_count']\n\n # search tweet dictionary for retweet count\n retweets = 0\n if 'retweeted_status' in str(tweet):\n if 'retweet_count' in str(tweet['retweeted_status']):\n retweets = tweet['retweeted_status']['retweet_count']\n\n return followers, retweets", "def countTweetsPerUser(tweet_list):\n\n TweetsPerUser_count = {}\n \n for tweet in tweet_list:\n user = tweet['screen_name']\n\n # update TweetsPerUser_count\n TweetsPerUser_count[user] = TweetsPerUser_count.setdefault(user, 0)\n TweetsPerUser_count[user] += 1\n\n return TweetsPerUser_count", "def getCount(tweets):\n diction = dict()\n \n for 
tweet in tweets:\n for word in tweet:\n if word[0].isupper() and word not in exclude:\n if word in diction:\n diction[word] += 1\n else:\n diction[word] = 1\n\n return diction", "def tweet_counter():\n tweet_tokenizer = TweetTokenizer()\n # Setting variables for counters, and list of racist keywords\n racist_words = [\"bruin\", \"bruinen\", \"neger\", \"negers\", \"negerin\", \"negerinnen\", \"nikker\", \"nikkers\", \"zwart\", \"zwarten\", \"cappuccin\", \"cappuccino\", \"chocolademelk\", \"chocomel\", \"chocomelk\", \"donkerbruin\", \"kleurboek\", \"kleurboeken\", \"kleurling\", \"kleurpotloden\", \"koffie\", \"lichtbruin\", \"rascist\", \"rascisten\", \"nigger\", \"niggers\", \"nigga\", \"niggas\", \"negro\", \"negroes\"]\n\n total_tweets = 0\n racist_tweets = 0\n # Iteration over all lines (tweets)\n for tweet in sys.stdin:\n # Racist is set to False, as the program is unsure yet whether the\n # Tweet is racist or not, thus it is safer to assume that it is not.\n racist = False\n # For each iteration, 1 is added to the total Tweets.\n total_tweets += 1\n # Creates a list with all the tokens in the Tweet.\n tweet_tokens = tweet_tokenizer.tokenize(tweet)\n # Iteration over the token list\n for token in tweet_tokens:\n for element in racist_words:\n # If the word is racist, 1 is added to the racist Tweet\n # counter and the racst condition is set to True.\n if element.lower() == token.lower():\n racist_tweets += 1\n racist = True\n # If the Tweet is racist, the iteration over the Tweet ends\n # using two break statements. This is so that Tweets with\n # several racist words are not counted more than once.\n if racist is True:\n break\n if racist is True:\n break\n # Takes the total number of Tweets and the number of racist Tweets and\n # sends them back to the main function.\n return total_tweets, racist_tweets", "def count() -> int:\n return _api_calls.get(Inner._ANNOTATIONS_ENDPOINT + \"count\").json()", "def draw_retweets(dataframe):\n return dbc.Card([\n dbc.CardBody([\n html.H4('TOTAL RETWEETS', className=\"card-title\"),\n html.H3('{:,}'.format(dataframe['Retweet_count'].sum()).replace(',', ' '), className=\"card-text\")\n ])\n ])", "def get_tweets(self):\n\t\treturn self.tweets", "def test_tweet_count(self, search_key):\n tweet_classifier = TweetClassifier(search_key, 10, \"tweets\")\n self.assertTrue(len(tweet_classifier.classify_tweets()) <= 10)", "def ask_num_tweets_live(self):\n print()\n tweets_wanted = input(\"How many tweets do you want to collect (MAX=100)? \")\n # Handle invalid responses\n while not tweets_wanted.isdigit() or not 0 < int(tweets_wanted) < 101:\n tweets_wanted = input('Invalid response. 
Please enter a digit between 1 and 100: ')\n # Store user's desired number of tweets\n self._num_tweets = tweets_wanted", "def gather_completed_crawl_count(self):\n response_dictionary = self.response.json()\n completed_crawl_count = len(response_dictionary['finished']) \n return completed_crawl_count", "def analyze_tweet(tweet, results):\n\n ######################################\n # fields that are relevant for user-level and tweet-level analysis\n # count the number of valid Tweets here\n # if it doesn't have at least a body and an actor, it's not a tweet\n try: \n body = tweet[\"body\"]\n userid = tweet[\"actor\"][\"id\"].split(\":\")[-1]\n results[\"tweet_count\"] += 1\n except (ValueError, KeyError):\n if \"non-tweet_lines\" in results:\n results[\"non-tweet_lines\"] += 1\n return\n\n # count the number of tweets from each user\n if \"tweets_per_user\" in results:\n results[\"tweets_per_user\"][tweet[\"actor\"][\"id\"][15:]] += 1\n \n #######################################\n # fields that are relevant for the tweet-level analysis\n # ------------------> term counts\n # Tweet body term count\n if \"body_term_count\" in results:\n results[\"body_term_count\"].add(tweet[\"body\"])\n\n # count the occurences of different hashtags\n if \"hashtags\" in results:\n if \"hashtags\" in tweet[\"twitter_entities\"]:\n for h in tweet[\"twitter_entities\"][\"hashtags\"]:\n results[\"hashtags\"][h[\"text\"].lower()] += 1\n \n try:\n # count the occurences of different top-level domains\n if (\"urls\" in results) and (\"urls\" in tweet[\"gnip\"]):\n for url in tweet[\"gnip\"][\"urls\"]:\n try:\n results[\"urls\"][url[\"expanded_url\"].split(\"/\")[2]] += 1\n except (KeyError,IndexError,AttributeError):\n pass\n # and the number of links total\n if (\"number_of_links\" in results) and (\"urls\" in tweet[\"gnip\"]):\n results[\"number_of_links\"] += len(tweet[\"gnip\"][\"urls\"])\n except KeyError:\n pass\n \n # -----------> timelines\n # make a timeline of UTC day of Tweets posted\n if \"utc_timeline\" in results:\n date = tweet[\"postedTime\"][0:10]\n results[\"utc_timeline\"][date] += 1\n\n # make a timeline in normalized local time (poster's time) of all of the Tweets\n if \"local_timeline\" in results:\n utcOffset = tweet[\"actor\"][\"utcOffset\"]\n if utcOffset is not None:\n posted = tweet[\"postedTime\"]\n hour_and_minute = (datetime.datetime.strptime(posted[0:16], \"%Y-%m-%dT%H:%M\") + \n datetime.timedelta(seconds = int(utcOffset))).time().strftime(\"%H:%M\")\n results[\"local_timeline\"][hour_and_minute] += 1\n \n # ------------> mention results\n # which users are @mentioned in the Tweet\n if \"at_mentions\" in results:\n for u in tweet[\"twitter_entities\"][\"user_mentions\"]:\n # update the mentions with weight + 1 and \n # list all of the screennames (in case a name changes)\n if u[\"id_str\"] is not None:\n results[\"at_mentions\"][u[\"id_str\"]][\"weight\"] += 1 \n results[\"at_mentions\"][u[\"id_str\"]][\"screennames\"].update([u[\"screen_name\"].lower()])\n \n # count the number of times each user gets replies\n if (\"in_reply_to\" in results) and (\"inReplyTo\" in tweet):\n results[\"in_reply_to\"][tweet[\"inReplyTo\"][\"link\"].split(\"/\")[3].lower()] += 1\n\n # --------------> RTs and quote Tweet\n # count share actions (RTs and quote-Tweets)\n # don't count self-quotes or self-RTs, because that's allowed now\n if ((\"quote_of_user\" in results) or (\"RT_of_user\" in results)) and (tweet[\"verb\"] == \"share\"):\n # if it's a quote tweet\n if (\"quote_of_user\" in results) and 
(\"twitter_quoted_status\" in tweet[\"object\"]):\n quoted_id = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"id\"][15:]\n quoted_name = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"preferredUsername\"]\n if quoted_id != tweet[\"actor\"][\"id\"]:\n results[\"quote_of_user\"][quoted_id][\"weight\"] += 1 \n results[\"quote_of_user\"][quoted_id][\"screennames\"].update([quoted_name])\n # if it's a RT\n elif (\"RT_of_user\" in results):\n rt_of_name = tweet[\"object\"][\"actor\"][\"preferredUsername\"].lower()\n rt_of_id = tweet[\"object\"][\"actor\"][\"id\"][15:]\n if rt_of_id != tweet[\"actor\"][\"id\"]:\n results[\"RT_of_user\"][rt_of_id][\"weight\"] += 1 \n results[\"RT_of_user\"][rt_of_id][\"screennames\"].update([rt_of_name])\n\n # Tweet expended url content term count\n if \"url_content\" in results:\n try:\n urls = tweet[\"gnip\"][\"urls\"]\n except KeyError:\n urls = []\n url_content = \"\"\n for url in urls:\n try:\n expanded_url_title = url[\"expanded_url_title\"]\n if expanded_url_title is None:\n expanded_url_title = \"\"\n except KeyError:\n expanded_url_title = \"\"\n try:\n expanded_url_description = url[\"expanded_url_description\"]\n if expanded_url_description is None:\n expanded_url_description = \"\"\n except KeyError:\n expanded_url_description = \"\"\n url_content = url_content + \" \" + expanded_url_title + \" \" + expanded_url_description\n results[\"url_content\"].add(url_content)\n \n ############################################\n # actor-property qualities\n # ------------> bio terms\n if \"bio_term_count\" in results:\n if tweet[\"actor\"][\"id\"][:15] not in results[\"tweets_per_user\"]:\n try:\n if tweet[\"actor\"][\"summary\"] is not None:\n results[\"bio_term_count\"].add(tweet[\"actor\"][\"summary\"])\n except KeyError:\n pass\n \n # ---------> profile locations\n if \"profile_locations_regions\" in results:\n # if possible, get the user's address\n try:\n address = tweet[\"gnip\"][\"profileLocations\"][0][\"address\"]\n country_key = address.get(\"country\", \"no country available\")\n region_key = address.get(\"region\", \"no region available\")\n except KeyError:\n country_key = \"no country available\"\n region_key = \"no region available\"\n results[\"profile_locations_regions\"][country_key + \" , \" + region_key] += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assemble file content, then pass it to the hasher via a temp file. Either str or bytearray can be passed to outputText. Since we need to write this to a file and calculate a SHA1, we need bytes. For unicode servers, we have a charset specified which is used to convert a str to bytes. For a non-unicode server, we will have specified "raw" encoding to P4Python, so we should never see a str.
def outputText(self, h):
    if self.p4.charset:
        try:
            # self.p4.__convert() doesn't work correctly here
            if type(h) == str:
                b = getattr(self.p4, '__convert')(self.p4.charset, h)
            else:
                b = getattr(self.p4, '__convert')(self.p4.charset, h.decode())
        except:
            msg = _("error: failed '{}' conversion for '{}#{}'").format(
                self.p4.charset, self.rev.depot_path, self.rev.revision)
            raise P4Exception(msg)
    else:
        if type(h) == str:
            raise RuntimeError(_('unexpected outputText'))
        b = h
    self.appendContent(b)
    return OutputHandler.HANDLED
[ "def _get_log_file_data_as_encoded_content():\n with io.BytesIO() as fp:\n with tarfile.open(fileobj=fp, mode='w:gz') as tar:\n for f in OUTPUT_FILES:\n tar.add(f)\n\n fp.seek(0)\n return base64.encode_as_bytes(fp.getvalue())", "def __bytes__(self):\n with open(self._plaintext, \"rb\") as pf:\n return pf.read()", "def update_content_hash(self,running_hash, file, encoding=''):\n if encoding:\n lines = file.split(\"\\n\")\n for line in lines:\n hashed_line = hashlib.sha1(line)\n hex_digest = hashed_line.hexdigest().encode(encoding)\n running_hash.update(hex_digest)\n else:\n running_hash.update(hashlib.sha1(file).hexdigest())", "def getHash(self):\n # Read file contents using a subprocess.\n script_name = 'filehash.py'\n script_path = os.path.join(frameworkdir, script_name)\n # TODO: add a Timer thread to interrupt if it takes too long.\n try:\n data = subprocess.check_output(['python', script_path, self.filepath])\n except IncompleteScanError:\n self.logger.log('Failed to download %s, unable to get hash' % (self.filepath))\n return ''\n except subprocess.CalledProcessError, e:\n self.logger.log('Non-zero return code from %s' % script_name)\n self.logger.log('Return code: %s, output: %s' % (e.returncode, e.output))\n return ''\n except Exception, e:\n self.logger.log('Unknown failure while trying to download %s' % (self.filepath))\n self.logger.log('Exception was: %s' % e)\n return ''\n self.logger.log('Waiting: %s' % self.filepath)\n self.logger.log('output of filehash.py: %s' % data)\n lines = data.split('\\r\\n')\n if 'opened' == lines[0]:\n if len(lines) > 1:\n lines[:] = lines[1:]\n else:\n return ''\n lines[:] = [l.strip() for l in lines]\n self.contenthash = pickle.loads('\\n'.join(lines))\n self.logger.log('Got contenthash: %s' % self.contenthash)", "def test_file_write(self):\n\n args = self.parser.parse_args([self.str_len, '--file', '--raw-output'])\n\n self.randstr_output(args).process_parsed_args()\n output = sys.stdout.getvalue()\n\n filename = os.path.join(self.test_dir, args.file)\n with open(filename, 'r') as f:\n random_string = f.read()\n\n self.assertIn(random_string, output)", "def bytes(self):\n assert self.hasfilename(), \"Invalid filename\"\n with open(self.filename(), 'rb') as f:\n data = io.BytesIO(f.read())\n return str(data.read()).encode('UTF-8')", "def _create_temp_file(self, content=None):\n\n file = tempfile.NamedTemporaryFile(mode='w+b', delete=False)\n if content is not None:\n if isinstance(content, str):\n content = content.encode(self.encoding)\n file.write(content)\n file.close()\n return file.name", "def encrypt_file_content(self, file_content: Union[str, bytes]) -> bytes:\n\n if not isinstance(file_content, bytes):\n file_content = file_content.encode()\n\n pad = primitive_padding.PKCS7(self.AES_BLOCK_SIZE).padder()\n padded_file_content = pad.update(file_content) + pad.finalize()\n\n encryptor = self.__get_cipher().encryptor()\n\n return encryptor.update(padded_file_content) + encryptor.finalize()", "def main(req: azf.HttpRequest, file: azf.Out[bytes]) -> azf.HttpResponse:\n content_size = int(req.params['content_size'])\n\n # When this is set, then 0x01 byte is repeated content_size number of\n # times to use as input.\n # This is to avoid generating random input for large size which can be\n # slow.\n if 'no_random_input' in req.params:\n content = b'\\x01' * content_size\n else:\n content = bytearray(random.getrandbits(8) for _ in range(content_size))\n content_md5 = hashlib.md5(content).hexdigest()\n\n file.set(content)\n\n response_dict = {\n 
'content_size': content_size,\n 'content_md5': content_md5\n }\n\n response_body = json.dumps(response_dict, indent=2)\n\n return azf.HttpResponse(\n body=response_body,\n mimetype=\"application/json\",\n status_code=200\n )", "def pencrypt():\r\n if options.password is not None:\r\n binary_password = options.password.encode('utf-8')\r\n password_length = len(binary_password)\r\n else:\r\n binary_password = defualt_password.encode('utf-8')\r\n password_length = len(binary_password)\r\n with open(options.input_filename, 'r', encoding='utf-8') as fin, open(\r\n options.output_filename, 'wb') as fout:\r\n new_str = bytes()\r\n line_bytes = fin.read().encode('utf-8')\r\n for index, one_byte in enumerate(line_bytes):\r\n new_str += bytes([one_byte^binary_password[index%password_length]])\r\n fout.write(new_str)", "def _source_hash_file(source):\n h = hashlib.sha1()\n h.update(source.encode('utf-8'))\n return h.hexdigest()", "def stringToBinaryFile(contentString, filename):\n with open(filename, 'wb') as target:\n target.write(contentString)", "def _encode_audio_file(self, src_path, out_path, target_type):\n if sys.platform.startswith('win'):\n return None\n if target_type == 'audio/mpeg':\n executable_name = 'lame'\n args = [\n '--quiet',\n src_path,\n out_path]\n elif target_type == 'audio/ogg':\n executable_name = 'oggenc'\n args = [\n '-Q',\n '-o', out_path,\n src_path]\n elif target_type == 'audio/mp4':\n # TODO(benvanik): support MP4 conversion somehow\n return None\n else:\n # Copy\n return None\n\n return self._run_task_async(ExecutableTask(\n self.build_env, executable_name, call_args=args))", "def part4a(filename, username, password):\n encyrpted_user = username.encode(\"utf-8\")\n encrypted_psw = password.encode(\"utf-8\")\n encyrpted_user = b64encode(encyrpted_user)\n encrypted_psw = b64encode(encrypted_psw)\n \n file = open(filename, \"w+\")\n file.write(str(encyrpted_user))\n file.write(\"\\n\")\n file.write(str(encrypted_psw))\n file.write(\"\\n\")\n file.close()", "def GenerateId_base64(hash_sha512, input_file):\n string_id = base64.urlsafe_b64encode(hash_sha512)\n return string_id", "def generate_hash(self, file):\n with open(file, 'rb') as afile:\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return hasher.hexdigest()", "def GenerateId_base64old(hash_sha512, input_file):\n string_id = base64.b64encode(hash_sha512)\n return string_id", "def code_sha256(self) -> str:\n file_hash = FileHash(hashlib.sha256())\n file_hash.add_file(self.archive_file)\n return base64.b64encode(file_hash.digest).decode()", "def encode(self, value: Any, failsafe: bool = False) -> Union[str, bytes]:\n raise NotImplementedError(\"Encoding not implemented\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get moves for which reports should be generated

Moves are grouped by stock transfer and by product, and assigned a reporting name based on the order within the stock transfer.
def move_report_list(self, _doc, moves):
    return (
        product_moves.with_context(default_name="%04d" % index)
        for _pick, pick_moves in moves.groupby(lambda x: x.picking_id)
        for index, (_product, product_moves) in enumerate(
            pick_moves.groupby(lambda x: x.product_id)
        )
    )
[ "def _my_prepare_stock_moves(self, picking, qty, warehouse_id):\n self.ensure_one()\n res = []\n if self.product_id.type not in ['product', 'consu']:\n return res\n\n price_unit = self._get_stock_move_price_unit()\n\n template = {\n 'name': self.name or '',\n 'product_id': self.product_id.id,\n 'product_uom': self.product_uom.id,\n 'date': self.order_id.date_order,\n 'date_expected': self.date_planned,\n 'location_id': self.order_id.partner_id.property_stock_supplier.id,\n 'location_dest_id': warehouse_id.lot_stock_id.id,\n 'picking_id': picking.id,\n 'partner_id': self.order_id.dest_address_id.id,\n 'move_dest_id': False,\n 'state': 'draft',\n 'purchase_line_id': self.id,\n 'company_id': self.order_id.company_id.id,\n 'price_unit': price_unit,\n 'picking_type_id': picking.picking_type_id.id,\n 'group_id': self.order_id.group_id.id,\n 'procurement_id': False,\n 'origin': self.order_id.name,\n 'route_ids': warehouse_id and [(6, 0, [x.id for x in warehouse_id.route_ids])] or [],\n 'warehouse_id': warehouse_id.id,\n 'product_uom_qty': qty,\n }\n\n res.append(template)\n return res", "def _compute_stock_move(self):\n stock_move_obj = self.env['stock.move']\n stock_moves = stock_move_obj.search([('picking_id', '=', False),\n ('sale_line_id', 'in',\n self.order_line.ids)])\n self.moves_count = len(stock_moves)", "def process_order(self, order, warehouses):\n\n print(\"Order: \")\n print(order)\n print(\"Warehouses: \")\n print(warehouses)\n order, warehouses = self.remove_empty_items(order, warehouses)\n\n if len(order) == 0:\n return []\n\n if len(warehouses) == 0:\n return []\n\n final_shipment = []\n names_index = {}\n\n for item in order:\n i = 0\n # Iterate until we have checked every warehouse or we have satisfied the quantity for item\n while order[item] > 0 and i < len(warehouses):\n\n inventory = warehouses[i]['inventory']\n name = warehouses[i]['name']\n\n if item in inventory:\n\n # Inventory can't satisfy order for item -> amt_to_take is all of inventory - clear inventory\n if inventory[item] - order[item] <= 0:\n amt_to_take = inventory[item]\n order[item] -= inventory[item]\n del inventory[item]\n # Inventory can satisfy order for item -> amt_to_take is equal to the order amount\n else:\n amt_to_take = order[item]\n order[item] = 0\n\n # Create new warehouse object if warehouse not in final shipment\n if name not in names_index:\n warehouse = { name : { item : amt_to_take } }\n names_index[name] = len(final_shipment) # add to end of names\n final_shipment.append(warehouse)\n # Names_order[name] == index of warehouse in final shipment. 
Add (item : amt_to_take ) pair\n else:\n final_shipment[names_index[name]][name][item] = amt_to_take\n\n i += 1\n\n # Check if order for specific item has been satisfied\n if i == len(warehouses) and order[item] > 0:\n return []\n\n print(\"Final shipment: \")\n print(final_shipment)\n return final_shipment", "def action_view_stock_move_ept(self):\n stock_move_obj = self.env['stock.move']\n records = stock_move_obj.search([('picking_id', '=', False),\n ('sale_line_id', 'in',\n self.order_line.ids)])\n action = {\n 'domain': \"[('id', 'in', \" + str(records.ids) + \" )]\",\n 'name': 'Order Stock Move',\n 'view_mode': 'tree,form',\n 'res_model': 'stock.move',\n 'type': 'ir.actions.act_window',\n }\n return action", "def split(self, cr, uid, ids, move_ids, context=None):\n if context is None:\n context = {}\n inventory_id = context.get('inventory_id', False)\n prodlot_obj = self.pool.get('stock.production.lot')\n inventory_obj = self.pool.get('stock.inventory')\n line_obj = self.pool.get('stock.move.split.lines')\n move_obj = self.pool.get('stock.move')\n new_move = []\n company_id = self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move.split', context=context),\n for data in self.browse(cr, uid, ids, context=context):\n for move in move_obj.browse(cr, uid, move_ids, context=context):\n move_qty = move.product_qty\n quantity_rest = move.product_qty\n uos_qty_rest = move.product_uos_qty\n new_move = []\n if data.use_exist:\n lines = [l for l in data.line_exist_ids if l]\n else:\n lines = [l for l in data.line_ids if l]\n total_move_qty = 0.0\n for line in lines:\n quantity = line.quantity1\n total_move_qty += quantity\n diff = round(total_move_qty - move_qty,4)\n if diff > 0:\n raise osv.except_osv(_('Processing Error'), _('Processing quantity %f for %s is larger than the available quantity %f!')\\\n %(total_move_qty, move.product_id.name, move_qty))\n if quantity <= 0 or move_qty == 0:\n continue\n \n quantity_rest = round(quantity_rest-quantity,4)\n uos_qty = quantity / move_qty * move.product_uos_qty\n uos_qty_rest = quantity_rest / move_qty * move.product_uos_qty\n if quantity_rest < 0:\n quantity_rest = quantity\n break\n default_val = {\n 'product_qty': quantity,\n 'product_uos_qty': uos_qty,\n 'pieces_qty': line.pieces_qty,\n 'state': move.state\n }\n \n if quantity_rest > 0:\n current_move = move_obj.copy(cr, uid, move.id, default_val, context=context)\n if inventory_id and current_move:\n inventory_obj.write(cr, uid, inventory_id, {'move_ids': [(4, current_move)]}, context=context)\n new_move.append(current_move)\n if quantity_rest == 0:\n current_move = move.id\n prodlot_id = False\n if data.use_exist:\n prodlot_id = line.prodlot_id.id\n if not prodlot_id:\n \n if line.length1 and line.heigth1:\n \n prodlot_id = prodlot_obj.create(cr, uid, {\n 'name': line.name,\n 'product_id': move.product_id and move.product_id.id,\n 'length':line.length1,\n 'width':line.width1,\n 'heigth':line.heigth1,\n 'company_id':company_id[0]\n },\n context=context)\n \n if line.length and line.heigth:\n prodlot_id = prodlot_obj.create(cr, uid, {\n 'name': line.name,\n 'product_id': move.product_id and move.product_id.id,\n 'length':line.length,\n 'width':line.width,\n 'heigth':line.heigth,\n 'company_id':company_id[0]\n },\n context=context)\n move_obj.write(cr, uid, [current_move], {'prodlot_id': prodlot_id, 'state':move.state})\n \n update_val = {}\n if quantity_rest > 0:\n update_val['product_qty'] = quantity_rest\n update_val['product_uos_qty'] = uos_qty_rest\n update_val['state'] = 
move.state\n update_val['pieces_qty'] = line.pieces_qty\n move_obj.write(cr, uid, [move.id], update_val)\n return new_move", "def _create_account_move_line(self, session=None, move=None):\n # Tricky, via the workflow, we only have one id in the ids variable\n IrProperty = self.env['ir.property']\n ResPartner = self.env['res.partner']\n\n if session and not all(session.id == order.session_id.id for order in self):\n raise UserError(_('Selected orders do not have the same session!'))\n\n grouped_data = {}\n have_to_group_by = session and session.config_id.group_by or False\n rounding_method = session and session.config_id.company_id.tax_calculation_rounding_method\n\n for order in self.filtered(lambda o: not o.account_move or o.state == 'paid'):\n current_company = order.sale_journal.company_id\n account_def = IrProperty.get(\n 'property_account_receivable_id', 'res.partner')\n order_account = order.partner_id.property_account_receivable_id.id or account_def and account_def.id\n partner_id = ResPartner._find_accounting_partner(order.partner_id).id or False\n if move is None:\n # Create an entry for the sale\n journal_id = self.env['ir.config_parameter'].sudo().get_param(\n 'pos.closing.journal_id_%s' % current_company.id, default=order.sale_journal.id)\n move = self._create_account_move(\n order.session_id.start_at, order.name, int(journal_id), order.company_id.id)\n\n def insert_data(data_type, values):\n # if have_to_group_by:\n values.update({\n 'partner_id': partner_id,\n 'move_id': move.id,\n 'currency_id': order.order_currency_id.id,\n })\n if values['credit'] != 0.0:\n values.update({'amount_currency': -(values['credit'] * order.order_currency_id.rate)})\n else:\n values.update({'amount_currency': order.amount_currency})\n\n key = self._get_account_move_line_group_data_type_key(data_type, values)\n if not key:\n return\n\n grouped_data.setdefault(key, [])\n\n if have_to_group_by:\n if not grouped_data[key]:\n grouped_data[key].append(values)\n else:\n current_value = grouped_data[key][0]\n current_value['quantity'] = current_value.get('quantity', 0.0) + values.get('quantity', 0.0)\n current_value['credit'] = current_value.get('credit', 0.0) + values.get('credit', 0.0)\n current_value['debit'] = current_value.get('debit', 0.0) + values.get('debit', 0.0)\n else:\n grouped_data[key].append(values)\n\n # because of the weird way the pos order is written, we need to make sure there is at least one line,\n # because just after the 'for' loop there are references to 'line' and 'income_account' variables (that\n # are set inside the for loop)\n # TOFIX: a deep refactoring of this method (and class!) 
is needed\n # in order to get rid of this stupid hack\n assert order.lines, _('The POS order must have lines when calling this method')\n # Create an move for each order line\n cur = order.pricelist_id.currency_id\n for line in order.lines:\n amount = line.price_subtotal\n\n # Search for the income account\n if line.product_id.property_account_income_id.id:\n income_account = line.product_id.property_account_income_id.id\n elif line.product_id.categ_id.property_account_income_categ_id.id:\n income_account = line.product_id.categ_id.property_account_income_categ_id.id\n else:\n raise UserError(_('Please define income '\n 'account for this product: \"%s\" (id:%d).')\n % (line.product_id.name, line.product_id.id))\n\n name = line.product_id.name\n if line.notice:\n # add discount reason in move\n name = name + ' (' + line.notice + ')'\n\n # Create a move for the line for the order line\n insert_data('product', {\n 'name': name,\n 'quantity': line.qty,\n 'product_id': line.product_id.id,\n 'account_id': income_account,\n 'analytic_account_id': self._prepare_analytic_account(line),\n 'credit': ((amount > 0) and amount) or 0.0,\n 'debit': ((amount < 0) and -amount) or 0.0,\n 'tax_ids': [(6, 0, line.tax_ids_after_fiscal_position.ids)],\n 'partner_id': partner_id\n })\n\n # Create the tax lines\n taxes = line.tax_ids_after_fiscal_position.filtered(lambda t: t.company_id.id == current_company.id)\n if not taxes:\n continue\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n for tax in taxes.compute_all(price, cur, line.qty)['taxes']:\n insert_data('tax', {\n 'name': _('Tax') + ' ' + tax['name'],\n 'product_id': line.product_id.id,\n 'quantity': line.qty,\n 'account_id': tax['account_id'] or income_account,\n 'credit': ((tax['amount'] > 0) and tax['amount']) or 0.0,\n 'debit': ((tax['amount'] < 0) and -tax['amount']) or 0.0,\n 'tax_line_id': tax['id'],\n 'partner_id': partner_id\n })\n\n # round tax lines per order\n if rounding_method == 'round_globally':\n for group_key, group_value in grouped_data.items():\n if group_key[0] == 'tax':\n for line in group_value:\n line['credit'] = cur.round(line['credit'])\n line['debit'] = cur.round(line['debit'])\n\n # counterpart\n insert_data('counter_part', {\n 'name': _(\"Trade Receivables\"), # order.name,\n 'account_id': order_account,\n 'credit': ((order.amount_total < 0) and -order.amount_total) or 0.0,\n 'debit': ((order.amount_total > 0) and order.amount_total) or 0.0,\n 'partner_id': partner_id\n })\n\n order.write({'state': 'done', 'account_move': move.id})\n\n all_lines = []\n for group_key, group_data in grouped_data.items():\n for value in group_data:\n all_lines.append((0, 0, value),)\n if move: # In case no order was changed\n move.sudo().write({'line_ids': all_lines})\n move.sudo().post()\n return True", "def auto_generator_export_stockinfo_report(self):\n report_list = []\n all_report_list = []\n all_report_data = {}\n report_data = {}\n data_dict = {}\n product_obj = self.env['product.product']\n product_cate_obj = self.env['product.category']\n today = datetime.now().strftime(\"%Y-%m-%d\")\n if self.product_ids:\n all_product_ids = self.product_ids\n else:\n all_product_ids = product_obj.search([('type', '!=', 'service')]) or []\n\n if self.product_category_ids:\n product_category_ids = self.product_category_ids\n else:\n product_category_ids = product_cate_obj.search([]) or []\n\n if not all_product_ids or not product_category_ids:\n raise ValidationError(\"No Records Found !!!\")\n\n data_dict, prodcut_data_dict = 
self.get_data(today, all_product_ids, product_category_ids, data_dict)\n # print(prodcut_data_dict)\n count = 0\n for product, data in prodcut_data_dict.items():\n count += 1\n report_data = {\n 'no': count,\n 'product_name': data.get('product_name') or '-',\n 'default_code': data.get('default_code') or '-',\n 'categories': data.get('category') or '-',\n 'current_stock': data.get('current_stock') or '0',\n 'total_sale': data.get('total_sale') or '0',\n 'average_sale_price': data.get('avg_sale_price') or '0',\n 'average_cost_price': data.get('avg_purchase_price') or '0',\n 'average_sale': data.get('average_sale') or '0',\n 'selected_period_sales': data.get('selected_period_sales') or '0',\n 'total_purchase': data.get('total_purchase') or '0',\n 'growth_ratio': data.get('growth_ratio') or '0',\n 'rack_location': data.get('rack_location')\n }\n all_report_list.append(report_data)\n\n for categ_id, data_details in data_dict.items():\n count = 0\n for single_data in data_details:\n count += 1\n all_report_data = {\n 'no': count,\n 'product_name': single_data.get('product_name') or '-',\n 'default_code': single_data.get('default_code') or '-',\n 'categories': single_data.get('category') or '-',\n 'current_stock': single_data.get('current_stock') or '0',\n 'total_sale': single_data.get('total_sale') or '0',\n 'average_sale_price': single_data.get('avg_sale_price') or '0',\n 'average_cost_price': single_data.get('avg_purchase_price') or '0',\n 'average_sale': single_data.get('average_sale') or '0',\n 'selected_period_sales': single_data.get('selected_period_sales') or '0',\n 'total_purchase': single_data.get('total_purchase') or '0',\n 'growth_ratio': single_data.get('growth_ratio') or '0',\n 'rack_location': data.get('rack_location')\n }\n report_list.append(all_report_data)\n\n return report_list, all_report_list", "def plan_by_product():\n Shipment = fulfil.model('stock.shipment.out')\n StockMove = fulfil.model('stock.move')\n shipments = list(Shipment.search_read_all(\n [('state', 'in', ('assigned', 'waiting'))],\n None,\n ['inventory_moves']\n ))\n move_ids = list(\n chain(*map(lambda s: s['inventory_moves'], shipments))\n )\n outgoing_moves = list(StockMove.search_read_all(\n [('id', 'in', move_ids), ('state', 'in', ('draft', 'assigned'))],\n None,\n fields=[\n 'product', 'product.code',\n 'product.template.name',\n 'planned_date',\n 'internal_quantity',\n ]\n ))\n today = date.today()\n for move in outgoing_moves:\n move['Planned Date'] = move['planned_date'] or today\n move['Product'] = move['product.template.name']\n move['SKU'] = move['product.code']\n move['Quantity'] = move['internal_quantity']\n\n df = pd.DataFrame(outgoing_moves)\n pivot_table = pd.pivot_table(\n df,\n index=[\"Product\", \"SKU\"],\n columns=[\"Planned Date\"],\n values=[\"Quantity\"],\n fill_value=\"\",\n aggfunc=\"sum\"\n )\n return render_template(\n 'plan-by-product.html',\n pivot_table=pivot_table,\n current_year=datetime.utcnow().year,\n )", "def crawl_stockx_data(shoe, save_csv=False):\n global USER_AGENT\n shoe_info = get_shoe_info(shoe)\n if shoe_info is None:\n return None\n\n out_folder = Path()\n headers = {'User-Agent': USER_AGENT,\n 'referer': 'https://google.com'}\n out_file = out_folder / f\"stockx_{shoe}.csv\"\n rows = []\n header = [\"shoe_name\", \"release_date\", \"brand\", \"model\", \"shoe_id\", \"color\", \"time\", \"quantity\", \"shoe_size\",\n \"price\", \"currency\"]\n\n query = API_ENDPOINT + shoe_info[\n 4] + 
f\"/activity?state=480&currency={CURRENCY}&limit=10000&page=1&sort=createdAt&order=DESC&country={COUNTRY}\"\n\n while True:\n r = requests.get(query, headers=headers)\n if check_request_status(r.status_code):\n for x in r.json()[\"ProductActivity\"]:\n d = datetime.fromisoformat(x[\"createdAt\"])\n row = shoe_info + [d, int(x[\"amount\"]), x[\"shoeSize\"], x[\"localAmount\"], x[\"localCurrency\"]]\n rows.append(row)\n if r.json()[\"Pagination\"][\"nextPage\"] is None:\n break\n else:\n nextPage = re.findall('/activity.*', r.json()[\"Pagination\"][\"nextPage\"])[0]\n query = API_ENDPOINT + shoe_info[4] + nextPage\n\n df_shoe = pd.DataFrame(data=rows, columns=header)\n df_shoe.drop_duplicates(inplace=True)\n df_shoe['time'] = pd.to_datetime(df_shoe.time.astype(str))\n df_shoe.set_index('time', inplace=True)\n if save_csv:\n df_shoe.to_csv(out_file, encoding=\"utf-8\")\n return df_shoe", "def create_picking(self):\n Picking = self.env['stock.picking']\n Move = self.env['stock.move']\n StockWarehouse = self.env['stock.warehouse']\n for order in self:\n # custom multi location\n multi_loc = False\n for line_order in order.lines:\n if line_order.location_id:\n multi_loc = True\n break\n if multi_loc:\n order.multi_picking()\n else:\n if not order.lines.filtered(\n lambda l: l.product_id.type in [\n 'product', 'consu']):\n continue\n address = order.partner_id.address_get(['delivery']) or {}\n picking_type = order.picking_type_id\n return_pick_type = order.picking_type_id.return_picking_type_id or order.picking_type_id\n order_picking = Picking\n return_picking = Picking\n moves = Move\n location_id = order.location_id.id\n if order.partner_id:\n destination_id = order.partner_id.property_stock_customer.id\n else:\n if (not picking_type) or (\n not picking_type.default_location_dest_id):\n customerloc, supplierloc = StockWarehouse._get_partner_locations()\n destination_id = customerloc.id\n else:\n destination_id = picking_type.default_location_dest_id.id\n\n if picking_type:\n message = _(\n \"This transfer has been created from the point of sale session: <a href=# data-oe-model=pos.order data-oe-id=%d>%s</a>\") % (order.id, order.name)\n picking_vals = {\n 'origin': order.name,\n 'partner_id': address.get('delivery', False),\n 'date_done': order.date_order,\n 'picking_type_id': picking_type.id,\n 'company_id': order.company_id.id,\n 'move_type': 'direct',\n 'note': order.note or \"\",\n 'location_id': location_id,\n 'location_dest_id': destination_id,\n }\n pos_qty = any(\n [x.qty > 0 for x in order.lines if x.product_id.type in ['product', 'consu']])\n if pos_qty:\n order_picking = Picking.create(picking_vals.copy())\n order_picking.message_post(body=message)\n neg_qty = any(\n [x.qty < 0 for x in order.lines if x.product_id.type in ['product', 'consu']])\n if neg_qty:\n return_vals = picking_vals.copy()\n return_vals.update({\n 'location_id': destination_id,\n 'location_dest_id': return_pick_type != picking_type and return_pick_type.default_location_dest_id.id or location_id,\n 'picking_type_id': return_pick_type.id\n })\n return_picking = Picking.create(return_vals)\n return_picking.message_post(body=message)\n\n for line in order.lines.filtered(\n lambda l: l.product_id.type in [\n 'product', 'consu'] and not float_is_zero(\n l.qty, precision_digits=l.product_id.uom_id.rounding)):\n moves |= Move.create({\n 'name': line.name,\n 'product_uom': line.product_id.uom_id.id,\n 'picking_id': order_picking.id if line.qty >= 0 else return_picking.id,\n 'picking_type_id': picking_type.id if line.qty 
>= 0 else return_pick_type.id,\n 'product_id': line.product_id.id,\n 'product_uom_qty': abs(line.qty),\n 'state': 'draft',\n 'location_id': location_id if line.qty >= 0 else destination_id,\n 'location_dest_id': destination_id if line.qty >= 0 else return_pick_type != picking_type and return_pick_type.default_location_dest_id.id or location_id,\n })\n\n # prefer associating the regular order picking, not the\n # return\n order.write(\n {'picking_id': order_picking.id or return_picking.id})\n\n if return_picking:\n order._force_picking_done(return_picking)\n if order_picking:\n order._force_picking_done(order_picking)\n\n # when the pos.config has no picking_type_id set only the moves\n # will be created\n if moves and not return_picking and not order_picking:\n tracked_moves = moves.filtered(\n lambda move: move.product_id.tracking != 'none')\n untracked_moves = moves - tracked_moves\n tracked_moves.action_confirm()\n untracked_moves.action_assign()\n moves.filtered(\n lambda m: m.state in [\n 'confirmed',\n 'waiting']).force_assign()\n moves.filtered(\n lambda m: m.product_id.tracking == 'none').action_done()\n\n return True", "def print_wo_xlsx_report(self):\n for vehicle in self:\n wo_obj = self.env[\"fleet.vehicle.log.services\"]\n records = wo_obj.search([])\n if vehicle.vehicle_ids:\n records = wo_obj.search([(\"vehicle_id\", \"in\", vehicle.vehicle_ids.ids)])\n if vehicle.select_report == \"wo_month_sum_rep\":\n wo_obj = self.env[\n \"report.fleet_operations.workorder.monthly.summary.xls\"\n ]\n file = wo_obj.generate_xlsx_report(records)\n vehicle.write(\n {\"name\": \"WorkOrder Monthly Summary Report.xls\", \"file\": file}\n )\n return {\n \"view_type\": \"form\",\n \"view_mode\": \"form\",\n \"res_model\": \"work.order.reports\",\n \"type\": \"ir.actions.act_window\",\n \"target\": \"new\",\n \"res_id\": vehicle.id,\n }", "def _get_current_move_lines(self):\n MoveLine = self.env[\"stock.move.line\"]\n return MoveLine.search(\n [(\"package_id\", \"child_of\", self.id), (\"state\", \"not in\", [\"done\", \"cancel\"])],\n order=\"id\",\n )", "def _compute_savings(self):\n list_savings = []\n for route1_guid in self.current_route:\n route1 = self.current_route[route1_guid]\n stop1 = route1.last_stop\n\n for route2_guid in self.current_route:\n if route2_guid != route1_guid:\n route2 = self.current_route[route2_guid]\n # check feasibility\n if route1.demand + route2.demand <= self.env.capacity:\n stop2 = route2.first_stop\n\n saving = self.matrix_dist[stop1.guid][self.manager_stop.depot.guid] + self.matrix_dist[self.manager_stop.depot.guid][stop2.guid] +\\\n self.matrix_dist[stop1.guid][stop2.guid]\n\n id_couple = route1_guid + \"-\" + route2_guid\n list_savings.append((saving, id_couple))\n\n return list_savings", "def find_move_lines(self, aux_domain=None):\n MoveLine = self.env[\"stock.move.line\"]\n self.ensure_one()\n\n if aux_domain is None:\n aux_domain = [(\"state\", \"not in\", [\"done\", \"cancel\"])]\n\n domain = aux_domain + [\n \"|\",\n (\"result_package_id\", \"child_of\", self.id),\n (\"package_id\", \"child_of\", self.id),\n ]\n move_lines = MoveLine.search(domain)\n return move_lines", "def fill_inventory_modified(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n\n inventory_line_obj = self.pool.get('stock.inventory.line')\n location_obj = self.pool.get('stock.location')\n product_obj = self.pool.get('product.product')\n stock_location_obj = self.pool.get('stock.location')\n move_obj = self.pool.get('stock.move')\n uom_obj = 
self.pool.get('product.uom')\n if ids and len(ids):\n ids = ids[0]\n else:\n return {'type': 'ir.actions.act_window_close'}\n fill_inventory = self.browse(cr, uid, ids, context=context)\n res = {}\n res_location = {}\n\n if fill_inventory.recursive:\n location_ids = location_obj.search(cr, uid, [('location_id',\n 'child_of', [fill_inventory.location_id.id])], order=\"id\",\n context=context)\n else:\n location_ids = [fill_inventory.location_id.id]\n\n res = {}\n flag = False\n\n for location in location_ids:\n datas = {}\n res[location] = {}\n if context.get('inv_date') and context['inv_date']:\n move_ids = move_obj.search(cr, uid, ['|',('location_dest_id','=',location),('location_id','=',location),('state','=','done'),('date','<=',context['inv_date'])], context=context)\n else:\n move_ids = move_obj.search(cr, uid, ['|',('location_dest_id','=',location),('location_id','=',location),('state','=','done')], context=context)\n\n for move in move_obj.browse(cr, uid, move_ids, context=context):\n lot_id = move.prodlot_id.id\n prod_id = move.product_id.id\n\t\tif move.location_dest_id.id == move.location_id.id :\n\t qty = 0.0\n\t\telif move.location_dest_id.id == location:\n qty = uom_obj._compute_qty(cr, uid, move.product_uom.id,move.product_qty, move.product_id.uom_id.id)\n else:\n qty = -uom_obj._compute_qty(cr, uid, move.product_uom.id,move.product_qty, move.product_id.uom_id.id)\n\n\n if datas.get((prod_id, lot_id)):\n qty += datas[(prod_id, lot_id)]['product_qty']\n\n datas[(prod_id, lot_id)] = {'product_id': prod_id, 'location_id': location, 'product_qty': qty, 'product_uom': move.product_id.uom_id.id, 'prod_lot_id': lot_id}\n\n if datas:\n flag = True\n res[location] = datas\n\n if not flag:\n raise osv.except_osv(_('Warning !'), _('No product in this location.'))\n\n for stock_move in res.values():\n for stock_move_details in stock_move.values():\n stock_move_details.update({'inventory_id': context['active_ids'][0]})\n domain = []\n\n if fill_inventory.set_stock_zero:\n stock_move_details.update({'product_qty': 0})\n\n for field, value in stock_move_details.items():\n domain.append((field, '=', value))\n\n line_ids = inventory_line_obj.search(cr, uid, domain, context=context)\n\n if not line_ids:\n inventory_line_obj.create(cr, uid, stock_move_details, context=context)\n\n inventory_line_obj = self.pool.get('stock.inventory.line')\n\n\n\n\n # return {'type': 'ir.actions.act_window_close'}\n return res", "def getPossibleMovesFor(self, piece):\n toReturn = []\n for i in range(0, 72):\n if piece == 0:\n if (8+i) < 72:\n if not ((8+i) % 6 == 0):\n if not ((1 + i) % 6 == 0):\n toReturn.append([1+i, 6+i, 7+i, 8+i])\n if piece == 1:\n if (13 + i) < 72:\n if not ((1 + i) % 6 == 0):\n if ((1 + i) > 0):\n toReturn.append([1 + i, 6 + i, 7 + i, 13 + i])\n if piece == 2:\n if (13 + i) < 72:\n if not ((7 + i) % 6 == 0):\n toReturn.append([0 + i, 6 + i, 7 + i, 12 + i])\n if piece == 3:\n if (7 + i) < 72:\n if not ((2 + i) % 6 == 0):\n if not((1 + i) % 6 == 0):\n toReturn.append([0 + i, 1 + i, 2 + i, 7 + i])\n if piece == 4:\n if (12 + i) < 36:\n toReturn.append([0 + i, 6 + i, 12 + i, 42 + i])\n if piece == 5:\n if (2 + i) < 36:\n if not ((2 + i) % 6 == 0):\n if not ((1 + i) % 6 == 0):\n toReturn.append([0 + i, 1 + i, 2 + i, 37 + i])\n if piece == 6:\n if (38 + i) < 72:\n if not ((38 + i) % 6 == 0):\n if not ((37 + i) % 6 == 0):\n toReturn.append([36 + i, 37 + i, 38 + i, 1 + i])\n if piece == 7:\n if (48 + i) < 72:\n toReturn.append([36 + i, 42 + i, 48 + i, 6 + i])\n return toReturn", "def write(self, 
vals):\n res = super(MrpProduction, self).write(vals)\n if 'date_planned_finished' in vals:\n for record in self:\n moves = self.env['stock.move'].search(['|', ('raw_material_production_id', '=', record.id),\n ('production_id', '=', record.id),\n ('state', 'not in', ('cancel', 'done'))])\n moves.sudo().write({'date_expected': vals['date_planned_finished'],\n 'date': vals['date_planned_finished']})\n return res", "def create_expanded_moves(cls, pokemon_name_2: str):\n request = pokedex.setup_request_commandline()\n moves_url = Pokemon.create_pokemon_object(pokemon_name_2)\n moves_url_list = moves_url[3]\n async_ability_expanded = \\\n asyncio.run(\n RequestApi.expanded_process_multiple_pokemon_requests(\n moves_url_list))\n ability_expanded_dump = json.dumps(async_ability_expanded)\n ability_expanded_query = json.loads(ability_expanded_dump)\n print(\"\\n---------------EXPANDED MOVES INCLUDED----\"\n \"-------------------------\")\n for move in ability_expanded_query:\n move_name = move[\"name\"]\n move_id = move[\"id\"]\n move_gen = move[\"generation\"][\"name\"]\n move_accuracy = move[\"accuracy\"]\n move_pp = move[\"pp\"]\n move_power = move[\"power\"]\n move_type = move[\"type\"][\"name\"]\n move_damage_class = move[\"damage_class\"][\"name\"]\n move_short_effect = move[\"effect_entries\"][0][\"short_effect\"]\n final_move_object = Moves(move_name, move_id, move_gen,\n move_accuracy,\n move_pp,\n move_power, move_type, move_damage_class,\n move_short_effect)\n if request[3] is None:\n print(final_move_object)\n elif request[3] is not None:\n with open(request[3], mode=\"a\") as output_file:\n output_file.write(\"\\n\\n-----EXPANDED MOVE-----\\n\")\n output_file.write(str(final_move_object))", "def _create_account_move_line(self, cr, uid, ids, session=None, move_id=None, context=None):\n # Tricky, via the workflow, we only have one id in the ids variable\n account_move_obj = self.pool.get('account.move')\n account_move_line_obj = self.pool.get('account.move.line')\n account_period_obj = self.pool.get('account.period')\n account_tax_obj = self.pool.get('account.tax')\n user_proxy = self.pool.get('res.users')\n property_obj = self.pool.get('ir.property')\n cur_obj = self.pool.get('res.currency')\n\n ctx = dict(context or {}, account_period_prefer_normal=True)\n period = account_period_obj.find(cr, uid, context=ctx)[0]\n\n #session_ids = set(order.session_id for order in self.browse(cr, uid, ids, context=context))\n\n if session and not all(session.id == order.session_id.id for order in self.browse(cr, uid, ids, context=context)):\n raise osv.except_osv(_('Error!'), _('Selected orders do not have the same session!'))\n\n current_company = user_proxy.browse(cr, uid, uid, context=context).company_id\n\n grouped_data = {}\n have_to_group_by = session and session.config_id.group_by or False\n\n def compute_tax(amount, tax, line):\n if amount > 0:\n tax_code_id = tax['base_code_id']\n tax_amount = line.price_subtotal * tax['base_sign']\n else:\n tax_code_id = tax['ref_base_code_id']\n tax_amount = line.price_subtotal * tax['ref_base_sign']\n\n return (tax_code_id, tax_amount,)\n\n for order in self.browse(cr, uid, ids, context=context):\n if order.account_move:\n continue\n if order.state != 'paid':\n continue\n\n user_company = user_proxy.browse(cr, order.user_id.id, order.user_id.id).company_id\n\n group_tax = {}\n account_def = property_obj.get(cr, uid, 'property_account_receivable', 'res.partner', context=context)\n\n order_account = order.partner_id and \\\n 
order.partner_id.property_account_receivable and \\\n order.partner_id.property_account_receivable.id or \\\n account_def and account_def.id or current_company.account_receivable.id\n\n if move_id is None:\n # Create an entry for the sale\n move_id = account_move_obj.create(cr, uid, {\n 'ref' : order.name,\n 'journal_id': order.sale_journal.id,\n }, context=context)\n\n def insert_data(data_type, values):\n # if have_to_group_by:\n\n sale_journal_id = order.sale_journal.id\n\n # 'quantity': line.qty,\n # 'product_id': line.product_id.id,\n values.update({\n 'date': order.date_order[:10],\n 'ref': order.name,\n 'journal_id' : sale_journal_id,\n 'period_id' : period,\n 'move_id' : move_id,\n 'company_id': user_company and user_company.id or False,\n })\n\n if data_type == 'product':\n key = ('product', values['partner_id'], values['product_id'], values['debit'] > 0)\n elif data_type == 'tax':\n key = ('tax', values['partner_id'], values['tax_code_id'], values['debit'] > 0)\n elif data_type == 'counter_part':\n key = ('counter_part', values['partner_id'], values['account_id'], values['debit'] > 0)\n else:\n return\n\n grouped_data.setdefault(key, [])\n\n # if not have_to_group_by or (not grouped_data[key]):\n # grouped_data[key].append(values)\n # else:\n # pass\n\n if have_to_group_by:\n if not grouped_data[key]:\n grouped_data[key].append(values)\n else:\n current_value = grouped_data[key][0]\n current_value['quantity'] = current_value.get('quantity', 0.0) + values.get('quantity', 0.0)\n current_value['credit'] = current_value.get('credit', 0.0) + values.get('credit', 0.0)\n current_value['debit'] = current_value.get('debit', 0.0) + values.get('debit', 0.0)\n current_value['tax_amount'] = current_value.get('tax_amount', 0.0) + values.get('tax_amount', 0.0)\n else:\n grouped_data[key].append(values)\n\n #because of the weird way the pos order is written, we need to make sure there is at least one line, \n #because just after the 'for' loop there are references to 'line' and 'income_account' variables (that \n #are set inside the for loop)\n #TOFIX: a deep refactoring of this method (and class!) 
is needed in order to get rid of this stupid hack\n assert order.lines, _('The POS order must have lines when calling this method')\n # Create an move for each order line\n\n cur = order.pricelist_id.currency_id\n for line in order.lines:\n tax_amount = 0\n taxes = [t for t in line.product_id.taxes_id]\n computed_taxes = account_tax_obj.compute_all(cr, uid, taxes, line.price_unit * (100.0-line.discount) / 100.0, line.qty)['taxes']\n\n for tax in computed_taxes:\n tax_amount += cur_obj.round(cr, uid, cur, tax['amount'])\n group_key = (tax['tax_code_id'], tax['base_code_id'], tax['account_collected_id'], tax['id'])\n\n group_tax.setdefault(group_key, 0)\n group_tax[group_key] += cur_obj.round(cr, uid, cur, tax['amount'])\n\n amount = line.price_subtotal\n\n # Search for the income account\n if line.product_id.property_account_income.id:\n income_account = line.product_id.property_account_income.id\n elif line.product_id.categ_id.property_account_income_categ.id:\n income_account = line.product_id.categ_id.property_account_income_categ.id\n else:\n raise osv.except_osv(_('Error!'), _('Please define income '\\\n 'account for this product: \"%s\" (id:%d).') \\\n % (line.product_id.name, line.product_id.id, ))\n\n # Empty the tax list as long as there is no tax code:\n tax_code_id = False\n tax_amount = 0\n while computed_taxes:\n tax = computed_taxes.pop(0)\n tax_code_id, tax_amount = compute_tax(amount, tax, line)\n\n # If there is one we stop\n if tax_code_id:\n break\n\n # Create a move for the line\n insert_data('product', {\n 'name': line.product_id.name,\n 'quantity': line.qty,\n 'product_id': line.product_id.id,\n 'account_id': income_account,\n 'credit': ((amount>0) and amount) or 0.0,\n 'debit': ((amount<0) and -amount) or 0.0,\n 'tax_code_id': tax_code_id,\n 'tax_amount': tax_amount,\n 'partner_id': order.partner_id and self.pool.get(\"res.partner\")._find_accounting_partner(order.partner_id).id or False\n })\n\n # For each remaining tax with a code, whe create a move line\n for tax in computed_taxes:\n tax_code_id, tax_amount = compute_tax(amount, tax, line)\n if not tax_code_id:\n continue\n\n insert_data('tax', {\n 'name': _('Tax'),\n 'product_id':line.product_id.id,\n 'quantity': line.qty,\n 'account_id': income_account,\n 'credit': 0.0,\n 'debit': 0.0,\n 'tax_code_id': tax_code_id,\n 'tax_amount': tax_amount,\n 'partner_id': order.partner_id and self.pool.get(\"res.partner\")._find_accounting_partner(order.partner_id).id or False\n })\n\n # Create a move for each tax group\n (tax_code_pos, base_code_pos, account_pos, tax_id)= (0, 1, 2, 3)\n\n for key, tax_amount in group_tax.items():\n tax = self.pool.get('account.tax').browse(cr, uid, key[tax_id], context=context)\n insert_data('tax', {\n 'name': _('Tax') + ' ' + tax.name,\n 'quantity': line.qty,\n 'product_id': line.product_id.id,\n 'account_id': key[account_pos] or income_account,\n 'credit': ((tax_amount>0) and tax_amount) or 0.0,\n 'debit': ((tax_amount<0) and -tax_amount) or 0.0,\n 'tax_code_id': key[tax_code_pos],\n 'tax_amount': tax_amount,\n 'partner_id': order.partner_id and self.pool.get(\"res.partner\")._find_accounting_partner(order.partner_id).id or False\n })\n\n # counterpart\n insert_data('counter_part', {\n 'name': _(\"Trade Receivables\"), #order.name,\n 'account_id': order_account,\n 'credit': ((order.amount_total < 0) and -order.amount_total) or 0.0,\n 'debit': ((order.amount_total > 0) and order.amount_total) or 0.0,\n 'partner_id': order.partner_id and 
self.pool.get(\"res.partner\")._find_accounting_partner(order.partner_id).id or False\n })\n\n order.write({'state':'done', 'account_move': move_id})\n\n all_lines = []\n for group_key, group_data in grouped_data.iteritems():\n for value in group_data:\n all_lines.append((0, 0, value),)\n if move_id: #In case no order was changed\n self.pool.get(\"account.move\").write(cr, uid, [move_id], {'line_id':all_lines}, context=context)\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test exponential learning rate schedule
def test_exp_schedule(backend):
    lr_init = 0.1
    decay = 0.01
    sch = ExpSchedule(decay)
    for epoch in range(10):
        lr = sch.get_learning_rate(learning_rate=lr_init, epoch=epoch)
        assert np.allclose(lr, lr_init / (1. + decay * epoch))
[ "def test_cyclic_exp_lr(self):\n gold = [0.1,0.26200002,0.39159995,0.49366,0.5723919,0.631441,0.48263744,0.35828033,0.25496817,0.1697357,0.1,\n 0.15648592,0.20167467,0.23726073,0.2647129,0.285302,0.23341745,0.19005677,0.15403408,0.12431534,0.1,\n 0.1196954,0.13545176,0.14785986,0.15743186]\n lrs = cyclic_learning_rate.ExponentialCyclicalLearningRate(\n initial_learning_rate=0.1,\n maximal_learning_rate=1.0,\n step_size=5,\n gamma=0.9,\n )\n for i in range(25):\n assert round(float(lrs(i).numpy()), 5) == round(gold[i], 5)", "def exponentialDecay(self):\n\n lr = self._lr * pow(self._decay_rate, self._step / self._decay_steps)\n for param_group in self._optimizer.param_groups:\n param_group[\"lr\"] = lr", "def lr_schedule(epoch: int) -> float:\n epoch += epoch_base\n learning_rate = 1e-3\n if epoch > 180:\n learning_rate *= 0.5e-3\n elif epoch > 160:\n learning_rate *= 1e-3\n elif epoch > 120:\n learning_rate *= 1e-2\n elif epoch > 80:\n learning_rate *= 1e-1\n return learning_rate", "def test_learning_rate_found():\n m = Member(get_test_model, steps_to_ready, tune_lr=True)\n assert len(m.hyperparameters) == 4", "def testMomentumOptimizerWithNewLearningRate(self):\n self._assertOptimizerWithNewLearningRate(\"momentum_optimizer\")", "def lr_schedule(epoch):\n lr = learning_rate\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 80:\n lr *= 1e-2\n elif epoch > 40:\n lr *= 1e-1\n print('\\nLearning rate: ', lr)\n return lr", "def testAdamOptimizerWithNewLearningRate(self):\n self._assertOptimizerWithNewLearningRate(\"adam_optimizer\")", "def test_train(self):\n trace.train(10)", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n train = tf.train.inverse_time_decay(alpha, global_step, decay_step,\n decay_rate, staircase=True)\n return train", "def testRMSPropWithNewLearingRate(self):\n self._assertOptimizerWithNewLearningRate(\"rms_prop_optimizer\")", "def test(self, failure_rate, iteration_n):\n pass", "def test_lr_scheduler_epoch(self) -> None:\n my_module = torch.nn.Linear(2, 2)\n\n auto_unit = DummyLRSchedulerAutoUnit(\n module=my_module,\n step_lr_interval=\"epoch\",\n )\n\n input_dim = 2\n dataset_len = 8\n batch_size = 2\n max_epochs = 3\n\n train_dl = generate_random_dataloader(dataset_len, input_dim, batch_size)\n\n train(auto_unit, train_dataloader=train_dl, max_epochs=max_epochs)\n self.assertEqual(auto_unit.lr_scheduler.step.call_count, max_epochs)", "def learning_rate_step_decay(epoch, lr, step=24, initial_power=-3):\n num = epoch // step\n lrate = 10 ** (initial_power - num)\n print(\"Learning rate for epoch {} is {}.\".format(epoch + 1, 1.0 * lrate))\n return np.float(lrate)", "def range_test(\n self,\n train_loader,\n val_loader=None,\n start_lr=None,\n end_lr=10,\n epochs=100,\n step_mode=\"exp\",\n ):\n\n # Reset test results\n self.history = {\"lr\": [], \"loss\": [], \"acc\": []}\n self.best_loss = None\n self.best_acc = None\n\n # Move the model to the proper device\n self.model.to(self.device)\n\n # Check if the optimizer is already attached to a scheduler\n self._check_for_scheduler()\n\n # Set the starting learning rate\n if start_lr:\n self._set_learning_rate(start_lr)\n\n total_steps = epochs * len(train_loader)\n\n # Initialize the proper learning rate policy\n if step_mode.lower() == \"exp\":\n self.lr_schedule = ExponentialLR(self.optimizer, end_lr, total_steps)\n elif step_mode.lower() == \"linear\":\n self.lr_schedule = LinearLR(self.optimizer, end_lr, total_steps)\n else:\n raise ValueError(\"expected one of 
(exp, linear), got {}\".format(step_mode))\n\n for epoch in tqdm(range(epochs)):\n # Train on batch and retrieve loss\n loss, acc = self._train_epoch(train_loader)\n if val_loader:\n loss, acc = self._validate(val_loader)\n\n self.history[\"lr\"].append(self.lr_schedule.get_lr()[0])\n\n # Track the best loss and smooth it if smooth_f is specified\n if epoch == 0:\n self.best_loss = loss\n self.best_acc = acc\n else:\n if loss < self.best_loss:\n self.best_loss = loss\n if acc > self.best_acc:\n self.best_acc = acc\n\n # Check if the loss has diverged; if it has, stop the test\n self.history[\"loss\"].append(loss)\n self.history[\"acc\"].append(acc)\n\n print(\"Learning rate search finished. See the graph with {finder_name}.plot()\")", "def test_linear_lr_scheduler():\n model = nn.Conv2d(3, 32, (2, 2), bias=False)\n optimizer = optim.Adam(model.parameters(), lr=0.0002)\n epochs = 120\n total_examples = 1281167\n batch_size = 256\n steps_per_epoch = int(math.ceil(total_examples / batch_size))\n scheduler = LinearLR(optimizer, 2e-7, epochs, steps_per_epoch)\n\n lrs = []\n for epoch in range(epochs):\n for batch in range(steps_per_epoch):\n lrs.append(optimizer.param_groups[0]['lr'])\n optimizer.step()\n scheduler.step()\n\n assert lrs[0] == 0.0002\n assert pytest.approx(lrs[1], 0.000199999663866, 1e-14)\n assert pytest.approx(lrs[2], 0.000199999327731, 1e-14)\n assert pytest.approx(lrs[80], 0.000199973109244, 1e-14)\n assert pytest.approx(lrs[160], 0.000199946218487, 1e-14)\n assert pytest.approx(lrs[60000], 0.000179831932773, 1e-14)\n assert lrs[epochs * steps_per_epoch - 1] == 2e-7", "def test_train_static_multiplication():\n\n train_retry(\n epoch_count=4000,\n expected_interpolation_loss=0.0001,\n expected_extrapolation_loss=0.0001,\n learning_rate=0.05,\n task=lambda a, b: a * b,\n )", "def train(self):\n if self.train_time <= 0:\n # train on episodes\n begin = datetime.datetime.utcnow()\n \n time = []\n for i in range(self.iterations):\n i_begin = datetime.datetime.utcnow()\n self.iteration()\n i_end = datetime.datetime.utcnow()\n\n delta = i_end - i_begin\n delta = delta.total_seconds()\n time.append(delta)\n eta = np.mean(time)*(self.iterations - i)\n\n self._pprint(\"ITERATION %d done in %.2f seconds, ETA %.2f seconds\\n\" %(i+1, delta, eta))\n\n end = datetime.datetime.utcnow()\n delta = end - begin\n delta = delta.total_seconds()\n self._pprint(\"Done training, took %.2f seconds\" %(delta))\n\n else:\n time = []\n begin = datetime.datetime.utcnow()\n i = 0\n while datetime.datetime.utcnow() - begin < self.calculation_time:\n i_begin = datetime.datetime.utcnow()\n self.iteration()\n i_end = datetime.datetime.utcnow()\n\n delta = i_end - i_begin\n delta = delta.total_seconds()\n time.append(delta)\n ave_time = np.mean(time)\n total = np.round(self.calculation_time.total_seconds()/ave_time)\n i += 1\n\n self._pprint(\"Iteration %d done in %.2f seconds, ESTIMATED ITERATIONS: %d\\n\" %(i, delta, total))\n\n end = datetime.datetime.utcnow()\n delta = end - begin\n delta = delta.total_seconds()\n self._pprint(\"Done training, took %.2f seconds\" %(delta))", "def test_against_exponential(self):\n t = np.arange(-10, 50, step = 0.3)\n offset = 2\n exp = exponential(t, self.tzero, self.amp1, self.tconst1, offset = offset)\n biexp=biexponential(t,self.tzero,self.amp1, 0, self.tconst1, 1, offset = offset)\n\n self.assertTrue(np.allclose(exp, biexp))", "def test_get_alpha(self):\n for T in [300, 400, 500, 600, 800, 1000, 1500, 2000]:\n dEdown0 = 1000. 
* self.alpha0 * (T / self.T0) ** self.n\n dEdown = self.singleExponentialDown.get_alpha(T)\n self.assertAlmostEqual(dEdown0, dEdown, 6)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes TERM to new value; useful for overriding terminal defaults.
def term(self, term):
    self._env['TERM'] = term
[ "def SetTerminal(self, isTerminal):\n self.terminalNode = isTerminal", "def terminal_mode( self ):\r\n self.mode = \"Terminal\"\r\n\r\n self.port = \"COM5\" #\r\n self.baudrate = 19200 # 9600 19200 38400, 57600, 115200, 128000 and 256000\r\n\r\n self.send_ctrls = [\r\n # text cmd can edit\r\n ( \"Version\", \"v\", True ),\r\n ( \"Help\", \"h\", True ),\r\n ( \"TimeLoop\", \"t\", True ),\r\n ( \"More Different\", \"yes different\", True ),\r\n ]", "def _turn_sigterm_into_systemexit():\n\n def handle_term(signo, frame):\n raise SystemExit\n signal.signal(signal.SIGTERM, handle_term)", "def __init__(self):\n self.term = Terminal()\n self.term_status = TermStatus.OPEN\n self.term.enter_fullscreen()\n self.clear()", "def OnTermSettings(self, event):\n dialog = TerminalSettingsDialog(None, -1, \"\", settings=self.terminal.settings)\n result = dialog.ShowModal()\n dialog.Destroy()", "def reset_terminal(self) -> None:\n self.display_back_ground()\n self.display_menu()\n self.print_moves()", "def sigterm(self, num, frame):\n self.quit()", "def _apply_configuration(self, terminal):\n terminal.set_colors(self._fg_color, self._bg_color, self._palette)\n terminal.set_font_scale(self._font_scale)\n if self._font_family:\n font = terminal.get_font()\n font.set_family(self._font_family)\n terminal.set_font(font)", "def setTerminateRule(self, termRule):\n\t\t\t\tself._terminateAnalysis = termRule", "def select_terminal_class(colored=True):\n # MAYBE: BEHAVE_TERM\n # TERM = os.environ.get(\"TERM\", None)\n if colored:\n if sys.platform.startswith(\"win\"): # pragma: no cover\n from behave_ext.terminal import winterm\n return winterm.Terminal\n elif os.name == \"posix\": # pragma: no cover\n # -- PLATFORM-GROUP: POSIX/UNIX SYSTEMS\n from behave_ext.terminal import ansiterm\n return ansiterm.AnsiTerminalWriter\n # -- OTHERWISE: Monochrome or unknown platform in colored mode.\n return PlainTerminalWriter", "def set_terminal_pulse (self, channel, terminal):\n channel = str(channel)\n terminal = str(terminal)\n return CALL ('SetCOPulseTerm', self, channel, terminal)==0", "def __init__(self, kind=None, stream=None, force_styling=False):\n global _CUR_TERM\n self.keyboard_fd = None\n\n # default stream is stdout, keyboard only valid as stdin when\n # output stream is stdout and output stream is a tty\n if stream is None or stream == sys.__stdout__:\n stream = sys.__stdout__\n self.keyboard_fd = sys.__stdin__.fileno()\n\n try:\n stream_fd = (stream.fileno() if hasattr(stream, 'fileno')\n and callable(stream.fileno) else None)\n except IOUnsupportedOperation:\n stream_fd = None\n\n self._is_a_tty = stream_fd is not None and os.isatty(stream_fd)\n self._does_styling = ((self.is_a_tty or force_styling) and\n force_styling is not None)\n\n # keyboard_fd only non-None if both stdin and stdout is a tty.\n self.keyboard_fd = (self.keyboard_fd\n if self.keyboard_fd is not None and\n self.is_a_tty and os.isatty(self.keyboard_fd)\n else None)\n self._normal = None # cache normal attr, preventing recursive lookups\n\n # The descriptor to direct terminal initialization sequences to.\n # sys.__stdout__ seems to always have a descriptor of 1, even if output\n # is redirected.\n self._init_descriptor = (stream_fd is None and sys.__stdout__.fileno()\n or stream_fd)\n self._kind = kind or os.environ.get('TERM', 'unknown')\n\n if self.does_styling:\n # Make things like tigetstr() work. Explicit args make setupterm()\n # work even when -s is passed to nosetests. 
Lean toward sending\n # init sequences to the stream if it has a file descriptor, and\n # send them to stdout as a fallback, since they have to go\n # somewhere.\n try:\n if (platform.python_implementation() == 'PyPy' and\n isinstance(self._kind, unicode)):\n # pypy/2.4.0_2/libexec/lib_pypy/_curses.py, line 1131\n # TypeError: initializer for ctype 'char *' must be a str\n curses.setupterm(self._kind.encode('ascii'), self._init_descriptor)\n else:\n curses.setupterm(self._kind, self._init_descriptor)\n except curses.error as err:\n warnings.warn('Failed to setupterm(kind={0!r}): {1}'\n .format(self._kind, err))\n self._kind = None\n self._does_styling = False\n else:\n if _CUR_TERM is None or self._kind == _CUR_TERM:\n _CUR_TERM = self._kind\n else:\n warnings.warn(\n 'A terminal of kind \"%s\" has been requested; due to an'\n ' internal python curses bug, terminal capabilities'\n ' for a terminal of kind \"%s\" will continue to be'\n ' returned for the remainder of this process.' % (\n self._kind, _CUR_TERM,))\n\n for re_name, re_val in init_sequence_patterns(self).items():\n setattr(self, re_name, re_val)\n\n # build database of int code <=> KEY_NAME\n self._keycodes = get_keyboard_codes()\n\n # store attributes as: self.KEY_NAME = code\n for key_code, key_name in self._keycodes.items():\n setattr(self, key_name, key_code)\n\n # build database of sequence <=> KEY_NAME\n self._keymap = get_keyboard_sequences(self)\n\n self._keyboard_buf = collections.deque()\n if self.keyboard_fd is not None:\n locale.setlocale(locale.LC_ALL, '')\n self._encoding = locale.getpreferredencoding() or 'ascii'\n try:\n self._keyboard_decoder = codecs.getincrementaldecoder(\n self._encoding)()\n except LookupError as err:\n warnings.warn('%s, fallback to ASCII for keyboard.' 
% (err,))\n self._encoding = 'ascii'\n self._keyboard_decoder = codecs.getincrementaldecoder(\n self._encoding)()\n\n self.stream = stream", "def clear(self):\n os.system(self.clear_term)", "def addTerminator(self, newTerm):\n self.terminators = self.terminators + [newTerm]", "def test_setupterm_singleton_issue_33():\n @as_subprocess\n def child():\n warnings.filterwarnings(\"error\", category=UserWarning)\n\n # instantiate first terminal, of type xterm-256color\n term = TestTerminal(force_styling=True)\n first_kind = term.kind\n next_kind = 'xterm'\n\n try:\n # a second instantiation raises UserWarning\n term = TestTerminal(kind=next_kind, force_styling=True)\n except UserWarning as err:\n assert (err.args[0].startswith(\n 'A terminal of kind \"' + next_kind + '\" has been requested')\n ), err.args[0]\n assert ('a terminal of kind \"' + first_kind + '\" will '\n 'continue to be returned' in err.args[0]), err.args[0]\n else:\n # unless term is not a tty and setupterm() is not called\n assert not term.is_a_tty, 'Should have thrown exception'\n warnings.resetwarnings()\n\n child()", "def open_xterm_console():\n\n suite_path = os.path.dirname(THIS_PATH)\n terminal = 'xterm'\n terminal_title = '\"controller-0 boot console\"'\n geometry = '-0+0' # upper right hand corner\n if 'DISPLAY' not in os.environ:\n os.environ['DISPLAY'] = ':0'\n command = 'python {suite}/Utils/watcher.py {log_path}'.format(\n suite=suite_path, log_path=LOG_PATH)\n\n try:\n pid_list = subprocess.check_output(['pidof', terminal]).split()\n\n # killing all xterm active sessions\n for pid in pid_list:\n _pid = psutil.Process(int(pid))\n # terminate the process\n _pid.terminate()\n\n if _pid.is_running():\n # forces the process to terminate\n _pid.suspend()\n _pid.resume()\n except subprocess.CalledProcessError:\n LOG.info('There is not process for : %s', terminal)\n\n os.system('{term} -geometry {geo} -T {title} -e {cmd} &'.format(\n term=terminal, geo=geometry, title=terminal_title, cmd=command))", "def clear_terminal(self):\n cmd = \"\"\n if os.name == \"posix\":\n cmd = \"clear\"\n else:\n cmd = \"clr\"\n os.system(cmd)", "def change_font_size_in_terminal(should_be_big):\n\n run_applescript('tell application \"Terminal\" to ' \\\n 'set font size of current settings of front window to '\n '{0}'.format(BIG_FONT if should_be_big else SMALL_FONT))", "def mock_terminal_logoff(*args, **kwargs):\n orig_logoff(*args, **kwargs)\n cms_obj._terminal._s3270 = mock_s3270" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of buildings in a neighborhood
def get_num_buildings(nname): engine = get_sql_engine() building_stats = text( """ SELECT count(v.*) as num_buildings FROM vacant_buildings as v JOIN philadelphia_neighborhoods as n ON ST_Intersects(v.geom, n.geom) WHERE n.neighborhood_name = :nname """ ) resp = engine.execute(building_stats, nname=nname).fetchone() return resp["num_buildings"]
[ "def neighborhood_size(G, v):\n return len([u for u, v1 in G if v1 == v])", "def num_building(building_type: BuildingType, ai_stat: AI_GameStatus, count_under_construction=False):\n value = 0\n for b in ai_stat.map.building_list:\n if b.type == building_type:\n if count_under_construction:\n if b.state == BuildingState.UNDER_CONSTRUCTION or b.state == BuildingState.ACTIVE:\n value = value + 1\n else:\n if b.state == BuildingState.ACTIVE:\n value = value + 1\n return value", "def island_count(*grid: str) -> int:\n unvisited = {(x, y)\n for (y, row) in enumerate(grid)\n for (x, char) in enumerate(row)\n if bool(int(char))}\n\n number_of_islands = 0\n while unvisited:\n explore_island(next(iter(unvisited)), unvisited)\n number_of_islands += 1\n return number_of_islands", "def number_of_neighbors(self):\n return len(self.indices_of_atoms_connecting)", "def count_lights(grid):\n return int(np.sum(grid))", "def num_neighbours(lag=1):\n win_size = 2*lag + 1\n neighbours = win_size**2 - (2*(lag-1) + 1)**2\n \n return neighbours", "def neighborCount(self):\n \n data=self.EZSPtrans([0x7A]);\n return ord(data[5]);", "def num_neighbors(self, index):\n return len(self.neighbors(index))", "def getBuildingCount(self):\n p = Selector(text=self.content).xpath('//*[@id=\"MainContent_lblBldCount\"]/text()')\n try:\n buildingcount = p.extract()[0]\n except IndexError:\n print(\"No Building Count information is available for %s\" % self.getLocation())\n return \"\"\n return buildingcount", "def __len__(self):\n totalLength = 0\n for node in self.grid.iter():\n totalLength += len(node.get('grid'))\n\n return totalLength", "def neighbors(self):\n hood = (self.x, self.y, self.neighborhood_radius) # neighborhood\n n = collide_single(hood, self.others)\n return n", "def count_visited_places(self):\r\n visited_places = 0\r\n for place in self.places:\r\n if place[0].status == 'n':\r\n visited_places += 1\r\n return visited_places", "def compute_neighborhood_size(self, current_epoch: int) -> float:\n return self.neighbors_radius_const * np.exp(-current_epoch / self.time_const)", "def number_at_cell(self, index):\n if self._game[index] == FLAG:\n return FLAG\n elif index in self._pokemon_locations:\n return\n count = 0\n for neighbour in self.neighbour_directions(index):\n if neighbour in self._pokemon_locations:\n count += 1\n return count", "def count_obstacles_in_my_elf_way_to_castle(game, elf):\n count = 0\n for portal in game.get_enemy_portals():\n if portal.distance(elf) + portal.distance(game.get_enemy_castle()) < elf.distance(game.get_enemy_castle()) + game.portal_size or \\\n portal.distance(elf) + portal.distance(game.get_enemy_castle()) > elf.distance(game.get_enemy_castle()) - game.portal_size:\n \n count += 2 # portals are harder to kill so i consider them as 2 (in comperisson it wont matter)\n \n for mana_fountain in game.get_enemy_mana_fountains():\n if mana_fountain.distance(elf) + mana_fountain.distance(game.get_enemy_castle()) < elf.distance(game.get_enemy_castle()) + game.portal_size or \\\n mana_fountain.distance(elf) + mana_fountain.distance(game.get_enemy_castle()) > elf.distance(game.get_enemy_castle()) - game.portal_size:\n \n count +=1\n\n return count", "def count_islands(grid):\n\tvisited = grid.copy() # copy the grid in order not to lose the real information.\n\tM = len(grid)\n\tN = len(grid[0])\n\tc = 0\n\tfor k in range(M):\n\t\tfor l in range(N):\n\t\t\tif visited[k][l]:\n\t\t\t\tc += 1 # found a new island\n\t\t\t\tvisit_island(visited, k, l, M, N) # visit the connected 
pieces\n\treturn c", "def alive_neighbors(self, row, col):\n\n count = 0 # Keeps track of how many alive cells are in the neighborhood\n\n if row > 0: # Checks if there is an upper row\n count += self._cells[row - 1][col].get_value() # Gets the value of the cell immediately above\n\n if col > 0: # Checks if there is a column to the left\n count += self._cells[row - 1][col - 1].get_value() # Gets the value of the cell at its top left\n\n if col < self._cols - 1: # Checks if there is a column to the right\n count += self._cells[row - 1][col + 1].get_value() # Gets the value of the cell at its top right\n\n if row < self._rows - 1: # Cheks if there is a lower row\n count += self._cells[row + 1][col].get_value() # Gets the value of the cell immediately below\n\n if col > 0: # Checks if there is a column to the left\n count += self._cells[row + 1][col - 1].get_value() # Gets the value of the cell at its bottom left\n\n if col < self._cols - 1: # Checks if there is a column to the right\n count += self._cells[row + 1][col + 1].get_value() # Gets the value of the cell at its bottom right\n\n if col > 0: # Checks if there is a column to the left\n count += self._cells[row][col - 1].get_value() # Gets the value of the cell at its left\n\n if col < self._cols - 1: # Checks if there is a column to the right\n count += self._cells[row][col + 1].get_value() # Gets the value of the cell at its right\n\n return count", "def _get_count(self) -> \"size_t\" :\n return _core.Workspaces__get_count(self)", "def get_number_of_grains(self):\n return len(self.grains)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all buildings for a neighborhood
def get_neighborhood_buildings(nname): engine = get_sql_engine() vacant_buildings = text( """ SELECT "ADDRESS" as address, "BLDG_DESC" as building_description, "OPA_ID" as opa_id, v.geom as geom FROM vacant_buildings as v JOIN philadelphia_neighborhoods as n ON ST_Intersects(v.geom, n.geom) WHERE n.neighborhood_name = :nname """ ) buildings = gpd.read_postgis(vacant_buildings, con=engine, params={"nname": nname}) return buildings
[ "def get_neighborhoods(state):\n\n neighborhoods = set()\n \n listings = Listing.query.filter(Listing.address.like('%{}%'.format(state))).all()\n for listing in listings: \n neighborhoods.add(listing.neighborhood)\n\n return neighborhoods", "def get_neighborhood_listings(\n self, neighborhoods, city, limit=50, pages=10, delay=1\n ):\n\n listings = None\n\n for n in neighborhoods:\n # get listings for current neighborhood and append to all listings\n df = self.get_listings(\n f'{n}, {city}', limit=limit, pages=pages, delay=delay\n )\n listings = listings.append(df) if listings is not None else df\n time.sleep(delay)\n\n # drop duplicate listings just in case\n if listings is not None:\n listings = listings.drop_duplicates(subset='id')\n\n return listings", "def get_buildings(request, name):\n\tname = name.replace(\"_\", \" \")\n\tcommunity = Community.objects.get(name=name)\n\tbuildings = community.building_set.all()\n\tserializer = BuildingSerializer(buildings, many=True)\n\treturn JSONResponse(serializer.data)", "def get_buildings(self):\n url = f'{self.API_URL}/buildings/'\n headers = {\n 'accept': 'application/json',\n 'Authorization': self._token,\n }\n\n try:\n response = requests.request(\"GET\", url, headers=headers)\n if response.status_code != 200:\n return None, [f'Expected 200 response from BETTER but got {response.status_code}: {response.content}']\n except Exception as e:\n return None, [f'Unexpected error creating BETTER portfolio: {e}']\n\n return response.json(), []", "def get_all_appliance_locations(self) -> list:\n return self._get(\"/gms/grNode\")", "def _house_list_for_street(self, street):\n u = self.PARSER_URL\n street_id, _ = street\n nmbrs = range(1, 10)\n\n rs = (grequests.get(u, params={\"act\": \"get_street_data\",\n \"data\": n,\n \"street\": street_id})\n for n in nmbrs)\n results = grequests.map(rs)\n for resp in results:\n yield from self._houses_from_api_response(resp)", "def get_building_possibilities(house_grid, build_grid, num_to_build):\n # set up output file\n outfile = open('output_%d.txt' % num_to_build, 'w')\n results = []\n\n # find empty places to build\n empties = get_points_with_char(build_grid, EMPTY_CHAR)\n\n # figure out all possible building combinations\n trials = itertools.combinations(empties, num_to_build)\n time_start = datetime.datetime.now()\n for trial in trials:\n # create a copy so we're not changing the actual grid\n grid_copy = copy.deepcopy(build_grid)\n set_points_in_grid(grid_copy, trial, RESTAURANT_CHAR)\n max_min = max(calculate_min_distances(house_grid, grid_copy))\n\n # write to file\n outfile.write(str(max_min) + ' ' +\n print_grid_as_line(grid_copy) + '\\n')\n\n # if we find a feasible solution, add to the set\n if max_min <= THRESHOLD:\n results.append((trial, max_min))\n\n time_stop = datetime.datetime.now()\n outfile.close()\n print(\"Found %d results, runtime: %s\" % (len(results),\n time_stop - time_start))\n # return set of feasible results; not used elsewhere\n return results", "def get_neighbors(self):\n return [neighbor for neighbor in self.neighbors]", "def get_neighborhood(self, ids, radius=1, full_subgraph=True):\n\n\n verts = ids\n\n ## find the vertices within radius (and the path edges)\n for i in range(radius):\n edges_out = self.get_edges(src_ids=verts)\n edges_in = self.get_edges(dst_ids=verts)\n\n verts = list(edges_in['__src_id']) + list(edges_in['__dst_id']) + \\\n list(edges_out['__src_id']) + list(edges_out['__dst_id'])\n verts = list(set(verts))\n\n ## make a new graph to return and add the 
vertices\n g = SGraph()\n g = g.add_vertices(self.get_vertices(verts), vid_field='__id')\n\n ## add the requested edge set\n if full_subgraph is True:\n induced_edge_out = self.get_edges(src_ids=verts)\n induced_edge_in = self.get_edges(dst_ids=verts)\n df_induced = induced_edge_out.append(induced_edge_in)\n df_induced = df_induced.groupby(df_induced.column_names(), {})\n\n verts_sa = SArray(list(verts))\n edges = df_induced.filter_by(verts_sa, \"__src_id\")\n edges = edges.filter_by(verts_sa, \"__dst_id\")\n\n else:\n path_edges = edges_out.append(edges_in)\n edges = path_edges.groupby(path_edges.column_names(), {})\n\n g = g.add_edges(edges, src_field='__src_id', dst_field='__dst_id')\n return g", "def _get_all_rooms(klass, floor):\n unidentified_rooms = floor.get(\"unidentified_rooms\", [])\n unidentified_rooms = (\n (None, room) for room in unidentified_rooms )\n rooms = floor.get(\"rooms\", {})\n room_items = (\n (rid, room) for rid, room in rooms.items() if \"polygon\" in room\n )\n return chain(room_items, unidentified_rooms)", "def all_in_neighbors(G,S):\n nbrs = set([])\n for nobj in S:\n nbrs.update(set(G.in_neighbors(nobj)))\n return nbrs", "def get_neighbors(self):\n\n # create an empty list for neighbors.\n neighbors = []\n\n # go through all of the rows in the map array\n for j in range(0, self.height):\n \n row = []\n \n # go through the items in the row, and add the \n # amount of neighbors that item has to the \n # array.\n for i in range(0, self.width):\n row += [self.get_point_neighbor(j, i)]\n\n # add the row into the neighbors array.\n neighbors += [row]\n\n # check the map, and return the neighbors array.\n self.assert_array_size('get_neighbors', self.neighbors)\n return neighbors", "def getRoomsByBuildingAndFloor(self, bid, rfloor):\n cursor = self.conn.cursor()\n query = sql.SQL(\"select {fields} from {table1} \"\n \"left outer join {table2} \"\n \"on {table1}.{table1Identifier} = {table2}.{table2Identifier} \"\n \"where {pkey1}= %s and {pkey2}= %s\"\n \"order by {orderkey};\").format(\n fields=sql.SQL(',').join([\n sql.Identifier('rid'),\n sql.Identifier('bid'),\n sql.Identifier('rcode'),\n sql.Identifier('rfloor'),\n sql.Identifier('rdescription'),\n sql.Identifier('roccupancy'),\n sql.Identifier('rdept'),\n sql.Identifier('rcustodian'),\n sql.Identifier('rlongitude'),\n sql.Identifier('rlatitude'),\n sql.Identifier('raltitude'),\n sql.Identifier('photourl')\n ]),\n table1=sql.Identifier('rooms'),\n table2=sql.Identifier('photos'),\n table1Identifier=sql.Identifier('photoid'),\n table2Identifier=sql.Identifier('photoid'),\n pkey1=sql.Identifier('bid'),\n pkey2=sql.Identifier('rfloor'),\n orderkey=sql.Identifier('rcode'))\n cursor.execute(query, (int(bid), int(rfloor)))\n result = []\n for row in cursor:\n result.append(row)\n return result", "def _get_all_ops_in_neighborhood(op: Op, direction: str, neighborhood=None):\n if neighborhood is None:\n neighborhood = {}\n neighborhood[op] = direction\n if direction == 'input' and op.inputs:\n input_products = [inp for inp in op.inputs if inp.is_inter_module()]\n input_ops = [inp.producer for inp in input_products]\n for input_op in input_ops:\n if input_op not in neighborhood:\n neighborhood[input_op] = 'output'\n if input_op.type == 'Split':\n _get_all_ops_in_neighborhood(input_op, 'input', neighborhood)\n _get_all_ops_in_neighborhood(input_op, 'output', neighborhood)\n else:\n _get_all_ops_in_neighborhood(input_op, 'output', neighborhood)\n elif op.output:\n output_ops = [consumer for consumer in 
op.output.consumers]\n for output_op in output_ops:\n if output_op not in neighborhood:\n neighborhood[output_op] = 'input'\n if output_op.type == 'Split':\n _get_all_ops_in_neighborhood(output_op, 'output', neighborhood)\n else:\n _get_all_ops_in_neighborhood(output_op, 'input', neighborhood)\n return neighborhood", "def ward_neighbourhoods(citydata, ward):\n\n neighbourhoods = []\n\n for neighbourhood in citydata:\n\n if ward == citydata[neighbourhood][\"Ward\"]:\n\n neighbourhoods.append(neighbourhood)\n\n\n return neighbourhoods", "def get_neighbourhood(self, winner):\n\t\tnr_rows = self.W.shape[0]\n\t\tnr_cols = self.W.shape[1]\n\n\t\trow_span = np.arange(winner[0] - self.radius, winner[0] + self.radius + 1)\n\t\tcol_span = np.arange(winner[1] - self.radius, winner[1] + self.radius + 1)\n\n\t\tneighbourhood = []\n\t\tfor i in range((2*self.radius) + 1):\n\t\t\tfor j in range((2*self.radius) + 1):\n\t\t\t\tif((row_span[i] > (nr_rows - 1)) or (row_span[i] < 0) \\\n\t\t\t\t\tor (col_span[j] > (nr_cols - 1)) or (col_span[j] < 0)):\n\t\t\t\t\tcontinue\n\t\t\t\telse: \n\t\t\t\t\tneighbourhood.append([row_span[i], col_span[j]])\n\n\t\treturn neighbourhood", "def get_one_exchange_neighbourhood(configuration: Configuration, seed: int) -> List[Configuration]:\n random = np.random.RandomState(seed)\n hyperparameters_list = list(configuration.keys())\n hyperparameters_list_length = len(hyperparameters_list)\n neighbors_to_return = dict()\n hyperparameters_used = list()\n number_of_usable_hyperparameters = sum(np.isfinite(configuration.get_array()))\n configuration_space = configuration.configuration_space\n\n while len(hyperparameters_used) != number_of_usable_hyperparameters:\n index = random.randint(hyperparameters_list_length)\n hp_name = hyperparameters_list[index]\n if hp_name in neighbors_to_return:\n random.shuffle(neighbors_to_return[hp_name])\n n_ = neighbors_to_return[hp_name].pop()\n if len(neighbors_to_return[hp_name]) == 0:\n del neighbors_to_return[hp_name]\n hyperparameters_used.append(hp_name)\n yield n_\n\n else:\n neighbourhood = []\n number_of_sampled_neighbors = 0\n array = configuration.get_array()\n\n if not np.isfinite(array[index]):\n continue\n\n iteration = 0\n while True:\n hp = configuration_space.get_hyperparameter(hp_name)\n configuration._populate_values()\n num_neighbors = hp.get_num_neighbors(configuration.get(hp_name))\n\n # Obtain neigbors differently for different possible numbers of\n # neighbors\n if num_neighbors == 0:\n break\n # No infinite loops\n elif iteration > 100:\n break\n elif np.isinf(num_neighbors):\n if number_of_sampled_neighbors >= 4:\n break\n num_samples_to_go = 4 - number_of_sampled_neighbors\n neighbors = hp.get_neighbors(array[index], random,\n number=num_samples_to_go)\n else:\n if iteration > 0:\n break\n neighbors = hp.get_neighbors(array[index], random)\n\n\n # Check all newly obtained neigbors\n for neighbor in neighbors:\n new_array = array.copy()\n new_array = change_hp_value(configuration_space,\n new_array, hp_name, neighbor,\n index)\n\n try:\n # Populating a configuration from an array does not check\n # if it is a legal configuration - check this (slow)\n new_configuration = Configuration(configuration_space, vector=new_array)\n # Only rigorously check every tenth configuration (\n # because moving around in the neighborhood should\n # just work!)\n if np.random.random() > 0.9:\n new_configuration.is_valid_configuration()\n else:\n configuration_space._check_forbidden(new_array)\n neighbourhood.append(new_configuration)\n 
number_of_sampled_neighbors += 1\n # todo: investigate why tests fail when ForbiddenValueError is caught here\n except ForbiddenValueError as e:\n pass\n\n # Count iterations to not run into an infinite loop when\n # sampling floats/ints and there is large amount of forbidden\n # values; also to find out if we tried to get a neighbor for\n # a categorical hyperparameter, and the only possible\n # neighbor is forbidden together with another active\n # value/default hyperparameter\n iteration += 1\n if len(neighbourhood) == 0:\n hyperparameters_used.append(hp_name)\n else:\n if hp_name not in hyperparameters_used:\n neighbors_to_return[hp_name] = neighbourhood\n random.shuffle(neighbors_to_return[hp_name])\n n_ = neighbors_to_return[hp_name].pop()\n if len(neighbors_to_return[hp_name]) == 0:\n del neighbors_to_return[hp_name]\n hyperparameters_used.append(hp_name)\n yield n_", "def get_neighbourhood(self):\n\n mu, var = np.random.normal(loc=0,scale=4,size=2)\n var = np.abs(var)\n self.neigh_feats = np.random.normal(loc=mu,scale=var,size=(self.n_neighbours,self.n_ftrs))\n #self.neigh_feats = np.random.normal(loc=mu,scale=var,size=(np.random.uniform(int(self.n_neighbours*.8),int(self.n_neighbours*1.2),self.n_ftrs)))\n #conn_str = lambda x,y", "def do_neighbors(self,args):\n\t\tprint(self.location.neighbors)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A context manager that yields keyboard events. The context object is a generator that yields the actual events. An event is the tuple ``(key, pressed)``. The generator is stopped whenever the timeout is triggered. If this happens, ``None`` will be yielded as the last event.
def events(self, timeout=5.0): def generator(q): while True: try: yield q.get(timeout=timeout) except queue.Empty: yield None break # Yield the generator and allow the client to capture events q = queue.Queue() with self.listener( on_press=lambda k: q.put((k, True)), on_release=lambda k: q.put((k, False))): yield generator(q)
[ "def prepare_raw_getkey():\n #this is weird - pygame turns off keyboard repeat by default, which you can re-enable\n #by setting a delay in ms, but \"what the system normally does\" is not an option.\n #it seems like 150ms delay and 15 keys-per-second is normalish.\n pygame.key.set_repeat(150, 1000 / 15)\n\n global raw_getkey\n\n def translate(event):\n if event.type == MOUSEMOTION:\n x, y = event.pos\n return (\"mouse_motion\", x / W, y / H)\n\n if event.type == KEYDOWN:\n log.debug(\"key event: %r\", event.dict)\n if event.key in key_map:\n return key_map[event.key]\n return event.unicode\n\n if event.type == MOUSEBUTTONDOWN:\n x, y = event.pos\n return (\"mouse_down\", x / W, y / H)\n\n def keypump():\n items = []\n event_types = [MOUSEMOTION, KEYDOWN, MOUSEBUTTONDOWN]\n while True:\n if not items:\n if pygame.event.peek(event_types):\n #there's keyboard input pending! great!\n items.extend(pygame.event.get(event_types))\n\n else:\n #there's no keyboard input pending, so we need to take a nap until there is.\n\n #if we get an event we dont care about, we have to put it back\n #but if we put it back, .wait() will give it right back to us\n #so we have to keep it around until we find what we want, then re-add it.\n #ugh.\n ignored_items = []\n while True:\n item = pygame.event.wait()\n if item.type == USEREVENT:\n blink_cursor(item)\n elif item.type not in event_types:\n ignored_items.append(item)\n else:\n items.append(item)\n break\n\n for ignored_item in ignored_items:\n pygame.event.post(ignored_item)\n\n\n yield translate(items.pop(0))\n\n #assign the generator's next() method as raw_getkey\n raw_getkey = keypump().next", "def inkey(self, timeout=None, esc_delay=0.35, _intr_continue=True):\n # TODO(jquast): \"meta sends escape\", where alt+1 would send '\\x1b1',\n # what do we do with that? Surely, something useful.\n # comparator to term.KEY_meta('x') ?\n # TODO(jquast): Ctrl characters, KEY_CTRL_[A-Z], and the rest;\n # KEY_CTRL_\\, KEY_CTRL_{, etc. are not legitimate\n # attributes. comparator to term.KEY_ctrl('z') ?\n def _timeleft(stime, timeout):\n \"\"\"_timeleft(stime, timeout) -> float\n\n Returns time-relative time remaining before ``timeout``\n after time elapsed since ``stime``.\n \"\"\"\n if timeout is not None:\n if timeout is 0:\n return 0\n return max(0, timeout - (time.time() - stime))\n\n resolve = functools.partial(resolve_sequence,\n mapper=self._keymap,\n codes=self._keycodes)\n\n stime = time.time()\n\n # re-buffer previously received keystrokes,\n ucs = u''\n while self._keyboard_buf:\n ucs += self._keyboard_buf.pop()\n\n # receive all immediately available bytes\n while self.kbhit(0):\n ucs += self.getch()\n\n # decode keystroke, if any\n ks = resolve(text=ucs)\n\n # so long as the most immediately received or buffered keystroke is\n # incomplete, (which may be a multibyte encoding), block until until\n # one is received.\n while not ks and self.kbhit(_timeleft(stime, timeout), _intr_continue):\n ucs += self.getch()\n ks = resolve(text=ucs)\n\n # handle escape key (KEY_ESCAPE) vs. escape sequence (which begins\n # with KEY_ESCAPE, \\x1b[, \\x1bO, or \\x1b?), up to esc_delay when\n # received. 
This is not optimal, but causes least delay when\n # (currently unhandled, and rare) \"meta sends escape\" is used,\n # or when an unsupported sequence is sent.\n if ks.code is self.KEY_ESCAPE:\n esctime = time.time()\n while (ks.code is self.KEY_ESCAPE and\n self.kbhit(_timeleft(esctime, esc_delay))):\n ucs += self.getch()\n ks = resolve(text=ucs)\n\n # buffer any remaining text received\n self._keyboard_buf.extendleft(ucs[len(ks):])\n return ks", "def waitKey(delay=...) -> retval:\n ...", "def keyboardLoop(btns,timeout):\n t0 = pg.time.get_ticks()\n btn = None\n while pg.time.get_ticks() - t0 < timeout:\n event = pg.event.wait()\n if event.type == KEYDOWN:\n btn = checkKey(event.key,btns)\n if btn != None: break\n t = pg.time.get_ticks()\n return (btn,t-t0)", "def __enter__(self) -> xkbcommon.Result:\n return reduce(\n lambda _, key: self.keymap.press(key),\n self.keys,\n xkbcommon.Result(0, 0, \"\", \"\", 0, NoModifier, NoModifier, ()),\n )", "def test_yield_keypad():\n @as_subprocess\n def child(kind):\n # given,\n t = TestTerminal(stream=six.StringIO(), force_styling=True)\n expected_output = u''.join((t.smkx, t.rmkx))\n\n # exercise,\n with t.keypad():\n pass\n\n # verify.\n assert (t.stream.getvalue() == expected_output)\n\n child(kind='xterm')", "async def listen(on_press, on_release, stop_or_join='stop'):\n listener = keyboard.Listener(on_press=on_press, on_release=on_release)\n listener.start()\n try:\n yield listener\n finally:\n await asyncio.get_event_loop().run_in_executor(None, getattr(listener, stop_or_join))", "def OnKeyboardInterrupt(handler):\n\n signal.signal(signal.SIGINT, handler)\n yield", "def raw(self):\n if HAS_TTY and self.keyboard_fd is not None:\n # save current terminal mode,\n save_mode = termios.tcgetattr(self.keyboard_fd)\n tty.setraw(self.keyboard_fd, termios.TCSANOW)\n try:\n yield\n finally:\n # restore prior mode,\n termios.tcsetattr(self.keyboard_fd,\n termios.TCSAFLUSH,\n save_mode)\n else:\n yield", "def get_key_press(self, key):\n for keys in KEY_MAPPING:\n if key in keys:\n try:\n self.direction_queue.put(KEY_MAPPING[keys], block=False)\n break\n except queue.Full:\n pass", "def _get_keys(self, read, input_records):\n for i in range(read.value):\n ir = input_records[i]\n\n # Get the right EventType from the EVENT_RECORD.\n # (For some reason the Windows console application 'cmder'\n # [http://gooseberrycreative.com/cmder/] can return '0' for\n # ir.EventType. -- Just ignore that.)\n if ir.EventType in EventTypes:\n ev = getattr(ir.Event, EventTypes[ir.EventType])\n\n # Process if this is a key event. (We also have mouse, menu and\n # focus events.)\n if type(ev) == KEY_EVENT_RECORD and ev.KeyDown:\n for key_press in self._event_to_key_presses(ev):\n yield key_press\n\n elif type(ev) == MOUSE_EVENT_RECORD:\n for key_press in self._handle_mouse(ev):\n yield key_press", "def handle_KeyPress(self, e):\r\n state = e.state & ~(self.qtile.numlockMask)\r\n keysym = self.qtile.conn.keycode_to_keysym(e.detail, state)\r\n if keysym == xkeysyms.keysyms['Tab']:\r\n self.userInput = self.completer.complete(self.userInput)\r\n else:\r\n actual_value = self.completer.actual()\r\n self.completer.reset()\r\n if keysym < 127 and chr(keysym) in string.printable:\r\n # No LookupString in XCB... oh,\r\n # the shame! 
Unicode users beware!\r\n self.userInput += chr(keysym)\r\n elif (keysym == xkeysyms.keysyms['BackSpace'] and\r\n len(self.userInput) > 0):\r\n self.userInput = self.userInput[:-1]\r\n elif keysym == xkeysyms.keysyms['Escape']:\r\n self.active = False\r\n self.bar.widget_ungrab_keyboard()\r\n elif keysym == xkeysyms.keysyms['Return']:\r\n self.active = False\r\n self.bar.widget_ungrab_keyboard()\r\n if self.strict_completer:\r\n self.callback(actual_value or self.userInput)\r\n else:\r\n self.callback(self.userInput)\r\n self._update()", "def kbhit(self, timeout=None, _intr_continue=True):\n # Special care is taken to handle a custom SIGWINCH handler, which\n # causes select() to be interrupted with errno 4 (EAGAIN) --\n # it is ignored, and a new timeout value is derived from the previous,\n # unless timeout becomes negative, because signal handler has blocked\n # beyond timeout, then False is returned. Otherwise, when timeout is 0,\n # we continue to block indefinitely (default).\n stime = time.time()\n check_w, check_x, ready_r = [], [], [None, ]\n check_r = [self.keyboard_fd] if self.keyboard_fd is not None else []\n\n while HAS_TTY and True:\n try:\n ready_r, ready_w, ready_x = select.select(\n check_r, check_w, check_x, timeout)\n except InterruptedError:\n if not _intr_continue:\n return u''\n if timeout is not None:\n # subtract time already elapsed,\n timeout -= time.time() - stime\n if timeout > 0:\n continue\n # no time remains after handling exception (rare)\n ready_r = []\n break\n else:\n break\n\n return False if self.keyboard_fd is None else check_r == ready_r", "def input_loop(handler_func):\n while True:\n try:\n ch = click.getchar()\n except (KeyboardInterrupt, EOFError):\n return\n\n # If it's an escape sequence grab the ascii char code\n if len(ch) > 1:\n escape_ord = ord(ch[0])\n ch = ch[-1]\n else:\n escape_ord = None\n\n if 32 <= ord(ch) <= 126:\n # Only pass \"regular\" keys onto the handler\n\n if escape_ord == 27 or escape_ord == 224:\n # Translate the arrow key escape sequence into the mapped letter\n if ch == \"A\" or ch == \"H\":\n ch = ArrowKeyMapping.UP.value\n elif ch == \"B\" or ch == \"P\":\n ch = ArrowKeyMapping.DOWN.value\n elif ch == \"D\" or ch == \"K\":\n ch = ArrowKeyMapping.LEFT.value\n elif ch == \"C\" or ch == \"M\":\n ch = ArrowKeyMapping.RIGHT.value\n\n handler_func(ch.lower())\n elif ord(ch) == 13:\n # Translate the enter key to a newline\n handler_func(\"\\n\")\n elif ord(ch) == 27:\n # Exit on ESC\n return", "def key_released(self, event):\n pass", "def _poll(self, timeout=None):\n\n\t\tif timeout is None and \\\n\t\t\tnot self._poll_event_handlers:\n\t\t\traise StopIteration(\n\t\t\t\t\"timeout is None and there are no poll() event handlers\")\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tself._poll_event_queue.extend(self._poll_obj.poll(timeout))\n\t\t\t\tbreak\n\t\t\texcept (IOError, select.error) as e:\n\t\t\t\t# Silently handle EINTR, which is normal when we have\n\t\t\t\t# received a signal such as SIGINT (epoll objects may\n\t\t\t\t# raise IOError rather than select.error, at least in\n\t\t\t\t# Python 3.2).\n\t\t\t\tif not (e.args and e.args[0] == errno.EINTR):\n\t\t\t\t\twritemsg_level(\"\\n!!! 
select error: %s\\n\" % (e,),\n\t\t\t\t\t\tlevel=logging.ERROR, noiselevel=-1)\n\t\t\t\tdel e\n\n\t\t\t\t# This typically means that we've received a SIGINT, so\n\t\t\t\t# raise StopIteration in order to break out of our current\n\t\t\t\t# iteration and respond appropriately to the signal as soon\n\t\t\t\t# as possible.\n\t\t\t\traise StopIteration(\"interrupted\")", "def exhaust_kept_commands(self):\n kept_commands = self._kept\n \n if (kept_commands is not None):\n while kept_commands:\n yield kept_commands.pop()\n \n self._kept = None", "def test_controller_events(self):\n with self.assert_event(\n 'Failed to send press',\n on_press=lambda k: getattr(k, 'char', None) == u'a'):\n self.controller.press(u'a')\n with self.assert_event(\n 'Failed to send release',\n on_release=lambda k: getattr(k, 'char', None) == u'a'):\n self.controller.release(u'a')\n\n self.controller.press(pynput.keyboard.Key.enter)\n self.controller.release(pynput.keyboard.Key.enter)\n input()", "def wait_key(self):\n result = None\n if os.name == 'nt':\n result = input(\"Press Enter to continue ...\")\n else:\n fd = sys.stdin.fileno()\n\n oldterm = termios.tcgetattr(fd)\n newattr = termios.tcgetattr(fd)\n newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n try:\n result = sys.stdin.read(1)\n except IOError:\n pass\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yields all events necessary to type a string.
def string_to_events(self, s): for c in s: yield (c, True) yield (c, False)
[ "def translate(self, string, regex=re.compile(r'%\\((\\w+)\\)s')):\r\n substream = None\r\n\r\n def yield_parts(string):\r\n for idx, part in enumerate(regex.split(string)):\r\n if idx % 2:\r\n yield self.values[part]\r\n elif part:\r\n yield (TEXT,\r\n part.replace('\\[', '[').replace('\\]', ']'),\r\n (None, -1, -1)\r\n )\r\n\r\n parts = parse_msg(string)\r\n parts_counter = {}\r\n for order, string in parts:\r\n parts_counter.setdefault(order, []).append(None)\r\n\r\n while parts:\r\n order, string = parts.pop(0)\r\n events = self.events[order].pop(0)\r\n parts_counter[order].pop()\r\n\r\n for event in events:\r\n if event[0] is SUB_START:\r\n substream = []\r\n elif event[0] is SUB_END:\r\n # Yield a substream which might have directives to be\r\n # applied to it (after translation events)\r\n yield SUB, (self.subdirectives[order], substream), event[2]\r\n substream = None\r\n elif event[0] is TEXT:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n elif event[0] is START:\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n elif event[0] is END:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event\r\n elif event[0] is EXPR:\r\n # These are handled on the strings itself\r\n continue\r\n else:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event", "def type_names(self) -> Generator[str, None, None]:\n for k in self:\n if isinstance(k, str):\n yield k\n continue", "def generate_strings(self):\n yield self.begin()\n if self.text():\n yield self.text()\n for e in self:\n if type(e) is str:\n yield e\n else:\n for s in e.generate_strings():\n yield s\n yield self.end()", "def scan(str_, trace=False):\n iter_ = ScannerIter(CScanner(), str_, trace)\n\n for tok in iter_:\n yield tok\n\n # The top level scanner should always consume all input\n assert iter_.done", "def events():\n for el in _list_events():\n yield Event(el)", "def type_gen(types: List[str]) -> Generator[str, None, None]:\n\n t_i = 0\n while t_i < len(types):\n if types[t_i] == '...':\n t_i = 0\n yield types[t_i]\n t_i += 1\n elif types[t_i][-3::] == '...':\n yield types[t_i][:-3:]\n else:\n yield types[t_i]\n t_i += 1\n # If reached the end, raise error\n yield('Type string \"' + \" , \".join(types) + '\" is missing types')", "def string(self, string):\n\n self.__emulate_keyboard('type', string)", "def _typed(self, event) -> None:\n\t\tself._active.typed(event)", "def list_event_types():\n print('\\nValid event types:')\n for etype in EVENT_TYPES:\n print(' {0}'.format(etype))", "def _ensure(stream):\r\n stream = iter(stream)\r\n event = next(stream)\r\n\r\n # Check whether the iterable is a real markup event stream by examining the\r\n # first item it yields; if it's not we'll need to do some conversion\r\n if type(event) is not tuple or len(event) != 3:\r\n 
for event in chain([event], stream):\r\n if hasattr(event, 'totuple'):\r\n event = event.totuple()\r\n else:\r\n event = TEXT, str(event), (None, -1, -1)\r\n yield event\r\n return\r\n\r\n # This looks like a markup event stream, so we'll just pass it through\r\n # unchanged\r\n yield event\r\n for event in stream:\r\n yield event", "def stringReceived(self, string):\n raise NotImplementedError()", "def GenTypesReady(self):\n assert not self.nested, 'Stack was not fully processed'\n for cppname, _, _, dict_ in self.types_init:\n self.init.extend('if (PyDict_SetItemString(%s->tp_dict, \"%s\", %s) < 0)'\n ' goto err;' % (cppname, n, o) for n, o in dict_)\n for s in gen.ReadyFunction(self.types_init):\n yield s", "def process_str(node, *_):\n # TODO: According to new specification, the following structure\n # should be used: {\"type\": \"literal, \"value\": {\"dtype\": <type>,\n # \"value\": <value>}}. Confirm with Clay.\n return [{\"type\": \"literal\", \"dtype\": \"string\", \"value\": node.s}]", "def _generator(events, inventory, rf=False):\n channels = inventory.get_contents()['channels']\n stations = list(set(ch.rsplit('.', 1)[0] for ch in channels))\n one_channel = {ch.rsplit('.', 1)[0]: ch for ch in channels}\n if events is not None:\n yield len(stations) * len(events)\n for event in events:\n for station in stations:\n seed_id = one_channel[station][:-1] + '?'\n net, sta, loc, cha = seed_id.split('.')\n stats = {'network': net, 'station': sta, 'location': loc,\n 'channel': cha}\n if rf:\n stats['event'] = event\n #stats['seed_id'] = seed_id\n coords = inventory.get_coordinates(one_channel[station])\n yield stats, event, coords\n else:\n stats['event_time'] = event.preferred_origin()['time']\n yield stats\n else:\n yield len(stations)\n for station in stations:\n net, sta, loc, cha = one_channel[station].split('.')\n stats = {'network': net, 'station': sta, 'location': loc,\n 'channel': cha[:-1] + '?',\n 'event_time': _DummyUTC()}\n yield stats", "def __init__(self, typestr):\n from collections import defaultdict\n self.typestr = typestr\n self.ndims = 0\n self.funcs = []\n self.types = self.__class__.expandFTypeStr(typestr)\n #log(\"For typestr %s, got types of %s\" % (typestr, self.types))\n self.funcs = [self.parse(s) for s in self.types]\n self.times = defaultdict(float)\n self.times.update(typestr=typestr, types=self.types)", "def walk(string: str, step: int) -> str:\n for i in range(0, len(string), step):\n yield string[i:i+step]", "def generate_event():\n pass", "def parse_event(self, event):", "def event_type(et):\n def cmd_method(meth):\n def command(self, event, arg):\n if event.type == et:\n meth(self, event, arg)\n return command\n return cmd_method" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that a single key can be tapped
def test_tap(self): self.notify('Press and release "a"') self.assert_keys( 'Failed to register event', ('a', True), ('a', False))
[ "def tap_and_check(\n self, key: str, keysym: str, group: int = BASE_GROUP, level: int = BASE_LEVEL\n ) -> xkbcommon.Result:\n r = self.tap(key)\n assert r.group == group\n assert r.level == level\n assert r.keysym == keysym\n # Return the result for optional further tests\n return r", "def key_pressed(self, key):\n return False", "def press_key(k):\n\tif pygame.key.get_pressed()[k]:\n\t\t\treturn True", "def test_attemptByKey(self):\n self.failUnless(self.reset.attemptByKey(\n self.reset.newAttemptForUser(u'joe@divmod.com').key))\n self.failIf(self.reset.attemptByKey(u'not really a key'))", "def press_kp1():\n\tif pygame.key.get_pressed()[pygame.K_KP1]:\n\t\treturn True", "def test_keys(self):\n from pynput.keyboard._base import Key\n for key in Key:\n self.assertTrue(\n hasattr(pynput.keyboard.Key, key.name),\n '%s is not defined for the current platform' % key.name)", "def press_kp0():\n\tif pygame.key.get_pressed()[pygame.K_KP0]:\n\t\treturn True", "def is_pressed (self, key):\n return pygame.key.get_pressed () [key]", "def test_alt_pressed(self):\n # We do not test alt_r, since that does not necessarily exist on the\n # keyboard\n for key in (\n pynput.keyboard.Key.alt,\n pynput.keyboard.Key.alt_l):\n self.controller.press(key)\n self.assertTrue(\n self.controller.alt_pressed,\n 'alt_pressed was not set with %s down' % key.name)\n self.controller.release(key)\n self.assertFalse(\n self.controller.alt_pressed,\n 'alt_pressed was incorrectly set')", "def test_pressed_is_release(self):\n with self.capture() as collect:\n with self.controller.pressed(pynput.keyboard.Key.shift):\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n with self.controller.pressed(pynput.keyboard.Key.shift):\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n\n self.assertIn(\n u'AaA',\n collect(),\n 'Keys were not propertly released')", "def specialKeyPressed(self, key):\n pass", "def test_touch_dead(self):\n with self.capture() as collect:\n dead = pynput.keyboard.KeyCode.from_dead(u'~')\n self.controller.press(dead)\n self.controller.release(dead)\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n self.assertIn(\n u'ã',\n collect(),\n 'Failed to apply dead key')", "def fireAccelerator(self, e):\n stroke = KeyStroke.getKeyStrokeForEvent(e)\n item = accelerators.get(stroke)\n if item != None:\n item.doClick(0)\n return True\n return False", "def keyPressed(self, key):\n pass", "def key_press(key):\n try:\n key_name = key.upper()\n except AttributeError:\n raise ValueError('invalid literal for key_press(): {}'.format(key))\n try:\n hex_code = KEYS[key_name]\n except KeyError:\n pass\n else:\n flags = KEYEVENTF_SCANCODE\n send_keyboard_input(hex_code, flags)\n return\n try:\n hex_code = EXTENDED_KEYS[key_name]\n except KeyError:\n raise ValueError('invalid literal for key_press(): {}'.format(key))\n else:\n flags = win32con.KEYEVENTF_EXTENDEDKEY | KEYEVENTF_SCANCODE\n send_keyboard_input(hex_code, flags)", "def test_controller_events(self):\n with self.assert_event(\n 'Failed to send press',\n on_press=lambda k: getattr(k, 'char', None) == u'a'):\n self.controller.press(u'a')\n with self.assert_event(\n 'Failed to send release',\n on_release=lambda k: getattr(k, 'char', None) == u'a'):\n self.controller.release(u'a')\n\n self.controller.press(pynput.keyboard.Key.enter)\n self.controller.release(pynput.keyboard.Key.enter)\n input()", "def test_touchscreen(self):\r\n vmajor, _, _ = pygame.version.vernum\r\n 
if vmajor < 2:\r\n return\r\n\r\n menu = MenuUtils.generic_menu(title='mainmenu', touchscreen_enabled=True)\r\n menu.mainloop(surface, bgfun=dummy_function)\r\n\r\n # Add a menu and a method that set a function\r\n event_val = [False]\r\n\r\n def _some_event():\r\n event_val[0] = True\r\n return 'the value'\r\n\r\n # Add some widgets\r\n button = menu.add_button('button', _some_event)\r\n\r\n # Check touch\r\n click_pos = PygameUtils.get_middle_rect(button.get_rect())\r\n menu.update(PygameUtils.touch_click(click_pos[0], click_pos[1], normalize=False)) # Event must be normalized\r\n self.assertFalse(event_val[0])\r\n\r\n menu.update(PygameUtils.touch_click(click_pos[0], click_pos[1], menu=menu))\r\n self.assertTrue(event_val[0])\r\n event_val[0] = False\r\n self.assertEqual(menu.get_selected_widget().get_id(), button.get_id())\r\n btn = menu.get_selected_widget() # type: Button\r\n self.assertTrue(btn.get_selected_time() >= 0)", "def press_kp_equals():\n\tif pygame.key.get_pressed()[pygame.K_KP_EQUALS]:\n\t\treturn True", "def press_kp9():\n\tif pygame.key.get_pressed()[pygame.K_KP9]:\n\t\treturn True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that the enter key can be tapped
def test_enter(self): self.notify('Press <enter>') self.assert_keys( 'Failed to register event', (pynput.keyboard.Key.enter, True))
[ "def press_kp_enter():\n\tif pygame.key.get_pressed()[pygame.K_KP_ENTER]:\n\t\treturn True", "def press_enter():\n\tif pygame.key.get_pressed()[pygame.K_RETURN]:\n\t\treturn True", "def press_enter():\n return input('Press ENTER to continue...')", "def _enter_key( self, event ) :\n w = event.widget\n self._run_command( w )", "def _sendenter(self) -> None:\n call([self.adbpath, \"shell\", \"input\", \"keyevent\", Adbkeycodes.KEYCODE_ENTER])", "def enter_hit(self, event):\n self.search_button()", "def test_controller_events(self):\n with self.assert_event(\n 'Failed to send press',\n on_press=lambda k: getattr(k, 'char', None) == u'a'):\n self.controller.press(u'a')\n with self.assert_event(\n 'Failed to send release',\n on_release=lambda k: getattr(k, 'char', None) == u'a'):\n self.controller.release(u'a')\n\n self.controller.press(pynput.keyboard.Key.enter)\n self.controller.release(pynput.keyboard.Key.enter)\n input()", "def passkey_enter(passkey: int, /) -> None:\n ...", "def on_key_pressed(self, obj, event):\n if event.type == Gdk.EventType.KEY_PRESS:\n if event.keyval in (_RETURN, _KP_ENTER):\n self.on_booklist_ok_clicked(obj)\n #emit OK response on dialog to close it automatically\n self.top.response(-5)\n return True\n return False", "def pressed_enter(self):\n if self.persist[\"players\"][self.index] != \"EMPTY\":\n self.done = True\n self.next = \"CONFIRM\"", "def EnterKeyEvent(self, event):\n # Handle auto-complete first.\n if self.SCIAutoCActive():\n self.SCIAutoCComplete()\n self.SCIAutoCCancel()\n # Call the IDLE event.\n return self.bindings.fire(\"<<newline-and-indent>>\", event)", "def OnTInputTextEnter(self, event):\r\n \r\n #-----------------------------\r\n # If Player JUST pressed Enter\r\n #-----------------------------\r\n \r\n # If the player hit the Enter key without typing a command we skip the\r\n # event (pretend it didn't happen) and return immediately. 
This allows\r\n # the player to type another command.\r\n \r\n if not len(self.TInput.GetValue()):\r\n event.Skip()\r\n return\r\n\r\n #-----------------------\r\n # Process Player's Input\r\n #-----------------------\r\n \r\n # This is the routine that actually processes the player's input,\r\n # calling the parser, which in turn breaks down the typed command and\r\n # executes it, thus running the game.\r\n \r\n self.ProcessPlayerInput()\r\n # event.Skip()\r", "def key_pressed(self, key):\n return False", "def on_return_pressed(self, event):\n # Don't skip the event here or it will result in an annoying\n # Windows system beep.\n self.shell_obj._field_return_pressed()", "def check_input(self) -> None:\n self.move_user_cursor()\n\n if self.program.back_key:\n self.program.curr_menu = self.program.main_menu\n self.run_display = False\n\n # ENTER starts simulation for user\n elif self.program.enter_key:\n if self.state != 'next_win':\n self.selected_maze = self.state\n else: # if self.state == 'next_window'\n maze_info = get_maze_details(self.selected_maze)\n self.program.maze_program_run = MazeProgramRun(self.program, maze_info[0],\n maze_info[1])\n if self.selected_maze == 'map4':\n self.program.maze_program_run.map_4 = True\n self.program.curr_menu = self.program.maze_program_run\n\n self.run_display = False", "def keypress (self, size, key):\n if key is 'enter':\n self.item_selected()\n return\n self.cb_cursor(key)\n return key", "def key_pressed_dispatch(self, event):\r\n try:\r\n self.key_map[event.char]()\r\n except KeyError:\r\n try:\r\n self.key_map[event.keycode]()\r\n except KeyError:\r\n print(\"No handler for key \" + (\"enter\" if event.keycode == 13 else event.char) + \"(\" + str(\r\n event.keycode) + \")\")", "def handle_KeyPress(self, e):\r\n state = e.state & ~(self.qtile.numlockMask)\r\n keysym = self.qtile.conn.keycode_to_keysym(e.detail, state)\r\n if keysym == xkeysyms.keysyms['Tab']:\r\n self.userInput = self.completer.complete(self.userInput)\r\n else:\r\n actual_value = self.completer.actual()\r\n self.completer.reset()\r\n if keysym < 127 and chr(keysym) in string.printable:\r\n # No LookupString in XCB... oh,\r\n # the shame! Unicode users beware!\r\n self.userInput += chr(keysym)\r\n elif (keysym == xkeysyms.keysyms['BackSpace'] and\r\n len(self.userInput) > 0):\r\n self.userInput = self.userInput[:-1]\r\n elif keysym == xkeysyms.keysyms['Escape']:\r\n self.active = False\r\n self.bar.widget_ungrab_keyboard()\r\n elif keysym == xkeysyms.keysyms['Return']:\r\n self.active = False\r\n self.bar.widget_ungrab_keyboard()\r\n if self.strict_completer:\r\n self.callback(actual_value or self.userInput)\r\n else:\r\n self.callback(self.userInput)\r\n self._update()", "def keyHandler(event:Event):\r\n if event.keysym == \"Return\": # Enter key\r\n addTaskClick()\r\n elif event.keysym == \"Delete\": # Delete Key\r\n removeSelectedClick()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that the modifier keys can be tapped
def test_modifier(self): from pynput.keyboard import Key for key in ( (Key.alt, Key.alt_l, Key.alt_r), (Key.ctrl, Key.ctrl_l, Key.ctrl_r), (Key.shift, Key.shift_l, Key.shift_r)): self.notify('Press <%s>' % key[0].name) self.assert_keys( 'Failed to register event', (key, True))
[ "def test_modifier_and_normal(self):\n from pynput.keyboard import Key\n self.notify('Press a, <ctrl>, a')\n self.assert_keys(\n 'Failed to register event',\n ('a', True),\n ('a', False),\n ((Key.ctrl, Key.ctrl_l, Key.ctrl_r), True),\n ((Key.ctrl, Key.ctrl_l, Key.ctrl_r), False),\n ('a', True),\n ('a', False))", "def test_alt_pressed(self):\n # We do not test alt_r, since that does not necessarily exist on the\n # keyboard\n for key in (\n pynput.keyboard.Key.alt,\n pynput.keyboard.Key.alt_l):\n self.controller.press(key)\n self.assertTrue(\n self.controller.alt_pressed,\n 'alt_pressed was not set with %s down' % key.name)\n self.controller.release(key)\n self.assertFalse(\n self.controller.alt_pressed,\n 'alt_pressed was incorrectly set')", "def in_key_modifier(_modifier_state, _modifier_mask):\n\n assert(isinstance(_modifier_state, QtCore.Qt.KeyboardModifiers))\n\n # no modifier is a special case\n if (_modifier_mask == QtCore.Qt.NoModifier):\n if (_modifier_state == QtCore.Qt.NoModifier):\n return True\n else:\n return False\n\n # depends on operator'&' of Qt.KeyboardModifiers\n if (_modifier_state & _modifier_mask == _modifier_mask):\n return True\n\n return False", "def test_shift_pressed(self):\n for key in (\n pynput.keyboard.Key.shift,\n pynput.keyboard.Key.shift_l,\n pynput.keyboard.Key.shift_r):\n self.controller.press(key)\n self.assertTrue(\n self.controller.shift_pressed,\n 'shift_pressed was not set with %s down' % key.name)\n self.controller.release(key)\n self.assertFalse(\n self.controller.shift_pressed,\n 'shift_pressed was incorrectly set')", "def test_pressed_shift(self):\n with self.capture() as collect:\n with self.controller.pressed(pynput.keyboard.Key.shift):\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n with self.controller.modifiers as modifiers:\n self.assertIn(\n pynput.keyboard.Key.shift,\n modifiers)\n\n self.assertIn(\n u'A',\n collect(),\n 'shift+a did not yield \"A\"')", "def test_right_click_input(self):\n self.button.right_click_input()\n self.assertEqual(self.label.window_text(), \"RightClick\")\n\n # def test_press_move_release(self):\n # pass", "def test_ShiftAlt(self, keymap: Keymap):\n r = keymap.tap_and_check(\"AC10\", \"semicolon\", level=1)\n assert r.active_mods == NoModifier\n with keymap.key_down(\"LFSH\", \"LALT\"):\n r = keymap.tap_and_check(\"AC10\", \"colon\", level=2)\n assert r.active_mods == Shift | Mod1\n assert r.consumed_mods == Shift", "def test_LevelThree(self, keymap: Keymap, mod_key: str):\n with keymap.key_down(mod_key):\n r = keymap.tap_and_check(\"AD01\", \"adiaeresis\", level=3)\n assert r.active_mods == Mod5 == r.consumed_mods\n with keymap.key_down(\"LFSH\"):\n r = keymap.tap_and_check(\"AD01\", \"Adiaeresis\", level=4)\n assert r.active_mods == Shift | Mod5 == r.consumed_mods", "def test_tap(self):\n self.notify('Press and release \"a\"')\n self.assert_keys(\n 'Failed to register event',\n ('a', True), ('a', False))", "def test_shift_pressed_caps_lock(self):\n self.controller.press(pynput.keyboard.Key.caps_lock)\n self.controller.release(pynput.keyboard.Key.caps_lock)\n self.assertTrue(\n self.controller.shift_pressed,\n 'shift_pressed was not set with caps lock toggled')\n\n self.controller.press(pynput.keyboard.Key.caps_lock)\n self.controller.release(pynput.keyboard.Key.caps_lock)\n self.assertFalse(\n self.controller.shift_pressed,\n 'shift_pressed was not deactivated with caps lock toggled')", "def tap_and_check(\n self, key: str, keysym: str, group: int = BASE_GROUP, level: int = BASE_LEVEL\n ) 
-> xkbcommon.Result:\n r = self.tap(key)\n assert r.group == group\n assert r.level == level\n assert r.keysym == keysym\n # Return the result for optional further tests\n return r", "def test_pressed_is_release(self):\n with self.capture() as collect:\n with self.controller.pressed(pynput.keyboard.Key.shift):\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n with self.controller.pressed(pynput.keyboard.Key.shift):\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n\n self.assertIn(\n u'AaA',\n collect(),\n 'Keys were not propertly released')", "def test_is_keyboard_focusable(self):\n edit = self.dlg.TestLabelEdit.find()\n label = self.dlg.TestLabel.find()\n button = self.dlg.by(class_name=\"Button\",\n name=\"OK\").find()\n self.assertEqual(button.is_keyboard_focusable(), True)\n self.assertEqual(edit.is_keyboard_focusable(), True)\n self.assertEqual(label.is_keyboard_focusable(), False)", "def press_right_alt():\n\tif pygame.key.get_pressed()[pygame.K_RALT]:\n\t\treturn True", "def verify_modifiers(self,command,afmt,**kwargs):\n # optomux command format contains 'modifiers'\n if 'modifiers' in afmt:\n if 'modifiers' in kwargs:\n if isinstance(kwargs['modifiers'],tuple):\n if len(kwargs['modifiers']) == 1:\n return self.verify_single_modifier(command,kwargs['modifiers'][0])\n elif len(kwargs['modifiers']) == 2:\n return self.verify_double_modifier(command,kwargs['modifiers'])\n elif isinstance(kwargs['modifiers'],str) \\\n or isinstance(kwargs['modifiers'],int):\n return self.verify_single_modifier(command,kwargs['modifiers'])\n return ('E',-23)\n return (0,'')", "def test_ctrl_pressed(self):\n for key in (\n pynput.keyboard.Key.ctrl,\n pynput.keyboard.Key.ctrl_l,\n pynput.keyboard.Key.ctrl_r):\n self.controller.press(key)\n self.assertTrue(\n self.controller.ctrl_pressed,\n 'ctrl_pressed was not set with %s down' % key.name)\n self.controller.release(key)\n self.assertFalse(\n self.controller.ctrl_pressed,\n 'ctrl_pressed was incorrectly set')", "def keyboard_valid(self):\n\n if globals.KEYBOARD_STATE['TARGET'] == []:\n return True\n\n for note in globals.KEYBOARD_STATE['TARGET']:\n if note not in globals.KEYBOARD_STATE['RIGHT']:\n return False\n\n if len(globals.KEYBOARD_STATE['WRONG']) >= 2:\n return False\n\n return True", "def _control_key_down(self, modifiers, include_command=False):\n # Note that on Mac OS, ControlModifier corresponds to the Command key\n # while MetaModifier corresponds to the Control key.\n if sys.platform == 'darwin':\n down = include_command and (modifiers & QtCore.Qt.ControlModifier)\n return bool(down) ^ bool(modifiers & QtCore.Qt.MetaModifier)\n else:\n return bool(modifiers & QtCore.Qt.ControlModifier)", "def altPressed(self):\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that the modifier keys do not stick
def test_modifier_and_normal(self): from pynput.keyboard import Key self.notify('Press a, <ctrl>, a') self.assert_keys( 'Failed to register event', ('a', True), ('a', False), ((Key.ctrl, Key.ctrl_l, Key.ctrl_r), True), ((Key.ctrl, Key.ctrl_l, Key.ctrl_r), False), ('a', True), ('a', False))
[ "def test_modifier(self):\n from pynput.keyboard import Key\n for key in (\n (Key.alt, Key.alt_l, Key.alt_r),\n (Key.ctrl, Key.ctrl_l, Key.ctrl_r),\n (Key.shift, Key.shift_l, Key.shift_r)):\n self.notify('Press <%s>' % key[0].name)\n self.assert_keys(\n 'Failed to register event',\n (key, True))", "def in_key_modifier(_modifier_state, _modifier_mask):\n\n assert(isinstance(_modifier_state, QtCore.Qt.KeyboardModifiers))\n\n # no modifier is a special case\n if (_modifier_mask == QtCore.Qt.NoModifier):\n if (_modifier_state == QtCore.Qt.NoModifier):\n return True\n else:\n return False\n\n # depends on operator'&' of Qt.KeyboardModifiers\n if (_modifier_state & _modifier_mask == _modifier_mask):\n return True\n\n return False", "def test_ShiftAlt(self, keymap: Keymap):\n r = keymap.tap_and_check(\"AC10\", \"semicolon\", level=1)\n assert r.active_mods == NoModifier\n with keymap.key_down(\"LFSH\", \"LALT\"):\n r = keymap.tap_and_check(\"AC10\", \"colon\", level=2)\n assert r.active_mods == Shift | Mod1\n assert r.consumed_mods == Shift", "def _only_shifts(self, modifiers):\n if not modifiers or len(modifiers) > 2:\n return False\n if len(modifiers) == 2:\n return 'left shift' in modifiers and 'right shift' in modifiers\n if len(modifiers) == 1:\n return 'left shift' in modifiers or 'right shift' in modifiers", "def test_shift_pressed_caps_lock(self):\n self.controller.press(pynput.keyboard.Key.caps_lock)\n self.controller.release(pynput.keyboard.Key.caps_lock)\n self.assertTrue(\n self.controller.shift_pressed,\n 'shift_pressed was not set with caps lock toggled')\n\n self.controller.press(pynput.keyboard.Key.caps_lock)\n self.controller.release(pynput.keyboard.Key.caps_lock)\n self.assertFalse(\n self.controller.shift_pressed,\n 'shift_pressed was not deactivated with caps lock toggled')", "def _control_key_down(self, modifiers, include_command=False):\n # Note that on Mac OS, ControlModifier corresponds to the Command key\n # while MetaModifier corresponds to the Control key.\n if sys.platform == 'darwin':\n down = include_command and (modifiers & QtCore.Qt.ControlModifier)\n return bool(down) ^ bool(modifiers & QtCore.Qt.MetaModifier)\n else:\n return bool(modifiers & QtCore.Qt.ControlModifier)", "def test_alt_pressed(self):\n # We do not test alt_r, since that does not necessarily exist on the\n # keyboard\n for key in (\n pynput.keyboard.Key.alt,\n pynput.keyboard.Key.alt_l):\n self.controller.press(key)\n self.assertTrue(\n self.controller.alt_pressed,\n 'alt_pressed was not set with %s down' % key.name)\n self.controller.release(key)\n self.assertFalse(\n self.controller.alt_pressed,\n 'alt_pressed was incorrectly set')", "def test_shift_pressed(self):\n for key in (\n pynput.keyboard.Key.shift,\n pynput.keyboard.Key.shift_l,\n pynput.keyboard.Key.shift_r):\n self.controller.press(key)\n self.assertTrue(\n self.controller.shift_pressed,\n 'shift_pressed was not set with %s down' % key.name)\n self.controller.release(key)\n self.assertFalse(\n self.controller.shift_pressed,\n 'shift_pressed was incorrectly set')", "def keyboard_valid(self):\n\n if globals.KEYBOARD_STATE['TARGET'] == []:\n return True\n\n for note in globals.KEYBOARD_STATE['TARGET']:\n if note not in globals.KEYBOARD_STATE['RIGHT']:\n return False\n\n if len(globals.KEYBOARD_STATE['WRONG']) >= 2:\n return False\n\n return True", "def altPressed(self):\n return False", "def test_keyring_not_created_world_writable(self):\n self.set_password('system', 'user', 'password')\n\n assert 
os.path.exists(self.keyring.file_path)\n group_other_perms = os.stat(self.keyring.file_path).st_mode & 0o077\n assert group_other_perms == 0", "def key_pressed(self, key):\n return False", "def test_LevelThree(self, keymap: Keymap, mod_key: str):\n with keymap.key_down(mod_key):\n r = keymap.tap_and_check(\"AD01\", \"adiaeresis\", level=3)\n assert r.active_mods == Mod5 == r.consumed_mods\n with keymap.key_down(\"LFSH\"):\n r = keymap.tap_and_check(\"AD01\", \"Adiaeresis\", level=4)\n assert r.active_mods == Shift | Mod5 == r.consumed_mods", "def press_shift():\n\tif press_key(pygame.K_LSHIFT) or press_key(pygame.K_RSHIFT):\n\t\treturn True", "def abnt2_trema():\n\tif press_key(pygame.K_6) and press_shift():\n\t\treturn True", "def test_pressed_shift(self):\n with self.capture() as collect:\n with self.controller.pressed(pynput.keyboard.Key.shift):\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n with self.controller.modifiers as modifiers:\n self.assertIn(\n pynput.keyboard.Key.shift,\n modifiers)\n\n self.assertIn(\n u'A',\n collect(),\n 'shift+a did not yield \"A\"')", "def _only_right_alt(self, modifiers):\n if not modifiers or len(modifiers) > 1:\n return False\n return 'right alt' in modifiers", "def test_touch_dead(self):\n with self.capture() as collect:\n dead = pynput.keyboard.KeyCode.from_dead(u'~')\n self.controller.press(dead)\n self.controller.release(dead)\n self.controller.press(u'a')\n self.controller.release(u'a')\n\n self.assertIn(\n u'ã',\n collect(),\n 'Failed to apply dead key')", "def test_keys(self):\n from pynput.keyboard._base import Key\n for key in Key:\n self.assertTrue(\n hasattr(pynput.keyboard.Key, key.name),\n '%s is not defined for the current platform' % key.name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that options are correctly set on OSX
def test_options_darwin(self): self.assertTrue( pynput.keyboard.Listener( darwin_test=True, win32_test=False, xorg_test=False)._options['test'])
[ "def test_options_win32(self):\n self.assertTrue(\n pynput.keyboard.Listener(\n darwin_test=False,\n win32_test=True,\n xorg_test=False)._options['test'])", "def test_get_options(self):\n pass", "def validate_options():\n if os.environ.get(\"POAP_PHASE\", None) == \"USB\" and options[\"mode\"] == \"personality\":\n abort(\"POAP Personality is not supported via USB!\")\n \n os.system(\"rm -rf /bootflash/poap_files\")\n os.system(\"rm -rf /bootflash_sup-remote/poap_files\")\n # Compare the list of what options users have to what options we actually support.\n supplied_options = set(options.keys())\n # Anything extra shouldn't be there\n invalid_options = supplied_options.difference(valid_options)\n for option in invalid_options:\n poap_log(\"Invalid option detected: %s (check spelling, capitalization, and underscores)\" %\n option)\n if len(invalid_options) > 0:\n abort()", "def test_extract_options():\n options = extract_options(CONF)\n assert options == OPTIONS", "def _check_option_support(options):\n for opt in options:\n if _is_option_supported(opt) is None:\n try:\n cmd = ipmitool_command_options[opt]\n out, err = utils.execute(*cmd)\n except processutils.ProcessExecutionError:\n _is_option_supported(opt, False)\n else:\n _is_option_supported(opt, True)\n\n return", "def toolHasOptions():\n pass", "def test_cli_invalid_option(self):\n returncode, output = run_cli(main, \"-x\", merged=True)\n assert returncode != 0\n assert \"Error:\" in output", "def test_get_options_expirations(self):\n pass", "def test_device_option(build_command):\n options = build_command.parse_options([\"-d\", \"myphone\"])\n\n assert options == {\"udid\": \"myphone\", \"update\": False}", "def ValidateOptions(self, opt, args):", "def testParseOptions(self):\n options = cli_test_lib.TestOptions()\n options.preferred_language = 'is'\n\n test_tool = tools.CLITool()\n language.LanguageArgumentsHelper.ParseOptions(options, test_tool)\n\n self.assertEqual(test_tool._preferred_language, options.preferred_language)\n\n with self.assertRaises(errors.BadConfigObject):\n language.LanguageArgumentsHelper.ParseOptions(options, None)", "def test_get_option_strikes_realtime(self):\n pass", "def _check_options(self, p, idx, feature):\n if self.options.has_feature(feature):\n return\n self._add_error('Feature \"%s\" is not allowed by options' % feature,\n p.lineno(idx), p.lexpos(idx))", "def os_test_macos():\n cli.log.info(\"Detected {fg_cyan}macOS.\")\n\n return True", "def test_get_option(self, debug_session, tdevice):\n debug_session.connect()\n\n result = debug_session.get_option(tdevice[\"option\"])\n assert result == False", "def test_check_options_exception(self, hp, opts):\n with pytest.raises(ValueError, match=\"XXX\"):\n check_is_in_options(hp, opts, msg=\"XXX\")", "def test_set_unknown_option(self):\n self.settings[u\"General.quiet\"] = True\n self.assertEqual(self.settings[\"General.quiet\"], True)", "def wrong_option():\n return '-1'", "def testSystemOptions(self):\n warnings.filterwarnings('ignore', category=CharmmPSFWarning)\n psf = CharmmPsfFile('systems/ala3_solv.psf',\n periodicBoxVectors=(Vec3(32.7119500, 0, 0)*angstroms, Vec3(0, 32.9959600, 0)*angstroms, Vec3(0, 0, 33.0071500)*angstroms))\n crd = CharmmCrdFile('systems/ala3_solv.crd')\n params = CharmmParameterSet('systems/par_all36_prot.prm',\n 'systems/toppar_water_ions.str')\n\n # Check some illegal options\n self.assertRaises(ValueError, lambda:\n psf.createSystem(params, nonbondedMethod=5))\n self.assertRaises(TypeError, lambda:\n 
psf.createSystem(params, nonbondedMethod=PME,\n nonbondedCutoff=1*radian)\n )\n self.assertRaises(TypeError, lambda:\n psf.createSystem(params, nonbondedMethod=PME,\n switchDistance=1*radian)\n )\n\n # Check what should be some legal options\n psf.createSystem(params, nonbondedMethod=PME, switchDistance=0.8,\n nonbondedCutoff=1.2)\n psf.createSystem(params, nonbondedMethod=PME, switchDistance=0.8,\n nonbondedCutoff=1.2*nanometer)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that options are correctly set on Windows
def test_options_win32(self): self.assertTrue( pynput.keyboard.Listener( darwin_test=False, win32_test=True, xorg_test=False)._options['test'])
[ "def test_options_darwin(self):\n self.assertTrue(\n pynput.keyboard.Listener(\n darwin_test=True,\n win32_test=False,\n xorg_test=False)._options['test'])", "def _check_option_support(options):\n for opt in options:\n if _is_option_supported(opt) is None:\n try:\n cmd = ipmitool_command_options[opt]\n out, err = utils.execute(*cmd)\n except processutils.ProcessExecutionError:\n _is_option_supported(opt, False)\n else:\n _is_option_supported(opt, True)\n\n return", "def test_get_options(self):\n pass", "def verify_use_incredibuild_win(ctx, option_name, value):\t\n\tif not _is_user_option_true(value):\n\t\treturn (True,\"\",\"\")\t\n\t(res, warning, error) = _verify_incredibuild_licence('Make && Build Tools Extension Package', 'Windows')\t\n\treturn (res, warning, error)", "def toolHasOptions():\n pass", "def test_get_options_expirations(self):\n pass", "def validate_options():\n if os.environ.get(\"POAP_PHASE\", None) == \"USB\" and options[\"mode\"] == \"personality\":\n abort(\"POAP Personality is not supported via USB!\")\n \n os.system(\"rm -rf /bootflash/poap_files\")\n os.system(\"rm -rf /bootflash_sup-remote/poap_files\")\n # Compare the list of what options users have to what options we actually support.\n supplied_options = set(options.keys())\n # Anything extra shouldn't be there\n invalid_options = supplied_options.difference(valid_options)\n for option in invalid_options:\n poap_log(\"Invalid option detected: %s (check spelling, capitalization, and underscores)\" %\n option)\n if len(invalid_options) > 0:\n abort()", "def test_extract_options():\n options = extract_options(CONF)\n assert options == OPTIONS", "def test_cli_invalid_option(self):\n returncode, output = run_cli(main, \"-x\", merged=True)\n assert returncode != 0\n assert \"Error:\" in output", "def test_cli_option_errors(self):\n stderr = self.getCliErrorMessages(\n args=[\"__non_existent_wrapper__\", \"__non_existent_script__\"]\n )\n self.assertIn(\n \"Could not resolve '__non_existent_wrapper__'\",\n stderr,\n \"Wrong invalid option message\",\n )", "def test_set_unknown_option(self):\n self.settings[u\"General.quiet\"] = True\n self.assertEqual(self.settings[\"General.quiet\"], True)", "def os_test_windows():\n cli.log.info(\"Detected {fg_cyan}Windows.\")\n\n return True", "def testSystemOptions(self):\n warnings.filterwarnings('ignore', category=CharmmPSFWarning)\n psf = CharmmPsfFile('systems/ala3_solv.psf',\n periodicBoxVectors=(Vec3(32.7119500, 0, 0)*angstroms, Vec3(0, 32.9959600, 0)*angstroms, Vec3(0, 0, 33.0071500)*angstroms))\n crd = CharmmCrdFile('systems/ala3_solv.crd')\n params = CharmmParameterSet('systems/par_all36_prot.prm',\n 'systems/toppar_water_ions.str')\n\n # Check some illegal options\n self.assertRaises(ValueError, lambda:\n psf.createSystem(params, nonbondedMethod=5))\n self.assertRaises(TypeError, lambda:\n psf.createSystem(params, nonbondedMethod=PME,\n nonbondedCutoff=1*radian)\n )\n self.assertRaises(TypeError, lambda:\n psf.createSystem(params, nonbondedMethod=PME,\n switchDistance=1*radian)\n )\n\n # Check what should be some legal options\n psf.createSystem(params, nonbondedMethod=PME, switchDistance=0.8,\n nonbondedCutoff=1.2)\n psf.createSystem(params, nonbondedMethod=PME, switchDistance=0.8,\n nonbondedCutoff=1.2*nanometer)", "def test_get_option_strikes_realtime(self):\n pass", "def test_qfiledialog_flags_typedef():\n assert QtWidgets.QFileDialog.Options is not None\n assert QtWidgets.QFileDialog.Options() == QtWidgets.QFileDialog.Option(0)", "def ValidateOptions(self, opt, 
args):", "def testParseOptions(self):\n options = cli_test_lib.TestOptions()\n options.preferred_language = 'is'\n\n test_tool = tools.CLITool()\n language.LanguageArgumentsHelper.ParseOptions(options, test_tool)\n\n self.assertEqual(test_tool._preferred_language, options.preferred_language)\n\n with self.assertRaises(errors.BadConfigObject):\n language.LanguageArgumentsHelper.ParseOptions(options, None)", "def test_platforms():\n assert sys.platform in (\"win32\", \"linux2\", \"darwin\")", "def test_check_options_exception(self, hp, opts):\n with pytest.raises(ValueError, match=\"XXX\"):\n check_is_in_options(hp, opts, msg=\"XXX\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorate methods that require the user to be logged in.
def authenticated(method): @functools.wraps(method) def wrapper(self, *args, **kwargs): if not self.current_user: self.write({'status_code':404, 'error_msg':'not login'}) return return method(self, *args, **kwargs) return wrapper
[ "def requires_logged_in(func):\n def ret_fn(*args):\n \"\"\"\n wrapper function\n :param args: argument for decorated function\n :return: return decorated function return values\n \"\"\"\n self = args[0]\n if not self.is_logged_in:\n self.try_login(raise_if_fail=True)\n return func(*args)\n return ret_fn", "def requires_login(func):\n @wraps(func)\n def requires_login_inner(self):\n self.enforce_login()\n return func(self)\n return requires_login_inner", "def logged_in(view):\n @functools.wraps(view)\n def decorated_view(*args, **kwargs):\n user_id = session.get('user', -1)\n logged_in_at = session.get('logged_in_at', None)\n user = User.query.get(user_id)\n\n # does check for database logout of user\n if user and user.logged_out_at > logged_in_at:\n session.clear()\n user = None\n\n return view(user=user, *args, **kwargs)\n return decorated_view", "def _decorator(request, *args, **kwargs):\n is_authenticated = request.user.is_authenticated\n authenticated = is_authenticated if isinstance(is_authenticated, bool)\\\n else is_authenticated()\n if authenticated:\n return func(request, *args, **kwargs)\n if 'HTTP_AUTHORIZATION' in request.META.keys():\n authmeth, auth = request.META['HTTP_AUTHORIZATION'].split(' ', 1)\n if authmeth.lower() == 'basic':\n auth = auth.strip().decode('base64')\n identifier, password = auth.split(':', 1)\n username = get_username(identifier)\n user = authenticate(username=username, password=password)\n if user:\n login(request, user)\n return func(request, *args, **kwargs)\n raise Http404", "def login_required(view):\n \n @wraps(view)\n def inner_decorator(request,*args, **kwargs):\n \n out = createBaseResponseObject()\n \n try:\n if request.user.is_authenticated():\n return view(request, *args, **kwargs)\n \n except Exception, e:\n out['status'] = 0\n out['errors'] = [str(e)]\n return HttpResponse(json.dumps(out))\n \n out['status'] = 0\n out['errors'] = ['You must be logged in to use this feature']\n return HttpResponse(json.dumps(out))\n\n return inner_decorator", "def user_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n handler = args[0]\n if handler.user:\n return fn(*args, **kwargs)\n handler.redirect(u'/login')\n return wrapper", "def authenticated(func):\n\tnewfunc = tornado.web.authenticated(func)\n\tnewfunc.original = func\n\treturn newfunc", "def authenticated(fn):\n def wrapper(*args, **kwargs):\n if args[0]['valid']:\n return fn(*args, **kwargs)\n return wrapper", "def add_auth(f):\n\n def add_auth_decorator(*args, **kwargs):\n token = get_user_token()\n if 'headers' not in kwargs:\n kwargs['headers'] = {}\n kwargs['headers']['Authorization'] = \"Bearer %s\" % token\n return f(*args, **kwargs)\n\n return add_auth_decorator", "def login_required(func):\n @wraps(func)\n def decorator():\n if not 'user' in session:\n return redirect(url_for('login'))\n return func()\n return decorator", "def login_required_ajax(function=None):\n def _decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.user.is_authenticated():\n return view_func(request, *args, **kwargs)\n else:\n return HttpResponse(status=401)\n return _wrapped_view\n\n if function is None:\n return _decorator\n else:\n return _decorator(function)", "def anonimous_required(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.user.is_authenticated():\n return HttpResponseRedirect(resolve_url(settings.LOGIN_REDIRECT_URL))\n else:\n 
return view_func(request, *args, **kwargs)\n return _wrapped_view", "def restrict_authenticated_users(view_func):\n\n @wraps(view_func)\n def wrapper_func(view, *args, **kwargs):\n if view.request.user.is_authenticated:\n return redirect(reverse('posts:all'))\n else:\n return view_func(view, *args, **kwargs)\n\n return wrapper_func", "def elevated_required(unauthorized):\n def decorator_wrapper(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if not current_user.is_authenticated() or not current_user.is_elevated():\n return unauthorized()\n return func(*args, **kwargs)\n return decorated_view\n return decorator_wrapper", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"index\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def login_optional(f):\n\n page_name = f.__name__\n\n @functools.wraps(f)\n def decorated_function(*pargs, **kwargs):\n client = flask.session.get(\"client\", None)\n if client:\n _get_client_values(client)\n else:\n flask.g.logged_in = False\n flask.g.auth_method = None\n flask.g.user_id = None\n flask.g.displayed_name = None\n\n flask.g.page_name = page_name\n return f(*pargs, **kwargs)\n\n return decorated_function", "def require_auth(fn):\n\n @wraps(fn)\n def wrapper(*args, **kwargs):\n if 'auth' not in kwargs:\n raise RuntimeError('auth required.')\n return fn(*args, **kwargs)\n return wrapper", "def requires_authorization(f):\n @wraps(f)\n def endpoint(*args, **kwargs):\n if not g.auth_user:\n raise Unauthorized('Not authenticated.')\n return f(*args, **kwargs)\n return endpoint", "def admin_required(unauthorized):\n def decorator_wrapper(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if not current_user.is_authenticated() or not current_user.is_admin():\n return unauthorized()\n return func(*args, **kwargs)\n return decorated_view\n return decorator_wrapper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorate methods whose access is IP-restricted.
def access_restricted(method): @functools.wraps(method) def wrapper(self, *args, **kwargs): app_log.info("http access %s", self.request.remote_ip) return method(self, *args, **kwargs) return wrapper
[ "def local_or_whitelist_only(f):\n def decorator(*args, **kwargs):\n if not request.headers.getlist(\"X-Forwarded-For\"):\n ip = request.remote_addr\n else:\n ip = request.headers.getlist(\"X-Forwarded-For\")[0]\n if not ip == \"127.0.0.1\" and ip not in app.config[\"IP_WHITELIST\"]:\n abort(403)\n return f(*args, **kwargs)\n return decorator", "def restrict_access(my_func):\n @wraps(my_func)\n def wrapper(*args, **kwds):\n \"\"\"Check access_problem_p and then execute wrapped function.\n \"\"\"\n\n problem = access_problem_p()\n if problem is None:\n return my_func(*args, **kwds)\n return problem\n\n return wrapper", "def SetIpPermission(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ownership_required(methods=[]): \n def decorator(view_func): \n @wraps(view_func)\n def _wrapped_view(request, *args, **kwargs): \n if request is not None and (methods is None or request.method in methods): \n # Get device id as json field or as function argument parsed from url\n device_id = kwargs.get('device_id', None)\n if device_id is not None:\n device_id = int(device_id)\n else: \n json_data = json.loads(request.body) \n device_id = json_data.get('device_id', None) \n \n if device_id is None:\n logging.error('Can not retrieve device_id from funciton '\\\n 'arguments or request.json (function: {})'.format(view_func.__name__))\n return HttpResponseForbidden()\n \n user_id = request.user.id \n if not check_ownership(user_id, device_id): \n logging.info('Access forbidden: User {} '\\\n 'is attempting to get/change information '\\\n 'about device {}'.format(user_id, device_id))\n return HttpResponseForbidden()\n\n return view_func(request, *args, **kwargs) \n return _wrapped_view\n return decorator", "def restricted(func):\n\t@wraps(func)\n\tdef wrapped(update, context, *args, **kwargs):\n\t\tuser_id = update.effective_user.id\n\t\tif user_id not in LIST_OF_ADMINS:\n\t\t\tprint(\"Unauthorized access denied for {}.\".format(user_id))\n\t\t\treturn\n\t\treturn func(update, context, *args, **kwargs)\n\treturn wrapped", "def access_control(self):\n return '%s.0/16 allow' % '.'.join(self.ip_addr.split('.')[:3])", "def show_ip(): #TODO\n pass", "def UpdateIpPermission(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def allow_request(self, request, view):\n if app_settings.THROTTLE_APIKEY_LIST:\n key = request.META.get(app_settings.THROTTLE_APIKEY_HEADER.upper().replace('-', '_'))\n if not key:\n key = request.GET.get(app_settings.THROTTLE_APIKEY_PARAM)\n if key and key in app_settings.THROTTLE_APIKEY_LIST:\n return True\n\n self.ident = request.META.get(\n self.settings.THROTTLE_IP_HEADER, None)\n if self.ident in app_settings.THROTTLE_IP_WHITELIST:\n return True\n\n # Not whitelisted; continue checking by IP\n return super(AnonRateThrottle, self).allow_request(request, view)", "def enabled_checker(func):\n\n @wraps(func)\n def wrap(self, *args, **kwargs):\n if self.allowed_methods and isinstance(self.allowed_methods, list) and func.__name__ not in self.allowed_methods:\n raise Exception(\"Method {} is disabled\".format(func.__name__))\n return func(self, *args, **kwargs)\n\n return wrap", "def defineMyIpAddress(address) :\n print(\"not yet implemented\")", "def denied_tools_views():\n def decorated(view_function):\n def wrapper(request):\n if 
is_webapp() and not is_tools():\n return HTTPNotFound()\n elif not is_webapp() and not is_tools():\n return HTTPNotFound()\n\n return view_function(request)\n return wrapper\n return decorated", "def get_ip(self, request):\r\n return request.META['REMOTE_ADDR']", "def cachekey_request_user_ip(func, *args, **kwargs):\n try:\n args2 = (args[0].user.id, args[0].META['REMOTE_ADDR'])\n except AttributeError:\n callargs = getcallargs(func, *args, **kwargs)\n request = callargs.get('request', args[0])\n args2 = (request.user.id, request.META['REMOTE_ADDR'])\n return prefix(func) + str(args2)", "def ListIpPermission(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def require_methods(*methods):\n def decorator(func):\n @functools.wraps(func)\n def wrapped(request, *args, **kwds):\n if request.method not in methods:\n allowed = ', '.join(methods)\n rsp = HttpTextResponse('This requires a specific method: %s' % allowed,\n status=405)\n rsp['Allow'] = allowed\n return rsp\n return func(request, *args, **kwds)\n return wrapped\n return decorator", "def set_apiip_to_ext(self):\n ip = self.s.get(GET_IP_URL).text.strip()\n # TODO test if ipv4 returned\n return self.set_apiip(ip)", "def method(*args):\n\n def __decor(view_func):\n def __view(request):\n if request.method not in args:\n return _http.HttpResponseNotAllowed(args)\n return view_func(request)\n\n return __view\n\n return __decor", "def pdp_protect(view):\n\n @functools.wraps(view)\n def wrapped(request, *args, **kwargs):\n pdp = getPDP()\n if pdp:\n try:\n authorized = pdp.authorize(request)\n message = \"Not authorized\"\n code = \"NotAuthorized\"\n except AuthorisationException as e:\n authorized = False\n message = str(e)\n code = e.code\n\n if not authorized:\n encoder = OWS20ExceptionXMLEncoder()\n return HttpResponse(\n encoder.serialize(\n encoder.encode_exception(message, \"2.0.0\", code)\n ),\n encoder.content_type, status=403\n )\n\n return view(request, *args, **kwargs)\n\n return wrapped" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorate methods that verify the request source signature.
def request_verify(method): @functools.wraps(method) def wrapper(self, *args, **kwargs): params = [] for key in sorted(args): if key == 'sign' or len(args[key]) == 0: continue params.append( '='.join([key, args[key][0]]) ) sign = args['sign'][0] pre_sign_str = '&'.join(params) if sign != md5.md5(pre_sign_str + options.http_request_key).hexdigest(): return return method(self, *args, **kwargs) return wrapper
[ "def signature_checking(self,meta):\n if self.vertification(meta):\n pass\n else:\n raise Exception('Incorrect Signature')", "def verify(self, key, msg, sig): # pragma: no cover\n raise NotImplementedError()", "def _check_signature(self, request, key):\n supercls = super(TokenServerAuthenticationPolicy, self)\n try:\n return supercls._check_signature(request, key)\n except HTTPUnauthorized:\n logger.warn(\"Authentication Failed: invalid hawk signature\")\n raise", "def verifiergetter(self, f):\r\n self._verifiergetter = f\r\n return f", "def check_sign_transform_algorithm(self, source):\n signed_info = source['Signature']['SignedInfo']\n # Transform alogrithm\n trans = signed_info['Reference']['Transforms']['Transform@Algorithm']\n if trans != DCP_SETTINGS['xmluri']['enveloped_sig']:\n self.error(\"Invalid transform method\")", "def _check_signature(self, request, key):\n supercls = super(SagradaAuthenticationPolicy, self)\n try:\n return supercls._check_signature(request, key)\n except HTTPUnauthorized:\n log_cef(\"Authentication Failed: invalid MAC signature\", 5,\n request.environ, request.registry.settings,\n \"\", signature=AUTH_FAILURE)\n raise", "def drawSignature(\n source, signature, result=..., radiusToShorterSideRatio=..., borderThickness=...\n ) -> result:\n ...", "def authenticated(fn):\n def wrapper(*args, **kwargs):\n if args[0]['valid']:\n return fn(*args, **kwargs)\n return wrapper", "def wrapper(*args, **kwargs):\n os.environ[\"REQUESTS_CA_BUNDLE\"] = grab_pem()\n return method(*args, **kwargs)", "def check_sign_signature_algorithm(self, source):\n # Additionnal. XML coherence checks\n signed_info = source['Signature']['SignedInfo']\n\n # Signature algorithm\n sig = signed_info['SignatureMethod@Algorithm']\n if self.sig_ns_map[self.dcp.schema] != sig:\n self.error(\n \"Invalid Signature Algorithm, expected {} but got {}\".format(\n self.sig_ns_map[self.dcp.schema], sig))", "def gen_utils_decorators():\n\n doc = '''\n# pylint: disable=R0904\n\"\"\"\nUtils.\n\"\"\"\nimport sys\nsys.dont_write_bytecode = True\nfrom functools import wraps\n\n\ndef is_authenticated(method):\n \"\"\"\n Basic authenticated check decorator.\n \"\"\"\n\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Wrapper method for is_authenticated decorator.\n \"\"\"\n\n # Add decorator flow.\n\n return method(self, *args, **kwargs)\n\n return wrapper\n\n\n__all__ = ['is_authenticated']\n\n\nif __name__ == '__main__':\n pass\n'''\n\n return doc", "def verified(f):\n\n @functools.wraps(f)\n def wrapper(request, *args, **kwargs):\n if d1_common.const.SUBJECT_VERIFIED not in request.all_subjects_set:\n raise d1_common.types.exceptions.NotAuthorized(\n 0,\n \"Access allowed only for verified accounts. Please reconnect with a \"\n \"valid DataONE session certificate in which the identity of the \"\n 'primary subject has been verified. 
session_subjects=\"{}\"'.format(\n d1_gmn.app.auth.format_session_subjects(request)\n ),\n )\n return f(request, *args, **kwargs)\n\n return wrapper", "def test_verifier_set_signature(self):\n self.mock_verifier_instance._signature = b''\n verifier = aws_encryption_sdk.internal.crypto.Verifier(\n algorithm=self.mock_algorithm,\n public_key=self.mock_verifier_public_key\n )\n assert verifier.verifier._signature == b''\n verifier.set_signature(sentinel.signature)\n assert verifier.verifier._signature == sentinel.signature", "def validate_slack_signature(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n # Retrieve the X-Slack-Request-Timestamp header on the HTTP request\n timestamp = request.headers.get(\"X-Slack-Request-Timestamp\")\n\n # Retrieve the X-Slack-Signature header on the HTTP request, and the body of the request\n signature = request.headers.get(\"X-Slack-Signature\")\n body = request.get_data(as_text=True)\n\n # Concatenate the version number (right now always v0), \n # the timestamp, and the body of the request.\n # Use a colon as the delimiter and encode as bytestring\n format_req = str.encode(f\"v0:{timestamp}:{body}\")\n\n # Encode as bytestring\n encoded_secret = str.encode(SLACK_SIGNING_SECRET)\n\n # Using HMAC SHA256, hash the above basestring, using the Slack Signing Secret as the key.\n request_hash = hmac.new(encoded_secret, format_req, hashlib.sha256).hexdigest()\n\n # Compare this computed signature to the X-Slack-Signature header on the request.\n if hmac.compare_digest(f\"v0={request_hash}\", signature):\n # hooray, the request came from Slack! Run the decorated function\n return func(*args, **kwargs)\n else:\n return \"\", http.HTTPStatus.NO_CONTENT\n\n return wrapper", "def advapi32_CryptVerifySignature(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hHash\", \"pbSignature\", \"dwSigLen\", \"hPubKey\", \"sDescription\", \"dwFlags\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def test_patch_certificate_signing_request(self):\n pass", "async def verify_signature(request: Request):\n verify_key = VerifyKey(bytes.fromhex(config.DISCORD_APPLICATION_PUBLIC_KEY))\n\n try:\n signature = request.headers[\"X-Signature-Ed25519\"]\n timestamp = request.headers[\"X-Signature-Timestamp\"]\n body = (await request.body()).decode(\"utf-8\")\n verify_key.verify(f'{timestamp}{body}'.encode(), bytes.fromhex(signature))\n except (KeyError, BadSignatureError):\n raise HTTPException(401, 'invalid request signature')", "def verify_header(verify_func):\n\n def _decorator(e, *args, **kwargs):\n if not _is_header_valid(e, *args, **kwargs):\n return STATE_DISCONNECTED\n return verify_func(e, *args, **kwargs)\n\n return wraps(verify_func)(_decorator)", "def checkPassed(self, func, *args, **kw):\n return _passedSignature(inspect.signature(func), args, kw)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an open file handle to read the given external object.
def read_external_object( self, doi_or_unique_name: str, title: str, component: Optional[str] = None, ) -> IOBase: kwds = dict(doi_or_unique_name=doi_or_unique_name, title=title) if component is not None: kwds["component"] = component return self.file_api.open_for_read(**kwds)
[ "def open(self, oid):\n return open(self.path(oid), 'rb')", "def get_file_obj(path: str) -> TextIOWrapper:\n return open(path, 'r')", "def file_object(self) -> BufferedReader:\n return self.reader.file_object", "def get_file_object(self):\n\n if self.file_obj == None:\n self._open()\n return self.file_obj", "def open_file_or_filename(obj, mode='r'):\n if is_filelike(obj, modes_needed=mode):\n return obj\n elif isinstance(obj, basestring):\n return possibly_compressed_file(obj, mode)\n else:\n raise TypeError(\"Can't make a file out of %r.\" % obj)", "def getObject(rootFile, objectName):\n if not os.path.exists(rootFile):\n raise IOError(\"The file '%s' does not exist\" % rootFile)\n\n obj = None\n try:\n input = rt.TFile.Open(rootFile)\n obj = input.Get(objectName)\n finally:\n pass\n return obj", "def get_file_handle(self, filename):\n file_handle = open(self.path + filename, 'r')\n return file_handle", "def readOBJREF(stream):\n return ObjRef(stream)", "def get_file_object(self, file_name, command):\n # this method exists to aid in unit testing. we do not want to actually\n # be accessing files during unit tests, so we will use a subclass\n # of this class wherein this method is overwritten.\n return open(file_name, command)", "def get_fd(fileobj):\n if IS_WINDOWS:\n import msvcrt\n return msvcrt.open_osfhandle(fileobj.fileno(), os.O_TEXT)\n return fileobj.fileno()", "def _open_for_random_access(filename):\n handle = open(filename, \"rb\")\n magic = handle.read(2)\n handle.seek(0)\n\n if magic == b\"\\x1f\\x8b\":\n # This is a gzipped file, but is it BGZF?\n from . import bgzf\n try:\n # If it is BGZF, we support that\n return bgzf.BgzfReader(mode=\"rb\", fileobj=handle)\n except ValueError as e:\n assert \"BGZF\" in str(e)\n # Not a BGZF file after all,\n handle.close()\n raise ValueError(\"Gzipped files are not suitable for indexing, \"\n \"please use BGZF (blocked gzip format) instead.\")\n\n return handle", "def open(self, fileobject, mode=\"a\"):\n return open(fileobject._path, mode=mode)", "def open(filename, mode=\"rb\"):\n return QshFile(filename, mode)", "def get_filepointer(filename):\n\n\tfp = None\n\tif filename.endswith('.gz'): fp = gzip.open(filename, 'rt')\n\telif filename == '-': fp = sys.stdin\n\telse: fp = open(filename)\n\treturn fp", "def open_resource(fn):\n return open(_get_res_path(fn))", "def mock_open(*args, **kwargs):\n return MockFileObject()", "def get_file_handle(self) -> IO:\n with open(self.path, 'rb') as f:\n tmp_magic: bytes = f.read(4)\n if tmp_magic == compressed_magic:\n return gzip.open(self.path, 'rb')\n else:\n return super().get_file_handle()", "def open(self):\n\n # allow Readers to read with or without context management\n # pylint: disable-next=consider-using-with, unspecified-encoding\n self._fobj = open(self.path, self.mode, **self.kwargs)", "def win_open(filename):\n # get an handle using win32 API, specifyng the SHARED access!\n handle = win32file.CreateFile(filename,\n win32file.GENERIC_READ|win32file.GENERIC_WRITE,\n win32file.FILE_SHARE_DELETE |\n win32file.FILE_SHARE_READ |\n win32file.FILE_SHARE_WRITE,\n None,\n win32file.OPEN_ALWAYS,\n 0,\n None)\n # detach the handle\n detached_handle = handle.Detach()\n # get a file descriptor associated to the handle\\\n file_descriptor = msvcrt.open_osfhandle(\n detached_handle, os.O_RDWR)\n # open the file descriptor\n return os.fdopen(file_descriptor , \"rb+\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the tweet passes all filter rules
def tweet_filter(self, tweet): for rule in self.tf: if not self.tf[rule](tweet): return False return True
[ "def _filter_tweet(self, tweet):\n if \"extended_tweet\" in tweet.keys():\n tweet[\"text\"] = tweet[\"extended_tweet\"][\"full_text\"]\n elif \"retweeted_status\" in tweet.keys() and \"full_text\" in tweet[\"retweeted_status\"].keys():\n tweet[\"text\"] = \"RT \" + tweet[\"retweeted_status\"][\"full_text\"]\n\n filtered_data = self._extract(tweet, TwitterFetcher.tweet_fields)\n filtered_data[\"user\"] = self._extract(tweet[\"user\"], TwitterFetcher.user_fields)\n filtered_data[\"CC\"] = self._get_location(tweet[\"user\"][\"location\"])\n filtered_data[\"social\"] = {\"topic\": self.topic, \"topic_id\": self.topic_id, \"user_id\": self.user_id}\n filtered_data[\"source\"] = self._get_source(tweet[\"source\"])\n self.redis.publish(f'twitter:stream', json.dumps(filtered_data))\n self._initialize_results(filtered_data)\n return filtered_data", "def filter(self, tweet: dict) -> dict:\n\n # Filter required fields\n filtered_tweet = {\"user_id\": tweet.user.id_str,\n \"name\": tweet.user.name,\n \"nickname\": tweet.user.screen_name,\n \"description\": tweet.user.description,\n \"user_location\": tweet.user.location,\n \"followers_count\": tweet.user.followers_count,\n \"tweets_count\": tweet.user.statuses_count,\n \"user_date\": tweet.user.created_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"verified\": tweet.user.verified,\n \"tweet_id\": tweet.id_str,\n \"text\": tweet.full_text,\n \"favs\": tweet.favorite_count,\n \"retweets\": tweet.retweet_count,\n \"tweet_date\": tweet.created_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"tweet_location\": tweet.place.full_name if tweet.place else None,\n \"source\": tweet.source,\n \"sentiment\": self.detect_sentiment(tweet.full_text, tweet.lang)}\n\n return filtered_tweet", "def test_filter_tweets_unchanged(self):\n\n consumer = ELDConsumer(Queue(), 60)\n with open(os.path.join(os.path.dirname(__file__), 'corpus.json'), 'r') as f:\n lines = f.readlines()\n tweets = [ json.loads(line) for line in lines ]\n filtered = consumer._filter_tweets(tweets)\n self.assertTrue(all(tweet in tweets for tweet in filtered))", "def test_filter_tweets_no_favourites(self):\n\n consumer = ELDConsumer(Queue(), 60)\n with open(os.path.join(os.path.dirname(__file__), 'corpus.json'), 'r') as f:\n lines = f.readlines()\n tweets = [ json.loads(line) for line in lines ]\n count = len(tweets)\n tweets = consumer._filter_tweets(tweets)\n self.assertTrue(all(tweet['user']['favourites_count'] > 0 for tweet in tweets))\n self.assertGreater(count, len(tweets))", "def test_filter_tweets_document(self):\n\n consumer = ELDConsumer(Queue(), 60, scheme=TF())\n with open(os.path.join(os.path.dirname(__file__), 'corpus.json'), 'r') as f:\n lines = f.readlines()\n tweets = [ json.loads(line) for line in lines ]\n documents = [ Document('', attributes={ 'tweet': tweet }) for tweet in tweets ]\n\n tweets = consumer._filter_tweets(tweets)\n documents = consumer._filter_tweets(documents)\n self.assertEqual(len(tweets), len(documents))\n self.assertTrue(all( document.attributes['tweet'] in tweets for document in documents ))", "def test_filter_tweets_hashtags(self):\n\n consumer = ELDConsumer(Queue(), 60)\n with open(os.path.join(os.path.dirname(__file__), 'corpus.json'), 'r') as f:\n lines = f.readlines()\n tweets = [ json.loads(line) for line in lines ]\n count = len(tweets)\n tweets = consumer._filter_tweets(tweets)\n self.assertTrue(all(len(tweet['entities']['hashtags']) <= 2 for tweet in tweets))\n self.assertGreater(count, len(tweets))", "def test_filter_tweets_bio(self):\n\n consumer = 
ELDConsumer(Queue(), 60)\n with open(os.path.join(os.path.dirname(__file__), 'corpus.json'), 'r') as f:\n lines = f.readlines()\n tweets = [ json.loads(line) for line in lines ]\n count = len(tweets)\n tweets = consumer._filter_tweets(tweets)\n self.assertTrue(all(tweet['user']['description'] for tweet in tweets))\n self.assertGreater(count, len(tweets))", "def isFiltered():\n\treturn True", "def filter(self, user_id=None, count=20, tweet_timeline=None):\n try:\n timeline = None\n if tweet_timeline is not None:\n timeline = tweet_timeline\n else:\n timeline = self.get_tweets(user_id=user_id, count=count)\n\n if timeline is None:\n raise Exception(\"Error: Could not fetch the tweets\")\n\n filtered_tweets = {}\n for tweets in timeline:\n tweet = tweets._json\n if (\n tweet[\"retweet_count\"] > self.min_retweet\n and tweet[\"retweet_count\"] < self.max_retweet\n ):\n temp_tweet = {\n \"text\": tweet[\"text\"],\n \"retweet_count\": tweet[\"retweet_count\"],\n }\n filtered_tweets[tweet[\"id\"]] = temp_tweet\n\n print(\n \"Status: Filtered {} tweets with the given criteria\".format(\n len(filtered_tweets)\n )\n )\n return filtered_tweets\n\n except Exception as e:\n print(str(e))\n sys.exit(0)", "def test_is_tweet_nontweet(self):\n self.assertEqual(False, self.messagetools.is_tweet({\n 'friends': [],\n }))", "def filter(self, user_id=None, count=20, tweet_timeline=None):\n try:\n timeline = None\n if tweet_timeline is not None:\n timeline = tweet_timeline\n else:\n timeline = self.get_tweets(user_id=user_id, count=count)\n\n if timeline is None:\n raise Exception(\"Error: Could not fetch the tweets\")\n\n filtered_tweets = {}\n for tweets in timeline:\n tweet = tweets._json\n\n if (\n tweet[\"favorite_count\"] > self.min_favorite\n and tweet[\"favorite_count\"] < self.max_favorite\n ):\n\n temp_tweet = {\n \"text\": tweet[\"text\"],\n \"favorite_count\": tweet[\"favorite_count\"],\n }\n filtered_tweets[tweet[\"id\"]] = temp_tweet\n\n print(\n \"Status: Filtered {} tweets with the given criteria\".format(\n len(filtered_tweets)\n )\n )\n return filtered_tweets\n\n except Exception as e:\n print(str(e))\n sys.exit(0)", "def financial_check(tweet):\n if any(word in tweet for word in FINANCIAL_PARAMETERS):\n return tweet", "def tweet_filter(tweet_obj, condition, key=\"text\", strip=\"True\"):\n\n if key in tweet_obj:\n if strip:\n text = strip_all_entities(strip_links(tweet_obj[key]))\n else:\n text = tweet_obj[key]\n else:\n print(\"Not a valid key (\" + key + \")\")\n sys.exit(1)\n\n if evaluate_condition(text, parse_condition(tokenize_condition(condition))):\n return tweet_obj\n else:\n return None", "def test_filter_tweets_urls(self):\n\n consumer = ELDConsumer(Queue(), 60)\n with open(os.path.join(os.path.dirname(__file__), 'corpus.json'), 'r') as f:\n lines = f.readlines()\n tweets = [ json.loads(line) for line in lines ]\n count = len(tweets)\n tweets = consumer._filter_tweets(tweets)\n self.assertTrue(all(len(tweet['entities']['urls']) <= 1 for tweet in tweets))\n self.assertGreater(count, len(tweets))", "def test_filter_tweets_english(self):\n\n consumer = ELDConsumer(Queue(), 60)\n with open(os.path.join(os.path.dirname(__file__), 'corpus.json'), 'r') as f:\n lines = f.readlines()\n tweets = [ json.loads(line) for line in lines ]\n count = len(tweets)\n tweets = consumer._filter_tweets(tweets)\n self.assertTrue(all(tweet['lang'] == 'en' for tweet in tweets))\n self.assertGreater(count, len(tweets))", "def isTwitter(cls, s):\n return s and s.startswith('@')", "def 
test_filter_tweets_empty(self):\n\n consumer = ELDConsumer(Queue(), 60)\n self.assertEqual([ ], consumer._filter_tweets([ ]))", "def test_is_tweet(self):\n self.assertEqual(True, self.messagetools.is_tweet({\n 'id_str': '12345',\n 'text': 'This is a tweet.',\n 'user': {},\n }))", "def check_tweet_is_instamoffer(self, tweet):\n return tweet.lower().startswith(\"instamoffer\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print a single tweet on the wall
def print_tweet(self, tweet): self.printer.echo('{}'.format( tweet.get_created().strftime(self.outformat)), nl=False ) self.printer.echo(' ({})'.format(tweet.get_url())) self.printer.echo(tweet.get_author_name(), nl=False) self.printer.echo(' [{}]'.format(tweet.get_author_nick()), nl=False) self.printer.echo(': {}'.format(tweet.get_text())) self.printer.echo()
[ "def print_tweet(tweet):\n text = colorize(tweet, hashtag_wrap, mention_wrap, url_wrap)\n text = Markup.unescape(text)\n created_at = time_filter(tweet['created_at'])\n\n click.echo('------')\n click.secho('ID: {}'.format(tweet['id']), fg='green')\n click.secho(tweet['user']['name'], fg='blue', bold=True, nl=False)\n click.secho(\n ' @{}'.format(tweet['user']['screen_name']),\n fg='white',\n bold=True,\n nl=False)\n click.secho(' {}'.format(created_at), fg='magenta')\n click.echo(text)\n click.echo('Retweets: {}, Likes: {}'.format(tweet['retweet_count'], tweet[\n 'favorite_count']))\n\n click.echo('------')", "def print_tweet(tweet):\n print('At: {} : {}'.format(tweet.created_at, tweet.text))\n print(' tweeted by : {} from {}'.format(tweet.user.screen_name, tweet.user.location))", "def printTweets(tweet_list):\n\n for tweet in tweet_list:\n print(\"{} : {}\\n\".format(tweet[\"tweet_id\"], tweet[\"full_text\"]))", "def print_tweets(public_tweets):\n for tweet in public_tweets:\n print_tweet(tweet)", "def get_tweet(self):\n\t\t\n\t\tinTweet=\"fdfdfdff\"\n\t\tself.d_tweet(inTweet)", "def print_tweet(i, ith_tweet):\n\n print(f\"\"\"\nTweet {i}:\nUsername:{ith_tweet[\"username\"]}\nDate:{ith_tweet[\"date\"]}\nDescription:{ith_tweet[\"description\"]}\nLocation:{ith_tweet[\"location\"]}\nFollowing Count:{ith_tweet[\"following\"]}\nFollower Count:{ith_tweet[\"followers\"]}\nTotal Tweets:{ith_tweet[\"totaltweets\"]}\nRetweet Count:{ith_tweet[\"retweetcount\"]}\nTweet Text:{ith_tweet[\"text\"]}\nHashtags Used:{ith_tweet[\"hashtext\"]}\n\"\"\")", "def twitter_display(twitters):\n separator = \"\\t\"\n for status in reversed(twitters):\n nick = unicode(status.user.screen_name)\n nick_color = get_nick_color(nick)\n\n\n text = unicode(status.text)\n timestamp = int(dt2lt(status.created_at))\n print_line( \"%s%s%s%s\" %(nick_color, nick, separator, text), timestamp)", "def userTweets(username):\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n user_tweet = api.GetUserTimeline(screen_name=username)\n for tweet in user_tweet:\n util.safe_print(tweet.GetText())", "def print_timeline(self):\n tweets = self.get_tweets_from_timeline()\n tweets = MakingActions.get_text_from_list(tweets)\n for items in tweets:\n print items", "def save_tweet(self,tweet):\n print '.',\n return", "def send_my_tweet(my_tweet_text, local_screenshot):\n TWITTER.update_with_media(local_screenshot, status=my_tweet_text)", "def tweet_trumpet():\n\tgen = generator.Generator(\"@realDonaldTrump.dat\")\n\ttext = gen.generate_paragraphs(25, 1)\n\ttext = \"Trumpet:\\n\" + text.replace(\"@\", \"\")\n\n\tclient.update_status(status=text)\n\tlogging.info(text)", "def ft_post_twt(status):\r\n\ttry:\r\n\t\ttwitter.update_status(status=status)\r\n\t\tsleep(1)\r\n\t\tprint(\"{} publish :\\n{}\".format(strftime(\"%d/%m/%y %H:%M:%S\"),status))\r\n\texcept Exception as e:\r\n\t\tprint(e)", "def reply():\n # Get all (available) status texts by Int_SORSE after last seen tweet id\n id = read_last_seen()\n new_tweets = []\n new_statuses = Cursor(api.user_timeline, id=RETWEET_USER, since_id=id).items()\n\n # Add all new statuses since the last seen to list\n for status in new_statuses:\n new_tweets.append(status.id)\n\n # If there were any new tweets, retweet them\n if len(new_tweets) > 0:\n # Write last status\n write_last_seen(new_tweets[0])\n\n for id in reversed(new_tweets):\n print('Replying to tweet with ID ' + str(id))\n # Favourite this tweet\n api.create_favorite(id)\n # 
Retweet\n api.retweet(id)", "def retweet():\n tw_id = request.args.get(\"tweet\")\n\n tws = session[\"tweets\"]\n tws[tw_id][\"retweet_time\"] = datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\")\n tws[tw_id][\"retweeter\"] = session[\"user\"]\n\n session[\"tweets\"] = tws\n save_tweets()\n\n return redirect(\"/personal_feed\")", "def scrape_tweet(tweet):\n\n\n dateUntil = tweet.created_at + timedelta(1)\n tweetCriteria = got.manager.TweetCriteria().setUsername(tweet.author.screen_name).setSince(\n tweet.created_at.strftime(\"%Y-%m-%d\")).setUntil(dateUntil.strftime(\"%Y-%m-%d\")).setMaxTweets(-1)\n found = False\n tweets = got.manager.TweetManager.getTweets(tweetCriteria)\n for tw in tweets:\n if tw.id == tweet.id_str:\n tweet.reply_count = tw.replies\n break;\n return tweet", "def run(self):\n twitter_userstream = twitter.TwitterStream(\n auth=self.oauth,\n domain='userstream.twitter.com')\n for msg in twitter_userstream.user():\n if not self.run:\n break\n print ('Incoming Twitter stream message:')\n print ('-' * 72)\n pprint.pprint(msg)\n print ('-' * 72)\n if 'text' not in msg:\n # Not a status update, so skip this...\n continue\n self.send_message(u'_Received tweet from @%s:_\\n%s' % (\n msg['user']['screen_name'],\n msg['text']),\n parse_mode='Markdown')", "def capture_tweets_for_posterity():\n their_tweets = TWITTER.user_timeline(\n ORIGINAL_TWEETER,\n count=BATCH_SIZE,\n since_id=LATEST_CAPTURED_TWEET)\n their_tweets.reverse() # i.e. put in chronological order\n for their_tweet in their_tweets:\n try:\n local_screenshot = get_tweet_screenshot(their_tweet)\n my_tweet_text = build_intro(their_tweet.user.screen_name)\n send_my_tweet(my_tweet_text, local_screenshot)\n check_off(their_tweet)\n finally:\n os.remove(local_screenshot)", "def main():\n #Tokens for authentication\n client_key = ''\n client_secret = ''\n resource_owner_key = ''\n resource_owner_secret = ''\n token = {\n \"client_key\": client_key, \n \"client_secret\": client_secret,\n \"resource_owner_key\": resource_owner_key, \n \"resource_owner_secret\": resource_owner_secret\n } \n i = 0\n tweets = twitter.public_stream(token)\n fp1 = open(\"Streamtweets.txt\",\"w\")\n while True:\n for tweet in tweets:\n print \"writing\"\n fp1.write(tweet)\n fp1.write(\"\\n\\n\")\n i = i + 1\n if i == 1000: \n break\n if i == 1000:\n break\n fp1.close()\n print \"Exiting!!!\"\n raise SystemExit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Highlight parts of a tweet by going through its entities
def tweet_highlighter(self, tweet): text = tweet.get_text() result = "" entities = [] for hashtag in tweet.get_entities_of_type('hashtags'): entities.append( (hashtag['indices'][0], hashtag['indices'][1], self.printer.style( '#'+hashtag['text'], fg=self.colors['hashtag'], bold=True )) ) for mention in tweet.get_entities_of_type('user_mentions'): entities.append( (mention['indices'][0], mention['indices'][1], self.printer.style( '@'+mention['screen_name'], fg=self.colors['mention'], bold=True )) ) for url in tweet.get_entities_of_type('urls'): entities.append( (url['indices'][0], url['indices'][1], self.printer.style( url['url'], underline=True) ) ) entities.sort(reverse=True) index = 0 while len(entities) > 0: act = entities.pop() result += text[index:act[0]] + act[2] index = act[1] result += text[index:] return result
[ "def colorize(tweet, hashtag_wrap, mention_wrap, url_wrap):\n text = tweet['text']\n\n entities = tweet['entities']['hashtags'] + tweet['entities'][\n 'user_mentions'] + tweet['entities']['urls']\n entities.sort(key=lambda e: e['indices'][0])\n\n shift = 0\n for entity in entities:\n text_len = 0\n styled_text = None\n\n if 'screen_name' in entity:\n text_len = len(entity['screen_name']) + 1\n styled_text = mention_wrap(entity)\n elif 'url' in entity:\n text_len = len(entity['url'])\n styled_text = url_wrap(entity)\n else:\n text_len = len(entity['text']) + 1\n styled_text = hashtag_wrap(entity)\n\n text = text[:(entity['indices'][0] + shift)] + styled_text + text[(\n entity['indices'][1] + shift):]\n shift = shift + len(styled_text) - (text_len)\n\n return text", "def process_tweets(tweets, classify_tweet_type=True, extract_tweet_entities=True):\n\n for line in tweets:\n\n if classify_tweet_type is True:\n # classify tweet as retweet/mention/tweet\n if \"retweeted_status\" in line:\n line[\"TWEET_TYPE\"] = \"retweet\"\n elif len(line[\"entities\"][\"user_mentions\"]) > 0:\n line[\"TWEET_TYPE\"] = \"mention\"\n else:\n line[\"TWEET_TYPE\"] = \"tweet\"\n\n if extract_tweet_entities is True:\n # check if line contains a menetion, and if so, extract all users mentione\n tweeties = []\n line[\"TWEETIES\"] = \"\"\n if len(line[\"entities\"][\"user_mentions\"]) > 0:\n tweeties.extend(line[\"entities\"][\"user_mentions\"])\n line[\"TWEETIES\"] = \" \".join([user[\"screen_name\"] for user in tweeties])\n\n # check if line contains a hashtag, and if so, extact all hashtags\n hashtags = []\n line[\"HASHTAGS\"] = \"\"\n if len(line[\"entities\"][\"hashtags\"]) > 0:\n hashtags.extend(line[\"entities\"][\"hashtags\"])\n line[\"HASHTAGS\"] = \" \".join([tag[\"text\"] for tag in hashtags])\n\n # check if line contains a URL, and if so, extract all expanded URLS\n expanded_urls = []\n line[\"EXPANDED_URLS\"] = \"\"\n if len(line[\"entities\"][\"urls\"]) > 0:\n expanded_urls.extend(line[\"entities\"][\"urls\"])\n line[\"EXPANDED_URLS\"] = \" \".join(\n [url[\"expanded_url\"] for url in expanded_urls]\n )\n\n # check if line has lat/long, and if so, extract lat/long\n line[\"LATITUDE\"] = \"\"\n line[\"LONGITUDE\"] = \"\"\n if line[\"geo\"] is not None:\n line[\"LATITUDE\"] = line[\"geo\"][\"coordinates\"][0]\n line[\"LONGITUDE\"] = line[\"geo\"][\"coordinates\"][1]\n\n return tweets", "def extract_stories(self, text):\n pass", "def highlight(html_str, wordlist):\n\n for word in wordlist:\n\n start = html_str.lower().find(word.lower())\n end = start + len(word)\n\n if start != -1:\n tmp = html_str[:start]\n tmp += '<mark>'\n tmp += html_str[start:end]\n tmp += '</mark>'\n tmp += highlight(html_str[end:], [word])\n\n html_str = tmp\n\n return html_str", "def split_story(doc):\n # find first highlight\n index = doc.find('@highlight')\n # split into story and highlights\n story, highlights = doc[:index], doc[index:].split('@highlight')\n # strip extra white space around each highlight\n highlights = [h.strip() for h in highlights if len(h) > 0]\n return story, highlights", "def tag_tweets(line):\n\n # read three geo file into memory\n state_geo_info0 = open(STATE_GEO_FILE_NAME, 'r')\n state_geo_info = json.load(state_geo_info0)\n state_geo = state_geo_info['features']\n\n vic_geo_file = open(VIC_DIST_GEO_FILE_NAME, 'r')\n vic_geo0 = json.load(vic_geo_file)\n vic_geo = vic_geo0['features']\n\n file = open(MEL_DIST_GEO_FILE_NAME, 'r')\n mel_geo0 = json.load(file)\n mel_geo = mel_geo0['features']\n\n # 
initialization of coordinate\n coordinate = None\n stored_tweet = None\n # use the coordinate of tweet to find the location name\n if line['coordinates']:\n raw = line['coordinates']['coordinates']\n coordinate = tuple(raw)\n elif line['place']:\n # get the central point of a place\n raw = average_bounding_box(line['place']['bounding_box']['coordinates'])\n coordinate = tuple(raw)\n\n point = Point(coordinate)\n state_name = get_state_by_coordinate(state_geo, point)\n vic_district = get_vic_dist_by_coordinate(vic_geo, point)\n mel_district = get_mel_dist_by_coordinate(mel_geo, point)\n\n if line['user']['location']:\n if not state_name:\n state_name = get_state_by_user_location(line['user']['location'])\n if not vic_district:\n vic_district = get_vic_dist(line['user']['location'])\n if not mel_district:\n mel_district = get_mel_dist(line['user']['location'])\n\n # put all name in upper case\n if state_name:\n state_name = state_name.upper()\n if vic_district:\n vic_district = vic_district.upper()\n if mel_district:\n if mel_district == \"Melbourne (3000)\" or mel_district == \"Melbourne (3004)\":\n mel_district = \"MELBOURNE\"\n mel_district = mel_district.upper()\n\n # tag and store if location exists\n if state_name or vic_district or mel_district:\n # get sentiment score\n blob = TextBlob(line['text'])\n score = blob.sentiment.polarity\n\n # get emoji list\n emoji_list = extract_emojis(line['text'])\n\n # if there are multiple emojis in the list, use the most frequently used one\n to_store_emoji = get_most_used_eomji(emoji_list)\n\n # if score == 0, use the ranking score of emoji list\n if score == 0:\n if len(emoji_list) > 0:\n rank = 0\n for e in emoji_list:\n if e in eur.EMOJI_UNICODE_RANKING.keys():\n rank += eur.EMOJI_UNICODE_RANKING[e]\n\n score = rank / len(emoji_list)\n\n # get the name of mobile end system\n if line['source']:\n system = get_system(line['source'])\n\n stored_tweet = {\n 'system': system, 'sentiment': score, 'state': state_name, 'districtInMel': mel_district,\n 'districtInVic': vic_district, 'emoji_list':to_store_emoji}\n # to_store = json.dumps(stored_tweet)\n state_geo_info0.close()\n vic_geo_file.close()\n file.close()\n\n return stored_tweet", "def _find_text(self):\n tweet_text = []\n for item in self.search_results['statuses']:\n tweet_text.append(item['full_text'])\n return tweet_text", "def extract_entities_results_html(text, normalize):\n try:\n result = rester.get_ner_tags(\n text, concatenate=True, normalize=normalize\n )\n except MatScholarRestError:\n rester_error_txt = RESTER_ERROR_TEXT\n return common_rester_error_html(rester_error_txt)\n tagged_doc = result[\"tags\"]\n relevance = result[\"relevance\"]\n highlighted = highlight_entities_html(tagged_doc)\n\n # Add the warning\n if not relevance:\n warning_header_txt = \"Warning! Abstract not relevant.\"\n warning_body_txt = (\n \"Our classifier has flagged this document as not relevant to \"\n \"inorganic materials science. 
Expect lower than optimum \"\n \"performance.\"\n )\n warning = common_warning_html(\n warning_header_txt, warning_body_txt, \"is-fullwidth\"\n )\n else:\n warning = html.Div(\"\")\n\n # Update download link\n doc = {\"sentences\": []}\n for sent in tagged_doc:\n new_sent = []\n for token, tag in sent:\n new_sent.append({\"token\": token, \"tag\": tag})\n doc[\"sentences\"].append(new_sent)\n json_string = json.dumps(doc)\n json_string = \"data:text/csv;charset=utf-8,\" + urllib.parse.quote(\n json_string\n )\n download_link = html.A(\n \"Download entities as json\",\n id=\"entity-download-link\",\n href=json_string,\n download=\"tagged_docs.json\",\n target=\"_blank\",\n )\n download_container = html.Div(\n download_link, className=\"has-text-size-4 has-margin-top 10\"\n )\n\n label = html.Label(\"Extracted Entity Tags:\")\n label_container = html.Div(label, className=\"is-size-4 has-margin-top-30\")\n\n highlighted_container = html.Div(highlighted)\n\n label_label = html.Label(\"Labels:\")\n label_label_container = html.Div(\n label_label, className=\"is-size-4 has-margin-top-30\"\n )\n\n entity_colormap_key = copy.deepcopy(entity_color_map_extended)\n entities_keys = []\n for e, color in entity_colormap_key.items():\n # don't need the \"other\" label\n if e == \"other\":\n continue\n entity_key = html.Div(\n e, className=f\"is-size-4 msweb-is-{color}-txt has-text-weight-bold\"\n )\n entity_key_container = html.Div(\n entity_key, className=\"flex-column is-narrow has-margin-5 box\"\n )\n entities_keys.append(entity_key_container)\n\n entity_key_container = html.Div(\n entities_keys, className=\"columns is-multiline has-margin-5\"\n )\n\n results = html.Div(\n [\n warning,\n label_container,\n highlighted_container,\n label_label_container,\n entity_key_container,\n download_container,\n ]\n )\n return results", "def add_color(tweets):\n colors = list(Color(\"red\").range_to(Color(\"green\"), 100))\n for t in tweets:\n print t\n score = t['score']\n colorscore = (score + 1) / 2 * 100\n color = colors[int(colorscore)]\n t['color'] = color\n\n return tweets", "def render_tweet_text(tweet):\n text = microblogging_tags.render_tweet_text(tweet)\n text = template.defaultfilters.urlize(text)\n return text", "def recover_entity_mentions(self, tcfg, snt):\n tokens = re.split('[ ]+', snt.text)\n # build entity coreference like ENT1(ENT2)\n if tcfg.ent_coref:\n tokens, coref_pair = bio_coref_build(tokens)\n for emid1, emid2 in coref_pair:\n self.crfdict[emid1].append(emid2)\n self.get_entity_mention(emid2).visible = False # disable visible if it is a coreferent\n # build entity mention list for the line\n # entity mention refilling for trigger word and entity in GE09 task\n ntokens = [] # new tokens refilled\n for j, token in enumerate(tokens):\n etype, emid = is_bio_entity(token)\n if emid:\n oem = self.get_entity_mention(emid) # original entity mention\n if oem and etype == oem.type: # emid is found and the type matches\n tlen = len(ntokens)\n oem = self.get_entity_mention(emid)\n #if task == 've' and oem.type in ('TRIG', 'ENTI'): # trigger and entities replacement\n # not in the blinded entity types, refill the placeholder with the original name\n if oem.type not in tcfg.bld_ent_types:\n names = oem.name.split('_') # originally separated by space\n ntokens.extend(names)\n em = oem.__copy__(lineno=snt.no, hsno=tlen, heno=tlen+len(names))\n else:\n em = oem.__copy__(lineno=snt.no, hsno=tlen, heno=tlen+1)\n ntokens.append(token)\n snt.append_entity_mention(em)\n for cid in self.crfdict[emid]:\n em = 
self.get_entity_mention(cid).__copy__(lineno=snt.no, hsno=tlen, heno=tlen+1)\n snt.append_entity_mention(em)\n continue\n # elif verbose: # a mismatched entity placeholder\n # print('EntNotFound: {} in {}'.format(token, self.id))\n ntokens.append(token)\n # update the sentence\n snt.text = ' '.join(ntokens)\n return", "def extract_entities(self, text):\n results = self.fetch(self.base_url, text)\n return [_ for _ in self.process_results(results)]", "def scrape_tweet(tweet):\n\n\n dateUntil = tweet.created_at + timedelta(1)\n tweetCriteria = got.manager.TweetCriteria().setUsername(tweet.author.screen_name).setSince(\n tweet.created_at.strftime(\"%Y-%m-%d\")).setUntil(dateUntil.strftime(\"%Y-%m-%d\")).setMaxTweets(-1)\n found = False\n tweets = got.manager.TweetManager.getTweets(tweetCriteria)\n for tw in tweets:\n if tw.id == tweet.id_str:\n tweet.reply_count = tw.replies\n break;\n return tweet", "def _parse_entities(self, tagged_text):\n return (m.groups() for m in INLINEXML_EPATTERN.finditer(tagged_text))", "def preprocessing(company, lang):\n\n # get tweets\n tweets = np.array(execute(\"SELECT * FROM tweet WHERE searchterm = '@\" + company + \"'\"))\n tweets = tweets[:,2]\n\n # count retweets\n pattern = re.compile(\"^RT \")\n rt_tweets = [ tweet for tweet in tweets if pattern.match(tweet) ]\n\n # only lang tweets\n lang_tweets = []\n for tweet in rt_tweets:\n try:\n if detect(tweet) == lang:\n lang_tweets.append(tweet)\n except:\n continue\n\n # no urls\n url = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n no_url_tweets = [ re.sub(url, '', tweet) for tweet in lang_tweets ]\n\n # remove @ words\n no_arobas_tweets = [ re.sub(r\"([@?]\\w+)\\b\", '', text) for text in no_url_tweets ]\n\n # remove non-alphanumerical characters\n only_alphanum_tweets = [ re.sub(r'[^\\w]', ' ', text) for text in no_arobas_tweets ]\n\n # tokenizing\n tokenized_tweets = [ tweet.split(\" \") for tweet in only_alphanum_tweets ]\n\n # lower tweets and remove one char words\n lowered_tweets = [ [ word.lower() for word in text if len(word) > 1 ] for text in tokenized_tweets ]\n \n # remove stopwords\n stopwords = open(\"./stopwords\").read().split(\"\\n\")\n stopwords += [\"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\", \"sun\", \n \"jan\", \"feb\", \"mar\", \"apr\", \"may\", \"jun\", \"jul\", \"aug\", \"sep\", \"oct\", \"nov\", \"dec\",\n \"amp\", \"rt\", \"https\"]\n filtered_tweets = [ [ word for word in text if word not in stopwords ] for text in lowered_tweets ]\n\n # isolate bigrams\n bigrams = mark_bigrams(filtered_tweets)\n\n # reduce to one list of words\n flat_text_bigrams = [ word for tweet in bigrams for word in tweet ]\n flat_text = [ word for tweet in filtered_tweets for word in tweet ]\n\n # get frequency dictionary\n frequ = collections.Counter(flat_text_bigrams).most_common()\n\n # return format\n # * name company\n # * number tweets\n # * nb retweet\n # * language chosen\n # * nb tweet in chosen language\n # * nb words\n # * nb unique words\n data = (company, len(tweets), len(rt_tweets), lang, len(lang_tweets), len(flat_text_bigrams), len(frequ), filtered_tweets)\n\n return data", "def read_ner_tags_file(path):\n ENTITY_BEGIN = 'B-ENTITY'\n ENTITY_INTERMEDIATE = 'I-ENTITY'\n all_entities = []\n\n with open(path, 'r') as f:\n for tweet in f:\n words = [x[:x.rfind('/')] for x in tweet.split()]\n tags = [x[x.rfind('/')+1:] for x in tweet.split()]\n curr_entity = None\n ents = []\n for i in xrange(len(words)):\n if tags[i] == ENTITY_BEGIN:\n if curr_entity:\n 
ents.append(curr_entity)\n curr_entity = words[i]\n if (i+1 == len(words)) or tags[i+1] != ENTITY_INTERMEDIATE:\n ents.append(curr_entity)\n curr_entity = None\n elif tags[i] == ENTITY_INTERMEDIATE:\n curr_entity += (' ' + words[i])\n if (i+1 == len(words)) or tags[i+1] != ENTITY_INTERMEDIATE:\n ents.append(curr_entity)\n curr_entity = None\n all_entities.append(ents)\n\n return all_entities", "def transfer_document_entity_mentions(self, verbose=0):\n #\n self.mask_outer_entity_mentions()\n lineno = 0 # start from line 0\n for em in self.emlist:\n if not em.visible: continue # nested or duplicated\n spos, epos = -1, -1\n while lineno < len(self.sntlist):\n offsets = self.sntlist[lineno].offsets\n # entity mention is in the sentence\n if len(offsets) == 0: # empty sentence\n i = 0 # null statement\n elif em.hsno >= offsets[0][0] and em.heno <= offsets[-1][1]:\n for i, offset in enumerate(offsets):\n # some erroneous annotation which omits one preceeding char\n if spos < 0 and (offset[0] == em.hsno or (offset[0] == em.hsno-1 and offset[1]-offset[0] > 1)):\n spos = i\n if spos >=0 and epos < 0 and offset[1] >= em.heno: epos = i\n if spos >= 0 and epos >= 0: break\n break\n # entity mention across multiple sentences\n elif em.hsno < offsets[-1][1] and em.heno > offsets[-1][1]:\n if verbose:\n print('\\nEntAcrossMultiSents: {} {}'.format(self.id, em))\n break\n lineno += 1\n # valid entity mention\n if spos >= 0 and epos >= 0:\n nem = em.__copy__(lineno=lineno, hsno=spos, heno=epos+1)\n self.sntlist[lineno].append_entity_mention(nem)\n # debug purpose\n if verbose: # the numbers of entity mentions in doc and snt are different\n dset = set(em.id for em in self.emlist if em.visible)\n sset = set(em.id for snt in self.sntlist for em in snt.emlist)\n dno, sno = len(dset), len(sset)\n if verbose >= 1 and dno > sno:\n print('\\nEntNumDocSnts: {} {} {}'.format(self.id, dno, sno))\n if verbose >= 2:\n #for em in self.emlist: print(em)\n eids = dset.difference(sset)\n print('Diff:')\n for eid in eids: print(self.get_entity_mention(eid))\n return", "def preprocess_twitter(post):\n # TODO\n return post", "def tweet2rest(tweets_json):\n for tweet in tweets_json:\n if not tweet['retweeted'] and tweet['in_reply_to_status_id_str'] == None and tweet['text'][0] != '@' and not 'Instagram' in tweet['source']:\n text = tweet['text']\n summary = text.split(\"\\n\")[0]\n data = \"####################\\n\"\n data += tweet['id_str'] + \"\\n\"\n data += \"####################\\n\"\n data += \"\\n\"\n date = datetime.strptime(tweet['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n data += \":date: \" + date.astimezone(tz.gettz('Europe/Paris')).strftime(\"%Y-%m-%d %H:%M:%S\") + \"\\n\"\n if \"entities\" in tweet.keys():\n if \"hashtags\" in tweet['entities'].keys():\n if len(tweet['entities']['hashtags']) > 0:\n data += \":tags: \"\n for tag in tweet['entities']['hashtags']:\n data += \"#\" + tag['text'] + \", \"\n data = data[:-2] # Remove last comma-space\n data += \"\\n\"\n if \"media\" in tweet['entities'].keys() and tweet['entities']['media'][0]['type'] == \"photo\":\n data += \":image: {photo}../images/tweets/\" + tweet['entities']['media'][0]['id_str'] + \".jpg\\n\"\n data += \":og_image: /images/tweets/\" + tweet['entities']['media'][0]['id_str'] + \".jpg\\n\"\n img = urllib.request.urlopen(tweet['entities']['media'][0]['media_url']).read()\n try:\n stat(\"./content/images\")\n except:\n mkdir(\"./content/images\")\n try:\n stat(\"./content/images/tweets\")\n except:\n mkdir(\"./content/images/tweets\")\n 
Image.open(io.BytesIO(img)).save(\"./content/images/tweets/\" + tweet['entities']['media'][0]['id_str'] + \".jpg\", quality=95, optimize=True)\n logging.debug(\"Image \" + tweet['entities']['media'][0]['id_str'] + \".jpg saved\")\n for img in tweet['entities']['media']:\n summary = summary.replace(img['url'], '')\n text = text.replace(img['url'], '')\n #TODO : Add gallery support for multiple photos in a tweet.\n for url in URLExtract().find_urls(text):\n text = text.replace(url, \"`\"+url+\" <\"+url+\">`_\")\n text_2 = list()\n for word in text.split():\n if word[0] == \"@\" or word[0:2] == \".@\" :\n if word[-1].isalnum(): # Take care of non alphanum at the end, like comma or point.\n word = word.replace(word, '`' + word + ' <https://twitter.com/' + word[1:] + '>`_')\n else:\n word = word.replace(word, '`' + word[:-1] + ' <https://twitter.com/' + word[1:-1] + '>`_' + word[-1])\n if word[0] == \"#\":\n if word[-1].isalnum():\n word = word.replace(word, '`' + word + ' <https://twitter.com/hashtag/' + word[1:] + '>`_')\n else:\n word = word.replace(word, '`' + word[:-1] + ' <https://twitter.com/hashtag/' + word[1:-1] + '>`_' + word[-1])\n text_2.append(word)\n text = ' '.join(text_2)\n data += \":summary: \" + summary + \"\\n\"\n data += \"\\n\"\n data += text\n try:\n stat(\"./content/SocialNetworks\")\n except:\n mkdir(\"./content/SocialNetworks\")\n f = open(\"./content/SocialNetworks/tweet_\" + tweet['id_str'] + \".rst\", \"w\", encoding=\"UTF-8\")\n f.write(data)\n logging.debug(\"Tweet number \" + tweet['id_str'] + \" saved !\")\n f.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple signal handler to say goodbye to the user
def signal_handler(sig, frame):
    print('\nBye! See you soon...')
    sys.exit(0)
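For context, a minimal sketch of how a handler like this is typically registered, assuming Python's standard signal module (the registration call is not part of the snippet above):

import signal
import sys

def signal_handler(sig, frame):
    print('\nBye! See you soon...')
    sys.exit(0)

# Register the handler for Ctrl-C (SIGINT) and wait for a signal.
signal.signal(signal.SIGINT, signal_handler)
signal.pause()  # POSIX only; on Windows a sleep loop would be used instead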
[ "def signal_handler(self, signum, frame):\n exit(0)", "def signal_handler(signum, frame):\n\n raise ProgramKilledError", "def quit_signal(self):\n\t\tprint 'Emitted a quit signal'", "def signal_handler(sig_num, frame):\n\n global exit_flag\n if sig_num == signal.SIGINT:\n logger.warning(\n \" SIGINT recieved from the os: program terminated w/ ctr-c\"\n )\n exit_flag = True\n elif sig_num == signal.SIGTERM:\n logger.warning(\" SIGTERM recieved from the os: program terminated\")\n exit_flag = True", "def sigterm_handler(_signo, _stack_frame):\n do_exit()", "def _sigint_handler(self, signum, frame):\n self._sigint_caught = True\n logger.debug(\" >> SIGINT caught.\")", "def __signal_handler(signal, frame):\n global INTERRUPTED\n INTERRUPTED = True", "def signal_handler(signum, frame):\n print(glog.red(f\"Caught signal {signal.Signals(signum).name}. Exiting...\"))\n close_section_logs()\n sys.exit(0)", "def sigint_caught(self):\n self._sigint_response = self._sigint_caught\n self._sigint_caught = False\n return self._sigint_response", "def test_ignore_return(self):\n from signal import signal, SIGUSR1, SIG_DFL, SIG_IGN\n\n try:\n for handler in SIG_DFL, SIG_IGN, lambda *a: None:\n signal(SIGUSR1, SIG_IGN)\n assert signal(SIGUSR1, handler) == SIG_IGN\n finally:\n signal(SIGUSR1, SIG_DFL)", "def handle_sigint(num, frame):\n print()\n if num == signal.Signals.SIGINT:\n print(f'\\nReceived a SIGINT\\n')", "def sigterm(self, num, frame):\n self.quit()", "def set_signal_handler():\n def signal_handler(signalnum: int, frame: TypeVar(\"Frame\")):\n \"\"\"Handle Ctrl-C signals(KeyboardInterupt) gracefully.\n\n Parameters\n ----------\n signalnum: int\n signam identifier\n frame: Frame\n current stack frame\n \"\"\"\n print(\"\\nAborting by user request...\")\n sys.exit()\n\n signal.signal(signal.SIGINT, signal_handler)", "def test01_abortive_signals(self):\n\n if ispypy:\n py.test.skip('signals not yet implemented')\n\n import cppyy\n import cppyy.ll\n\n f = cppyy.gbl.fragile\n\n assert issubclass(cppyy.ll.BusError, cppyy.ll.FatalError)\n assert issubclass(cppyy.ll.SegmentationViolation, cppyy.ll.FatalError)\n assert issubclass(cppyy.ll.IllegalInstruction, cppyy.ll.FatalError)\n assert issubclass(cppyy.ll.AbortSignal, cppyy.ll.FatalError)\n\n import os\n os.putenv('CPPYY_CRASH_QUIET', '1')\n\n with raises((cppyy.ll.SegmentationViolation, cppyy.ll.IllegalInstruction)):\n with cppyy.ll.signals_as_exception():\n f.segfault()\n\n with raises(cppyy.ll.AbortSignal):\n with cppyy.ll.signals_as_exception():\n f.sigabort()\n\n # can only recover once from each error on Windows, which is functionally\n # enough, but precludes further testing here\n if not IS_WINDOWS:\n cppyy.ll.set_signals_as_exception(True)\n with raises((cppyy.ll.SegmentationViolation, cppyy.ll.IllegalInstruction)):\n f.segfault()\n with raises(cppyy.ll.AbortSignal):\n f.sigabort()\n cppyy.ll.set_signals_as_exception(False)\n\n f.segfault.__sig2exc__ = True\n with raises((cppyy.ll.SegmentationViolation, cppyy.ll.IllegalInstruction)):\n f.segfault()\n\n f.sigabort.__sig2exc__ = True\n with raises(cppyy.ll.AbortSignal):\n f.sigabort()", "def default_int_handler(sig, frame):\r\n raise KeyboardInterrupt", "def test_default_return(self):\n from signal import signal, SIGUSR1, SIG_DFL, SIG_IGN\n\n try:\n for handler in SIG_DFL, SIG_IGN, lambda *a: None:\n signal(SIGUSR1, SIG_DFL)\n assert signal(SIGUSR1, handler) == SIG_DFL\n finally:\n signal(SIGUSR1, SIG_DFL)", "def test_getsignal(self):\n from signal import getsignal, signal, SIGUSR1, SIG_DFL, 
SIG_IGN\n\n def handler(*a):\n pass\n\n try:\n assert getsignal(SIGUSR1) == SIG_DFL\n signal(SIGUSR1, SIG_DFL)\n assert getsignal(SIGUSR1) == SIG_DFL\n signal(SIGUSR1, SIG_IGN)\n assert getsignal(SIGUSR1) == SIG_IGN\n signal(SIGUSR1, handler)\n assert getsignal(SIGUSR1) is handler\n finally:\n signal(SIGUSR1, SIG_DFL)\n\n raises(ValueError, getsignal, 4444)\n raises(ValueError, signal, 4444, lambda *args: None)", "def alarmHandler(signum, frame):\n raise Alarm", "def signalHandler(signal, frame):\r\n global Processes\r\n print('Stopping Lagramge!')\r\n for proc in Processes:\r\n proc.send_signal(signal)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Output polarity scores for a text using the VADER approach.
def polarity(text):
    vader_analyzer = SentimentIntensityAnalyzer()
    return (vader_analyzer.polarity_scores(text))
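A minimal usage sketch for the function above; it assumes the standalone vaderSentiment package is where SentimentIntensityAnalyzer comes from (NLTK ships the same class under nltk.sentiment.vader):

from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

def polarity(text):
    vader_analyzer = SentimentIntensityAnalyzer()
    return vader_analyzer.polarity_scores(text)

# Returns a dict with 'neg', 'neu', 'pos' and 'compound' scores.
print(polarity("This library is surprisingly pleasant to use!"))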
[ "def print_polarity_from_input(quest, text):\n if quest == 'naive':\n blob = Naive_Analysis(text).sentiment\n return blob\n #this will be: Sentiment(classification='pos', p_pos=0.5702702702702702, p_neg=0.4297297297297299)\n else:\n blob = TextBlob(text).sentiment\n return blob.polarity", "def nltk_sentiment_analyzer(summary):\n score = SentimentIntensityAnalyzer().polarity_scores(summary)\n print(score)", "def sentiment_analysis(text):\n\n # pass text into sentiment url\n if True:\n ret = get_sentiment_from_url(text, sentimentURL)\n if ret is None:\n sentiment_url = None\n else:\n sentiment_url, neg_url, pos_url, neu_url = ret\n else:\n sentiment_url = None\n\n # pass text into TextBlob\n text_tb = TextBlob(text)\n\n # pass text into VADER Sentiment\n analyzer = SentimentIntensityAnalyzer()\n text_vs = analyzer.polarity_scores(text)\n\n # determine sentiment from our sources\n if sentiment_url is None:\n #threshold values\n if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05:\n sentiment = \"negative\"\n elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05:\n sentiment = \"positive\"\n else:\n sentiment = \"neutral\"\n else:\n # this works if the above function executes properly\n if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05 and sentiment_url == \"negative\":\n sentiment = \"negative\"\n elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05 and sentiment_url == \"positive\":\n sentiment = \"positive\"\n else:\n sentiment = \"neutral\"\n\n polarity = (text_tb.sentiment.polarity + text_vs['compound']) / 2\n\n # output sentiment polarity\n print(\"************\")\n print(\"Sentiment Polarity: \" + str(round(polarity, 3)))\n\n # output sentiment subjectivity (TextBlob)\n print(\"Sentiment Subjectivity: \" + str(round(text_tb.sentiment.subjectivity, 3)))\n\n # output sentiment\n print(\"Sentiment (url): \" + str(sentiment_url))\n print(\"Sentiment (algorithm): \" + str(sentiment))\n print(\"Overall sentiment (textblob): \", text_tb.sentiment)\n print(\"Overall sentiment (vader): \", text_vs)\n print(\"sentence was rated as \", round(text_vs['neg']*100, 3), \"% Negative\")\n print(\"sentence was rated as \", round(text_vs['neu']*100, 3), \"% Neutral\")\n print(\"sentence was rated as \", round(text_vs['pos']*100, 3), \"% Positive\")\n print(\"************\")\n\n return polarity, text_tb.sentiment.subjectivity, sentiment", "def get_polarity_score(self, doc):\n\n if self.algorithm == \"nltk_vader\":\n return self._sia.polarity_scores(doc)[\"compound\"]\n elif self.algorithm == \"ML-Senticon\":\n return self.spa_polarity_score(doc)", "def get_vader_scores(review):\n scores = []\n for sentence in review:\n scores.append(ANALYZER.polarity_scores(sentence)['compound'])\n return scores", "def calculate_polarity(self, text_series, batch_size):\n\n nlp = spacy.load(\"en_core_web_sm\")\n\n spacy_text_blob = SpacyTextBlob()\n\n nlp.add_pipe(spacy_text_blob)\n\n polarity = [sentence._.sentiment.polarity for sentence in tqdm(nlp.pipe(text_series, batch_size=batch_size), total=len(text_series))] # List comprehension for calculating polarity for each sentence\n\n return polarity", "def sentiment_analysis(text):\n return SentimentIntensityAnalyzer().polarity_scores(skip_gutenberg_header_and_tail(text))", "def set_waveform_polarity(self, invert=False):\n _polarity = 'NORM' if not invert else 'INV'\n self.com.send(f'OUTP:POL {_polarity}')\n return", "def extract_sentiment(text):\n text = TextBlob(text)\n return text.sentiment.polarity", "def 
spa_polarity_score(self, doc):\n mlsscore = 0\n for word in doc.split():\n lem_word = self.lemmatize_spa(word)\n if word in self.mlsent.keys():\n mlsscore = mlsscore + self.mlsent[word]\n elif lem_word in self.mlsent.keys():\n mlsscore = mlsscore + self.mlsent[lem_word]\n if mlsscore > self.max_score:\n self.max_score = mlsscore\n if mlsscore < self.min_score:\n self.min_score = mlsscore\n return mlsscore", "async def on_linear_actuator_motor_motor_polarity(\n self, state: int\n ) -> None:\n self.print('{} Motor: polarity {}'.format(\n self.axis, 'flipped' if state == -1 else 'normal'\n ))", "def scorepersentence(reviews):\n vs = []\n for sentence in reviews:\n vs.append(analyzer.polarity_scores(sentence))\n return vs", "def main():\n\n # command line parsing\n parser = buildParser()\n args = parser.parse_args()\n\n\n # construct the tweet pro-processing object\n tweetTokenizer = TweetTokenizer()\n lPunct = list(string.punctuation)\n lStopwords = stopwords.words('english') + lPunct + ['rt', 'via', '...', '…', '\"', \"'\", '`']\n\n tweetProcessor = TwitterProcessing(tweetTokenizer, lStopwords)\n\n\n # load set of positive words\n lPosWords = []\n with open(args.posWordFile, 'r', encoding='utf-8', errors='ignore') as fPos:\n for sLine in fPos:\n lPosWords.append(sLine.strip())\n\n setPosWords = set(lPosWords)\n\n\n # load set of negative words\n lNegWords = []\n with codecs.open(args.negWordFile, 'r', encoding='utf-8', errors='ignore') as fNeg:\n for sLine in fNeg:\n lNegWords.append(sLine.strip())\n\n setNegWords = set(lNegWords)\n\n # compute the sentiment\n lSentiment = []\n if args.approach == 'count':\n lSentiment = countWordSentimentAnalysis(setPosWords, setNegWords, args.tweetsFile, args.print, tweetProcessor)\n elif args.approach == 'vader':\n lSentiment = vaderSentimentAnalysis(args.tweetsFile, args.print, tweetProcessor)\n\n\n # determine if we should output a time series of sentiment scores across time\n if args.ts:\n # TODO: write code to display the time series\n # we are using pandas for this, but first we need to get it into a pandas data frame structure\n series = pd.DataFrame(lSentiment, columns=['date', 'sentiment'])\n # tell pandas that the date column is the one we use for indexing (or x-axis)\n series.set_index('date', inplace=True)\n # pandas makes a guess at the type of the columns, but to make sure it doesn't get it wrong, we set the sentiment\n # column to floats\n series[['sentiment']] = series[['sentiment']].apply(pd.to_numeric)\n\n # This step is not necessary, but pandas has a neat function that allows us to group the series at different\n # resultion. The 'how=' part tells it how to group the instances. 
In this example, it sames we want to group\n # by day, and add up all the sentiment scores for the same day and create a new time series called 'newSeries'\n # with this day resolution\n # TODO: play with this for different resolution, '1H' is by hour, '1M' is by minute etc\n sentimentSeries = series.resample('1H').sum()\n tweetCountSeries = series.resample('1H').count()\n \n # this plots and shows the time series\n plt.figure(figsize=(6,3), dpi = 100)\n plt.plot(sentimentSeries)\n plt.plot(tweetCountSeries)\n plt.legend(['Sentiment', 'Tweet Count'], loc='upper left')\n plt.savefig('fig6.png')\n plt.show()\n plt.close()", "def polarity(self) -> Polarity:\n try:\n cid = self.data[0][0][\"identification\"]\n except IndexError:\n return Polarity(\"positive\")\n return Polarity(cid.mz_references[0].detected_polarity)", "def addPolarity(coll):\n for doc in coll.find():\n polarity, category = tweetPolarityOneHot(doc['full_text'])\n\n coll.update_one({\"_id\": doc[\"_id\"]}, {\"$set\": {\n \"polarity\": polarity,\n \"positive\": category[0],\n \"neutral\": category[1],\n \"negative\": category[2]\n }})", "def add_berttone_polarity(nlp, verbose: bool = True):\n return add_danlp_model(\n nlp,\n download_name=\"bert.polarity\",\n subpath=\"bert.pol.v0.0.1\",\n doc_extention=\"berttone_pol_trf_data\",\n model_name=\"berttone_pol\",\n category=\"polarity\",\n labels=[\"positive\", \"neutral\", \"negative\"],\n verbose=verbose,\n )", "def tag_sentiment_type(polarity):\n\n if polarity < 0:\n sentiment_type = 'negative'\n\n elif polarity > 0:\n sentiment_type = 'positive'\n\n else:\n sentiment_type = 'neutral'\n\n return sentiment_type", "def __get_tweet_polarity(self, tweet):\n analysis = TextBlob(self.__normalize_tweet(tweet))\n return analysis.sentiment.polarity", "def sentiment_analysis_by_text(self,tweet):\n blob = TextBlob(tweet['text'].decode('ascii', errors=\"replace\"))\n sentiment_polarity = blob.sentiment.polarity\n if sentiment_polarity < 0:\n sentiment = self.NEGATIVE\n elif sentiment_polarity <= 0.25:\n sentiment = self.NEUTRAL\n else:\n sentiment = self.POSITIVE\n tweet['sentiments'] = sentiment" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the active policy when MSVC is not found.
def msvc_get_notfound_policy():
    debug(
        'policy.symbol=%s, policy.value=%s',
        repr(_MSVC_NOTFOUND_POLICY_DEF.symbol), repr(_MSVC_NOTFOUND_POLICY_DEF.value)
    )
    return _MSVC_NOTFOUND_POLICY_DEF.symbol
[ "def msvc_get_scripterror_policy():\n debug(\n 'policy.symbol=%s, policy.value=%s',\n repr(_MSVC_SCRIPTERROR_POLICY_DEF.symbol), repr(_MSVC_SCRIPTERROR_POLICY_DEF.value)\n )\n return _MSVC_SCRIPTERROR_POLICY_DEF.symbol", "def is_policy(self):\n return self._policy", "def promisc_mode_policy(self):\n ret = self._get_attr(\"promiscModePolicy\")\n return NetworkAdapterPromiscModePolicy(ret)", "def isMSVC(cls, compiler, log):\n output, error, _ = cls.executeShellCommand(compiler + ' --version', checkCommand=noCheck, log=log)\n output = '\\n'.join((output, error)).casefold()\n found = all(\n sub.casefold() in output for sub in ('microsoft', 'c/c++ optimizing compiler')\n )\n if log:\n log.write('Detected MSVC\\n' if found else 'Did not detect MSVC\\n')\n return int(found)", "def get_execution_policy(self):\n return self.session.run_ps(GET_EXECUTION_POLICY).std_out", "def __get_image_policy():\n image_policy = os.environ.get(\"CONNAISSEUR_IMAGE_POLICY\")\n\n path = f\"apis/connaisseur.policy/v1/imagepolicies/{image_policy}\"\n response = k_api.request_kube_api(path)\n\n return response[\"spec\"]", "def test_get_default_policy__strict(self):\n policy = csp.get_default_policy(nonce='12345')\n self.assertCountEqual(list(csp.DEFAULT_POLICY.keys()), list(policy.keys()))\n self.assertIn('\\'strict-dynamic\\'', policy['script-src'])\n self.assertIn(\"'nonce-12345'\", policy['script-src'])", "def get_policy(self, policy):\r\n return self.manager.get_policy(self, policy)", "def get_random_gc_policy():\n policies = _get_gc_expiration_policies()\n if not policies:\n return None\n\n return random.choice(policies)", "def get_soft_limit_mode(self):\n return MOT_LimitsSoftwareApproachPolicy(self.sdk.SCC_GetSoftLimitMode(self._serial))", "def get_cli_active_cloud():\n\n try:\n from azure.cli.core.cloud import get_active_cloud\n except ImportError:\n raise ImportError(\"You need to install 'azure-cli-core' to load CLI active Cloud\")\n return get_active_cloud()", "def _QueryPolicyValue(value_name, expected_type=winreg.REG_DWORD):\n system_policy_path = (r'Software\\Microsoft\\Windows'\n r'\\CurrentVersion\\Policies\\System')\n\n try:\n hklm = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)\n policy_key = winreg.OpenKeyEx(hklm, system_policy_path)\n value, data_type = winreg.QueryValueEx(policy_key, value_name)\n return value if data_type == expected_type else None\n except FileNotFoundError:\n return None", "def test_get_default_policy__strict_two(self):\n policy = csp.get_default_policy(nonce='12345')\n self.assertCountEqual(list(csp.NONCE_ONLY_POLICY.keys()), list(policy.keys()))\n self.assertNotIn('strict-dynamic', policy['script-src'])\n self.assertIn(\"'nonce-12345'\", policy['script-src'])", "def advapi32_SaferGetPolicyInformation(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwScopeId\", \"SaferPolicyInfoClass\", \"InfoBufferSize\", \"InfoBuffer\", \"InfoBufferRetSize\", \"lpReserved\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def update_policy_level(self):\n return (\n max(change.update_policy.level for change in self.changes)\n if len(self.changes)\n else UpdatePolicy.SUPPORTED.level\n )", "def getPolicybyGuid(self):\n computersURL = '/policies/89912c9e-8dbd-4c2b-a1d8-dee8a0c2bb29'\n apiRequest = Wrapper_API()\n apiResponse = apiRequest.send_api_request(computersURL)\n return apiResponse", "def get_policy_type(policy):\n if not isinstance(policy, dict):\n return NetworkPolicy.PolicyType.Unknown\n\n kind = policy.get('kind')\n 
api_version = policy.get('apiVersion')\n if not kind or not api_version:\n return NetworkPolicy.PolicyType.Unknown\n if not isinstance(kind, str) or not isinstance(api_version, str):\n return NetworkPolicy.PolicyType.Unknown\n\n policy_type = NetworkPolicy.PolicyType.Unknown\n if kind.endswith('List'):\n policy_type = NetworkPolicy.PolicyType.List\n elif 'calico' in api_version:\n if kind == 'Profile':\n policy_type = NetworkPolicy.PolicyType.CalicoProfile\n elif kind == 'NetworkPolicy':\n policy_type = NetworkPolicy.PolicyType.CalicoNetworkPolicy\n elif kind == 'GlobalNetworkPolicy':\n policy_type = NetworkPolicy.PolicyType.CalicoGlobalNetworkPolicy\n elif 'istio' in api_version:\n if kind == 'AuthorizationPolicy':\n policy_type = NetworkPolicy.PolicyType.IstioAuthorizationPolicy\n elif kind == 'NetworkPolicy':\n policy_type = NetworkPolicy.PolicyType.K8sNetworkPolicy\n\n return policy_type", "def get_optimal_policy(self):\n # first compute the solution to the CARE: P_optimal\n self.compute_optimal_cost_matrix()\n # policy K = - gamma * R^-1 * B' * P\n # action can be computed as K * x\n return - self.gamma * np.linalg.pinv(self.R).dot(self.B.T).dot(self.P_optimal)", "def compliance_control(self):\n return self._compliance_control" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the default policy when msvc batch file execution errors are detected.
def msvc_set_scripterror_policy(MSVC_SCRIPTERROR_POLICY=None):
    global _MSVC_SCRIPTERROR_POLICY_DEF

    prev_policy = _MSVC_SCRIPTERROR_POLICY_DEF.symbol

    policy = MSVC_SCRIPTERROR_POLICY
    if policy is not None:
        _MSVC_SCRIPTERROR_POLICY_DEF = _msvc_scripterror_policy_lookup(policy)

    debug(
        'prev_policy=%s, set_policy=%s, policy.symbol=%s, policy.value=%s',
        repr(prev_policy), repr(policy),
        repr(_MSVC_SCRIPTERROR_POLICY_DEF.symbol), repr(_MSVC_SCRIPTERROR_POLICY_DEF.value)
    )

    return prev_policy
[ "def set_execution_policy_to_restrict(self):\n code_status = self.session.run_ps('%s restricted' % SET_EXECUTION_POLICY).status_code\n return SUCCESSFUL if code_status == 0 else ERROR", "def set_policy(self, policy):\n self._policy = 'custom'\n self._P = policy", "def _set_transaction_safety_enforcement_policy(policy):\n assert policy in ('ts-enforce-none', 'ts-enforce-all',\n 'ts-enforce-all-except-user-lock'), policy\n _REQUEST_STATE.ts_enforcement_policy = policy", "def msvc_get_scripterror_policy():\n debug(\n 'policy.symbol=%s, policy.value=%s',\n repr(_MSVC_SCRIPTERROR_POLICY_DEF.symbol), repr(_MSVC_SCRIPTERROR_POLICY_DEF.value)\n )\n return _MSVC_SCRIPTERROR_POLICY_DEF.symbol", "def set_termination_message_policy(\n self, termination_message_policy) -> 'Container':\n if termination_message_policy not in ['File', 'FallbackToLogsOnError']:\n raise ValueError(\n 'terminationMessagePolicy must be `File` or `FallbackToLogsOnError`'\n )\n self.termination_message_policy = termination_message_policy\n return self", "def set_base_policy(self, policy):\n if policy in self.matched_policies:\n self.policy = policy\n else:\n raise ValueError(\"Policy is not valid for this candidate policy\")", "def advapi32_AuditSetSystemPolicy(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pAuditPolicy\", \"PolicyCount\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def set_execution_policy_to_unrestricted(self):\n code_status = self.session.run_ps('%s Unrestricted' % SET_EXECUTION_POLICY).status_code\n return SUCCESSFUL if code_status == 0 else ERROR", "def __check_security_policy(self):\n\n cmd = \"setenforce 0; \"\n\n cmd = cmd + \"supolicy --live \\\"allow init logd dir getattr\\\";\"\n\n # # Depreciated supolicies. Still keep them for backup purpose\n cmd = cmd + \"supolicy --live \\\"allow init init process execmem\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow atfwd diag_device chr_file {read write open ioctl}\\\";\"\n cmd = cmd + \"supolicy --live \\\"allow init properties_device file execute\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow system_server diag_device chr_file {read write}\\\";\"\n\n # # Suspicious supolicies: MI works without them, but it seems that they SHOULD be enabled...\n\n # # mi2log permission denied (logcat | grep denied), but no impact on log collection/analysis\n cmd = cmd + \\\n \"supolicy --live \\\"allow untrusted_app app_data_file file {rename}\\\";\"\n\n # # Suspicious: why still works after disabling this command? 
Won't FIFO fail?\n cmd = cmd + \\\n \"supolicy --live \\\"allow init app_data_file fifo_file {write open getattr}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow init diag_device chr_file {getattr write ioctl}\\\"; \"\n\n # Nexus 6 only\n cmd = cmd + \\\n \"supolicy --live \\\"allow untrusted_app diag_device chr_file {write open getattr}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow system_server diag_device chr_file {read write}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow netmgrd diag_device chr_file {read write}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow rild diag_device chr_file {read write}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow rild debuggerd app_data_file {read open getattr}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service mnt_user_file dir {search}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service fuse dir {read open search}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service mnt_user_file lnk_file {read}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service fuse file {read append getattr}\\\";\"\n\n main_utils.run_shell_cmd(cmd)", "def command_file_retention_policy(self):\n policy = self.get('agent', self.COMMAND_FILE_RETENTION_POLICY_PROPERTY, default=self.COMMAND_FILE_RETENTION_POLICY_KEEP)\n policies = [self.COMMAND_FILE_RETENTION_POLICY_KEEP,\n self.COMMAND_FILE_RETENTION_POLICY_REMOVE,\n self.COMMAND_FILE_RETENTION_POLICY_REMOVE_ON_SUCCESS]\n\n if policy.lower() in policies:\n return policy.lower()\n else:\n logger.warning('The configured command_file_retention_policy is invalid, returning \"%s\" instead: %s',\n self.COMMAND_FILE_RETENTION_POLICY_KEEP,\n policy)\n return self.COMMAND_FILE_RETENTION_POLICY_KEEP", "def advapi32_AuditSetPerUserPolicy(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pSid\", \"pAuditPolicy\", \"PolicyCount\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def test_get_default_policy__strict(self):\n policy = csp.get_default_policy(nonce='12345')\n self.assertCountEqual(list(csp.DEFAULT_POLICY.keys()), list(policy.keys()))\n self.assertIn('\\'strict-dynamic\\'', policy['script-src'])\n self.assertIn(\"'nonce-12345'\", policy['script-src'])", "def reset_policy(self, policy=None):\n if policy is None:\n self._reset_read_policy()\n self._reset_write_policy()\n elif policy == 'read':\n self._reset_read_policy()\n elif policy == 'write':\n self._reset_write_policy()\n else:\n _LOGGER.error(\"Invalid policy name, do not reset.\")", "def set_limits_software_approach_policy(self, policy):\n policy_ = self.convert_to_enum(policy, MOT_LimitsSoftwareApproachPolicy)\n self.sdk.SCC_SetLimitsSoftwareApproachPolicy(self._serial, policy_)", "def policy(self, input_policy):\n self._policy = input_policy", "def advapi32_LsaSetInformationPolicy(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"PolicyHandle\", \"InformationClass\", \"Buffer\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def test_get_default_policy__strict_two(self):\n policy = csp.get_default_policy(nonce='12345')\n self.assertCountEqual(list(csp.NONCE_ONLY_POLICY.keys()), list(policy.keys()))\n self.assertNotIn('strict-dynamic', policy['script-src'])\n self.assertIn(\"'nonce-12345'\", policy['script-src'])", "def create_fw_policy(self,name):", "def setenforce(mode):\n mode = mode.strip().title()\n assert mode in [\"Permissive\", \"Enforcing\"]\n assert 
Test.Run.command(\"/usr/sbin/setenforce %s\" % mode)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the active policy when msvc batch file execution errors are detected.
def msvc_get_scripterror_policy():
    debug(
        'policy.symbol=%s, policy.value=%s',
        repr(_MSVC_SCRIPTERROR_POLICY_DEF.symbol), repr(_MSVC_SCRIPTERROR_POLICY_DEF.value)
    )
    return _MSVC_SCRIPTERROR_POLICY_DEF.symbol
[ "def msvc_set_scripterror_policy(MSVC_SCRIPTERROR_POLICY=None):\n global _MSVC_SCRIPTERROR_POLICY_DEF\n\n prev_policy = _MSVC_SCRIPTERROR_POLICY_DEF.symbol\n\n policy = MSVC_SCRIPTERROR_POLICY\n if policy is not None:\n _MSVC_SCRIPTERROR_POLICY_DEF = _msvc_scripterror_policy_lookup(policy)\n\n debug(\n 'prev_policy=%s, set_policy=%s, policy.symbol=%s, policy.value=%s',\n repr(prev_policy), repr(policy),\n repr(_MSVC_SCRIPTERROR_POLICY_DEF.symbol), repr(_MSVC_SCRIPTERROR_POLICY_DEF.value)\n )\n\n return prev_policy", "def set_execution_policy_to_restrict(self):\n code_status = self.session.run_ps('%s restricted' % SET_EXECUTION_POLICY).status_code\n return SUCCESSFUL if code_status == 0 else ERROR", "def get_execution_policy(self):\n return self.session.run_ps(GET_EXECUTION_POLICY).std_out", "def msvc_get_notfound_policy():\n debug(\n 'policy.symbol=%s, policy.value=%s',\n repr(_MSVC_NOTFOUND_POLICY_DEF.symbol), repr(_MSVC_NOTFOUND_POLICY_DEF.value)\n )\n return _MSVC_NOTFOUND_POLICY_DEF.symbol", "def command_file_retention_policy(self):\n policy = self.get('agent', self.COMMAND_FILE_RETENTION_POLICY_PROPERTY, default=self.COMMAND_FILE_RETENTION_POLICY_KEEP)\n policies = [self.COMMAND_FILE_RETENTION_POLICY_KEEP,\n self.COMMAND_FILE_RETENTION_POLICY_REMOVE,\n self.COMMAND_FILE_RETENTION_POLICY_REMOVE_ON_SUCCESS]\n\n if policy.lower() in policies:\n return policy.lower()\n else:\n logger.warning('The configured command_file_retention_policy is invalid, returning \"%s\" instead: %s',\n self.COMMAND_FILE_RETENTION_POLICY_KEEP,\n policy)\n return self.COMMAND_FILE_RETENTION_POLICY_KEEP", "def __check_security_policy(self):\n\n cmd = \"setenforce 0; \"\n\n cmd = cmd + \"supolicy --live \\\"allow init logd dir getattr\\\";\"\n\n # # Depreciated supolicies. Still keep them for backup purpose\n cmd = cmd + \"supolicy --live \\\"allow init init process execmem\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow atfwd diag_device chr_file {read write open ioctl}\\\";\"\n cmd = cmd + \"supolicy --live \\\"allow init properties_device file execute\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow system_server diag_device chr_file {read write}\\\";\"\n\n # # Suspicious supolicies: MI works without them, but it seems that they SHOULD be enabled...\n\n # # mi2log permission denied (logcat | grep denied), but no impact on log collection/analysis\n cmd = cmd + \\\n \"supolicy --live \\\"allow untrusted_app app_data_file file {rename}\\\";\"\n\n # # Suspicious: why still works after disabling this command? 
Won't FIFO fail?\n cmd = cmd + \\\n \"supolicy --live \\\"allow init app_data_file fifo_file {write open getattr}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow init diag_device chr_file {getattr write ioctl}\\\"; \"\n\n # Nexus 6 only\n cmd = cmd + \\\n \"supolicy --live \\\"allow untrusted_app diag_device chr_file {write open getattr}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow system_server diag_device chr_file {read write}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow netmgrd diag_device chr_file {read write}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow rild diag_device chr_file {read write}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow rild debuggerd app_data_file {read open getattr}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service mnt_user_file dir {search}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service fuse dir {read open search}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service mnt_user_file lnk_file {read}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service fuse file {read append getattr}\\\";\"\n\n main_utils.run_shell_cmd(cmd)", "def update_policy_level(self):\n return (\n max(change.update_policy.level for change in self.changes)\n if len(self.changes)\n else UpdatePolicy.SUPPORTED.level\n )", "def is_policy(self):\n return self._policy", "def advapi32_AuditComputeEffectivePolicyByToken(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hTokenHandle\", \"pSubCategoryGuids\", \"PolicyCount\", \"ppAuditPolicy\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def PolicyEnforcement(self) -> PolicyEnforcement:", "def try_launch_auth (file, policy):\n global gl_auth_proc # We need to assign to this global.\n gl_auth_proc = (None, file, policy)\n success = False\n retries = 0\n while not success and retries <= AUTH_LAUNCH_RETRIES: # We might need to retry this several times.\n try:\n gl_auth_proc = (Popen([file, policy, str(GL_BATCH_SIZE)], stdin=PIPE, stdout=PIPE), file, policy)\n time.sleep(2 * (retries + 1)) # Wait, it might exit immediately if parameters are incorrect.\n success = gl_auth_proc[0].poll() == None\n if success:\n # Wait for state to come back from launched authority.\n state = gl_auth_proc[0].stdout.readline().decode().strip().lower()\n if state != \"ready\":\n success = false\n except:\n print(f'Authority launch failed, retrying (attempt {retries + 1} of {AUTH_LAUNCH_RETRIES})...', file=sys.stderr)\n retries += 1\n return success", "def policy_statements(self) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:\n return self._values.get('policy_statements')", "def advapi32_AuditSetSystemPolicy(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pAuditPolicy\", \"PolicyCount\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_mlapp_policy(self):\n self._verify_mlops_is_ready()\n return self.mlapp_policy", "def get_next_behavior(self):\n self.policy.init(1, 1)\n return self.policy", "def advapi32_AuditComputeEffectivePolicyBySid(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pSid\", \"pSubCategoryGuids\", \"PolicyCount\", \"ppAuditPolicy\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def execute_policy(self, policy):\r\n return self.manager.execute_policy(scaling_group=self, policy=policy)", "def __policy_evaluation(self):\n error = float(\"inf\")\n count = 0\n\n 
num_legal_state = len(self.__state_action_space.get_legal_state_space())\n state_range = [i for i in xrange(0, num_legal_state - 1)]\n\n while error > self.__epsilon or count < 5:\n pre_val_func_vector = deepcopy(self.__val_func_vector)\n\n trans_prob_mat, reward_vector = self.__cal_trans_prob_mat_and_reward_vector(\n self.__policy\n )\n\n val_func_vector_temp = reward_vector + self.__alpha * np.matmul(\n trans_prob_mat,\n self.__val_func_vector\n )\n\n self.__val_func_vector[state_range, :] = val_func_vector_temp[state_range, :]\n\n error = np.linalg.norm(\n pre_val_func_vector -\n self.__val_func_vector\n ) / 24\n if self.optimal_value is not None:\n error2 = np.linalg.norm(\n self.optimal_value -\n self.__val_func_vector\n ) / 24\n self.__error2.append(error2)\n\n if error < self.__epsilon:\n count += 1\n else:\n count = 0\n self.__error.append(error)", "def isMSVC(cls, compiler, log):\n output, error, _ = cls.executeShellCommand(compiler + ' --version', checkCommand=noCheck, log=log)\n output = '\\n'.join((output, error)).casefold()\n found = all(\n sub.casefold() in output for sub in ('microsoft', 'c/c++ optimizing compiler')\n )\n if log:\n log.write('Detected MSVC\\n' if found else 'Did not detect MSVC\\n')\n return int(found)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Detects sentiment in the text.
def detect_sentiment(text):
    document = language.types.Document(
        content=text,
        type=language.enums.Document.Type.PLAIN_TEXT)
    sentiment = client.analyze_sentiment(document).document_sentiment

    return sentiment.score, sentiment.magnitude
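The function above relies on module-level language and client objects that the snippet does not define; a plausible setup sketch, assuming the older google-cloud-language (v1.x) client style that matches the types/enums calls used here, and that application credentials are already configured:

from google.cloud import language

client = language.LanguageServiceClient()

# Uses detect_sentiment() as defined above; requires GOOGLE_APPLICATION_CREDENTIALS to be set.
score, magnitude = detect_sentiment("I really enjoyed this product.")
print(score, magnitude)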
[ "def get_sentiment(text):\n response = requests.post(settings.SENTIMENT_ANALYSIS_API, data={\n 'text': text\n })\n return response.json()", "def extract_sentiment(text):\n text = TextBlob(text)\n return text.sentiment.polarity", "def sentiment_analysis_by_text(self,tweet):\n blob = TextBlob(tweet['text'].decode('ascii', errors=\"replace\"))\n sentiment_polarity = blob.sentiment.polarity\n if sentiment_polarity < 0:\n sentiment = self.NEGATIVE\n elif sentiment_polarity <= 0.25:\n sentiment = self.NEUTRAL\n else:\n sentiment = self.POSITIVE\n tweet['sentiments'] = sentiment", "def analyze(self, text):\n # split sentences into words\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n \n score = 0\n \n for word in tokens:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n \n return score", "def sentiment_analysis(text):\n return SentimentIntensityAnalyzer().polarity_scores(skip_gutenberg_header_and_tail(text))", "def entity_sentiment_text(text):\n client = language.LanguageServiceClient()\n\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n\n document = types.Document(\n content=text.encode('utf-8'),\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detect and send native Python encoding to receive correct word offsets.\n encoding = enums.EncodingType.UTF32\n if sys.maxunicode == 65535:\n encoding = enums.EncodingType.UTF16\n\n result = client.analyze_entity_sentiment(document, encoding)\n for entity in result.entities:\n# print('Mentions: ')\n print(u'Name: \"{}\"'.format(entity.name))\n for mention in entity.mentions:\n# print(u' Begin Offset : {}'.format(mention.text.begin_offset))\n print(u' Content : {}'.format(mention.text.content))", "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments1 = map(lambda word: wordlist.get(word, 0), words)\n sentiments = []\n for k in sentiments1:\n\tif k != 0:\n\t\tsentiments.append(k)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. 
Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n else:\n sentiment = 0\n print 'from function',sentiment\n return sentiment", "def sentiment(tweet, language):\r\n\tsentiment=0\r\n\ttext=Text(tweet, hint_language_code = language)\r\n\tfor w in text.words:\r\n\t\tsentiment+=w.polarity\r\n\treturn sentiment", "def sentiment_analysis(text):\n\n # pass text into sentiment url\n if True:\n ret = get_sentiment_from_url(text, sentimentURL)\n if ret is None:\n sentiment_url = None\n else:\n sentiment_url, neg_url, pos_url, neu_url = ret\n else:\n sentiment_url = None\n\n # pass text into TextBlob\n text_tb = TextBlob(text)\n\n # pass text into VADER Sentiment\n analyzer = SentimentIntensityAnalyzer()\n text_vs = analyzer.polarity_scores(text)\n\n # determine sentiment from our sources\n if sentiment_url is None:\n #threshold values\n if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05:\n sentiment = \"negative\"\n elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05:\n sentiment = \"positive\"\n else:\n sentiment = \"neutral\"\n else:\n # this works if the above function executes properly\n if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05 and sentiment_url == \"negative\":\n sentiment = \"negative\"\n elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05 and sentiment_url == \"positive\":\n sentiment = \"positive\"\n else:\n sentiment = \"neutral\"\n\n polarity = (text_tb.sentiment.polarity + text_vs['compound']) / 2\n\n # output sentiment polarity\n print(\"************\")\n print(\"Sentiment Polarity: \" + str(round(polarity, 3)))\n\n # output sentiment subjectivity (TextBlob)\n print(\"Sentiment Subjectivity: \" + str(round(text_tb.sentiment.subjectivity, 3)))\n\n # output sentiment\n print(\"Sentiment (url): \" + str(sentiment_url))\n print(\"Sentiment (algorithm): \" + str(sentiment))\n print(\"Overall sentiment (textblob): \", text_tb.sentiment)\n print(\"Overall sentiment (vader): \", text_vs)\n print(\"sentence was rated as \", round(text_vs['neg']*100, 3), \"% Negative\")\n print(\"sentence was rated as \", round(text_vs['neu']*100, 3), \"% Neutral\")\n print(\"sentence was rated as \", round(text_vs['pos']*100, 3), \"% Positive\")\n print(\"************\")\n\n return polarity, text_tb.sentiment.subjectivity, sentiment", "def get_sentiment(text):\n # check that text does not exceed API's character limit\n url = \"http://text-processing.com/api/sentiment/\"\n if len(text) < 80000:\n # query text-processing API for sentiment score\n payload = {'text': text}\n\n # make API call\n r = requests.post(url, data=payload)\n\n # load JSON from API call\n result = json.loads(r.text)\n\n # pull sentiment score\n sen_score = result['probability']['pos']\n\n time.sleep(random.randint(0,5))\n return sen_score", "def compute_sentiment_for_tweet(tweet):\n text = polyglot.text.Text(tweet.lower(), hint_language_code='it')\n scores = [word.polarity for word in text.words if word.polarity != 0]\n return np.mean(scores) if scores else 0.0", "def _extract_sentiment_from_text(self, corpus_list, doc_name_to_id_dict):\n vader = SentimentIntensityAnalyzer()\n '''\n Go through the documents and rate their sentiment\n '''\n doc_count=0\n sentiment_feature_dict=defaultdict(list)\n for doc_name, row_id in doc_name_to_id_dict.iteritems():\n logger.debug(\"Extracting sentiment from: \" + doc_name)\n doc=corpus_list[row_id]\n ''' \n doc is one document from our corpus\n '''\n sentences=doc.split(\".\")\n pos_count=0\n 
neg_count=0\n prev_word_was_positive=False\n prev_word_was_negative=False\n pos_neg_count=0\n count=0\n longest_run_of_positives=0\n longest_run_of_negatives=0\n run_of_positives_count=0\n run_of_negatives_count=0\n score=vader.polarity_scores(' '.join(sentences))\n compound_polarity=score['compound']\n '''\n Rate the overall polarity of the document (1 positive, 0 negative)\n '''\n if compound_polarity>0:\n compound_polarity=1\n else:\n compound_polarity=0\n\n '''\n Rate each word in the corpus for sentiment and construct the word-based\n features\n '''\n for sentence in sentences:\n words=sentence.split(\" \")\n for word in words:\n score=vader.polarity_scores(word)\n '''\n If the negative sentiment of a word is greater than the positive sentiment\n '''\n if score['pos']>abs(score['neg']):\n pos_count+=1\n if prev_word_was_negative:\n pos_neg_count+=1\n prev_word_was_negative=False\n if run_of_negatives_count>longest_run_of_negatives:\n longest_run_of_negatives=run_of_negatives_count\n run_of_negatives_count=0\n else:\n run_of_positives_count+=1\n prev_word_was_positive=True\n\n '''\n If the positive sentiment of a word is greater than the negative sentiment\n '''\n if score['pos']<abs(score['neg']):\n neg_count+=1\n if prev_word_was_positive:\n prev_word_was_positive=False\n pos_neg_count+=1\n if run_of_positives_count>longest_run_of_positives:\n longest_run_of_positives=run_of_positives_count\n run_of_negatives_count=0\n else:\n run_of_negatives_count+=1\n prev_word_was_negative=True\n count+=1\n\n sentiment_feature_dict[doc_name].append([pos_count,neg_count,pos_neg_count,longest_run_of_negatives,longest_run_of_positives,compound_polarity])\n \n return sentiment_feature_dict", "def detect_intent_with_sentiment_analysis(project_id, session_id, texts, language_code):\n\n session_client = dialogflow.SessionsClient()\n\n session_path = session_client.session_path(project_id, session_id)\n print(\"Session path: {}\\n\".format(session_path))\n i=0\n score=0\n \n text_input = dialogflow.TextInput(text=texts, language_code=language_code)\n\n query_input = dialogflow.QueryInput(text=text_input)\n\n # Enable sentiment analysis\n sentiment_config = dialogflow.SentimentAnalysisRequestConfig(\n analyze_query_text_sentiment=True\n )\n\n # Set the query parameters with sentiment analysis\n query_params = dialogflow.QueryParameters(\n sentiment_analysis_request_config=sentiment_config\n )\n\n response = session_client.detect_intent(\n request={\n \"session\": session_path,\n \"query_input\": query_input,\n \"query_params\": query_params,\n }\n )\n score= response.query_result.sentiment_analysis_result.query_text_sentiment.score\n texts=texts.split(' ')\n mx=-1\n word=[]\n for text in texts:\n text_input = dialogflow.TextInput(text=text, language_code=language_code)\n\n query_input = dialogflow.QueryInput(text=text_input)\n sentiment_config = dialogflow.SentimentAnalysisRequestConfig(\n analyze_query_text_sentiment=True\n )\n\n # Set the query parameters with sentiment analysis\n query_params = dialogflow.QueryParameters(\n sentiment_analysis_request_config=sentiment_config\n )\n response = session_client.detect_intent(\n request={\n \"session\": session_path,\n \"query_input\": query_input,\n \"query_params\": query_params,\n }\n )\n s=response.query_result.sentiment_analysis_result.query_text_sentiment.score\n if s > 0.1 or s < -0.1:\n word.append(response.query_result.query_text)\n words=[]\n for w in word:\n for synset in wordnet.synsets(w):\n for lemma in synset.lemmas():\n if len(lemma.name()) > 
3:\n words.append(lemma.name()) #add the synonyms\n return (score,words)", "def market_sentiment(raw_data):\n # TODO\n pass", "def sentiment_analysis(self,tweet):\n tweet['emoticons'] = []\n tweet['sentiments'] = []\n self.sentiment_analysis_by_emoticons(tweet)\n if ((len(tweet['sentiments']) == 0) or (tweet['sentiments'] == self.NEUTRAL) or (tweet['sentiments'] == self.CONFUSED)):\n self.sentiment_analysis_by_text(tweet)", "def calculate_sentiment(positive_words,negative_words,tweet_text):\n\tpos = 0\n\tneg = 0\n\tfor x in tweet_text:\n\t\tif np.any(positive_words==x):\n\t\t\tpos+=1\n\t\telif np.any(negative_words==x):\n\t\t\tneg+=1\n\treturn(pos,neg)", "def analyse_text(cls, text: str) -> List[str]:\n print(\"\\nSending data to Deep AI for analysis...\\n\")\n try:\n response = requests.post(\n \"https://api.deepai.org/api/sentiment-analysis\",\n data={\n 'text': text,\n },\n headers={\n 'api-key': DEEP_API_KEY\n }\n )\n\n sentiments = response.json()['output']\n return sentiments\n except Exception:\n print(\"\\nSorry, looks like something went wrong!\")\n return []", "def compute_sentiment(self, tweet, table_name):\n vs = self.analyzer.polarity_scores(tweet['text'])\n print(vs['compound'])\n self.database_interface.run_command(\"INSERT INTO %s(timestamp,sentiment_metric) VALUES (%s,%s)\",\n (table_name, tweet['time'], vs['compound']),\n should_return=False)", "def detect(image, args):\n data = {\n 'requests':[\n {\n 'image': {'content': image},\n 'features':[\n {\n 'type': 'FACE_DETECTION',\n 'maxResults': 2,\n },\n {\n 'type': 'TEXT_DETECTION',\n 'maxResults': 1,\n },\n ]\n }\n ]\n }\n r = requests.post('https://vision.googleapis.com/v1/images:annotate?key=' + args.g,\n data=json.dumps(data))\n\n if r.status_code != 200:\n print 'error status ' + str(r.json())\n return None\n else:\n return likelySentiment(r, args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do basic cleanup of the text in each paragraph
def cleanparagraph(self, text):
    text = cleantext(text)
    text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
    text = ' '.join(text.split()).strip()
    return text
[ "def cleanup(self):\n text = ''\n try:\n paragraphs = self.text.split('\\n')\n except AttributeError:\n return text\n\n for par in paragraphs:\n par = par.strip()\n if par == '':\n continue\n\n par = RE_SPACES.sub(' ', par)\n text += par + '\\n'\n\n return text.strip()", "def tidy_article_allow_punct( text ): \n #Get rid of new lines. Need this for the <figure> removal\n text = text.replace('\\n', ' ').replace('\\r', '')\n #<figure> tag has contents. Remove all this.\n text = re.sub( '<figure(.*?)/figure>', '', text)\n #<span> contains \"facebook twitter google plus bst\"\n text = re.sub( '<span(.*?)/span>', '', text)\n #<sup> is a little supplementary notice\n text = re.sub( '<sup(.*?)/sup>', '', text)\n #\"Read more\" text\n text = re.sub( '<div class=\"rich-link__read-more(.*?)/div>', '', text)\n text = re.sub( 'Read more here:', '', text)\n #Remove other html tags but keep content\n text = re.sub( '<[^>]+>', ' ', text) \n #Remove numbers\n text = ' '.join(s for s in text.split() if not any(c.isdigit() for c in s))\n # Remove contractions\n text = re.sub('\\'s', '', text)\n text = re.sub('’s', '', text) \n #Remove extra spaces\n text = re.sub('\\s+', ' ' , text) \n return text.strip()", "def tidy_article( text ): \n #Get rid of new lines. Need this for the <figure> removal\n text = text.replace('\\n', ' ').replace('\\r', '')\n #<figure> tag has contents. Remove all this.\n text = re.sub( '<figure(.*?)/figure>', '', text)\n #<span> contains \"facebook twitter google plus bst\"\n text = re.sub( '<span(.*?)/span>', '', text)\n #<sup> is a little supplementary notice\n text = re.sub( '<sup(.*?)/sup>', '', text)\n #\"Read more\" text\n text = re.sub( '<div class=\"rich-link__read-more(.*?)/div>', '', text)\n text = re.sub( 'Read more here:', '', text)\n #Remove other html tags but keep content\n text = re.sub( '<[^>]+>', ' ', text) \n #Remove numbers\n text = ' '.join(s for s in text.split() if not any(c.isdigit() for c in s))\n # Remove contractions\n text = re.sub('\\'s', '', text)\n text = re.sub('’s', '', text)\n #text = re.sub('n\\'t', ' not', text) \n #text = re.sub('s\\'', 's', text)\n #text = re.sub('I\\'m', 'I am', text)\n ##( she'd -> she would, OR she had;\n # Remove punctuation but leave hyphenated words \n text = re.sub(' - ', ' ', text)\n text = re.sub('–', ' ' , text )\n text = re.sub('-', ' ', text)\n text = re.sub(r'[?|$|.|!|)|\\]|\\[|(|\"|“|”|’|,|:|\\']', r'', text) \n #Remove extra spaces\n text = re.sub('\\s+', ' ' , text) \n return text.strip()", "def clean_text(self, corpus):\n # TODO: Add option for Lemmetization\n punctuation = \"\"\"@.,?!:;(){}[]\"\"\"\n corpus = [z.strip().lower().replace('\\n','') for z in corpus]\n for c in punctuation:\n # inserts whitespace on both sides of a punctuation\n # so that in the next step it gets split\n corpus = [z.replace(c, ' %s '%c) for z in corpus]\n corpus = [z.split() for z in corpus]\n\n return corpus", "def preprocess(text):\n text = normalize_unicode(text)\n text = remove_newline(text)\n text = text.lower()\n text = decontracted(text)\n text = replace_negative(text)\n text = removePunctuations(text)\n text = remove_number(text)\n text = remove_space(text)\n text = removeArticlesAndPronouns(text)\n text = removeNLTKStopWords(text)\n #text = performStemming(text)\n return text", "def clean_text(text):\n\n cleaning_log = {}\n # Remove html.\n text = _apply_cleaning(text, _remove_html, cleaning_log, \"remove_html\")\n # Replace whitespaces.\n text = _apply_cleaning(text, _replace_whitespaces, cleaning_log, 
'replace_whitespaces')\n # Replace multiple stopwords.\n text = _apply_cleaning(text, _replace_multiple_stop_characters, cleaning_log, 'replace_multiple_stop_characters')\n # Replace apostrophes.\n text = _apply_cleaning(text, _replace_apostrophes, cleaning_log, 'replace_apostrophes')\n # Expand contractions.\n text = _apply_cleaning(text, _expand_contractions, cleaning_log, 'expand_contractions')\n # Remove hyperlinks.\n text = _apply_cleaning(text, _remove_hyperlinks, cleaning_log, 'remove_hyperlinks')\n # Remove special characters.\n text = _apply_cleaning(text, _remove_special_characters, cleaning_log, 'remove_special_characters')\n # Remove numbers.\n text = _apply_cleaning(text, _remove_numbers, cleaning_log, 'remove_numbers')\n # Convert to lower case.\n text = _apply_cleaning(text, _convert_case, cleaning_log, 'convert_case')\n # Remove repeated characters.\n text = _apply_cleaning(text, _remove_repeated_characters, cleaning_log, 'remove_repeated_characters')\n # Manually correct words.\n text = _apply_cleaning(text, _correct_manually, cleaning_log, 'correct_manually')\n # Sentence tokenize.\n text = _apply_cleaning(text, _sentence_tokenize, cleaning_log, 'sentence_tokenize')\n # Remove sentence ending characters.\n text = _apply_cleaning(text, _remove_end_characters, cleaning_log, 'remove_end_characters')\n # POS tag.\n text = _apply_cleaning(text, _pos_tag_text, cleaning_log, 'pos_tag')\n # Lemmatize.\n text = _apply_cleaning(text, _lemmatize_text, cleaning_log, 'lemmatize')\n # Remove stopwords.\n text = _apply_cleaning(text, _remove_stopwords, cleaning_log, 'remove_stopwords')\n # Merge.\n text = _apply_cleaning(text, _merge_sentences, cleaning_log, 'merge_sentences')\n # Merge tokens.\n text = _apply_cleaning(text, _merge_tokens, cleaning_log, '_merge_tokens')\n\n # Return cleaned text and cleaning log.\n return text", "def preprocessing_text(self):\n print(' >>> Cleaning text...', end='', flush=True)\n self.text = regexp.sub(\" \", \"__\", self.text.lower(), flags=regexp.MULTILINE)\n self.text = \"_\" + regexp.sub(\"[^_a-z]\", \"\", self.text, flags=regexp.MULTILINE) + \"_\"\n print(ANSI.ok_green, 'OK !', ANSI.endc)", "def remove_punction(text_list):\n\tfor i in range(len(text_list)):\n\t\tfor punc in puctuation_removal:\n\t\t\ttext_list[i] = text_list[i].replace(punc, \"\")\n\tprint(text_list)\n\treturn text_list", "def clean_and_sentencize_entry(story_text, question):\n sentences = split_sentences(story_text)\n return [clean_text(s) for s in sentences] + [clean_text(question)]", "def extract_statements(\n text=None, \n nlp=None, \n make_sentence=False, \n n_min_word_paragraph=50, \n n_max_word_paragraph=200\n ):\n \n # remove non ASCII characters\n text = remove_non_ascii(text)\n \n \n lines = []\n prev = \"\"\n n_words = 0\n for line in text.split('\\n'):\n # aggregate consecutive lines where text may be broken down\n # only if next line starts with a space or previous does not end with punctation mark and between\n if((line.startswith(' ') or not prev.endswith(('.','?', '!'))) and n_words <= n_max_word_paragraph):\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n # min words in paragraph\n elif n_words <=n_min_word_paragraph:\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n else:\n # new paragraph\n lines.append(prev)\n prev = line\n n_words = 0\n \n # don't forget left-over paragraph\n lines.append(prev)\n # clean paragraphs from extra space, unwanted characters, urls, etc.\n # best effort clean up, consider a more versatile cleaner\n sentences = 
[]\n for line in lines:\n \n # removing header number\n line = re.sub(r'^\\s?\\d+(.*)$', r'\\1', line)\n # removing trailing spaces\n line = line.strip()\n # words may be split between lines, ensure we link them back together\n line = re.sub('\\\\s?-\\\\s?', '-', line)\n # remove space prior to punctuation\n line = re.sub(r'\\s?([,:;\\.])', r'\\1', line)\n # ESG contains a lot of figures that are not relevant to grammatical structure\n line = re.sub(r'\\d{5,}', r' ', line)\n # remove mentions of URLs\n line = re.sub(r'((http|https)\\:\\/\\/)?[a-zA-Z0-9\\.\\/\\?\\:@\\-_=#]+\\.([a-zA-Z]){2,6}([a-zA-Z0-9\\.\\&\\/\\?\\:@\\-_=#])*', r' ', line)\n # remove multiple spaces\n line = re.sub('\\\\s+', ' ', line)\n \n # split paragraphs into well defined sentences using spacy\n if make_sentence:\n try:\n for part in list(nlp(line).sents):\n part_strip = str(part).strip()\n # remove senteces with only 30 characters\n if len(part_strip) > 30:\n sentences.append(part_strip)\n except ValueError:\n print(\"Check if nlp model was loaded\")\n else:\n sentences.append(line)\n \n return sentences", "def clean_text(text):\n text = text.lower()\n text = re.sub('\\[.*?\\]', '', text)\n text = re.sub('https?://\\S+|www\\.\\S+', '', text)\n text = re.sub('<.*?>+', '', text)\n text = re.sub('[%s]' % re.escape(string.punctuation), '', text)\n text = re.sub('\\n', '', text)\n text = re.sub('\\w*\\d\\w*', '', text)\n return text", "def para_tokenize(html):\n # Transform the document into a readability paper summary\n summary = Document(html).summary()\n\n # Parse the HTML using BeautifulSoup\n soup = bs4.BeautifulSoup(summary, 'lxml')\n\n # Extract the paragraph delimiting elements\n for tag in soup.find_all(TAGS):\n\n # Get the HTML node text\n text = tag.get_text()\n if text: yield text", "def preprocess(self):\n for key in self.markdown.keys():\n # data goes to this file \n f = open(key + \".txt\", \"wb\")\n # clean the data up before writing to file\n largeString = \"\\n\".join(self.markdown[key])\n sentences = self.get_sentences(largeString)\n for sentence in sentences:\n x = self.remove_chars(sentence) \n y = self.tokenize_punc(x)\n # write data to file sentence by sentence\n f.write(y.lstrip() + '\\n')\n f.close()", "def clean_text(text):\n \n text = remove_html(text)\n text = remove_hyperlinks(text)\n text = map_punctuation_to_space(text)\n text = regularize_spacing(text)\n # TODO: text.strip\n # TODO: stopwords\n \n return text.lower()", "def clean_document(self,document):\r\n # Remove all characters outside of Alpha Numeric\r\n # and some punctuation\r\n document = re.sub('[^A-Za-z .-]+', ' ', document)\r\n document = document.replace('-', '')\r\n document = document.replace('...', '')\r\n document = document.replace('Mr.', 'Mr').replace('Mrs.', 'Mrs')\r\n\r\n # Remove Ancronymns M.I.T. -> MIT\r\n # to help with sentence tokenizing\r\n document = self.merge_acronyms(document)\r\n\r\n # Remove extra whitespace\r\n document = ' '.join(document.split())\r\n return document", "def normalizePages (\n\n self,\n text = None\n ) :\n\n if utilities.isEmpty( text ) : return \"\"\n\n # removes keywords p. pp. 
to etc.\n\n for bit in [ \"pp.\", \"p.\", \"&\", \"-\", \" to \", \" and \" ] :\n\n text = text.replace( bit, \" \" )\n##\n## text = text.\\\n## replace( \"pp.\", \" \" ).\\\n## replace( \"p.\", \" \" ).\\\n## replace( \" to \", \" \" ).\\\n## replace( \"&\", \" \" ).\\\n## replace( \" and \", \" \" ).\\\n## replace( \"-\", \" \" )\n\n text = utilities.string( text, format = \"strict\" )\n\n # splits into words\n \n words = utilities.textToWords( text )\n\n if utilities.isEmpty( words ) : return \"\"\n\n # joins consecutive integers or non-integers with --\n\n text = words[ 0 ]\n\n previous = words[ 0 ].isdigit()\n\n minus = False\n\n for word in words[ 1 : ] :\n\n isdigit = word.isdigit() \n\n minus = ( not minus ) and ( previous == isdigit )\n\n previous = isdigit\n\n if minus : text = text + \"--\"\n\n else : text = text + \" \"\n\n text = text + word\n\n return text.strip()", "def cleaning(text):\n txt = []\n for sentence in text:\n sen = ''\n for string in sentence:\n string = string.replace(\",\",\"\")\n string = string.replace(\"\\n\",\"\")\n sen += string\n txt += [sen]\n return txt", "def get_paragraphs(text):\n return [s.strip() for s in re.split(\"\\n+\", text) if s.strip()]", "def _clean(self, corpus):\r\n\t\tclean_corpus = []\r\n\t\tfor doc in corpus:\r\n\t\t\tclean_corpus.append(REGEX.sub(' ', doc))\r\n\t\treturn clean_corpus", "def remove_defined_articles(self, text: str) -> str:\n cleaned_text = re.sub(self.quote_pattern, \"\", text)\n return cleaned_text.strip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert collected data to paragraphs
def paragraphs(self): pars = [] for par in self.data: if len(par) > 0: text = self.cleanparagraph(''.join(par)).strip() if text: pars.append(text) return pars
[ "def get_paragraph_data(html_soup):\n polluted_text = str(soup.find_all(\"p\"))\n text_soup = BeautifulSoup(polluted_text)\n return text_soup.get_text()", "def generate_paragraph(self):\n\n sentences = set()\n while True:\n sentences.add(self.generate_sentence())\n if self.stop_generation(self.rate_paragraph):\n break\n return ' '.join(sentences)", "def paragraphs(cls, nb=3):\r\n return [cls.paragraph() for _ in range(0, nb)]", "def paragraph(self):\n self.conteudo = BeautifulSoup(self.html_content, 'lxml').find_all()[0]\n self.conteudo = str(self.conteudo).replace('\\xa0', '')\n \"\"\"buscando o artigo\"\"\"\n inicio_artigo = re.search(f'Art. {self.n}', self.conteudo).start()\n fim_artigo = re.search('<b', self.conteudo[(inicio_artigo):]).start()\n #str(paragrafo[0])[inicio_artigo:].replace('\\xa0', '').split('<br/>')[0]\n fim_artigo += inicio_artigo\n self.conteudo[inicio_artigo: fim_artigo]\n if self.p == 0:\n inicio_article = re.search(f'Art. {self.n}', self.html_content).start()\n inicio_paragrafo_unico = re.search('Parágrafo único.', self.conteudo[fim_artigo:]).start()\n fim_paragrafo_unico = re.search('<br/>', self.conteudo[(fim_artigo + inicio_paragrafo_unico):]).start()\n fim_paragrafo_unico += inicio_paragrafo_unico\n conteudo2 = self.conteudo[fim_artigo:]\n conteudo2 = conteudo2[inicio_paragrafo_unico : fim_paragrafo_unico].replace('</i>', '')\n return(conteudo2)\n return('Ainda não implementado a busca de parágrafos numerados.')", "def get_doc_paragraphs(self):\n tokens = nltk.word_tokenize(self.doc_content.decode('utf-8'))\n paragraphs = [tokens[x:x + 500] for x in xrange(0, len(tokens), 500)]\n return paragraphs", "def _split_paragraphs(self, text):\n\n import re\n import textwrap\n\n text = textwrap.dedent(text).strip()\n text = re.sub('\\n\\n[\\n]+', '\\n\\n', text)\n\n last_sub_indent = None\n paragraphs = list()\n for line in text.splitlines():\n (indent, sub_indent) = self._indents(line)\n is_text = len(line.strip()) > 0\n\n if is_text and indent == sub_indent == last_sub_indent:\n paragraphs[-1] += ' ' + line\n else:\n paragraphs.append(line)\n\n if is_text:\n last_sub_indent = sub_indent\n else:\n last_sub_indent = None\n\n return paragraphs", "def paragraphs_to_dict(paragraphs: List[Paragraph]):\n ps = []\n for para in paragraphs:\n paragraph = dict()\n\n # add text\n paragraph[\"text\"] = para.text\n\n # add headings\n for i in range(1, 7):\n if eval(f\"para.h{i}\"):\n paragraph[f\"h{i}\"] = eval(f\"para.h{i}.text\")\n\n # add if skippable\n paragraph[\"is_skippable\"] = para.is_skippable\n ps.append(paragraph)\n return ps", "def test_paragraphs(self):\n self.assertEqual(\n paragraphs(1),\n [\n \"Lorem ipsum dolor sit amet, consectetur adipisicing elit, \"\n \"sed do eiusmod tempor incididunt ut labore et dolore magna \"\n \"aliqua. Ut enim ad minim veniam, quis nostrud exercitation \"\n \"ullamco laboris nisi ut aliquip ex ea commodo consequat. \"\n \"Duis aute irure dolor in reprehenderit in voluptate velit \"\n \"esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint \"\n \"occaecat cupidatat non proident, sunt in culpa qui officia \"\n \"deserunt mollit anim id est laborum.\"\n ],\n )", "def para_tokenize(html):\n # Transform the document into a readability paper summary\n summary = Document(html).summary()\n\n # Parse the HTML using BeautifulSoup\n soup = bs4.BeautifulSoup(summary, 'lxml')\n\n # Extract the paragraph delimiting elements\n for tag in soup.find_all(TAGS):\n\n # Get the HTML node text\n text = tag.get_text()\n if text: yield text", "def create_paragraphs(self) -> None:\n new_child_nodes = []\n paragraph_node = None\n\n for node_id in self.child_nodes:\n node = in_scope(node_id)\n\n if node.is_phrasing():\n if not paragraph_node:\n paragraph_node = sdoc.sdoc2.node_store.create_inline_node('paragraph')\n new_child_nodes.append(paragraph_node.id)\n\n paragraph_node.append_child_node(node)\n else:\n if paragraph_node:\n paragraph_node.prune_whitespace()\n sdoc.sdoc2.node_store.store_node(paragraph_node)\n paragraph_node = None\n\n # End paragraph nodes are created temporary to separate paragraphs in a flat list of (text) node. There\n # role ae replaced by the content hierarchy now. So, we must no store end paragraph nodes.\n if not isinstance(node, EndParagraphNode):\n new_child_nodes.append(node.id)\n\n out_scope(node)\n\n if paragraph_node:\n paragraph_node.prune_whitespace()\n sdoc.sdoc2.node_store.store_node(paragraph_node)\n # paragraph_node = None\n\n # Setting child nodes.\n self.child_nodes = new_child_nodes", "def new_paragraph(self):\n if self.chainMode == ChainMode.CHARS:\n return \"\\n\\n\"\n elif self.chainMode == ChainMode.WORDS:\n return [\"\\n\\n\"]", "def _parse_paragraph(self, node, state):\n # Both Paragraphs will share the same parent\n parent = (\n state[\"context\"][node]\n if node in state[\"context\"]\n else state[\"parent\"][node]\n )\n for field in [\"text\", \"tail\"]:\n text = getattr(node, field)\n text = text.strip() if text and self.strip else text\n\n # Skip if \"\" or None\n if not text:\n continue\n\n # Run RegEx replacements\n for (rgx, replace) in self.replacements:\n text = rgx.sub(replace, text)\n\n # Process the Paragraph\n stable_id = \"{}::{}:{}\".format(\n state[\"document\"].name, \"paragraph\", state[\"paragraph\"][\"idx\"]\n )\n parts = {}\n parts[\"stable_id\"] = stable_id\n parts[\"document\"] = state[\"document\"]\n parts[\"position\"] = state[\"paragraph\"][\"idx\"]\n if isinstance(parent, Caption):\n if parent.table:\n parts[\"section\"] = parent.table.section\n elif parent.figure:\n parts[\"section\"] = parent.figure.section\n parts[\"caption\"] = parent\n elif isinstance(parent, Cell):\n parts[\"section\"] = parent.table.section\n parts[\"cell\"] = parent\n elif isinstance(parent, Section):\n parts[\"section\"] = parent\n elif isinstance(parent, Figure): # occurs with text in the tail of an img\n parts[\"section\"] = parent.section\n else:\n raise NotImplementedError(\n 'Paragraph \"{}\" parent must be Section, Caption, or Cell, not {}'.format(\n text, parent\n )\n )\n\n # Create the Figure entry in the DB\n paragraph = Paragraph(**parts)\n\n state[\"paragraph\"][\"idx\"] += 1\n\n state[\"paragraph\"][\"text\"] = text\n state[\"paragraph\"][\"field\"] = field\n\n # Parse the Sentences in the Paragraph\n yield from self._parse_sentence(paragraph, node, state)\n\n return state", "def get_paragraphs(url):\n\n source = urllib.request.urlopen(url).read()\n\n soup = bs.BeautifulSoup(source,'lxml')\n\n paragraphs = []\n\n for paragraph in soup.find_all('p'):\n 
paragraphs.append(paragraph.text)\n\n new_paragraphs = []\n\n for p in paragraphs:\n p = p.lower()\n p = unidecode(p)\n new_paragraphs.append(p)\n\n return new_paragraphs", "def paragraphs(string):\n return [line.strip() for line in string.split('\\n')]", "def get_paragraphs(text):\n return [s.strip() for s in re.split(\"\\n+\", text) if s.strip()]", "def __handle_start_paragraph_token(cls, output_html, next_token, transform_state):\n _ = next_token\n token_parts = [output_html]\n if output_html and output_html[-1] != ParserHelper.newline_character:\n token_parts.append(ParserHelper.newline_character)\n if transform_state.is_in_loose_list:\n token_parts.append(\"<p>\")\n return \"\".join(token_parts)", "def data_for_paragraph_selector(self): #TODO maybe, if you're bored and there is another lockdown, rename this.\n result = []\n for point in self.data:\n # supp_facts = set([fact[0] for fact in point[\"supporting_facts\"]])\n\n supp_facts_detailed = {}\n for fact in point[\"supporting_facts\"]:\n if supp_facts_detailed.get(fact[0]):\n supp_facts_detailed[fact[0]].append(fact[1])\n else:\n supp_facts_detailed[fact[0]] = [fact[1]]\n result.append([\n point[\"_id\"],\n supp_facts_detailed, # we used to use supp_facts here\n point[\"question\"],\n point[\"context\"],\n point[\"answer\"]\n ])\n return result", "def generate_text(self):\n\n paragraphs = set()\n while True:\n paragraphs.add(self.generate_paragraph())\n if self.stop_generation(self.rate_text):\n break\n return '\\n\\n'.join(paragraphs)", "def predict_paragraph(self, paragraph):\n # TODO: break paragraph into sentences.\n \n pass", "def preprocessPDFData(self, text):\n\n newlines = []\n for line in text:\n if (line[0] == \"\\x0C\"):\n if (len(line) >= 150):\n newlines.append(line)\n else:\n newlines.append(line)\n \n textNew = \"\".join(newlines)\n textNew = textNew.replace(\".\", \". \")\n textNew = textNew.replace(\",\", \", \")\n return textNew.decode(\"utf-8\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DutyDetails - a model defined in OpenAPI
def __init__(self, when: DutyDetailsWhen=None, where: DutyDetailsWhere=None, requirements: object=None): self.openapi_types = { 'when': DutyDetailsWhen, 'where': DutyDetailsWhere, 'requirements': object } self.attribute_map = { 'when': 'when', 'where': 'where', 'requirements': 'requirements' } self._when = when self._where = where self._requirements = requirements
[ "def __init__(self, worker_id: str=None, duty: Duty=None):\n self.openapi_types = {\n 'worker_id': str,\n 'duty': Duty\n }\n\n self.attribute_map = {\n 'worker_id': 'workerId',\n 'duty': 'duty'\n }\n\n self._worker_id = worker_id\n self._duty = duty", "def prospects_advisor_details():\n try:\n data = request.get_json()\n\n if data is None:\n return jsonify({'error': 'Request JSON was not found'}), Status.BAD\n\n filter_id = data.get('advisorId', None)\n if filter_id is None:\n return jsonify({'error':'No advisorId provided'})\n\n found_advisor = db.session.query(Advisor)\\\n .filter(Advisor.status == 'Active', Advisor.pk_id == filter_id).first()\n\n if found_advisor is not None:\n response = jsonify({\"id\": found_advisor.pk_id,\n \"email\": found_advisor.email,\n \"firstName\": found_advisor.first_name,\n \"lastName\": found_advisor.last_name[0] if found_advisor.last_name else '',\n \"city\": found_advisor.city,\n \"state\": found_advisor.state,\n \"location\": found_advisor.location,\n \"specialty\": [specialty.to_json() for specialty in (found_advisor.specialties or [])],\n \"occupation\": [occ.to_json() for occ in (found_advisor.occupations or [])],\n \"previousFirm\": [firm.to_json() for firm in (found_advisor.previous_firms or [])],\n \"yearsOfExperience\": found_advisor.years_of_experience_range.value if found_advisor.years_of_experience_range else '',\n \"biography\": found_advisor.biography,\n \"currentFirm\": found_advisor.current_firm.name if found_advisor.current_firm else '',\n \"currentFirmSize\": found_advisor.current_firm_size,\n \"currentFirmRevenue\": found_advisor.current_firm_revenue,\n \"undergradEducation\": found_advisor.undergrad_education,\n \"gradEducation\": found_advisor.grad_education,\n \"imageUrl\": found_advisor.linkedin_picture_url,\n \"resumeUrl\": found_advisor.resume_url} if found_advisor is not None else {})\n else:\n response = jsonify({\"error\": \"No active advisor found with that ID\"})\n\n db.session.close()\n\n return response, Status.COMPLETED\n except:\n db.session.rollback()\n db.session.close()\n e = sys.exc_info()[0]\n v = sys.exc_info()[1]\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(e))\n print(str(v))\n return jsonify({'error': str(e),\n 'value': str(v),\n 'line' : str(exc_tb.tb_lineno)\n }), Status.BAD", "def details(self):\n return Details()", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'user_id': 'int',\n 'title': 'str',\n 'description': 'str',\n 'readonly': 'bool',\n 'hidden': 'bool',\n 'refresh_interval': 'str',\n 'refresh_interval_to_i': 'int',\n 'space': 'SpaceBase',\n 'model': 'str',\n 'scheduled_plan': 'ScheduledPlan'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'user_id': 'user_id',\n 'title': 'title',\n 'description': 'description',\n 'readonly': 'readonly',\n 'hidden': 'hidden',\n 'refresh_interval': 'refresh_interval',\n 'refresh_interval_to_i': 'refresh_interval_to_i',\n 'space': 'space',\n 'model': 'model',\n 'scheduled_plan': 'scheduled_plan'\n }\n\n self._id = None\n self._user_id = None\n self._title = None\n self._description = None\n self._readonly = None\n self._hidden = None\n self._refresh_interval = None\n self._refresh_interval_to_i = None\n self._space = None\n self._model = None\n self._scheduled_plan = None", "def __init__(self, id = ''):\n\n try:\n\n if id == '':\n raise ValueError('Entertainment object expects an id value. 
Must be passed as string.\\n Usage: Entertainment(id)')\n elif id != None and type(id) != str:\n raise TypeError('Entertainment object expects a string argument.')\n\n self.__id = id\n\n s = requests.get(\"https://api.wdpro.disney.go.com/facility-service/entertainments/{}\".format(self.__id), headers=getHeaders())\n self.__data = json.loads(s.content)\n\n self.__entertainment_name = self.__data['name'].replace(u\"\\u2019\", \"'\").replace(u\"\\u2013\", \"-\").replace(u\"\\u2122\", \"\").replace(u\"\\u2022\", \"-\").replace(u\"\\u00ae\", \"\").replace(u\"\\u2014\", \"-\").replace(u\"\\u00a1\", \"\").replace(u\"\\u00ee\", \"i\").replace(u\"\\u25cf\", \" \").replace(u\"\\u00e9\", \"e\").replace(u\"\\u00ad\", \"\").replace(u\"\\u00a0\", \" \").replace(u\"\\u00e8\", \"e\").replace(u\"\\u00eb\", \"e\").replace(u\"\\u2026\", \"...\").replace(u\"\\u00e4\", \"a\").replace(u\"\\u2018\", \"'\").replace(u\"\\u00ed\", \"i\").replace(u\"\\u201c\", '\"').replace(u\"\\u201d\", '\"').strip()\n self.__type = self.__data['type']\n self.__subType = self.__data['subType']\n try:\n self.__coordinates = self.getRelatedLocations()[0].getPointOfInterestCoordinates()\n except:\n self.__coordinates = ()\n\n self.waitTimeData = None\n\n except ValueError as e:\n print(e)\n sys.exit()\n except TypeError as e:\n print(e)\n sys.exit()\n except Exception as e:\n print(e)\n print('That entertainment or ID is not available. ID = {}\\nFull list of possible entertainments and their ID\\'s can be found here: https://scaratozzolo.github.io/MouseTools/entertainments.txt'.format(id))\n sys.exit()", "def __init__(self, id=None, full_name=None, short_name=None, week_working_hours=None, clarification_working_hours=None, public_phone=None, geo_data=None):\n self.swagger_types = {\n 'id': int,\n 'full_name': str,\n 'short_name': str,\n 'week_working_hours': List[BloodStationWeekWorkingHours],\n 'clarification_working_hours': str,\n 'public_phone': str,\n 'geo_data': List[BloodStationGeoData]\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'full_name': 'FullName',\n 'short_name': 'ShortName',\n 'week_working_hours': 'WeekWorkingHours',\n 'clarification_working_hours': 'ClarificationWorkingHours',\n 'public_phone': 'PublicPhone',\n 'geo_data': 'GeoData'\n }\n\n self._id = id\n self._full_name = full_name\n self._short_name = short_name\n self._week_working_hours = week_working_hours\n self._clarification_working_hours = clarification_working_hours\n self._public_phone = public_phone\n self._geo_data = geo_data", "def make_api_request(self):\n # We can create an instance of an endpoint resource class, and use it to fetch details\n access_token = self.get_token()\n self.request.session['access_token'] = access_token\n\n\n # Grab the first doctor from the list; normally this would be the whole\n # practice group, but your hackathon account probably only has one doctor in it.\n doctor = DoctorEndpoint(access_token).get_doctor()\n #self.request.session['doctor'] = doctor.id\n\n patient = PatientEndpoint(access_token).get_patients(doctor)\n appt = AppointmentEndpoint(access_token).get_appoinments(doctor, None)\n # Get patients and appointments for the doctor and store it in the local DB\n\n return doctor", "def __init__(self):\n self.swagger_types = {\n 'id': 'int',\n 'timestamp': 'int',\n 'user_id': 'int',\n 'correlation': 'float',\n 'cause_id': 'int',\n 'effect_id': 'int',\n 'onset_delay': 'int',\n 'duration_of_action': 'int',\n 'number_of_pairs': 'int',\n 'value_predicting_high_outcome': 'float',\n 'value_predicting_low_outcome': 
'float',\n 'optimal_pearson_product': 'float',\n 'vote': 'float',\n 'statistical_significance': 'float',\n 'cause_unit': 'str',\n 'cause_unit_id': 'int',\n 'cause_changes': 'int',\n 'effect_changes': 'int',\n 'qm_score': 'float',\n 'error': 'str',\n 'created_at': 'datetime',\n 'updated_at': 'datetime',\n 'reverse_pearson_correlation_coefficient': 'float',\n 'predictive_pearson_correlation_coefficient': 'float'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'timestamp': 'timestamp',\n 'user_id': 'user_id',\n 'correlation': 'correlation',\n 'cause_id': 'cause_id',\n 'effect_id': 'effect_id',\n 'onset_delay': 'onset_delay',\n 'duration_of_action': 'duration_of_action',\n 'number_of_pairs': 'number_of_pairs',\n 'value_predicting_high_outcome': 'value_predicting_high_outcome',\n 'value_predicting_low_outcome': 'value_predicting_low_outcome',\n 'optimal_pearson_product': 'optimal_pearson_product',\n 'vote': 'vote',\n 'statistical_significance': 'statistical_significance',\n 'cause_unit': 'cause_unit',\n 'cause_unit_id': 'cause_unit_id',\n 'cause_changes': 'cause_changes',\n 'effect_changes': 'effect_changes',\n 'qm_score': 'qm_score',\n 'error': 'error',\n 'created_at': 'created_at',\n 'updated_at': 'updated_at',\n 'reverse_pearson_correlation_coefficient': 'reverse_pearson_correlation_coefficient',\n 'predictive_pearson_correlation_coefficient': 'predictive_pearson_correlation_coefficient'\n }\n\n self._id = None\n self._timestamp = None\n self._user_id = None\n self._correlation = None\n self._cause_id = None\n self._effect_id = None\n self._onset_delay = None\n self._duration_of_action = None\n self._number_of_pairs = None\n self._value_predicting_high_outcome = None\n self._value_predicting_low_outcome = None\n self._optimal_pearson_product = None\n self._vote = None\n self._statistical_significance = None\n self._cause_unit = None\n self._cause_unit_id = None\n self._cause_changes = None\n self._effect_changes = None\n self._qm_score = None\n self._error = None\n self._created_at = None\n self._updated_at = None\n self._reverse_pearson_correlation_coefficient = None\n self._predictive_pearson_correlation_coefficient = None", "def __init__(self, created=None, changed_by=None, updated=None, id=None, crm_id=None, account_id=None, subscription_id=None, subscription_version_id=None, invoice_id=None, name=None, description=None, amount=None, amount_excluding_tax=None, currency=None, period_start=None, period_end=None, type=None, invoicing_type=None, state=None, charge_type=None, calculation=None, remaining_credit_behaviour=None, trial=False, version_id=None, version_number=None):\n self.swagger_types = {\n 'created': 'datetime',\n 'changed_by': 'str',\n 'updated': 'datetime',\n 'id': 'str',\n 'crm_id': 'str',\n 'account_id': 'str',\n 'subscription_id': 'str',\n 'subscription_version_id': 'str',\n 'invoice_id': 'str',\n 'name': 'str',\n 'description': 'str',\n 'amount': 'float',\n 'amount_excluding_tax': 'float',\n 'currency': 'str',\n 'period_start': 'datetime',\n 'period_end': 'datetime',\n 'type': 'str',\n 'invoicing_type': 'str',\n 'state': 'str',\n 'charge_type': 'str',\n 'calculation': 'str',\n 'remaining_credit_behaviour': 'str',\n 'trial': 'bool',\n 'version_id': 'str',\n 'version_number': 'int'\n }\n\n self.attribute_map = {\n 'created': 'created',\n 'changed_by': 'changedBy',\n 'updated': 'updated',\n 'id': 'id',\n 'crm_id': 'crmID',\n 'account_id': 'accountID',\n 'subscription_id': 'subscriptionID',\n 'subscription_version_id': 'subscriptionVersionID',\n 'invoice_id': 'invoiceID',\n 
'name': 'name',\n 'description': 'description',\n 'amount': 'amount',\n 'amount_excluding_tax': 'amountExcludingTax',\n 'currency': 'currency',\n 'period_start': 'periodStart',\n 'period_end': 'periodEnd',\n 'type': 'type',\n 'invoicing_type': 'invoicingType',\n 'state': 'state',\n 'charge_type': 'chargeType',\n 'calculation': 'calculation',\n 'remaining_credit_behaviour': 'remainingCreditBehaviour',\n 'trial': 'trial',\n 'version_id': 'versionID',\n 'version_number': 'versionNumber'\n }\n\n self._created = created\n self._changed_by = changed_by\n self._updated = updated\n self._id = id\n self._crm_id = crm_id\n self._account_id = account_id\n self._subscription_id = subscription_id\n self._subscription_version_id = subscription_version_id\n self._invoice_id = invoice_id\n self._name = name\n self._description = description\n self._amount = amount\n self._amount_excluding_tax = amount_excluding_tax\n self._currency = currency\n self._period_start = period_start\n self._period_end = period_end\n self._type = type\n self._invoicing_type = invoicing_type\n self._state = state\n self._charge_type = charge_type\n self._calculation = calculation\n self._remaining_credit_behaviour = remaining_credit_behaviour\n self._trial = trial\n self._version_id = version_id\n self._version_number = version_number", "def get_drs_service_info():\n\n reverse_domain_name = reverse_url(url=os.environ[\"HOSTNAME\"])\n\n ret = {\n \"id\": reverse_domain_name,\n \"name\": \"DRS System\",\n \"version\": \"1.0.3\",\n \"type\": {\n \"group\": \"org.ga4gh\",\n \"artifact\": \"drs\",\n \"version\": \"1.0.3\",\n },\n \"organization\": {\n \"name\": \"CTDS\",\n \"url\": \"https://\" + os.environ[\"HOSTNAME\"],\n },\n }\n\n if blueprint.service_info:\n for key, value in blueprint.service_info.items():\n if key in ret:\n if isinstance(value, dict):\n for inner_key, inner_value in value.items():\n ret[key][inner_key] = inner_value\n else:\n ret[key] = value\n\n return flask.jsonify(ret), 200", "def get(self, request, *args, **kwargs):\n debate_handler = DebateHandler()\n rebuttal = debate_handler.get(kwargs.get('rebuttal_uuid'))\n debate = rebuttal.pointer\n return Response(\n status=status.HTTP_200_OK,\n data={\n 'debate': self.serializer_class(debate).data,\n 'rebuttal': self.serializer_class(rebuttal).data\n }\n )", "def to_patient_dto(cls, patient_api):\n asserts.type_of(patient_api, PatientApiModel)\n\n patient_dto = PatientDto()\n map_props(patient_dto, patient_api, PatientDto._props)\n return patient_dto", "def __init__(self):\n self.swagger_types = {\n 'name': 'str',\n 'days': 'SetWrapperDayOfWeek',\n 'flexible_start_time': 'bool',\n 'exact_start_time_minutes_from_midnight': 'int',\n 'earliest_start_time_minutes_from_midnight': 'int',\n 'latest_start_time_minutes_from_midnight': 'int',\n 'constrain_stop_time': 'bool',\n 'constrain_latest_stop_time': 'bool',\n 'latest_stop_time_minutes_from_midnight': 'int',\n 'constrain_earliest_stop_time': 'bool',\n 'earliest_stop_time_minutes_from_midnight': 'int',\n 'start_increment_minutes': 'int',\n 'flexible_paid_time': 'bool',\n 'exact_paid_time_minutes': 'int',\n 'minimum_paid_time_minutes': 'int',\n 'maximum_paid_time_minutes': 'int',\n 'constrain_contiguous_work_time': 'bool',\n 'minimum_contiguous_work_time_minutes': 'int',\n 'maximum_contiguous_work_time_minutes': 'int',\n 'activities': 'list[WorkPlanActivity]',\n 'id': 'str',\n 'delete': 'bool'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'days': 'days',\n 'flexible_start_time': 'flexibleStartTime',\n 
'exact_start_time_minutes_from_midnight': 'exactStartTimeMinutesFromMidnight',\n 'earliest_start_time_minutes_from_midnight': 'earliestStartTimeMinutesFromMidnight',\n 'latest_start_time_minutes_from_midnight': 'latestStartTimeMinutesFromMidnight',\n 'constrain_stop_time': 'constrainStopTime',\n 'constrain_latest_stop_time': 'constrainLatestStopTime',\n 'latest_stop_time_minutes_from_midnight': 'latestStopTimeMinutesFromMidnight',\n 'constrain_earliest_stop_time': 'constrainEarliestStopTime',\n 'earliest_stop_time_minutes_from_midnight': 'earliestStopTimeMinutesFromMidnight',\n 'start_increment_minutes': 'startIncrementMinutes',\n 'flexible_paid_time': 'flexiblePaidTime',\n 'exact_paid_time_minutes': 'exactPaidTimeMinutes',\n 'minimum_paid_time_minutes': 'minimumPaidTimeMinutes',\n 'maximum_paid_time_minutes': 'maximumPaidTimeMinutes',\n 'constrain_contiguous_work_time': 'constrainContiguousWorkTime',\n 'minimum_contiguous_work_time_minutes': 'minimumContiguousWorkTimeMinutes',\n 'maximum_contiguous_work_time_minutes': 'maximumContiguousWorkTimeMinutes',\n 'activities': 'activities',\n 'id': 'id',\n 'delete': 'delete'\n }\n\n self._name = None\n self._days = None\n self._flexible_start_time = None\n self._exact_start_time_minutes_from_midnight = None\n self._earliest_start_time_minutes_from_midnight = None\n self._latest_start_time_minutes_from_midnight = None\n self._constrain_stop_time = None\n self._constrain_latest_stop_time = None\n self._latest_stop_time_minutes_from_midnight = None\n self._constrain_earliest_stop_time = None\n self._earliest_stop_time_minutes_from_midnight = None\n self._start_increment_minutes = None\n self._flexible_paid_time = None\n self._exact_paid_time_minutes = None\n self._minimum_paid_time_minutes = None\n self._maximum_paid_time_minutes = None\n self._constrain_contiguous_work_time = None\n self._minimum_contiguous_work_time_minutes = None\n self._maximum_contiguous_work_time_minutes = None\n self._activities = None\n self._id = None\n self._delete = None", "def __init__(self):\n self.swagger_types = {\n 'id': 'int',\n 'name': 'str',\n 'description': 'str',\n 'build_script': 'str',\n 'repository_configuration': 'RepositoryConfigurationRest',\n 'scm_revision': 'str',\n 'creation_time': 'datetime',\n 'last_modification_time': 'datetime',\n 'archived': 'bool',\n 'project': 'ProjectRest',\n 'environment': 'BuildEnvironmentRest',\n 'dependency_ids': 'list[int]',\n 'product_version_id': 'int',\n 'build_configuration_set_ids': 'list[int]',\n 'generic_parameters': 'dict(str, str)'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'description': 'description',\n 'build_script': 'buildScript',\n 'repository_configuration': 'repositoryConfiguration',\n 'scm_revision': 'scmRevision',\n 'creation_time': 'creationTime',\n 'last_modification_time': 'lastModificationTime',\n 'archived': 'archived',\n 'project': 'project',\n 'environment': 'environment',\n 'dependency_ids': 'dependencyIds',\n 'product_version_id': 'productVersionId',\n 'build_configuration_set_ids': 'buildConfigurationSetIds',\n 'generic_parameters': 'genericParameters'\n }\n\n self._id = None\n self._name = None\n self._description = None\n self._build_script = None\n self._repository_configuration = None\n self._scm_revision = None\n self._creation_time = None\n self._last_modification_time = None\n self._archived = None\n self._project = None\n self._environment = None\n self._dependency_ids = None\n self._product_version_id = None\n self._build_configuration_set_ids = None\n 
self._generic_parameters = None", "def to_caregiver_dto(cls, caregiver_api):\n asserts.type_of(caregiver_api, CaregiverApiModel)\n\n caregiver_dto = CaregiverDto()\n map_props(caregiver_dto, caregiver_api, CaregiverDto._props)\n return caregiver_dto", "def spec():\n # from flask_swagger import swagger\n from uber_swagger import swagger\n\n swag = swagger(app)\n\n # TODO: Use in production and remove 'jsonify' below\n # return json.dumps(\n # swag,\n # separators=(',', ':') # This produces a 'minified' JSON output\n # )\n\n return jsonify(swag) # This produces a 'pretty printed' JSON output", "async def openapi_view(self):\n return self._openapi_json", "def get_openapi_spec() -> ResponseDoc:\n\n openapi_request = urllib.request.Request(\n url=\"http://127.0.0.1:5000/dealer/openapi.json\",\n method=\"GET\",\n headers={\"Accept\": \"application/json\",},\n )\n\n with urllib.request.urlopen(openapi_request) as response:\n assert (\n response.getcode() == 200\n ), f\"Error getting OpenAPI Spec: {response.getcode()!r}\"\n openapi_spec = json.loads(response.read().decode(\"utf-8\"))\n validate_spec(openapi_spec)\n assert (\n openapi_spec[\"info\"][\"title\"] == \"Python Cookbook Chapter 12, recipe 5.\"\n ), f\"Unepxected Server {openapi_spec['info']['title']}\"\n assert (\n openapi_spec[\"info\"][\"version\"] == \"1.0\"\n ), f\"Unepxected Server Version {openapi_spec['info']['version']}\"\n pprint(openapi_spec)\n\n return openapi_spec", "def test_application_form_field_dose(self):\n with self.client:\n api_response = application_form_field_based(self, \"dose\")\n response_data = json.loads(api_response.data.decode())\n\n self.assertTrue(response_data['status'] == \"prediction for dose completed\")\n self.assertEqual(api_response.status_code, 200)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the when of this DutyDetails.
def when(self, when): self._when = when
[ "def setJoinTime(self,when):\n if not when:\n return\n self.joinTime = when", "def __init__(self, when: DutyDetailsWhen=None, where: DutyDetailsWhere=None, requirements: object=None):\n self.openapi_types = {\n 'when': DutyDetailsWhen,\n 'where': DutyDetailsWhere,\n 'requirements': object\n }\n\n self.attribute_map = {\n 'when': 'when',\n 'where': 'where',\n 'requirements': 'requirements'\n }\n\n self._when = when\n self._where = where\n self._requirements = requirements", "def setEditTime(self,when):\n self.editTime = when\n if not self.joinTime:\n self.setJoinTime( when )", "def journalctl_time(when=None):\n # pylint: disable=wrong-spelling-in-docstring\n return (when or datetime.now()).strftime(\"%Y-%m-%d %H:%M:%S\")", "def _set_datetime_mark(self, when):\r\n r = TxRedisMapper()\r\n r.set(STATIC_CACHE_KEY_LAST_MODIFIED, data=when)\r\n etag = hashlib.md5(when.isoformat()).hexdigest()\r\n r.set(STATIC_CACHE_KEY_ETAG, data=etag)", "def set_retirement_date(self, when, warn=None):\n self.load_details()\n lcl_btn(\"Set Retirement Date\")\n sel.wait_for_element(\"#miq_date_1\")\n if when is None:\n try:\n wait_for(lambda: sel.is_displayed(retire_remove_button), num_sec=5, delay=0.2)\n sel.click(retire_remove_button)\n wait_for(lambda: not sel.is_displayed(retire_remove_button), num_sec=10, delay=0.2)\n sel.click(form_buttons.save)\n except TimedOutError:\n pass\n else:\n if sel.is_displayed(retire_remove_button):\n sel.click(retire_remove_button)\n wait_for(lambda: not sel.is_displayed(retire_remove_button), num_sec=15, delay=0.2)\n fill(retire_form.date_retire, when)\n wait_for(lambda: sel.is_displayed(retire_remove_button), num_sec=15, delay=0.2)\n if warn is not None:\n fill(retire_form.warn, warn)\n sel.click(form_buttons.save)", "def queued_when(self, queued_when):\n\n self._queued_when = queued_when", "def create_time_not(self, create_time_not):\n\n self._create_time_not = create_time_not", "def __call__(self):\n if \"expiration_date\" not in self.entity.cw_edited:\n delay = self._cw.vreg.config[\"default_expiration_delay\"]\n self.entity.cw_edited[\"expiration_date\"] = (\n datetime.date.today() + datetime.timedelta(delay))", "def _set_dates(self):\n if self.id is None or self.created_at is None:\n self.created_at = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")", "def set_duration(self, hours, minutes, seconds):\n self.duration = (hours, minutes, seconds)", "def when_failed(self, when_failed):\n\n self._when_failed = when_failed", "def at_time(self, at_time):\n\n self._at_time = at_time", "def set_generatedAtTime(self, generatedAtTime):\n self._attributes[VOPROV['generatedAtTime']] = {generatedAtTime}", "def set_alarm(self, target_time: datetime.time):\n self.time = target_time.replace(second=0, microsecond=0)\n # print the time\n print(\"Alarm set for {}:{}\".format(self.time.hour, self.time.minute))", "def set_pause_trigger_when (self, when = None):\n if self._pause_trigger_type is None:\n raise TypeError('pause trigger type is not specified')\n routine_map = dict(digital_level = 'SetDigLvlPauseTrigWhen',\n analog_level = 'SetAnlgLvlPauseTrigWhen',\n analog_window = 'SetAnlgWinPauseTrigWhen')\n routine = self._get_map_value('set_pause_trigger_when_routine', routine_map, self._pause_trigger_type)\n type_when_map = dict(digital_level = dict (high = DAQmx_Val_High, low = DAQmx_Val_Low),\n analog_level = dict (above = DAQmx_Val_AboveLvl, below = DAQmx_Val_BelowLvl),\n analog_window = dict (inside = DAQmx_Val_InsideWin, outside=DAQmx_Val_OutsideWin))\n when_map = 
self._get_map_value('set_pause_trigger_when_map', type_when_map, self._pause_trigger_type)\n when_val = self._get_map_value('when', when_map, when)\n return CALL (routine, self, when_val)", "def set_due(self, due):\n datestr = due.isoformat()\n self._set_remote_attribute('due', datestr)\n self.due = datestr", "def duty(self, duty):\n if duty is None:\n raise ValueError(\"Invalid value for `duty`, must not be `None`\")\n\n self._duty = duty", "def worm_ctime(self, worm_ctime):\n\n self._worm_ctime = worm_ctime", "def MarkAsReviewed(self, when=None):\n self._last_review_epoch_sec = time.time() if when is None else when" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the where of this DutyDetails.
def where(self): return self._where
[ "def where(self):\n if self._where is None:\n self._where = self._get_attributes(\"where\")\n return self._where", "def location(self):\n return self.patient.get('location', None)", "def where(self, where):\n if self.local_vars_configuration.client_side_validation and where is None: # noqa: E501\n raise ValueError(\"Invalid value for `where`, must not be `None`\") # noqa: E501\n\n self._where = where", "def get_location_weather(self):\n payload = self.get_leland_location()\n payload['appid'] = self.weather_api_key\n # The API defaults to metric, so set the option to return measurements\n # in imperial units instead\n payload['units'] = 'imperial'\n req = requests.get(self.weather_base_url, params=payload)\n return req.json()", "def location_details(self):\n return self._location_details", "def get_district(self):\n for p in self.overview['positions']:\n if not p['In Congress']['end']:\n try:\n return p['District']\n except KeyError:\n return None\n return None", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def get_where(self):\n with perforce.TempP4ExceptionLevel(self.p4, 0):\n # \"a\" allows for matching roots\n where = self.p4.run_where(general.join_paths(self._input_path, '_A_'))\n\n if not where:\n return {}\n\n where = where[0]\n for k, v in where.items():\n if v.endswith('_A_'):\n where[k] = os.path.dirname(v)\n return where", "def location(self):\n return self._redunda.location", "def location(self) -> LyricLocation:\n return self.coordinator.data.locations_dict[self._location.locationID]", "def get_cutoff(self):\n return self.data['cutoff_time']", "def get_cutoff(self):\n return self.sport_days[0].get_cutoff()", "def getCity(self):\n return self._city", "def get_today_weather(self):\n return self.data['weather1']", "def getSightPoint(self, distFromEye: 'double const') -> \"SbVec3d\":\n return _coin.SbDPViewVolume_getSightPoint(self, distFromEye)", "def get_upperright_y(self):\n return self[3]", "def location(self) -> Optional[ConsoleNotificationLocation]:\n return self.__location", "def getSightPoint(self, distFromEye: 'float const') -> \"SbVec3f\":\n return _coin.SbViewVolume_getSightPoint(self, distFromEye)", "def get_filter_slope(self):\n return self.slopes[np.int(self.query(\"OFSL?\"))]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the where of this DutyDetails.
def where(self, where): self._where = where
[ "def where(self, where):\n if self.local_vars_configuration.client_side_validation and where is None: # noqa: E501\n raise ValueError(\"Invalid value for `where`, must not be `None`\") # noqa: E501\n\n self._where = where", "def where(self):\n if self._where is None:\n self._where = self._get_attributes(\"where\")\n return self._where", "def where(self):\n return self._where", "def __init__(self, when: DutyDetailsWhen=None, where: DutyDetailsWhere=None, requirements: object=None):\n self.openapi_types = {\n 'when': DutyDetailsWhen,\n 'where': DutyDetailsWhere,\n 'requirements': object\n }\n\n self.attribute_map = {\n 'when': 'when',\n 'where': 'where',\n 'requirements': 'requirements'\n }\n\n self._when = when\n self._where = where\n self._requirements = requirements", "def where(self, cond, other=None, inplace=False):\n raise NotImplementedError", "def where( self, selection:'Selection' ) -> 'Selection':\n\t\tself._where.append(selection)\n\t\treturn self", "def unit_of_measurement(self, unit_of_measurement):\n\n self._unit_of_measurement = unit_of_measurement", "def set_location(self, v):\n self.location = v", "def _parse_where(self, where_dict: dict, parent: BaseObjectBuilder):\r\n assert isinstance(where_dict, dict)\r\n assert isinstance(parent, BaseObjectBuilder)\r\n\r\n name = None\r\n sql_sets = []\r\n arguments = []\r\n\r\n for (key, val) in where_dict.items():\r\n key = _strip_key(key)\r\n if key == 'dialects':\r\n for chv in self.fetch_dicts_from_list(key, val, 'dialect'):\r\n sql = SqlStatementBuilder(parent)\r\n sql_sets.append(sql.make(chv))\r\n elif key in ['sql', 'value']:\r\n chv = {\r\n 'syntax': 'universal',\r\n 'platforms': 'all',\r\n 'sql': val\r\n }\r\n sql = SqlStatementBuilder(parent)\r\n sql_sets.append(sql.make(chv))\r\n elif key == 'name':\r\n name = parent.to_str(key, val).strip()\r\n elif key in ['arg', 'argument']:\r\n arguments.append(self._parse_argument(val, parent))\r\n elif key in ['arguments', 'args']:\r\n for chv in self.fetch_dicts_from_list(\r\n key, val, ['arg', 'argument']):\r\n arguments.append(self._parse_argument(chv, parent))\r\n else:\r\n parent.unknown_key(key, val)\r\n if len(sql_sets) <= 0:\r\n parent.problem(\"no sql or dialects set for where clause\",\r\n FATAL_TYPE)\r\n return None\r\n\r\n return WhereClause(name, SqlSet(sql_sets, arguments))", "async def set_dhw_setpoint(call: ServiceCall) -> None:\n gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]\n await gw_dev.gateway.set_dhw_setpoint(call.data[ATTR_TEMPERATURE])", "def location_details(self, location_details):\n\n self._location_details = location_details", "def set_adm_location(self):\n match = None\n if self.has_non_empty_attribute(\"municipio\"):\n try_match = utils.q_from_first_wikilink(\"es\", self.municipio)\n link_match = utils.get_item_from_dict_by_key(\n dict_name=self.data_files[\"admin\"],\n search_term=try_match,\n search_in=\"item\")\n if len(link_match) == 1:\n match = link_match[0]\n else:\n self.add_to_report(\"municipio\", self.municipio, \"located_adm\")\n if not match:\n dep_match = utils.get_item_from_dict_by_key(\n dict_name=self.data_files[\"departments\"],\n search_term=self.iso,\n search_in=\"iso\")\n if len(dep_match) == 1:\n match = dep_match[0]\n else:\n self.add_to_report(\"iso\", self.iso, \"located_adm\")\n\n if match:\n self.add_statement(\"located_adm\", match)", "def add_where_clause(self):\n if len(self.query_model.triples) > 0 or len(self.query_model.subqueries) > 0 or \\\n len(self.query_model.unions) >0 or 
len(self.query_model.optionals) > 0 or \\\n len(self.query_model.filter_clause) > 0 or len(self.query_model.optional_subqueries) > 0 or \\\n len(self.query_model.graph_triples) > 0 or len(self.query_model.graph_clause) > 0 or \\\n len(self.query_model.optional_graph_clause) > 0:\n where_string = self.__add_patterns()\n self.query_string += \"WHERE {\" + where_string + \"\\n\\t}\"\n else:\n self.query_string += \"WHERE {}\"", "def duty(self, duty):\n if duty is None:\n raise ValueError(\"Invalid value for `duty`, must not be `None`\")\n\n self._duty = duty", "def where_attr(self, attr, dtype):\n return np.array([self._hfile[\"where\"].attrs[attr]], dtype=dtype)", "def setJoinTime(self,when):\n if not when:\n return\n self.joinTime = when", "def below(self, dy):\n return Location(self.x, self.y+dy)", "def above(self, dy):\n return Location(self.x, self.y-dy)", "def _where(self):\n result = []\n result.extend(self._partition_selector())\n result.extend(self._job_and_fuzzer_selector())\n\n result = ' AND '.join(result)\n if result:\n return 'WHERE ' + result\n\n return ''", "def setcity(ctx, city, cityid, timezone):\n logging.info('Setting City %s to %d', city, cityid)\n\n city_data = get_city_data()\n if city not in city_data:\n city_data[city] = {}\n city_data[city]['id'] = cityid\n city_data[city]['timezone'] = timezone\n write_city_data(city_data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the requirements of this DutyDetails.
def requirements(self): return self._requirements
[ "def get_requirements(self):\n pass", "def optional_requirements(self):\n return self.__optional_requirements", "def data_requirements(self) -> List[DataRequirement]:\n return self._data_requirements", "def equipmentRequirements(self):\n if self.reqid:\n return EquipmentReqsInfo(reqs_id=self.reqid, registry=DEFAULT_EQUIPMENT_REGISTRY)\n else:\n return None", "def data_requirements(self) -> List[DataRequirement]:\n pass", "def getRequirements(self, registry=DEFAULT_BOOKING_REGISTRY):\n if self.requirements:\n # Note that booking requirements are stored in the DEFAULT_EQUIPMENT_REGISTRY\n return BookingReqsInfo( reqs_id=self.requirements, registry=registry )\n else:\n return None", "def get_fcas_requirements(self):\n return self.fcas_requirements", "def test_get_requirements(self):\n\n # Get first part\n prt = Part.list(self.api, limit=1)[0]\n\n # Get requirements list\n req = prt.getRequirements()\n\n # Check for expected content\n self.assertIsInstance(req, dict)\n self.assertIn('available_stock', req)\n self.assertIn('on_order', req)\n self.assertIn('required_build_order_quantity', req)\n self.assertIn('allocated_build_order_quantity', req)\n self.assertIn('required_sales_order_quantity', req)\n self.assertIn('allocated_sales_order_quantity', req)\n self.assertIn('allocated', req)\n self.assertIn('required', req)", "def __get_requirements(self, requirements):\n requirements_section = \"(:REQUIREMENTS \"\n for requirement in requirements:\n requirements_section = requirements_section + requirement + \" \"\n return requirements_section + \")\"", "def find_requirements(self) -> None:\n for reqs in self.tiling.requirements:\n self.requirements.append(list(str(req) for req in reqs))", "def requirements(self):\n if not self:\n return RequirementCollection()\n return RequirementCollection(\n reduce(\n lambda x, y: x + y,\n [package.requirements for package in self]\n )\n )", "def getRequirementsMetString(self):\n return self.requirements_string", "def get_required_components(self):\n return []", "def required_fields(self) -> List[str]:\n return self._required_fields", "def get_requirements():\n\n\tprint('''\n\t\t\\n1. Work with your team.\n\t\t\\n2. Backward-engineer screenshot below.\n\t\t\\n3. If errors, check missing installations (e.g. pandas_datareader, matplotlib pyplot and style)\n\t\t\\n4. Research how to do any necessary installations, *only* if needed:\n\t\t\\n5. Also, include at *least* three graphs (dates from Jan. 1st 2010 until now).\n\t\t\\n6. Create a different * style * of graph for each of the companies shown below.\n\t\t\\n7. Optional: Create at least three functions that are called by the program:\n\t\t\\n\\ta. main(): calls at least two other functions\n\t\t\\n\\tb. get_requirements(): displays the program requirements.\n\t\t\\n\\tc. 
data_analysis_1(): displays the following data.''')", "def fortifications(self):\n return self.board.possible_fortifications(self.player_id)", "def needs(self) :\r\n return ({'water need':self._waterNeed,'food need':self._foodNeed})", "def required_parts(self):\n parts = []\n\n for item in self.part.bom_items.all():\n part = {'part': item.sub_part,\n 'per_build': item.quantity,\n 'quantity': item.quantity * self.quantity\n }\n\n parts.append(part)\n\n return parts", "def check_requirements(self):\n availableInstruments = []\n availableSettings = []\n for key, val in self.instruments.items(): # create list of available instruments\n availableInstruments.append(key)\n for key, val in self.instruments.items(): # create list of available settings\n availableSettings.append(key)\n for instrument in self.requiredInstruments: # rise error for each missing instrument\n if instrument not in availableInstruments:\n raise NotImplementedError('Instrument {} not available.'.format(instrument))\n for setting in self.requiredSettings: # rise error for each missing Setting\n if setting not in availableSettings:\n raise NotImplementedError('Setting {} not available.'.format(instrument))", "def optional_requirements() -> Sequence[Type[Requirement]]:\n return tuple()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the requirements of this DutyDetails.
def requirements(self, requirements): self._requirements = requirements
[ "def set_demand(self, d, j=None):\n if j is None:\n api.set_demands(d)\n else:\n api.set_demand(j, d)", "def agent_requirements(self, agent_requirements):\n\n self._agent_requirements = agent_requirements", "def update_dietary_requirements():\n if flask.request.method != 'POST':\n return flask.redirect(flask.request.referrer or\n flask.url_for('dashboard.profile'))\n\n dietary_requirements = login.current_user.dietary_requirements\n\n for requirement in [\n 'pescetarian',\n 'vegetarian',\n 'vegan',\n 'gluten_free',\n 'nut_free',\n 'dairy_free',\n 'egg_free',\n 'seafood_free',\n ]:\n if (\n requirement in flask.request.form and\n flask.request.form[requirement] == 'Yes'\n ):\n setattr(dietary_requirements, requirement, True)\n else:\n setattr(dietary_requirements, requirement, False)\n\n if (\n 'other' in flask.request.form and\n flask.request.form['other'] != ''\n ):\n dietary_requirements.other = flask.request.form['other']\n else:\n dietary_requirements.other = None\n\n DB.session.commit()\n\n APP.log_manager.log_event(\n 'Updated dietary requirements',\n user=login.current_user\n )\n\n flask.flash(\n 'Your dietary requirements have been updated',\n 'success'\n )\n\n return flask.redirect(flask.request.referrer or\n flask.url_for('dashboard.profile'))", "def __init__(self, when: DutyDetailsWhen=None, where: DutyDetailsWhere=None, requirements: object=None):\n self.openapi_types = {\n 'when': DutyDetailsWhen,\n 'where': DutyDetailsWhere,\n 'requirements': object\n }\n\n self.attribute_map = {\n 'when': 'when',\n 'where': 'where',\n 'requirements': 'requirements'\n }\n\n self._when = when\n self._where = where\n self._requirements = requirements", "def set_demand_constraints(self, demand):\n # 1. Create the constraints\n lhs_coefficients, rhs_and_type = market_constraints.energy(self.decision_variables['energy_bids'],\n demand, self.unit_info, self.next_constraint_id)\n # 2. Save constraint details\n self.market_constraints_lhs_coefficients['demand'] = lhs_coefficients\n self.market_constraints_rhs_and_type['demand'] = rhs_and_type\n # 3. 
Update the constraint id\n self.next_constraint_id = max(lhs_coefficients['constraint_id']) + 1", "def setRequirement(self, account, acl, req_name, req_type, allowed_values=None, req_help=None):\n acl.assertIsAdministrator(account)\n\n req_name = to_string(req_name) \n req_type = to_string(req_type)\n allowed_values = AllowedValues(allowed_values).toString()\n req_help = to_string(req_help)\n\n if not (req_name and req_type):\n return\n\n index = -1\n\n for i in range(0, len(self.requirements)):\n if self.requirements[i].reqname == req_name:\n if req_type == self.requirements[i].reqtype and req_help == self.requirements[i].reqhelp and \\\n allowed_values == self.requirements[i].allowed_values:\n # nothing to do\n return\n\n index = i\n break\n\n item = self._getFromDB()\n\n if index == -1:\n item.requirements.append( EquipmentReq( reqname=req_name,\n reqtype=req_type,\n allowed_values=allowed_values,\n reqhelp=req_help ) )\n index = len(item.requirements) - 1\n else:\n item.requirements[index].reqname = req_name\n item.requirements[index].reqtype = req_type\n item.requirements[index].allowed_values = allowed_values\n item.requirements[index].reqhelp = req_help\n\n item.put()\n\n self.requirements.append( EquipmentReqInfo(item.requirements[index]) )", "def set_demand_constraints(self, demand):\n if self.validate_inputs:\n self._validate_demand(demand)\n rhs_and_type, variable_map = market_constraints.energy(demand, self._next_constraint_id)\n self._market_constraints_rhs_and_type['demand'] = rhs_and_type\n self._constraint_to_variable_map['regional']['demand'] = variable_map\n self._next_constraint_id = max(rhs_and_type['constraint_id']) + 1", "def password_requirement(self, password_requirement):\n\n self._password_requirement = password_requirement", "def usage_measurements(self, usage_measurements):\n\n self._usage_measurements = usage_measurements", "def add_requirements(obj, **kw):\n new = dict()\n new.update(obj.required)\n new.update(kw)\n obj.required = new", "def setRequired(self, required):\n self.__isRequired = required", "def data_requirements(self) -> List[DataRequirement]:\n pass", "def set_unit_capacity_constraints(self, unit_limits):\n # 1. Create the constraints\n lhs_coefficients, rhs_and_type = unit_constraints.capacity(self.decision_variables['energy_bids'], unit_limits,\n self.next_constraint_id)\n # 2. Save constraint details.\n self.constraints_lhs_coefficients['unit_capacity'] = lhs_coefficients\n self.constraints_rhs_and_type['unit_capacity'] = rhs_and_type\n # 3. 
Update the constraint and variable id counter\n self.next_constraint_id = max(lhs_coefficients['constraint_id']) + 1", "def equipmentRequirements(self):\n if self.reqid:\n return EquipmentReqsInfo(reqs_id=self.reqid, registry=DEFAULT_EQUIPMENT_REGISTRY)\n else:\n return None", "def desired_defenders(self, desired_defenders):\n\n self._desired_defenders = desired_defenders", "def with_requirements(self, requirements: Union[str, List[str]]):\n if isinstance(requirements, str):\n with open(requirements, \"r\") as fp:\n requirements = fp.readlines()\n commands = self.spec.build.commands or []\n commands.append(\"python -m pip install \" + \" \".join(requirements))\n self.spec.build.commands = commands\n return self", "def write_requirements(self, lines):\n raise NotImplementedError", "def find_requirements(self) -> None:\n for reqs in self.tiling.requirements:\n self.requirements.append(list(str(req) for req in reqs))", "def duty(self, duty):\n if duty is None:\n raise ValueError(\"Invalid value for `duty`, must not be `None`\")\n\n self._duty = duty", "def required_drop_capabilities(self, required_drop_capabilities):\n if required_drop_capabilities is None:\n raise ValueError(\"Invalid value for `required_drop_capabilities`, must not be `None`\")\n\n self._required_drop_capabilities = required_drop_capabilities" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the 2-norm of the error between vectors x and y
def err_norm2(x, y): normsq = sum(((x[k]-y[k])**2 for k in range(len(x)))) return np.sqrt(normsq)
[ "def std_error_slow(self, x, y):\n return self.std(x, y) / np.sqrt(self.num_points_in_bins(x))", "def std_error(self, x, y):\n std, _, binnum = binned_statistic(x, y, statistic='std', bins=self.bin_edges)\n num_points = np.array([len(binnum[binnum==i+1]) for i in range(len(self))])\n\n return std / np.sqrt(num_points)", "def distance(x, y):\n return np.linalg.norm(x - y)", "def l2_norm(x):\n return np.linalg.norm(x)", "def l2_norm(x):\n return np.sqrt(np.dot(x.T, x))", "def error(pars, func, x, y):\n return rms(func(pars, x) - y)", "def two_norm(self):\n return _vnl_vectorPython.vnl_vectorUS_two_norm(self)", "def rel_error(x, y):\n x, y = np.array(x), np.array(y)\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))", "def numpy_l2norm2(x):\n if x.dtype is not np.float64:\n x = x.astype(np.float64)\n x = x.reshape(-1)\n return np.inner(x, x)", "def two_norm(self):\n return _vnl_vectorPython.vnl_vectorLD_two_norm(self)", "def normalized_difference(x, y):\n\n return (x - y) / (x + y)", "def get_error_norm(self):\n return self.error_norm", "def standard_2dnormal(x, y, _sigma):\n return np.exp(-0.5 / _sigma ** 2 * (x ** 2 + y ** 2)) / (2 * np.pi * _sigma ** 2)", "def two_norm(self):\n return _vnl_vectorPython.vnl_vectorUL_two_norm(self)", "def dmse(f_x, y): \n return 2 * (f_x - y)", "def two_norm(self):\n return _vnl_vectorPython.vnl_vectorUC_two_norm(self)", "def error(x1, x2):\n return x2/x1 - 1", "def error(design, ws, ys):\n return sqrt(mean((np.dot(row, ws) - y) ** 2\n for row, y in zip(design, ys)))", "def grad2Dnorm(self,arr):\n\n d_x = self.deriv(arr,axis=0)\n d_y = self.deriv(arr,axis=1)\n \n return np.sqrt(d_x**2+d_y**2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function Finds the distance between each waypoint (by calling Google's Distance Matrix API) and stores the distance and duration between the two in a file.
def find_distances(all_waypoints): waypoint_distances = {} waypoint_durations = {} for (waypoint1, waypoint2) in combinations(all_waypoints, 2): try: response = get_distance_matrix([waypoint1, waypoint2]) ##"distance" is in meters print(response) distance = response['distance'] # "duration" is in seconds duration = response['travelTime'] waypoint_distances[frozenset([waypoint1, waypoint2])] = distance waypoint_durations[frozenset([waypoint1, waypoint2])] = duration except Exception as e: print("Error with finding the route between %s and %s." % (waypoint1, waypoint2), e) with open("my-waypoints-dist-dur.tsv", "w") as out_file: out_file.write("\t".join(["waypoint1", "waypoint2", "distance_m", "duration_s"])) for (waypoint1, waypoint2) in waypoint_distances.keys(): out_file.write("\n" + "\t".join([waypoint1, waypoint2, str(waypoint_distances[frozenset([waypoint1, waypoint2])]), str(waypoint_durations[frozenset([waypoint1, waypoint2])])]))
[ "def find_station_distances(station_list):\n output = \"\"\n for i in range(len(station_list)):\n station = station_list[i][0]\n n1 = station_list[i][1]\n e1 = station_list[i][2]\n for j in range(len(station_list[i:-1])):\n next_station = station_list[j][0]\n n2 = station_list[j][1]\n e2 = station_list[j][2]\n distance = dist(n1,e1,n2,e2)\n output += station + \" -> \" + next_station + \"= \" + str(distance) +\" m\\n\"\n out = open(\"distances.txt\",\"w\")\n out.write(output)\n out.close()", "def export_distances_to_file(experiment,\n distance_id,\n distances,\n times,\n self_distances=False):\n path_to_folder = os.path.join(os.getcwd(), \"experiments\", experiment.experiment_id, \"distances\")\n make_folder_if_do_not_exist(path_to_folder)\n path = os.path.join(path_to_folder, f'{distance_id}.csv')\n\n with open(path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=';')\n writer.writerow([\"instance_id_1\", \"instance_id_2\", \"distance\", \"time\"])\n\n for i, instance_1 in enumerate(experiment.elections):\n for j, instance_2 in enumerate(experiment.elections):\n if i < j or (i == j and self_distances):\n distance = str(distances[instance_1][instance_2])\n time_ = str(times[instance_1][instance_2])\n writer.writerow([instance_1, instance_2, distance, time_])", "def analyze(coordinates_data,filename,paths,soup):\n stop_coordinates={}\n total_distance=0\n latitudes=[]\n longitudes=[]\n total_speed=[]\n decel_cordinates={}\n for coordinates in coordinates_data:\n \"\"\"\n Get the latitude, longitude and speed in the array.\n \"\"\"\n longitude=float(coordinates[0])\n latitude=float(coordinates[1])\n speed=float(coordinates[2])\n latitudes.append(latitude)\n longitudes.append(longitude)\n total_speed.append(speed)\n for i in range(len(latitudes)-1):\n lat1=latitudes[i]\n long1=longitudes[i]\n lat2=latitudes[i+1]\n long2=longitudes[i+1]\n # calculate distance only if car is not in RIT. Distance within RIT parking space\n # is not the correct measure of path\n if(not checkWithinRITRadius(lat1,long1)):\n # reference https://geopy.readthedocs.io/en/stable/#module-geopy.distance\n distance=geopy.distance.distance((lat1,long1),(lat2,long2)).miles\n total_distance+=distance\n max_speed=max(total_speed)\n # get speed by ignoring small stops at initial and final positions\n corrected_speed_start,corrected_speed_end=get_speed_correction(total_speed)\n correctedSpeedList=total_speed[corrected_speed_start:corrected_speed_end+1]\n # median speed\n median_speed=statistics.median_high(correctedSpeedList)\n for i in range(len(total_speed)-1):\n # detect stops. if nearby coordinate also has 0 speed. It means car has slowly moved to next lat and long.\n # this means car is still stopped at the signal, just moved a little bit. 
Hence we count all such coordinates\n # into one.\n if total_speed[i]==0.0 and (longitudes[i],latitudes[i]) not in stop_coordinates:\n checkSurrounding=get_surrounding_coordinates(longitudes[i],latitudes[i],stop_coordinates)\n if checkSurrounding==-1:\n stop_coordinates[(longitudes[i],latitudes[i])]=0\n else:\n stop_coordinates[checkSurrounding]+=1\n if total_speed[i]==0.0 and (longitudes[i],latitudes[i]) in stop_coordinates:\n stop_coordinates[(longitudes[i],latitudes[i])]+=1\n # detect decelerations.\n # considering change of 0.05 is not big change in speed\n if total_speed[i+1]+0.05<total_speed[i] and (longitudes[i],latitudes[i]) not in decel_cordinates:\n checkSurrounding=get_surrounding_coordinates(longitudes[i],latitudes[i],decel_cordinates)\n if checkSurrounding==-1:\n decel_cordinates[(longitudes[i],latitudes[i])]=0\n else:\n decel_cordinates[checkSurrounding] += 1\n if total_speed[i] == 0.0 and (longitudes[i], latitudes[i]) in decel_cordinates:\n decel_cordinates[(longitudes[i], latitudes[i])] += 1\n decelerations=len(decel_cordinates)\n print(\"total distance\",total_distance,\"miles\")\n print(\"Max speed\",max_speed)\n # print(\"stops-time function\",stops)\n print(\"stops\",len(stop_coordinates)-2)\n print(\"total decelerations\",decelerations)\n print(\"Median speed\",median_speed)\n print(\"Time taken\",total_distance/median_speed)\n currentPath=Path(total_distance,max_speed,median_speed,len(stop_coordinates)-2,decelerations,filename)\n paths.append(currentPath)", "def main():\n data=loadData('FileName.csv')\n APIKey = \"put here your key\"\n\n file = open('out.txt', 'w')\n\n for i in range(0,len(data)):\n line = list(data[i])\n #The index 13 corresponds to \"From Provider\"\n #The index 19 corresponds to \"To Client\"\n fromP = line[13].replace(\" \",\",\")\n toCl = line[19].replace(\" \",\",\")\n\n #Put the distance at the end of each line\n line.append(getDistance(fromP,toCl,APIKey))\n\n #Write the information in the file\n file.writelines([\"%s;\" % item for item in line])\n file.write(\"\\n\")\n\n file.close()\n print(\"Completed task\")", "def extract_distances(location='1309 NW 5th Ave, Gainesville, FL', key='AIzaSyCyKFoosxiZo-j_i5TE113FbOtGnj1Ls2Q'):\n maps = googlemaps.Client(key)\n # Convert address to long/lat coordinates\n origin = maps.geocode(location)\n origin = (origin[0][u'geometry'][u'location'][u'lat'], origin[0][u'geometry'][u'location'][u'lng'])\n # Acquire nearby stores\n nearby_stores = maps.places('grocery', origin, language='English', radius=10)\n store_addresses = []\n time_debt = dict()\n for store in nearby_stores[u'results']:\n store_addresses.append((store[u'name'], store[u'formatted_address']))\n time_debt[store[u'name']] = dict()\n time_debt[store[u'name']]['address'] = store[u'formatted_address']\n # Compute distance from origin to each store\n distances = maps.distance_matrix(origin, [address for (name, address) in store_addresses],\n mode=\"driving\", language='English')\n dist_list = []\n for element in distances[u'rows'][0][u'elements']:\n dist_list.append((element[u'duration'][u'value'], element[u'distance'][u'value']))\n idx = 0\n for (name, address) in store_addresses:\n time_debt[name]['seconds'] = dist_list[idx][0]\n time_debt[name]['meters'] = dist_list[idx][1]\n idx += 1\n return time_debt", "def read_file_with_distances( file_with_distances,logger ):\r\n\tlist_result = []\r\n\treadHandle = codecs.open( file_with_distances, 'r', 'utf-8', errors = 'replace' )\r\n\tcontent = readHandle.readlines()\r\n\tfor line in 
content:\r\n\t\tline_splitted = line.rstrip('\\n\\r').split(\"\\t\")\r\n\t\tif len(line_splitted) == 3 :\r\n\t\t\tartefact1=line_splitted[0]\r\n\t\t\tartefact2=line_splitted[1]\r\n\t\t\tdistance=float( line_splitted[2] )\r\n\t\t\tif (not 'ged_filter_zero_distances' in ConfigSectionReader(Config,\"ged_app\")) or (ConfigSectionReader(Config,\"ged_app\")['ged_filter_zero_distances'] == 'False') :\r\n\t\t\t\t#list_result.append( [ (artefact1, artefact2), distance + 0.000001 * random.randint( 0,1000 ) ] )\r\n\t\t\t\tlist_result.append( [ (artefact1, artefact2), distance ] )\r\n\t\t\telse :\r\n\t\t\t\t# for GED a score of 0.0 appears to be a default no GED (i.e. completely different graph)\r\n\t\t\t\t# overall low score is good, so 0.0 is worse than everything. 1.0 is better than 2.0, which is better than 7.0 etc.\r\n\t\t\t\tif distance > 0.0 :\r\n\t\t\t\t\t# list_result.append( [ (artefact1, artefact2), distance + 0.000001 * random.randint( 0,1000 ) ] )\r\n\t\t\t\t\tlist_result.append( [ (artefact1, artefact2), distance ] )\r\n\treadHandle.close()\r\n\r\n\t# remove any duplicate or mirrored artifact pairs\r\n\tlogger.info( 'removing duplicate and mirrored pairs' )\r\n\tnMirror = 0\r\n\tnDuplicate = 0\r\n\tnIndex1 = 0\r\n\twhile nIndex1 < len(list_result) :\r\n\t\tnIndex2 = nIndex1 + 1\r\n\t\twhile nIndex2 < len(list_result) :\r\n\r\n\t\t\tbBad = False\r\n\t\t\t# check duplicate\r\n\t\t\tif list_result[nIndex1][0] == list_result[nIndex2][0] :\r\n\t\t\t\tnDuplicate = nDuplicate + 1\r\n\t\t\t\tbBad = True\r\n\t\t\t# check mirror\r\n\t\t\tif list_result[nIndex1][0] == ( list_result[nIndex2][0][1], list_result[nIndex2][0][0] ) :\r\n\t\t\t\tnMirror = nMirror + 1\r\n\t\t\t\tbBad = True\r\n\t\t\t\r\n\t\t\tif bBad == True :\r\n\t\t\t\tdel list_result[nIndex2]\r\n\t\t\telse :\r\n\t\t\t\tnIndex2 = nIndex2 + 1\r\n\r\n\t\tnIndex1 = nIndex1 + 1\r\n\tlogger.info( 'mirrored (' + str(nMirror) + ') duplicates (' + str(nDuplicate) + ')' )\r\n\r\n\t# sort, using small fraction random noise to split up randomly scores with same distance value\r\n\tlogger.info( 'sorting pairs by score' )\r\n\tlist_result = sorted( list_result, key=lambda entry: entry[1], reverse = False )\r\n\r\n\t# return sorted list\r\n\treturn list_result", "def update_distances(self, position, rows=1):\n waypoints = self.waypoints\n aircraft = self.performance_settings[\"aircraft\"]\n\n def get_duration_fuel(flightlevel0, flightlevel1, distance, weight, lastleg):\n if flightlevel0 == flightlevel1:\n tas, fuelflow = aircraft.get_cruise_performance(flightlevel0 * 100, weight)\n duration = 3600. 
* distance / (1.852 * tas) # convert to s (tas is in nm/h)\n leg_fuel = duration * fuelflow / 3600.\n return duration, leg_fuel\n else:\n if flightlevel0 < flightlevel1:\n duration0, dist0, fuel0 = aircraft.get_climb_performance(flightlevel0 * 100, weight)\n duration1, dist1, fuel1 = aircraft.get_climb_performance(flightlevel1 * 100, weight)\n else:\n duration0, dist0, fuel0 = aircraft.get_descent_performance(flightlevel0 * 100, weight)\n duration1, dist1, fuel1 = aircraft.get_descent_performance(flightlevel1 * 100, weight)\n duration = (duration1 - duration0) * 60 # convert from min to s\n dist = (dist1 - dist0) * 1.852 # convert from nm to km\n fuel = fuel1 - fuel0\n if lastleg:\n duration_p, fuel_p = get_duration_fuel(flightlevel0, flightlevel0, distance - dist, weight, False)\n else:\n duration_p, fuel_p = get_duration_fuel(flightlevel1, flightlevel1, distance - dist, weight, False)\n return duration + duration_p, fuel + fuel_p\n\n pos = position\n for offset in range(rows):\n pos = position + offset\n wp1 = waypoints[pos]\n # The distance to the first waypoint is zero.\n if pos == 0:\n wp1.distance_to_prev = 0.\n wp1.distance_total = 0.\n\n wp1.leg_time = 0 # time from previous waypoint\n wp1.cum_time = 0 # total time of flight\n wp1.utc_time = self.performance_settings[\"takeoff_time\"].toPyDateTime()\n wp1.weight = self.performance_settings[\"takeoff_weight\"]\n wp1.leg_fuel = 0\n wp1.rem_fuel = self.performance_settings[\"takeoff_weight\"] - self.performance_settings[\"empty_weight\"]\n wp1.ascent_rate = 0\n else:\n wp0 = waypoints[pos - 1]\n wp1.distance_to_prev = get_distance(\n wp0.lat, wp0.lon, wp1.lat, wp1.lon)\n\n last = (pos - 1 == rows)\n time, fuel = get_duration_fuel(\n wp0.flightlevel, wp1.flightlevel, wp1.distance_to_prev, wp0.weight, lastleg=last)\n wp1.leg_time = time\n wp1.cum_time = wp0.cum_time + wp1.leg_time\n wp1.utc_time = wp0.utc_time + datetime.timedelta(seconds=wp1.leg_time)\n wp1.leg_fuel = fuel\n wp1.rem_fuel = wp0.rem_fuel - wp1.leg_fuel\n wp1.weight = wp0.weight - wp1.leg_fuel\n if wp1.leg_time != 0:\n wp1.ascent_rate = int((wp1.flightlevel - wp0.flightlevel) * 100 / (wp1.leg_time / 60))\n else:\n wp1.ascent_rate = 0\n wp1.ceiling_alt = aircraft.get_ceiling_altitude(wp1.weight)\n\n # Update the distance of the following waypoint as well.\n if pos < len(waypoints) - 1:\n wp2 = waypoints[pos + 1]\n wp2.distance_to_prev = get_distance(\n wp1.lat, wp1.lon, wp2.lat, wp2.lon)\n if wp2.leg_time != 0:\n wp2.ascent_rate = int((wp2.flightlevel - wp1.flightlevel) * 100 / (wp2.leg_time / 60))\n else:\n wp2.ascent_rate = 0\n\n # Update total distances of waypoint at index position and all\n # following waypoints.\n for i in range(max(min(position, 1), 1), len(waypoints)):\n wp0 = waypoints[i - 1]\n wp1 = waypoints[i]\n wp1.distance_total = wp0.distance_total + wp1.distance_to_prev\n wp1.weight = wp0.weight - wp0.leg_fuel\n last = (i + 1 == len(waypoints))\n time, fuel = get_duration_fuel(\n wp0.flightlevel, wp1.flightlevel, wp1.distance_to_prev, wp0.weight, lastleg=last)\n\n wp1.leg_time = time\n wp1.cum_time = wp0.cum_time + wp1.leg_time\n wp1.utc_time = wp0.utc_time + datetime.timedelta(seconds=wp1.leg_time)\n wp1.leg_fuel = fuel\n wp1.rem_fuel = wp0.rem_fuel - wp1.leg_fuel\n wp1.weight = wp0.weight - wp1.leg_fuel\n wp1.ceiling_alt = aircraft.get_ceiling_altitude(wp1.weight)\n\n index1 = self.createIndex(0, TIME_UTC)\n self.dataChanged.emit(index1, index1)", "def export_results(file, results):\n\n with open(file, 'w+') as csvfile:\n writer = csv.writer(csvfile)\n 
writer.writerow(('Time [ns]', 'Best path', 'Best distance',\n 'Current path', 'Current distance'))\n for r in results:\n best_distance = r.best.distance if r.best else -1\n current_distance = r.current.distance if r.current else -1\n writer.writerow((r.time,\n r.best.path if r.best else '',\n best_distance,\n r.current.path if r.current else '',\n current_distance))", "def find_distances(file_, read=True, segments=None, ids=None):\n\n try:\n\n # read from pickle\n if not read:\n raise IOError \n in_file = open(file_)\n logging.info('Reading distance file')\n distances = pickle.load(in_file)\n\n except IOError:\n\n # calculate\n logging.info('Calculating distances')\n distances = segments.pairwiseDistance(ids=ids, mode='min')\n\n # save \n out_file = open(file_, 'wb')\n pickle.dump(distances, out_file, -1)\n\n return distances", "def _read_dwd(date, timezone, longitude, latitude, path):\n \n # initialize variables \n dwdpath = os.path.join(os.path.join(path, \"dwd\"))\n fields = [\"aswdifd_s\", \"aswdir_s\", \"t_2m\", \"t_g\"]\n \n lastForecast = None\n for f in range(len(fields)):\n # get date of latest forecast\n dirList = os.listdir(os.path.join(dwdpath, fields[f]))\n dirList.sort(reverse = True)\n if dirList[0].rsplit(\"_\", 2)[0] == 120:\n lastForecast = dirList[0].rsplit(\"_\", 2)[1]\n \n if lastForecast != None:\n # unpack compressed, latest forecast\n os.system(\"bunzip2 --keep `find \" + dwdpath + \" -name '*\" + lastForecast + \"*.bz2'`\")\n \n dates = []\n data = []\n for f in range(len(fields)):\n # list all extracted grib files\n dirList = glob.glob(os.path.join(dwdpath, fields[f], \"*\" + lastForecast + \"*.grib2\"))\n dirList.sort()\n \n lastValue = 0\n data.append([])\n \n if len(dirList) >= 48:\n for i in range(24):\n grb = pygrib.open(dirList[i])\n grb.seek(0)\n \n lat, lon = grb.latlons()\n i, j = _get_location_nearest(lat, lon, latitude, longitude)\n \n lastTimestamp = False\n firstTimestamp = False\n for g in grb:\n timestamp = datetime.datetime.strptime(str(g['validityDate']) + \" \" + '%0.0f'%(g['validityTime']/100.0), \"%Y%m%d %H\")\n \n if lastTimestamp:\n if f == 0:\n datestr = datetime.datetime.strftime(lastTimestamp, \"%Y-%m-%d %H\")\n dates.append(datestr)\n \n if fields[f] == \"aswdifd_s\" or fields[f] == \"aswdir_s\":\n diff = (timestamp - lastTimestamp).total_seconds() / 3600.0\n value = (1 / diff) * ((timestamp - firstTimestamp).total_seconds() / 3600 * g['values'][i, j] - (lastTimestamp - firstTimestamp).total_seconds() / 3600 * lastValue)\n else:\n value = g['values'][i, j]\n \n data[f].append(value)\n \n else:\n firstTimestamp = timestamp\n \n lastTimestamp = timestamp\n lastValue = g['values'][i, j]\n \n grb.close()\n \n if len(dates) > 0:\n csvpath = os.path.join(os.path.join(path, \"csv\"))\n with open(os.path.join(csvpath, \"DWD_\" + lastForecast + \".csv\"), 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter = \",\")\n line = [\"time\"]\n line.extend(fields)\n writer.writerow(line)\n for i in range(len(dates)):\n line = [dates[i] + \":00:00\"]\n for j in range(len(fields)):\n line.append(data[j][i])\n writer.writerow(line)\n \n # clean up\n os.system(\"find \" + dwdpath + \" -name '*\" + lastForecast + \"*.grib2' -exec rm -f {} \\;\")\n \n return None;", "def call_distance_API(sources_list, destinations_list):\n\n\n\tsource_points_list = [stop['lon'] + \",\" + stop['lat'] for stop in sources_list]\n\tdestination_points_list = [stop['lon'] + \",\" + stop['lat'] for stop in destinations_list]\n\n\tpoints_list = 
[None]*(len(source_points_list)+len(destination_points_list))\n\tpoints_list[::2] = source_points_list\n\tpoints_list[1::2] = destination_points_list\n\n\tapi_base = \"http://router.project-osrm.org/route/v1/driving/\"\n\tapi_options = \"?overview=false\"\n\n\tdistances = []\n\n\t# Do a request per 100 stops\n\tfor x in range(0, len(points_list), 100):\n\t\t\n\t\tresponse_text = requests.get(api_base + ';'.join(points_list[x:x+100]) + api_options).text\t\n\t\tresponse_json = json.loads(response_text)\n\n\t\tresults = response_json['routes'][0]['legs'][::2]\n\t\tdistances = distances + [connection['distance']*0.001 for connection in results]\n\n\treturn distances", "def gpsroute(csvdestination,jsondestination):\n\n results ={\"type\":\"FeatureCollection\",\"features\":[]}\n x={}\n featurelist = []\n lonlatlist = []\n latlonlist = []\n timeheader = ''\n with open(csvdestination,'r') as csvfile:\n headerreader = csv.reader(csvfile)\n headers = headerreader.next()\n csvfile.close()\n for header in headers:\n if 'time' in header.lower():\n timeheader = header\n if 'lat' in header.lower():\n latheader = header\n if 'lon' in header.lower():\n lonheader = header\n with open(csvdestination,'r') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n timeraw=row[timeheader]\n if isTimeFormat(timeraw): \n time = datetime.strptime(timeraw,\"%m/%d/%Y %H:%M:%S\")\n lonlatlist.append([[row[lonheader],row[latheader]],time])\n else:\n time = datetime.strptime(timeraw,\"%m/%d/%Y %H:%M\")\n lonlatlist.append([[row[lonheader],row[latheader]],time])\n #appends to a list to the list: [[lat, lon ],time]\n #necessary becuase couldnt perform following sort if it were a tuple\n lonlatlist =sorted(lonlatlist, key=lambda x: x[1])\n #sorts on time\n for listlon in lonlatlist:\n latlon=((float(listlon[0][0]),float(listlon[0][1])))\n #tuple\n time = listlon[1]\n appenditem=([latlon,time])\n #restructuring format\n latlonlist.append(appenditem)\n #list of ((lat,lon),time)\n \n for i in range(0,len(latlonlist)-1):\n #must be range(len-1) to accomodate line segment lat/lon pairs\n valuedict = {\"type\": \"Feature\",\"geometry\":{\"type\" : \"LineString\",\"coordinates\":[]},\"properties\":{\"distance\":\"\",\"speed\":\"\",\"duration\":\"\"} }\n #creates dictionary that will populate feature list of dict created above\n x=latlonlist[i][0],latlonlist[i+1][0]\n #((lon,lat),(lon,lat)) (tuple,tuple) represents start & end of line seg\n distance = round(haversine_np(x),2)\n time = latlonlist[i+1][1]-latlonlist[i][1]\n #start and end time for seg\n print (float(time.seconds))\n duration = ((float(time.seconds))/3600)\n #converts to hours\n print duration\n if float(duration) > 0:\n print float(distance),\"/\",float(duration)\n speed = round((float(distance)/float(duration)),2)\n else:\n speed = 0\n x=latlonlist[i][0],latlonlist[i+1][0]\n valuedict[\"geometry\"][\"coordinates\"] =x\n valuedict[\"properties\"][\"distance\"] = distance\n print speed\n valuedict[\"properties\"][\"speed\"] = speed\n valuedict[\"properties\"][\"duration\"] = duration\n featurelist.append(valuedict)\n results[\"features\"] = featurelist\n with open (jsondestination,'w')as outfile:\n json.dump(results,outfile)", "def route_multi(self, orig_long, orig_lat, destinations, mode = \"walk\"):\n\n self.log(\"Sending request to Google\")\n result = self.gmaps.distance_matrix(\n origins = (orig_lat, orig_long),\n destinations = [(coord[1], coord[0]) for coord in destinations],\n units = \"metric\",\n mode = self.map_mode(mode)\n )\n 
self.log(\"Response: %s\" % result)\n\n if (result[\"status\"] == \"OK\"):\n results = []\n for element in result[\"rows\"][0][\"elements\"]:\n results.append({\n \"duration\": element[\"duration\"][\"value\"],\n \"distance\": element[\"distance\"][\"value\"],\n \"response\": element\n })\n return results\n\n return False", "def document_distance(file1: str, file2: str):\n file1_text = process_file(file1)\n file2_text = process_file(file2)\n file1_words = get_words_from_text_list(file1_text)\n file2_words = get_words_from_text_list(file2_text)\n file1_word_freq = get_freq_count_from_words_dict(file1_words)\n file2_word_freq = get_freq_count_from_words_dict(file2_words)\n distance = vector_angle(file1_word_freq, file2_word_freq)\n print(distance)", "def pwdistance(utrees):\n print(\"calculating pairwise distances\")\n pwmat = np.zeros([len(uniqtrees), len(uniqtrees)])\n for i, x in enumerate(uniqtrees):\n for j, y in enumerate(uniqtrees):\n pwmat[i, j] = x.robinson_foulds(y)[0]\n np.savetxt(\"rf.pwmatrix.csv\", pwmat, delimiter=',', fmt='%1.2f')\n return(None)", "def get_distances(addresses):\n if addresses:\n geolocations = get_all_geolocations(addresses)\n distance = calculate_distance(geolocations)\n print(distance)\n distances = create_csv(distance)\n print(\"Distance of Addresses from Adchieve HQ:\\n\")\n print(distances)\n return distances\n else:\n print(\"Data is empty\")", "def getDirections( p1, p2, key, time='now', mode='transit'):\n\t\n\tstart = '%s,%s' %( p1[0], p1[1])\n\tstop = '%s,%s' %( p2[0], p2[1])\n\thtp = \"https://maps.googleapis.com/maps/api/directions/json?origin=%s&destination=%s&mode=%s&departure_time=%s&key=%s\" %(start, stop, mode, time, key)\n\tjsonData = simplejson.loads( urllib2.urlopen(htp).read() )\n\tif jsonData['status'] != 'OK': raise ValueError\n\telse: return jsonData", "def readDistances(fileName):\n infile = open(fileName, \"r\")\n distances = {}\n\n for line in infile:\n line = [i.strip() for i in line.split()]\n if not distances.has_key(line[0]):\n distances[line[0]] = {}\n distances[line[0]][line[1]] = float(line[2])\n\n if not distances.has_key(line[1]):\n distances[line[1]] = {}\n distances[line[1]][line[0]] = float(line[2])\n\n #Set diagonal to 0\n for key in distances.keys():\n distances[key][key] = 0\n\n infile.close()\n return distances", "def calculate_distances(city):\n\n\t# read the previously-built network data\n\tstops_list = read_stops_file(cities[city]['tag'])\n\tconnections_list = read_connections_file(cities[city]['tag'])\n\t\n\t# Get Earth radius at city\n\tradius = cities[city]['radius']\n\n\t# Turn list of stops into dictionary for direct access\n\tstops_dict = {stop['tag']: stop for stop in stops_list}\n\n\t# Calculate the length of every connection\n\tfor connection in connections_list:\n\t\tstop_1 = stops_dict[connection['from']]\n\t\tstop_2 = stops_dict[connection['to']]\n\t\tconnection['length'] = calculate_straight_distance(stop_1['lat'], stop_1['lon'], stop_2['lat'], stop_2['lon'], radius)\n\n\t# pprint(connections_list)\n\twrite_connections_file(cities[city]['tag'], connections_list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A more general activation function, allowing to use just string (for prelu, leakyrelu and elu) and to add BN before applying the activation
def _activation(activation, BN=True, name=None, momentum=0.9, training=None, config=BATCH_NORM): def f(x): if BN and activation != 'selu': if config == 'keras': h = BatchNormalization(momentum=momentum)(x, training=training) elif config == 'tf' or config == 'tensorflow': h = BatchNorm(is_training=training)(x) else: raise ValueError('config should be either `keras`, `tf` or `tensorflow`') else: h = x if activation is None: return h if activation in ['prelu', 'leakyrelu', 'elu']: if activation == 'prelu': return PReLU(name=name)(h) if activation == 'leakyrelu': return LeakyReLU(name=name)(h) if activation == 'elu': return ELU(name=name)(h) else: h = Activation(activation, name=name)(h) return h return f
[ "def add_activation(layers, activation):\n if activation == 'relu':\n layers.append(nn.ReLU(inplace=True))\n elif activation == 'sigmoid':\n layers.append(nn.Sigmoid())\n elif activation == 'tanh':\n layers.append(nn.Tanh())\n elif activation == 'softplus':\n layers.append(nn.Softplus())\n elif activation == 'softmax':\n layers.append(nn.Softmax(dim=1))\n elif activation == 'leaky-relu0.1':\n layers.append(nn.LeakyReLU(negative_slope=0.1))\n elif activation == 'linear':\n pass\n else:\n raise ValueError(f\"Activation function with name '{activation}' is not implemented.\")\n return layers", "def get_activation_function(activation: str) -> nn.Module:\n if activation == \"ReLU\":\n return nn.ReLU()\n elif activation == \"LeakyReLU\":\n return nn.LeakyReLU(0.1)\n elif activation == \"PReLU\":\n return nn.PReLU()\n elif activation == \"tanh\":\n return nn.Tanh()\n elif activation == \"SELU\":\n return nn.SELU()\n elif activation == \"ELU\":\n return nn.ELU()\n elif activation == \"Linear\":\n return lambda x: x\n else:\n raise ValueError(f'Activation \"{activation}\" not supported.')", "def append_activation(function):\n if function is None or function is keras.activations.linear:\n # Identity: https://github.com/keras-team/keras/blob/bd024a1fc1cd6d88e8bc5da148968ff5e079caeb/keras/activations.py#L187\n pass\n elif function is keras.activations.relu:\n syrenn_layers.append(pysyrenn.ReluLayer())\n else:\n print(function)\n raise NotImplementedError", "def apply_layer(y_in,w,b,activation):\n # to understand the following line, watch the beginning of lecture 2\n z=np.dot(y_in,w)+b # batch processing: y_in is of shape [batchsize,num_neurons_in]\n if activation=='sigmoid':\n return(1/(1+np.exp(-z)))\n elif activation=='jump':\n return(np.array(z>0,dtype='float'))\n elif activation=='linear':\n return(z)\n elif activation=='reLU':\n return((z>0)*z)", "def _activation(func, data):\n if func == LeakyReLU:\n return func(data, slope=0.01)\n return func(data)", "def setActivation(self):\n if self.activation == 'relu': self.act = tf.nn.relu\n elif self.activation == 'leaky_relu': self.act = tf.nn.leaky_relu\n elif self.activation == 'elu': self.act = tf.nn.elu", "def calc_activation(self, inp):\n inp_rightform = ny.matrix( inp ).T\n self.a = [inp_rightform]\n tmp = ny.dot( self.weights_layer[0], inp_rightform ) + self.bias[0]\n tmp = self.activation_function(tmp)\n\n self.a.append(tmp)\n\n for i in range(self.number_hidden_layers-1):\n\n tmp = ny.dot( self.weights_layer[i+1], tmp ) + self.bias[i+1]\n tmp = self.activation_function(tmp)\n self.a.append(tmp)\n\n tmp = ny.dot( self.weights_layer[self.number_hidden_layers], tmp )+self.bias[self.number_hidden_layers]\n tmp = self.activation_function(tmp)\n\n self.a.append(tmp)\n #eventuell muss shape von tmp angepasst werden", "def linear_activate(inputs, weights, biases):\n return(np.matmul(inputs, weights.T) + biases.T)", "def fc_bn_lrelu(x, out_dim, is_train, alpha=0.2):\n fc = fc_layer(x, out_dim)\n bn = batchNormalization(fc, is_train)\n return tf.nn.leaky_relu(bn, alpha)", "def activation_derivative(self, x, act_func):\n\n if act_func == 'sigmoid':\n\n return x*(1 - x)\n\n elif act_func == 'tanh':\n\n return 1 - x**2\n\n elif act_func == 'relu':\n\n return 1*(x >= 0)\n\n elif act_func == None:\n return 1\n else:\n print(\"Invalid activation function. 
Either 'sigmoid', 'tanh', 'relu', or None.\\nExiting...\")\n sys.exit(0)", "def conv_bn_lrelu(x, filters, kernel_size, is_train, strides=(1, 1), padding='SAME', bn=True, alpha=0.2):\n conv = conv_layer(x, filters, kernel_size, strides, padding, use_bias=True)\n if bn:\n _bn = batchNormalization(conv, is_train)\n else:\n _bn = conv\n return tf.nn.leaky_relu(_bn, alpha)", "def ell_activation_type_to_string(type):\n if type == ell.neural.ActivationType.relu:\n return 'ReLU'\n elif type == ell.neural.ActivationType.sigmoid:\n return 'Sigmoid'\n elif type == ell.neural.ActivationType.leaky:\n return 'LeakyReLU'\n\n return \"\"", "def _apply_activation(self, r):\n if self.activation is None:\n \"\"\"\n No activation function was chosen.\n \"\"\"\n return r\n if self.activation == 'tanh':\n \"\"\"\n Compute tanh values for each sets of scores in x.\n \"\"\"\n return np.tanh(r)\n if self.activation == 'sigmoid':\n \"\"\"\n Compute sigmoid values for each sets of scores in x.\n \"\"\"\n return expit(r) # 1 / (1 + np.exp(-r))\n if self.activation == 'softmax':\n \"\"\"\n Compute softmax values for each sets of scores in x.\n \"\"\"\n # exps = np.exp(r - np.max(r))\n return softmax(r) # exps / np.sum(exps)\n return r", "def get_ell_activation_type(nodes):\n if any(node.op_name == 'ReLU' for node in nodes):\n return ell.neural.ActivationType.relu\n elif any(node.op_name == 'Sigmoid' for node in nodes):\n return ell.neural.ActivationType.sigmoid\n elif any(node.op_name == 'LeakyReLU' for node in nodes):\n return ell.neural.ActivationType.leaky\n\n return None", "def parse_act_function():\n fun = FLAGS.activation_function\n tf_fun = None\n \n if fun is 'elu':\n tf_fun = tf.nn.elu\n elif fun is 'leaky_relu':\n tf_fun = leaky_relu\n elif fun is 'relu':\n tf_fun = tf.nn.relu\n elif fun is 'sigmoid':\n tf_fun = tf.nn.sigmoid\n elif fun is 'tanh':\n tf_fun = tf.nn.tanh\n elif fun is 'identity':\n tf_fun = tf.nn.identity\n \n return tf_fun", "def sigmoid(X):\n\n pass", "def neural_net_predict(params, inputs):\n for W, b in params:\n outputs = np.dot(inputs, W) + b\n inputs = relu(outputs) # missing sigmoid + logits?\n return outputs", "def d_activation(x, name=\"d_a\"):\n if self.config.use_gradient_penalty:\n # WGAN_GP uses layer normalization instead of batch norm in the discriminator (critic)\n norm_layer = layer_norm(name=name)\n else:\n norm_layer = batch_norm(name=name)\n return leaky_relu(norm_layer(x))", "def get_ell_activation_type(nodes):\n if find_node_by_op_name(nodes, 'ReLU') is not None:\n return ell.neural.ActivationType.relu\n elif find_node_by_op_name(nodes, 'Sigmoid') is not None:\n return ell.neural.ActivationType.sigmoid\n elif find_node_by_op_name(nodes, 'LeakyReLU') is not None:\n return ell.neural.ActivationType.leaky\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
raise all pylab windows with str in the title, whether they flash or raise depends on window manager and settings Note than figure(num='myname') is a legal way to name a fig
def raise_matching(str): labs = pl.get_figlabels() for lab in labs: if str in lab: pl.figure(lab) mgr = pl.get_current_fig_manager() mgr.window.tkraise()
[ "def namedWindow(winname, flags=...) -> None:\n ...", "def graphic_window(self):", "def update_title(name, window):\n\twindow.wm_title(name)", "def add_window(self, name):\n self.window_names.append(name)\n cv2.namedWindow(name + \" (press ESC to quit)\")\n m = WeakMethod(self.handle_mouse)\n cv2.setMouseCallback(name + \" (press ESC to quit)\", m)", "def destroyWindow(winname) -> None:\n ...", "def setup_window(s):\n\t\tpygame.init()\n\t\tpygame.display.set_caption(s.caption)", "def show():\n mlab.show()", "def __init__( self, wintitle, width, height ):\n print( self.helpMessage )\n #self.win = GraphWin( wintitle, width, height ) \n #MenuMessage( self.helpMessage, 0, 80, self.win )\n #MenuMessage( self.closeMessage, .8 * height, 20, self.win )\n return", "def test_expose(self):\n self.question = (\"Please trigger a redraw of this window.\\n\\n\"\n \"Depending on your OS and window manager you might need to:\\n\"\n \"- Cover the window with another window and uncover again\\n\"\n \"- Minimize and restore the window\\n\\n\"\n \"Repeat up to 5 times (less might be accepted due to initial drawing)\")\n self.window_size = 700, 200\n self._test_main()", "def set_title_notes(window, index, split_name=False):\n\ttitle = config.DEFAULT_WINDOW[\"TITLE\"]\n\n\tdisp_index = str(index + 1) # start at 1\n\ttitle += \" - \" + disp_index\n\n\tif split_name:\n\t\ttitle += \" - \" + split_name\n\n\tif runtime_info[\"timer_running\"]:\n\t\ttitle += \" - \" + config.RUNNING_ALERT\n\n\tupdate_title(title, window)", "def update_window_title(self):\n import anima\n\n window_title = \"Anima Pipeline v%s \" % anima.__version__\n\n if self.dcc:\n window_title = \"%s | %s\" % (window_title, self.dcc.name)\n else:\n window_title = \"%s | No Environment\" % window_title\n\n if self.mode == SAVE_AS_MODE:\n window_title = \"%s | Version: Save-As Mode\" % window_title\n elif self.mode == OPEN_MODE:\n window_title = \"%s | Version: Open Mode\" % window_title\n elif self.mode == SAVE_AS_AND_OPEN_MODE:\n window_title = \"%s | Version: Save As & Open Mode\" % window_title\n\n # change the window title\n self.setWindowTitle(window_title)", "def show(image, killothers=False, message=\"\"):\n if killothers:\n for img in runningprocs.keys():\n close(img)\n\n proc = Process(target=_show, args=(image, message))\n proc.start()\n runningprocs[image] = proc\n if not proc.is_alive():\n print \"proc is already closed\"", "def create_windows(self):\n\n # implemented in sub classes", "def new_plot_window(self, x, y, data):\n counter = len(self._plot_windows) + 1\n self._figure_name = \"imexam\" + str(counter)\n self._plot_windows.append(self._figure_name)\n print(\"Plots now directed towards {0:s}\".format(self._figure_name))", "def set_window_title_changed(self,changes):\n window_title = self.get_window_title()\n if changes:\n if window_title.endswith(\"*\"):\n pass\n else:\n self.set_window_title(window_title+\" *\",clear=True)\n else:\n if window_title.endswith(\"*\"):\n self.set_window_title(window_title[:-2], clear=True)\n else:\n pass", "def showWindow(self, sender):", "def change_window_title(cls, title, new_title):\n try:\n _window = winappdbg.System.find_window(windowName=title)\n except:\n _window = None\n \n if _window:\n _window.set_text(new_title)\n return _window\n \n return False", "def create_windows(self):\n self.left = TaskWindow(self.MAX_WIN_HEIGHT, self.MAX_WIN_WIDTH, 0, 0, \"Backlog\")\n self.center = TaskWindow(self.MAX_WIN_HEIGHT, self.MAX_WIN_WIDTH, 0, self.MAX_WIN_WIDTH, \"In Progress\")\n self.right = 
TaskWindow(self.MAX_WIN_HEIGHT, self.MAX_WIN_WIDTH, 0, 2 * self.MAX_WIN_WIDTH, \"Done\")\n self.control = ControlWindow(self.control_lines, curses.COLS, self.MAX_WIN_HEIGHT, 0)\n self.scr.refresh()", "def getWindowName(self):\n\n return str(pygame.display.get_caption()[0])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
close all pylab windows with str in the title, see also raise _matching
def close_matching(str): (labs_nums) = zip(pl.get_figlabels(),pl.get_fignums()) closed = 0 for (lab,num) in labs_nums: if str in lab: pl.close(num) closed += 1 if closed == 0: print('No figures matching {s} found' .format(s=str))
[ "def destroyWindow(winname) -> None:\n ...", "def raise_matching(str):\n labs = pl.get_figlabels()\n for lab in labs:\n if str in lab:\n pl.figure(lab)\n mgr = pl.get_current_fig_manager()\n mgr.window.tkraise()", "def close_window(window):\r\n window.destroy()", "def close_image(image_title:str, ij: object) -> None:\n \n available_images = list(ij.WindowManager.getImageTitles())\n \n if image_title in available_images:\n \n logger.debug('Closing {}'.format(image_title))\n logger.debug('Current availalbe images:')\n logger.debug(available_images)\n \n # Attempt to close the image via macro command\n ij.py.run_macro(jpype.JString('close(\"{}\");'.format(image_title)))\n \n else:\n logger.debug('Cannot close {} not in available images'.format(image_title))", "def close_window(window):\n xkill('-id', window)", "def close_other_windows(self) -> None:\r\n while len(self.driver.window_handles) != 1:\r\n self.switch_to_window(1)\r\n self.close_window()", "def close_window(_):\n root.destroy()", "def test_close(self):\n wrp = self.dlg.find()\n\n # mock a failure in get_elem_interface() method only for 'Window' param\n orig_get_elem_interface = uia_defs.get_elem_interface\n with mock.patch.object(uia_defs, 'get_elem_interface') as mock_get_iface:\n def side_effect(elm_info, ptrn_name):\n if ptrn_name == \"Window\":\n raise uia_defs.NoPatternInterfaceError()\n else:\n return orig_get_elem_interface(elm_info, ptrn_name)\n mock_get_iface.side_effect=side_effect\n # also mock a failure in type_keys() method\n with mock.patch.object(UIAWrapper, 'type_keys') as mock_type_keys:\n exception_err = comtypes.COMError(-2147220991, 'An event was unable to invoke any of the subscribers', ())\n mock_type_keys.side_effect = exception_err\n self.assertRaises(WindowNotFoundError, self.dlg.close)\n\n self.dlg.close()\n self.assertEqual(self.dlg.exists(), False)", "def OnCloseWindow(self):\n pass", "def close(self):\n for fig in self.figures:\n plt.close(fig) # to avoid matplotlib warnings", "def _close_wid(self, wid):\n pass", "def close_window(self):\r\n Window.close()", "def namedWindow(winname, flags=...) -> None:\n ...", "def closeExtendedWindows(self):\n for window in self.uiWindows:\n if not self.uiWindows[window]['classObj']:\n continue\n\n self.uiWindows[window]['classObj'].close()\n\n waitDeleted = True\n while waitDeleted:\n for window in self.uiWindows:\n if self.uiWindows[window]['classObj']:\n continue\n\n waitDeleted = False\n QTest.qWait(100)\n return True", "def close_active_document():\n wrap_and_run('close saving yes')", "def close_bashes(self):\n with self.cluster.lock:\n for key in list(self.cluster.shells.keys()):\n if key.endswith(f\"-{self.name}\"):\n shell = self.cluster.shells.pop(key)\n shell.__exit__(None, None, None)", "def kill_all():\n cv.destroyAllWindows()", "def exitWin(entrance: Window):\n pass", "def close_defect_studies_window(self):\n try:\n self.defect_studies_window.destroy()\n self.defect_study_button.configure(state='normal')\n del self.defect_studies_window\n except AttributeError:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper to make a new item with given session_id, item_id and extra data Sets the housekeeping fields (TTL, created_at, expires_on, etc).
def new_session_item(sid, item_id, **extra) -> SessionItem: return SessionItem( session_id=sid, item_id=item_id, created_at=datetime.now(), updated_at=datetime.now(), expires_on=int(ITEM_TTL + time.time()) if ITEM_TTL else 0, **extra, )
[ "def new_session() -> SessionItem:\n base_session = SessionItem.get(BASE_SESSION_HASH_KEY, META)\n sid = str(uuid.uuid4())\n\n s = new_session_item(sid, META, meta=MetaAttribute())\n s.save()\n # Create the empty placeholders for the collections\n new_session_item(sid, PLOGS, plogs=[]).save()\n new_session_item(sid, PEVENTS, pevents=[]).save()\n new_session_item(sid, STDOUT, stdout=[]).save()\n\n # Record the new session for cheap retrieval later\n SessionItem(\n session_id=BASE_SESSION_HASH_KEY,\n item_id=str(s.created_at), # for sorting by created_at\n created_at=datetime.now(),\n updated_at=datetime.now(),\n expires_on=int(ITEM_TTL + time.time()) if ITEM_TTL else 0,\n new_session_record=sid,\n ).save()\n\n return s", "def create_item(cls, item):\n\n connection = sqlite3.connect(\"data.db\")\n cursor = connection.cursor()\n\n cursor.execute(\"INSERT INTO items VALUES (?, ?)\", (item[\"name\"], item[\"price\"]))\n\n connection.commit()\n connection.close()", "def process_item(self, item, spider):\n\n listing = GenericListings(**item)\n\n self.session.add(listing)\n\n try:\n self.session.commit()\n except:\n self.session.rollback()\n raise\n\n\n return item", "def setup_item(self, item: BaseBasketItem, request: HttpRequest) -> None:", "def add_item(self, item):", "def create_item(self) -> pywikibot.ItemPage:\n data = {\n 'sitelinks': {\n self.site.dbName(): {\n 'site': self.site.dbName(),\n 'title': self.current_page.title()\n }\n },\n 'labels': {\n self.site.lang: {\n 'language': self.site.lang,\n 'value': self.current_page.title()\n }\n }\n }\n for site, page in self.iwlangs.items():\n if not page.exists():\n continue\n dbname = site.dbName()\n title = page.title()\n data['sitelinks'][dbname] = {'site': dbname, 'title': title}\n data['labels'][site.lang] = {'language': site.lang, 'value': title}\n summary = ('Bot: New item with sitelink(s) from '\n + self.current_page.title(as_link=True, insite=self.repo))\n\n item = pywikibot.ItemPage(self.repo)\n item.editEntity(data, new='item', summary=summary)\n info(f'Created item {item.getID()}')\n return item", "def create_item(self, itemid: UUID, price: Decimal):\n self.connection.execute(\"\"\"INSERT INTO stock_service.stock (itemid,price)\n VALUES (%s,%s)\n \"\"\", (itemid, price)\n )\n self.connection.execute(\"\"\"UPDATE stock_service.stock_counts \n SET quantity = quantity + 1\n WHERE itemid = %s\n \"\"\" % itemid\n )", "def create_item(store_id):\n\n form, error, msg, item = ItemForm(), '', '', Item(store_id=store_id)\n item_id = None\n\n if form.validate_on_submit():\n\n if Store.does_item_belong_to_store(store_id, form.url.data):\n\n item.item_name = form.item_name.data\n item.url = form.url.data\n item.description = form.description.data\n\n if form.image.data:\n item.item_image = _upload_image_securely(form)\n if item.save():\n msg = item_constant.SUCCESS\n item_id = item.item_id\n else:\n error = item_constant.DUPLICATE_ITEM\n else:\n error = item_constant.ITEM_INCORRECT_STORE\n\n return render_template('items/new_item.html', form=form,\n msg=msg, error=error, store_id=store_id,\n url_prefix=item.store.url_prefix,\n item_id=item_id\n )", "def add_item(self, item):\n\n logger.info(\"Storing item %r ...\", item)\n _id = self._store(item, insert=True)\n logger.info(\n \"HappiItem %r has been succesfully added to the database\",\n item\n )\n\n def save_item():\n self._store(item, insert=False)\n\n item.save = save_item\n return _id", "def _createagendaitem(order):\n # create new agendaitem\n newagendaitem = AgendaItem(\n 
interest_id=themeeting.interest_id,\n meeting_id=themeeting.id,\n order=order,\n )\n db.session.add(newagendaitem)\n order += 1\n db.session.flush()\n return newagendaitem, order", "def create_item():\n item = None\n\n # Repeat until the user enters valid types of an item.\n item_type = None\n while item_type not in [\"book\", \"dvd\", \"journal\"]:\n item_type = input(\"Item Type (Book, DVD, Journal): \").lower()\n\n # Common questions about an item.\n title = input(\"Title: \")\n call_number = input(\"Call Number: \")\n num_copies = int(input(\"Number of Copies: \"))\n\n # Ask additional questions based on the type of an item.\n if item_type == \"book\":\n author = input(\"Author: \")\n item = Book(title, call_number, num_copies, author)\n elif item_type == \"dvd\":\n release_date = input(\"Release Date: \")\n region_code = input(\"Region Code: \")\n item = DVD(title, call_number, num_copies, release_date, region_code)\n elif item_type == \"journal\":\n issue_number = input(\"Issue Number: \")\n publisher = input(\"Publisher: \")\n item = Journal(title, call_number, num_copies, issue_number, publisher)\n else:\n print(\"Wrong Type\")\n\n return item", "def create_item(wishlist_id):\n\n app.logger.info(\"Request to create an item in a wishlist\")\n check_content_type(\"application/json\")\n item = Item()\n item.deserialize(request.get_json())\n item.create()\n message = item.serialize()\n location_url = url_for(\"get_item\", wishlist_id=item.wishlist_id, item_id=item.id, _external=True)\n app.logger.info(f'Item with ID {item.id} created')\n\n return make_response(\n jsonify(message), status.HTTP_201_CREATED, {\"Location\": location_url}\n )", "def db_add_order_item(self, room: str, item: str, recipient: str) -> OrderItem:", "def create_item(self, item_cls, **kwargs):\n\n # string -> class, if in the registry\n if item_cls in containers.registry:\n item_cls = containers.registry[item_cls]\n\n # Check that this is a valid HappiItem\n if isinstance(item_cls, str):\n raise TypeError(\n f'The container class {item_cls!r} is not in the registry'\n )\n\n if not (inspect.isclass(item_cls) and issubclass(item_cls, HappiItem)):\n raise TypeError(f\"{item_cls!r} is not a subclass of HappiItem\")\n\n item = item_cls(**kwargs)\n\n def save_item():\n self.add_item(item)\n\n # Add the method to the item\n item.save = save_item\n return item", "def createItem(parent, **args):\n addCreator(args, parent)\n itemModel = loadModel('item')\n args['folder'] = parent\n return itemModel.createItem(**args)", "def create_items(instance, items):\n for item in items:\n # return item to respective stock\n stock_details = back_to_stock(item)\n query = Item.objects.filter(return_sale=instance, sku=item['sku'])\n if query.exists():\n print 'updating....'\n single = query.first()\n single.quantity = int(single.quantity) + int(item['qty'])\n single.total_cost = Decimal(single.total_cost) + Decimal(item['total_cost'])\n if single.quantity > 0:\n single.save()\n else:\n single = Item()\n single.sold_item = stock_details.get('sold_item')\n single.order_item = stock_details.get('order_item')\n single.return_sale = instance\n single.total_cost = item['total_cost']\n single.unit_cost = item['unit_cost']\n single.discount = item['discount']\n single.tax = item['tax']\n single.product_category = item['product_category']\n single.product_name = item['product_name']\n single.sku = item['sku']\n single.quantity = item['qty']\n if single.quantity > 0:\n single.save()\n\n # decrease stock\n # Stock.objects.decrease_stock(item['stock'], 
item['qty'])", "def make_new_item(self, data, summary=None):\n summary = summary or self.edit_summary\n\n identification = dict() # If empty this defaults to creating an entity\n result = self.repo.editEntity(identification, data, summary=summary)\n pywikibot.output(summary) # afterwards in case an error is raised\n\n # return the new item\n return self.q_to_itempage(result.get('entity').get('id'))", "def put_item(self, **kwargs):\n table = self.tables[kwargs['TableName']]\n table.put_item(kwargs['Item'])", "def add_item():\n if 'username' not in login_session:\n return redirect('/login')\n else:\n if request.method == 'POST':\n # build an item object\n item = db_helper.build_item(request.form['title'], request.form['description']\n , request.form['category_id']);\n \n # check if the item exists (the item must by unique by category)\n item_view_db = db_helper.get_item_by_title(item.title, item.category_id)\n \n if not item_view_db:\n db_helper.add_item(item)\n return redirect(url_for('catalog_page'))\n else:\n categories = db_helper.get_categories()\n return render_template('addItem.html', categories=categories,\n message = 'An item with the same name exists') \n else:\n categories = db_helper.get_categories()\n return render_template('addItem.html', categories=categories, message = '')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new session, returning the 'meta' item for it
def new_session() -> SessionItem: base_session = SessionItem.get(BASE_SESSION_HASH_KEY, META) sid = str(uuid.uuid4()) s = new_session_item(sid, META, meta=MetaAttribute()) s.save() # Create the empty placeholders for the collections new_session_item(sid, PLOGS, plogs=[]).save() new_session_item(sid, PEVENTS, pevents=[]).save() new_session_item(sid, STDOUT, stdout=[]).save() # Record the new session for cheap retrieval later SessionItem( session_id=BASE_SESSION_HASH_KEY, item_id=str(s.created_at), # for sorting by created_at created_at=datetime.now(), updated_at=datetime.now(), expires_on=int(ITEM_TTL + time.time()) if ITEM_TTL else 0, new_session_record=sid, ).save() return s
[ "def test_create_session(self):\n _meta = SessionMeta.new(app_secret=self.manager.secret)\n\n session1 = self.manager.get_session(meta=_meta, new=True)\n session1['foo'] = 'bar'\n session1.commit()\n\n # read back session\n session2 = self.manager.get_session(meta=_meta, new=False)\n self.assertEqual(session2['foo'], session1['foo'])", "def new_session_item(sid, item_id, **extra) -> SessionItem:\n return SessionItem(\n session_id=sid,\n item_id=item_id,\n created_at=datetime.now(),\n updated_at=datetime.now(),\n expires_on=int(ITEM_TTL + time.time()) if ITEM_TTL else 0,\n **extra,\n )", "def get_or_create_session (self):\n\n session_name = self.session_class.name_prefix + '_session'\n session = cherrypy.session.get (session_name, self.session_class ())\n cherrypy.session[session_name] = session\n return session", "def _create_session(self):\n response = self._request_obj(\n self._urls[\"create_session\"],\n method=\"POST\",\n json={\"request_token\": self.request_token}\n )\n self.session_id = response.session_id", "def _login(self):\n self._session = uuidutils.generate_uuid()\n session = DataObject()\n session.key = self._session\n session.userName = 'sessionUserName'\n _db_content['session'][self._session] = session\n return session", "def makeSession(self):\n uid = self._mkuid()\n s = SBSession(self, uid)\n s.expiryTimeout = self.cb.personalRegistryValue('sessionTimeout')\n session = self.sessions[uid] = s\n reactor.callLater(s.expiryTimeout, s.checkExpired)\n \n return session", "def create_session(self):\n\n self.session = self.opentok.create_session(\n media_mode=MediaModes.routed\n )\n return self.session.session_id", "def fresh_session():\n VirtualTN.query.delete()\n ProxySession.query.delete()\n new_tn = VirtualTN('1234567897')\n db_session.add(new_tn)\n db_session.commit()\n new_session = ProxySession(\n new_tn.value, '12223334444', '12223335555', expiry_window=1)\n new_tn.session_id = new_session.id\n db_session.add(new_tn)\n db_session.add(new_session)\n db_session.commit()\n return new_tn, new_session", "def testSessionCreate(self):\n success = False\n meta = None\n\n try:\n meta = self.session.create_metabolome()\n\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(meta is None)", "def new_session(api_url: Optional[str] = None) -> Session:\n sess = Session()\n sess.mount('http+api://', MetadataAPIAdapter(base_url=api_url))\n return sess", "def create_session(request):\n if not all(key in request.data for key in ['parts', 'hash']):\n raise APIException(\n detail=\"Missing parameters\",\n code=400\n )\n\n agent = get_object_or_404(Agent, name=request.data['agent'])\n if request.user not in agent.users.all():\n raise APIException(\n detail=\"You don't have permission to work on this agent\",\n code=403\n )\n\n # check we don't already have this file\n if DbFile.objects.filter(id=request.data['hash']).exists():\n #print(\"Already seen this file, skipping\")\n return Response(data=json.dumps(\n {\n \"sessionid\": None,\n \"code\": \"seenbefore\",\n \"reason\": \"Already seen file with this hash\"\n }\n ))\n\n session = UploadSession.objects.create(\n agent=agent,\n expectedparts=request.data['parts'],\n fullhash=request.data['hash'],\n timestamp=request.data['timestamp']\n )\n\n session.save()\n\n return Response(json.dumps({\"sessionid\": str(session.id)}))", "def getSessionData(create=True): # pragma: no cover", "def _new_session(self, session: aiohttp_session.Session, user: User) -> UserSession:\n if not self._user_has_session(user):\n # 
create new session with random id.\n new_id = f\"{random.randint(1000, 9999)}-{random.randint(1000, 9999)}\"\n self._sessions[new_id] = UserSession(user)\n session[SESSION_KEY] = new_id\n\n _LOGGER.debug(f\"New user session: {new_id}\")\n\n return new_id\n else:\n for k in self._sessions:\n if self._sessions[k].user == user and not SESSION_KEY in session:\n session[SESSION_KEY] = k", "def establish_a_session():\n new_session = requests.Session()\n\n jar = requests.cookies.RequestsCookieJar()\n jar.set('view_mature', 'true' if named_args.adult else 'false')\n jar.set('d_browse_bookshelf', '2') # grid-like view\n\n new_session.cookies = jar\n return new_session", "def start_session(self) -> Tuple[\"Session\", List[Dict[str, Any]]]:\n\n session_info = login(\n self.gateway.api_root,\n self.access_token,\n self.gateway.country,\n self.gateway.language,\n )\n session_id = session_info[\"jsessionId\"]\n return Session(self, session_id), get_list(session_info, \"item\")", "def create_session():\n try:\n json = request.json\n session = ConferenceSession(json['title'], json['description'], json['convener'], json['space_id'])\n\n db.session.add(session)\n db.session.commit()\n\n session = dict(id=session.id, title=session.title, description=session.description, convener=session.convener, space_id=session.space_id)\n return jsonify({'session': session})\n except Exception as err:\n raise InvalidUsage('Invalid request. Request json: {}. Error: {}'.format(json, err), status_code=400)", "def policy_sessions_create_one():\n client_dict = request.json[\"client\"]\n mc_id = client_dict[\"mc_id\"]\n http_status, msg = PolicySessionApi.session_create(client_dict)\n http_response = RestServerApis.respond(http_status, \"Client Creation\", msg)\n if http_status == HTTPStatus.CREATED:\n http_response.headers['location'] = request.url + mc_id + \"/\"\n return http_response", "def create_session(self):\n\n return scoped_session(self.session_factory)", "def create_session(username, password):\r\n user = User.objects.get_user_by_password(username, password)\r\n auth_session_engine = get_config('auth_session_engine')\r\n if not user:\r\n raise InvalidInput('Username or password incorrect')\r\n session_key = random_string(15)\r\n while auth_session_engine.get(session_key):\r\n session_key = random_string(15)\r\n auth_session_engine.set(session_key, user.username, get_config('auth_session_expire'))\r\n return {'session_key': session_key, 'user': user}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load all spCFrame files in a given directory. Return a dictionary of CFrame objects, keyed by cameraexpid string
def load_spCFrame_files(platedir): print "loading spCFrame files from " + platedir cframes = dict() for filename in glob(os.path.join(platedir, 'spCFrame-*.fits')): print ' ', os.path.basename(filename), asctime() expid = get_expid(filename) cframes[expid] = CFrame(filename) return cframes
[ "def _process_dir(self) -> dict:\n camera_person = {}\n for scene_cam in self.camera_bboxes: \n scene, camera = scene_cam.split(\"_\")\n folder_path = osp.join(self.root, scene, camera, \"img1\")\n \n scene_cam_data = []\n for frame_id, x, y, w, h in self.camera_bboxes[scene_cam]:\n # frame id format is 000001.jpg\n frame_id = str(frame_id).zfill(6) + \".jpg\"\n img_path = osp.join(folder_path, frame_id) \n bbox = (x, y, w, h) \n scene_cam_data.append((img_path, scene_cam, bbox))\n camera_person[scene_cam] = scene_cam_data\n\n return camera_person", "def load_directory(directory):\n faces = {}\n faces.update(FaceStore.load_face(os.path.join(directory, path))\n for path in os.listdir(directory) if path.endswith(FaceStore.EXTENSION))\n return faces", "def initiate_camera_paths(directory,use_video):\n global path_cameras, frame_names, path_videos\n if use_video == 0:\n directory = directory.replace(\"'\", \"\")\n # Get camera paths to read frame imgs\n aux = 0\n for f in listdir(directory):\n file_path = directory + '/' + f\n path_cameras.append(file_path)\n aux = f\n \n # Get frame names for later use - WE SUPOSE ALL CAMERAS HAVE THE SAME NUMBER OF FRAMES AND WITH THE SAME NAME\n directory = directory + '/' + aux\n for frame_name in listdir(directory):\n frame_names.append(frame_name.replace(\".png\", \"\"))\n \n else:\n for f in listdir(directory):\n file_path = directory + '/' + f\n path_videos.append(file_path)", "def test_all_captures(self):\n\n dir = os.path.join(os.path.dirname(__file__), \"../../../res/captures\")\n\n for c in os.listdir(dir):\n filename = \"../../../res/captures/{}\".format(c)\n try:\n img = cv2.imread(filename)\n except:\n continue\n\n if (img is None):\n continue\n\n playfield = capture.crop_to_playfield(img)", "def loadCards(self, fn):\n if os.path.isfile(fn):\n elapsed = time.time() - os.stat(fn).st_mtime\n else:\n elapsed = NRDB_SYNCH_INTERVAL\n if elapsed >= NRDB_SYNCH_INTERVAL:\n print \"Refreshing cards\"\n uo = urllib.URLopener()\n uo.retrieve(NRDB_ALL_CARDS,\n fn)\n with open(fn, 'r') as f:\n nrdbData = json.load(f)\n imageUrlTemplate = nrdbData['imageUrlTemplate']\n cards = nrdbData['data']\n for card in cards:\n card['title_norm'] = self.normalizeTitle(card['title'])\n card['image_url'] = card.get('image_url', \n imageUrlTemplate.replace('{code}', card['code']))\n cardDict = {card['title_norm']:card for card in cards}\n return (cards, cardDict)", "def frames_avail(dir_np):\n # All available files for blood density\n filenames = glob.glob(os.path.join(dir_np, 'rho_*.npy'))\n # Extract the frame numbers\n basenames = [os.path.basename(filename) for filename in filenames]\n frame_nums = [int(basename.split('.')[0][-7:]) for basename in basenames]\n return frame_nums", "def CAN_OPENER(directory):\n\tfilelist = np.array([])\n\tfor file in os.listdir(directory):\n\t\tif fnmatch.fnmatch(file, '*.hdf5'):\n\t\t\tfilelist = np.append(filelist, file)\n\tnfiles = int(len(filelist))\n\t\n\tif nfiles == 0:\n\t\tprint \"no files found, make sure they end with .hdf5 \\\n\t\tand are in\" + directory\n\n\tpfs = np.array([])\n\tall_data = np.array([])\n\tfor i in xrange(nfiles):\n\t\tpf = load(directory+filelist[i])\n\t\tdata = pf.h.all_data()\n\t\tpfs = np.append(pfs,pf)\n\t\tall_data = np.append(all_data,data)\n\treturn pfs, all_data", "def load_flowers_capt_dict(data_dir):\n flowers_dir = os.path.join(data_dir,constants.FLOWERS_CAP_DICT)\n flowers_capt_dict = pickle.load(open( flowers_dir, \"rb\" ))\n return flowers_capt_dict", "def _createCameras(self):\n # type: () 
-> None\n cameraJSONFiles = [\n os.path.join(self.SourceDirectoryPath, 'json', 'cameras', 'beachCam.json'),\n os.path.join(self.SourceDirectoryPath, 'json', 'cameras', 'birdseyeCam.json'),\n os.path.join(self.SourceDirectoryPath, 'json', 'cameras', 'dunesACam.json'),\n os.path.join(self.SourceDirectoryPath, 'json', 'cameras', 'grassCam.json'),\n os.path.join(self.SourceDirectoryPath, 'json', 'cameras', 'palmsCam.json'),\n os.path.join(self.SourceDirectoryPath, 'json', 'cameras', 'rootsCam.json'),\n os.path.join(self.SourceDirectoryPath, 'json', 'cameras', 'shotCam.json')\n ]\n\n # Create USD Stage containing only references to cameras, along with a\n # root \"/cameras\" Prim under which all other Prims will be attached:\n cameraStage = Usd.Stage.CreateInMemory(load=Usd.Stage.LoadNone)\n camerasRootPrim = cameraStage.DefinePrim('/cameras')\n cameraStage.SetDefaultPrim(camerasRootPrim.GetPrim())\n\n # Create Camera Prims:\n with tqdm(total=len(cameraJSONFiles), desc='Processing cameras', ncols=self.ProgressBarWidth) as progressBar:\n for cameraJSONFile in cameraJSONFiles:\n self._handleCameraFile(cameraJSONFile, cameraStage)\n progressBar.update()\n\n # Commit the changes and save the Camera Stage:\n cameraStagePath = self.getCameraStageFilePath()\n cameraStage.GetRootLayer().Export(cameraStagePath, comment='')", "def parse_frame_info_file(video_frames_info_path):\n video_frame_info = dict()\n with open(video_frames_info_path) as f:\n reader = csv.reader(f)\n next(reader, None) # Skip headers\n for row in reader:\n video_frame_info[row[0]] = (float(row[1]), int(row[2]))\n return video_frame_info", "def rasters_to_dict(dir):\n\n # Initialize empty dictionary\n\n rstr_dict = {}\n\n file_list = glob.glob(str(dir) + '*.tif')\n\n for f in file_list:\n\n rstr_dict[f] = {}\n\n src = rasterio.open(f)\n rstr_dict[f]['arr'] = src.read(1)\n rstr_dict[f]['profile'] = src.profile\n\n rstr_dict[f]['year'] = re.findall('(\\d{4})', f)\n return rstr_dict", "def generateframes(self):\n # Caching mechanism- don't generate if frames were already rendered\n if os.path.exists(self.framesPath):\n return\n print(f\"Generating frames for video {self.vid_id}\")\n if self.requestedFrames is not None:\n print(f\"Frames requested: {self.requestedFrames}\")\n self.mkdir(self.framesPath)\n count = 0\n while self.cap.isOpened():\n ret, frame = self.cap.read()\n # This cleanly exits if we can't grab another frame\n if not ret:\n break\n cond = count in self.requestedFrames if self.requestedFrames is not None else count % 30 == 1\n if cond:\n outpath = f\"{self.framesPath}/frame{count}.jpg\"\n cv2.imwrite(outpath, frame)\n print(f\"Frame {outpath} written to disk\")\n count += 1", "def extract_frames(self, v_path):\n vidcap = cv2.VideoCapture(v_path)\n succ = True\n v_frames = []\n while succ == True:\n succ, frame = vidcap.read()\n if succ == True:\n v_frames.append(frame)\n return v_frames\n\n # vidcap = cv2.VideoCapture(v_path)\n # if not vidcap.isOpened():\n # print(\"The error occurred when open video: \" + v_path)\n # return None\n #\n # v_frames = []\n # while vidcap.isOpened():\n # success, image = vidcap.read()\n # if success:\n # v_frames.append(image)\n # else:\n # break\n # return v_frames", "def extract_frames(video_path):\r\n ########################################################################################################\r\n # You can change the lines below to implement your own frame extracting method (and possibly other preprocessing),\r\n # or just use the provided codes.\r\n import cv2\r\n vid = 
cv2.VideoCapture(video_path)\r\n frames = []\r\n while True:\r\n success, frame = vid.read()\r\n if not success:\r\n break\r\n if frame is not None:\r\n frames.append(frame)\r\n # Here, we extract one frame only without other preprocessing\r\n if len(frames) >= 1:\r\n break\r\n vid.release()\r\n return frames\r\n ########################################################################################################\r", "def get_samples():\n samples: dict([str,list]) = dict()\n for genre in os.listdir(samples_directory):\n samples[genre] = []\n for file in os.listdir(f\"{samples_directory}/{genre}\"):\n if file.endswith(sound_file_format):\n samples[genre].append(f\"{samples_directory}/{genre}/{file}\") \n return samples", "def scan_path(directory):\n objname= str(base64.b64encode(directory.encode('utf-8')))\n preprocess='preprocess'\n\n if not os.path.isdir(preprocess):\n os.mkdir(preprocess)\n if os.path.isfile(preprocess+'/'+objname):\n picklefile=open(preprocess+'/'+objname,'rb')\n obj=pickle.load(picklefile)\n if time.ctime(os.path.getmtime(directory))==obj['lastmodified']:\n return obj['images']\n\n images=[]\n for (dirpath, dirnames, filenames) in os.walk(directory):\n for f in filenames:\n path=dirpath+'/'+f;\n image=get_face(path)\n if image is not None:\n encodings = face_recognition.face_encodings(image)\n if len(encodings) > 0:\n img = {\n 'image': image,\n 'encodings': encodings,\n 'name': f\n }\n images.append(img)\n\n obj={\n 'lastmodified':time.ctime(os.path.getmtime(directory)),\n 'images': images\n }\n file=open(preprocess+'/'+objname,'wb')\n pickle.dump(obj,file)\n\n return images", "def process_gpr_dir(data_dir):\n file_list = sorted(os.listdir(data_dir))\n control_cnt = dict()\n for file_name in file_list:\n (base, ext) = os.path.splitext(file_name)\n if (ext == '.gpr') or (ext == '.GPR'):\n logger.info('dir %s file %s base %s ext %s', data_dir, file_name, base, ext)\n input_file = os.path.join(data_dir, file_name)\n logger.info('input %s', input_file)\n process_gpr_file(input_file, control_cnt)\n \n # create a dataframe\n keys = sorted(control_cnt.keys())\n id = [ x[0] for x in keys ]\n name = [ x[1] for x in keys ]\n control = [ control_cnt[x]['control'] for x in keys ]\n exptl = [ control_cnt[x]['exptl'] for x in keys ]\n control_df = DataFrame(data= [ ('id', id), ('name', name), ('control', control), ('exptl', exptl) ] )\n return(control_df)", "def loadAll(saliencyDir):\n #os.chdir(imageDir)\n I = []\n S = []\n #for filename in glob.glob(\"*.jpg\"):\n # I.append(io.imread(filename))\n #os.chdir(saliencyDir)\n for filenmae in glob.glob(saliencyDir+\"*.jpg\"):\n S.append(io.imread(filename))\n return {'saliency':S}", "def get_dict_of_scenes(self, dir_path):\n label_dir_path = dir_path + '/label'\n label_image_files_path = sorted(glob.glob(label_dir_path + '/*.png'))\n label_image_files_name = {x[len(label_dir_path)+1:-4] for x in label_image_files_path}\n\n pcd_dir_path = dir_path + '/pcd'\n pcd_image_files_path = sorted(glob.glob(pcd_dir_path + '/*.pcd'))\n pcd_image_files_name = {x[len(pcd_dir_path)+1:-4] for x in pcd_image_files_path}\n\n dict_of_np_array_dict = {}\n for cur_file_name in label_image_files_name:\n if cur_file_name in pcd_image_files_name:\n np_array_dict = {}\n np_array_dict['label'] = imageio.imread(label_dir_path+'/'+cur_file_name+'.png')\n\n temp_pcd = o3d.io.read_point_cloud(pcd_dir_path+'/'+cur_file_name+'.pcd', remove_nan_points=False)\n np_array_dict['rgb'] = self.get_rgb_from_pcd(temp_pcd)\n np_array_dict['xyz'] = 
self.get_xyz_from_pcd(temp_pcd)\n np_array_dict['depth'] = np_array_dict['xyz'][:,:,2]\n\n dict_of_np_array_dict[dir_path+'/'+cur_file_name] = np_array_dict\n return dict_of_np_array_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that reprojects a shapefile's CRS to a given CRS. Reprojected .shp files will be written to the outshp directory. Reprojected .shp files will keep the same names and all attributes as those in inshpdir.
def reproject(self, inshpdir, outshpdir, crs): self.inshpdir = inshpdir self.outshpdir = outshpdir self.crs = crs logging.info('%s %s', "Preparing to reproject files in :", self.inshpdir) # Getting all the path of .shp files path_of_shp_files= [] for filename in os.listdir(self.inshpdir): if filename.endswith(".shp"): path_of_shp_files.append(os.path.join(self.inshpdir +"/", filename)) logging.info('%s %s', "shp file found: ", filename) # Reading the input .shp files. for shpf in path_of_shp_files: output_file_name = (os.path.basename(shpf)) with fiona.open(shpf) as input_shp: meta = input_shp.meta schema = input_shp.schema # Writing the output .shp files logging.info('%s %s', "Writing reprojected files to :", self.outshpdir) with fiona.open(self.outshpdir + '/' + output_file_name, 'w', crs=self.crs, \ driver='ESRI Shapefile', schema=schema) as output_shp: with fiona.open(shpf) as input_shp: meta = input_shp.meta for f in input_shp: output_shp.write(f) logging.info('%s', "Reprojecting done.")
[ "def reproject(shapefile, crs):\n\treturn shapefile.to_crs(crs) if shapefile.crs != crs else shapefile", "def reproject_shapefile(source_dataset, source_layer, source_srs, target_srs):\n # make GeoTransformation\n coord_trans = osr.CoordinateTransformation(source_srs, target_srs)\n\n # make target shapefile\n tar_file_name = verify_shp_name(source_dataset.GetName(), shorten_to=4).split(\".shp\")[\n 0] + \"_epsg\" + target_srs.GetAuthorityCode(None) + \".shp\"\n tar_shp = create_shp(tar_file_name, layer_type=get_geom_simplified(source_layer))\n tar_lyr = tar_shp.GetLayer()\n\n # look up layer (features) definitions in input shapefile\n src_lyr_def = source_layer.GetLayerDefn()\n # copy field names of input layer attribute table to output layer\n for i in range(0, src_lyr_def.GetFieldCount()):\n tar_lyr.CreateField(src_lyr_def.GetFieldDefn(i))\n\n # instantiate feature definitions object for output layer (currently empty)\n tar_lyr_def = tar_lyr.GetLayerDefn()\n\n try:\n feature = source_layer.GetNextFeature()\n except AttributeError:\n logging.error(\"Invalid or empty vector dataset.\")\n return None\n while feature:\n # get the input geometry\n geometry = feature.GetGeometryRef()\n # re-project (transform) geometry to new system\n geometry.Transform(coord_trans)\n # create new output feature\n out_feature = ogr.Feature(tar_lyr_def)\n # assign in-geometry to output feature and copy field values\n out_feature.SetGeometry(geometry)\n for i in range(0, tar_lyr_def.GetFieldCount()):\n out_feature.SetField(tar_lyr_def.GetFieldDefn(i).GetNameRef(), feature.GetField(i))\n # add the feature to the shapefile\n tar_lyr.CreateFeature(out_feature)\n # prepare next iteration\n feature = source_layer.GetNextFeature()\n\n # add projection file\n make_prj(tar_file_name, int(source_srs.GetAuthorityCode(None)))", "def reproject(self, file):\n fname = os.path.basename(file)\n dst = os.path.join(self.tif_folder, \"proj_\" + fname)\n out = gdal.Warp(dst, file, dstSRS=PROJ)\n del out", "def reprojectTiff(self,inFname,outFname):\n if not os.path.exists(inFname):\n print \"reprojectTiff - input file %s does not exist - doing nothing.\" % inFname\n return\n if os.path.exists(outFname):\n print \"reprojectTiff - removing output file %s.\" % outFname\n # Remove the temporary reprojected geotiff.\n os.remove(outFname)\n\n print \" re-projecting SRTM data to map projection...\"\n os.system(\"gdalwarp -of GTiff -co \\\"TILED=YES\\\" -srcnodata 32767 -t_srs \\\"+proj=merc +ellps=sphere +R=6378137 +a=6378137 +units=m\\\" -rcs -order 3 -tr 30 30 -multi %s %s\" % (inFname,outFname))\n print \"reprojectTiff finished...\"", "def reproject_raster_file(source_path: str, destination_path: str, dest_crs: str, width: int, height: int):\n opts = gdal.WarpOptions(dstSRS=dest_crs, width=width, height=height)\n gdal.Warp(destNameOrDestDS=destination_path, srcDSOrSrcDSTab=source_path, options=opts)", "def _reprojectMap(self):\n # reprojection of raster\n if self.proj_srs != self.proj_location: # TODO: do it better\n grass.message(_(\"Reprojecting raster...\"))\n self.temp_warpmap = grass.tempfile()\n \n if int(os.getenv('GRASS_VERBOSE', '2')) <= 2:\n nuldev = file(os.devnull, 'w+')\n else:\n nuldev = None\n \n #\"+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs\"\n # RGB rasters - alpha layer is added for cropping edges of projected raster\n try:\n if self.temp_map_bands_num == 3:\n ps = grass.Popen(['gdalwarp',\n '-s_srs', '%s' % self.proj_srs,\n '-t_srs', '%s' % 
self.proj_location,\n '-r', self.params['method'], '-dstalpha',\n self.temp_map, self.temp_warpmap], stdout = nuldev)\n # RGBA rasters\n else:\n ps = grass.Popen(['gdalwarp',\n '-s_srs', '%s' % self.proj_srs,\n '-t_srs', '%s' % self.proj_location,\n '-r', self.params['method'],\n self.temp_map, self.temp_warpmap], stdout = nuldev)\n ps.wait()\n except OSError, e:\n grass.fatal('%s \\nThis can be caused by missing %s utility. ' % (e, 'gdalwarp'))\n \n if nuldev:\n nuldev.close()\n \n if ps.returncode != 0:\n grass.fatal(_('%s failed') % 'gdalwarp')\n grass.try_remove(self.temp_map)\n # raster projection is same as projection of location\n else:\n self.temp_warpmap = self.temp_map\n self.temp_files_to_cleanup.remove(self.temp_map)\n\n return self.temp_warpmap", "def project_shapefile(self, shapefile):", "def project_raster(input_raster: str, output_raster: str, crs: str):\n gdal.Warp(output_raster, input_raster, dstSRS=crs)", "def shp_to_json(base_path, shp_path, name):\n print \" -- Projecting shapefile to WGS-84 and converting to JSON\"\n\n # define ogr drivers\n shp_driver = ogr.GetDriverByName('ESRI Shapefile')\n json_driver = ogr.GetDriverByName('GeoJSON')\n\n # define the input layer\n shp = shp_driver.Open(shp_path)\n shp_lyr = shp.GetLayer()\n\n # create the output layer\n json_path = os.path.join(base_path, name + \".geojson\")\n if os.path.exists(json_path):\n json_driver.DeleteDataSource(json_path)\n json = json_driver.CreateDataSource(json_path)\n json_lyr = json.CreateLayer(json_path, geom_type=ogr.wkbMultiPolygon)\n json_lyr_defn = json_lyr.GetLayerDefn()\n\n # create the CoordinateTransformation\n json_ref = osr.SpatialReference()\n json_ref.ImportFromEPSG(4326)\n coord_trans = osr.CoordinateTransformation(\n shp_lyr.GetSpatialRef(), json_ref)\n\n # add fields to output layer\n shp_lyr_defn = shp_lyr.GetLayerDefn()\n for i in range(0, shp_lyr_defn.GetFieldCount()):\n field_defn = shp_lyr_defn.GetFieldDefn(i)\n json_lyr.CreateField(field_defn)\n\n # loop through the input features\n shp_feat = shp_lyr.GetNextFeature()\n while shp_feat:\n # reproject the input geometry\n geom = shp_feat.GetGeometryRef()\n geom.Transform(coord_trans)\n # create a new feature\n json_feat = ogr.Feature(json_lyr_defn)\n # set the feature's geometry and attributes\n json_feat.SetGeometry(geom)\n for i in range(0, json_lyr_defn.GetFieldCount()):\n json_feat.SetField(\n json_lyr_defn.GetFieldDefn(i).GetNameRef(),\n shp_feat.GetField(i))\n # add new feature to output Layer\n json_lyr.CreateFeature(json_feat)\n # destroy the features and get the next input feature\n json_feat.Destroy()\n shp_feat.Destroy()\n shp_feat = shp_lyr.GetNextFeature()\n\n # close the datasets\n shp.Destroy()\n json.Destroy()\n\n return json_path", "def to_crs(self, crs):\n for data in [self.junctions, self.tanks, self.reservoirs,\n self.pipes, self.pumps, self.valves]:\n if 'geometry' in data.columns:\n data = data.to_crs(crs, inplace=True)", "def crs_reproject_cmd(epsg):\n def processor(cm):\n if (cityjson.MODULE_PYPROJ_AVAILABLE == False):\n str = \"Reprojection skipped: Python module 'pyproj' missing (to reproject coordinates)\"\n print_cmd_alert(str)\n str = \"Install it: https://pypi.org/project/pyproj/\"\n print_cmd_warning(str)\n raise click.ClickException('Abort.')\n print_cmd_status('Reproject to EPSG:%d' % epsg)\n if (cm.get_epsg() == None):\n print_cmd_warning(\"WARNING: CityJSON has no EPSG defined, can't be reprojected.\")\n else:\n with warnings.catch_warnings(record=True) as w:\n cm.reproject(epsg)\n 
print_cmd_warning(w)\n return cm\n return processor", "def reproject(source_dataset, new_projection_dataset):\n\n # get source and target spatial reference systems\n srs_src = get_srs(source_dataset)\n srs_tar = get_srs(new_projection_dataset)\n\n # get dictionary of layer type and layer (or band=layer)\n layer_dict = get_layer(source_dataset)\n\n if layer_dict[\"type\"] == \"raster\":\n reproject_raster(source_dataset, srs_src, srs_tar)\n\n if layer_dict[\"type\"] == \"vector\":\n reproject_shapefile(source_dataset, layer_dict[\"layer\"], srs_src, srs_tar)", "def preprocess_shapefile(self):\n raw = self.load_external_shapefile()\n\n if not raw.crs:\n msg = f\"Department {self.name} has no projection defined\"\n raise InputError(msg)\n pre = raw.to_crs(util.crs.DEFAULT)\n\n self.save_preprocessed_shapefile(pre)", "def proj_srs_convert(srs):\n res = osr.SpatialReference()\n epsg = srs.to_epsg()\n\n if epsg:\n res.ImportFromEPSG(epsg)\n else:\n proj4 = srs.to_proj4()\n res.ImportFromProj4(proj4)\n \n res.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n return res", "def shp2geojson(sourceDir, outputDir, github=0):\n # make a list of shape files\n sourceDir = fixDirname(sourceDir)\n outputDir = fixDirname(outputDir)\n\n sourceList = dir_list.shpFileList(sourceDir)\n\n\n # run the through the list of shape files\n for shapeFile in sourceList:\n\n # reproject\n # INSERT a test for expected projection print a warning if not expected state plane.\n newName = \"%sproj_%s\"% (outputDir,shapeFile)\n print \"sourceDir: \", sourceDir\n print \"shapeFile: \", shapeFile\n oldName = \"%s%s\"% (sourceDir, shapeFile)\n print \"oldName: \", oldName\n reprojectString = \"ogr2ogr -t_srs EPSG:4326 %s %s\"% (newName, oldName)\n print reprojectString\n os.system(reprojectString) \n\n # convert to geoJSON\n fileNameList = shapeFile.split('.')\n jsonFileName = fileNameList[0]+\".geoJSON\"\n fulljsonFilePath = outputDir+jsonFileName\n print \"output geoJSON path: \" , fulljsonFilePath\n convertString = \"ogr2ogr -f geoJSON %s %s\"% (fulljsonFilePath, newName)\n os.system(convertString)\n if github:\n push_to_github(fulljsonFilePath, jsonFileName)", "def saveAsProj4(crss, filename=None):\r\n\r\n logger.warning(\"Export only ocentric, projected ocentric or ographic (with invserseFlattening=0) CRS \"\r\n \"while proj4 does not convert correctly ocentric latitude to ographic latitude\")\r\n\r\n hasValidationError = False\r\n if filename is None:\r\n filename = crss[0]['wkt'].getAuthorityName()\r\n\r\n if filename and filename is not sys.stdout:\r\n fileToOutput = open(filename, 'w')\r\n else:\r\n fileToOutput = filename\r\n\r\n try:\r\n fileToOutput.write(\"%s\\n\" % IAUCatalog.REFERENCES[crss[0]['wkt'].getAuthorityName()])\r\n for crs in crss:\r\n crsType = crs['type']\r\n wktObj = crs['wkt']\r\n # export all CRS having inverse_flattening=0 to avoid conversion error from ocentric latitude <-->\r\n # ographic latitude with proj4\r\n if IAUCatalog.isEqual(wktObj.getInverseFlattening(), 0):\r\n\r\n # WKT validation\r\n result, projString, wkt = WKT.isValid(wktObj.getWkt())\r\n\r\n if result:\r\n # WKT valid\r\n\r\n # Get the right authority\r\n if wktObj.getProjection() is None:\r\n projection = \"\"\r\n authorityCode = wktObj.getAuthorityCode()\r\n authorityName = wktObj.getAuthorityName()\r\n else:\r\n authorityCode = wktObj.getProjectionAuthorityCode()\r\n authorityName = wktObj.getProjectionAuthorityName()\r\n projection = \" - \"+wktObj.getProjection().value['projection']\r\n\r\n fileToOutput.write(\r\n 
\"#%s : %s WKT Codes for %s : %s %s\\n\" % (\r\n authorityCode, authorityName,\r\n crs['target'], crsType.value, projection\r\n )\r\n )\r\n fileToOutput.write(\"<%s> %s\\n\" % (authorityCode, projString))\r\n else:\r\n # WKT not valid, skip it\r\n hasValidationError = True\r\n fileToOutput.close()\r\n\r\n if hasValidationError:\r\n raise WKT.ValidationError()\r\n\r\n finally:\r\n if fileToOutput is not sys.stdout:\r\n fileToOutput.close()", "def Dissolve_ShapefileToShapefile(shapefileFolder, inFileName, outFileName):\n \n from osgeo import ogr\n import os\n \n # get layer from data source\n d_in = ogr.GetDriverByName('ESRI Shapefile')\n ds_in = d_in.Open(shapefileFolder + '\\\\' + inFileName + '.shp',0)\n l_in = ds_in.GetLayer()\n \n # check the geometry of the layer\n check_geom = l_in.GetGeomType()\n \n if check_geom == 1:\n # crate multi point geometry\n multi_geom = ogr.Geometry(ogr.wkbMultiPoint)\n set_geom = ogr.wkbMultiPoint\n if check_geom == 2:\n # create multi line string geometry\n multi_geom = ogr.Geometry(ogr.wkbMultiLineString)\n set_geom = ogr.wkbMultiLineString\n if check_geom == 3:\n # create a multi polygon geometry\n multi_geom = ogr.Geometry(ogr.wkbMultiPolygon)\n set_geom = ogr.wkbMultiPolygon\n \n # loop through each feature until there are no more\n for input_feat in l_in:\n # get geometry from feature\n g = input_feat.GetGeometryRef()\n \n # add geometry to multi geometry\n multi_geom.AddGeometry(g)\n \n # delete geometry\n del g\n \n l_in.ResetReading()\n \n \"\"\"\n # dissolve the multi geometry using union cascaded if not a point a layer\n if (check_geom == 2) or (check_geom == 3):\n new_geom = multi_geom.UnionCascaded()\n else:\n new_geom = multi_geom\n \"\"\"\n d_out = ogr.GetDriverByName('ESRI Shapefile')\n \n # remove output shape file if it already exists\n if os.path.exists(shapefileFolder + '\\\\' + outFileName + '.shp'):\n d_out.DeleteDataSource(shapefileFolder + '\\\\' + outFileName + '.shp')\n \n # open new shapefile\n ds_out = d_out.CreateDataSource(shapefileFolder + '\\\\' + outFileName + '.shp')\n l_out = ds_out.CreateLayer(outFileName, l_in.GetSpatialRef(), set_geom)\n \n # add field schema to out layer\n l_out.CreateFields(l_in.schema)\n \n defn = l_in.GetLayerDefn()\n \n # create a new feature\n newFeat = ogr.Feature(l_out.GetLayerDefn())\n # add geometry to the new feature\n newFeat.SetGeometry(multi_geom)\n # add field values to the new feature\n for i in range(0, defn.GetFieldCount()):\n field_value = l_in.GetFeature(0).GetField(i)\n field_name = defn.GetFieldDefn(i).GetNameRef()\n # if the field name is 'ID', set that value to blank\n if field_name == 'ID':\n field_value = \"\"\n if (field_name == 'SHAPE_Leng') or (field_name == 'Shape_Leng'):\n # set the calculated length from above to the field value\n # if geometry is point, set to blank\n if check_geom == 1:\n field_value = ''\n # if geom is line, calculate length\n if check_geom == 2:\n field_value = newFeat.GetGeometryRef().Length()\n # if geom is polygon, calculate the length of the boundary (perimeter)\n if check_geom == 3:\n field_value = newFeat.GetGeometryRef().Boundary().Length()\n if (field_name == 'SHAPE_Area') or (field_name == 'Shape_Area'):\n # if geometry is polygon, calculate the area\n if check_geom == 3:\n field_value = newFeat.GetGeometryRef().Area()\n else:\n # if not a polygon, set value to blank\n field_value = ''\n newFeat.SetField(i, field_value)\n # add new feature to the out layer\n l_out.CreateFeature(newFeat)\n \n # close data sources\n del ds_in\n del ds_out", 
"def _reproject(xy, crs1, crs2):\n return crs1.transform(crs2, *xy)", "def reproject_raster(source_dataset, source_srs, target_srs):\n # READ THE SOURCE GEO TRANSFORMATION (ORIGIN_X, PIXEL_WIDTH, 0, ORIGIN_Y, 0, PIXEL_HEIGHT)\n src_geo_transform = source_dataset.GetGeoTransform()\n\n # DERIVE PIXEL AND RASTER SIZE\n pixel_width = src_geo_transform[1]\n x_size = source_dataset.RasterXSize\n y_size = source_dataset.RasterYSize\n\n # ensure that TransformPoint (later) uses (x, y) instead of (y, x) with gdal version >= 3.0\n source_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n target_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n # get CoordinateTransformation\n coord_trans = osr.CoordinateTransformation(source_srs, target_srs)\n\n # get boundaries of reprojected (new) dataset\n (org_x, org_y, org_z) = coord_trans.TransformPoint(src_geo_transform[0], src_geo_transform[3])\n (max_x, min_y, new_z) = coord_trans.TransformPoint(src_geo_transform[0] + src_geo_transform[1] * x_size,\n src_geo_transform[3] + src_geo_transform[5] * y_size, )\n\n # INSTANTIATE NEW (REPROJECTED) IN-MEMORY DATASET AS A FUNCTION OF THE RASTER SIZE\n mem_driver = gdal.GetDriverByName('MEM')\n tar_dataset = mem_driver.Create(\"\",\n int((max_x - org_x) / pixel_width),\n int((org_y - min_y) / pixel_width),\n 1, gdal.GDT_Float32)\n # create new GeoTransformation\n new_geo_transformation = (org_x, pixel_width, src_geo_transform[2],\n org_y, src_geo_transform[4], -pixel_width)\n\n # assign the new GeoTransformation to the target dataset\n tar_dataset.SetGeoTransform(new_geo_transformation)\n tar_dataset.SetProjection(target_srs.ExportToWkt())\n\n # PROJECT THE SOURCE RASTER ONTO THE NEW REPROJECTED RASTER\n rep = gdal.ReprojectImage(source_dataset, tar_dataset,\n source_srs.ExportToWkt(), target_srs.ExportToWkt(),\n gdal.GRA_Bilinear)\n\n # SAVE REPROJECTED DATASET AS GEOTIFF\n src_file_name = source_dataset.GetFileList()[0]\n tar_file_name = src_file_name.split(\".tif\")[0] + \"_epsg\" + target_srs.GetAuthorityCode(None) + \".tif\"\n create_raster(tar_file_name, raster_array=tar_dataset.ReadAsArray(),\n epsg=int(target_srs.GetAuthorityCode(None)),\n geo_info=tar_dataset.GetGeoTransform())\n logging.info(\"Saved reprojected raster as %s\" % tar_file_name)", "def Project_and_resample_Raster_EPSG(input_raster, output_raster, dx, dy, epsgCode, resample='near'):\n #Project utm\n cmdString = \"gdalwarp -t_srs EPSG:\"+str(epsgCode)+\" -tr \"\\\n +str(dx)+\" \"+str(dy)+\" -r \"+resample+\" -overwrite \"+input_raster+\" \"+output_raster\n callSubprocess(cmdString, \"project and re-grid Raster\")\n #Delete temp file\n #os.remove(\"tempRaster.tif\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a bounding box for each polygon. Takes a polygon shp file as input and creates a polygon shp file of bounding boxes, one for each polygon it contains. Bounding boxes will have the attributes of their respective polygons.
def getBbox(self, srcfile, outfile): self.srcfile = srcfile self.outfile = outfile with fiona.drivers(): logging.info("Reading file: " + self.srcfile) with fiona.open(self.srcfile) as src: self.meta = src.meta logging.info("Creating output file: " + self.outfile) with fiona.open(self.outfile, 'w', **self.meta) as dst: for f in src: logging.info("Creating bounds: " + str(fiona.bounds(f))) bbox = Polygon.from_bounds(fiona.bounds(f)[0], fiona.bounds(f)[1],fiona.bounds(f)[2],fiona.bounds(f)[3]) f['geometry'] = mapping(bbox) dst.write(f) logging.info("Done creating bounds for all features. Writing to the specified output file.")
[ "def make_polygon(\n class_name: str,\n point_path: List[Point],\n bounding_box: Optional[Dict] = None,\n subs: Optional[List[SubAnnotation]] = None,\n slot_names: Optional[List[str]] = None,\n) -> Annotation:\n return Annotation(\n AnnotationClass(class_name, \"polygon\"),\n _maybe_add_bounding_box_data({\"path\": point_path}, bounding_box),\n subs or [],\n slot_names=slot_names or [],\n )", "def convert_bnd_to_shape(the_box):\n barycenter = the_box.Center()\n x_dir = the_box.XDirection()\n y_dir = the_box.YDirection()\n z_dir = the_box.ZDirection()\n half_x = the_box.XHSize()\n half_y = the_box.YHSize()\n half_z = the_box.ZHSize()\n\n x_vec = gp_XYZ(x_dir.X(), x_dir.Y(), x_dir.Z())\n y_vec = gp_XYZ(y_dir.X(), y_dir.Y(), y_dir.Z())\n z_vec = gp_XYZ(z_dir.X(), z_dir.Y(), z_dir.Z())\n point = gp_Pnt(barycenter.X(), barycenter.Y(), barycenter.Z())\n axes = gp_Ax2(point, gp_Dir(z_dir), gp_Dir(x_dir))\n axes.SetLocation(\n gp_Pnt(point.XYZ() - x_vec * half_x - y_vec * half_y - z_vec * half_z)\n )\n box = BRepPrimAPI_MakeBox(axes, 2.0 * half_x, 2.0 * half_y, 2.0 * half_z).Shape()\n return box", "def create_point_shapefile(data, polygon, point_shapefile):\n print('Create a point shapefile with all the GLDAS grid cells')\n\n longitude_array = data['longitude_array']\n latitude_array = data['latitude_array']\n polygon_driver = polygon.driver\n point_driver = polygon_driver\n polygon_crs = polygon.crs\n point_crs = polygon_crs.copy()\n\n point_schema = {'geometry': 'Point', \\\n 'properties': {'lon_index': 'int:4', \\\n 'lat_index': 'int:4'}}\n with fiona.open(point_shapefile, 'w', driver=point_driver,\n crs=point_crs,\n schema=point_schema) as point:\n for data_longitude_index in range(len(longitude_array)):\n longitude = longitude_array[data_longitude_index]\n if longitude > 180:\n longitude -= 360\n for data_latitude_index in range(len(latitude_array)):\n latitude = latitude_array[data_latitude_index]\n point_prepared = {'lon_index': data_longitude_index, \\\n 'lat_index': data_latitude_index}\n point_geometry = shapely.geometry.mapping( \\\n shapely.geometry.Point((longitude, latitude)))\n point.write({'properties': point_prepared, \\\n 'geometry': point_geometry})\n\n print(' - Point shapefile created')", "def box(minx, miny, maxx, maxy) -> shapely.Polygon:\n return shapely.Polygon([\n (minx, miny),\n (maxx, miny),\n (maxx, maxy),\n (minx, maxy),\n ])", "def polygonize(input_file, output_file, proj):\n with buzz.Dataset(sr_work=proj, sr_fallback=\"WGS84\").close as ds:\n ds.open_raster(\"raster\", input_file)\n if os.path.isfile(output_file):\n os.remove(output_file)\n fields = [{\"name\": \"class\", \"type\": np.int32}]\n ds.create_vector(\n \"vector\", output_file, \"polygon\", driver=\"geojson\", fields=fields\n )\n fp = ds[\"raster\"].fp\n mask = ds[\"raster\"].get_data()\n for class_idx in np.unique(mask):\n if class_idx != 0:\n polygons = fp.find_polygons(mask == class_idx)\n if not polygons:\n continue\n for poly in polygons:\n ds[\"vector\"].insert_data(poly, {\"class\": class_idx})", "def polygon_to_bounding_box(polygon):\n ii64 = np.iinfo(type(BoundingBox().xmin))\n bbox = BoundingBox(xmin=ii64.max, ymin=ii64.max,\n xmax=ii64.min, ymax=ii64.min)\n for pt in polygon.points:\n bbox.xmin = min(bbox.xmin, pt.x)\n bbox.ymin = min(bbox.ymin, pt.y)\n bbox.xmax = max(bbox.xmax, pt.x)\n bbox.ymax = max(bbox.ymax, pt.y)\n return bbox", "def bbox(coordinates, crs, outname=None, format='ESRI Shapefile', overwrite=True):\n srs = crsConvert(crs, 'osr')\n\n ring = 
ogr.Geometry(ogr.wkbLinearRing)\n\n ring.AddPoint(coordinates['xmin'], coordinates['ymin'])\n ring.AddPoint(coordinates['xmin'], coordinates['ymax'])\n ring.AddPoint(coordinates['xmax'], coordinates['ymax'])\n ring.AddPoint(coordinates['xmax'], coordinates['ymin'])\n ring.CloseRings()\n\n geom = ogr.Geometry(ogr.wkbPolygon)\n geom.AddGeometry(ring)\n\n geom.FlattenTo2D()\n\n bbox = Vector(driver='Memory')\n bbox.addlayer('bbox', srs, ogr.wkbPolygon)\n bbox.addfield('id', width=4)\n bbox.addfeature(geom, 'id', 1)\n geom.Destroy()\n if outname is None:\n return bbox\n else:\n bbox.write(outname, format, overwrite)", "def demo_polygons_transforms_polygons_bounding_boxes(cls):\n\n image = np.copy(cls.image)\n meerkat_left = cls.meerkat_left\n meerkat_center = cls.meerkat_center\n meerkat_right = cls.meerkat_right\n\n # 1\n psoi = imgaug.PolygonsOnImage([meerkat_left, meerkat_center, meerkat_right], shape=image.shape)\n\n # Convert polygons to BBs and put them in BoundingBoxesOnImage instance\n # we will need that instance below to easily draw all augmented BBs on the image\n bbsoi = BoundingBoxesOnImage([polygon.to_bounding_box() for polygon in psoi.polygons], shape=psoi.shape)\n\n # augment image, BBs and polygons\n batch_aug = imgaug_augmenters.Affine(rotate=45)(images=[image], bounding_boxes=bbsoi,\n polygons=psoi, return_batch=True)\n\n images_aug = batch_aug.images_aug\n bbsoi_aug = batch_aug.bounding_boxes_aug\n psoi_aug = batch_aug.polygons_aug\n\n # visualize\n imgaug.imshow(psoi_aug.draw_on_image(bbsoi_aug.draw_on_image(images_aug[0], size=3),\n alpha_face=0.2, size_points=7))\n pass", "def computeBoundingShape(scene, shape='bellipsoid'):\n \n gr= fruti.pgl.Group([ sh.geometry for sh in scene ])\n tglset = pgl.fit( shape, gr )\n #hull = pgl.Shape( tglSet, __Green )\n return tglset", "def bounding_rect(polygon):\n xs = [q[0] for q in polygon]\n ys = [q[1] for q in polygon]\n return [[min(xs), min(ys)], [max(xs), max(ys)]]", "def create_grid(shpfile, outputgrid):\n\tbbox = get_bbox(shpfile)\n\tminx = bbox[0]\n\tminy = bbox[1]\n\tmaxx = bbox[2]\n\tmaxy = bbox[3]\n\tdivision = float(0.016000)\n\t# so if we have a bbox, we want to create a bbox every .016 we want to get the number of values \n\tdx = (abs(maxx - minx)/division)\n\tnx = int(math.ceil(abs(maxx - minx)/division))\n\tny = int(math.ceil(abs(maxy - miny)/division))\n\tw = shapefile.Writer(shapefile.POLYGON)\n\tw.autoBalance = 1\n\tw.field(\"ID\")\n\tid=0\n\tfor i in range(ny):\n\t\tfor j in range(nx):\n\t\t\tid+=1\n\t\t\tvertices = []\n\t\t\tparts = []\n\t\t\tvertices.append([min(minx+dx*j,maxx),max(maxy-dy*i,miny)])\n\t\t\tvertices.append([min(minx+dx*(j+1),maxx),max(maxy-dy*i,miny)])\n\t\t\tvertices.append([min(minx+dx*(j+1),maxx),max(maxy-dy*(i+1),miny)])\n\t\t\tvertices.append([min(minx+dx*j,maxx),max(maxy-dy*(i+1),miny)])\n\t\t\tparts.append(vertices)\n\t\t\tw.poly(parts)\n\t\t\tw.record(id,\"null\",\"null\")\n\tw.save(outputgrid)\n\treturn outputgrid", "def bounding_box(self):\n# first_point and last_point contain UTM coordinates from self.shapes that\n# correspond to top left and bottom right squares in the geographic grid\n first_point = self.shapes[0].points[0]\n last_point = self.shapes[len(self.shapes)-1].points[0]\n\n# The 0th element in each coord pair describes longitude\n west_UTM = first_point[0]\n east_UTM = last_point[0]\n\n# The 1th element in each coord pair describes latitude\n north_UTM = first_point[1]\n south_UTM = last_point[1]\n\n return [(west_UTM, east_UTM, self.west_lon, self.east_lon), (south_UTM, 
north_UTM, self.south_lat, self.north_lat)]", "def _create_polygon(klass, poly):\n polygon = Polygon.from_serializable(poly)\n polygon.absolutize()\n return polygon", "def make_bounding_box(\n class_name: str,\n x: float,\n y: float,\n w: float,\n h: float,\n subs: Optional[List[SubAnnotation]] = None,\n slot_names: Optional[List[str]] = None,\n) -> Annotation:\n return Annotation(\n AnnotationClass(class_name, \"bounding_box\"),\n {\"x\": round(x, 3), \"y\": round(y, 3), \"w\": round(w, 3), \"h\": round(h, 3)},\n subs or [],\n slot_names=slot_names or [],\n )", "def box(left: float, bottom: float, right: float, top: float, crs: MaybeCRS) -> Geometry:\n points = [(left, bottom), (left, top), (right, top), (right, bottom), (left, bottom)]\n return polygon(points, crs=crs)", "def bounding_box(self):\n points = np.array(self.polygon.exterior.coords)\n biggest_x = np.max(points, axis=0)[0] # biggest x among the points of this object\n biggest_y = np.max(points, axis=0)[1] # biggest x among the points of this object\n smallest_x = np.min(points, axis=0)[0] # smallest y seen among points of this object\n smallest_y = np.min(points, axis=0)[1] # smallest y seen among points of this object\n length = biggest_x - smallest_x\n width = biggest_y - smallest_y\n bounds = [(smallest_x, smallest_y),(smallest_x, biggest_y),(biggest_x, biggest_y),(biggest_x, smallest_y)]\n poly = Polygon(bounds)\n obj = PlacementObject(poly)\n obj.length = length\n obj.width = width\n return obj", "def _polygon_bbox(polygon: Polygon) -> dict:\n coords = polygon.bounds\n return {\"south\": coords[1], \"west\": coords[0], \"north\": coords[3], \"east\": coords[2], \"crs\": \"EPSG:4326\"}", "def createPolygonRectangle(shp_name, centerX=0.0, centerY=0.0, radiusX=1.0, radiusY=1.0, theta=0.0):\n short_shp_name = shp_name.split('.')\n # -- Create output file\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n if os.path.exists(shp_name):\n os.remove(shp_name)\n try:\n output = driver.CreateDataSource(shp_name)\n except:\n print\n 'Could not create output datasource ', shp_name\n sys.exit(1)\n\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(2154)\n newLayer = output.CreateLayer(short_shp_name[0], geom_type=ogr.wkbPolygon, srs=srs)\n if newLayer is None:\n print\n \"Could not create output layer\"\n sys.exit(1)\n newLayer.CreateField(ogr.FieldDefn(\"FID\", ogr.OFTInteger))\n newLayerDef = newLayer.GetLayerDefn()\n\n # -- Create ring feature\n feature = ogr.Feature(newLayerDef)\n\n ring = ogr.Geometry(ogr.wkbLinearRing)\n A = [centerX - radiusX, centerY - radiusY]\n B = [centerX - radiusX, centerY + radiusY]\n C = [centerX + radiusX, centerY + radiusY]\n D = [centerX + radiusX, centerY - radiusY]\n if theta != 0:\n A = rotate(A, theta, [centerX, centerY])\n B = rotate(B, theta, [centerX, centerY])\n C = rotate(C, theta, [centerX, centerY])\n D = rotate(D, theta, [centerX, centerY])\n ring.AddPoint(A[0], A[1])\n ring.AddPoint(B[0], B[1])\n ring.AddPoint(C[0], C[1])\n ring.AddPoint(D[0], D[1])\n ring.CloseRings()\n\n poly = ogr.Geometry(ogr.wkbPolygon)\n poly.AddGeometry(ring)\n\n feature.SetGeometry(poly)\n feature.SetField(\"FID\", 1)\n ring.Destroy()\n poly.Destroy()\n newLayer.CreateFeature(feature)\n\n output.Destroy()", "def project_shapefile(self, shapefile):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a polygon shp file as input and creates a point shapefile of centroids. Each point lies at the centroid of its polygon. This point layer will have the attributes of the respective polygons.
def getCentroids(self, srcfile, outfile): self.srcfile = srcfile self.outfile = outfile with fiona.drivers(): logging.info("Reading file: " + self.srcfile) with fiona.open(self.srcfile) as src: self.meta = src.meta self.meta['schema']['geometry'] = 'Point' logging.info("Creating output file: " + self.outfile) with fiona.open(self.outfile, 'w', **self.meta) as dst: for f in src: centroid = shape(f['geometry']).centroid f['geometry'] = mapping(centroid) dst.write(f) logging.info("Done creating centroids for all features. Writing to the specified output file.")
[ "def create_point_shapefile(data, polygon, point_shapefile):\n print('Create a point shapefile with all the GLDAS grid cells')\n\n longitude_array = data['longitude_array']\n latitude_array = data['latitude_array']\n polygon_driver = polygon.driver\n point_driver = polygon_driver\n polygon_crs = polygon.crs\n point_crs = polygon_crs.copy()\n\n point_schema = {'geometry': 'Point', \\\n 'properties': {'lon_index': 'int:4', \\\n 'lat_index': 'int:4'}}\n with fiona.open(point_shapefile, 'w', driver=point_driver,\n crs=point_crs,\n schema=point_schema) as point:\n for data_longitude_index in range(len(longitude_array)):\n longitude = longitude_array[data_longitude_index]\n if longitude > 180:\n longitude -= 360\n for data_latitude_index in range(len(latitude_array)):\n latitude = latitude_array[data_latitude_index]\n point_prepared = {'lon_index': data_longitude_index, \\\n 'lat_index': data_latitude_index}\n point_geometry = shapely.geometry.mapping( \\\n shapely.geometry.Point((longitude, latitude)))\n point.write({'properties': point_prepared, \\\n 'geometry': point_geometry})\n\n print(' - Point shapefile created')", "def centroid(features, options=None):\n\n if not options:\n options = {}\n\n coords = get_coords_from_features(features)\n\n if get_input_dimensions(coords) == 1:\n coords = [coords]\n\n x_sum = 0\n y_sum = 0\n length = 0\n\n x_sum, y_sum, length = reduce(reduce_coords, coords, [x_sum, y_sum, length])\n\n return point([x_sum / length, y_sum / length], options.get(\"properties\", None))", "def calc_centroid(self, points):", "def project_shapefile(self, shapefile):", "def centroid(ring: List[List[List[float]]]) -> Point:\n r = ring[0]\n x_avg = sum([point[0] for point in r]) / len(r)\n y_avg = sum([point[1] for point in r]) / len(r)\n return Point(x_avg, y_avg)", "def FindPSFcentroid(PSF, xyCentroidSize=None):\n \n if xyCentroidSize is None:\n \n xyCentroidSize = PSF.shape[-1] / 4\n \n if IsEven(xyCentroidSize): #@\n \n xyCentroidSize -= 1\n \n if PSF.ndim == 2:\n \n (junk, junk, y0, x0) = U.findMax(PSF)\n\n percentage = xyCentroidSize**2. / np.product(PSF.shape[-2:]) * 100\n #U.DEBUG_HERE()\n #print \"#DEBUG:\", percentage\n #PSF.tofile(\"seb_PSF\")\n #import cPickle\n #cPickle.dump(PSF, file('seb_PSF', 'w'), 2)\n #levelAtRim = U.topPercentile(np.around(PSF).astype(np.int32), \n # percentage)\n #print \"#DEBUG: \", levelAtRim\n # workaround for broken Priithon's U.topPercentile\n levelAtRim = U__topPercentile(PSF, percentage)\n\n yxC = tuple(U.findMax(PSF)[-2:])\n \n if yxC == (0,0):\n \n return np.asarray((0,0))\n else:\n \n return CentroidOverSquare(array=PSF, center=yxC, \n size=xyCentroidSize, background_threshold=levelAtRim) #@\n \n\n else: ## then 3D psf\n \n zprofile = U.max2d(PSF) ## max val for each x-y plane\n\n (PSFmax, junk, junk, z0) = U.findMax(zprofile) \n zprofile_baseline = U__topPercentile(zprofile, 99) ## value of 99%\n FWHM = len(np.where(zprofile > (PSFmax/2))[0])/2 ## FWHM/2\n\n #fit = U.fitGaussian1D((z0, FWHM, PSFmax - zprofile_baseline), \n # zprofile - zprofile_baseline)\n fit = U.fitGaussian(zprofile - zprofile_baseline, (z0, FWHM, PSFmax - zprofile_baseline))\n zc = fit[0][0] ## PSF center final z\n\n ## sandwich above & below: find (2D) yx center in both planes\n za = int(zc) ## bottom\n zb = za + 1 ## top\n weight_b = zc - za\n weight_a = 1. - weight_b\n\n (junk, junk, y0, x0) = U.findMax(PSF[za])\n\n percentage = xyCentroidSize**2. 
/ np.product(PSF.shape[-2:]) * 100\n levelAtRim = U__topPercentile(PSF[za:za+2],\n percentage)\n\n yxC = U.findMax(PSF[za])[-2:]\n \n if yxC == (0,0):\n \n return (0, yc, xc) ## assumes zmax is at zero\n else:\n\n (ya, xa) = CentroidOverSquare(array=PSF[za], center=yxC, \n size=xyCentroidSize, background_threshold=levelAtRim) #@\n yxC = U.findMax(PSF[zb])[-2:]\n (yb, xb) = CentroidOverSquare(array=PSF[zb], center=yxC, \n size=xyCentroidSize, background_threshold=levelAtRim) #@\n\n xc = weight_a*xa + weight_b*xb\n yc = weight_a*ya + weight_b*yb\n\n return (zc, yc, xc)", "def text_to_pts(textfile, outputshp):\n\t# TODO read from the textfile to get the headers\n\t# for now just use set values\n\t#0POI_ID,1SUPPLIER,LRO_BUILTUP,3PRIMARY_NAME,4LRO_DISPLAY_LONG,5LRO_DISPLAY_LAT,6POI_NAME,POSTAL_CODE,ISO_COUNTRY_CD\n\txList,yList,idList,nameList,supList=[],[],[],[],[]\n\t#read data from csv file and store in lists\n\twith open(textfile, 'r') as csvfile:\n\t\tr = csv.reader(csvfile, delimiter=',')\n\t\tctr = 0\n\t\tfor row in r:\n\t\t\tprint(row)\n\t\t\tif ctr > 0: #skip header\n\t\t\t\txList.append(float(row[4]))\n\t\t\t\tyList.append(float(row[5]))\n\t\t\t\tuidList.append(ctr)\n\t\t\t\tidList.append(row[0])\n\t\t\t\tnameList.append(row[6])\n\t\t\t\tsupList.append(row[1])\n\t\t\t\tctr += 1\n\t#Set up shapefile writer and create empty fields\n\tw = shp.Writer(shp.POINT)\n\tw.autoBalance = 1 #ensures gemoetry and attributes match\n\tw.field('X','F',10,8)\n\tw.field('Y','F',10,8)\n\tw.field('UID','C',10)\n\tw.field('PoiID','C',50)\n\tw.field('Name','C',100)\n\tw.field('Supplier','C',50)\n\t#loop through the data and write the shapefile\n\tfor j,k in enumerate(xList):\n\t\tw.point(k,yList[j]) #write the geometry\n\t\tprint(w.point(k,yList[j])\n\t\tw.record(k,yList[j],uidList[j], idList[j], nameList[j], supList[j]) #write the attributes\n\tw.save(outputshp)\n\treturn outputshp", "def __set_centroids(self, shape):\n # find areas for isolated shapes\n poly_areas = [(poly, sc.area_for_polygon(poly)) for poly in shape]\n\n # find shapes with max area\n max_area = max([area for poly, area in poly_areas])\n # determine which shape has max area\n max_poly = [poly for poly, area in poly_areas if area == max_area][0]\n # find centroid of that shape\n poly_centroid = sc.centroid_for_polygon(max_poly)\n\n return poly_centroid", "def centroid(vertices):\n return (vertices[0] + vertices[1] + vertices[2]) / 3", "def centroid(pos_1, pos_2, pos_3):\n x1, y1 = pos_1[0], pos_1[1]\n x2, y2 = pos_2[0], pos_2[1]\n x3, y3 = pos_3[0], pos_3[1]\n x = (x1 + x2 + x3) / 3\n y = (y1 + y2 + y3) / 3\n return (x, y)", "def centroid(points):\n return np.mean(points, axis=0)", "def compute_center(points: list) -> list:\n\n\tpolygon = numpy.array(points)\n\n\tlength = polygon.shape[0]\n\tsum_lon = numpy.sum(polygon[:, 0])\n\tsum_lat = numpy.sum(polygon[:, 1])\n\n\treturn [sum_lon / length, sum_lat / length]", "def computePolygonCentroid(pPolygon):\n return _almathinternal.computePolygonCentroid(pPolygon)", "def get_centroid(coords: List[Point]) -> Point:\n nvertices = len(coords)\n signed_area = 0.\n centroid = Point(0., 0.)\n\n for c in range(nvertices - 2):\n # X0, y0 = get_Xy(coords[c])\n # X1, y1 = get_Xy(coords[c + 1])\n X0, y0 = coords[c].Xy\n X1, y1 = coords[c + 1].Xy\n \n area = (X0 * y1) - (X1 * y0)\n signed_area += area\n centroid.X += (X0 + X1) * area\n centroid.y += (y0 + y1) * area\n\n\n # Process last few points\n X0, y0 = coords[nvertices - 2].Xy\n X1, y1 = coords[nvertices - 1].Xy\n\n area = (X0 * y1) - (X1 * y0)\n\n 
signed_area += area\n\n centroid.X += (X0 + X1) * area\n centroid.y += (y0 + y1) * area\n\n signed_area *= 0.5\n centroid.X /= (6. * signed_area)\n centroid.y /= (6. * signed_area)\n return centroid", "def LocatePSFcenter(PSF, xyCentroidSize=None):\n\n center = FindPSFcentroid(PSF, xyCentroidSize=None) #@\n pixel_center = tuple(np.around(center).astype(np.int32))\n\n return (center, pixel_center)", "def import_shape_file(self):\n\n shp_file_zip = os.path.abspath(os.path.join(self.shape_file.path))\n shp_file_name = self.unzip_file(shp_file_zip, settings.SHAPE_FILE_STORAGE_PATH+\"/files\") # unzip to the shapefile storage directory\n # the path to the actual .shp file wich should have been in the zip\n # file.\n if shp_file_name is not None:\n # Because Django automatically increments files instead of renameing\n # them, we should strip out _\\d+. this will turn file_8.zip into\n # file.zip which is probably the intended file name.\n cleaned_file_name = re.sub(r'_\\d+.zip', '.zip', self.shape_file.name)\n shp_file = os.path.abspath(os.path.join(settings.SHAPE_FILE_STORAGE_PATH, \"files\", shp_file_name))\n ds = DataSource(shp_file)\n layer = ds[0]\n\n # Clean up any old Features that are associate with this shapefile\n # & Create a new MapFeature Based on its geom_type\n\n if layer.geom_type == 'Point':\n PointMapFeature.objects.filter(source=self).delete()\n\n for feature in layer:\n geom = GEOSGeometry(feature.geom.wkt)\n map_feat = PointMapFeature(\n source = self,\n label = feature.get(self.label_column),\n geo_key = feature.get(self.geo_key_column),\n geom = geom\n )\n if self.geo_meta_key_column:\n map_feat.geo_meta = feature.get(self.geo_meta_key_column)\n map_feat.save()\n\n elif layer.geom_type == 'LineString':\n LineStringMapFeature.objects.filter(source=self).delete()\n\n for feature in layer:\n geom = GEOSGeometry(feature.geom.wkt)\n map_feat = LineStringMapFeature(\n source = self,\n label = feature.get(self.label_column),\n geo_key = feature.get(self.geo_key_column),\n geom = geom\n )\n if self.geo_meta_key_column:\n map_feat.geo_meta = feature.get(self.geo_meta_key_column)\n map_feat.save()\n\n elif layer.geom_type == 'Polygon':\n PolygonMapFeature.objects.filter(source=self).delete()\n for feature in layer:\n if feature.geom.geom_type == 'Polygon':\n geom = MultiPolygon(GEOSGeometry(feature.geom.wkt))\n if feature.geom.geom_type == 'MultiPolygon':\n geom = GEOSGeometry(feature.geom.wkt)\n\n map_feat = PolygonMapFeature(\n source = self,\n label = feature.get(self.label_column),\n geo_key = feature.get(self.geo_key_column),\n geom = geom\n )\n\n if self.geo_meta_key_column:\n map_feat.geo_meta = feature.get(self.geo_meta_key_column)\n map_feat.save()\n\n\n else:\n raise ValueError('Geometry Type: %s Is not supported. 
Only Point, LineString, Polygon are currently supported' % layer.geom_type)\n\n\n map_feat.save()", "def read_centroid_features(base_path, centroid_filename, features_filename):\n centroids = np.genfromtxt(os.path.join(base_path,centroid_filename), delimiter=',')\n features = np.genfromtxt(os.path.join(base_path,features_filename), delimiter=',')\n features = list(features.astype('int64'))\n return centroids, features", "def add_population_to_lsoa_centroid(path):\n pop_data = pd.read_csv(path)\n\n path = os.path.join(DATA_INTERMEDIATE, 'oa_centroids.shp')\n output_areas = gpd.read_file(path, crs='epsg:27700')\n\n output_areas = output_areas.merge(pop_data, left_on='LSOA11CD', right_on='code')\n\n path_out = os.path.join(DATA_INTERMEDIATE, 'oa_centroids.shp')\n output_areas.to_file(path_out, crs='epsg:27700')", "def asShape(self):\n shp = shapefile._Shape(shp_helper.shp_dict[self.geometryType.split('Geometry')[1].upper()])\n if self.geometryType != ESRI_POINT:\n shp.points = self.json[JSON_CODE[self.geometryType]]\n else:\n shp.points = [[self.json[X], self.json[Y]]]\n\n # check if multipart, will need to fix if it is\n if any(isinstance(i, list) for i in shp.points):\n coords = []\n part_indices = [0] + [len(i) for i in iter(shp.points)][:-1]\n## for i in shp.points:\n## coords.extend(i)\n## shp.points = coords\n shp.parts = shapefile._Array('i', part_indices)\n else:\n shp.parts = shapefile._Array('i', [0])\n\n if shp.shapeType not in (0,1,8,18,28,31):\n XMin = min(coords[0] for coords in shp.points)\n YMin = min(coords[1] for coords in shp.points)\n XMax = max(coords[0] for coords in shp.points)\n YMax = max(coords[1] for coords in shp.points)\n shp.bbox = shapefile._Array('d', [XMin, YMin, XMax, YMax])\n\n return shp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Requests the current weather data from OpenWeatherMap and generates a shapefile.
def getWeather(self, path_ids_file, ow_api, outputshp): logging.info("Reading file for city ids: " + path_ids_file) f = open(path_ids_file,"r") self.api_id = ow_api self.ids_txt = f.readline().strip() self.outputshp = outputshp logging.info("City ids found: " + str(f.readline().strip())) logging.info("Requesting using API KEY: " + self.api_id) logging.info('Request URL: '+'http://api.openweathermap.org/data/2.5/group?id={ids}&APPID={appid}&units=metric'.format(ids=self.ids_txt, appid=self.api_id)) self.r = requests.get('http://api.openweathermap.org/data/2.5/group?id={ids}&APPID={appid}&units=metric'.format(ids=self.ids_txt, appid=self.api_id)) logging.info("Recieved weather response.") wx_json = self.r.json() crs = from_epsg(4326) schema = { 'geometry': 'Point', 'properties': { 'city' :'str', 'humidity': 'int', 'pressure': 'int', 'temp': 'int', 'weather_de': 'str', 'wind_dir': 'float', 'wind_speed': 'float', } } logging.info("Creating output shapefile: " + self.outputshp) with fiona.open(self.outputshp, 'w', crs=crs, schema=schema, driver="ESRI Shapefile") as shpfile: for i in wx_json['list']: point = {u"type": u"Point", u"coordinates": [i['coord']['lon'], i['coord']['lat']]} properties = { 'city' : i['name'], 'humidity': i['main']['humidity'], 'pressure': i['main']['pressure'], 'temp': i['main']['temp'], 'weather_de': i['weather'][0]['main'], 'wind_dir': i['wind']['deg'], 'wind_speed': i['wind']['speed'], } shpfile.write({'geometry': point, 'properties': properties}) logging.info("Writing output shapefile: " + self.outputshp) logging.info("Closing file: " + path_ids_file) f.close()
[ "def download_city_next_hour_weather_data():\n start_time = datetime.now()\n end_time = (start_time + timedelta(hours=1))\n\n requests_params = {'id': CITY_CODE, 'appid': OPEN_WEATHER_API_KEY}\n\n response = requests.get(url=OPEN_WEATHER_API, params=requests_params)\n\n logger.info(msg='Response status ' + str(response.status_code))\n\n city_weather = json.loads(response.text)['main']\n\n is_data_valid = validate_weather_data(city_weather)\n if not is_data_valid:\n logger.error(\"Data is missing for this hour.\")", "def weather_api() -> Response:\n weather_base_url = \"http://api.openweathermap.org/data/2.5/weather?\"\n weather_api_key = keys[\"weather\"]\n city_name = location[\"my-city\"]\n weather_url = weather_base_url + \"appid=\" + weather_api_key + \"&q=\" + city_name + \"&units=metric\"\n return requests.get(weather_url)", "def get_weather_json(geocode):\n pass", "def generate(self):\n \n logging.info(\"Running weather data generation.\")\n \n # Running the private methods to simulated weather data.\n self.__generate_location()\n self.__merge_ref_data()\n self.__generate_timestamp()\n self.__merge_aggregate_data()\n self.__generate_weather_variables()\n self.__finalise_output()\n \n logging.info(\"Completed running weather data generation.\")", "def get_weather(self):\n self.yql_query = f'select * from weather.forecast where woeid={self.woeID}'\n self.yql_url = self.baseurl + urlencode({'q': self.yql_query}) + self.format\n result = requests.get(self.yql_url).text\n weather_data = json.loads(result)\n\n return weather_data", "def api_call():\n url = (\n \"http://api.openweathermap.org/data/2.5/weather?q=Perth,au&units=metric&appid=\"\n + API_KEY\n )\n resp = requests.get(url)\n if resp.status_code != 200:\n print(\"error somewhere\")\n weather = resp.json()\n pprint(weather)\n return weather", "def current_weather_from_geocode(geocode, units=\"metric\"):\n try:\n (deg, vel) = unit_markers(units)\n response = requests.get(f'{OW_API_BASE_URL}weather?appid={OW_API_KEY}&lon={geocode[1]}&lat={geocode[0]}&units={units}')\n resp = response.json()\n city = resp[\"name\"]\n conditions = resp[\"weather\"][0][\"description\"].title()\n weather_icon_url = f'{WEATHER_ICON_BASE_URL}{resp[\"weather\"][0][\"icon\"]}{WEATHER_ICON_SUFFIX}'\n windspeed = f'{round (resp[\"wind\"][\"speed\"], 1)} {vel}'\n if units == \"metric\":\n windspeed = f'{round((resp[\"wind\"][\"speed\"] * 3.6), 1)} {vel}'\n current_weather_details = {\n \"Temperature\": f'{round(resp[\"main\"][\"temp\"], 1)}{deg}',\n \"Feels Like\": f'{round(resp[\"main\"][\"feels_like\"], 1)}{deg}',\n \"High\": f'{round(resp[\"main\"][\"temp_max\"], 1)}{deg}',\n \"Low\": f'{round(resp[\"main\"][\"temp_min\"], 1)}{deg}',\n \"Relative Humidity\": f'{resp[\"main\"][\"humidity\"]}%',\n \"Wind Speed\": windspeed,\n \"Wind Direction\": f'{resp[\"wind\"][\"deg\"]}° {wind_direction_logical(resp[\"wind\"][\"deg\"])}'\n }\n return {\n 'city': city, \n 'conditions': conditions, \n 'weather_icon_url': weather_icon_url, \n 'current_weather_details': current_weather_details,\n 'units': units\n }\n except: \n return False", "def generate_wind():\n# Taken by converting UTM Zone 11 coordinates on\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.html\n# These values specific to files called yosemite_landscape_12-03-2019_0900_120m\n west_lon = -120.006255\n east_lon = -119.4736\n south_lat = 37.464649\n north_lat = 37.822073\n\n# Open .shp and .dbf files with rb\n myshp = open(\"SHAPEFILES/HOUR1/yosemite_landscape_12-03-2019_0900_120m.shp\", 
\"rb\")\n mydbf = open(\"SHAPEFILES/HOUR1/yosemite_landscape_12-03-2019_0900_120m.dbf\", \"rb\")\n wind = Wind(myshp, mydbf, west_lon, east_lon, south_lat, north_lat)\n\n# Regrid the base data onto a 30mx30m grid and bounded at the coordinates described\n# Our model focuses on the area between -120W to -119.5W, and 37.5N to 37.8N\n new_wind = wind.regrid(30, -120, -119.5, 37.5, 37.8)\n return new_wind", "def run(input_shapefile: \"Input Shapefile\" =\"counties/ctygeom.shp\"):\n # Ceate outline geojson structure\n geojson = {\"type\": \"FeatureCollection\", \"features\": [], \"crs\": {\"type\": \"EPSG\", \"properties\": {\"code\": None}}, \"bbox\": []}\n\n num_ticks = 60\n # input_shapefile = input(\"Enter the path (if necessary) and name fo the input shapefile: \")\n\n # print(\"{}\".format(\"=\" * num_ticks))\n # print(\"Getting information for '{}'\".format(input_shapefile))\n # print(\"{}\\n\".format(\"-\" * num_ticks))\n logging.info(\"Getting information for '{}'\".format(input_shapefile))\n\n try:\n with fiona.open(input_shapefile, \"r\") as fh:\n logging.info(\"Driver: \\t{}\".format(fh.driver))\n logging.info(\"Encoding:\\t{}\".format(fh.encoding))\n logging.info(\"Geometry:\\t{}\".format(fh.schema[\"geometry\"]))\n logging.info(\"CRS: \\t{}\".format(fh.crs[\"init\"].upper()))\n logging.info(\"Bounds: \\t{}\".format(fh.bounds))\n logging.info(\"Features \\t{}\".format(len(fh)))\n\n print(\"Attribute Types\")\n\n # Add crs and bbox properties to the geojson structure\n geojson[\"crs\"][\"properties\"][\"code\"] = int(fh.crs[\"init\"].split(\":\")[1])\n geojson[\"bbox\"] = fh.bounds\n\n header_string = \"\"\n csv_header = \"\"\n for k, v in fh.schema[\"properties\"].items():\n print(\"\\t{:10}\\t{}\".format(k, v))\n header_string += \"\\t{:>30}\".format(k)\n csv_header += \"{}\\t\".format(k)\n print(\"\\n\"+header_string)\n\n with open(input_shapefile.split(\".\")[0]+\".csv\", \"w\") as fh_csv:\n fh_csv.write(\"{}\\n\".format(csv_header[:-1]))\n for feature in fh:\n # add each feature to geojson structure, Fiona gives it to us in a suitable format so no further processing\n # required\n geojson[\"features\"].append(feature)\n\n data_string = \"\"\n csv_data = \"\"\n for k,v in feature[\"properties\"].items():\n data_string+= \"\\t{:>30}\".format(v)\n csv_data += \"{}\\t\".format(v)\n print(data_string)\n fh_csv.write(\"{}\\n\".format(csv_data[:-1]))\n\n # Create output geojson file and convert geojson python stucture to json\n with open(input_shapefile.split(\".\")[0]+\".json\", \"w\") as fh:\n fh.write(json.dumps(geojson))\n\n except Exception as e:\n print(e)\n quit()\n finally:\n print(\"{}\".format(\"=\" * num_ticks))", "def get_weather(location):\n log.debug(\"parsing weather from openweathermap\")\n config = get_config()\n\n if hasattr(location, \"lat\") and hasattr(location, \"lon\"):\n url_location = f\"lat={location.lat}&lon={location.lon}\"\n elif hasattr(location, \"zipcode\") and hasattr(location, \"country_code\"):\n url_location = f\"zip={location.zipcode},{location.country_code}\"\n else:\n url_location = f\"q={location.city}\"\n forecast_url = f\"http://api.openweathermap.org/data/2.5/forecast?{url_location}&APPID={api_key}&units={config.units}&lang={config.locale.language_code}\"\n try:\n response = requests.get(forecast_url)\n response = response.json()\n\n if str(response[\"cod\"]) == \"400\":\n raise WeatherError(ErrorCode.LOCATION_ERROR, response[\"message\"])\n elif str(response[\"cod\"]) == \"401\":\n raise WeatherError(ErrorCode.API_ERROR)\n elif 
str(response[\"cod\"]) == \"429\":\n raise WeatherError(ErrorCode.API_TIMEOUT_ERROR)\n elif str(response[\"cod\"]) == \"404\":\n raise WeatherError(ErrorCode.LOCATION_ERROR)\n\n # Parse the output of Open Weather Map's forecast endpoint\n if not (hasattr(location, \"lat\") and hasattr(location, \"lon\")):\n location.set_lat_and_lon(response[\"city\"][\"coord\"][\"lat\"], response[\"city\"][\"coord\"][\"lon\"])\n\n forecasts = {}\n for x in response[\"list\"]:\n if str(datetime.date.fromtimestamp(x[\"dt\"])) not in forecasts:\n forecasts[str(datetime.date.fromtimestamp(x[\"dt\"]))] = \\\n list(filter(lambda forecast: datetime.date.fromtimestamp(forecast[\"dt\"]) == datetime.date.fromtimestamp(x[\"dt\"]), response[\"list\"]))\n\n weather = Weather()\n for key, forecast in forecasts.items():\n condition_list = []\n weather_condition = [x[\"weather\"][0][\"main\"] for x in forecast]\n weather_description = [x[\"weather\"][0][\"description\"] for x in forecast]\n weather_id = [x[\"weather\"][0][\"id\"] for x in forecast]\n for x in range(len(weather_condition)):\n temp_condition = WeatherCondition(__get_severity_from_open_weather_map_id(weather_id[x]), weather_description[x], __get_condition_type(weather_id[x]))\n condition_list.append(temp_condition)\n\n __parse_weather(\n weather,\n datetime.datetime.strptime(key, \"%Y-%m-%d\").date(),\n location,\n 3,\n [datetime.datetime.strptime(x, \"%H:%M:%S\").time() for x in [x[\"dt_txt\"].split(\" \")[1] for x in forecast]],\n [x[\"main\"][\"temp\"] for x in forecast],\n condition_list,\n [x[\"main\"][\"pressure\"] for x in forecast],\n [x[\"main\"][\"humidity\"] for x in forecast],\n [x[\"wind\"][\"speed\"] for x in forecast],\n [x[\"wind\"][\"deg\"] for x in forecast]\n )\n except (requests.exceptions.ConnectionError, ValueError):\n raise WeatherError(ErrorCode.NO_NETWORK_ERROR, \"Weather could not be fetched.\")\n return weather", "def get_county_map():\n print(\"Retrieving Oregon County Map...\")\n with urlopen('https://raw.githubusercontent.com/emilysellinger/Phoenix/main/phoenix/data/Oregon_counties_map.geojson') as response:# noqa\n counties1 = json.load(response)\n return counties1", "def get_weather(city_name, weather_api):\n\n response = requests.get(\n \"https://community-open-weather-map.p.rapidapi.com/weather?mode=json&q={}\".format(city_name),\n headers={\n \"X-RapidAPI-Host\": \"community-open-weather-map.p.rapidapi.com\",\n \"X-RapidAPI-Key\": weather_api\n })\n\n content = json.loads(response.content.decode('utf8').replace(\"'\", '\"'))\n\n res_dict = dict()\n res_dict['humidity'] = content['main']['humidity']\n res_dict['temp'] = round((content['main']['temp'] - 273.15), 1)\n res_dict['temp_max'] = round((content['main']['temp_max'] - 273.15), 1)\n res_dict['temp_min'] = round((content['main']['temp_min'] - 273.15), 1)\n res_dict['weather'] = content['weather'][0]['main']\n\n return res_dict", "def weather_object_get(api_city):\n \n weather_param = []\n weather_param = openweathermap_get(api_city)\n weather_object = weather_class.Weather(\n weather_param[0], weather_param[1], weather_param[2], weather_param[3])\n return weather_object", "def openweathermap_get(api_city):\n\n url = f'http://api.openweathermap.org/data/2.5/weather?q={api_city[1]}&appid={api_city[0]}'\n response = requests.get(url)\n data = json.loads(response.text)\n\n temperature = data['main']['temp']\n current_weather = data['weather'][0]['main']\n description = data['weather'][0]['description']\n weather_param = []\n weather_param = [api_city[1], current_weather, 
description, temperature]\n return weather_param", "def get_shapefile(self, date, ignore_cache=False):\n\t\turl = date.strftime('http://droughtmonitor.unl.edu/data/shapefiles_m/USDM_%Y%m%d_M.zip')\n\t\tfilename = os.path.basename(url)\n\t\tpath = os.path.join(self.cache, 'zip', filename)\n\t\tdirname = os.path.dirname(path)\n\t\t\n\t\tif os.path.exists(path) and not ignore_cache:\n\t\t\treturn path\n\n\t\t# go fetch the shapefile, and throw an error if something broke\n\t\tr = requests.get(url)\n\t\tr.raise_for_status()\n\n\t\t# mkdirs as needed\n\t\tif not os.path.exists(dirname):\n\t\t\tos.makedirs(dirname)\n\n\t\t# if we get here we have a file\n\t\twith open(path, 'wb') as f:\n\t\t\tf.write(r.content)\n\n\t\treturn path", "def fetch_forecast(location):\n\n api_key = CredentialsConfig.OPENWEATHER_API_KEY\n\n lat, lon = list(location.values())[0]\n url = (f\"https://api.openweathermap.org/data/2.5/onecall?\"\n f\"lat={lat}&lon={lon}\"\n f\"&exclude=current\"\n f\"&units=metric\"\n f\"&appid={api_key}\")\n\n response = requests.get(url)\n full_response_dict = response.json()\n timezone_offset = full_response_dict[\"timezone_offset\"]\n hourly_forecast = full_response_dict[\"hourly\"]\n daily_forecast = full_response_dict[\"daily\"]\n\n return hourly_forecast, daily_forecast, timezone_offset", "def load_demo_data():\n\n URL = \"https://samples.openweathermap.org/data/2.5/weather?q=London,uk&appid=b6907d289e10d714a6e88b30761fae22\"\n\n json_string = request.urlopen(URL).read()\n\n data = json.loads(json_string)\n return data", "def weather(city_or_zip, weather_url):\n if city_or_zip.isdigit() is True:\n param = {'zip': city_or_zip, 'APPID': '481c487c9b2b7634971be29cdd8a8516'}\n else:\n param = {'q': city_or_zip, 'APPID': '481c487c9b2b7634971be29cdd8a8516'}\n answer = requests.get(weather_url, params=param, timeout=(5, 14))\n weather_site(answer, city_or_zip)\n if answer.status_code == 200:\n print('connection made')\n parsed = json.loads(answer.text)\n display_weather(parsed)", "def request_weather(url):\n response = requests.get(url)\n response_dict = response.json()\n return response_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to snap lines to points.
def snapLineToPoints(self, pointshp, lineshp, outshpdir): pass
[ "def snap_line(line, points, tolerance=1e-9): \n if shapely.get_type_id(line.geometry) == 0:\n if shapely.distance(point,line) < tolerance:\n line = shapely.snap(line, points, tolerance=1e-9)\n elif shapely.get_type_id(line.geometry) == 4:\n points = [point for point in points if shapely.distance(point,line) < tolerance]\n for point in points:\n line = shapely.snap(line, points, tolerance=1e-9)\n return line", "def snap_to_line(self, polyline_geometry):\n if not isinstance(self, Point):\n raise Exception('Snap to line can only be performed on a Point geometry object.')\n if polyline_geometry.type.lower() != 'polyline':\n raise Exception('Snapping target must be a single ArcGIS Polyline geometry object.')\n if self.spatial_reference is None:\n raise Warning('The spatial reference for the point to be snapped to a line is not defined.')\n if polyline_geometry.spatial_reference is None:\n raise Warning('The spatial reference of the line being snapped to is not defined.')\n if (self.spatial_reference != polyline_geometry.spatial_reference and\n self.spatial_reference.wkid != polyline_geometry.spatial_reference.wkid and\n self.spatial_reference.latestWkid != polyline_geometry.spatial_reference.wkid and\n self.spatial_reference.wkid != polyline_geometry.spatial_reference.latestWkid and\n self.spatial_reference.latestWkid != polyline_geometry.spatial_reference.latestWkid):\n raise Exception('The spatial reference for the point and the line are not the same.')\n\n if HASARCPY:\n polyline_geometry = polyline_geometry.as_arcpy\n return Point(self.as_arcpy.snapToLine(in_point=polyline_geometry))\n\n elif HASSHAPELY:\n polyline_geometry = polyline_geometry.as_shapely\n point_geometry = self.as_shapely\n snap_point = polyline_geometry.interpolate(polyline_geometry.project(point_geometry))\n snap_point = Point({'x': snap_point.x, 'y': snap_point.y, 'spatialReference': self.spatial_reference})\n return snap_point\n\n else:\n raise Exception('Either arcpy or Shapely is required to perform snap_to_line')", "def snap_to_line(self, second_geometry):\r\n return self._call_method(name='snap_to_line',\r\n is_ga=True,\r\n **{'second_geometry' : second_geometry})", "def _gather_points(self):\n # This is just a stub for now. 
We should really find the lines only\n # inside the screen range here.\n\n x = self.index.get_data()\n y = self.value.get_data()\n rad = min(self.width / 2.0, self.height / 2.0)\n sx = x * rad + self.x + self.width / 2.0\n sy = y * rad + self.y + self.height / 2.0\n\n points = transpose(array((sx, sy)))\n self._cached_data_pts = points\n self._cache_valid = True\n return", "def _extend_line(coords, target, tolerance, snap=True):\n if snap:\n extrapolation = _get_extrapolated_line(\n coords[-4:] if len(coords.shape) == 1 else coords[-2:].flatten(),\n tolerance,\n )\n int_idx = target.sindex.query(extrapolation, predicate=\"intersects\")\n intersection = shapely.intersection(\n target.iloc[int_idx].geometry.array, extrapolation\n )\n if intersection.size > 0:\n if len(intersection) > 1:\n distances = {}\n ix = 0\n for p in intersection:\n distance = shapely.distance(p, shapely.points(coords[-1]))\n distances[ix] = distance\n ix = ix + 1\n minimal = min(distances.items(), key=operator.itemgetter(1))[0]\n new_point_coords = shapely.get_coordinates(intersection[minimal])\n\n else:\n new_point_coords = shapely.get_coordinates(intersection[0])\n coo = np.append(coords, new_point_coords)\n new = np.reshape(coo, (len(coo) // 2, 2))\n\n return new\n return coords\n\n extrapolation = _get_extrapolated_line(\n coords[-4:] if len(coords.shape) == 1 else coords[-2:].flatten(),\n tolerance,\n point=True,\n )\n return np.vstack([coords, extrapolation])", "def init_draw_points(self):\n self.draw_points = np.vstack((self.points, self.points[0]))\n self.draw_points = self.draw_points.T\n\n # thick line for alignment purposes\n self.thick_line = self.draw_points[:, :2]\n for i in range(1, len(self.draw_points[0]) - 1):\n if math.dist(self.draw_points[:, i].flatten(), self.draw_points[:, i + 1].flatten()) > \\\n math.dist(self.thick_line[:, 0].flatten(), self.thick_line[:, 1].flatten()):\n self.thick_line = self.draw_points[:, i:i + 2]", "def snapToNearestGridPoint(self):\n self.x = round(self.x/50)*50\n self.y = round(self.y/50)*50", "def snap(self):\n vertex = self.vertices # Gets all the vertices of the quadraterial\n lst = [] # the list that is going to hold \n for i in range(0,4):\n x = vertex[i].x - int(vertex[i].x) # deceimal value of x coordinate\n y = vertex[i].y - int(vertex[i].y) # Gets the decemial value of x\n if(x >= 0.5 or ( x < 0 and x >= -0.5)):\n x = float(math.ceil(vertex[i].x)) \n else:\n x = float(math.floor(vertex[i].x)) \n if(y >= 0.5 or ( y < 0 and y >= -0.5)):\n y = float(math.ceil(vertex[i].y))\n else:\n y = float(math.floor(vertex[i].y))\n lst.append(x) #adds the elements into the list\n lst.append(y)\n for i in range(0,8,2):\n if(lst[i%8] == lst[(i+2)%8] and lst[(i+1)%8] == lst[(i+3)%8] or lst[i%8] == lst[(i+4)%8] and lst[(i+1)%8] == lst[(i+5)%8]):\n return self # Checks if there are any points that are missing after being snapped \n return Quadrilateral(*lst) # Creates the new quadlaterial with the list\n # TODO", "def draw_polyline(self, points):\n start = points[0]\n\n self.go_to_point(start[0],start[1])\n self.start()\n\n for point in points[1:]:\n self.draw_to_point(point[0],point[1])\n self.last = point\n\n self.draw_to_point(start[0], start[1])\n self.stop()", "def line(self, points, **kwargs):\r\n if len(points) == 1:\r\n pts = [self.curXY, points[0]]\r\n else:\r\n pts = points\r\n pts = self.points_to_image(pts)\r\n self.draw.line(pts, **kwargs)", "def projectPointToLine(self, *args):\n return _coin.SbDPViewVolume_projectPointToLine(self, *args)", "def _walk_line(p0, p1):\n 
# unpack the point tuples\n x0, y0 = p0\n x1, y1 = p1\n\n dx, dy = x1 - x0, y1 - y0\n yi = 1\n if dy < 0:\n yi = -1\n dy = -dy\n\n D = 2 * dy - dx\n x = np.arange(x0, x1 + 1, dtype=int).T\n y = np.zeros((len(x),), dtype=int)\n\n yy = y0\n for i in np.arange(len(x)):\n y[i] = yy\n if D > 0:\n yy = yy + yi\n D = D - 2 * dx\n\n D = D + 2 * dy\n\n # sort by major axis, and index the cells\n xI = np.argsort(x)\n x = x[xI]\n y = y[xI]\n\n return x, y", "def iterative_end_point_fit(self, list_of_points_for_lines, breakpoints, start_of_region, end_of_region):\n minimum_distance_to_be_a_corner = 0.06 # meter, the value set is a guess and may need adjusting\n N_min = 3 # this probably should be turned into a variable part of self\n if (end_of_region - start_of_region + 1) <= N_min:\n return None\n max_distance = 0\n farthest_point = -1\n # number_of_potential_corners = 0 # an attempt to ignore single points that disrupt clearly straight lines\n for potential_corner in range(start_of_region + 1, end_of_region):\n distance_to_line = self.distance_line_to_point(breakpoints[start_of_region][0], breakpoints[end_of_region][0], breakpoints[potential_corner][0])\n if distance_to_line > minimum_distance_to_be_a_corner:\n # number_of_potential_corners += 1\n if distance_to_line > max_distance:\n max_distance = distance_to_line\n farthest_point = potential_corner\n\n if farthest_point == -1: # or number_of_potential_corners < 2:\n list_of_points_for_lines.append(self.create_wall(breakpoints[start_of_region], breakpoints[end_of_region]))\n else:\n self.iterative_end_point_fit(list_of_points_for_lines, breakpoints, start_of_region, farthest_point)\n self.iterative_end_point_fit(list_of_points_for_lines, breakpoints, farthest_point, end_of_region)", "def _snap_to_nearest(self):\n assert self.steps is not None\n pos = float(int(self._knob_pos() * self.steps + 0.5))/self.steps\n\n self.set_knob_pos(pos)", "def toggle_snap_to_geometry():\r\n pass", "def _to_line_df(self):\n line_df = self.df.copy()\n line_df[\"prev_pt\"] = line_df.geometry.shift()\n line_df[\"t\"] = self.df.index\n line_df[\"prev_t\"] = line_df[\"t\"].shift()\n line_df[\"line\"] = line_df.apply(self._connect_prev_pt_and_geometry, axis=1)\n return line_df.set_geometry(\"line\")[1:]", "def nearest_point_on_line(point, line): \n return line.interpolate(line.project(point))", "def get_points(self):\n if self._is_horizontal():\n return [Point(self.from_point.x, y) for y in range(self.from_point.y, self.to_point.y + 1)]\n elif self._is_vertical():\n return [Point(x, self.from_point.y) for x in range(self.from_point.x, self.to_point.x + 1)]\n else:\n raise NotImplementedError(\"Only horizontal and vertical lines are implemented so far\")", "def nearest_point_on_edges(point, edges): \n edge = nearest_edge(point, edges)\n snap = nearest_point_on_line(point, edge.geometry)\n return snap" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the schema of a shapefile. PARAMETER(S)
def getSchema(path): path = path with fiona.open(path) as shpfile: schema = shpfile.schema.copy() return schema
[ "def Shapefile(**keywords):\n keywords['type'] = 'shape'\n return CreateDatasource(keywords)", "def schema(self):\n return self.get(\"/schema\").json()", "def view_schema(self):\n pipeline = self._get_one_pipeline()\n uri = pipeline.get_artifacts_uri_by_component(\n GDPComponent.DataSchema.name)[0]\n view_schema(uri)", "def inputSchemaType(self):\n return self.sourceType + \"_schema\"", "def load(fp: str) -> BaseSchema:", "def get_shape(dset):\n # Case of a constant dataset\n if isinstance(dset, h5py.Group):\n shape = dset.attrs['shape']\n # Case of a non-constant dataset\n elif isinstance(dset, h5py.Dataset):\n shape = dset.shape\n\n return(shape)", "def full_schema_path(schema):\n\n return \"/schemas/\"+schema+\".schema.json\"", "def shp_info(shp,shx):\n shpf = tempfile.NamedTemporaryFile('wb', suffix='.shp',delete=False)\n shpf.write(shp)\n shpf.close()\n shxf = open(shpf.name[:-3]+'shx','wb')\n shxf.write(shx)\n shxf.close()\n shp = pysal.open(shpf.name,'r')\n n = len(shp)\n shp.close()\n #Mapscript requires a DBF to read a shapefile.\n dbf = pysal.open(shpf.name[:-3]+'dbf','w')\n dbf.header = ['dtmValue']\n dbf.field_spec = [('C',7,0)]\n for i in range(n):\n dbf.write([\"#%0.6X\"%(i+ID_OFFSET)])\n dbf.close()\n\n shp = mapscript.shapefileObj(shpf.name)\n n = shp.numshapes\n x = shp.bounds.minx\n y = shp.bounds.miny\n X = shp.bounds.maxx\n Y = shp.bounds.maxy\n #wgs84 = mapscript.projectionObj('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ')\n #gmerc = mapscript.projectionObj('init=epsg:3857')\n shp.bounds.project(WGS84,GMERC)\n gx = shp.bounds.minx\n gy = shp.bounds.miny\n GX = shp.bounds.maxx\n GY = shp.bounds.maxy\n os.remove(shpf.name)\n os.remove(shxf.name)\n return (n,[x,y,X,Y],[gx,gy,GX,GY])", "def _get_specific_schema(resource, method, msg_type):\n # type: (object, str, str) -> Optional[Schema]\n log.debug(\n 'Marshmallow._get_specific_schema(%s, %s, %s)',\n resource, method, msg_type\n )\n\n sch_name = '%s_%s_schema' % (method.lower(), msg_type)\n specific_schema = getattr(resource, sch_name, None)\n if specific_schema is not None:\n return specific_schema\n\n sch_name = '%s_schema' % method.lower()\n specific_schema = getattr(resource, sch_name, None)\n return specific_schema", "def get_dataset_schema(dataset):\n return dataset.table_meta[SINGLE_TABLE]", "def _get_schema(cls, resource, method, msg_type):\n # type: (object, str, str) -> Optional[Schema]\n log.debug(\n 'Marshmallow._get_schema(%s, %s, %s)',\n resource, method, msg_type\n )\n specific_schema = cls._get_specific_schema(\n resource, method, msg_type\n )\n if specific_schema is not None:\n return specific_schema\n return getattr(resource, 'schema', None)", "def get_dataframe_tf_record_schema(spark_df):\n return _get_dataframe_tf_record_schema_json(spark_df)[0]", "def shape_type(self):\n return MSO_SHAPE_TYPE.TABLE", "def get_shapefile():\n gdf = gpd.read_file(\"shapes/MD_precinct_primaries\")\n gdf[\"County\"] = gdf.NAME.apply(lambda x: x.split(\" Precinct \")[0])\n gdf[\"Precinct\"] = gdf.NAME.apply(lambda x: x.split(\" Precinct \")[-1])\n gdf[\"MATCH\"] = gdf.County + \" \" + gdf.Precinct\n\n elec_cols_function = lambda x: (\"GOV\" in x or \"SEN\" in x or \"COMP\" in x or \"PRES\" in x or \"AG\" in x) and (\"SSEN\" not in x and \"SEND\" not in x)\n elec_cols = list(filter(elec_cols_function, gdf.columns))\n vap_cols_function = lambda x: \"VAP\" in x\n vap_cols = list(filter(vap_cols_function, gdf.columns))\n cols = [\"County\",\"MATCH\"] + vap_cols + elec_cols\n gdf = gdf[cols]\n gdf[elec_cols + vap_cols] = 
gdf[elec_cols + vap_cols].astype(int)\n \n return gdf", "def getSchemaFile(self): #$NON-NLS-1$\r\n return self.xsdFile", "def load_schema(path):\n with open(path) as json_data:\n schema = json.load(json_data)\n return schema", "def get_std_shape(shape_text):\n std_shape = None\n # Find the shape in the full dictionary\n shape_code = SHAPE_FULL.get(shape_text)\n if shape_code is not None:\n std_shape = SHAPE_STANDARD[shape_code]\n return std_shape", "def loads(self, schema_txt: str) -> ShExJ.Schema:\n self.schema_text = schema_txt\n if schema_txt.strip()[0] == '{':\n # TODO: figure out how to propagate self.base_location into this parse\n return cast(ShExJ.Schema, loads(schema_txt, ShExJ))\n else:\n return generate_shexj.parse(schema_txt, self.base_location)", "def get_schema(self, variant=process_schema_factory.DFG_FREQ, parameters=None):\n if parameters is None:\n parameters = {}\n parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key\n parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key\n\n if not self.sorted_dataframe_by_case_id:\n self.sort_dataframe_by_case_id()\n\n if self.most_common_variant is not None:\n parameters[ws_constants.PARAM_MOST_COMMON_VARIANT] = self.most_common_variant\n parameters[ws_constants.PARAM_MOST_COMMON_PATHS] = self.most_common_paths\n\n parameters[constants.GROUPED_DATAFRAME] = self.get_reduced_grouped_dataframe()\n\n if self.variants_df is not None:\n parameters[\"variants_df\"] = self.variants_df\n\n return list(process_schema_factory.apply(self.get_reduced_dataframe(), variant=variant, parameters=parameters)) + [self.get_log_summary_dictio()]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the crs of the given .shp file. PARAMETER(S)
def getCrs(path): path = path with fiona.open(path) as shpfile: crs = shpfile.crs return crs
[ "def getCrs(self):\n with self._getDatasetLock:\n\n # use gcp if available\n if len(self.dataset.gcps[0]) != 0 and self.dataset.gcps[1]:\n crs = self.dataset.gcps[1]\n else:\n crs = self.dataset.crs\n\n # if no crs but the file is a NITF or has a valid affine transform then\n # consider it as 4326\n hasTransform = self.dataset.transform != Affine.identity()\n isNitf = self.dataset.driver.lower() in {'NITF'}\n if not crs and (hasTransform or isNitf):\n crs = make_crs(4326)\n\n return crs", "def _get_raw_crs(self) -> CRS:\n raise NotImplementedError", "def reproject(shapefile, crs):\n\treturn shapefile.to_crs(crs) if shapefile.crs != crs else shapefile", "def read_crs_wkt(path):\n with open(path, 'r') as f:\n crs = CRS.from_wkt(f.read())\n return crs", "def crs(crs_: pyproj.crs.CRS) -> None:\n if len(crs_.axis_info)==2:\n warnings.warn('2D crs used. Will be promoted for transforms.')\n return None", "def proj(self) -> _CRS:\n return self._crs", "def plot_cis_shp(sname):\n # Colors\n colors = dict({'I': 'm',\n 'L': 'lightgray',\n 'W': 'b',\n 'N': 'r',\n 'F': 'c'})\n\n # Manage map projection\n to_lon_lat = _get_lon_lat_converter(sname)\n\n # Read shapefile\n df_records, empty = load_cis_shp(sname, ascending=False)\n df_managed = _manage_shapefile_types(df_records)\n\n # Plot polygons\n for (i, shape) in enumerate(df_records.shapes.values):\n\n # Get polygon coordinates\n lon, lat = _get_polygon_lon_lat(shape, to_lon_lat, separate=True)\n\n # Add to plot\n plt.fill(\n lon, lat, fc=colors[df_managed.iloc[i].LEGEND], ec='k', linestyle='-')\n plt.text(lon.mean(), lat.mean(), df_managed.iloc[i].LEGEND)\n\n plt.show()", "def get_srid(self, filename):\n # Read projection information from shapefile prj file.\n filepath = self.get_path(filename)\n prj_filepath = os.path.splitext(filepath)[0] + '.prj'\n try:\n with open(prj_filepath) as prj_file:\n wkt = prj_file.read().strip()\n except IOError:\n logger.warn(\"Unable to open projection information: %s\"\n % filename)\n return 0\n\n # Attempt to identify EPSG SRID using GDAL.\n if gdal:\n sr = osr.SpatialReference()\n sr.ImportFromESRI([wkt])\n res = sr.AutoIdentifyEPSG()\n if res == 0:\n # Successfully identified SRID.\n srid = int(sr.GetAuthorityCode(None))\n logger.debug(\"GDAL returned SRID %s: %s\" % (srid, filename))\n return srid\n\n # Try querying prj2EPSG API.\n params = urllib.parse.urlencode({'terms': wkt, 'mode': 'wkt'})\n resp = urllib.request.urlopen('http://prj2epsg.org/search.json?'\n + params)\n data = json.load(resp)\n if data['exact']:\n # Successfully identified SRID.\n srid = int(data['codes'][0]['code'])\n logger.debug(\"prj2EPSG API returned SRID %s: %s\"\n % (srid, filename))\n return srid\n\n # Unable to identify EPSG SRID. 
Use custom SRID.\n srs = self.tables.public.spatial_ref_sys\n with self.database.session() as sess:\n srid = sess.query(srs.srid).filter(srs.srtext == wkt).first()\n if srid:\n return srid[0]\n else:\n if gdal:\n # Need to define custom projection since not in database.\n logger.warn(\"Defining custom projection: %s\" % filename)\n proj4 = sr.ExportToProj4().strip()\n if not proj4:\n raise RuntimeError(\"Unable to project: %s\" % filename)\n with self.database.session() as sess:\n srid = sess.query(func.max(srs.srid)).one()[0] + 1\n projection = srs(srid=srid,\n auth_name=\"custom\", auth_srid=srid,\n srtext=wkt, proj4text=proj4)\n sess.add(projection)\n srid = projection.srid\n else:\n raise RuntimeError(\"No GDAL: unable to define projection.\")\n logger.debug(\"Using custom SRID %s: %s\" % (srid, filename))\n return srid", "def epsg_from_crs(crs): # -> int | None:\n ...", "def explicit_crs_from_epsg(crs=..., epsg=...): # -> CRS:\n ...", "def select_crs(self):\n crs_dialog = QgsProjectionSelectionDialog()\n crs_dialog.setShowNoProjection(True)\n if self.dlg.crsLineEdit.text() != \"None\":\n old_crs = QgsCoordinateReferenceSystem(\"EPSG:{}\".format(self.dlg.crsLineEdit.text()))\n crs_dialog.setCrs(old_crs)\n crs_dialog.exec()\n if crs_dialog.crs().postgisSrid() == 0:\n self.dlg.crsLineEdit.setText(\"None\")\n else:\n self.dlg.crsLineEdit.setText(\"{}\".format(crs_dialog.crs().postgisSrid()))", "def common_crs(geoms: Iterable[Geometry]) -> Optional[CRS]:\n all_crs = [g.crs for g in geoms]\n if len(all_crs) == 0:\n return None\n ref = all_crs[0]\n for crs in all_crs[1:]:\n if crs != ref:\n raise CRSMismatchError()\n return ref", "def load_cis_shp(name, ascending=True):\n sf = shapefile.Reader(name)\n fld = np.array(sf.fields)[:, 0]\n shp = np.array(sf.shapes())\n rcd = np.array(sf.records(), dtype='<U36')\n\n # Empty strings become X\n rcd[rcd == ''] = 'X'\n\n # Load to pandas dataframe\n dataframe = pd.DataFrame(rcd, columns=fld[1:])\n\n # Flag as empty if not enough fields\n empty = dataframe.shape[1] < 11\n\n # Convert area to numeric and sort\n if not empty:\n dataframe['shapes'] = shp\n dataframe['AREA'] = np.float64(dataframe.AREA.values)\n dataframe = dataframe.sort_values(\n 'AREA', ascending=ascending, ignore_index=True)\n\n return dataframe, empty", "def read_gdal_projection(dset):\n wkt = dset.GetProjection()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(wkt)\n # src = None\n return srs", "def SetCRS(self,CRS):\t\n\t\tself.CRS=CRS\n\t\tself.crs_wkt=None", "def read_cris_geo (filelist, ephemeris = False):\n \n if type(filelist) is str: filelist = [filelist]\n if len(filelist) ==0: return None\n \n geos = [h5py.File(filename, 'r') for filename in filelist]\n \n if ephemeris == False: \n Latitude = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['Latitude'] [:] for f in geos])\n Longitude = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['Longitude'][:] for f in geos])\n SatelliteAzimuthAngle = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['SatelliteAzimuthAngle'][:] for f in geos])\n SatelliteRange = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['SatelliteRange'][:] for f in geos])\n SatelliteZenithAngle = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['SatelliteZenithAngle'][:] for f in geos])\n return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle\n if ephemeris == True:\n FORTime = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['FORTime'] [:] for f in geos])\n MidTime = 
np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['MidTime'] [:] for f in geos])\n SCPosition = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['SCPosition'] [:] for f in geos])\n SCVelocity = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['SCVelocity'] [:] for f in geos])\n SCAttitude = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['SCAttitude'] [:] for f in geos])\n return FORTime, MidTime, SCPosition, SCVelocity, SCAttitude\n if ephemeris == 'Solar':\n SolarZenithAngle = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['SolarZenithAngle'] [:] for f in geos])\n SolarAzimuthAngle = np.concatenate([f['All_Data']['CrIS-SDR-GEO_All']['SolarAzimuthAngle'] [:] for f in geos])\n return SolarAzimuthAngle, SolarZenithAngle", "def readGeo(self, rast):\n\n\t\ttry:\n\t\t\tds = gdal.Open(rast)\n\n\t\t\tgtransf = ds.GetGeoTransform()\n\t\t\tprj = ds.GetProjection()\n\t\t\tx_size = gtransf[1]\n\t\t\ty_size = gtransf[5] * (-1)\n\n\t\t\tsrs = osr.SpatialReference(wkt=prj)\n\t\t\tif srs.IsProjected:\n\t\t\t\tEPSG = int(srs.GetAttrValue(\"authority\", 1))\n\t\t\telse:\n\t\t\t\tEPSG = None\n\n\t\t\tdel ds\n\n\t\t\treturn gtransf, prj, x_size, y_size, EPSG\n\n\t\texcept IOError:\n\t\t\twarnings.warn(\"Geographical information has not been readed.\", stacklevel=3)\n\n\t\t\tgtransf = None\n\t\t\tprj = None\n\t\t\tx_size = None\n\t\t\ty_size = None\n\t\t\tEPSG = None\n\n\t\t\treturn gtransf, prj, x_size, y_size, EPSG", "def for_geom(cls, shape, wcs):\n self = cls()\n ax1, ax2 = wcs.wcs.lng, wcs.wcs.lat\n ndim = len(shape)\n # The axes are numbered from outside in...\n self.naxis = np.array([shape[ndim - ax1 - 1],\n shape[ndim - ax2 - 1]], dtype=int)\n # Get just the celestial part.\n wcs = wcs.celestial\n self.wcs = wcs\n\n # Extract the projection name (e.g. CAR)\n proj = [c[-3:] for c in wcs.wcs.ctype]\n assert(proj[0] == proj[1])\n proj_name = proj[0] # Projection name\n self.proj_name = proj_name\n\n # Store the rotation to native spherical coordinates.\n self.q_celestial_to_native = self.get_q(wcs)\n\n # Store the grid info.\n self.cdelt = np.array(wcs.wcs.cdelt) * quat.DEG\n self.crpix = np.array(wcs.wcs.crpix)\n\n return self", "def get_coordinate_reference_systems(self, srids: Union[int, Sequence[int]]) -> CoordinateReferenceSystemList:\n if isinstance(srids, (int, numbers.Integral)):\n srids_processed: Sequence[Union[numbers.Integral, int]] = [srids]\n else:\n srids_processed = srids\n\n res = self._post(\n url_path=f\"{self._RESOURCE_PATH}/crs/byids\", json={\"items\": [{\"srid\": srid} for srid in srids_processed]}\n )\n return CoordinateReferenceSystemList._load(res.json()[\"items\"], cognite_client=self._cognite_client)", "def srid_to_proj(srid):\n from django.contrib.gis.gdal import SpatialReference\n srs = SpatialReference(srid)\n return srs.proj.strip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Navigate a given path, returning all files in the folder and its subfolders

def explore_path(path, recursive=True): for dirname, _, filenames in os.walk(path): for filename in filenames: yield dirname, filename if not recursive: break
[ "def _get_files(path):\n ret_val = []\n for root, _, files in os.walk(path):\n for f in files:\n ret_val.append(os.path.join(root, f))\n return ret_val", "def get_all_file_paths_in_path(path: str):\n def join_paths(dir_path, filenames):\n return (joinpath(path, dir_path, filename) for \\\n filename in filenames)\n files_iter = (join_paths(dir_path, filenames) for \\\n dir_path, _, filenames in walk(path))\n return chain.from_iterable(files_iter)", "def walk_filepaths(folder):\n for root, dirs, files in os.walk(folder):\n for filename in files:\n yield os.path.join(root, filename)", "def read_all_files_directory(self, path):\n check = Apios.check_files_in_directory(self, path)\n if check:\n src = path + \"*\"\n files = glob.iglob(src)\n for name in files:\n try:\n with open(name) as f:\n sys.stdout.write(f.read())\n except IOError:\n print_exc()", "def listdir(self, path):\n return os.listdir(self.rootpath(path))", "def _walk(rootpath, depth, verbose=False):\n owd = os.getcwd()\n os.chdir(rootpath)\n files = []\n for i in range(depth):\n names = glob.glob(sep.join('*' * (i + 1)))\n fs = [join(rootpath, x) for x in names]\n dirs = _dirs_searched(fs, rootpath)\n files += [x for x in fs if isfile(x)]\n if verbose:\n print_status(i, dirs)\n os.chdir(owd)\n return files", "def depth_first_search(path, accPaths=None, navigate=False):\n \n if not isinstance(accPaths, list):\n accPaths = []\n \n filesOrDirs = os.listdir(path)\n filesOrDirs.sort()\n \n for fod in filesOrDirs:\n realfod = os.path.join(path, fod)\n if not os.path.isfile(realfod):\n accPaths.append(realfod)\n \n if navigate:\n # Each subpath is explored\n PathUtils.depth_first_search(realfod, accPaths, navigate)\n \n return accPaths", "def traverse_dir(directory):\n files = []\n for file in os.listdir(directory):\n full_path = directory + file\n if os.path.isdir(full_path):\n files.extend(traverse_dir(full_path + \"/\"))\n else:\n files.append(full_path)\n return files", "def walk(self, path):\n from gcsfs.core import norm_path\n path = norm_path(_stringify_path(path))\n directories = set()\n files = set()\n\n for key in self.fs.ls(path, detail=True):\n # each info name must be at least [path]/part , but here\n # we check also for names like [path]/part/\n path = key['name']\n if key['storageClass'] == 'DIRECTORY':\n directories.add(path)\n elif key['storageClass'] == 'BUCKET':\n pass\n else:\n files.add(path)\n\n files = sorted([posixpath.split(f)[1] for f in files\n if f not in directories])\n directories = sorted([posixpath.split(x)[1]\n for x in directories])\n\n yield path, directories, files\n\n for directory in directories:\n for tup in self.walk(directory):\n yield tup", "def find_files(suffix, path):\n # Recursion\n result = []\n\n if not bool(path):\n return []\n\n if not bool(suffix):\n suffix = None\n\n if os.path.isdir(path): # if the current path is a file\n if path.endswith(suffix): # if the file has extension suffix='.c'\n result.append(path)\n else:\n children = os.listdir(path)\n \n for child in children:\n full_path = os.path.join(path, child)\n\n if os.path.isdir(full_path):\n result += find_files(suffix, full_path)\n elif os.path.isfile(full_path) and full_path.endswith(suffix):\n result.append(full_path)\n\n return result\n '''\n # Iterative\n result = []\n nodesToExpand = [path] # stack\n\n while nodesToExpand:\n full_path = nodesToExpand.pop()\n if os.path.isfile(full_path) and full_path.endswith(suffix):\n result.append(full_path)\n elif os.path.isdir(full_path):\n for child in os.listdir(full_path):\n 
nodesToExpand.append(os.path.join(full_path, child))\n return sorted(result)\n '''", "def walk(self, path):\n\n file_list, dirs, nondirs = [], [], []\n try:\n self.ftp.cwd(path)\n except error_perm as ep:\n self.ncbiftp_log.info(\"Current path: %s\" % self.ftp.pwd() + ep.__str__() + path)\n return [], []\n else:\n self.ftp.retrlines('LIST', lambda x: file_list.append(x.split()))\n for info in file_list:\n ls_type, name = info[0], info[-1]\n if ls_type.startswith('d'):\n dirs.append(name)\n else:\n nondirs.append(name)\n return dirs, nondirs", "def get_files(root):\n for item in os.scandir(root):\n if item.is_file():\n yield item.path\n elif item.is_dir():\n yield from get_files(item.path)", "def _findFilesInPath(self, startpath):\n allfiles = []\n if not os.access(startpath, os.R_OK):\n log().info(\"Skipping inaccessible path %s\" % startpath)\n return allfiles\n\n for subf in os.listdir(unicode(startpath)):\n newpath = os.path.join(startpath, subf)\n newpath = os.path.abspath(newpath)\n if os.path.isfile(newpath):\n if not self._checkExtension(subf):\n continue\n elif self._blacklistedFilename(subf):\n continue\n else:\n allfiles.append(newpath)\n else:\n if self.recursive:\n allfiles.extend(self._findFilesInPath(newpath))\n #end if recursive\n #end if isfile\n #end for sf\n return allfiles", "def listdir(path):\n\treturn os.listdir(translatePath(path))", "def all_in(folder):\n for (folder_path, folders, filenames) in os.walk(folder):\n for file in filenames:\n yield os.path.join(folder_path, file)\n for folder in folders:\n yield from all_in(folder)", "def directory_load(self, path: str, recursive=False):\n if not recursive:\n files = [f for f in listdir(path) if isfile(join(path, f))]\n for file in files:\n self.register_file(file, path, splitext(file)[0] if splitext(file)[0] != file else None)\n return files\n else:\n files_list = []\n for root, dirs, files in walk(path, topdown=True):\n for name in files:\n file_name = name\n short_name = splitext(name)[0] if splitext(name)[0] != file_name else None\n self.register_file(file_name, root, short_name)\n files_list.append(file_name)\n return files_list", "def get_test_files(path, mask = None, recursive=True):\n current = []\n directory_items = os.listdir(path)\n directory_items.sort()\n\n for item in directory_items:\n add_to_pythonpath = False\n item_path = os.path.join(path, item)\n if os.path.isfile(item_path):\n if fnmatch.fnmatch(item, mask):\n add_to_pythonpath = True\n current.append(item)\n elif os.path.isdir(item_path):\n if recursive:\n current.extend(get_test_files(item_path, mask = mask))\n if add_to_pythonpath:\n sys.path.append(path)\n return current", "def _getFilePaths(self, folder):\n\n for name in os.listdir(folder):\n if os.path.isfile(os.path.join(folder, name)):\n yield os.path.join(folder, name)", "def traverse_sub_dir(folder, path, name, path_list, name_list):\n\n # Gets the sub directories of a directory (folder names)\n sub_dir = [x for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]\n\n if not sub_dir:\n # We have reached deepest sub directory\n path_list.append(path)\n name_list.append(name)\n return path_list, name_list\n\n for sd in sub_dir:\n path2 = path + \"/\" + sd\n name2 = folder + \"_\" + sd\n path_list, name_list = traverse_sub_dir(sd, path2, name2, path_list, name_list)\n\n return path_list, name_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect a few pieces of info about the physical machine.
def get_machine_info(): return { 'platform': system(), 'hostname': gethostname(), 'ip_address': gethostbyname(gethostname()), 'mac_address': ':'.join(findall('..', '%012x' % getnode())), }
[ "def machine_info():\n BYTES_IN_GIG = 1073741824\n free_bytes = psutil.virtual_memory().available\n return [{\"memory\": int(free_bytes / BYTES_IN_GIG), \"cores\": multiprocessing.cpu_count(),\n \"name\": socket.gethostname()}]", "def gather_chassis_details(self):", "def _get_machine_info(self):\n hostname = socket.gethostname()\n\n # Grab the IP used to connect to 8.8.8.8\n #\n # Use this instead of `socket.gethostbyname(socket.getfqdn())`, because\n # that can be affected by entries in /etc/hosts.\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n private_ip_addr = s.getsockname()[0]\n s.close()\n\n # Ask a public entity what IP is connecting to it.\n public_ip_addr = None\n resp = requests.get(\"https://api.ipify.org\")\n if resp.status_code == 200:\n public_ip_addr = resp.text\n\n return hostname, private_ip_addr, public_ip_addr", "def CollectSystemInfo():\n global cpu, cpuCores, cpuFreqMHz, uname\n uname = \" \".join(platform.uname())\n #print(\"KK_ uname: \", uname)\n code, cpuinfo, err = Run(['cat', '/proc/cpuinfo'])\n #print(\"KK_ cpuinfo:1 ==========================================\\n\", cpuinfo)\n cpuinfo = cpuinfo.split(\"\\n\")\n #print(\"KK_ cpuinfo:2 ==========================================\\n\", cpuinfo)\n if 'ppc64' in uname:\n # Implement grep and sed in Python...\n #print(\"KK_CollectSystemInfo_000001\")\n cpu = grep(cpuinfo, r'model')[0].split(': ')[1].replace('(R)', '').replace('(TM)', '')\n cpuCores = len(grep(cpuinfo, r'processor'))\n try:\n code, dmidecode, err = Run(['dmidecode', '--type', 'processor'])\n cpuFreqMHz = int(round(float(grep(dmidecode.split(\"\\n\"), r'Current Speed')[0].rstrip().lstrip().split(\" \")[2])))\n except:\n cpuFreqMHz = int(round(float(grep(cpuinfo, r'clock')[0].split(': ')[1][:-3])))\n else:\n #model_names = grep(cpuinfo, r'model name')\n model_names = grep(cpuinfo, 'model name') #KK_\n #print(\"KK_CollectSystemInfo_000002\", model_names)\n cpu = model_names[0].split(': ')[1].replace('(R)', '').replace('(TM)', '')\n cpuCores = len(model_names)\n #print(\"\\nKK_cpu\", cpu, \", cpuCores\", cpuCores)\n try:\n code, dmidecode, err = Run(['dmidecode', '--type', 'processor'])\n cpuFreqMHz = int(round(float(grep(dmidecode.split(\"\\n\"), r'Current Speed')[0].rstrip().lstrip().split(\" \")[2])))\n except:\n cpuFreqMHz = int(round(float(grep(cpuinfo, r'cpu MHz')[0].split(': ')[1])))\n #print(\"KK_ cpuFreqMHz: \", cpuFreqMHz)", "async def systeminfo(self, ctx):\r\n\r\n\t\tres = f\"[OS Type][{sys.platform}]\"\r\n\t\tinfo = cpuinfo.get_cpu_info()\r\n\t\tres += f\"\\n[CPU][{psutil.cpu_count(logical=False)} Cores / {psutil.cpu_count()} Threads]\"\r\n\t\tres += f\"\\n[CPU Usage][%{str(psutil.cpu_percent())}]\"\r\n\t\tvmem = psutil.virtual_memory()\r\n\t\tres += f\"\\n[Memory][Total Memory: {int(vmem[0]/2**30)}GB Used: {int(vmem[0]/2**30)-int(vmem[1]/2**30)}GB(%{vmem[2]}) Available: {int(vmem[1]/2**30)}GB]\"\r\n\t\tif str(sys.platform) == 'linux': # Check Windows\r\n\t\t\tsmem = psutil.swap_memory()\r\n\t\t\tres += f\"\\n[Swap Memory][Total Swap Memory: {int(smem[0]/2**30)}GB Used: {int(smem[2]/2**30)}GB(%{smem[3]}) Available: {int(smem[2]/2**30)}GB]\"\r\n\t\t\r\n\t\tres += f\"\\n[Python Version][{sysconfig.get_python_version()}]\"\r\n\r\n\t\tINFO = f\"**{self.bot.user.name}**'s System Hardware:\\n```md\\n{res}\\n```\"\r\n\t\t\r\n\t\tif ctx.author.top_role.colour:\r\n\t\t\tcol = ctx.author.top_role.colour\r\n\t\telse:\r\n\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\tembed = discord.Embed(\r\n\t\t\tdescription = 
INFO,\r\n\t\t\tcolour = col\r\n\t\t)\r\n\t\tawait ctx.send(embed=embed)", "def get_hwinfo(ns):\n tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})\n\n # Chassis\n try:\n chassis = get_single_instance(ns, 'LMI_Chassis')\n except Exception:\n result = [(get_colored_string('error:', RED_COLOR),\n 'Missing class LMI_Chassis. Is openlmi-hardware package installed on the server?')]\n tf.produce_output(result)\n return []\n\n hwinfo = chassis.Manufacturer\n if chassis.Model and chassis.Model != 'Not Specified' \\\n and chassis.Model != chassis.Manufacturer:\n hwinfo += ' ' + chassis.Model\n elif chassis.ProductName and chassis.ProductName != 'Not Specified' \\\n and chassis.ProductName != chassis.Manufacturer:\n hwinfo += ' ' + chassis.ProductName\n virt = getattr(chassis, 'VirtualMachine', None)\n if virt and virt != 'No':\n hwinfo += ' (%s virtual machine)' % virt\n chassis_res = [\n ('Hardware:', hwinfo),\n ('Serial Number:', chassis.SerialNumber),\n ('Asset Tag:', chassis.Tag)]\n tf.produce_output(chassis_res)\n\n # CPUs\n try:\n cpus = get_all_instances(ns, 'LMI_Processor')\n cpu_caps = get_all_instances(ns, 'LMI_ProcessorCapabilities')\n except Exception:\n cpus = None\n cpu_caps = None\n if cpus and cpu_caps:\n cores = 0\n threads = 0\n for i in cpu_caps:\n cores += i.NumberOfProcessorCores\n threads += i.NumberOfHardwareThreads\n cpus_res = [\n ('CPU:', '%s, %s arch' % (cpus[0].Name, cpus[0].Architecture)),\n ('CPU Topology:', '%d cpu(s), %d core(s), %d thread(s)' % \\\n (len(cpus), cores, threads))]\n else:\n cpus_res = [('CPU:', 'N/A')]\n tf.produce_output(cpus_res)\n\n # Memory\n try:\n memory = get_single_instance(ns, 'LMI_Memory')\n except Exception:\n memory = None\n if memory:\n memory_size = format_memory_size(memory.NumberOfBlocks)\n else:\n memory_size = 'N/A GB'\n tf.produce_output([('Memory:', memory_size)])\n\n return []", "def hardware_info(self):\n return self._send_msg()", "def ex_get_hypervisor_sysinfo(self):\r\n xml = self.connection.getSysinfo()\r\n etree = ET.XML(xml)\r\n\r\n attributes = ['bios', 'system', 'processor', 'memory_device']\r\n\r\n sysinfo = {}\r\n for attribute in attributes:\r\n element = etree.find(attribute)\r\n entries = self._get_entries(element=element)\r\n sysinfo[attribute] = entries\r\n\r\n return sysinfo", "def get_host_plat_info(self):\n ret_plat_dict = {}\n handler = self.get_handler()\n try:\n host_ref = handler.xenapi.host.get_all()[0]\n bios_info = handler.xenapi.host.get_bios_strings(host_ref)\n ret_plat_dict['vendor_name'] = bios_info.get('system-manufacturer', \"\")\n ret_plat_dict['product_name'] = bios_info.get('system-product-name', \"\")\n ret_plat_dict['serial_number'] = bios_info.get('system-serial-number', \"\")\n except Exception as error:\n log.error(\"Exception when get host platform infor:%s\", error)\n\n return ret_plat_dict", "def _printer_details(self,printer):\n\n\t\tresult = {}\n\t\texpr = re.compile('\\s+([^\\s\\:]+)\\:\\s*(.*?)$')\n\t\t(stdout,stderr,status) = self._shell_command(['/usr/bin/lpstat','-l','-p',printer],{'LANG':'C'})\n\t\tif status == 0:\n\t\t\tfor line in stdout.split(\"\\n\"):\n\t\t\t\tmobj = expr.match(line)\n\t\t\t\tif mobj:\n\t\t\t\t\tresult[mobj.group(1).lower()] = mobj.group(2)\n\t\tresult['server'] = self._hostname\n\t\treturn result", "def get_mobile_info(self):\n\t\t# 1. select brand\n\t\tself.select_brand()\n\t\t# 2. select os\n\t\tself.select_os()\n\t\t# 3. device_id\n\t\tself.gen_device_id()\n\t\t# 4. lat lon\n\t\tself.gen_lat_lon()\n\t\t# 5. 
mac\n\t\tself.gen_mac()", "def get_phone_info(self):\n r_about = self.__send_get_request_to_phone(self.phone.urls['home'])\n info_values = [x.strip() for x in re.findall(\"<td>([\\w\\W]+?)</td>\", r_about.text) if 'span' not in x]\n info_keys = [\"Phone Model\", \"Part Number\", \"MAC Address\", \"IP Address\", \"UC Software Version\",\n \"BootROM Software Version\"]\n return dict(zip(info_keys, info_values))", "def whois(self):\n print(whois(self.src).decode(\"utf8\", \"ignore\"))", "def get_mac_dictionary():\n\n response = urllib2.urlopen('https://code.wireshark.org/review/gitweb?p=wireshark.git;a=blob_plain;f=manuf')\n html = response.read()\n\n mac_lookup = {}\n\n for ln in html.splitlines():\n if ('#' in ln) or (ln == ''):\n continue\n \n mac, manuf = ln.split()[0:2]\n mac_lookup[mac] = manuf\n\n return mac_lookup", "def show_systeminfo():\n\n systeminfo_response = webcli_command('systemInfo')\n for key, value in systeminfo_response['systemInfo'].items():\n print('{}={}'.format(key, value))", "def mem_info(verbose = False) -> None:", "def get_card_info(self,device):\n cmd = \"vgc-monitor -d %s | grep \\\"Card Info\\\"\"%device\n o = self.run_command_chk_rc(cmd)\n out = o['output'][1]\n out_a = out.split(\":\")\n\n return out_a[1].strip()", "def collectWNInfo(self, diskpath):\n\n with open(\"/proc/meminfo\", \"r\") as fd:\n mems = fd.readline()\n while mems:\n if mems.upper().find(\"MEMTOTAL\") != -1:\n self.mem = float(mems.split()[1])/1024\n break\n mems = fd.readline()\n\n with open(\"/proc/cpuinfo\", \"r\") as fd:\n lines = fd.readlines()\n for line in lines:\n if not string.find(line, \"cpu MHz\"):\n self.cpu = float(line.split(\":\")[1])\n break\n\n diskpipe = os.popen(\"df -mP %s\" % (diskpath)) # -m = MB\n disks = diskpipe.read()\n if not diskpipe.close():\n self.disk = float(disks.splitlines()[1].split()[3])\n\n return self.mem, self.cpu, self.disk", "def getNodeInformation(self):\n result = S_OK()\n try:\n cpuInfo = open ( \"/proc/cpuinfo\", \"r\" )\n info = cpuInfo.readlines()\n cpuInfo.close()\n result[\"HostName\"] = socket.gethostname()\n result[\"CPU(MHz)\"] = string.replace(string.replace(string.split(info[6],\":\")[1],\" \",\"\"),\"\\n\",\"\")\n result[\"ModelName\"] = string.replace(string.replace(string.split(info[4],\":\")[1],\" \",\"\"),\"\\n\",\"\")\n result[\"CacheSize(kB)\"] = string.replace(string.replace(string.split(info[7],\":\")[1],\" \",\"\"),\"\\n\",\"\")\n memInfo = open ( \"/proc/meminfo\", \"r\" )\n info = memInfo.readlines()\n memInfo.close()\n result[\"Memory(kB)\"] = string.replace(string.replace(string.split(info[3],\":\")[1],\" \",\"\"),\"\\n\",\"\")\n account = 'Unknown'\n localID = shellCall(10,'whoami')\n if localID['OK']:\n account = localID['Value'][1].strip()\n result[\"LocalAccount\"] = account\n except Exception, x:\n self.log.fatal('Watchdog failed to obtain node information with Exception:')\n self.log.fatal(str(x))\n result = S_ERROR()\n result['Message']='Failed to obtain system information for '+self.systemFlag\n return result\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> s = Solution() >>> s.isOneBitCharacter([1,0,0]) True >>> s.isOneBitCharacter([1,1,1,0]) False
def isOneBitCharacter(self, bits: list[int]) -> bool: s = [len(bits) - 2] while s: i = s.pop() if i == -1: return True if bits[i] == 0: s.append(i - 1) if i >= 1 and bits[i - 1] == 1: s.append(i - 2) else: if i >= 1 and bits[i - 1] == 1: s.append(i - 2) return False
[ "def single_chars_only(lst):\n return all(len(i) == 1 for i in lst)", "def __bool__(self: bitlist) -> bool:\n return 1 in self.bits", "def is_binary(t):\n if t == zero or t == one:\n return True\n elif t.ty != Term.COMB:\n return False\n elif t.head == bit0 or t.head == bit1:\n return is_binary(t.arg)\n else:\n return False", "def _is_single_bit(value):\n if value == 0:\n return False\n value &= value - 1\n return value == 0", "def is_unique_chars(s):\n if len(s) > 256:\n return False\n char_set = [False] * 256\n for c in s:\n if char_set[ord(c)]:\n return False\n char_set[ord(c)] = True\n return True", "def one_hot_encode_char(char: Union[str, int]) -> List:\n if isinstance(char, str):\n char = ord(char) - ord('a') + 1\n char_vector = np.zeros((28,))\n char_vector[char] = 1.\n return char_vector.tolist()", "def is_unique_2(string):\n bits = 0\n for char in string:\n position = 1 << ord(char)\n bits ^= position\n if bits & position == 0:\n return False\n return True", "def is_kana(char):\n if len(char) != 1:\n raise Exception(\"The input to is_kana() should be a length-1 string\")\n return \"\\u30a1\" <= char <= \"\\u30f7\"", "def single_letter(word):\n\tif len(word)==1 and word!='a' and word!='I':\n\t\treturn True\n\treturn False", "def _count_ones(byte):\n return sum([1 for i in (1, 2, 4, 8, 16, 32, 64, 128) if i & byte])", "def hasPalindromePermutation(input):\n\n asciiChars = [0 for i in range(128)]\n for c in input:\n asciiChars[ord(c)] += 1\n \n seenOneOdd = False\n\n for c in asciiChars:\n if c % 2 != 0:\n if seenOneOdd:\n return False\n\n seenOneOdd = True\n \n return True", "def IsCombiningChar(c):\n return CombiningCharClass.test(c)", "def is_single_bit(num):\n num &= num - 1\n return num == 0", "def LC_is_one(self):\n if not self.monomials and not self.coeffs:\n return False\n else:\n return self.coeffs[-1] == 1", "def is_unique_chars(in_str):\n checker = 0\n if len(in_str) > 128:\n return False\n for c in in_str:\n val = ord(c)\n if checker & 1 << val > 0:\n return False\n checker |= 1 << val\n return True", "def test_onehot_encoding():\n wordmap = {'C':0, '#':1, '!':2, 'E':3}\n x = np.array(['#C!'])\n result = DataUtils.onehot_encoding(x,5,wordmap)\n assert result.shape == (1,5,4)\n\n return", "def isPossibleFromTiles(word, tiles):\r\n for char in word:\r\n if not valueInList(char, tiles, False):\r\n return False\r\n return True", "def is_unique(cls, chars: list):\n\n compare_list = []\n for char in chars:\n if char in compare_list:\n return False\n compare_list.append(char)\n\n return True", "def match_one(self, match_chars):\n if self.the_char is None:\n return False\n else:\n return self.the_char in match_chars" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set wait condition. The SharkSEM request header contains a set of flags, which specify conditions to execute the request: bit 0 Wait A (SEM scanning), bit 1 Wait B (SEM stage), bit 2 Wait C (SEM optics), bit 3 Wait D (SEM automatic procedure), bit 4 Wait E (FIB scanning), bit 5 Wait F (FIB optics), bit 6 Wait G (FIB automatic procedure).
def SetWaitFlags(self, flags): self.connection.wait_flags = flags
[ "def set_wait_in(self, value: bool) -> None:\n if value:\n self.wait_for |= Direction.IN\n else:\n self.wait_for = (self.wait_for | Direction.IN) ^ Direction.IN", "def request_version_and_flags(self, req, msg):", "def request_fewer_flags(self, req, msg):", "def request_status_bits(self):\n self.sdk.SCC_RequestStatusBits(self._serial)", "def sem_status(self, voltage=-1, turn_off=False, turn_on=False):\n if voltage > -1:\n self.comm('SEM ' + str(voltage))\n ret_string = self.status('RDE', 4)\n else: #NOT IMPLEMENTED\n ret_string = self.status('RDE', 4)\n\n sem_voltage = int(ret_string)\n\n if turn_off ^ turn_on: #Only accept self-consistent sem-changes\n if turn_off:\n self.comm('SEV 0')\n if turn_on:\n self.comm('SEV 1')\n\n ret_string = self.status('ROP', 2)\n sem_on = ret_string == \"1\"\n return sem_voltage, sem_on", "def setWaiting( self, waiting ):\r\n\tif not self.isOut():\r\n self.iswaiting = waiting\r\n\t self.increaseTime()", "def semaphore(self, initval=0, qdis=QDIS.FIFO):\n\n if initval < 0:\n errmsg = \"simulator.semaphore(initval=%r) negative init value\" % initval\n log.error(errmsg)\n raise ValueError(errmsg)\n if qdis < QDIS.FIFO or qdis > QDIS.PRIORITY:\n errmsg = \"simulator.semaphore(qdis=%r) unknown queuing discipline\" % qdis\n log.error(errmsg)\n raise ValueError(errmsg)\n return Semaphore(self, initval, qdis)", "def advapi32_QueryServiceLockStatus(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hSCManager\", \"lpLockStatus\", \"cbBufSize\", \"pcbBytesNeeded\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def waiting(message):\n status_set(WorkloadState.WAITING, message)", "def _select_spot_condition(self) -> str:\n\n prompt_str = (\n \"Please enter the number for the minimum condition you would \"\n \"like updates for:\\n\"\n )\n for condition_idx, condition_str in enumerate(CONDITIONS):\n # Adding 1 since most user probably aren't used to 0 indexing\n prompt_str += f\"{condition_idx + 1}: {condition_str}\\n\"\n resp = MessagingResponse()\n resp.message(prompt_str)\n self.initial_condition_selection = False\n return str(resp)", "def _hw_wait(self):\n while self.read_status()[0] == Drivable.Status.BUSY:\n sleep(0.3)", "def wait(self, new_data=False):\n try:\n sdk.WaitForAcquisition()\n if not new_data:\n while self.status is 20072:\n sdk.WaitForAcquisition()\n except KeyboardInterrupt:\n pass", "def wait(self, t=1):\n self.flag.clear()\n self.flag.wait(t)", "def wait_reason(self, wait_reason):\n\n self._wait_reason = wait_reason", "def do_set_param(self, line): # pylint: disable=invalid-name\n self._CheckState([actuator_types.kActuatorStateInit,\n actuator_types.kActuatorStateError])\n servos, args = cmd_client.SelectArgs(line.split(), SERVOS,\n require_some=True, select_all=True)\n param, args = cmd_client.SelectArgs(args, SERVO_PARAMS, require_one=True,\n select_all=False)\n try:\n value = int(args[0], 0)\n except ValueError:\n raise ServoClientError('Invalid value: \"%s\".' % args[0])\n\n message = pack_avionics_messages.ServoSetParamMessage()\n message.param = r22_param_helper.Value(param)\n message.value = value\n\n for target in servos:\n print 'Setting %s to %g on %s...' 
% (param, value, target)\n message.selected_servos = ServosAsBits([target])\n ack_received = False\n for _ in xrange(self._NUM_RETRIES):\n self._set_param_aio_client.Send(\n message, 'kMessageTypeServoSetParam', OPERATOR)\n try:\n _, header, ack = self._ack_param_aio_client.Recv()\n if (header.source == aio_node_helper.Value(\n AioNodeNameFromServoNickname(target))\n and header.type == message_type.kMessageTypeServoAckParam\n and ack.param == message.param):\n if ack.value == message.value:\n ack_received = True\n print '%s %s: %g' % (target, param, ack.value)\n break\n else:\n print 'Got response with incorrect value.'\n except socket.timeout:\n pass\n\n if not ack_received:\n raise ServoClientError('Failed to get %s from %s; giving up.' %\n (param, target))", "def __wait_and_reply_VIN_multi_frame(self, max_wait_time_ms):\n msg = self.__wait_for_VIN_code_request(max_wait_time_ms)\n\n if msg is None:\n print('No \\'VIN code\\' request received in {0} seconds'.format(self.__ms_to_seconds(max_wait_time_ms)))\n return\n\n # Build and send VIN code message as multi-frame can message\n vin_code_multi_messages = self.__get_VIN_code_multi_frame()\n for can_frame in vin_code_multi_messages:\n self.__send_one_msg(can_frame, print_msg_flag=True)\n time.sleep(0.050) # According J1939 std. multi frame messages with 50ms time delay", "def xhdrRequest(group, low, high, header):", "def setRequest(self, message):\n # type: (bytearray) -> ()", "def update_waiting(self):\n value = None\n desc_value = f\"{self.scope}$\" + self.desc_value\n if desc_value in self.struct_variables:\n if self.struct_variables[desc_value][\"structure\"] == \"dict\":\n struct = self.struct_variables[desc_value]\n if self.get_value(0) is not None:\n if (self.get_value(1) is not None) or not self.inputs[1]:\n key = struct[\"key_type\"](self.get_value(0))\n if self.inputs[1]:\n value = self.get_value(1)\n value = struct[\"value_type\"](value)\n self.struct_variables[desc_value][\"values\"][key] = value\n self.set_value(struct[\"values\"][key], 0)\n if not self.inputs[1]:\n if key in struct[\"values\"]:\n self.set_value(struct[\"values\"][key], 0)\n if not self.inputs[1]:\n self.state = ACTIVE\n elif value is not None:\n self.state = ACTIVE" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read multiple images. This extends FetchImage() capabilities. More image channels can be processed, and 16-bit images are supported. channel_list: zero-based list of input video channels. pxl_size: number of image pixels (pixels). Scanning should be initiated first. Then, call this blocking function. During the call, messages from the data connection are collected, decoded, and images are stored as a ('bytes', 'bytes', ...) type; each string contains one channel. The resulting images are returned as a list of byte strings containing pixels. Both 8-bit and 16-bit data are supported. In the case of a 16-bit image, each pixel occupies 2 bytes in the output buffer (instead of one byte). The byte order is little-endian.
def FetchImageEx(self, channel_list, pxl_size): return self.connection.FetchImageEx('ScData', channel_list, pxl_size)
[ "def image_gather_channels(image_list: List[Image], im: Image = None, subimages=0) -> Image:\n \n if im is None:\n nchan = len(image_list)\n _, npol, ny, nx = image_list[0].shape\n im_shape = nchan, npol, ny, ny\n im = create_image_from_array(numpy.zeros(im_shape, dtype=image_list[0].data.dtype),\n image_list[0].wcs, image_list[0].polarisation_frame)\n \n assert image_is_canonical(im)\n\n if subimages == 0:\n subimages = len(image_list)\n \n for i, slab in enumerate(image_channel_iter(im, subimages=subimages)):\n slab.data[...] = image_list[i].data[...]\n \n return im", "def Images(self, first, last, type=16):\n nimages = last - first + 1\n pixels_per_image = self._cam.ReadMode.current.pixels\n total_pixels = nimages * pixels_per_image\n final_shape = [nimages] + self._cam.ReadMode.current.shape\n \n validfirst = ctypes.c_int32()\n validlast = ctypes.c_int32()\n \n if type == 16:\n data16 = np.ascontiguousarray(np.empty(shape=total_pixels, dtype=np.uint16))\n sdk.GetImages16(first, last, ctypes.c_void_p(data16.ctypes.data), total_pixels, ctypes.byref(validfirst), ctypes.byref(validlast))\n data = data16\n else:\n data32 = np.ascontiguousarray(np.empty(shape=total_pixels, dtype=np.int32))\n sdk.GetImages(first, last, ctypes.c_void_p(data32.ctypes.data), total_pixels, ctypes.byref(validfirst), ctypes.byref(validlast))\n data = data32\n self.valid = {'first': validfirst, 'last': validlast}\n return data.reshape(final_shape)", "def _read_image(self):\n for i in range(self.N):\n self.images.append(cv2.imread(self._file_at(i), cv2.IMREAD_UNCHANGED))", "def list_images(self, **args):\n\n return self._list(Image, **args)", "def read_images(fn_list):\r\n batch = np.array( [ imread(fn) for fn in fn_list ] )\r\n batch = np.expand_dims(batch,3)\r\n return batch", "def fetch(self, image_list):\n\n client = docker.Client(base_url=self._docker_url, timeout=self._docker_conn_timeout)\n for img in image_list:\n self._logger.info('Pulling image: %s' % str(img))\n if ':' in img and '@' not in img:\n img_comp = img.split(':')\n # Pull specific tag\n self._logger.debug('Syncing repo: ' + img + ' with tag = ' + self._default_tag)\n self._logger.debug(client.pull(repository=img_comp[0], tag=img_comp[1]))\n elif '@' in img:\n # It's a hash. 
Pass verbatim\n self._logger.debug('Syncing hash-identified image: ' + img)\n self._logger.debug(client.pull(repository=img))\n else:\n # It's repo, assume latest...\n # can later add full-repo support by removing the 'tag' here\n self._logger.debug('Syncing assumed \"latest\" tag for repo: ' + img)\n self._logger.debug(client.pull(repository=img, tag='latest'))", "def get_images(ibs, gid_list):\n gpath_list = ibs.get_image_paths(gid_list)\n image_list = [gtool.imread(gpath) for gpath in gpath_list]\n return image_list", "def images_in_buffer(self):\n #cdef sdk.at_32 first, last #UPDATE\n first = ctypes.c_int32()\n last = ctypes.c_int32()\n sdk.GetNumberAvailableImages(ctypes.byref(first), ctypes.byref(last))\n return {\"first\": first.value, \"last\": last.value}", "def get_next_batch(self):\n images = []\n while len(images) < self._batch_size:\n line = self._catalog.readline()\n self._counter += 1\n if self._counter < self._skip:\n continue\n url, time = line.split(\",\")\n time1 = int(time.split('-')[0])\n if time1 < 1650: # only consider paintings after 1650\n continue\n url = \"https://www.wga.hu/art\" + url.split(\"html\")[1] + \"jpg\"\n try:\n img_arr = self._scrape_image(url)\n except:\n continue\n if img_arr.shape[2] != 3: # only consider RGB paintings\n continue\n img_arr = (img_arr - 127.5) / 127.5\n images.append(img_arr)\n\n result = np.stack(images, axis=0)\n assert result.shape == (self._batch_size, self._input_size[0], self._input_size[1], 3)\n return result", "def read_many_hdf5(num_images):\n images= []\n\n # Open the HDF5 file\n file = h5py.File(hdf5_dir / f\"{num_images}_vids.h5\", \"r+\")\n\n images = np.array(file[\"/images\"]).astype(\"float32\")\n\n return images", "def read_images(img_paths):\n imgs = np.empty([len(img_paths), 160, 320, 3])\n\n for i, path in enumerate(img_paths):\n imgs[i] = imread(path)\n #image = load_img(path, target_size=(160, 320))\n #imgs[i] = img_to_array(image)\n\n return imgs", "def read_images(path, name):\n # For saving images in a list\n imgs = []\n \n # Get all files in a folder\n for filename in glob.glob(path + \"*\" + name + \"*\"):\n imgs.append(Image.open(filename))\n print 1\n return imgs", "def get_images(self):\r\n\r\n image_index = 0\r\n line_index = 1\r\n for link in self.raw_list:\r\n\r\n try:\r\n self.get_one_image(link, image_index)\r\n except ValueError:\r\n logging.warning(\"Line {}: broken or unsafe URL format.\".format(line_index))\r\n except urllib.request.URLError:\r\n logging.warning(\"Line {}: {} -- URL error.\".format(line_index, link))\r\n except TypeError:\r\n logging.warning(\"Line {}: {} -- does not point to an image.\".format(line_index, link))\r\n else:\r\n image_index += 1\r\n\r\n line_index += 1\r\n\r\n logging.info(\"------------- Download finished -------------\\n\")", "def resize(event: Dict) -> List[Image.Image]:\n # Read the images urls passed:\n images_urls = event[\"data_url\"]\n\n # Initialize an empty list for the resized images:\n resized_images = []\n\n # Go through the images urls and read and resize them:\n for image_url in images_urls:\n # Get the image:\n urllib.request.urlretrieve(image_url, \"temp.png\")\n image = Image.open(\"temp.png\")\n # Resize it:\n image = image.resize((224, 224))\n # Collect it:\n resized_images.append(image)\n\n return resized_images", "def process_images(conn, script_params):\n\n message = \"\"\n\n # Get the images\n images, log_message = script_utils.get_objects(conn, script_params)\n message += log_message\n if not images:\n return None, None, message\n 
image_ids = [i.getId() for i in images]\n\n # Get the channel offsets\n channel_offsets = []\n for i in range(1, 5):\n p_name = \"Channel_%s\" % i\n if script_params[p_name]:\n index = i-1 # UI channel index is 1-based - we want 0-based\n x = \"Channel%s_X_shift\" % i in script_params and \\\n script_params[\"Channel%s_X_shift\" % i] or 0\n y = \"Channel%s_Y_shift\" % i in script_params and \\\n script_params[\"Channel%s_Y_shift\" % i] or 0\n z = \"Channel%s_Z_shift\" % i in script_params and \\\n script_params[\"Channel%s_Z_shift\" % i] or 0\n channel_offsets.append({'index': index, 'x': x, 'y': y, 'z': z})\n\n dataset = None\n if \"New_Dataset_Name\" in script_params:\n # create new Dataset...\n new_dataset_name = script_params[\"New_Dataset_Name\"]\n dataset = omero.gateway.DatasetWrapper(conn,\n obj=omero.model.DatasetI())\n dataset.setName(rstring(new_dataset_name))\n dataset.save()\n # add to parent Project\n parent_ds = images[0].getParent()\n project = parent_ds is not None and parent_ds.getParent() or None\n if project is not None and project.canLink():\n link = omero.model.ProjectDatasetLinkI()\n link.parent = omero.model.ProjectI(project.getId(), False)\n link.child = omero.model.DatasetI(dataset.getId(), False)\n conn.getUpdateService().saveAndReturnObject(link)\n\n # need to handle Datasets eventually - Just do images for now\n new_images = []\n links = []\n for image_id in image_ids:\n new_img, link = new_image_with_channel_offsets(conn, image_id,\n channel_offsets,\n dataset)\n if new_img is not None:\n new_images.append(new_img)\n if link is not None:\n links.append(link)\n\n if not new_images:\n message += \"No image created.\"\n else:\n if len(new_images) == 1:\n if not link:\n link_message = \" but could not be attached\"\n else:\n link_message = \"\"\n message += \"New image created%s: %s.\" % (link_message,\n new_images[0].getName())\n elif len(new_images) > 1:\n message += \"%s new images created\" % len(new_images)\n if not len(links) == len(new_images):\n message += \" but some of them could not be attached.\"\n else:\n message += \".\"\n\n return new_images, dataset, message", "def load_images(mraw_file, cih_obj, N, roll_axis=True):\n h = cih_obj['Image Height']\n w = cih_obj['Image Width']\n bit = cih_obj['Color Bit']\n \n if int(bit) == 16:\n bit_dtype = np.uint16\n elif int(bit) == 8:\n bit_dtype = np.uint8\n else:\n raise Exception('Only 16-bit and 8-bit files supported!')\n\n images = np.memmap(mraw_file, dtype=bit_dtype, mode='r', shape=(N, h, w))\n \n if roll_axis: #omitting true implies true\n return np.rollaxis(images, 0, 3)\n else:\n return images", "def load_images(flowtype, im_number):\n # first load the image information\n im_info = image_info.ImageInfo(flowtype)\n\n # get the formatted filename with the correct image number inserted\n filenames = im_info.formatted_filenames(im_number)\n\n # try to load image A\n if filenames[0][-4:] == \".mat\":\n try:\n # mat files <7.3\n img = sio.loadmat(filenames[0])\n IA = np.array(img['IA'])\n pass\n except NotImplementedError:\n # mat files v7.3\n img = h5py.File(filenames[0])\n IA = np.transpose(np.array(img['IA']))\n else:\n # IA = Image.open(filenames[0])\n # IA.load()\n IA = np.asarray(Image.open(filenames[0])).copy()\n\n # image B\n if filenames[1][-4:] == \".mat\":\n try:\n # mat files <7.3\n img = sio.loadmat(filenames[1])\n IB = np.array(img['IB'])\n pass\n except NotImplementedError:\n # mat files v7.3\n img = h5py.File(filenames[1])\n IB = np.transpose(np.array(img['IB']))\n else:\n IB = 
np.asarray(Image.open(filenames[1])).copy()\n\n # mask\n mask = load_mask(flowtype)\n\n return IA, IB, mask", "def _read_image(iterator):\n image = []\n for i in range(IMAGE_SIZE):\n image.extend(list(map(int, next(iterator).split(' '))))\n return image", "def load_multiple_images(self, filepath_list):\n self.image = Image.from_multiples(filepath_list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store the GUID from the bundle in item's annotations in order to later be able to match up Plone objects with bundle items.
def _set_guid(self, obj, item): IAnnotations(obj)[BUNDLE_GUID_KEY] = item['guid']
[ "def test_division_logistics_items_guiditem_id_put(self):\n pass", "async def _create_bundle(self,\n lta_rc: RestClient,\n bundle: BundleType) -> Any:\n self.logger.info('Creating new bundle in the LTA DB.')\n create_body = {\n \"bundles\": [bundle]\n }\n result = await lta_rc.request('POST', '/Bundles/actions/bulk_create', create_body)\n uuid = result[\"bundles\"][0]\n return uuid", "def bundle_info(self, bundle_info):\n\n self._bundle_info = bundle_info", "def item_id(self):\r\n return self.content['item_id']", "def add_item(self, item):\n item.universe = self\n self.items[item.uuid] = item", "def add_item(self, item):", "def _ExtractSigningBundleIdentifier(self, signed_bundle):\n return self._ExtractSigningAttribute(signed_bundle, 'Identifier')", "def item_id(self):\n return self.content[\"item_id\"]", "def create(self):\n data = {\"labels\": {\"en\": {\"language\": \"en\", \"value\": self.label}}}\n if len(self.description) > 0:\n data[\"descriptions\"] = {\"en\": {\"language\": \"en\", \"value\": self.description}}\n data[\"claims\"] = self.claims\n try:\n self.ID = self.wb_connection.create_entity(\"item\", data)\n print(self.ID)\n return self.ID\n except WBAPIException as e:\n log.error(f\"Item could not be created through the WB API: {str(e)}\")", "def export_item(self, item):\n pass", "def get_bundle_identifier(pid=None):\n return", "def save_item(self, item):\n data = {\n 'item_id': item.item_id,\n 'name': item.name,\n }\n # 'aisle': item.aisle,\n # 'category': item.category,\n # 'description': item.description,\n # 'image_url': item.image_url\n # }\n new_row_id = self._save(item.DB_TABLE_NAME, data, item.id)\n if new_row_id:\n item.id = new_row_id\n return new_row_id", "def app_bundle_id(self) -> str:\n return pulumi.get(self, \"app_bundle_id\")", "def get_bundle_identifier_for_path(path):\n return", "def GUID(self) -> _n_2_t_0:", "def get_item(self, identifier):", "def save(self, *args, **kwargs):\n import uuid\n if not self.guid or self.guid == u'':\n self.guid = uuid.uuid4().get_hex()\n super(AbstractGUID, self).save(*args, **kwargs)", "def bundle(self, bundle):\n if not isinstance(bundle, ClsDict):\n raise ValueError('Type must be a ClsDict child')\n self._bundle = bundle", "def putItem(self, container, item, quantity=1):\n pass", "def _populate(self, row, item):\n uid = row['uid']\n self._data[item.uid] = item.data\n particles = item.particles\n number_of_items = len(item.particles)\n row['n_particles'] = number_of_items\n ids = row['particles']\n for index, uid in enumerate(particles):\n ids[index] = numpy.frombuffer(uid.bytes, dtype=numpy.uint8)\n row['particles'] = ids" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the path relative to the plone site for the given brain.
def get_relative_path(self, brain): return '/'.join(brain.getPath().split('/')[2:])
[ "def portal_path(): # pragma: no cover", "def base_path(self):\n return self.path", "def get_nb_path() -> Path:\n try: \n if is_colab(): return get_colab_nb_name()\n else: \n srv, path = _find_nb()\n if srv and path:\n root_dir = Path(srv.get('root_dir') or srv['notebook_dir'])\n return root_dir / path\n else:\n return\n except: \n return", "def nettle_path(self):\n res = self.config.get(self.section, 'nettle_path')\n if not res:\n res = None\n return res", "def path_on_server(self):\n\n # change dev_base if necessary\n if ConfigHandler.cfg.wb_new == \"True\":\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI41\n else:\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI40\n\n # if on Linux, we have to subtract local share base from development folder\n # -> the local share base acts like the drive letter on windows\n if platform.system() == 'Linux':\n tmp = self.projectfolder.replace(ConfigHandler.cfg.local_share_base, \"\")\n else:\n tmp = self.projectfolder\n\n if platform.system() == \"Windows\":\n # remove drive letter\n return oPB.DEV_BASE + tmp[2:].replace(\"\\\\\", \"/\")\n else:\n # replace possible double '/' with single '/'\n return (oPB.DEV_BASE + \"/\" + tmp).replace(\"//\", \"/\")\n\n \"\"\"\n if tmp.startswith(repo_base):\n return tmp\n else:\n if tmp.strip() != \"\":\n ret = (repo_base + \"/\" + tmp + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"a\", ret)\n return ret\n else:\n ret = (repo_base + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"b\", ret)\n return ret\n \"\"\"", "def short_relative_path_to_here(self):\n return self.short_relative_path_to(os.getcwd())", "def cdn_frontdoor_origin_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cdn_frontdoor_origin_path\")", "def short_relative_path_from_here(self):\n return self.__class__(os.getcwd()).short_relative_path_to(self)", "def project_path(project=None, exclude_nn_addr=False):\n\n if project is None:\n project = project_name()\n\n # abspath means \"hdfs://namenode:port/ is preprended\n abspath = hdfs.path.abspath(\"/Projects/\" + project + \"/\")\n if exclude_nn_addr:\n abspath = re.sub(r\"\\d+.\\d+.\\d+.\\d+:\\d+\", \"\", abspath)\n return abspath", "def cdn_frontdoor_origin_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cdn_frontdoor_origin_path\")", "def relative_path(self):\n if self.parent is not None:\n root = self.parent\n while True:\n if root.parent:\n root = root.parent\n else:\n break\n return self.path[len(root.path) + 1:]\n else:\n return ''", "def path_for_script(script):\n return path.join(current_directory(), script)", "def get_project_path():\n return Path(__file__).absolute().parents[1]", "def site_base_url(self) -> str:\n return pulumi.get(self, \"site_base_url\")", "def site_url(self, path=None):\n base = self.config['SITE_URL']\n path = path or '/'\n if base:\n return base if path == '/' else '%s%s' % (base, path)\n else:\n return path", "def find_base_path():\n if platform.system() == 'windows':\n base_path = os.path.join('K:', 'ptestbend')\n else:\n base_path = os.path.join('/mnt','K', 'ptestbend')\n return base_path", "def get_real_path(self):\n return os.path.join(self.root.path, self.path, self.filename)", "def absolute_url_path(self):\n spp = self.getPhysicalPath()\n try:\n toUrl = aq_acquire(self, 'REQUEST').physicalPathToURL\n except AttributeError:\n return path2url(spp) or '/'\n return toUrl(spp, relative=1) or '/'", "def relpath(repo_path):\n repo_path = '../../' + repo_path\n repo_path = repo_path.replace('../../infra/', '../')\n repo_path = 
repo_path.replace('../bots/', '')\n return repo_path", "def script_path(sname):\n\n return examples_dir / \"scripts\" / Path(sname)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resolves an item's parent pointer to a container obj and its path.
def resolve_parent_pointer(self, item): parent_guid = item.get('parent_guid') formatted_parent_refnum = item.get('_formatted_parent_refnum') if parent_guid is not None: parent_path = self.path_from_guid(parent_guid) elif formatted_parent_refnum is not None: parent_path = self.path_from_refnum(formatted_parent_refnum) elif item['_type'] == 'opengever.repository.repositoryroot': # Repo roots are the only type that don't require a parent # pointer, and get constructed directly in the Plone site container = self.site parent_path = '/' else: # Should never happen - schema requires a parent pointer logger.warning( u'Item with GUID %s is missing a parent pointer, ' u'skipping.' % item['guid']) return if not parent_path: logger.warning( u'Could not determine parent container for item with ' u'GUID %s, skipping.' % item['guid']) return container = traverse(self.site, parent_path, None) return container, parent_path
[ "def parent(cls, item):\n\n parent_id = parent_uid = parent_item = None\n\n is_key = lambda fk, name: fk == name or \\\n isinstance(fk, (tuple, list)) and \\\n fk[1] == name\n\n all_items = item.job.items\n for link_item in all_items.values():\n if link_item.tablename == \"org_organisation_branch\":\n references = link_item.references\n parent = branch = None\n for reference in references:\n fk = reference.field\n if is_key(fk, \"branch_id\"):\n branch = reference.entry\n elif is_key(fk, \"organisation_id\"):\n parent = reference.entry\n if parent and branch:\n break\n if parent and branch and branch.item_id == item.item_id:\n parent_id = parent.id\n parent_uid = parent.uid\n parent_item = all_items.get(parent.item_id)\n break\n\n return parent_id, parent_uid, parent_item", "def get_parent(self, item_id):\r\n return self.get('items/{}/parent'.format(item_id)).json()", "def parentItem(self):\n return self._parentItem", "def get_ultimate_parent(obj):\n if obj.parent:\n return get_ultimate_parent(obj.parent)\n else:\n return obj", "def _get_parent(*, schema: oa_types.Schema, schemas: oa_types.Schemas) -> str:\n ref = peek.ref(schema=schema, schemas=schemas)\n assert ref is not None\n parent, _ = ref_helper.get_ref(ref=ref, schemas=schemas)\n return parent", "def get_parent ( self ):\n return self.parent_ref.deref_safe()", "def get_parent(self, it):\n return self._parent_array[it]", "def f_get_parent(self):\n if self.v_is_root:\n raise TypeError('Root does not have a parent')\n elif self.v_location == '':\n return self.v_root\n else:\n return self.v_root.f_get(self.v_location, fast_access=False, shortcuts=False)", "def _GetParentContainer(self, cwc):\n if cwc.uid == 1:\n return self.ToDoList().root\n names_seen = set()\n for f, path in self.ToDoList().ContainersPreorder():\n names_seen.add(f.name)\n if f.uid == cwc.uid:\n if not path:\n raise InvalidPathError('Already at the root Folder; cannot ascend.')\n return path[0]\n raise InvalidPathError(\n 'No such folder. 
All folders:\\n%s'\n % (common.Indented('\\n'.join(sorted(names_seen)))))", "def _closure_parent_pk(self):\n if hasattr(self, \"%s_id\" % self._closure_parent_attr):\n return getattr(self, \"%s_id\" % self._closure_parent_attr)\n else:\n parent = getattr(self, self._closure_parent_attr)\n return parent.pk if parent else None", "def parent(self):\n parent_key = self.parent_key()\n if parent_key:\n return db.get(parent_key)", "def find_Parent(obj):\n result_obj = None\n # this findes the 'last' Part..\n # but as fare as i know there should only be one in this list..\n for x in obj.InList:\n if (\n x.isDerivedFrom(\"App::Part\")\n ):\n result_obj = x\n return result_obj", "def fetch_parent(self):\n if not self.parent_id:\n return None\n return self._fetch(self.client, self.parent_id)", "def get_parent_id(khoros_object, identifier=None, category_details=None):\n return get_category_field(khoros_object, 'parent_id', identifier, category_details)", "def Parent(self, *args):\n return _snap.TUnionFind_Parent(self, *args)", "def get_parent_url(khoros_object, identifier=None, category_details=None):\n return get_category_field(khoros_object, 'parent_view_href', identifier, category_details)", "def get_parent(entity):\n return getattr(entity, meta.PARENT_IDENTIFIER, None)", "def get_parent_ref(self, path):\n matches = [r for r in self.refs if path.startswith(r + '/')]\n if len(matches) != 1:\n raise FuseOSError(errno.ENOENT)\n return matches[0]", "def get_parent_news_item(self):\n if self.is_news_item():\n return self.newsitem\n elif self.is_comment():\n return self.comment.get_newsitem()\n else:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the block at a given location in the world's version
def block_at(world, dimension, x, y, z) -> Tuple[Block, BlockEntity]: block, blockEntity = world.get_version_block( x, y, z, dimension, (world.level_wrapper.platform, world.level_wrapper.version) ) return block, blockEntity
[ "def get(self, pos):\n\n x, y = pos\n block = self.default_block\n\n if self.is_in_bounds(pos):\n block = self.map[y][x]\n\n return block", "def get_block(self, blockname=None):\n if blockname is None:\n blockname = \"xia2\"\n assert blockname, \"invalid block name\"\n if blockname not in self._cif:\n self._cif[blockname] = iotbx.cif.model.block()\n self._cif[blockname][\"_entry.id\"] = blockname\n return self._cif[blockname]", "def getLastBlock(self):\n\t\tquery = 'SELECT * from blocks ORDER BY id DESC LIMIT 1'\n\t\tself.executeQuery(query)\n\t\trawBlock = self.fetchOne()\n\t\tif rawBlock == None:\n\t\t\treturn None\n\t\tblock = Block.Block(None, rawBlock[0], rawBlock[9])\n\t\tblock.setBlockFromDb(rawBlock)\n\t\treturn block", "def block(self, height=None):\n return self._blocks.at(height)", "def get_block(height):\n r = requests.get(getBlockHeight + str(int(height)))\n if r.status_code != requests.codes.ok:\n return\n try:\n miner_id = r.json()['generator']\n block_id = r.json()['block']\n except KeyError:\n miner_id = None\n block_id = None\n\n return miner_id, block_id", "def get_block(self, namespace, offset, key):\n cursor = self.cursor\n cursor.execute('SELECT data, flags FROM gauged_data '\n 'WHERE namespace = ? AND offset = ? AND `key` = ?',\n (namespace, offset, key))\n row = cursor.fetchone()\n return (None, None) if row is None else row", "def get_block(self, bhash):\n try:\n return self.db.get(f'{bhash}')\n except BaseException:\n return None", "def lookupBlock(blockName):\r\n blockName = blockName.upper()\r\n try:\r\n try:\r\n name, data = blockName.rsplit('_', 1)\r\n except ValueError:\r\n return Blocks[blockName]\r\n else:\r\n try:\r\n data = int(data)\r\n except ValueError:\r\n return Blocks[blockName]\r\n return Block(Blocks[name].id, data)\r\n except KeyError:\r\n print 'Invalid block name:', blockName\r\n sys.exit()", "def get_object(self, x, y, z):\r\n for block in self._blocks:\r\n if (x, y, z) == block.location():\r\n return block\r\n if (x, y, z) == self._drone.location():\r\n return self._drone\r\n return None", "def get_version_block(\n self,\n x: int,\n y: int,\n z: int,\n dimension: Dimension,\n version: VersionIdentifierType,\n ) -> Tuple[Union[Block, Entity], Optional[BlockEntity]]:\n cx, cz = block_coords_to_chunk_coords(x, z, chunk_size=self.sub_chunk_size)\n chunk = self.get_chunk(cx, cz, dimension)\n offset_x, offset_z = x - 16 * cx, z - 16 * cz\n\n output, extra_output, _ = self.translation_manager.get_version(\n *version\n ).block.from_universal(\n chunk.get_block(offset_x, y, offset_z), chunk.block_entities.get((x, y, z))\n )\n return output, extra_output", "def block(self) -> 'DXFEntity':\n return self.get_entity_by_handle(self._block_handle)", "def getBlockByHash(self, blockHash):\n\t\tquery = 'SELECT * from blocks where block_hash = %s'\n\t\tself.executeQuery(query, (blockHash,))\n\t\trawBlock = self.fetchOne()\n\t\tblock = Block.Block(None, rawBlock[0], rawBlock[9])\n\t\tblock.setBlockFromDb(rawBlock)\n\t\treturn block", "def resolve_block(\n connection: sqlite3.Connection, at_block, forced_gas_price: int\n) -> Tuple[BlockInfo, int, int]:\n\n if at_block == \"latest\":\n # it has been decided that the latest is whatever pathfinder knows to be latest synced block\n # regardless of it being the highest known (not yet synced)\n cursor = connection.execute(\n \"select number, timestamp, storage_commitment, gas_price, sequencer_address, class_commitment, sn_ver.version from block_headers left join starknet_versions sn_ver on (sn_ver.id = version_id) 
order by number desc limit 1\"\n )\n elif isinstance(at_block, int):\n cursor = connection.execute(\n \"select number, timestamp, storage_commitment, gas_price, sequencer_address, class_commitment, sn_ver.version from block_headers left join starknet_versions sn_ver on (sn_ver.id = version_id) where number = ?\",\n [at_block],\n )\n else:\n assert isinstance(at_block, bytes), f\"expected bytes, got {type(at_block)}\"\n if len(at_block) < 32:\n # left pad it, as the fields in db are fixed length for this occasion\n at_block = b\"\\x00\" * (32 - len(at_block)) + at_block\n\n cursor = connection.execute(\n \"select number, timestamp, storage_commitment, gas_price, sequencer_address, class_commitment, sn_ver.version from block_headers left join starknet_versions sn_ver on (sn_ver.id = version_id) where hash = ?\",\n [at_block],\n )\n\n try:\n [\n (\n block_number,\n block_time,\n storage_commitment,\n gas_price,\n sequencer_address,\n class_commitment,\n starknet_version,\n )\n ] = cursor\n except ValueError as exc:\n # zero rows, or wrong number of columns (unlikely)\n raise NoSuchBlock(at_block) from exc\n\n gas_price = int.from_bytes(gas_price, \"big\")\n\n if forced_gas_price != 0:\n # allow caller to override any; see rust side's GasPriceSource for more rationale\n gas_price = forced_gas_price\n\n sequencer_address = int.from_bytes(sequencer_address, \"big\")\n\n return (\n BlockInfo(\n block_number, block_time, gas_price, sequencer_address, starknet_version\n ),\n storage_commitment,\n class_commitment,\n )", "def get_block(self, blknum):\n\n return self.blocks[blknum]", "def getBlockByHeigth(self, heigth):\n\t\tquery = 'SELECT * from blocks where real_number = %s'\n\t\tself.executeQuery(query, (heigth,))\n\t\trawBlock = self.fetchOne()\n\t\tblock = Block.Block(None, rawBlock[0], rawBlock[9])\n\t\tblock.setBlockFromDb(rawBlock)\n\t\treturn block", "def get_block(self, block: int = None):\n\n if (block is None):\n block = w3.eth.blockNumber\n\n cprint(\"block {} details = \\n {}\".format(block, (w3.eth.getBlock(block_identifier=block))), \"yellow\") #TODO: make this print pretty json", "def __getitem__(self, block_number: int) -> BlockAPI:\n\n if block_number < 0:\n block_number = len(self) + block_number\n\n return self._get_block(block_number)", "def getBlockByNumber(self, blockNumber):\n\t\tquery = 'SELECT * from blocks where id = %s'\n\t\tself.executeQuery(query, (blockNumber,))\n\t\trawBlock = self.fetchOne()\n\t\tblock = Block.Block(None, rawBlock[0], rawBlock[9])\n\t\tblock.setBlockFromDb(rawBlock)\n\t\treturn block", "def get_block(self, url_or_id: str, force_refresh: bool = False) -> Optional[Block]:\n block_id = extract_id(url_or_id)\n block = self.get_record_data(\"block\", block_id, force_refresh)\n\n if not block:\n return None\n\n if block.get(\"parent_table\") == \"collection\":\n if block.get(\"is_template\"):\n klass = TemplateBlock\n else:\n klass = CollectionRowBlock\n else:\n klass = get_block_type(block.get(\"type\"))\n\n return klass(client=self, block_id=block_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for batch_pickup_request
def test_batch_pickup_request(self): pass
[ "def test_call_pickup(self):\n events = self.run_and_get_events('fixtures/xfer_misc/call_pickup.json')\n\n expected_events = self.events_from_tuples((\n ('on_b_dial', {\n 'call_id': 'vgua0-dev-1445001221.106',\n 'caller': CallerId(code=123450001, name='Alice', number='201', is_public=True),\n 'to_number': '202',\n 'targets': [CallerId(code=123450002, number='202', is_public=True)],\n }),\n ('on_up', {\n 'call_id': 'vgua0-dev-1445001221.106',\n 'caller': CallerId(code=123450001, name='Alice', number='201', is_public=True),\n 'to_number': '202',\n 'callee': CallerId(code=123450003, number='202', is_public=True),\n }),\n ('on_hangup', {\n 'call_id': 'vgua0-dev-1445001221.106',\n 'caller': CallerId(code=123450001, name='Alice', number='201', is_public=True),\n 'to_number': '202',\n 'reason': 'completed',\n }),\n ))\n\n self.assertEqual(expected_events, events)", "def __process_pickup_requests(self):\r\n\r\n to_remove = []\r\n for pickup_floor, direction in self.pickup_requests:\r\n possible_elevator = []\r\n\r\n \"\"\"Elevators that are free or going in the same direction\"\"\"\r\n for elevator in self.elevators:\r\n if elevator.matches_request(pickup_floor, direction):\r\n possible_elevator.append(elevator)\r\n\r\n if len(possible_elevator) > 0:\r\n #find the nearest elevator\r\n elevator_id = self.__find_nearest_elevator_id(possible_elevator, pickup_floor)\r\n self.target_floor_request(elevator_id, pickup_floor)\r\n to_remove.append((pickup_floor, direction))\r\n else:\r\n \"\"\"Elevators that are going in the direction of the request.\"\"\"\r\n comming_elevator = []\r\n for elevator in self.elevators:\r\n if elevator.is_coming_to(pickup_floor):\r\n comming_elevator.append(elevator)\r\n\r\n if len(comming_elevator) > 0:\r\n #find the nearest elevator\r\n elevator_id = self.__find_nearest_elevator_id(comming_elevator, pickup_floor)\r\n self.target_floor_request(elevator_id, pickup_floor)\r\n to_remove.append((pickup_floor, direction))\r\n\r\n for items in to_remove:\r\n self.pickup_requests.remove(items)", "def test_enqueue(self):\n self.fail()", "def test_enqueue15(client):\n pytest.skip(\"Not implemented\")", "def test_get_task_instances_batch(self):\n pass", "def test_request_can_create_successfully(self):\r\n initial_count = len(request_model.requests)\r\n res = self.client().post('/api/v1/request', data=json.dumps(self.request),\r\n headers={\"content-type\": \"application/json\", \"access-token\": self.token})\r\n final_count = len(request_model.requests)\r\n self.assertEqual(res.status_code, 201)\r\n self.assertEqual(final_count - initial_count, 1)\r\n self.assertIn(\"Request created\",str(res.data))", "def do_create_batch(self, item, transfer, lot):\n date_done = transfer.picking_id.date_done\n partner = transfer.picking_id.partner_id\n product = item.product_id\n packaging = product.packaging_ids[0]\n\n serial = self.env['estate.nursery.batch'].search_count([]) + 1\n\n batch_data = {\n 'name': \"Batch %d\" % serial,\n 'lot_id': lot.id,\n 'variety_id': item.variety_id.id,\n 'progeny_id': item.progeny_id.id,\n 'date_received': date_done,\n 'age_seed': transfer.age_seed,\n 'qty_received': item.quantity,\n 'picking_id': transfer.picking_id.id,\n 'state': 'draft'\n }\n\n # print \"Create Seed Batch. 
%s (v: %s, p: %s) is received at %s from %s\" % (item.product_id.name,\n # item.variety_id.name,\n # item.progeny_id.name,\n # date_done,\n # partner.name)\n\n # Check and create batch (box) and batchline (bag) for seed product.\n # if product has no package\n # create one box and one bag\n # else\n # create batch and its batchline as product package.\n # Check and create lot for current good receipt\n # print \"Create Box and Bag Packaging is %s (box: %s, bag: %s @ %s)\" % (product.name,\n # packaging.ul_container.name,\n # packaging.ul.name,\n # packaging.qty * packaging.ul_qty)\n\n return self.env['estate.nursery.batch'].create(batch_data)", "def test_bulk_avail(self):\n with mock.patch('bbarchivist.networkutils.availability', mock.MagicMock(return_value=False)):\n assert bs.bulk_avail([\"fake.url\", \"fakeurl.2\"]) == []", "def test_AddCustomer_request_sends_optional_parameters(self):\n self.mock_request(\n customer_name=self.CUSTOMER_NAME,\n address_1=self.ADDRESS_1,\n country=self.COUNTRY,\n selling_channel_id=self.SELLING_CHANNEL_ID,\n account_name=self.ACCOUNT_NAME,\n address_2=self.ADDRESS_2,\n agent_id=self.AGENT_ID,\n company_fax=self.COMPANY_FAX,\n company_mobile=self.COMPANY_MOBILE,\n company_telephone=self.COMPANY_TELEPHONE,\n contact_email=self.CONTACT_EMAIL,\n contact_fax=self.CONTACT_FAX,\n contact_mobile=self.CONTACT_MOBILE,\n contact_name=self.CONTACT_NAME,\n contact_phone=self.CONTACT_PHONE,\n county=self.COUNTY,\n customer_type=self.CUSTOMER_TYPE,\n eu_vat=self.EU_VAT,\n post_code=self.POST_CODE,\n payment_terms=self.PAYMENT_TERMS,\n town=self.TOWN,\n trade_name=self.TRADE_NAME,\n vat_number=self.VAT_NUMBER,\n credit_limit=self.CREDIT_LIMIT,\n )\n self.assertDataSent(self.request_class.ACCOUNT_NAME, self.ACCOUNT_NAME)\n self.assertDataSent(self.request_class.ADDRESS_2, self.ADDRESS_2)\n self.assertDataSent(self.request_class.AGENT_ID, self.AGENT_ID)\n self.assertDataSent(self.request_class.COMPANY_FAX, self.COMPANY_FAX)\n self.assertDataSent(self.request_class.COMPANY_MOBILE, self.COMPANY_MOBILE)\n self.assertDataSent(\n self.request_class.COMPANY_TELEPHONE, self.COMPANY_TELEPHONE\n )\n self.assertDataSent(self.request_class.CONTACT_EMAIL, self.CONTACT_EMAIL)\n self.assertDataSent(self.request_class.CONTACT_FAX, self.CONTACT_FAX)\n self.assertDataSent(self.request_class.CONTACT_MOBILE, self.CONTACT_MOBILE)\n self.assertDataSent(self.request_class.CONTACT_NAME, self.CONTACT_NAME)\n self.assertDataSent(self.request_class.CONTACT_PHONE, self.CONTACT_PHONE)\n self.assertDataSent(self.request_class.COUNTY, self.COUNTY)\n self.assertDataSent(self.request_class.CUSTOMER_TYPE, self.CUSTOMER_TYPE)\n self.assertDataSent(self.request_class.EU_VAT, int(bool(self.EU_VAT)))\n self.assertDataSent(self.request_class.POST_CODE, self.POST_CODE)\n self.assertDataSent(self.request_class.PAYMENT_TERMS, self.PAYMENT_TERMS)\n self.assertDataSent(self.request_class.TOWN, self.TOWN)\n self.assertDataSent(self.request_class.TRADE_NAME, self.TRADE_NAME)\n self.assertDataSent(self.request_class.VAT_NUMBER, self.VAT_NUMBER)\n self.assertDataSent(self.request_class.CREDIT_LIMIT, self.CREDIT_LIMIT)", "def run_requests(request_collection):", "def testI_requestStructure(self):\n _, campaignIds, requestIds = self._inject(20) # creates x documents / requests\n allRequests = self._getViewResults(\"all\") \n for req in allRequests:\n docId = req[u\"id\"]\n state = req[u\"key\"]\n # all requests should be NewlyHeld state\n self.assertEqual(state, \"NewlyHeld\")\n # check that the doc is well formed and 
matches the data we inserted\n doc = self.couch.document(docId) \n self.failUnless(doc[u\"state\"] == \"NewlyHeld\")\n self.failUnless(doc.has_key(u\"created\"))\n self.failUnless(doc.has_key(u\"timestamp\"))\n # description is a list of dictionaries, the first one is the initial message\n self.failUnless(\"Initial injection by the RequestManager\" in doc[u\"description\"][0].values())\n self.failUnless(doc[u\"request\"][u\"campaign_id\"] in campaignIds)\n self.failUnless(doc[u'request'][u'request_id'] in requestIds)", "def test_zmq_api_queue_item_add_batch_4_fail(re_manager): # noqa: F811\n _plan2_corrupt = _plan2.copy()\n _plan2_corrupt[\"name\"] = \"nonexisting_name\"\n items = [_plan1, _plan2_corrupt, _instruction_stop, {}, _plan3]\n success_expected = [True, False, True, False, True]\n msg_expected = [\"\", \"is not in the list of allowed plans\", \"\", \"'item_type' key is not found\", \"\"]\n\n params = {\"items\": items, \"user\": _user, \"user_group\": _user_group}\n resp1a, _ = zmq_single_request(\"queue_item_add_batch\", params)\n assert resp1a[\"success\"] is False, f\"resp={resp1a}\"\n assert resp1a[\"msg\"] == \"Failed to add all items: validation of 2 out of 5 submitted items failed\"\n assert resp1a[\"qsize\"] == 0\n item_list = resp1a[\"items\"]\n item_results = resp1a[\"results\"]\n assert len(item_list) == len(items)\n assert len(item_results) == len(items)\n\n assert item_list == items\n for n, res in enumerate(item_results):\n assert res[\"success\"] == success_expected[n], str(res)\n assert msg_expected[n] in res[\"msg\"], str(res)\n\n state = get_queue_state()\n assert state[\"items_in_queue\"] == 0\n assert state[\"items_in_history\"] == 0", "def process_batch(self, batch: List[Dict[str, Any]]) -> List[Response]:\n pass", "def test_bulk_create(self):\n urls = [reverse('api:record-list')]\n rec = self.record_1\n ds = self.ds_1\n data = [\n {\n \"dataset\": rec.dataset.pk,\n \"data\": rec.data\n },\n {\n \"dataset\": rec.dataset.pk,\n \"data\": rec.data\n }\n ]\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_2_client,\n self.admin_client, self.custodian_1_client],\n \"allowed\": []\n }\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_400_BAD_REQUEST, status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n count = ds.record_queryset.count()\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )\n self.assertEqual(ds.record_queryset.count(), count + len(data))", "def test_zmq_api_queue_item_add_batch_1(\n re_manager, batch_params, queue_seq, batch_seq, expected_seq, success, msgs # noqa: F811\n):\n plan_template = {\n \"name\": \"count\",\n \"args\": [[\"det1\"]],\n \"kwargs\": {\"num\": 50, \"delay\": 0.01},\n \"item_type\": \"plan\",\n }\n\n # Fill the queue with the initial set of plans\n for item_code in queue_seq:\n item = copy.deepcopy(plan_template)\n item[\"kwargs\"][\"num\"] = int(item_code)\n params = {\"item\": item, \"user\": _user, \"user_group\": _user_group}\n resp1a, _ = zmq_single_request(\"queue_item_add\", params)\n assert resp1a[\"success\"] is True\n\n state = get_queue_state()\n assert state[\"items_in_queue\"] == len(queue_seq)\n assert state[\"items_in_history\"] == 0\n\n resp1b, _ = zmq_single_request(\"queue_get\")\n assert resp1b[\"success\"] is True\n queue_initial = resp1b[\"items\"]\n\n # If there 
are 'before_uid' or 'after_uid' parameters, then convert values of those\n # parameters to actual item UIDs.\n def find_uid(dummy_uid):\n \"\"\"If item is not found, then return ``dummy_uid``\"\"\"\n try:\n ind = queue_seq.index(dummy_uid)\n return queue_initial[ind][\"item_uid\"]\n except Exception:\n return dummy_uid\n\n if \"before_uid\" in batch_params:\n batch_params[\"before_uid\"] = find_uid(batch_params[\"before_uid\"])\n\n if \"after_uid\" in batch_params:\n batch_params[\"after_uid\"] = find_uid(batch_params[\"after_uid\"])\n\n # Create a list of items to add\n items_to_add = []\n for item_code in batch_seq:\n item = copy.deepcopy(plan_template)\n item[\"kwargs\"][\"num\"] = int(item_code)\n items_to_add.append(item)\n\n # Add the batch\n params = {\"items\": items_to_add, \"user\": _user, \"user_group\": _user_group}\n params.update(batch_params)\n resp2a, _ = zmq_single_request(\"queue_item_add_batch\", params)\n\n if success:\n assert resp2a[\"success\"] is True\n assert resp2a[\"msg\"] == \"\"\n assert resp2a[\"qsize\"] == len(expected_seq)\n items_added = resp2a[\"items\"]\n assert len(items_added) == len(batch_seq)\n added_seq = [str(_[\"kwargs\"][\"num\"]) for _ in items_added]\n added_seq = \"\".join(added_seq)\n assert added_seq == batch_seq\n else:\n n_total = len(msgs)\n n_success = len([_ for _ in msgs if not (_)])\n n_failed = n_total - n_success\n msg = (\n f\"Failed to add all items: validation of {n_failed} out of {n_total} submitted items failed\"\n if n_failed\n else \"\"\n )\n\n assert resp2a[\"success\"] is False\n assert resp2a[\"msg\"] == msg\n assert resp2a[\"qsize\"] == len(expected_seq)\n items_added = resp2a[\"items\"]\n assert len(items_added) == len(batch_seq)\n added_seq = [str(_[\"kwargs\"][\"num\"]) for _ in items_added]\n added_seq = \"\".join(added_seq)\n assert added_seq == batch_seq\n\n resp2b, _ = zmq_single_request(\"queue_get\")\n assert resp2b[\"success\"] is True\n queue_final = resp2b[\"items\"]\n queue_final_seq = [str(_[\"kwargs\"][\"num\"]) for _ in queue_final]\n queue_final_seq = \"\".join(queue_final_seq)\n assert queue_final_seq == expected_seq\n\n state = get_queue_state()\n assert state[\"items_in_queue\"] == len(expected_seq)\n assert state[\"items_in_history\"] == 0", "def testUploadUsesBatchSize(self):\n client = DatasetImporter(1)\n client.upload(u'user',\n [{'about': u'hello world', 'values': {u'user/bar': 13}},\n {'about': u'wubble', 'values': {u'user/quux': 42}}])\n self.assertTrue(self.log.getvalue().startswith(\n 'Importing 2 new objects.\\nImported 1/2 new objects.\\n'\n 'Imported 2/2 new objects.\\nImported 2 objects in '))", "def test_multiple_build_retrieval(self):", "def test_zmq_api_item_move_batch_1(\n re_manager, batch_params, queue_seq, selection_seq, batch_seq, expected_seq, success, msg # noqa: F811\n):\n plan_template = {\n \"name\": \"count\",\n \"args\": [[\"det1\"]],\n \"kwargs\": {\"num\": 50, \"delay\": 0.01},\n \"item_type\": \"plan\",\n }\n\n # Fill the queue with the initial set of plans\n for item_code in queue_seq:\n item = copy.deepcopy(plan_template)\n item[\"kwargs\"][\"num\"] = int(item_code)\n params = {\"item\": item, \"user\": _user, \"user_group\": _user_group}\n resp1a, _ = zmq_single_request(\"queue_item_add\", params)\n assert resp1a[\"success\"] is True\n\n state = get_queue_state()\n assert state[\"items_in_queue\"] == len(queue_seq)\n assert state[\"items_in_history\"] == 0\n\n resp1b, _ = zmq_single_request(\"queue_get\")\n assert resp1b[\"success\"] is True\n queue_initial = 
resp1b[\"items\"]\n\n # If there are 'before_uid' or 'after_uid' parameters, then convert values of those\n # parameters to actual item UIDs.\n def find_uid(dummy_uid):\n \"\"\"If item is not found, then return ``dummy_uid``\"\"\"\n try:\n ind = queue_seq.index(dummy_uid)\n return queue_initial[ind][\"item_uid\"]\n except Exception:\n return dummy_uid\n\n if \"before_uid\" in batch_params:\n batch_params[\"before_uid\"] = find_uid(batch_params[\"before_uid\"])\n\n if \"after_uid\" in batch_params:\n batch_params[\"after_uid\"] = find_uid(batch_params[\"after_uid\"])\n\n # Create a list of UIDs of items to be moved\n uids_of_items_to_move = []\n for item_code in selection_seq:\n uids_of_items_to_move.append(find_uid(item_code))\n\n # Move the batch\n params = {\"uids\": uids_of_items_to_move}\n params.update(batch_params)\n resp2a, _ = zmq_single_request(\"queue_item_move_batch\", params)\n\n if success:\n assert resp2a[\"success\"] is True, pprint.pformat(resp2a)\n assert resp2a[\"msg\"] == \"\"\n assert resp2a[\"qsize\"] == len(expected_seq)\n items_moved = resp2a[\"items\"]\n assert len(items_moved) == len(batch_seq)\n added_seq = [str(_[\"kwargs\"][\"num\"]) for _ in items_moved]\n added_seq = \"\".join(added_seq)\n assert added_seq == batch_seq\n else:\n assert resp2a[\"success\"] is False, pprint.pformat(resp2a)\n assert re.search(msg, resp2a[\"msg\"]), pprint.pformat(resp2a)\n assert resp2a[\"qsize\"] is None\n assert resp2a[\"items\"] == []\n\n resp2b, _ = zmq_single_request(\"queue_get\")\n assert resp2b[\"success\"] is True\n queue_final = resp2b[\"items\"]\n queue_final_seq = [str(_[\"kwargs\"][\"num\"]) for _ in queue_final]\n queue_final_seq = \"\".join(queue_final_seq)\n assert queue_final_seq == expected_seq\n\n state = get_queue_state()\n assert state[\"items_in_queue\"] == len(expected_seq)\n assert state[\"items_in_history\"] == 0", "def test_processrequest_5(base_settings):\n filename = (\n base_settings[\"unittest_data_dir\"] / \"processrequest-example-poll-payrec.json\"\n )\n inst = processrequest.ProcessRequest.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"ProcessRequest\" == inst.resource_type\n\n impl_processrequest_5(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"ProcessRequest\" == data[\"resourceType\"]\n\n inst2 = processrequest.ProcessRequest(**data)\n impl_processrequest_5(inst2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for connections_request
def test_connections_request(self): pass
[ "def test_0040_test_connection(self):\n self.assertTrue(self.api.test_connection())", "def verify_connection(self, request, client_address):\n return 1", "def test_connect_with_prefix(self):\n conn = Connection(url=\"http://test.com/\")\n conn.connect()\n conn.request_path = \"/v1\"\n self.assertEqual(conn.connection.host, \"http://test.com\")\n with requests_mock.mock() as m:\n m.get(\"http://test.com/v1/test\", text=\"data\")\n response = conn.request(\"/test\")\n self.assertEqual(response.body, \"data\")", "def test_setUpConnections(self):\n self.assertListEqual(self.testCity.connections, ['TOKYO', 'MANILA', 'LOSANGELES', 'CHICAGO'])", "def test_connectors_get(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_worker_connection_status_responder(self):\n pass", "def test_connect_to_api(self):\n \n # Test something which is meant to throw an error\n result = app.connect_to_api(5000)\n assert result == False\n \n # Test something which is meant to work successfully\n result = app.connect_to_api(5)\n assert isinstance(result, dict)", "def test_check_conn(self):\n # Valid hostnames and valid port numbers\n self.assertEquals(check_conn.check_conn('www.google.com', '80'), 0)\n self.assertEquals(check_conn.check_conn('www.google.com', 80), 0)\n\n # Valid hostnames and invalid port numbers\n self.assertEquals(check_conn.check_conn('www.google.com', \"80.\"), 1)\n self.assertEquals(check_conn.check_conn('www.google.com', '80.0'), 1)\n self.assertEquals(check_conn.check_conn('www.google.com', 'ssh'), 1)\n\n # Valid hostnames and port numbers that are accessible.\n self.assertEquals(check_conn.check_conn('www.google.com', \"80\"), 0)\n self.assertEquals(check_conn.check_conn('www.google.com', '443'), 0)\n self.assertEquals(check_conn.check_conn('www.google.com', 80), 0)\n\n # Valid hostnames and port numbers that are inaccessible.\n self.assertEquals(check_conn.check_conn('www.google.com', \"8080\"), 11)\n self.assertEquals(check_conn.check_conn('www.google.com', '22'), 11)\n self.assertEquals(check_conn.check_conn('www.google.com', 9999), 11)\n\n # Invalid hostnames and port numbers that are inaccessible.\n self.assertEquals(check_conn.check_conn('www.googlekjslkdjflaksdlfjldf.com', '8080'), 1)\n self.assertEquals(check_conn.check_conn('www.google.m', '22'), 1)\n self.assertEquals(check_conn.check_conn('www.google.', '9999'), 1)\n self.assertEquals(check_conn.check_conn('www.goo.cm', '80 ere 321 sdf 432 234'), 1)", "def test_list_requests(self):\r\n requests = self.request_manager.list_requests()\r\n self.assertEqual(len(requests), 0)\r\n \r\n helpers.create_dummy_request(self.request_manager, 1)\r\n helpers.create_dummy_request(self.request_manager, 2)\r\n helpers.create_dummy_request(self.request_manager, 3)\r\n\r\n requests = self.request_manager.list_requests()\r\n self.assertEqual(len(requests), 3)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_worker_connection_status_responder_spaces(self):\n pass", "def test_open_requests(self):\n\n requests = []\n\n # get the open requests\n requests = self.api.get_open_requests()\n\n # Compare the results, we only care now that we got 1 of them not the content\n self.assertEqual(1, len(requests))", "def show_connect_requests() -> object:\n with sqlite3.connect(\"database.db\") as conn:\n # Loads the list of connection requests and their avatars.\n requests = []\n avatars = []\n cur = conn.cursor()\n\n # Extracts incoming requests.\n cur.execute(\n \"SELECT Connection.user1, 
UserProfile.profilepicture FROM \"\n \"Connection LEFT JOIN UserProfile ON Connection.user1 = \"\n \"UserProfile.username WHERE user2=? AND connection_type=?;\",\n (session[\"username\"], \"request\"),\n )\n conn.commit()\n row = cur.fetchall()\n if len(row) > 0:\n for elem in row:\n requests.append(elem[0])\n avatars.append(elem[1])\n\n # Extracts connections.\n cur.execute(\n \"SELECT Connection.user1, UserProfile.profilepicture FROM \"\n \"Connection LEFT JOIN UserProfile ON Connection.user1 = \"\n \"UserProfile.username WHERE user2=? AND connection_type=?;\",\n (session[\"username\"], \"connected\"),\n )\n connections1 = cur.fetchall()\n cur.execute(\n \"SELECT Connection.user2, UserProfile.profilepicture FROM \"\n \"Connection LEFT JOIN UserProfile ON Connection.user2 = \"\n \"UserProfile.username WHERE user1=? AND connection_type=?;\",\n (session[\"username\"], \"connected\"),\n )\n connections2 = cur.fetchall()\n\n # Extracts pending requests.\n cur.execute(\n \"SELECT Connection.user2, UserProfile.profilepicture FROM \"\n \"Connection LEFT JOIN UserProfile ON Connection.user2 = \"\n \"UserProfile.username WHERE user1=? AND connection_type=?;\",\n (session[\"username\"], \"request\"),\n )\n pending_connections = cur.fetchall()\n\n # Extracts blocked users.\n cur.execute(\n \"SELECT Connection.user2, UserProfile.profilepicture FROM \"\n \"Connection LEFT JOIN UserProfile ON Connection.user2 = \"\n \"UserProfile.username WHERE user1=? AND connection_type=?;\",\n (session[\"username\"], \"block\"),\n )\n blocked_connections = cur.fetchall()\n\n # Extracts recommended connections.\n recommended_connections = helper_connections.get_recommended_connections(\n session[\"username\"]\n )\n mutual_avatars = []\n for mutual in recommended_connections:\n mutual_avatars.append(helper_profile.get_profile_picture(mutual[0]))\n\n # Lists usernames of all connected people.\n connections = connections1 + connections2\n # Adds a close friend to the list, and sorts by close friends first.\n connections = list(\n map(\n lambda x: (\n x[0],\n x[1],\n helper_connections.is_close_friend(session[\"username\"], x[0]),\n ),\n connections,\n )\n )\n connections.sort(key=lambda x: x[2], reverse=True)\n\n session[\"prev-page\"] = request.url\n return render_template(\n \"request.html\",\n requests=requests,\n avatars=avatars,\n allUsernames=helper_general.get_all_usernames(),\n requestCount=helper_connections.get_connection_request_count(),\n connections=connections,\n pending=pending_connections,\n blocked=blocked_connections,\n mutuals=recommended_connections,\n mutual_avatars=mutual_avatars,\n notifications=helper_general.get_notifications(),\n )", "def testI_requestStructure(self):\n _, campaignIds, requestIds = self._inject(20) # creates x documents / requests\n allRequests = self._getViewResults(\"all\") \n for req in allRequests:\n docId = req[u\"id\"]\n state = req[u\"key\"]\n # all requests should be NewlyHeld state\n self.assertEqual(state, \"NewlyHeld\")\n # check that the doc is well formed and matches the data we inserted\n doc = self.couch.document(docId) \n self.failUnless(doc[u\"state\"] == \"NewlyHeld\")\n self.failUnless(doc.has_key(u\"created\"))\n self.failUnless(doc.has_key(u\"timestamp\"))\n # description is a list of dictionaries, the first one is the initial message\n self.failUnless(\"Initial injection by the RequestManager\" in doc[u\"description\"][0].values())\n self.failUnless(doc[u\"request\"][u\"campaign_id\"] in campaignIds)\n self.failUnless(doc[u'request'][u'request_id'] in 
requestIds)", "def test_reconnect_route_request(self):\n pass", "def dummy_request(db_session):", "def test_sending_and_accepting_request(self):\n\n self.send_request()\n\n request_response_id = RequestResponse.list(\n self._API_CONTEXT,\n self._USER_ID,\n self._MONETARY_ACCOUNT_ID2\n ).value[self._FIRST_INDEX].id_\n\n self.accept_request(request_response_id)", "def test_connectWaitsForConnection(self):\n transports = []\n def connect():\n transports.append(\n gConnectTCP(\"whatever\", 9090, reactor=fakeReactor))\n\n class FakeReactor(object):\n def __init__(self):\n self.connections = []\n def connectTCP(self, host, port, factory):\n self.connections.append((host, port, factory))\n fakeReactor = FakeReactor()\n\n greenlet(connect).switch()\n self.assertEquals(transports, [])\n self.assertEquals(len(fakeReactor.connections), 1)\n self.assertEquals(fakeReactor.connections[0][0], \"whatever\")\n self.assertEquals(fakeReactor.connections[0][1], 9090)\n proto = fakeReactor.connections[0][2].buildProtocol(None)\n proto.makeConnection(FakeTransport()) # This is gonna switch back!\n self.assertEquals(transports, [proto.gtransport])", "def test_reusing_connection(self):\n conn_context = self.rpc.create_connection(new=False)\n conn1 = conn_context.connection\n conn_context.close()\n conn_context = self.rpc.create_connection(new=False)\n conn2 = conn_context.connection\n conn_context.close()\n self.assertEqual(conn1, conn2)", "def test_handle_request_2_c(self):\n ip_addr = self._session_ip_1\n user = self._user_1\n session = self.session_manager.create_session(ip_address=ip_addr, username=user)\n request = NWMRequest(session_secret=session.session_secret, version=2.0)\n\n dummy_scheduler_client = DummySchedulerClient(test_successful=True)\n self.handler._scheduler_client = dummy_scheduler_client\n\n response = asyncio.run(self.handler.handle_request(request=request), debug=True)\n self.assertEquals(response.job_id, dummy_scheduler_client.last_job_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for reconnect_all
def test_reconnect_all(self): pass
[ "def reconnect():\n disconnect()\n connect()", "def test_reconnect_route_request(self):\n pass", "def _reconnect(self):\n self._terminate()\n self._session_init()\n self._connect()", "def test_reconnect(self):\n self.transport.client_service.delay = 0\n old_protocol = self.protocol\n yield self.protocol.transport.loseConnection()\n yield deferLater(reactor, 0, lambda: None) # Let the reactor run.\n self.assertNotEqual(old_protocol, self.protocol)\n yield self.process_login_commands('username', 'password')", "def _with_reconnects(self, func, *args, **kwargs):\n for _ in range(self.max_reconnects):\n try:\n self._connect()\n return func(*args, **kwargs)\n\n except (\n ConnectionRefusedError,\n ConnectionResetError,\n socket.timeout,\n socket.gaierror,\n socket.herror,\n error_temp,\n error_perm,\n EOFError,\n OSError,\n ) as err:\n self.quit()\n last_err = err\n\n raise error_temp(\n f\"Failed after {self.max_reconnects} reconnect(s), \"\n f\"the last error was: {last_err}\"\n )", "def test_close_and_reconnect(self):\n assert self.client.is_active, 'Client must be active to test quit'\n\n self.client.close()\n\n assert not self.client.is_active, 'Client must be inactive following close call'\n\n self.client.reconnect()\n\n assert self.client.is_active, 'Client must be active after reconnecting'", "def reconnect_callback():\n return mock.MagicMock()", "def test_failover(self):\n pass", "def test_reconnect_if_mongodb_is_down(self):\n\n accounts_collection = sut.get_collection(\"accounts\")\n self.assertTrue(bool(accounts_collection))\n sut.disconnect()\n\n accounts_collection = sut.get_collection(\"accounts\")\n self.assertTrue(bool(accounts_collection))", "def test_reconnect_no_update(node_factory, executor, bitcoind):\n disconnects = [\"-WIRE_CHANNEL_READY\", \"-WIRE_SHUTDOWN\"]\n # Allow bad gossip because it might receive WIRE_CHANNEL_UPDATE before\n # announcement of the disconnection\n l1 = node_factory.get_node(may_reconnect=True, allow_bad_gossip=True)\n l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)\n\n # For channeld reconnection\n l1.rpc.connect(l2.info[\"id\"], \"localhost\", l2.port)\n\n # LightningNode.fundchannel will fund the channel and generate a\n # block. The block triggers the channel_ready message, which\n # causes a disconnect. 
The retransmission is then caused by the\n # automatic retry.\n fundchannel_exec = executor.submit(l1.fundchannel, l2, 10**6, False)\n if l1.config('experimental-dual-fund'):\n l1.daemon.wait_for_log(r\"dualopend.* Retransmitting channel_ready for channel\")\n else:\n l1.daemon.wait_for_log(r\"channeld.* Retransmitting channel_ready for channel\")\n sync_blockheight(bitcoind, [l1, l2])\n fundchannel_exec.result()\n l1.stop()\n\n # For closingd reconnection\n l1.daemon.start()\n # Close will trigger the -WIRE_SHUTDOWN and we then wait for the\n # automatic reconnection to trigger the retransmission.\n l1.rpc.close(l2.info['id'], 0)\n l2.daemon.wait_for_log(r\"channeld.* Retransmitting channel_ready for channel\")\n l1.daemon.wait_for_log(r\"CLOSINGD_COMPLETE\")", "def reconnect(self):\n self.call_action(\"WANIPConn1\", \"ForceTermination\")", "async def test_multiple_connections(self):\n with _patch_local_sources_watcher(), self._patch_app_session():\n await self.server.start()\n\n self.assertFalse(self.server.browser_is_connected)\n\n # Open a websocket connection\n ws_client1 = await self.ws_connect()\n self.assertTrue(self.server.browser_is_connected)\n\n # Open another\n ws_client2 = await self.ws_connect()\n self.assertTrue(self.server.browser_is_connected)\n\n # Assert that our session_infos are sane\n session_infos = self.server._runtime._session_mgr.list_active_sessions()\n self.assertEqual(2, len(session_infos))\n self.assertNotEqual(\n session_infos[0].session.id,\n session_infos[1].session.id,\n )\n\n # Close the first\n ws_client1.close()\n await asyncio.sleep(0.1)\n self.assertTrue(self.server.browser_is_connected)\n\n # Close the second\n ws_client2.close()\n await asyncio.sleep(0.1)\n self.assertFalse(self.server.browser_is_connected)", "def test_MockReconnectFailSocket(timeouts, tries, expected_result):\n sock = MockReconnectFailSocket(timeouts=timeouts)\n assert sock.connect_called_num == 0\n sock.connect()\n assert sock.connect_called_num == 1\n result = True # got connection\n for cycle in range(tries):\n try:\n sock.connect()\n except OSError:\n result = False\n else:\n result = True\n break\n finally:\n assert (\n sock.connect_called_num == cycle + 2\n ) # cycle is zero based plus initional connection\n assert result == expected_result", "def test_master_reset_connection(self):\n with mock.patch(\"locust.runners.FALLBACK_INTERVAL\", new=0.1):\n with mock.patch(\"locust.rpc.rpc.Server\", mocked_rpc(raise_on_close=False)) as server:\n master = self.get_runner()\n self.assertEqual(0, len(master.clients))\n server.mocked_send(Message(\"client_ready\", NETWORK_BROKEN, \"fake_client\"))\n self.assertTrue(master.connection_broken)\n server.mocked_send(Message(\"client_ready\", __version__, \"fake_client\"))\n sleep(1)\n self.assertFalse(master.connection_broken)\n self.assertEqual(1, len(master.clients))\n master.quit()", "def reconnect(self):\n self.properties['joined'] = False\n self.socket.close()\n self.connect()", "def reconnect(self):\n self.call_action('WANIPConn1', 'ForceTermination')", "def reconnect(self):\n self.exec_command(b'Disconnect')\n self.connect(self.last_host)", "def test_many_sockets(self):\n ctx = self.context\n for i in range(16):\n sockets = [ ctx.socket(zmqpy.REP) for i in range(65) ]\n [ s.close() for s in sockets ]\n # give the reaper a chance\n time.sleep(1e-2)\n ctx.term()\n for s in sockets:\n self.assertTrue(s.closed)", "def TestConnexionBddreuse(self):\r\n TestConnexionBdd()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for reconnect_route_request
def test_reconnect_route_request(self): pass
[ "def test_reconnect_all(self):\n pass", "def reconnect_callback():\n return mock.MagicMock()", "def test_reconnect(self):\n self.transport.client_service.delay = 0\n old_protocol = self.protocol\n yield self.protocol.transport.loseConnection()\n yield deferLater(reactor, 0, lambda: None) # Let the reactor run.\n self.assertNotEqual(old_protocol, self.protocol)\n yield self.process_login_commands('username', 'password')", "def test_connections_request(self):\n pass", "def retry_request():\r\n self.http_connect()\r\n self.connection.request(method, path, data, headers)\r\n return self.connection.getresponse()", "def test_register_route_request(self):\n pass", "def test_restart_failed_monitor():\n socket = MockReconnectFailSocket(\n mock_data=[\"first\\n\", \"\", \"second\\n\"], timeouts=16\n ) # just some timeouts\n fm = FritzMonitor()\n fm.start(\n sock=socket, reconnect_delay=0.001, reconnect_tries=5\n ) # set default explicit for clarity\n # give socket some time to lose connection:\n time.sleep(0.01)\n assert fm.is_alive is False\n assert fm.stop_flag.is_set() is False\n # dont' call stop here!\n # fm.stop()\n socket = MockSocket(timeout=0.01) # socket not losing connection\n # should not trigger a RuntimeError\n fm.start(\n sock=socket, reconnect_delay=0.001, reconnect_tries=5\n ) # set default explicit for clarity\n assert fm.is_alive is True\n fm.stop()", "def _reconnect(self):\n self._terminate()\n self._session_init()\n self._connect()", "def test_52_conn_link_route_proxy(self):\n if self.skip['test_52'] :\n self.skipTest(\"Test skipped during development.\")\n\n fs = ConnLinkRouteService(self.EA1.route_container,\n container_id=\"FakeService\",\n config=[(\"ConnLinkRoute1\",\n {\"pattern\": \"Conn/*/One\",\n \"direction\": \"out\"}),\n (\"ConnLinkRoute2\",\n {\"pattern\": \"Conn/*/One\",\n \"direction\": \"in\"})])\n self.assertEqual(2, len(fs.values))\n\n self.INT_B.wait_address(\"Conn/*/One\", count=2)\n self.assertEqual(2, len(self._get_address(self.INT_A, \"Conn/*/One\")))\n\n # between interiors\n out = self._test_traffic(self.INT_B.listener,\n self.INT_A.listener,\n \"Conn/BLAB/One\",\n count=5)\n self.assertIsNone(out, out)\n\n # edge to edge\n out = self._test_traffic(self.EB1.listener,\n self.EA1.listener,\n \"Conn/BLECH/One\",\n count=5)\n self.assertIsNone(out, out)\n fs.join()\n self.assertEqual(10, fs.in_count)\n self.assertEqual(10, fs.out_count)\n\n self._wait_address_gone(self.INT_A, \"Conn/*/One\")", "def test_MockReconnectFailSocket(timeouts, tries, expected_result):\n sock = MockReconnectFailSocket(timeouts=timeouts)\n assert sock.connect_called_num == 0\n sock.connect()\n assert sock.connect_called_num == 1\n result = True # got connection\n for cycle in range(tries):\n try:\n sock.connect()\n except OSError:\n result = False\n else:\n result = True\n break\n finally:\n assert (\n sock.connect_called_num == cycle + 2\n ) # cycle is zero based plus initional connection\n assert result == expected_result", "def reconnect():\n disconnect()\n connect()", "def test_failover(self):\n pass", "def test_route_added(self):\n resp = requests.get(self.req)\n self.assertEqual(resp.status_code, 200, 'Route was not added')", "def on_connect_failed(self):\n self.log.info('Failed to connect to %s with error %s, will retry in 10 seconds' % (self.address, self.socket.error))\n # Retry with a new address after 10 seconds\n #AsyncDelayed(self.connect, 10)()\n self.hooks.connect_failed(self)", "def test_reqrep(nsproxy, serializer, message, response):\n\n def rep_handler(agent, 
message):\n return response\n\n a0 = run_agent('a0')\n a1 = run_agent('a1')\n addr = a0.bind('REP', 'reply', rep_handler, serializer=serializer)\n a1.connect(addr, 'request')\n assert a1.send_recv('request', message) == response", "def reconnect(self):\n self.properties['joined'] = False\n self.socket.close()\n self.connect()", "def test_resend_event(self):\n pass", "def test_reaction_restart(self):\n HostLookup().add_node(\"node\", \"192.168.0.1\")\n rr = ReactionRestartNode(\"node\", 0)\n rr.execute_reaction()\n # wait a bit to get the response\n rospy.Rate(10).sleep()\n self.assertIn(\n \"restarting node node returned: restart-\", TestReaction.log)", "def test_terminate_thread_on_failed_reconnection(data, timeouts, tries, success):\n mock_socket = MockReconnectFailSocket(data, timeouts=timeouts)\n fm = FritzMonitor()\n fm.start(sock=mock_socket, reconnect_delay=0.001, reconnect_tries=tries)\n # give thread some time:\n time.sleep(0.01)\n if success:\n assert fm.is_alive is True\n else:\n assert fm.is_alive is False\n assert fm.monitor_thread is None\n fm.stop()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for register_route_request
def test_register_route_request(self): pass
[ "def test_route_added(self):\n resp = requests.get(self.req)\n self.assertEqual(resp.status_code, 200, 'Route was not added')", "def test_route_added_callback(self):\n self.ht.add_route('/blah/<param>', callback=dummy)\n\n resp = requests.get(self.ht.base + '/blah/12345')\n\n last_call = self.last_func()\n self.assertEqual(resp.status_code, 200, 'Route was not added')\n self.assertEqual(resp.content, 'dummy', 'Unexpected return val')\n self.assertIn('param', last_call['args'].keys(),\n 'args should contain param as key')\n self.assertIn('12345', last_call['args'].values(),\n 'args should contain 12345 as val')", "def test_route_added_method_callback(self):\n self.ht.add_route('/blah2/<param>', method='PUT', callback=dummy)\n\n resp = requests.put(self.ht.base + '/blah2/12345')\n\n last_call = self.last_func()\n self.assertEqual(resp.status_code, 200, 'Route was not added')\n self.assertEqual(resp.content, 'dummy', 'Unexpected return val')\n self.assertIn('param', last_call['args'].keys(),\n 'args should contain param as key')\n self.assertIn('12345', last_call['args'].values(),\n 'args should contain 12345 as val')", "def test_routes_added_params(self):\n req, _ = self.ht.add_route('/blah/<param>')\n resp = requests.get(req.replace('<param>', '12345'))\n\n self.assertEqual(resp.status_code, 200, 'Route was not added')\n\n last_call = self.last_func()\n self.assertIn('param', last_call['args'].keys(),\n 'args should contain param as key')\n self.assertIn('12345', last_call['args'].values(),\n 'args should contain 12345 as val')", "def test_routes(self):\n self.route_check('GithubEvents')", "def test_register_entry_point_handler_positive(self):\n ep = entry_point.EntryPoint(\"service\", \"method\")\n message_type = \"request\"\n method_name = \"method_name\"\n self.dispatcher.register(ep, message_type, method_name)\n self.assertEqual(self.dispatcher.handlers[message_type][str(ep)],\n method_name)", "def test_register_access(self):\n\n with HttpListener(settings.HTTP_AGENT_PORT) as listener:\n response = requests.post(\n \"https://localhost:%s/agent/register/\" % settings.HTTPS_FRONTEND_PORT, verify=False\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(listener.requests), 1)\n self.assertEqual(listener.last_request.path, \"/agent/register/\")", "def test_decorator_add_routes(self):\n self.last_func = getattr(self.ht, LAST)\n\n resp = requests.get(self.ht.base + '/test3')\n self.assertEqual(resp.status_code, 200, 'route was not added')\n self.assertEqual(resp.content, 'Return val')\n\n resp = requests.get(self.ht.base + '/test2')\n self.assertEqual(resp.status_code, 200, 'route was not added')\n last_call = self.last_func()\n self.assertIn('/test2', last_call['urlparts'], 'Route not called')", "def test_route_added_return_val(self):\n req, _ = self.ht.add_route('/blah3', callback='Return')\n\n resp = requests.get(req)\n\n self.assertEqual(resp.status_code, 200, 'Route was not added correctl')\n self.assertEqual(resp.content, 'Return', 'Return val incorrect')", "def test_request_init(self):\n\n\t\tself.assertEqual(self.request.path, '/index')\n\t\tself.assertEqual(self.request.method, 'GET')\n\t\tself.assertEqual(self.request._get_data, None)\n\t\tself.assertEqual(self.request._post_data, None)", "def test_route_added_method(self):\n req, _ = self.ht.add_route('/test2', method=\"DELETE\")\n resp = requests.delete(req)\n\n self.assertEqual(resp.status_code, 200, 'Route was not added')", "def test_register_is_resolved(self):\n\n url = reverse('register')\n 
self.assertEquals(resolve(url).func, register)", "def test_get_handler_positive(self):\n ep = entry_point.EntryPoint(\"service\", \"method\")\n message_type = \"request\"\n method_name = \"method_name\"\n self.dispatcher.register(ep, message_type, method_name)\n self.assertEqual(self.dispatcher.get_handler(ep, message_type),\n method_name)", "def test_get_dispatching_entry_point_for_request(self):\n headers = {\n \"source\": \"src_service.src_method\",\n \"destination\": \"dst_service.dst_method\",\n \"reply_to\": \"rpl_service.rpl_method\",\n \"correlation_id\": \"123\"\n }\n context = {}\n payload = {}\n msg = messages.IncomingRequest(headers, context, payload)\n ep = self.dispatcher._get_dispatching_entry_point(msg)\n self.assertEqual(ep, msg.destination)", "def test_route(self):\n\n\t\t@self.app.route('/index')\n\t\tdef decorator_handler():\n\t\t\tpass\n\n\t\tself.assertEqual(self.app.routes, {'/index': (decorator_handler, ['GET'])})", "def test_request_methods(self):\n gets = list_routes('GET')\n posts = list_routes('POST')\n no_posts = '/', 'about', 'faq', '/canaries'\n\n for route in gets:\n r = self.app.get(route, follow_redirects=True)\n self.assertEqual(r.status_code, 200)\n\n for route in posts:\n r = self.app.post(route, follow_redirects=True)\n \"\"\"Response can be 400 Bad Request since we don't send any\n data in the request.\"\"\"\n self.assertTrue(r.status_code == 200 or r.status_code == 400)\n\n for route in no_posts:\n r = self.app.post(route, follow_redirects=True)\n self.assertEqual(r.status_code, 405)\n\n # Test that PUT, DELETE are forbidden\n for route in list_routes():\n r = self.app.delete(route)\n self.assertEqual(r.status_code, 405)\n r = self.app.put(route)\n self.assertEqual(r.status_code, 405)", "def test_admin_resolve_request(self):\n headers = self.get_token_admin() \n\n # try resolve while request does not exist\n response1 = self.app_client.put('/api/v2/requests/1/resolve', headers=headers)\n self.assertEqual(response1.status_code, 404)\n\n self.insert_requests()\n\n #test resolve while ID exists\n response = self.app_client.put('/api/v2/requests/1/resolve', headers=headers)\n print(response)\n self.assertEqual(response.status_code, 200)", "def test_returns_true_if_request_has_get_parameter(self):\n self.request_mock.GET = {self.parameter_name: 'foobar'}\n self.assertTrue(self.has_parameter(self.get_response_mock, self.request_mock))", "def test_new_registration_view_exists(self):\n url = ''\n\n try:\n url = reverse('rango:register')\n except:\n pass\n \n self.assertEqual(url, '/rango/register/', f\"{FAILURE_HEADER}Have you created the rango:register URL mapping correctly? It should point to the new register() view, and have a URL of '/rango/register/' Remember the first part of the URL (/rango/) is handled by the project's urls.py module, and the second part (register/) is handled by the Rango app's urls.py module.{FAILURE_FOOTER}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for status_request
def test_status_request(self): pass
[ "def test_verify_new_request_status(self):\r\n request = helpers.create_dummy_request(self.request_manager, 1)\r\n self.assertEquals(request.status, RequestStatus.UNASSIGNED)", "def test_b_check_status_is_returned(self):\n self.assertTrue(self.status.is_returned(), \"The awaited status is returned, the current status is {}\".format(self.status.get_status()))", "def _checkStatus(self, name, attrs):\n if name == \"ResponseData\":\n self.returnStatus = attrs[\"status\"]", "def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Waiting for files.')\n self.assertEqual(response.status_code, 200)", "def check_status(req, rep, status_codes={200, 201}):\n if not isinstance(status_codes, (list, set)):\n status_codes = [status_codes]\n status_code = rep.getcode()\n if status_code not in status_codes:\n # TODO: log information about the request and the response to help\n # diagnostics...\n raise Exception(\n 'HTTP %d was unexpected (expecting one of %r).' % (\n status_code,\n status_codes,\n )\n )", "def test_status_code_transitions(self):\n job_id = MockJobManagerOne.create_new()\n MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)\n\n with self.assertRaisesRegexp(Exception, 'Invalid status code change'):\n MockJobManagerOne.register_completion(job_id, ['output'])\n with self.assertRaisesRegexp(Exception, 'Invalid status code change'):\n MockJobManagerOne.register_failure(job_id, 'error')", "def test_set_job_status(self):\n response = self.client.patch(\n self.url, headers=self.headers,\n data=json.dumps(self.patch_request_body)\n )\n self.assertEqual(200, response.status_code)\n response = self.client.get(self.url, headers=self.headers)\n self.assertEqual(response.status_code, 200)\n response_body = json.loads(response.data.decode('utf-8'))\n self.assertEqual('WORKING', response_body['data']['status'])", "def test_get_responce(self):\n self.assertEqual(self.r.status_code, 200)", "def test_set_job_status_null_request(self) -> None:\n response = self.client.patch(\n self.url, headers=self.headers,\n data=json.dumps(self.patch_request_body_null_results)\n )\n self.assertEqual(200, response.status_code)\n response = self.client.get(self.url, headers=self.headers)\n response_body = json.loads(response.data.decode('utf-8'))\n self.assertEqual('WORKING', response_body['data']['status'])", "def test_statusHistoryMining() -> json:\r\n\r\n # Action\r\n status, result = u.statusHistoryMining()\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, result)", "def test_create_run_status(self):\n pass", "def logStatus(which, what, status):\n if status == requests.codes.ok:\n desc = 'OK'\n elif status == requests.codes.not_modified:\n desc = 'Not Modified'\n else:\n desc = 'Server couldn\\'t fulfill the request'\n print '## {0} ## {1} {2} {3}'.format(which, what, status, desc)", "def test_get_task_status(self):\n pass", "def __status(response_json):\n\t\tif not response_json:\n\t\t\traise ApiErrorException('Response Api is None, cannot fetch the status of api')\n\n\t\tstatus = response_json.get('status')\n\n\t\tassert status is not None, \\\n\t\t\t'Response Status is not Available'\n\n\t\tassert status.get('code') == requests.codes.ok, \\\n\t\t\t'Response status not clear, should be any error occurred: 
{}'.format(status.get('description'))", "def test_server_status(self):\n self.assert_(False)", "def test_change_status_missing_parameter(self):\n self.login()\n\n created_todo = create_todo()\n todo_id = created_todo.id\n pristine_status = created_todo.mark_completed\n\n response = self.client.post(url_for('alaya_todo.todo_change_status'))\n\n self.assert200(response)\n\n response_dict = json.loads(response.data)\n\n # Checking the expected values in the response\n self.assertFalse(response_dict['success'], 'The success key must be False')\n self.assertEqual(response_dict['status'], 400, 'The status key must be 400.')\n self.assertEqual(response_dict['message'], 'The identifier of the task is required to update the status.',\n 'The response messages must math.')\n\n # Checking the database changes\n updated_todo = load_todo(todo_id)\n self.assertEqual(pristine_status, updated_todo.mark_completed, 'The mark_completed properties must match.')\n\n delete_todo(todo_id)\n\n self.logout()", "def test_status_forcelist_2(self):\n\n url = 'http://localhost:7654/status_code=500'\n \n # start counting the number of requests received\n self.http_server.reset_counter()\n\n res = obstinate.oget(url, o_status_forcelist=['501'],\n o_max_attempts=2)\n\n self.assertEqual(1, self.http_server.counter())", "def test_boards_view_status_code(self):\n self.assertEquals(self.response.status_code, 200)", "def test_api(self):\n view = StatusView.as_view()\n rqst = RequestFactory().get('status/',)\n force_authenticate(rqst, user=self.user)\n\n # Correct call\n resp = view(rqst, **{StatusView.MODEL_REF: GeneralStatus})\n self.assertEqual(resp.data, {'class': 'GeneralStatus', 'values': {'COMPLETE': {'key': 30, 'name': 'COMPLETE', 'label': 'Complete', 'color': 'success'}, 'PENDING': {'key': 10, 'name': 'PENDING', 'label': 'Pending', 'color': 'secondary'}, 'PLACED': {'key': 20, 'name': 'PLACED', 'label': 'Placed', 'color': 'primary'}}})\n\n # No status defined\n resp = view(rqst, **{StatusView.MODEL_REF: None})\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(str(resp.rendered_content, 'utf-8'), '[\"StatusView view called without \\'statusmodel\\' parameter\"]')\n\n # Invalid call - not a class\n with self.assertRaises(NotImplementedError) as e:\n resp = view(rqst, **{StatusView.MODEL_REF: 'invalid'})\n self.assertEqual(str(e.exception), \"`status_class` not a class\")\n\n # Invalid call - not the right class\n with self.assertRaises(NotImplementedError) as e:\n resp = view(rqst, **{StatusView.MODEL_REF: object})\n self.assertEqual(str(e.exception), \"`status_class` not a valid StatusCode class\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for unregister_router
def test_unregister_router(self): pass
[ "def delete_router(router):\n return IMPL.delete_router(router)", "def remove_gateway_router(router):\n return IMPL.remove_gateway_router(router)", "def remove_interface_router(router, body=None):\n return IMPL.remove_interface_router(router, body)", "def test_2_remove_router_B(self):\n\n # First make sure there are no inter-router connections on router C\n outs = self.run_qdstat(['--connections'], address=self.routers[2].addresses[1])\n\n inter_router = 'inter-router' in outs\n self.assertFalse(inter_router)\n\n # Kill the router B\n FailoverTest.routers[0].teardown()\n\n # Schedule a test to make sure that the failover url is available\n # and Router C has an inter-router connection\n self.schedule_B_to_C_failover_test()\n\n while not self.can_terminate():\n pass\n\n self.assertTrue(self.success)", "def _router_removed(self, router_id, deconfigure=True):\n ri = self.router_info.get(router_id)\n if ri is None:\n LOG.warning(_LW(\"Info for router %s was not found. \"\n \"Skipping router removal\"), router_id)\n return\n ri.router['gw_port'] = None\n ri.router[l3_constants.INTERFACE_KEY] = []\n ri.router[l3_constants.FLOATINGIP_KEY] = []\n try:\n if deconfigure:\n self._process_router(ri)\n driver = self._drivermgr.get_driver(router_id)\n driver.router_removed(ri, deconfigure)\n self._drivermgr.remove_driver(router_id)\n del self.router_info[router_id]\n self.removed_routers.discard(router_id)\n except cfg_exceptions.DriverException:\n LOG.warning(_LW(\"Router remove for router_id: %s was incomplete. \"\n \"Adding the router to removed_routers list\"), router_id)\n self.removed_routers.add(router_id)\n # remove this router from updated_routers if it is there. It might\n # end up there too if exception was thrown earlier inside\n # `_process_router()`\n self.updated_routers.discard(router_id)", "def test_force_delete_logical_router(self):\n router = self.get_mocked_resource()\n uuid = test_constants.FAKE_ROUTER['id']\n router.delete(uuid, True)\n test_client.assert_json_call(\n 'delete', router,\n 'https://1.2.3.4/api/v1/logical-routers/%s?force=True' % uuid,\n headers=self.default_headers())", "def _remove_router_from_agent_callback(to_agent_id, router_id):\n response = (yield)\n\n _remove_router_from_agent_callback_body(to_agent_id, router_id, response)", "def test_delete_destination(self):\n router_config = self.create_router_config()\n resp = yield self.post('/routers/', router_config)\n router_id = (yield resp.json())['result']['id']\n\n dest_config = self.create_destination_config()\n resp = yield self.post(\n '/routers/{}/destinations/'.format(router_id), dest_config)\n destination_id = (yield resp.json())['result']['id']\n\n router_worker = self.api.service.namedServices[router_id]\n self.assertEqual(len(router_worker.config['destinations']), 1)\n\n resp = yield self.delete(\n '/routers/{}/destinations/{}'.format(router_id, destination_id))\n self.assert_response(resp, http.OK, 'destination deleted', {})\n\n router_worker = self.api.service.namedServices[router_id]\n self.assertEqual(len(router_worker.config['destinations']), 0)", "def _stop_router(self):\n try:\n with open(Two1Composer.COMPOSE_FILE, 'r') as f:\n container_name = yaml.load(f)['services']['router']['container_name']\n if \"router\" in [i[\"Names\"][0].strip(\"/sell_\") for\n i in self.docker_client.containers()]:\n self.docker_client.stop(container_name)\n self.docker_client.remove_container(container_name)\n except Exception as e:\n raise Two1ComposerRouterException()", "def test_remove_gateway(self):\n pass", "def 
testUnregisterViewWithRegisteredView(self):\r\n self.views.register_view(self.mockView)\r\n self.assertIn(self.mockView, self.views.views)\r\n self.views.unregister_view(self.mockView)\r\n self.assertNotIn(self.mockView, self.views.views)", "def unregister_middleware(self, scheme, middleware):\r\n ...", "def test_delete_route(self):\n self.ht.delete_route(self.test_route, self.method)\n resp = requests.get(self.req)\n msg = 'route %s has not been removed' % self.test_route\n self.assertEqual(resp.status_code, 404, msg)", "def test_unregister_endpoint(self):\n path_parts = [\n ezdiscovery.NAMESPACE,\n 'foo',\n 'bar',\n ezdiscovery.ENDPOINTS,\n 'localhost:' + str(8080)\n ]\n path = '/'.join(path_parts)\n self.client.ensure_path(path)\n\n ezdiscovery.unregister_endpoint('foo', 'bar', 'localhost', 8080)\n self.assertFalse(self.client.exists(path))\n\n # Make sure the parent paths got cleared out as well (except the top\n # namespace).\n self.assertFalse(self.client.exists('/'.join(path_parts[:-1])))\n self.assertFalse(self.client.exists('/'.join(path_parts[:-2])))\n self.assertFalse(self.client.exists('/'.join(path_parts[:-3])))\n self.assertTrue(self.client.exists('/'.join(path_parts[:-4])))", "def _destroy_tunnel(self, reg_req_packet):\n\n tid = self._get_binding_id(reg_req_packet.home_address)\n _destroy_interface(name=\"mip\"+str(tid))", "def _stop_router(self):\n if self._have_message_router:\n logger.debug(f\"{self} stopping message router\")\n for wrapper in self._wrappers:\n self._message_router.close_pipe(wrapper.receiver)\n self._message_router.stop()\n self._message_router = None\n self._have_message_router = False\n logger.debug(f\"{self} message router stopped\")", "def test_delete_logical_router_port(self):\n lrport = self._mocked_lrport()\n\n uuid = test_constants_v3.FAKE_ROUTER_PORT['id']\n lrport.delete(uuid)\n test_client.assert_json_call(\n 'delete', lrport,\n 'https://1.2.3.4/api/v1/logical-router-ports/%s' % uuid)", "def test_process_deregister_remote_system(self):\n error, out = self.process_deregister_remote_system()\n for err in error: assert err == 0", "def test_unregister_node(self) -> None:\n # Prepare\n state: State = self.state_factory()\n node_id = 2\n\n # Execute\n state.register_node(node_id)\n state.unregister_node(node_id)\n retrieved_node_ids = state.get_nodes()\n\n # Assert\n assert len(retrieved_node_ids) == 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of clips for a user.
def _get_clips(self, user_id, user_name, client_id=None, oauth_token=None): logging.info("Getting clips for %s", user_name) clip_headers = {} if client_id is not None: clip_headers['Client-ID'] = client_id if oauth_token is not None: clip_headers['Authorization'] = f'Bearer {oauth_token}' clip_params = { 'broadcaster_id': user_id, 'started_at': self.started_at, 'ended_at': self.ended_at, 'first': 100, } resp = requests.get(f'https://api.twitch.tv/helix/clips', headers=clip_headers, params=clip_params) resp_json = resp.json() if resp.status_code >= 400: logging.error("Error when getting clips of streamer %s: %s", user_name, resp_json['message']) resp.raise_for_status() clips_json = resp_json['data'] clips = [] for clip_json in clips_json: clip = Clip.construct_from(clip_json) logging.debug("Adding clip %s", clip['id']); clips.append(clip) logging.info("Got %s clip(s) from streamer %s", len(clips), user_name) return clips
[ "def get_clips(self, client_id=None, oauth_token=None):\n logging.info(\"Getting clips\")\n self.client = TwitchHelix(client_id=client_id, oauth_token=oauth_token)\n total_clips = []\n for user in self.users_list:\n clips = self._get_clips(user['_id'], user['name'],\n client_id, oauth_token)\n good_clips = self._get_good_clips(clips)\n logging.info(\"Found %s good clip(s) for %s\", len(good_clips),\n user['name'])\n if good_clips:\n total_clips.extend(good_clips)\n logging.info(\"Got %s clips\", len(total_clips))\n return total_clips", "def list_clips():\n\n # if user not authed, start auth proccess\n if not authed():\n return redirect('/auth')\n\n # querying clips from database ordered by total likes\n clips = db.session.query(Clip, func.count(Like.user_id).label('total')).join(Like, isouter=True).group_by(Clip).order_by('total', Clip.created_at).all()\n\n # we need the current user's likes for visual representation of what they already liked\n user = get_user_dict(session['access_token'])\n\n # if user is non, token is bad. start auth\n if user is None:\n return redirect('/auth')\n\n # getting the user from our database to see what posts they like\n user = User.query.get(user.get('id'))\n\n return render_template('clips.html', clips=clips, likes=user.likes)", "def get_clips_list(page=1, results_per_page=5, timeout=60 * 5):\n return get_cached_api_response(\n 'CLIPS:::%d' % page, timeout,\n APIClient(**settings.API_CLIENT).get_clips,\n limit=results_per_page, offset=(page - 1) * results_per_page)", "def get_collection(cls, user_id: int) -> typing.List[typing.Dict[str, str]]:\n return cls._get(sql.Card.collection(), user_id=user_id)", "def getPlaylists(self, user=None):\n pass", "async def clipList(ctx):\n clipList = [clip[:clip.find('.')] for clip in os.listdir(\"./clips\")]\n for hunned in range(0,len(clipList), 100):\n await ctx.send(str(clipList[hunned:hunned+100]))\n return", "def get_all_circles(self, user_id):\n return self._mk.Circle.find({'owner':unicode(user_id)})", "def get_user_playlists(user_id):\n u = User.load(user_id)\n show_collection(Playlist.for_user(u))", "def get_user_permissions(user=None):\n if user is not None:\n return_list = []\n user_query = users.select().where(users.username == user).get()\n for item in permissions.select().where(permissions.user_id == user_query.id):\n return_list.append(item.zone_id)\n return return_list", "def get_menu(cls, user_id: str) -> list:\n query_data = cls.get_query().filter_by(user_id=user_id).all()\n\n menu = [data.to_dict() for data in query_data]\n\n return menu", "def read_dogs_of_owner(self, user_id) -> list:\n return {\n \"command\": \"read_dogs_of_owner\",\n \"kwargs\": {\n \"id\": user_id\n }\n }", "def ex_list_user_drives(self):\r\n response = self.connection.request(action='/drives/detail/').object\r\n drives = [self._to_drive(data=item) for item in response['objects']]\r\n return drives", "def get_user_trips(self):\n trips = []\n for item in self.list():\n try:\n trip = Trip(**item.val())\n trip._id = item.key()\n trips.append(trip)\n except Exception:\n Logger.exception(\"get_user_trips\")\n\n return trips", "def get_queryset(self):\n user = self.request.user\n return Trip.objects.filter(owner=user)", "def ipmiUserList():\n logging.debugv(\"functions/linux.py->ipmiUserList()\", [])\n\n cmd = locations.IPMITOOL + ' -I open user list | awk \\'{print $1\" \"$2}\\''\n logging.debug(cmd)\n\n users = os.popen(cmd)\n users.readline()\n choices = []\n for line in users.readlines():\n (id, user) = line.split()\n choices += 
[(id, user)]\n return choices", "def get_all(user_id):\n return BucketList.query.filter_by(created_by=user_id)", "def get_visible_courses(user):\n if user.is_superuser:\n return Course.objects.all()\n else:\n return Coordinator.objects.filter(user=user).values_list(\"course\")", "def splitClips(clips, minDuration):\n doneBuckets = distabutor(clips, minDuration)\n\n outputa = list(map(lambda x: list(map(lambda y: y[\"clip\"], x)), doneBuckets))\n \n return (outputa)", "async def list_climbs(db: Session = Depends(get_db)):\n return crud.get_climbs(db)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the view count of the video that a clip was created from.
def _get_clip_video_views(self, clip): logging.info("Getting video views for clip %s", clip['id']) if clip['video_id'] == '': logging.info("Video couldn't be found for clip %s. Default to " "900.", clip['id']) return 900 # Default video views video = self.client.get_videos(video_ids=[clip['video_id']])[0] logging.info("Video %s for clip %s has %s view(s)", clip['video_id'], clip['id'], video.view_count) return video.view_count
[ "def video_count(self) ->int:\n return int(self._statistics.get('videoCount'))", "def view_count(self) -> int:\n return int(self.statistics.get('viewCount'))", "def _get_clip_rating(self, clip_views, video_views):\n return clip_views / (video_views/9 + 100)", "def cameraCount(self):\r\n return self.dll.PvCameraCount()", "def GetNumFrames(vid_path):\n cap = cv2.VideoCapture(vid_path)\n total_frames = cap.get(7)\n cap.release()\n return int(total_frames)", "def count() -> int:\n return _api_calls.get(Inner._VIDEO_SAMPLE_ENDPOINT + \"count\").json()", "def view_count(self):\n return len(self.signal_views)", "def _get_controlPointCountV(self) -> \"int\" :\n return _core.NurbsSurface__get_controlPointCountV(self)", "def getNumFrames(self) -> retval:\n ...", "def get_n_videos(self):\n return len(self._train_videos_names + self._test_videos_names)", "def get_n_captions_per_video(self):\n return self._n_captions_video", "def play_count(self):\n return len(self._played)", "def vid_len(path):\n return int(cv2.VideoCapture(path).get(cv2.CAP_PROP_FRAME_COUNT))", "def upcoming_shows_count(self):\n return len(self.upcoming_shows)", "def frame_count(self) -> int:\n return len(self.track_frames)", "def frames(self):\n f = 0\n if self.isVideo() or self.isAudio():\n if 'nb_frames' in self.__dict__:\n try:\n f = int(self.__dict__['nb_frames'])\n except Exception as e:\n pass\n return f", "def num_times_solution_viewed(self) -> int:\n return self.num_times_solution_viewed_v2", "def movie_count(self):\r\n total_movie_count = 0\r\n for line in self.movies:\r\n total_movie_count += 1\r\n return total_movie_count", "def movie_watched_count(self):\r\n watched_count = 0\r\n for line in self.movies:\r\n if line.is_watched:\r\n watched_count += 1\r\n return watched_count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a rating given the view count of a clip and a video.
def _get_clip_rating(self, clip_views, video_views): return clip_views / (video_views/9 + 100)
[ "def _get_clip_video_views(self, clip):\n logging.info(\"Getting video views for clip %s\", clip['id'])\n if clip['video_id'] == '':\n logging.info(\"Video couldn't be found for clip %s. Default to \"\n \"900.\", clip['id'])\n return 900 # Default video views\n video = self.client.get_videos(video_ids=[clip['video_id']])[0]\n logging.info(\"Video %s for clip %s has %s view(s)\", clip['video_id'],\n clip['id'], video.view_count)\n return video.view_count", "def get_rating(self):\n rating = 0\n ratings = Rating.objects.filter(video__room=self)\n for r in ratings:\n if r.positive_rating:\n rating = rating + 1\n else:\n rating = rating -1 \n return rating", "def vid_stats(vid_id,youtube_api):\r\n video_statistics = youtube_api.videos().list(id=vid_id,part='statistics').execute()\r\n stat_dict=video_statistics['items'][0]['statistics']\r\n if 'likeCount' in stat_dict.keys():\r\n likes=int(stat_dict['likeCount'])\r\n else:\r\n likes=0\r\n if 'dislikeCount' in stat_dict.keys():\r\n dislikes=int(stat_dict['dislikeCount'])\r\n else:\r\n dislikes=0\r\n if dislikes==0:\r\n likes_dislike_ratio = likes\r\n else:\r\n likes_dislike_ratio=float(likes/dislikes)\r\n if 'commentCount' in stat_dict.keys():\r\n commentCount = int(stat_dict['commentCount'])\r\n else:\r\n commentCount = -1\r\n view_cnt = int(stat_dict['viewCount'])\r\n return likes_dislike_ratio, commentCount, view_cnt", "def video_count(self) ->int:\n return int(self._statistics.get('videoCount'))", "def get_most_popular_talks_by_like_ratio(videos):\r\n return sorted(videos, key=lambda x: (int(x.metrics['likeCount']) - int(x.metrics['dislikeCount'])) / int(x.metrics['viewCount']), reverse=True)", "def vote_clip(id):\n\n # getting the clip from databse to like\n clip = Clip.query.filter_by(id=id).first()\n\n # getting the current user\n user = get_user_dict(session['access_token'])\n\n # creating the like from clip and user id\n like = Like(user_id = user.get('id'), clip_id = id)\n\n # trying to add the like to database\n try:\n db.session.add(like)\n db.session.commit()\n except exc.IntegrityError:\n db.session.rollback()\n return jsonify('failed')\n \n return jsonify('success')", "def get_movie_ratings(movie):\n movie = (movie.lower()).replace(\" \", \"_\")\n URL = \"https://www.rottentomatoes.com/m/\" + movie\n try:\n page = requests.get(URL)\n if not page:\n raise Exception(page.status_code)\n except Exception as e:\n print(\"Cannot Find Movie!\" + str(e))\n sys.exit(0)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n ratings = soup.find_all(\"span\", class_=\"mop-ratings-wrap__percentage\")\n critic = soup.find_all(\n \"p\", class_=\"mop-ratings-wrap__text mop-ratings-wrap__text--concensus\"\n )\n\n print(\"Critic Consensus: \", (critic[0].get_text()).strip())\n print()\n print(\"TOMATOMETER: \", (ratings[0].get_text()).strip())\n print(\"AUDIENCE SCORE: \", (ratings[1].get_text()).strip())\n\n return 1", "def get_most_popular_talks_by_like_ratio(videos):\n return sorted(\n videos,\n key=lambda x: (int(x.metrics[\"likeCount\"]) - int(x.metrics[\"dislikeCount\"]))\n / int(x.metrics[\"viewCount\"]),\n reverse=True,\n )", "def parse_video(obj):\n\tvideo_name, _id = obj\n\tscore = 0\n\twith tempfile.TemporaryDirectory() as s3dir:\n\t\tvp = os.path.join(s3dir, video_name)\n\t\ts3.Bucket(BUCKET_NAME).download_file(\n\t\t\tvideo_name, vp)\n\t\tscores = read_frames(vp)\n\t\tframestamps = [i for i in range(0, len(scores))]\n\n\t\treturn framestamps, scores, compute_score(scores)", "def video(obj):\n return match(obj, video_matchers)", 
"def predict_rating(self, movie):\n\n other_ratings = movie.ratings\n\n similarities = [\n (self.similarity(r.user), r)\n for r in other_ratings\n ]\n\n similarities.sort(reverse=True)\n\n similarities = [(sim, r) for sim, r in similarities if sim > 0]\n\n if not similarities:\n return None\n\n numerator = sum([r.score * sim for sim, r in similarities])\n denominator = sum([sim for sim, r in similarities])\n\n return numerator/denominator\n\n\n #this is the one we wrote", "def score_views(self, trainer, volume):\n volumes_pool = []\n views_pool = []\n view_scores = []\n\n for i in range(self.views_sample_pool_size):\n view, rotated_volume, discriminator_score = self.get_a_view(trainer, volume)\n views_pool.append(view)\n view_scores.append(discriminator_score)\n volumes_pool.append(rotated_volume)\n\n view_scores = torch.cat(view_scores, 1)\n volumes_pool = torch.stack(volumes_pool, 1)\n views_pool = torch.stack(views_pool, 1)\n\n return volumes_pool, views_pool, view_scores", "def weight_by_patients(video1, video2):\n\n edge_weight = 0\n\n for patient, watch_count in vid_to_patient_tuples[video1]:\n\n # add weight via diminishing returns function on repeat views by the same patient\n edge_weight += ( 1/2 ) ** ( watch_count - 1 )\n\n return edge_weight", "def scores_vs_rating():\n\n rating_comparison = {\n 1: [], 2: [], 3: [], 4: [], 5: []\n }\n\n rating_key = \"like_rating_specific\"\n\n for user, session in Session.get_users_with_surveys():\n\n boundary = HistogramBoundary(user)\n\n survey = user.get_survey()\n\n for playlist_index, playlist in enumerate(session.recommendations):\n survey_ratings = survey[f\"playlist{playlist_index+1}\"][rating_key]\n\n for track_index, track in enumerate(playlist[\"tracks\"]):\n\n track_rating, _ = boundary.get_boundary_score(track)\n\n survey_rating = int(survey_ratings[f'Song{track_index + 1}'])\n\n rating_comparison[survey_rating].append(track_rating)\n\n result_string = \"\"\n\n for rating_bin, scores in rating_comparison.items():\n result_string += f\"{rating_bin}: {statistics.mean(scores):.3f}, \"\n result_string = result_string[:-2]\n print(result_string)\n\n for rating_bin, scores in rating_comparison.items():\n\n plt.hist(scores, bins=20)\n plt.title(f\"Rating: {rating_bin} (total: {len(scores)})\")\n plt.xlim((0.0, 8.0))\n plt.show()\n\n t_tests = {}\n for i in range(1, 6):\n t_tests[i] = {}\n for j in range(1, 6):\n if i != j:\n\n t_test_score = ttest_ind(\n rating_comparison[i], # [:min_amount],\n rating_comparison[j], # [:min_amount],\n equal_var=False\n )\n t_tests[i][j] = t_test_score[1]\n\n pprint(t_tests)", "def __count_and_average_ratings(self):\n logger.info(\"Counting kindle ratings...\")\n self.rating_count= self.datas.count()", "def video_rating(self, video_rating):\n if video_rating is not None and len(video_rating) > 5:\n raise ValueError(\"Invalid value for `video_rating`, length must be less than or equal to `5`\") # noqa: E501\n\n self._video_rating = video_rating", "def get_rating(full_review):\n regex = r\"ui_bubble_rating bubble_(\\d)0\"\n rating = re.search(regex, str(full_review)).group(1)\n return rating", "def get_most_popular_talks_by_views(videos):\n return sorted(videos, key=lambda x: int(x.metrics[\"viewCount\"]), reverse=True)", "def content_ratings(self, tv_id):\n return self._request_obj(\n self._urls[\"content_ratings\"] % tv_id,\n key=\"results\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a subset of 'good' clips from a list of clips.
def _get_good_clips(self, clips): logging.info("Getting good clips from %s clip(s)", len(clips)) good_clips = [] for clip in clips: if (self.lang is None or clip['language'] in self.lang): logging.debug("Clip %s by %s has %s views", clip['id'], clip['broadcaster_name'], clip['view_count']) video_views = self._get_clip_video_views(clip) clip['rating'] = self._get_clip_rating(clip['view_count'], video_views) logging.info("Clip %s rating %s", clip['id'], clip['rating']) if clip['rating'] >= 1: logging.info("Clip %s is 'good'", clip['id']) good_clips.append(clip) else: logging.debug("Clip %s by %s doesn't isn't lang %s", clip['id'], clip['broadcaster_name'], self.lang) return good_clips
[ "def get_clips(self, client_id=None, oauth_token=None):\n logging.info(\"Getting clips\")\n self.client = TwitchHelix(client_id=client_id, oauth_token=oauth_token)\n total_clips = []\n for user in self.users_list:\n clips = self._get_clips(user['_id'], user['name'],\n client_id, oauth_token)\n good_clips = self._get_good_clips(clips)\n logging.info(\"Found %s good clip(s) for %s\", len(good_clips),\n user['name'])\n if good_clips:\n total_clips.extend(good_clips)\n logging.info(\"Got %s clips\", len(total_clips))\n return total_clips", "def delete_clips_with_low_views(clips_to_check, min_number_of_views):\n indices_to_delete = set()\n for index, clip_to_check in enumerate(clips_to_check):\n if clip_to_check['views'] < min_number_of_views:\n indices_to_delete.add(index)\n return delete_clips_from_list(clips_to_check, indices_to_delete)", "def delete_excess_clips(clips):\n indices_to_delete = set()\n combined_clip_time_seconds = 0\n logger.info(\"finding excess clips to delete\")\n # sort clips in order of views\n clips = sorted(clips, key=lambda k: k['views'], reverse=True)\n\n # iterate through the list until the max length is reached (10 minutes)\n for index, clip in enumerate(clips):\n if combined_clip_time_seconds >= 600:\n indices_to_delete.add(index)\n continue\n combined_clip_time_seconds = combined_clip_time_seconds + int(clip['duration'])\n logger.info(\"combined_clip_time_seconds=%s\", combined_clip_time_seconds)\n logger.info(\"excess clip indices to delete=%s\", str(indices_to_delete))\n if combined_clip_time_seconds < 60:\n logger.info(\"Not enough time in clips, returning nothing, combined_clip_time_seconds=%s\"\n , combined_clip_time_seconds)\n clips = []\n return delete_clips_from_list(clips, indices_to_delete)", "def splitClips(clips, minDuration):\n doneBuckets = distabutor(clips, minDuration)\n\n outputa = list(map(lambda x: list(map(lambda y: y[\"clip\"], x)), doneBuckets))\n \n return (outputa)", "def _get_clips(self, user_id, user_name, client_id=None, oauth_token=None):\n logging.info(\"Getting clips for %s\", user_name)\n clip_headers = {}\n if client_id is not None:\n clip_headers['Client-ID'] = client_id\n if oauth_token is not None:\n clip_headers['Authorization'] = f'Bearer {oauth_token}'\n clip_params = {\n 'broadcaster_id': user_id,\n 'started_at': self.started_at,\n 'ended_at': self.ended_at,\n 'first': 100,\n }\n resp = requests.get(f'https://api.twitch.tv/helix/clips',\n headers=clip_headers, params=clip_params)\n resp_json = resp.json()\n\n if resp.status_code >= 400:\n logging.error(\"Error when getting clips of streamer %s: %s\",\n user_name, resp_json['message'])\n resp.raise_for_status()\n\n clips_json = resp_json['data']\n clips = []\n for clip_json in clips_json:\n clip = Clip.construct_from(clip_json)\n logging.debug(\"Adding clip %s\", clip['id']);\n clips.append(clip)\n logging.info(\"Got %s clip(s) from streamer %s\", len(clips), user_name)\n return clips", "async def clipList(ctx):\n clipList = [clip[:clip.find('.')] for clip in os.listdir(\"./clips\")]\n for hunned in range(0,len(clipList), 100):\n await ctx.send(str(clipList[hunned:hunned+100]))\n return", "def get_clips_list(page=1, results_per_page=5, timeout=60 * 5):\n return get_cached_api_response(\n 'CLIPS:::%d' % page, timeout,\n APIClient(**settings.API_CLIENT).get_clips,\n limit=results_per_page, offset=(page - 1) * results_per_page)", "def delete_clips_with_close_times(current_clip, clips_to_check):\n tolerance = 30\n need_to_delete = False\n index_to_delete = 
clips_to_check.index(current_clip)\n indices_to_delete = set()\n for index, clip_to_check in enumerate(clips_to_check):\n if current_clip['slug'] == clip_to_check['slug']:\n continue\n if clip_to_check['vod'] is None:\n indices_to_delete.add(index)\n logger.info(\"clip_to_check['vod'] is none for %s\", clip_to_check)\n continue\n if current_clip['vod'] is None:\n logger.info(\"current_clip['vod'] is none for %s\", current_clip)\n indices_to_delete.add(index)\n continue\n current_clip_offset = current_clip['vod']['offset']\n clip_to_check_offset = clip_to_check['vod']['offset']\n min_offset = current_clip_offset - tolerance\n max_offset = current_clip_offset + tolerance\n if (min_offset <= clip_to_check_offset <= max_offset) \\\n and (clip_to_check['broadcaster']['display_name'] == current_clip['broadcaster']['display_name']):\n logger.info(\"Similar clip offsets found, clip_to_check_offset=%s current_clip_offset=%s\",\n clip_to_check_offset, current_clip_offset)\n if current_clip['views'] > clip_to_check['views']:\n logger.info(\"current_clip['views']=%s clip_to_check['views']=%s deleting %s\"\n , current_clip['views'], clip_to_check['views'], clip_to_check)\n index_to_delete = index\n else:\n logger.info(\"current_clip['views']=%s clip_to_check['views']=%s deleting %s\"\n , current_clip['views'], clip_to_check['views'], current_clip)\n index_to_delete = clips_to_check.index(current_clip)\n if index_to_delete not in indices_to_delete:\n indices_to_delete.add(index_to_delete)\n logger.info(\"indices_to_delete=%s\", str(indices_to_delete))\n return delete_clips_from_list(clips_to_check, indices_to_delete)", "def get_featured_clips(page=1, results_per_page=5, timeout=60 * 5):\n return get_cached_api_response(\n 'CLIPS:::FEATURED:::%d' % page, timeout,\n APIClient(**settings.API_CLIENT).get_clips, featured=True,\n limit=results_per_page, offset=(page - 1) * results_per_page)", "def find_cheapest_trips(self):\n\n #sort the list by price\n trips = sorted(self.trips, key=lambda trip: trip.price)\n\n cheapest = []\n if len(trips) == 0:\n return ITATrips(cheapest)\n lowest = trips[0]\n for trip in trips:\n #find all trips that equal the cheapest\n if trip.price == lowest.price:\n cheapest.append(trip)\n else:\n break\n\n return ITATrips(cheapest)", "def full2sparse_clipped(vec, topn, eps=1e-9):\n # use np.argpartition/argsort and only form tuples that are actually returned.\n # this is about 40x faster than explicitly forming all 2-tuples to run sort() or heapq.nlargest() on.\n if topn <= 0:\n return []\n vec = np.asarray(vec, dtype=float)\n nnz = np.nonzero(abs(vec) > eps)[0]\n biggest = nnz.take(argsort(abs(vec).take(nnz), topn, reverse=True))\n return list(zip(biggest, vec.take(biggest)))", "def test_lower_than_min_frame_rate(self):\n modify_filter(self.filter, min_frame_rate=100)\n clips = get_all_clips_matching_filter(self.filter)\n self.assertEqual(clips, [])", "def discard(lst, minset) -> list:\n r = []\n for l in lst:\n if set(l) >= minset:\n r.append(l)\n return r", "def _get_clipping_slices(cost_fpath, sc_point_idx, radius=None):\n with ExclusionLayers(cost_fpath) as f:\n shape = f.shape\n\n if radius is not None:\n row, col = sc_point_idx\n row_min = max(row - radius, 0)\n row_max = min(row + radius, shape[0])\n col_min = max(col - radius, 0)\n col_max = min(col + radius, shape[1])\n\n start_indices = (row - row_min, col - col_min)\n else:\n start_indices = sc_point_idx\n row_min, row_max = None, None\n col_min, col_max = None, None\n\n row_slice = slice(row_min, row_max)\n col_slice = 
slice(col_min, col_max)\n\n return start_indices, row_slice, col_slice", "def test_filter_without_params(self):\n clips = get_all_clips_matching_filter(self.filter)\n self.assertEqual(clips[0].id, self.cid)", "def filter_rdtest(variants, cutoffs):", "def _filter(self, input, target_genomes):\n # Ensure that the input is a list\n input = list(input)\n\n logger.info(\"Building set cover sets input\")\n sets = self._make_sets(input, target_genomes)\n logger.info(\"Building set cover ranks input\")\n ranks = self._make_ranks(input, target_genomes)\n logger.info(\"Building set cover costs input\")\n costs = self._make_costs(input)\n logger.info(\"Building set cover universe_p input\")\n universe_p = self._make_universe_p(target_genomes)\n\n # Run the set cover approximation algorithm\n set_ids_in_cover = self._compute_set_cover(sets,\n costs,\n universe_p,\n ranks,\n target_genomes)\n\n # Warn when less-than-ideal probes are chosen (i.e., probes\n # whose ranks exceed 0)\n num_bad_probes = sum([True for set_id in set_ids_in_cover\n if ranks[set_id] > 0])\n if num_bad_probes > 0:\n logger.warning((\"Forced to choose %d less-than-ideal probe%s \"\n \"(i.e., probes that 'hit' more than one \"\n \"grouping during identification or probes that \"\n \"cover a blacklisted genome)\"), num_bad_probes,\n ('' if num_bad_probes == 1 else 's'))\n\n return [input[id] for id in set_ids_in_cover]", "def clip_scenes_if_not_clipped(source_paths, bounds_tuple, filter_nan, tempdir):\n clipped_scene_paths = [Path(p) for p in tempdir.glob(\"*clipped*\")]\n resampled_scene_paths = [Path(p) for p in tempdir.glob(\"*resampled*\")]\n\n if clipped_scene_paths == [] and resampled_scene_paths == []:\n print(\"starting clipping\")\n\n batches = eio.batches_from(source_paths, 16)\n\n batch_results = []\n\n for batch in batches:\n\n batch_result = dask.delayed(eio.clip_and_save)(batch, bounds_tuple, filter_nan, outDir=tempdir)\n batch_results.append(batch_result)\n\n result_futures = client.compute(batch_results, scheduler='processes')\n\n clipped_scene_batches = [i.result() for i in result_futures]# gets rid of None that denotes too little scene overlap\n clipped_scene_paths = []\n for batch in clipped_scene_batches:\n for path in batch:\n if path != None:\n clipped_scene_paths.append(Path(path))\n print(\"done clipping\")\n return clipped_scene_paths\n else:\n return clipped_scene_paths", "def clip_points(shp, clip_obj):\n poly = clip_obj.geometry.unary_union\n return(shp[shp.geometry.intersects(poly)])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of information of 'good' clips from a list of users.
def get_clips(self, client_id=None, oauth_token=None): logging.info("Getting clips") self.client = TwitchHelix(client_id=client_id, oauth_token=oauth_token) total_clips = [] for user in self.users_list: clips = self._get_clips(user['_id'], user['name'], client_id, oauth_token) good_clips = self._get_good_clips(clips) logging.info("Found %s good clip(s) for %s", len(good_clips), user['name']) if good_clips: total_clips.extend(good_clips) logging.info("Got %s clips", len(total_clips)) return total_clips
[ "def _get_good_clips(self, clips):\n logging.info(\"Getting good clips from %s clip(s)\", len(clips))\n good_clips = []\n for clip in clips:\n if (self.lang is None or clip['language'] in self.lang):\n logging.debug(\"Clip %s by %s has %s views\", clip['id'],\n clip['broadcaster_name'], clip['view_count'])\n video_views = self._get_clip_video_views(clip)\n clip['rating'] = self._get_clip_rating(clip['view_count'],\n video_views)\n logging.info(\"Clip %s rating %s\", clip['id'], clip['rating'])\n if clip['rating'] >= 1:\n logging.info(\"Clip %s is 'good'\", clip['id'])\n good_clips.append(clip)\n else:\n logging.debug(\"Clip %s by %s doesn't isn't lang %s\", clip['id'],\n clip['broadcaster_name'], self.lang)\n return good_clips", "def _get_clips(self, user_id, user_name, client_id=None, oauth_token=None):\n logging.info(\"Getting clips for %s\", user_name)\n clip_headers = {}\n if client_id is not None:\n clip_headers['Client-ID'] = client_id\n if oauth_token is not None:\n clip_headers['Authorization'] = f'Bearer {oauth_token}'\n clip_params = {\n 'broadcaster_id': user_id,\n 'started_at': self.started_at,\n 'ended_at': self.ended_at,\n 'first': 100,\n }\n resp = requests.get(f'https://api.twitch.tv/helix/clips',\n headers=clip_headers, params=clip_params)\n resp_json = resp.json()\n\n if resp.status_code >= 400:\n logging.error(\"Error when getting clips of streamer %s: %s\",\n user_name, resp_json['message'])\n resp.raise_for_status()\n\n clips_json = resp_json['data']\n clips = []\n for clip_json in clips_json:\n clip = Clip.construct_from(clip_json)\n logging.debug(\"Adding clip %s\", clip['id']);\n clips.append(clip)\n logging.info(\"Got %s clip(s) from streamer %s\", len(clips), user_name)\n return clips", "def list_clips():\n\n # if user not authed, start auth proccess\n if not authed():\n return redirect('/auth')\n\n # querying clips from database ordered by total likes\n clips = db.session.query(Clip, func.count(Like.user_id).label('total')).join(Like, isouter=True).group_by(Clip).order_by('total', Clip.created_at).all()\n\n # we need the current user's likes for visual representation of what they already liked\n user = get_user_dict(session['access_token'])\n\n # if user is non, token is bad. 
start auth\n if user is None:\n return redirect('/auth')\n\n # getting the user from our database to see what posts they like\n user = User.query.get(user.get('id'))\n\n return render_template('clips.html', clips=clips, likes=user.likes)", "def get_user_starred(user):\r\n # TODO clear empty list cache\r\n starred, page = [], 1\r\n while page > 0:\r\n response = requests.get(make_starred_url(user, page))\r\n\r\n # 403: Exceeded request limit\r\n # 404: User does not exist\r\n if response.status_code in (403, 404):\r\n return []\r\n\r\n # Up to 100 items at a time\r\n if len(response.json()) == 100:\r\n starred.extend(response.json())\r\n page += 1\r\n else:\r\n page = -1\r\n return starred", "async def banned_list(self, ctx):\n players = self.get_players(ctx)\n players = sorted(players, key=lambda x: x['IGN'])\n\n out = [\n '+ {} ({})'.format(player['IGN'], player['PlayerTag'])\n for player in players]\n\n for page in pagify('\\n'.join(out), shorten_by=24):\n await self.bot.say(page)", "async def listuserbadges(ctx, user: discord.Member = None):\n if user is None:\n user = ctx.message.author\n badges = await db.user(user).badges()\n\n # sort\n priority_badges = []\n for badge_name, badge in badges.items():\n priority_num = badge[\"priority_num\"]\n if priority_num != -1:\n priority_badges.append((badge, priority_num))\n sorted_badges = sorted(priority_badges, key=operator.itemgetter(1), reverse=True)\n\n badge_ranks = []\n for i, (badge, priority_num) in enumerate(sorted_badges[:12], 1):\n badge_ranks.append(f\"**{i}. {badge['badge_name']}** ({badge['guild_name']}) [{priority_num}] **—** {badge['description']}\")\n badge_ranks = \"\\n\".join(badge_ranks) or \"None\"\n\n em = discord.Embed(description=\"\", colour=user.colour)\n\n total_pages = 0\n for _ in pagify(badge_ranks, [\"\\n\"]):\n total_pages += 1\n\n for i, page in enumerate(pagify(badge_ranks, [\"\\n\"]), 1):\n em.description = page\n em.set_author(name=f\"Badges for {user.name}\", icon_url=user.avatar_url)\n em.set_footer(text=f\"Page {i} of {total_pages}\")\n await ctx.send(embed=em)", "def get_hosts_listings(userids): #TODO: write get_hosts_listings function\n \n url = 'https://api.airbnb.com/v2/listings/?client_id=3092nxybyb0otqw18e8nh5nty&user_id='\n users_listings = collections.defaultdict(dict)\n success = False # was the request successful? 
If not, may try again\n\n #for each userid in userids:\n for user in userids:\n print \"\\nUser id: \" + str(user)\n room_ratings = []\n num_ratings = 0\n # send request and get results \n req = urllib2.Request(url + str(user), headers = hdr)\n print \"\\nRetrieving user data from URL %s\" %(url)\n\n # execute request\n #sleep(randint(0., 1.)) \n open_url = urllib2.urlopen(req)\n\n # returned codes from HTTP request\n #code = open_url.getcode()\n\n # convert returned JSON into Python dictionary\n json_search_results = json.loads(open_url.read())\n # store the user's name\n user_name = json_search_results['listings'][0]['user']['first_name']\n\n # make a list of the user's property ratings\n num_listings = len(json_search_results['listings'])\n\n # store user's room ratings and number of ratings\n room_ratings.extend(json_search_results['listings'][room]['star_rating'] \\\n for room in range(num_listings) \\\n if json_search_results['listings'][room]['star_rating'] is not None)\n num_ratings += sum(json_search_results['listings'][room]['reviews_count'] \\\n for room in range(num_listings))\n\n # compute the this user's statistics\n users_listings[user]['name'] = user_name.encode('utf-8').strip()\n users_listings[user]['num_rooms'] = num_listings\n users_listings[user]['num_ratings'] = num_ratings\n users_listings[user]['avg_rating'] = sum(room_ratings) / num_listings\n\n return users_listings", "def whitelist(context):\n logger.info(\"Running scout view users\")\n adapter = context.obj['adapter']\n \n ## TODO add a User interface to the adapter\n for whitelist_obj in adapter.whitelist_collection.find():\n click.echo(whitelist_obj['_id'])", "def get_beef_against_list(user_id):\n\n items = [\"BeefTitle\", \"CreatedByName\", \"CreatedById\", \"BeefOpponent\", \"BeefOpponentId\", \n \"BeefDescription\", \"TimeCreated\", \"_id\"]\n\n print \"Getting beef against user: \", user_id\n beef_collection = getCollection(\"beef\")\n\n beef_list = list(beef_collection.find({\"BeefOpponentId\": bson.objectid.ObjectId(user_id)}))\n beef_list = map(lambda x: format_dict(x, items), beef_list)\n\n print \"Beef List: \", beef_list\n return beef_list", "def filtered_users(all_users, whitelisted=None):\n if whitelisted is None:\n whitelisted = whitelisted_users\n return [u for u in all_users if u['UserName'] not in whitelisted]", "def test_user_list_starred(self):\n pass", "async def whitelist(self, ctx):\n guilds = self.memory.getWhiteList()\n users = guilds[str(ctx.guild.id)]\n output = ''\n\n for user in users:\n output += f'<@{user}>,'\n\n embed = None\n\n if output == '':\n embed = discord.Embed(\n title='Whitelist',\n description='Your guild does not have any member in your whitelist yet.',\n colour=discord.Colour.from_rgb(225, 73, 150)\n )\n else:\n embed = discord.Embed(\n title='Whitelist',\n description='{}'.format(output[:-1]),\n colour=discord.Colour.from_rgb(225, 73, 150)\n )\n\n await ctx.send(embed=embed)", "async def penalties(self, ctx):\n stats = await self.config.guild(ctx.guild).stats()\n stats = stats[\"penalties\"]\n if stats:\n a = []\n b = []\n for i, k in enumerate(\n sorted(stats, key=lambda x: stats[x][\"scored\"], reverse=True)[:10]\n ):\n user_team = await self.get_user_with_team(ctx, k)\n a.append(f\"{i+1}. {user_team[0].name} ({user_team[1]}) - {stats[k]['scored']}\")\n for i, k in enumerate(\n sorted(stats, key=lambda x: stats[x][\"missed\"], reverse=True)[:10]\n ):\n user_team = await self.get_user_with_team(ctx, k)\n b.append(f\"{i+1}. 
{user_team[0].name} ({user_team[1]}) - {stats[k]['missed']}\")\n embed = discord.Embed(\n title=\"Penalty Statistics\",\n colour=0xFF0000,\n description=\"=== Scored and Missed penalties statistics ===\",\n )\n embed.add_field(name=\"Penalties Scored\", value=\"\\n\".join(a))\n embed.add_field(name=\"Penalties Missed\", value=\"\\n\".join(b))\n await ctx.send(embed=embed)\n else:\n await ctx.send(\"No stats available.\")", "def check_users_botometer(list_users):\n return dict(BOM.check_accounts_in(list_users))", "def goal_scorers(self):\n return self.filter_players(\n pl_filter=lambda num, stats: stats['g']\n )", "def get_genuine_presentations_of_user(self, userid):\n\n indicies = N.where(\n N.logical_and(\n self._data[:, self.GALLERY] == self._data[:,self.PROBE],\n self._data[:, self.GALLERY] == userid\n )\n )\n return self.filter_indicies(indicies)", "def items_for_you(df, similar_users, main_user):\n set_main = set(df[df['Customer'] == main_user]['tags'])\n for user in similar_users:\n items = []\n if user != main_user:\n set_user = set(df[df['Customer'] == user]['tags'])\n items = set_main - set_user\n if len(items) > 0:\n return items\n else:\n return 'There is nothing to recommend'", "def get_books_liked_by_user(user_id, user_book_rating, min_rating):\n user2_book_rating = user_book_rating[user_book_rating[\"user_id\"]==user_id]\n books_liked = user2_book_rating[user2_book_rating[\"rating\"] >= min_rating][\"book_id\"].to_list()\n return books_liked", "def get_users():\n return [x.pw_name for x in pwd.getpwall() if user_valid(x)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The enrollment level of the service. Default value is `BLOCK_ALL`.
def enrollment_level(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "enrollment_level")
[ "def stealth_mode_blocked(self):\n if \"stealthModeBlocked\" in self._prop_dict:\n return self._prop_dict[\"stealthModeBlocked\"]\n else:\n return None", "def part_salable_default():\n\n return InvenTreeSetting.get_setting('PART_SALABLE')", "def get_block_events_enable(self):\n (status, enable) = self.__device.get_block_events_enable()\n self.__device.decode_error_status(status, cmd='get_block_events_enable()', print_on_error=True)\n if (0x01 == status):\n print \"Block events enable: %d\" % enable", "def sslbenullauthorizationsrate(self) :\n try :\n return self._sslbenullauthorizationsrate\n except Exception as e:\n raise e", "async def _lockdown(self):\n gatekeeper_cog = self.bot.get_cog(\"Gatekeeper\")\n async with gatekeeper_cog.edit_config(self.guild) as config:\n config[\"checks\"] = {\n **config.get(\"checks\", {}),\n \"block_all\": {\"enabled\": True},\n }\n\n # TODO: have this be reported in a separate channel, with a mod ping!\n await self.report(\n \"Users are joining too quickly. `block_all` has automatically been enabled.\"\n )", "def get_block_device_enable_status(self):\n response = self._get(\n c.SERVICE_DEVICE_CONFIG, c.GET_BLOCK_DEVICE_ENABLE_STATUS\n )\n return h.zero_or_one_dict_to_boolean(response)", "def get_level():\n return LEVEL", "def getSupportedLock(self):\n \n return self.properties.get(Constants.PROP_SUPPORTED_LOCK)", "def standby_period(self):\n return self._t_standby", "def as_python_level(self) -> int:\n to_python_level = {\n LogLevel.CRITICAL: logging.CRITICAL,\n LogLevel.ERROR: logging.ERROR,\n LogLevel.WARNING: logging.WARNING,\n LogLevel.INFO: logging.INFO,\n LogLevel.DEBUG: logging.DEBUG}\n return to_python_level[self]", "def get_level(self):\r\n return self.__level", "def available_granularity(self):\n return self._exchange.available_granularity()", "def block_course_id(self):\n return str(self.course_id)", "def get_block(self):\n if self.value == 1:\n return Action.BLOCK_ASSASSINATE\n elif self.value == 2:\n return Action.BLOCK_FOREIGN_AID\n elif self.value == 3:\n return Action.BLOCK_STEAL\n else:\n return -1", "def get_security_status(self):\n return self.latest_status", "def test_list_enrollments_sections(self):\r\n section_id = None # Change me!!\r\n\r\n r = self.client.list_enrollments_sections(section_id, grading_period_id=None, include=None, role=None, sis_account_id=None, sis_course_id=None, sis_section_id=None, sis_user_id=None, state=None, type=None, user_id=None)", "def _set_allowlist_cert_status(user, course_key, enrollment_mode, course_grade):\n if not _can_set_allowlist_cert_status(user, course_key, enrollment_mode):\n return None\n\n cert = GeneratedCertificate.certificate_for_student(user, course_key)\n return _get_cert_status_common(user, course_key, enrollment_mode, course_grade, cert)", "def load_balance_status(self):\n return self._load_balance_status", "def get_default_runtime_log_level():\n return EC.EDGE_RUNTIME_LOG_LEVEL_INFO", "def standby_status(self):\n return self._redunda.shouldStandby" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If true, then the Policy is enforced. If false, then any configuration is acceptable.
def enforced(self) -> pulumi.Input[bool]: return pulumi.get(self, "enforced")
[ "def enforced(self) -> bool:\n return pulumi.get(self, \"enforced\")", "def PolicyEnforcement(self) -> PolicyEnforcement:", "def allow(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"allow\")", "def should_auto_approve():\n if settings.MODERATION_POLICY == moderation_policies.automatic.value:\n return True\n return False", "def fpolicy_enable(self):\n return self.request( \"fpolicy-enable\", {\n }, {\n } )", "def is_policy(self):\n return self._policy", "def supports_configuration_rules(self):\n return # boolean", "def _validate_config(self):\n if self.config[\"metric-allowlist\"] and self.config[\"metric-denylist\"]:\n self.unit.status = BlockedStatus(\n \"metric-allowlist and metric-denylist are mutually exclusive\"\n )\n return False\n return True", "def require_validation(self) -> ConfigNodePropertyBoolean:\n return self._require_validation", "def create_base_policy(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"create_base_policy\")", "def authorize(self):\n return True", "def include_deny_policy_analysis(self) -> bool:\n return pulumi.get(self, \"include_deny_policy_analysis\")", "def fpolicy_status(self):\n return self.request( \"fpolicy-status\", {\n }, {\n 'is-enabled': [ bool, False ],\n } )", "def _do_admission_control_check():\n return not sstbf.is_sstbf_configured()", "def fpolicy_set_required(self, policy_name, required):\n return self.request( \"fpolicy-set-required\", {\n 'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],\n 'required': [ required, 'required', [ bool, 'None' ], False ],\n }, {\n } )", "def supports_parameter_smart_configuration(self):\n return # boolean", "def supports_authorization_rules(self):\n return # boolean", "def set_execution_policy_to_restrict(self):\n code_status = self.session.run_ps('%s restricted' % SET_EXECUTION_POLICY).status_code\n return SUCCESSFUL if code_status == 0 else ERROR", "def allow_promiscuous(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_promiscuous\")", "def test_allow(self):\n old_allowed = getattr(settings, 'REGISTRATION_OPEN', True)\n settings.REGISTRATION_OPEN = True\n self.failUnless(self.backend.registration_allowed(_mock_request()))\n\n settings.REGISTRATION_OPEN = False\n self.failIf(self.backend.registration_allowed(_mock_request()))\n settings.REGISTRATION_OPEN = old_allowed" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy.
def inherit_from_parent(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "inherit_from_parent")
[ "def _inherit_from_parent(self):\n # TODO: there are more attributes (all) that can be inherited\n if not self.parent:\n return\n if self.group_id is None and self.parent.group_id:\n self.group_id = self.parent.group_id\n if TRACE: logger.debug('_inherit_from_parent: group_id: {}'.format(self.parent.group_id))\n if self.version is None and self.parent.version:\n self.version = str(self.parent.version)\n if TRACE: logger.debug('_inherit_from_parent: version: {}'.format(self.parent.version))\n if not self.classifier is None and self.parent.classifier:\n self.classifier = self.parent.classifier\n if TRACE: logger.debug('_inherit_from_parent: classifier: {}'.format(self.parent.classifier))\n\n # FIXME: the parent may need to be resolved too?\n # special handling for URLs: see\n # http://maven.apache.org/ref/3.5.0/maven-model-builder/index.html#Inheritance_Assembly\n # Notice that the 5 URLs from the model:\n # project.url,\n # project.scm.connection, project.scm.developerConnection, project.scm.url\n # project.distributionManagement.site.url)\n # ... have a special inheritance handling: if not configured in\n # current model, the inherited value is the parent's one with\n # current artifact id appended.\n if (self.url is None\n and hasattr(self.parent, 'url')\n and getattr(self.parent, 'url', None)\n and self.artifact_id):\n # FIXME: this is not the way to join URLs parts!\n self.url = self.parent.url + self.artifact_id\n\n # FIXME: this is not the way to join URLs parts!\n parent_scm = getattr(self.parent, 'scm', None)\n if self.scm and parent_scm and self.artifact_id:\n ps_url = parent_scm.get('url')\n if not self.scm.get('url') and ps_url:\n # FIXME: this is not the way to join URLs parts!\n self.scm['url'] = ps_url + self.artifact_id\n\n ps_connection = parent_scm.get('connection')\n if not self.scm.get('connection') and ps_connection:\n # FIXME: this is not the way to join URLs parts!\n self.scm['connection'] = ps_connection + self.artifact_id\n\n ps_devconnection = parent_scm.get('developer_connection')\n if not self.scm.get('developer_connection') and ps_devconnection:\n # FIXME: this is not the way to join URLs parts!\n self.scm['developer_connection'] = ps_devconnection + self.artifact_id\n\n # TODO: distribution_management.site.url", "def set_base_policy(self, policy):\n if policy in self.matched_policies:\n self.policy = policy\n else:\n raise ValueError(\"Policy is not valid for this candidate policy\")", "def create_base_policy(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"create_base_policy\")", "def get_iam_policies(self):\n desired_policy = self.custom_config.get('policy_arns', [])\n\n if len(self.parents) > 0:\n\n iam_policy_parents = list(filter(lambda x: x.flavor == IAMPolicy.flavor, self.parents))\n if iam_policy_parents:\n for policy_parent in iam_policy_parents:\n policy_parent.sync_state()\n if policy_parent.policy_arn not in desired_policy:\n self.custom_config['policy_arns'].append(policy_parent.policy_arn)", "def inherit(tsuid, parent, *args, **kwargs):\n return Wrapper.inherit_properties(tsuid=tsuid, parent=parent, *args, **kwargs)", "def allow_child_attributes(self):\n return self._allow_child_attributes", "def _inherit_parent_certainty(certain, known, other):\n all = certain[:]\n assert superset(all, known)\n for name in known:\n all.remove(name)\n assert superset(all, other)\n for name in all:\n if (all.count(name) > other.count(name)):\n other.append(name)", "def premium_child_only(self):\n return self._premium_child_only", "def 
premium_child_only(self, premium_child_only):\n self._premium_child_only = premium_child_only", "def parentConstraints(self):\r\n return None", "def altered(self):\n\t\tprint(\"CHILD, before PARENT altered()\")\n\t\tsuper(Child, self).altered()\n\t\tprint(\"CHILD, after PARENT altered()\")", "def isInherited(self, prop):\n pass", "def override(self):\n\t\tprint(\"CHILD override()\")", "def allow_child_attributes(self, allow_child_attributes):\n\n self._allow_child_attributes = allow_child_attributes", "def merge_with_parent(self, parent):\n\n # first, check if parent has cubes\n if parent.cubes is not None:\n if self.cubes is None:\n self.cubes = parent.cubes.copy() # only if we don't define cubes do we override\n\n # next, try to override textures\n new_textures = parent.textures.copy()\n new_textures.update(self.textures)\n self.textures = new_textures\n self._update_textures() # make sure references are good\n\n self.transforms.update(parent.transforms) # although transforms are represented differently,\n # we still override them", "def inherit(parent_genes, inherit):\n #--No genes, can only mutate to have gene or not:\n if parent_genes == 0:\n if inherit: #--trait is expressed:\n return PROBS[\"mutation\"]\n else: #--trait is not expressed:\n return 1 - PROBS[\"mutation\"]\n #\n #--50/50 shot of passing on if 1 gene in parent:\n elif parent_genes == 1:\n return 0.5 # always 50/50 %\n #\n #--Parent has 2 genes, almost definitely will pass on (minus small mutation prob. rate):\n elif parent_genes == 2:\n if inherit:\n return 1 - PROBS[\"mutation\"]\n else:\n return PROBS[\"mutation\"]", "def effective_policy_rules(self):\n return self.broker.effective_policy_rules(**{\"PolicyID\": self.PolicyID})", "def _update_policy_and_distribution(self):\n self._policy = self.get_softmax_policy()\n self._distribution = distribution_std.DistributionPolicy(\n self._game, self._policy)", "def PolicyEnforcement(self) -> PolicyEnforcement:", "def update_policy(self, new_policy):\n self.policy = new_policy" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load data from the database and return a pandas dataframe. Limit param specifies number of rows returned. Default is to return all
def load_dataframe_from_sql(river, limit=-1): if limit > 0: logger.debug("loading df for river {river} from sql with row limit of {limit}".format(river=river, limit=limit)) else: logger.debug("loading entire df for river {river} from sql".format(river=river)) con = sqlite3.connect(DATABASE_PATH) cur = con.cursor() query = """ SELECT timestamp, rain, level, forecast from {river} ORDER BY timestamp DESC LIMIT {limit} """ cur.execute(query.format(river=river, limit=limit)) result = cur.fetchall() df = pd.DataFrame(result, columns=['timestamp', 'cum_rain', 'level', 'forecast']) # # Set index to timestamp column as object df.timestamp = pd.to_datetime(df.timestamp) df = df.set_index('timestamp') df = df.sort_index() return df
[ "def to_data_frame(self, num_records: int = 0) -> PandasDataFrame:", "def _get_all_records(self):\n self._conn = create_engine(self._connection_str, echo=False)\n\n QUERY = (\"\"\"SELECT *\n FROM {}\n ORDER BY person_index, timestamp;\n \"\"\".format(self.source)\n )\n\n self.all_records_df = pd.read_sql_query(QUERY, self._conn)\n self.ttl_persons = len(self.all_records_df['person_index'].unique())", "def read(self,table,columns=None,where=None,limit=0,status_check=True):\n\n if limit is not None and (where is None or limit<1):\n limit=MySQL_DB_Connection.DEFAULT_READ_LIMIT\n \n if status_check:\n table.check_on_db(self)\n cols_s = '*' if not columns else ','.join(columns)\n where_s = ' WHERE %s' % (where) if where is not None else ''\n limit_s = ' LIMIT %d' % (limit) if limit is not None else ''\n query_template='SELECT %s FROM `%s`%s%s' % (cols_s,table.name,where_s,limit_s)\n df = pd.DataFrame(self.query(query_template),columns=columns if columns else table.get_column_names())\n return df", "def get_data(fn):\n rows = []\n dbf = dbflib.open(fn)\n for i in range(dbf.record_count()):\n rows.append(dbf.read_record(i))\n\n return pd.DataFrame(rows)", "def head(self, n=5):\n col = self.copy()\n col.query.setLIMIT(n)\n return col.toPandas()", "def read_sql(self, session, post_model, size_limit=None):\n if size_limit != None:\n query = session.query(post_model).limit(size_limit)\n else:\n query = session.query(post_model)\n self.corpus_data = data_frame(query, columns=[\"title\", \"body\"])\n return self", "def load_data(): \n\n engine = create_engine('sqlite:///data/DisasterResponse.db')\n\n df = pd.read_sql_table('Messages', engine)\n\n return df", "def fetch_all(self, limit=15):\n records = []\n limit = self.db.llen(self.redis_key)\n for item in self.db.lrange(self.redis_key, 0, limit-1):\n record_obj = json.loads(item.decode('utf-8'))\n records.append(record_obj)\n \n return records", "def get_data_from_postgres(query: str, engine: sqa.engine) -> pd.DataFrame:\n return pd.read_sql(query, engine)", "def sql(self, sql: str) -> pd.DataFrame:\n import duckdb\n\n df = duckdb.query(self.df, \"data\", sql).df()\n self.df = df\n return df", "def read_full_table(self, table, index_col=\"pandas_index\"):\n df = pd.read_sql(f\"SELECT * FROM {table}\", self.conn, index_col=index_col)\n print(f\"Read {len(df)} lines from '{table}' table in DB.\")\n return df", "def dataframe(self, query):\n\n return pd.read_sql(query, self.engine)", "def get_data(nrows=1_000):\n df = pd.read_csv(f\"gs://{BUCKET_NAME}/{BUCKET_TRAIN_DATA_PATH}\", nrows=nrows)\n return df", "def head(self, n=5):\n return PandasDataset(\n self.spark_df.limit(n).toPandas(),\n expectation_suite=self.get_expectation_suite(\n discard_failed_expectations=False,\n discard_result_format_kwargs=False,\n discard_catch_exceptions_kwargs=False,\n discard_include_config_kwargs=False,\n ),\n )", "def _fetch_data(self) -> pandas.DataFrame:\n\n # generate file paths to locally stored 'full' data\n data_title = _FULL_INPUT_DATA_TITLE.format(self._exchange, self._symbol, self._timeperiod)\n file_path = _FULL_INPUT_DATA_PATH.format(data_title)\n\n # check that the full csv files exist\n if not (os.path.isfile(file_path)):\n raise Exception(f\"failed to build DataBook; full data does not exist!\\n\"\n f\"{file_path} not found in library; try building the full dataframe first.\")\n\n # load csv as pandas df\n df = pandas.read_csv(file_path)\n\n return df", "def load_dataset(self):\n try:\n ai_df = pd.read_csv(self.data)\n lg.info('data loaded 
successfully!!!')\n return ai_df\n except Exception as e:\n lg.exception(str(e))", "def _readData(self):\n print(\"Read from database\")\n con = sqlite3.connect(\"fantasyfootball/players.db\")\n data = pd.read_sql_query(\"select * from players\", con)\n con.close()\n\n return data", "def read_df(self, table=None):\n # Default value for table #\n if table is None: table = self.main_table\n # Make the query to avoid SQLAlchemy dependency #\n query = 'SELECT * FROM \"%s\";' % table\n # Call pandas function #\n return pandas.read_sql_query(query, con = self.own_connection)", "def loadMicrodata(path, delimiter, record_limit, use_columns):\n df = pd.read_csv(path, delimiter).astype(str) \\\n .replace(to_replace=r'^nan$', value='', regex=True) \\\n .replace(to_replace=r'\\.0$', value='', regex=True) \\\n .replace(to_replace=';', value='.,', regex=False) \\\n .replace(to_replace=':', value='..', regex=False) # fix pandas type coercion for numbers and remove reserved delimiters\n if use_columns != []:\n df = df[use_columns]\n if record_limit > 0:\n df = df[:record_limit]\n return df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }