query (string, 9-9.05k chars) | document (string, 10-222k chars) | negatives (list, 19-20 items) | metadata (dict) |
---|---|---|---|
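For orientation, a minimal sketch of reading one row of this schema with the Hugging Face `datasets` library; the dataset id below is a placeholder, not the real repository name.

```python
from datasets import load_dataset

# "user/code-retrieval-triplets" is a placeholder id for this query/document/negatives dump.
ds = load_dataset("user/code-retrieval-triplets", split="train")
row = ds[0]
print(row["query"])           # docstring-style query, e.g. "Add lattice to the file."
print(row["document"][:80])   # matching (positive) code snippet
print(len(row["negatives"]))  # 19-20 hard-negative code snippets
print(row["metadata"])        # {"objective": {..., "triplet": [["query", "document", "negatives"]]}}
```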
Modifies the board representation using the specified move and piece
|
def place_piece(self, move, piece):
    if len(move) > 1:
        self.board[move[1][0]][move[1][1]] = ' '
    self.board[move[0][0]][move[0][1]] = piece
|
[
"def execute_move(self, move: Tuple[int, int, Piece], player: int):\n\n (x, y, p) = move\n\n # Placing in empty square\n assert self[x][y] == 0\n # Piece placed is not already used\n assert p not in self.used_pieces\n # Not placing in middle cross\n assert x != self.mid\n assert y != self.mid\n\n # print(f\"Placing {(self.selected_piece & 0b1111):04b} at {x},{y}\")\n self[x][y] = int(self.selected_piece) # +(1<<self.n)\n\n self.selected_piece = p\n # print(f\"Selecting {(self.selected_piece & 0b1111):04b} for opponent\\n\")",
"def set_piece(self, row, col, new_piece):\n self.board[row][col] = new_piece",
"def _move_piece(self, pos_from, pos_to):\n self[pos_to] = self[pos_from]\n self[pos_to].position = pos_to\n self[pos_to].mvs_number += 1\n self[pos_from] = EMPTY\n return",
"def movePiece(self,pos, dest):\r\n\r\n #set passant every turn to check if en passant is possible\r\n if(isinstance(self.board[pos[0]][pos[1]],piece.Pawn)):\r\n if(self.board[pos[0]][pos[1]].enPassant == True):\r\n self.passant = True\r\n else:\r\n self.passant = False\r\n\r\n #check if castle move\r\n if(not self.checkPossible(pos,dest)):\r\n if(pos == self.kingPos[0]):\r\n if(dest == (1,0)):\r\n self.board[2][0] = self.board[0][0]\r\n self.board[2][0].position = (2,0)\r\n self.board[0][0] = 0\r\n elif(dest == (6,0)):\r\n self.board[5][0] = self.board[7][0]\r\n self.board[5][0].position = (5,0)\r\n self.board[7][0] = 0\r\n elif(pos == self.kingPos[1]):\r\n if(dest == (1,7)):\r\n self.board[2][7] = self.board[0][7]\r\n self.board[2][7].position = (2,7)\r\n self.board[0][7] = 0\r\n elif(dest ==(6,7)):\r\n print(\"Final Step\")\r\n self.board[5][7] = self.board[7][7]\r\n self.board[5][7].position = (5,7)\r\n self.board[7][7] = 0\r\n\r\n # move piece(normally)\r\n self.board[dest[0]][dest[1]] = self.board[pos[0]][pos[1]]\r\n self.board[pos[0]][pos[1]] = 0\r\n self.board[dest[0]][dest[1]].position = (dest[0], dest[1])",
"def move_piece(x, y, new_x, new_y, x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION):\n\n # check whether the destination is the same for both\n\n if (new_x == new_x2 and new_y == new_y2):\n print(\"Both pieces going to the same location\")\n piece_type1 = get_piece(board, y, x)\n piece_type2 = get_piece(board, y2, x2)\n if (piece_type1 == \"p\" and piece_type2 == \"P\"):\n # both pawns, delete both\n print(\"Both are pawns, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"k\" and piece_type2 == \"K\"):\n print(\"Both are knights, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"p\" and piece_type2 == \"K\"):\n\n board = delete_piece(x, y, board, board_turtles)\n # execute move for AI\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n elif (piece_type1 == \"k\" and piece_type2 == \"P\"):\n board = delete_piece(x2, y2, board, board_turtles)\n # execute move for AI\n board = execute_move(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n else:\n # the pieces are moving to different locations, simultaneous movement does not matter\n print(\"Executing moves normally\")\n if (x != -1):\n board = execute_move(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n if (x2 != -1):\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n\n return board",
"def execute_move(self, move, color):\n\n (z,x,y) = move\n\n # Add the piece to the empty square.\n assert self.pieces[z][x][y] == 0\n self.pieces[z][x][y] = color",
"def apply_move(self, move):\n if self.is_valid_move(move):\n self.board[move] = self.turn\n self.turn = 'X' if self.turn == 'O' else 'O'",
"def update(self, piece):\n x, y = piece.position[0], piece.position[1]\n self.board[x:x+piece.arr.shape[0], y:y+piece.arr.shape[1]] += piece.arr",
"def play_piece(self, piece, piece_moves):\n start_file, start_rank = piece.file_pos, piece.rank_pos\n coord_str = \"\"\n select_move_dict = {}\n key_num = 1\n for move_vector in piece_moves:\n move_notation_str = self.board.move_notation(piece, move_vector)\n coord_str += (str(key_num) + \". \" + move_notation_str + \" | \") \n select_move_dict.update({key_num: move_vector})\n key_num += 1\n while True:\n try:\n print(\"0. Go back. | \" + coord_str)\n input_num = int(input(\"Enter the move you want to make: \"))\n if input_num == 0:\n raise ReturnException(\"go back\")\n if input_num >= key_num or input_num < 0:\n raise ValueError\n break\n except ValueError:\n print(f\"Invalid input. Please enter a number from 1 through {key_num-1}.\")\n while True:\n try:\n break_num = int(input(\"Enter 1 to confirm your move. 0 to go back: \"))\n if break_num == 1:\n break\n elif break_num == 0:\n raise ReturnException\n else:\n print(\"Invalid input.\")\n except ValueError:\n print(\"Please enter a number.\")\n\n move_vector = select_move_dict.get(input_num)\n direction, step = move_vector[0], move_vector[1]\n self.board.move_piece(start_file, start_rank, direction, step)",
"def setPieceAt(self,x,y,piece):\n\n self.board[(y * 5) + x] = piece",
"def update_board_location(self, start, destination, pieces_to_move):\n board = self.get_board()\n board[start[0]][start[1]] = pieces_to_move[0]\n list_of_pieces_to_add = pieces_to_move[1]\n for piece in list_of_pieces_to_add:\n board[destination[0]][destination[1]].append(piece)\n return board",
"def execute_move(self, move, color):\n\n (x,y,z) = move\n\n # Add the piece to the empty square.\n assert self[x][y][z] == 0\n self[x][y][z] = color",
"def put(self, piece, position):\n piece.position = Square(position.x, position.y)\n self.state.pitch.board[position.y][position.x] = piece",
"def simulate_board_x(self, test_board, test_piece, move):\n\n # This function simulates placing the current falling piece onto the\n # board, specified by 'move,' an array with two elements, 'rot' and 'sideways'.\n # 'rot' gives the number of times the piece is to be rotated ranging in [0:3]\n # 'sideways' gives the horizontal movement from the piece's current position, in [-9:9]\n # It removes complete lines and gives returns the next board state as well as the number\n # of lines cleared.\n\n rot = move[0]\n sideways = move[1]\n test_lines_removed = 0\n reference_height = self.get_parameters_x(test_board)[0]\n if test_piece is None:\n return None\n\n # Rotate test_piece to match the desired move\n for i in range(0, rot):\n test_piece['rotation'] = (test_piece['rotation'] + 1) % len(PIECES[test_piece['shape']])\n\n # Test for move validity!\n if not is_valid_position(test_board, test_piece, adj_x=sideways, adj_y=0):\n # The move itself is not valid!\n return None\n\n # Move the test_piece to collide on the board\n test_piece['x'] += sideways\n for i in range(0, BOARDHEIGHT):\n if is_valid_position(test_board, test_piece, adj_x=0, adj_y=1):\n test_piece['y'] = i\n\n # Place the piece on the virtual board\n if is_valid_position(test_board, test_piece, adj_x=0, adj_y=0):\n add_to_board(test_board, test_piece)\n test_lines_removed, test_board = remove_complete_lines(test_board)\n\n height_sum, diff_sum, max_height, holes = self.get_parameters_x(test_board)\n one_step_reward = 5 * (test_lines_removed * test_lines_removed) - (height_sum - reference_height)\n # print(\"one_step_reward: \",one_step_reward)\n return test_board, one_step_reward",
"def move_piece(self, from_pos, to_pos):\n moving_piece_id = self.get_occupation(from_pos)\n captured_piece_id = self.get_occupation(to_pos) # None if no capture\n\n # if moving piece is a general update its position in the\n # general_position dictionary\n if moving_piece_id[1:3] == 'ge':\n if moving_piece_id[0] == 'r':\n color = 'red'\n else:\n color = 'blue'\n self.set_general_position(color, to_pos)\n\n # update the board\n self.clear_position(from_pos)\n self.set_occupation(moving_piece_id, to_pos)\n\n return captured_piece_id",
"def update_board_with_new_move(self, move, turn, valid_capture):\n p_i = move[:2]\n p_f = move[2:]\n self.board[p_i[0]][p_i[1]] = 0\n self.board[p_i[0]][p_i[1]] = 0\n self.board[p_f[0]][p_f[1]] = turn.idx\n self.board[p_f[0]][p_f[1]] = turn.idx\n turn.remove_soldier_coodinate(p_i)\n turn.add_soldier_coodinate(p_f)\n if valid_capture['bool'] is True:\n x_mid = int((p_i[0] + p_f[0])/2)\n y_mid = int((p_i[1] + p_f[1])/2)\n valid_capture['coordinate'] = (x_mid, y_mid)\n self.board[x_mid][y_mid] = 0\n if valid_capture['prey'] == 1:\n self.player1.remove_soldier_coodinate((x_mid, y_mid))\n else:\n self.player2.remove_soldier_coodinate((x_mid, y_mid))",
"def set_piece(x, y, new_val):\n # Want to edit the global copy\n global board\n\n board[x][y] = new_val",
"def move_piece(self, start_x_y, end_x_y):\n\t\t(start_x, start_y) = start_x_y\n\t\t(end_x, end_y) = end_x_y\n\t\tself.matrix[end_x][end_y].occupant = self.matrix[start_x][start_y].occupant\n\t\tself.remove_piece((start_x, start_y))\n\t\tself.king((end_x, end_y))",
"def updateBoard(board, row, col, character):\n pass",
"def combine_moves(board_state_val, x, y, new_x, new_y, x2, y2, new_x2, new_y2):\n # Create deep copy of the board to configure\n board_state = copy.deepcopy(board_state_val)\n\n # store the values of each moving board piece\n player_val = board_state[x][y]\n ai_val = board_state[x2][y2]\n\n if new_x == new_x2 and new_y == new_y2:\n\n piece_type1 = board_state[x][y]\n piece_type2 = board_state[x2][y2]\n if piece_type1 == \"p\" and piece_type2 == \"P\":\n # both pawns, delete both\n board_state[x][y] = \"W\"\n board_state[x2][y2] = \"W\"\n elif piece_type1 == \"k\" and piece_type2 == \"K\":\n board_state[y][x] = \"W\"\n board_state[x2][y2] = \"W\"\n elif piece_type1 == \"p\" and piece_type2 == \"K\":\n\n board_state[x][y] = \"W\"\n # execute move for AI\n board_state[new_x2][new_y2] = board_state[y2][x2]\n board_state[x2][y2] = \"W\"\n elif piece_type1 == \"k\" and piece_type2 == \"P\":\n board_state[x2][y2] = \"W\"\n # execute move for player\n board_state[new_x][new_y] = board_state[y][x]\n board_state[x][y] = \"W\"\n else:\n # the pieces are moving to different locations, simultaneous movement does not matter\n\n board_state[new_x][new_y] = player_val\n board_state[x][y] = \"W\"\n\n board_state[new_x2][new_y2] = ai_val\n board_state[x2][y2] = \"W\"\n\n # check whether an AI pawn reached the last rank\n if ai_val == \"P\" and new_x2 == 4:\n # reached last rank, process it\n board_state[new_x2][new_y2] = \"K\"\n\n # check whether a player pawn reached the last rank\n if player_val == \"p\" and new_x == 0:\n # reached last rank, process it\n board_state[new_x][new_y] = \"k\"\n\n return board_state"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      ["query", "document", "negatives"]
    ]
  }
}
|
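The metadata block above declares a triplet objective over (query, document, negatives). A hedged sketch of how such rows could feed a contrastive trainer with sentence-transformers follows; the encoder name, the toy `rows` list, and the choice of `TripletLoss` are assumptions for illustration, not part of the dataset.

```python
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader

# Toy row with the same fields as the dataset (real rows carry 19-20 negatives).
rows = [{
    "query": "Delete lattice from file.",
    "document": "def _remove_lattice(self, name): ...",
    "negatives": ["def delete(file): os.remove(file)", "def remove(self, file): pass"],
}]

model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")  # placeholder encoder

train_examples = [
    InputExample(texts=[row["query"], row["document"], neg])  # (anchor, positive, negative)
    for row in rows
    for neg in row["negatives"]
]
loader = DataLoader(train_examples, shuffle=True, batch_size=16)
loss = losses.TripletLoss(model)
model.fit(train_objectives=[(loader, loss)], epochs=1, warmup_steps=100)
```

`MultipleNegativesRankingLoss` would be the usual alternative when the explicit negatives are dropped in favor of in-batch negatives.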
Add particle container to the file.
|
def _add_particles(self, particles, cuba_keys):
    name = particles.name
    particles_root = self._root.particle
    group = tables.Group(particles_root, name=name, new=True)
    h5_particles = H5Particles(group)
    h5_particles.data = particles.data
    if cuba_keys is not None:
        for item in particles.iter(item_type=CUBA.PARTICLE):
            item.data = DataContainer(
                {key: item.data[key] for key in item.data
                 if key in cuba_keys[CUBA.PARTICLE]})
            h5_particles.add([item])
        for item in particles.iter(item_type=CUBA.BOND):
            item.data = DataContainer(
                {key: item.data[key] for key in item.data
                 if key in cuba_keys[CUBA.BOND]})
            h5_particles.add([item])
    else:
        h5_particles.add(particles.iter())
|
[
"def add_particle(self, particle):\n self.particles_.append(particle)",
"def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_stimulus(self.optical_series)",
"def particle_to_yml(self, particles, filename):\n # open write append, if you want to start from scratch manually delete this fid\n fid = self.open_file_write(filename)\n fid.write('header:\\n')\n fid.write(\" particle_object: 'MULTIPLE'\\n\")\n fid.write(\" particle_type: 'MULTIPLE'\\n\")\n fid.write('data:\\n')\n for i in range(0, len(particles)):\n particle_dict = particles[i].generate_dict()\n fid.write(' - _index: %d\\n' % (i+1))\n fid.write(' particle_object: %s\\n' % particles[i].__class__.__name__)\n fid.write(' particle_type: %s\\n' % particle_dict.get('stream_name'))\n fid.write(' internal_timestamp: %f\\n' % particle_dict.get('internal_timestamp'))\n for val in particle_dict.get('values'):\n if isinstance(val.get('value'), float):\n fid.write(' %s: %16.3f\\n' % (val.get('value_id'), val.get('value')))\n elif isinstance(val.get('value'), str):\n fid.write(\" %s: '%s'\\n\" % (val.get('value_id'), val.get('value')))\n else:\n fid.write(' %s: %s\\n' % (val.get('value_id'), val.get('value')))\n fid.close()",
"def add_particle(self, particle):\n if not hasattr(particle, \"status\"):\n raise ValueError(\"Particles added to GenEvent must inherit from GenParticle\")\n self.particles_.append(particle)",
"def write_slices( self, particle_dict, species_name, snapshot ):\n # Open the file without parallel I/O in this implementation\n f = self.open_file(snapshot.filename)\n particle_path = \"/data/%d/particles/%s\" %(snapshot.iteration,\n species_name)\n species_grp = f[particle_path]\n\n # Loop over the different quantities that should be written\n for quantity in self.array_quantities_dict[species_name]:\n\n if quantity in [\"x\",\"y\",\"z\"]:\n path = \"position/%s\" %(quantity)\n data = particle_dict[ quantity ]\n self.write_particle_slices(species_grp, path, data, quantity)\n\n elif quantity in [\"ux\",\"uy\",\"uz\"]:\n path = \"momentum/%s\" %(quantity[-1])\n data = particle_dict[ quantity ]\n self.write_particle_slices( species_grp, path, data, quantity)\n\n elif quantity in [\"w\", \"charge\", \"id\"]:\n if quantity == \"w\":\n path = \"weighting\"\n else:\n path = quantity\n data = particle_dict[ quantity ]\n self.write_particle_slices(species_grp, path, data, quantity)\n\n # Close the file\n f.close()",
"def add_particle(self, particle):\n id = particle.id\n if id is None:\n id = self._generate_unique_id(self._group.particles)\n else:\n for _ in self._group.particles.where(\n 'id == value', condvars={'value': id}):\n raise ValueError(\n 'Particle (id={id}) already exists'.format(id=id))\n\n # insert a new particle record\n self._group.particles.append([(id, particle.coordinates)])\n return id",
"def addParticles(self, particles: np.array):\r\n if self.particleArray == None:\r\n self.particleArray = particles\r\n else:\r\n self.particleArray = np.concatenate(self.particleArray, particles)",
"def generate_text_file(composite_particles, composite_info, filename):\n # open the file to write in\n with open(filename, \"w\") as output_file:\n particleID = 1\n # loop through the particles\n for particle in composite_particles:\n # get particle data\n particle_data = composite_particles[particle]\n # if the particle has x, y, and z positions, a, b, and c radii, and an angle\n if len(particle_data) == 7:\n\n # write all the data for the particle to the text file\n output_file.writelines(particle_data[2][0] + str(particleID) + \" \" + str(particle_data[2][1]) + \"[nm]\" + \"\\n\") # a\n output_file.writelines(particle_data[4][0] + str(particleID) + \" \" + str(particle_data[4][1]) + \"[nm]\" + \"\\n\") # b\n output_file.writelines(particle_data[5][0] + str(particleID) + \" \" + str(particle_data[5][1]) + \"[nm]\" + \"\\n\") # c\n output_file.writelines(particle_data[0][0] + str(particleID) + \" \" + str(particle_data[0][1]) + \"[nm]\" + \"\\n\") # x\n output_file.writelines(particle_data[1][0] + str(particleID) + \" \" + str(particle_data[1][1]) + \"[nm]\" + \"\\n\") # y\n output_file.writelines(particle_data[6][0] + str(particleID) + \" \" + str(particle_data[6][1]) + \"[nm]\" + \"\\n\") # z\n output_file.writelines(particle_data[3][0] + str(particleID) + \" \" + str(particle_data[3][1]) + \"[degrees]\" + \"\\n\") # theta\n # increment particleID\n particleID += 1\n\n # write the information for the composite to the end of the file\n output_file.writelines(\"*****************\\n\")\n output_file.writelines(\"total_particles \" + str(particleID-1) + \"\\n\") # number of particles\n output_file.writelines(\"total_volume_ellipsoids \" + str(composite_info[0]) + \"[nm^3]\" + \"\\n\") # total volume\n output_file.writelines(\"x_length_prism \" + str(composite_info[1]) + \"[nm]\" + \"\\n\") # x length prism\n output_file.writelines(\"y_length_prism \" + str(composite_info[2]) + \"[nm]\" + \"\\n\") # y length prism\n output_file.writelines(\"z_length_prism \" + str(composite_info[3]) + \"[nm]\" + \"\\n\") # z length prism\n output_file.writelines(\"x_position_prism \" + str(composite_info[4]) + \"[nm]\" + \"\\n\") # x position prism\n output_file.writelines(\"y_position_prism \" + str(composite_info[5]) + \"[nm]\" + \"\\n\") # x position prism\n output_file.writelines(\"volume_fraction \" + str(composite_info[6]) + \"\\n\") # volume fraction\n output_file.writelines(\"number_particles_within_10nm_of_electrode \" + str(composite_info[7])+ \"\\n\") # number of particle electrode interactions\n output_file.writelines(\"average_particle_electrode_distance \" + str(composite_info[8])+ \"[nm]\"+ \"\\n\") # particle electrode average distance\n # output_file.writelines(\"volume_loading_dielectric_LDPE \" + str(composite_info[9])+ \"\\n\") # dielectric based off volume loading in LDPE\n # output_file.writelines(\"volume_loading_dielectric_epoxy \" + str(composite_info[10])) # dielectric based off volume loading in epoxy\n\n # close output file\n output_file.close()",
"def read_dump(dump_file):\n print(' Reading the .dump file for particle information')\n\n try:\n # Read the Simulation box dimensions\n with open(dump_file, 'r+') as fd:\n lookup = \"ITEM: NUMBER OF ATOMS\"\n lookup2 = \"ITEM: BOX BOUNDS ff ff ff\"\n for num, lines in enumerate(fd, 1):\n if lookup in lines:\n number_particles = int(next(fd))\n par_line_num = num + 7\n\n if lookup2 in lines:\n values = re.findall(r'\\S+', next(fd))\n RVE_min, RVE_max = list(map(float, values))\n\n except FileNotFoundError:\n print(' .dump file not found, make sure \"packingRoutine()\" function is executed first!')\n raise FileNotFoundError\n \n # Create an instance of simulation box\n sim_box = Cuboid(RVE_min, RVE_min, RVE_max, RVE_max, RVE_min, RVE_max)\n\n # Read the particle shape & position information\n # Create instances for ellipsoids & assign values from dump files\n Ellipsoids = []\n with open(dump_file, \"r\") as f:\n count = 0\n for num, lines in enumerate(f, 1):\n if num >= par_line_num:\n\n count += 1\n values = re.findall(r'\\S+', lines)\n int_values = list(map(float, values[1:]))\n values = [values[0]] + int_values\n\n iden = count # ellipsoid 'id' \n a, b, c = values[4], values[5], values[6] # Semi-major length, Semi-minor length 1 & 2\n x, y, z = values[1], values[2], values[3]\n qx, qy, qz, qw = values[7], values[8], values[9], values[10]\n quat = np.array([qw, qx, qy, qz]) \n ellipsoid = Ellipsoid(iden, x, y, z, a, b, c, quat) # instance of Ellipsoid class\n\n # Find the original particle if the current is duplicate\n for c in values[0]:\n if c == '_':\n split_str = values[0].split(\"_\")\n original_id = int(split_str[0])\n ellipsoid.duplicate = original_id\n break\n else:\n continue\n\n Ellipsoids.append(ellipsoid) \n\n return sim_box, Ellipsoids",
"def write_slices( self, particle_array, species_name, snapshot, p2i ):\n # Open the file without parallel I/O in this implementation\n f = self.open_file( snapshot.filename, parallel_open=False )\n particle_path = \"/data/%d/particles/%s\" %(snapshot.iteration,\n species_name)\n species_grp = f[particle_path]\n\n # Loop over the different quantities that should be written\n for particle_var in self.particle_data:\n\n if particle_var == \"position\":\n for coord in [\"x\",\"y\",\"z\"]:\n quantity= coord\n path = \"%s/%s\" %(particle_var, quantity)\n data = particle_array[ p2i[ quantity ] ]\n self.write_boosted_dataset(\n species_grp, path, data, quantity)\n\n elif particle_var == \"momentum\":\n for coord in [\"x\",\"y\",\"z\"]:\n quantity= \"u%s\" %coord\n path = \"%s/%s\" %(particle_var,coord)\n data = particle_array[ p2i[ quantity ] ]\n self.write_boosted_dataset(\n species_grp, path, data, quantity)\n\n elif particle_var == \"weighting\":\n quantity= \"w\"\n path = 'weighting'\n data = particle_array[ p2i[ quantity ] ]\n self.write_boosted_dataset(species_grp, path, data, quantity)\n\n # Close the file\n f.close()",
"async def containeradd(self, ctx, *, input_data: str):\n\t\ttry:\n\t\t\tname, data = input_data.split(';',1)\n\t\texcept IndexError:\n\t\t\tawait self.bot.say(\"Plz format as !container add name;data (data in JSON format)\")\n\t\t\treturn\n\t\ttry:\n\t\t\tself.containers[name] = json.loads(data)\n\t\texcept ValueError:\n\t\t\tawait self.bot.say(\"Error in reading the JSON format\")\n\t\t\treturn\n\t\tself.save_containers()\n\t\tawait self.bot.say(\"Data added\")",
"def write(s,filename,header=\"Opacity file written by optool.particle.write\"):\n\n if (s.np>1):\n raise TypeError('Writing is not supported for multi-particle objects')\n try:\n wfile = open(filename, 'w')\n except:\n raise RuntimeError('Cannot write to file: '+filename)\n\n headerlines = header.splitlines()\n for i in range(len(headerlines)):\n wfile.write(\"# %s\\n\" % headerlines[i])\n if s.scat:\n wfile.write(' 0\\n')\n wfile.write(' %d\\n' % s.nlam)\n wfile.write(' %d\\n' % s.nang)\n wfile.write('\\n')\n else:\n wfile.write(' 3\\n')\n wfile.write(' %d\\n' % s.nlam)\n \n for i in range(s.nlam):\n # write the lambda grid and the opacities\n wfile.write(' %15.5e %15.5e %15.5e %15.5e\\n' % (s.lam[i],s.kabs[0,i],s.ksca[0,i],s.gsca[0,i]))\n \n if s.scat:\n # we have a scattering matrix\n wfile.write('\\n')\n # Write the angular grid\n for i in range(s.nang):\n wfile.write(\"%9.2f\\n\" % s.scatang[i])\n wfile.write('\\n')\n # Write the scattering matrix\n for il in range(s.nlam):\n for ia in range(s.nang):\n wfile.write(' %15.5e %15.5e %15.5e %15.5e %15.5e %15.5e\\n' %\n (s.f11[0,il,ia],s.f12[0,il,ia],s.f22[0,il,ia],\n s.f33[0,il,ia],s.f34[0,il,ia],s.f44[0,il,ia]))\n wfile.close()",
"def _add_config_file_to_container(\n self, container: model.Container, container_path: str,\n config: dict) -> None:\n logger.debug(\n \"Adding following config under '%s' in container: %s\",\n container_path, config)\n container.push(\n container_path,\n json.dumps(config),\n make_dirs=True)\n logger.info(\n \"Successfully wrote config file in container under '%s'\",\n container_path)",
"def publish_particle_viz(self):\n self.particle_pub.publish(\n PoseArray(\n header=Header(\n stamp=rospy.Time.now(),\n frame_id=self.map_frame),\n poses=[\n p.as_pose() for p in self.particle_cloud]))\n\n if self.debug:\n print(\"Publishing new visualization.\")",
"def addDataContainer(self, datacontainer):\n \n if isinstance(datacontainer, DataHandling.DataContainer.DataContainer):\n self._datacontainer.append(datacontainer)\n \n if self.show_window:\n self.view.addFileToFilelist(datacontainer)",
"def add_container(self, container):\n self.__container_list.append(container)",
"def add_object(file_path):\n # file_path=\"/Users/alihasson/Documents/UIUC/CS445/Sythetic-Data-Generation/\" + file_path.split('/')[-1].split('.')[0]\n \n inner_path = \"Object\"\n object_name = file_path.split('/')[-1].split('.')[0]\n\n bpy.ops.wm.append(\n filepath=os.path.join(file_path, inner_path, object_name),\n directory=os.path.join(file_path, inner_path),\n filename=object_name\n )\n\n previous_context = bpy.context.area.type\n bpy.context.area.type = 'VIEW_3D'\n\n bpy.ops.view3d.snap_cursor_to_center() \n bpy.data.objects[object_name].select_set(True)\n bpy.ops.view3d.snap_selected_to_cursor(use_offset=False)\n\n bpy.context.area.type = previous_context\n\n return object_name",
"def create_file_empty_slice( self, fullpath, iteration, time, dt ):\n # Create the file\n f = self.open_file( fullpath )\n\n # Setup the different layers of the openPMD file\n # (f is None if this processor does not participate is writing data)\n if f is not None:\n\n # Setup the attributes of the top level of the file\n self.setup_openpmd_file( f, iteration, time, dt )\n # Setup the meshes group (contains all the particles)\n particle_path = \"/data/%d/particles/\" %iteration\n\n for species_name in self.species_names_list:\n species = self.species_dict[species_name]\n species_path = particle_path+\"%s/\" %(species_name)\n # Create and setup the h5py.Group species_grp\n species_grp = f.require_group( species_path )\n self.setup_openpmd_species_group( species_grp, species,\n self.constant_quantities_dict[species_name])\n\n # Loop over the different quantities that should be written\n # and setup the corresponding datasets\n for quantity in self.array_quantities_dict[species_name]:\n\n if quantity in [\"x\", \"y\", \"z\"]:\n quantity_path = \"position/%s\" %(quantity)\n dset = species_grp.require_dataset(\n quantity_path, (0,),\n maxshape=(None,), dtype='f8')\n self.setup_openpmd_species_component( dset, quantity )\n\n elif quantity in [\"ux\", \"uy\", \"uz\"]:\n quantity_path = \"momentum/%s\" %(quantity[-1])\n dset = species_grp.require_dataset(\n quantity_path, (0,),\n maxshape=(None,), dtype='f8')\n self.setup_openpmd_species_component( dset, quantity )\n\n elif quantity in [\"w\", \"id\", \"charge\"]:\n if quantity == \"w\":\n particle_var = \"weighting\"\n else:\n particle_var = quantity\n if quantity == \"id\":\n dtype = 'uint64'\n else:\n dtype = 'f8'\n dset = species_grp.require_dataset(\n particle_var, (0,), maxshape=(None,), dtype=dtype )\n self.setup_openpmd_species_component( dset, quantity )\n self.setup_openpmd_species_record(\n species_grp[particle_var], particle_var )\n\n else :\n raise ValueError(\n \"Invalid quantity for particle output: %s\"\n %(quantity) )\n\n # Setup the hdf5 groups for \"position\" and \"momentum\"\n if self.rank == 0:\n if \"x\" in self.array_quantities_dict[species_name]:\n self.setup_openpmd_species_record(\n species_grp[\"position\"], \"position\" )\n if \"ux\" in self.array_quantities_dict[species_name]:\n self.setup_openpmd_species_record(\n species_grp[\"momentum\"], \"momentum\" )\n\n # Close the file\n f.close()",
"def add_single_facet_from_pickle(self, filename):\r\n self.sides.append(s3d.FlatFace.from_pickle(filename))"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      ["query", "document", "negatives"]
    ]
  }
}
|
Add lattice to the file.
|
def _add_lattice(self, lattice, cuba_keys):
    name = lattice.name
    lattice_root = self._root.lattice
    group = tables.Group(lattice_root, name=name, new=True)
    h5_lattice = H5Lattice.create_new(
        group, lattice.primitive_cell, lattice.size, lattice.origin)
    h5_lattice.data = lattice.data
    if cuba_keys is not None:
        for item in lattice.iter(item_type=CUBA.NODE):
            item.data = DataContainer(
                {key: item.data[key] for key in item.data
                 if key in cuba_keys[CUBA.NODE]})
            h5_lattice.update([item])
    else:
        h5_lattice.update(lattice.iter(item_type=CUBA.NODE))
|
[
"def write_lat_file(self):\n\n # If the lattice file exists, remove it and start over\n if os.path.isfile(self.filename):\n os.remove(self.filename)\n\n lat = open(self.filename, 'w')\n\n header = '? VERSION = 1.0\\n'\n header += '? UNITLENGTH = ' + str(self.unit_length) + '\\n'\n lat.write(header)\n\n quad_label = '#\\n'\n quad_label += '# Quads:\\n'\n quad_label += '# QF dB/dx L space\\n'\n quad_label += '#--------------------------------------\\n'\n lat.write(quad_label)\n\n # Start with quads\n for quad_array in self.elems_dict['QF']:\n quadline = 'QF '\n quadline += str(quad_array[0]) + ' '\n quadline += str(quad_array[1]) + ' '\n quadline += str(quad_array[2]) + ' \\n'\n lat.write(quadline)\n\n und_label = '#\\n'\n und_label += '# Undulators:\\n'\n und_label += '# AW AW0 L space\\n'\n und_label += '#--------------------------------------\\n'\n lat.write(und_label)\n\n # Add undulators\n for und_array in self.elems_dict['AW']:\n undline = 'AW '\n undline += str(und_array[0]) + ' '\n undline += str(und_array[1]) + ' '\n undline += str(und_array[2]) + ' \\n'\n lat.write(undline)\n\n lat.close()",
"def save_lattice(lattice, filename):\n np.save(filename, lattice)\n print (\"SOM lattice saved at %s\" %filename)",
"def convert_lattice(file_in, file_out):\n open_fn = gzip.open if file_in.endswith('.gz') else open\n with open_fn(file_in, 'rt') as lattice, open(file_out, 'w') as dot:\n dot.write(\n \"digraph lattice {\\n\" \\\n \"\\trankdir=LR;\\n\" \\\n \"\\tnode [shape = ellipse; fontname = courier];\\n\" \\\n \"\\tedge [fontname = courier];\\n\\n\")\n while True:\n line = lattice.readline()\n if line.startswith('N='):\n break\n first_line = line.split()\n nodes, links = [int(i.split('=')[1]) for i in first_line]\n for _ in range(nodes):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:3])\n dot.write(\"\\t%s [label = \\\"id=%s\\\\nt=%s\\\\nW=%s\\\"];\\n\" % (\n content[0], content[0], content[1], content[2]))\n dot.write(\"\\n\")\n for _ in range(links):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:5])\n if next_line[5].startswith('n='):\n dot.write(\n \"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\\nn=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3],\n content[4], next_line[5].split('=')[1]))\n else:\n dot.write(\"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3], content[4]))\n dot.write(\"}\")",
"def createFileFooter(self):\n import_file_desc_h = open('xml_footer.txt', 'r')\n readlines = import_file_desc_h.read()\n self.fileDesXmlData.write(readlines)\n import_file_desc_h.close()",
"def add_nodes(self):\n\t\twith open(self.fname, 'a') as f:\n\t\t\tf.write(\"\\n%%%%%%%%%% ADDING NODES %%%%%%%%%%%%%\\n\\n\")\n\t\t\ti = 0\n\t\t\tfor v in self.G.nodes:\n\t\t\t\tf.write('\\t\\\\Vertex[x={}, y={}]{{{}}}\\n'.format(round(self.factor*v.x, 3), round(self.factor*v.y, 3), i))\n\t\t\t\t\n\t\t\t\tself.vtoid[v] = i\t\t\t\t\n\t\t\t\t\n\t\t\t\ti += 1",
"def load_lattice(filename):\n lattice = np.load(filename)\n print (\"SOM lattice loaded from %s\" %filename)\n return lattice",
"def generate_lattice(self, verbose=False):\n if not self._lattice:\n lat = StrictOrders().get_orders(xrange(1, self.set_n + 1), verbose)\n self._lattice = lat",
"def writeIMPACT(filename,beam,lattice=[]):\n beamStrList=beam2str(beam) \n latticeStrList=lattice2str(lattice)\n \n \n f=open(filename,'w') \n f.writelines(beamStrList)\n f.writelines(latticeStrList)\n f.close()",
"def addLatticeNodes(self,nodes, lattice = None):\n\t\tself.lattice = lattice\n\t\tself.nodes += nodes\n\t\tfor node in self.nodes:\n\t\t\terrCntrl = self._getInstanceOfErrorController()\n\t\t\terrCntrl.setName(\"ErrCntrl:\" + errCntrl.getShortTypeName() + \":\" + node.getName())\n\t\t\terrCntrl.setLattice(lattice)\n\t\t\terrCntrl.setOneNodeParent(node)\n\t\t\tself.error_controllers.append(errCntrl)\n\t\t\tself.node_to_cntrl_dict[node] = errCntrl\n\t\tself.updateErrorParameters()",
"def create_slf_file(self):\n mesh = open(self.name, 'w') \n mesh.write('numel numnp nmat nmode (This is for a beam bridge)\\n')\n mesh.write(str(len(self.edge_list))+'\\t'+str(len(self.node_list))\n + '\\t'+str(len(self.beams)) + '\\t0\\n')\n mesh.write('matl no., E mod, Poiss. Ratio,density, Area, Iy, Iz\\n')\n tables = open('./tables/CHSTables.txt', 'r')\n for i,beam in enumerate(self.beams):\n mesh.write(str(i)+' '+str(self.beams[i]['emod'])+'\\t0.3000\\t'\n + str(self.beams[i]['density'])+'\\t'+str(self.beams[i]['area'])\n + '\\t'+str(self.beams[i]['iy'])+'\\t'+str(self.beams[i]['ix']) + '\\n') \n mesh.write('el no.,connectivity, matl no, element type\\n')\n for i, edge in enumerate(self.edge_list): \n mesh.write(str(i)+'\\t'+str(edge['pt_a'])+'\\t'+str(edge['pt_b'])\n + '\\t'+str(edge['material'])+'\\t2 \\n')\n mesh.write('node no., coordinates\\n')\n for node in self.node_list:\n mesh.write(node['id']+'\\t'+str(node['x'])+'\\t'+str(node['y'])+'\\t'+str(node['z'])+\"\\n\")\n mesh.write(\"element with specified local z axis: x, y, z component\\n -10\\n\")\n mesh.write('prescribed displacement x: node disp value\\n')\n for node in self.fixed_list:\n# if node[1] == True: # un-comment when dealing with fixed-roller structures\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed displacement y: node disp value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed displacement z: node disp value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi x: node angle value\\n')\n for node in self.fixed_list:\n# if node[1] == True: # un-comment when dealing with fixed-roller structures\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi y: node angle value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi z: node angle value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nnode with point load x, y, z and 3 moments phi x, phi y, phi z\\n') \n if self.BROKEN:\n for node in self.nodeselfloads: \n trans = 0\n broken_long = 0\n for thing in self.load_nodes:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load\n trans = self.transverse_cable_load \n if self.GROUND_BROKEN:\n for thing in self.ground_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_ground_load_broken\n trans = self.transverse_ground_load\n broken_long = self.longitudinal_ground_load\n for thing in self.break_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load_broken\n broken_long = self.longitudinal_cable_load\n trans = self.transverse_cable_load\n else:\n for thing in self.ground_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_ground_load\n trans = self.transverse_ground_load\n for thing in self.break_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load_broken\n broken_long = self.longitudinal_cable_load \n trans = self.transverse_cable_load\n mesh.write(str(node[0])+'\\t'+str(broken_long)+'\\t'+str(trans)+'\\t-'+str(round(node[1],5))+'\\t0\\t0\\t0\\n')\n else:\n for node in self.nodeselfloads: \n trans = 0\n for yolk in self.load_nodes:\n if yolk == node[0]:\n node[1] = node[1] + self.vertical_cable_load\n trans = self.transverse_cable_load\n for thong in self.ground_node:\n if thong == node[0]:\n node[1] = node[1] + self.vertical_ground_load\n trans = 
self.transverse_ground_load\n mesh.write(str(node[0])+'\\t0\\t'+str(trans)+'\\t-'+str(round(node[1],5))+'\\t0\\t0\\t0\\n')\n mesh.write('-10\\nelement with distributed load in global beam y and z coordinates\\n') \n mesh.write('-10\\nelement no. and gauss pt. no. with local stress vector xx and moment xx,yy,zz\\n-10')\n mesh.close()",
"def write_to_pickle(self, lat_dir):\n filename = os.path.join(lat_dir, 'gspace_%dcons.p' % self.set_n)\n with open(filename, 'wb') as f:\n cPickle.dump(self._lattice, f)",
"def addFile(fig, canvas):\n data = readData()\n file_path = tkinter.filedialog.askopenfilename()\n if not file_path:\n pass\n else:\n data.append((file_path, openChrom(file_path)))\n fig.clear()\n axes = fig.add_subplot(111)\n for i in data:\n x_array, y_array = list(zip(*i[1]))\n axes.plot(x_array, y_array, label=str(os.path.split(i[0])[-1]))\n axes.legend()\n canvas.draw()",
"def __init__(self, lattice_file):\n\n super().__init__()\n\n # No log conversion by default. \"None\" means the lattice file uses\n # linear probabilities.\n self._log_scale = logprob_type(1.0)\n\n self._initial_node_id = None\n self._final_node_ids = []\n\n if lattice_file is None:\n self._num_nodes = 0\n self._num_links = 0\n return\n\n self._num_nodes = None\n self._num_links = None\n for line in lattice_file:\n fields = _split_slf_line(line)\n self._read_slf_header(fields)\n if (self._num_nodes is not None) and (self._num_links is not None):\n break\n if self._num_nodes is None or self._num_links is None:\n raise InputError(\"SLF lattice does not specify the number of nodes \"\n \"and the number of links.\")\n\n if self.wi_penalty is not None:\n if self._log_scale is None:\n self.wi_penalty = numpy.log(self.wi_penalty)\n else:\n self.wi_penalty *= self._log_scale\n\n self.nodes = [self.Node(node_id) for node_id in range(self._num_nodes)]\n\n for line in lattice_file:\n fields = _split_slf_line(line)\n if not fields:\n continue\n name, value = _split_slf_field(fields[0])\n if name == 'I':\n self._read_slf_node(int(value), fields[1:])\n elif name == 'J':\n self._read_slf_link(int(value), fields[1:])\n\n if len(self.links) != self._num_links:\n raise InputError(\"Number of links in SLF lattice doesn't match the \"\n \"LINKS field.\")\n\n if self._initial_node_id is not None:\n self.initial_node = self.nodes[self._initial_node_id]\n else:\n # Find the node with no incoming links.\n self.initial_node = None\n for node in self.nodes:\n if len(node.in_links) == 0:\n self.initial_node = node\n break\n if self.initial_node is None:\n raise InputError(\"Could not find initial node in SLF lattice.\")\n\n final_nodes_found = 0\n for node in self.nodes:\n if node.id in self._final_node_ids or len(node.out_links) == 0:\n node.final = True\n final_nodes_found += 1\n\n if final_nodes_found == 0:\n raise InputError(\"Could not find final node in SLF lattice.\")\n elif final_nodes_found > 1:\n # Peter: Not sure if multiple final nodes are allowed, but for now raise an input error. The\n # decoder supports multiple final nodes no problem\n raise InputError(\"More then one final node in SLF lattice.\")\n\n # If word identity information is not present in node definitions then\n # it must appear in link definitions.\n self._move_words_to_links()",
"def generate_lab(self):\n\n with open(self.fichier, \"r\") as fichier:\n x = 0\n for row in fichier:\n row_lab = []\n y = 0\n for lettre in row:\n if lettre != '\\n':\n row_lab.append(lettre)\n if lettre == 'm':\n self.l_wall.append((x*20, y*20, 20, 20))\n if lettre == 'x':\n self.l_none.append((x*20, y*20, 20, 20))\n y += 1\n x += 1\n self.config.append(row_lab)",
"def write(self):\n \n hdulist = fits.HDUList()\n\n level0 = self.get_level0()\n hdulist.append(level0)\n \n level1 = self.get_level1()\n hdulist.append(level1)\n \n level2 = self.get_level2()\n hdulist.append(level2)\n \n level3 = self.get_level3()\n hdulist.append(level3)\n \n level4 = self.get_level4()\n hdulist.append(level4)\n \n hdulist.writeto(self.metadata_file,clobber=True)\n print('Output metadata to '+self.metadata_file)",
"def add_edges(self):\n\t\twith open(self.fname, 'a') as f:\n\t\t\tf.write(\"%%%%%%%%%% ADDING EDGES %%%%%%%%%%%%%\\n\\n\")\n\t\t\tfor v in self.G.nodes:\t\t\t\n\t\t\t\tfor w in self.G.nodes:\n\t\t\t\t\tif (v, w) in self.G.edges:\n\t\t\t\t\t\tf.write('\\t\\\\Edge({})({})\\n'.format(self.vtoid[v], self.vtoid[w]))",
"def addBodyFromFile(self, name):\n with open(\"lesSolarsysteminfo.txt\", \"r\") as infile:\n data = infile.readlines()\n k = 0\n for i in data:\n i = i.strip()\n if i == name:\n k = 1\n elif k == 1:\n mass = float(i)/2e30\n k = 2\n elif k == 2:\n pos0 = i.split(',')\n pos0[0] = float(pos0[0])\n pos0[1] = float(pos0[1])\n pos0[2] = float(pos0[2])\n pos0 = np.asarray(pos0)\n k = 3\n elif k == 3:\n vel0 = i.split(',')\n vel0[0] = float(vel0[0])\n vel0[1] = float(vel0[1])\n vel0[2] = float(vel0[2])\n vel0 = np.asarray(vel0)\n break\n \n if k == 0:\n print(\"Couldn't find \" + name + \" in the file. Make sure you wrote the name correctly and that it's capitalised\")\n sys.exit()\n else:\n self.planets.append(celestialBodies(name, vel0, pos0, mass))",
"def write(self):\n\n # Write file lines according to gaussian requirements\n with open(self.filepath, 'w') as file:\n # file.write('%Chk={}checkpoint.com\\n'.format(utils.sanitize_path(os.path.dirname(self.filepath),\n # add_slash=True)))\n file.write(self.calculation.get_calc_line() + '\\n\\n')\n file.write(self.molecule_name + '\\n\\n')\n file.write(self.multiplicity + '\\n')\n file.write(''.join(line for line in self.mol_coords))\n file.write('\\n\\n')",
"def write_grid(filename, sites):\n \n with open(filename, 'w') as fp:\n fp.write(str(len(sites)))\n fp.write('\\n')\n np.savetxt(fp, sites, fmt=\"%d\")"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      ["query", "document", "negatives"]
    ]
  }
}
|
Get lattice from file. The returned lattice can be used to query and change the related data stored in the file. If the file has been closed then the lattice should no longer be used.
|
def _get_lattice(self, name):
    group = self._root.lattice._f_get_child(name)
    return H5Lattice(group)
|
[
"def load_lattice(filename):\n lattice = np.load(filename)\n print (\"SOM lattice loaded from %s\" %filename)\n return lattice",
"def __init__(self, lattice_file):\n\n super().__init__()\n\n # No log conversion by default. \"None\" means the lattice file uses\n # linear probabilities.\n self._log_scale = logprob_type(1.0)\n\n self._initial_node_id = None\n self._final_node_ids = []\n\n if lattice_file is None:\n self._num_nodes = 0\n self._num_links = 0\n return\n\n self._num_nodes = None\n self._num_links = None\n for line in lattice_file:\n fields = _split_slf_line(line)\n self._read_slf_header(fields)\n if (self._num_nodes is not None) and (self._num_links is not None):\n break\n if self._num_nodes is None or self._num_links is None:\n raise InputError(\"SLF lattice does not specify the number of nodes \"\n \"and the number of links.\")\n\n if self.wi_penalty is not None:\n if self._log_scale is None:\n self.wi_penalty = numpy.log(self.wi_penalty)\n else:\n self.wi_penalty *= self._log_scale\n\n self.nodes = [self.Node(node_id) for node_id in range(self._num_nodes)]\n\n for line in lattice_file:\n fields = _split_slf_line(line)\n if not fields:\n continue\n name, value = _split_slf_field(fields[0])\n if name == 'I':\n self._read_slf_node(int(value), fields[1:])\n elif name == 'J':\n self._read_slf_link(int(value), fields[1:])\n\n if len(self.links) != self._num_links:\n raise InputError(\"Number of links in SLF lattice doesn't match the \"\n \"LINKS field.\")\n\n if self._initial_node_id is not None:\n self.initial_node = self.nodes[self._initial_node_id]\n else:\n # Find the node with no incoming links.\n self.initial_node = None\n for node in self.nodes:\n if len(node.in_links) == 0:\n self.initial_node = node\n break\n if self.initial_node is None:\n raise InputError(\"Could not find initial node in SLF lattice.\")\n\n final_nodes_found = 0\n for node in self.nodes:\n if node.id in self._final_node_ids or len(node.out_links) == 0:\n node.final = True\n final_nodes_found += 1\n\n if final_nodes_found == 0:\n raise InputError(\"Could not find final node in SLF lattice.\")\n elif final_nodes_found > 1:\n # Peter: Not sure if multiple final nodes are allowed, but for now raise an input error. The\n # decoder supports multiple final nodes no problem\n raise InputError(\"More then one final node in SLF lattice.\")\n\n # If word identity information is not present in node definitions then\n # it must appear in link definitions.\n self._move_words_to_links()",
"def convert_lattice(file_in, file_out):\n open_fn = gzip.open if file_in.endswith('.gz') else open\n with open_fn(file_in, 'rt') as lattice, open(file_out, 'w') as dot:\n dot.write(\n \"digraph lattice {\\n\" \\\n \"\\trankdir=LR;\\n\" \\\n \"\\tnode [shape = ellipse; fontname = courier];\\n\" \\\n \"\\tedge [fontname = courier];\\n\\n\")\n while True:\n line = lattice.readline()\n if line.startswith('N='):\n break\n first_line = line.split()\n nodes, links = [int(i.split('=')[1]) for i in first_line]\n for _ in range(nodes):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:3])\n dot.write(\"\\t%s [label = \\\"id=%s\\\\nt=%s\\\\nW=%s\\\"];\\n\" % (\n content[0], content[0], content[1], content[2]))\n dot.write(\"\\n\")\n for _ in range(links):\n next_line = lattice.readline().split()\n content = tuple(i.split('=')[1] for i in next_line[0:5])\n if next_line[5].startswith('n='):\n dot.write(\n \"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\\nn=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3],\n content[4], next_line[5].split('=')[1]))\n else:\n dot.write(\"\\t%s -> %s [label = \\\"id=%s\\\\na=%s\\\\nl=%s\\\"];\\n\" % (\n content[1], content[2], content[0], content[3], content[4]))\n dot.write(\"}\")",
"def get_lattice(self):\n return self._lattice",
"def read_lanc(path: str) -> admix.data.Lanc:\n lanc = admix.data.Lanc(path)\n return lanc",
"def open_file(file):\n elevation_small_file = open(file, 'r')\n elevation = elevation_small_file.readlines()\n return elevation",
"def getLattice() :\n lattice = [getElem('loop'),getElem('quad'),getElem('drift'),getElem('quad'),getElem('drift')]\n lattice[3].Kx = -lattice[3].Kx\n return lattice",
"def read_data( filename ):\n\n # read first word at first line\n with open( filename, 'r' ) as f:\n lattice = f.readline().split()[0] \n\n\n # read volumen and energy results \n data = np.loadtxt(filename, skiprows=1) \n\n return lattice, factor[lattice]*data[:,0]**3, data[:,1]",
"def loaddata(self, f):\n\n if isinstance(f, str):\n try:\n fin = open(f)\n except IOError as e:\n raise LUTError(\"IO error on \" + f + \" - \", e.args[1])\n try:\n return self._loaddata(fin)\n except LUTError:\n raise\n finally:\n fin.close()\n else:\n return self._loaddata(f)",
"def load_data_from_file(file):\n \n setup_database()\n # Parse the file creating objects for loading database.\n LotoDraw.objects.all().delete()\n ldp = LotoDrawParser()\n ldp.draw_from_file(file)",
"def generate_lattice(self, verbose=False):\n if not self._lattice:\n lat = StrictOrders().get_orders(xrange(1, self.set_n + 1), verbose)\n self._lattice = lat",
"def read(filename):\n mesh = meshio.read(filename)\n\n # make sure to include the used nodes only\n if \"tetra\" in mesh.cells:\n points, cells = _sanitize(mesh.points, mesh.cells[\"tetra\"])\n return MeshTetra(points, cells)\n elif \"triangle\" in mesh.cells:\n points, cells = _sanitize(mesh.points, mesh.cells[\"triangle\"])\n return MeshTri(points, cells)\n\n raise RuntimeError(\"Illegal mesh type.\")\n return",
"def load_triplets_from_file(cls, path: str) -> Triplets:\n with open(path) as graph_file:\n file_content = graph_file.readlines()\n triplets = list(map(lambda x: x.strip().split('\\t'), file_content))\n source, relation, destination = list(zip(*triplets))\n\n return list(source), list(relation), list(destination)",
"def load_dihedral_table(self, picklefile='cnot_dihedral_2.pickle'):\n with open(picklefile, \"rb\") as pf:\n pickletable = pickle.load(pf)\n pf.close()\n return pickletable",
"def open_sol(file):\n\tS = pickle.load(open(file, \"rb\"))\n\treturn S",
"def load_raster(cls, file):\n gxapi_cy.WrapARCMAP._load_raster(GXContext._get_tls_geo(), file.encode())",
"def load(file_path):\r\n\r\n with h5py.File(file_path, 'r') as f:\r\n cell_list = [_load_cell(f[key]) for key in f.keys()]\r\n\r\n if len(cell_list) == 1:\r\n return cell_list[0]\r\n else:\r\n return CellList(cell_list)",
"def loadTriplet(path=\"data/freebase_mtr100_mte100-train.txt\"):\n triplet = []\n with open(path, 'r') as f:\n for line in f.readlines():\n h, l, t = line.strip().split(\"\\t\")\n triplet.append((h,l,t))\n return triplet",
"def getClData(listfile, verbose=True, no_pol=False):\n \n data = []\n for dataset in file(listfile):\n dataset=dataset.strip()\n if len(dataset) and dataset[0] != \"#\":\n if verbose: print(\"Getting %s\" % (dataset))\n set = ClData(dataset)\n if no_pol:\n set.has_pol_really = set.has_pol\n set.has_pol=False # explicitly ignore polarization\n if verbose: print(\" got %s\" % (set.name))\n data.append(set)\n return data"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      ["query", "document", "negatives"]
    ]
  }
}
|
Delete lattice from file.
|
def _remove_lattice(self, name):
    node = self._root.lattice._f_get_child(name)
    node._f_remove(recursive=True)
|
[
"def _ClearTriageLinkFile(self) -> None:\n open(self._triage_link_file, 'w').close()",
"def remove(self, file):\n pass",
"def delete(self):\n self.gridfs.delete(self.file_id)",
"def delete_file(self):\n os.remove(self.full_path())\n self.size = 0",
"def delete(task_file):\n\t\n\tos.remove(task_file)",
"def delete(file):\n\tif exists(file):\n\t\tos.unlink(file)",
"def del_datafile():\n global data_file\n try:\n os.remove(data_file)\n except FileNotFoundError:\n pr.print_warning(\"File not created.... Use socli -u to create a new configuration file.\")\n exit(0)",
"def deleteFeaturesTxt(self):\n file_path = os.path.dirname(os.path.realpath(__file__))\n if os.path.exists(file_path + '\\\\static\\\\features.txt'):\n os.remove(file_path + '\\\\static\\\\features.txt')\n self.goToEndWindow()",
"def remove_measurements_file_for_test(self):\n os.remove(self.measurements_file_name)",
"def delete(file=None):\n if file:\n os.remove(file)",
"def deleteDataset(filename, group, dataset):\n\n FILE = h5py.File(filename, \"r+\")\n\n GROUP = FILE[group]\n\n try:\n del GROUP[dataset]\n print(\"[DELETE]: <{:s}> dataset in <{:s}> group deleted.\".format(dataset, group))\n except:\n pass\n\n FILE.close()",
"def delete(problem, problemset, filename=\"\"):\n path = TANGO_COURSELAB_DIR + TANGO_KEY + \\\n \"-\" + _get_courselab(problem, problemset)\n if filename == \"\":\n shutil.rmtree(path)\n else:\n path += \"/\" + filename\n os.remove(path)",
"def delete_data(file_name=\"data_log.csv\"):\n if os.path.exists(file_name):\n os.remove(file_name)",
"def DeleteNetwork (project_dir, file_name, grp):\n i = ReturnN_Networks(project_dir, file_name)\n if (int(grp) < i+1):\n file = hdf.File(project_dir + file_name, 'r+')\n del file[grp]\n file.close()",
"def _clear_tail_file(self):\n tnum, _, _ = self.info['tail']\n while tnum >= 1:\n tnum -= 1\n path = self._qfile(tnum)\n if os.path.exists(path):\n os.remove(path)\n else:\n break",
"def delete_local_file(path_to_file):\n os.remove(path_to_file)\n return",
"def close_file(self):\n self.hdf.close()",
"def deleteFile(fileName):\n os.remove(calibrationFilesRoot+fileName+\".calib.txt\")",
"def del_file(self, path: str) -> None:\n cmd = b''.join([\n ev3.DELETE_FILE,\n str.encode(path) + b'\\x00' # NAME\n ])\n self.send_system_cmd(cmd)"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      ["query", "document", "negatives"]
    ]
  }
}
|
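The `_get_lattice` and `_remove_lattice` documents above, and the iterator methods below, all lean on PyTables node operations (`_f_get_child`, `_f_iter_nodes`, `_f_remove`) applied to groups under an HDF5 root. A minimal standalone sketch of those calls, assuming a throwaway file and illustrative group names:

```python
import tables

with tables.open_file("example.h5", mode="w") as handle:
    lattice_root = handle.create_group("/", "lattice")        # container group, like self._root.lattice
    handle.create_group(lattice_root, "my_lattice")            # one named child group

    node = lattice_root._f_get_child("my_lattice")             # lookup by name
    print([n._v_name for n in lattice_root._f_iter_nodes()])   # iterate child names
    node._f_remove(recursive=True)                             # delete the subtree
```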
Returns an iterator over a subset or all of the particle containers.
|
def _iter_particles(self, names=None):
    if names is None:
        for node in self._root.particle._f_iter_nodes():
            yield self._get_particles(node._v_name)
    else:
        for name in names:
            if name in self._get_child_names(self._root.particle):
                yield self._get_particles(name)
|
[
"def _iter_particles(self, ids=None):\n if ids is None:\n return iter(self._particles)\n else:\n return self._particles.itersequence(ids)",
"def particles(self, selection_func=None):\n if selection_func is None:\n return self.particles_\n else:\n return filter(selection_func, self.particles_)",
"def iter_particles(self, ids=None):\n if ids is None:\n for row in self._group.particles:\n yield Particle(\n id=row['id'], coordinates=tuple(row['coordinates']))\n else:\n # FIXME: we might want to use an indexed query for these cases.\n for particle_id in ids:\n yield self.get_particle(particle_id)",
"def iter_elements(self) -> Iterator[SchemaElementType]:\n if self.max_occurs == 0:\n return\n\n iterators: List[Iterator[ModelParticleType]] = []\n particles = iter(self)\n\n while True:\n for item in particles:\n if isinstance(item, XsdGroup):\n iterators.append(particles)\n particles = iter(item)\n if len(iterators) > limits.MAX_MODEL_DEPTH:\n raise XMLSchemaModelDepthError(self)\n break\n else:\n yield item\n else:\n try:\n particles = iterators.pop()\n except IndexError:\n return",
"def iterate_container_objects(self, container):\r\n\r\n return self._get_objects(container)",
"def _add_particles(self, iterable):\n uids = []\n for particle in iterable:\n uids.append(self._add_particle(particle))\n return uids",
"def __iter__(self) -> Iterator[_SetElementT]:\n return iter(self._elements)",
"def iter_elements(self, condition):\n for elem in self.iter():\n if condition(elem):\n yield elem",
"def __iter__(self):\n return self.subset_loader.__iter__()",
"def iter_model(self) -> Iterator[ModelParticleType]:\n iterators: List[Iterator[ModelParticleType]] = []\n particles = iter(self)\n\n while True:\n for item in particles:\n if isinstance(item, XsdGroup) and item.is_pointless(parent=self):\n iterators.append(particles)\n particles = iter(item)\n if len(iterators) > limits.MAX_MODEL_DEPTH:\n raise XMLSchemaModelDepthError(self)\n break\n else:\n yield item\n else:\n try:\n particles = iterators.pop()\n except IndexError:\n return",
"def iter(self) -> Iterator[Sequence]:\n ...",
"def iterator(self) -> \"swig::SwigPyIterator *\":\n return _itkImagePython.vectoritkImageCF3_iterator(self)",
"def children(self) -> Iterator['Type']:\n raise NotImplementedError",
"def __iter__( self ): \n return _SetIterator( self._theElements )",
"def iterator(self, **kwargs):\n cls_name = '{0}Collection'.format(self._cls.__name__)\n collection_cls = type(str(cls_name), (ElementCollection,), {})\n\n params = {'filter_context': self._cls.typeof}\n params.update(kwargs)\n return collection_cls(**params)",
"def iterator(self, *args, **kwargs):\n return _decomp.component_set_iterator(self, *args, **kwargs)",
"def iterator(self) -> \"swig::SwigPyIterator *\":\n return _itkImagePython.vectoritkImageSS3_iterator(self)",
"def elements(self):\n\t\treturn iter(self._elements)",
"def iterate(self, evtype, evsrc):\n for d in self._sub.iterate(evtype):\n for v in d.iterate(evsrc):\n yield v\n return"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      ["query", "document", "negatives"]
    ]
  }
}
|
Returns an iterator over a subset or all of the meshes.
|
def _iter_meshes(self, names=None):
    if names is None:
        for mesh_node in self._root.mesh._f_iter_nodes():
            yield self._get_mesh(mesh_node._v_name)
    else:
        for name in names:
            if name in self._get_child_names(self._root.mesh):
                yield self._get_mesh(name)
|
[
"def __iter__(self):\n return self.subset_loader.__iter__()",
"def __iter__(self):\r\n for shape in self.__shapes:\r\n yield shape",
"def iter(root=None, **kwargs):\n # type: (om2.MObject, Dict) -> Generator[om2.MObject]\n return idag(root, filter_type=om2.MFn.kMesh, **kwargs)",
"def __iter__(self) -> Iterator[_SetElementT]:\n return iter(self._elements)",
"def __iter__(self):\n # get info on current worker process\n worker_info = torch.utils.data.get_worker_info()\n\n if worker_info is None:\n # single-process data loading, return the whole set of files\n return _get_waymo_iterator(self.file_paths, self.dataloader_config,\n self.scenario_config)\n\n # distribute a unique set of file paths to each worker process\n worker_file_paths = np.array_split(\n self.file_paths, worker_info.num_workers)[worker_info.id]\n return _get_waymo_iterator(list(worker_file_paths),\n self.dataloader_config,\n self.scenario_config)",
"def __iter__(self):\n for coreg in self.pipeline:\n yield coreg",
"def iterator(self, *args, **kwargs):\n return _decomp.component_set_iterator(self, *args, **kwargs)",
"def face_iterator(self, increasing=True):\n Fs = self.faces()\n dim_index = range(-1, self.dimension() + 1)\n if not increasing:\n dim_index = reversed(dim_index)\n for i in dim_index:\n for F in Fs[i]:\n yield F",
"def _select_meshes(meshes):\r\n\r\n for mesh in meshes:\r\n mesh.select = True",
"def __iter__( self ): \n return _SetIterator( self._theElements )",
"def __iter__(self):\n return self._all_shas()",
"def acceptsGeometryIterator(*args, **kwargs):\n \n pass",
"def iter(self) -> Iterator[Sequence]:\n ...",
"def subset(cube, bbox):\n if (cube.coord(axis='X').ndim == 1 and cube.coord(axis='Y').ndim == 1):\n # Workaround `cube.intersection` hanging up on FVCOM models.\n title = cube.attributes.get('title', 'untitled')\n featureType = cube.attributes.get('featureType', None)\n if (('FVCOM' in title) or ('ESTOFS' in title) or\n featureType == 'timeSeries'):\n cube = bbox_extract_1Dcoords(cube, bbox)\n else:\n cube = cube.intersection(longitude=(bbox[0], bbox[2]),\n latitude=(bbox[1], bbox[3]))\n elif (cube.coord(axis='X').ndim == 2 and\n cube.coord(axis='Y').ndim == 2):\n cube = bbox_extract_2Dcoords(cube, bbox)\n else:\n msg = \"Cannot deal with X:{!r} and Y:{!r} dimensions.\"\n raise CoordinateMultiDimError(msg.format(cube.coord(axis='X').ndim),\n cube.coord(axis='y').ndim)\n return cube",
"def __iter__(self):\n for vert in self.verteces:\n yield vert",
"def iter_procs(self):\n for row in self:\n if row.service_def:\n yield row",
"def __iter__(self) -> Iterator[\"GrainBoundary\"]:\n return self.gb_map.__iter__()",
"def get(self, bounds, predicate=lambda e: True):\n if self.tree is None:\n raise ValueError('must select a tree implementation before using')\n\n results = []\n for item_index in self.tree.get(bounds):\n result = self.atlas.entity(item_index, self.entity_type)\n if predicate(result):\n results.append(result)\n\n return frozenset(results)",
"def filter_clients(clients, flag_mask, me):\n iterator = iter(clients)\n for client in iterator:\n if client.intents & flag_mask == flag_mask:\n break\n \n else:\n yield me\n yield me\n return\n \n yield client\n yield client\n \n for client in iterator:\n if client.intents & flag_mask == flag_mask:\n yield client"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a new bullet
|
def new_bullet(name, rotation, width, x, y, velocity_x, velocity_y, speed, batch):
angle_radians = -math.radians(rotation)
ship_radius = width
bullet_x = x + math.cos(angle_radians) * ship_radius
bullet_y = y + math.sin(angle_radians) * ship_radius
from version2.game.Bullet import Bullet
_new_bullet = Bullet(bullet_x, bullet_y, batch=batch)
_new_bullet.name = name
_new_bullet.speed = speed
bullet_vx = (
velocity_x +
math.cos(angle_radians) * _new_bullet.speed
)
bullet_vy = (
velocity_y +
math.sin(angle_radians) * _new_bullet.speed
)
_new_bullet.velocity_x = bullet_vx
_new_bullet.velocity_y = bullet_vy
return _new_bullet
|
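For clarity, a minimal sketch of just the spawn-offset and velocity arithmetic performed by the new_bullet document above, with the pyglet-specific Bullet class left out; the numbers in the usage comment are illustrative assumptions.

# Hedged sketch: spawn at the ship's nose (offset by `width` along the
# negated heading) and add the ship's velocity to the bullet's own speed.
import math

def bullet_kinematics(rotation, width, x, y, velocity_x, velocity_y, speed):
    angle = -math.radians(rotation)
    bx = x + math.cos(angle) * width
    by = y + math.sin(angle) * width
    vx = velocity_x + math.cos(angle) * speed
    vy = velocity_y + math.sin(angle) * speed
    return (bx, by), (vx, vy)

# Usage: bullet_kinematics(90.0, 20.0, 0.0, 0.0, 0.0, 0.0, 300.0)
# -> approximately ((0.0, -20.0), (0.0, -300.0))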
[
"def add_bullet(self):\n self.game_objects.append(Bullet(self.player.heading, self.player.position))",
"def _create_bullet(self, size, velocity, color):\n shape = pygame.Rect(self._rect.centerx,\n self._rect.centery, size[0], size[1])\n bullet = {'velocity': velocity, 'rect': shape,\n 'color': color, 'visible': True}\n self._bullets.append(bullet)",
"def explosion(bullet):\r\n exp = gameobjects.Explosion(bullet.body.position[0], bullet.body.position[1])\r\n game_objects_list.append(exp)\r\n explosion_list.append(exp)\r\n explosion_sound.play()",
"def createBulletListParagraph(c, text, x, y):\n style = getSampleStyleSheet()\n width, height = letter\n p = Paragraph(text, style=style[\"Normal\"], bulletText='&bull')\n p.wrapOn(c, width, height)\n p.drawOn(c, x, y, mm)",
"def fire_bullet(self):\n now = pygame.time.get_ticks()\n if now - self.ticks > self.shootdelay:\n self.ticks = pygame.time.get_ticks()\n fire = Bullet(self.rect.center[0],\n self.rect.center[1])\n fire.yvel = fire.defspeed\n return fire",
"def make_bullet_polygon(klass, a, b):\n perp = (a-b).perpendicular_normal() * (a-b).length * 0.1\n lerp = a + (b - a) * 0.1\n c = lerp + perp\n d = lerp - perp\n return Polygon((a,c,b,d,a))",
"def bullet(self, spacing):\n return f'{spacing}* '",
"def shoot(self, direction):\n\t\tself.facing = direction\n\t\tbullet = game_items.Bullet(self)\n\t\tself.bullets_sprite_list.add(bullet)\n\t\tself.game.all_sprite_list.add(bullet)",
"def collision_bullet_bullet(arbiter, _space, data):\r\n _bullet = arbiter.shapes[0]\r\n # Create explosion\r\n explosion(_bullet.parent)\r\n if _bullet.parent in game_objects_list:\r\n bullet_list.remove(_bullet.parent)\r\n game_objects_list.remove(_bullet.parent)\r\n space.remove(_bullet, _bullet.body)\r\n return False",
"def display_bullets(self):\r\n pygame.draw.rect(self.screen, self.settings.bullet_color, self.bullet)",
"def bullet(text, level=1):\n return '{0:s}* {1:s}'.format(' ' * (level - 1), text.strip())",
"def __init__(self,my_settings,screen,ship):\r\n super().__init__()\r\n self.screen = screen\r\n \"\"\" Create a bullet rect at (0,0) and then set correct position \"\"\"\r\n self.rect = pygame.Rect(0, 0, my_settings.bullet_width, my_settings.bullet_height) # create bullet's rect attribute\r\n self.rect.centerx = ship.rect.centerx # move the bullet accordingly with the ship\r\n #self.rect.centery = ship.rect.centery # set bullet's center to be the same as the ship's rect.center\r\n self.rect.top = ship.rect.top # set the top of the bullet's rect to match the top of the ship's rect\r\n\r\n # store the bullet's position as a decimal value\r\n self.y = float(self.rect.y)\r\n\r\n self.color = my_settings.bullet_color\r\n self.speed_factor = my_settings.bullet_speed_factor",
"def test_bullets(self) -> None:\n assert OUTPUT.body[0][1][0] == [\n \"--\\tbullet no indent\",\n \"\\t--\\tbullet indent 1\",\n \"\\t\\t--\\tbullet indent 2\",\n ]",
"def createDrawableObjects(self):\r\n num_rows = 4\r\n num_columns = 1\r\n droplet = 'images/droplet.png'\r\n animation = self.setup_animation(droplet,\r\n num_rows,\r\n num_columns)\r\n\r\n self.dropletSprite = pyglet.sprite.Sprite(animation)\r\n self.dropletSprite.position = (0,0)\r\n\r\n # Add these sprites to the list of drawables\r\n self.drawableObjects.append(self.dropletSprite)",
"def show(self,win):\n # display bullet\n # -------------\n if self.yPos > 0:\n win.addstr(self.yPos,self.xPos,\"+\")\n win.refresh()",
"def collision_bullet_box(arbiter, _space, data):\r\n _bullet = arbiter.shapes[0]\r\n _box = arbiter.shapes[1]\r\n # Create a explosion\r\n explosion(_bullet.parent)\r\n if _box.parent.boxmodel.destructable:\r\n # If the bos is destructable reduce HP\r\n _box.parent.hp -= 1\r\n if _box.parent.hp <= 0:\r\n # If HP reaches 0, remove box\r\n space.remove(_box, _box.body)\r\n game_objects_list.remove(_box.parent)\r\n # Award point\r\n point_list[_bullet.parent.owner] += 1\r\n print_points()\r\n if _bullet.parent in game_objects_list:\r\n bullet_list.remove(_bullet.parent)\r\n game_objects_list.remove(_bullet.parent)\r\n space.remove(_bullet, _bullet.body)\r\n return False",
"def createDrawableObjects(self):\r\n num_rows = 4\r\n num_columns = 1\r\n droplet = 'images/droplet.png'\r\n animation = self.setup_animation(droplet,\r\n num_rows,\r\n num_columns)\r\n\r\n self.dropletSprite = pyglet.sprite.Sprite(animation)\r\n self.dropletSprite.position = (0,200)\r\n\r\n cloud = pyglet.image.load('images/cloud.png')\r\n self.cloudSprite = pyglet.sprite.Sprite(cloud)\r\n self.cloudSprite.y = 100\r\n\r\n lightening = pyglet.image.load('images/lightening.png')\r\n self.lSprite = pyglet.sprite.Sprite(lightening)\r\n self.lSprite.y = 200\r\n\r\n car = pyglet.image.load('images/car.png')\r\n self.carSprite = pyglet.sprite.Sprite(car, -500, 0)\r\n\r\n\r\n # Add these sprites to the list of drawables\r\n self.drawableObjects.append(self.cloudSprite)\r\n self.drawableObjects.append(self.lSprite)\r\n self.drawableObjects.append(self.dropletSprite)\r\n self.drawableObjects.append(self.carSprite)",
"def collision_bullet_barrier(arbiter, _space, data):\r\n _bullet = arbiter.shapes[0]\r\n # Create explosion\r\n explosion(_bullet.parent)\r\n if _bullet.parent in game_objects_list:\r\n bullet_list.remove(_bullet.parent)\r\n game_objects_list.remove(_bullet.parent)\r\n space.remove(_bullet, _bullet.body)\r\n return False",
"def shoot(self, speed=BULLET_SPEED, size=(BULLET_SIZE, BULLET_SIZE), color=BULLET_COLOR):\n # TODO: Make shoot directions more ergonomic\n keys = pygame.key.get_pressed()\n if keys[SHOOT_KEY]:\n if self._shoot_clock._cool_clock > self._shoot_clock.cooldown_time():\n self._shoot_clock.reset_cooldown()\n velocity = [0, 0]\n if keys[SHOOT_UP]:\n velocity[1] = -speed\n if keys[SHOOT_DOWN]:\n velocity[1] = speed\n if keys[SHOOT_LEFT]:\n velocity[0] = -speed\n if keys[SHOOT_RIGHT]:\n velocity[0] = speed\n if velocity != [0, 0]:\n self._create_bullet(size, velocity, color)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a new AWS S3 Bucket Policy.
|
def create(self, params):
return self.make_client_call(
'put_bucket_policy',
params,
fatal_handled_exceptions=ParamValidationError)
|
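The create() document above routes 'put_bucket_policy' through a make_client_call wrapper; as a rough equivalent, here is a hedged sketch of the direct boto3 call, with a hypothetical bucket name and an illustrative deny-HTTP statement that are not taken from the dataset row.

# Hedged sketch of the underlying boto3 call; boto3 expects the policy
# document serialized as a JSON string.
import json
import boto3

def put_example_policy(bucket_name):
    policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Sid": "AllowSSLRequestsOnly",
            "Effect": "Deny",
            "Principal": "*",
            "Action": "s3:*",
            "Resource": [f"arn:aws:s3:::{bucket_name}",
                         f"arn:aws:s3:::{bucket_name}/*"],
            "Condition": {"Bool": {"aws:SecureTransport": "false"}},
        }],
    }
    s3 = boto3.client("s3")
    s3.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))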
[
"def create_bucket(name, policy=None):\n s3 = boto3.client('s3')\n\n s3.create_bucket(Bucket=bucket)\n print(\"S3 bucket %s created.\" % bucket)\n\n if policy:\n s3.put_bucket_policy(\n Bucket=bucket,\n Policy=json.dumps(bucketPolicy)\n )\n print(\"Policy attached to S3 bucket.\")\n\n return bucket",
"def create_bucket(self):\n AWSApi.instance().s3.create_bucket(bucket_name=self.name, region=self.region)",
"def create_bucket():\n\n s3 = session.resource('s3')\n\n try:\n s3.create_bucket(Bucket=f\"lambda-source-{os.environ['AWS_ACCOUNT']}\", ACL='private')\n print('Created S3 bucket!')\n\n except Exception as e:\n print(f\"Error creating S3 bucket. Exception: {e}.\")",
"def create_bucket(name):\r\n s3.create_bucket(Bucket=name)",
"def bucket_create():\r\n conn = connect_s3()\r\n bucket = conn.create_bucket(BUCKET_NAME, policy='public-read')\r\n bucket.configure_website('index.html', 'error.html')\r\n print 'Bucket %r created.' % BUCKET_NAME",
"def create_asset_bucket(self):\n return s3.Bucket(\n self,\n self.config.get(\"stack_name\") + \"_s3\",\n removal_policy=core.RemovalPolicy.DESTROY\n )",
"def create_s3(self, name, bucket, access_key, secret_access_key, endpoint=None, region=None,\n signature_version=None):\n\n config = {\n 'bucket': bucket,\n 'accessKey': access_key,\n 'secretAccessKey': secret_access_key,\n }\n if endpoint:\n config['endpoint'] = endpoint\n if region:\n config['region'] = region\n if signature_version:\n config['signatureVersion'] = signature_version\n\n storage_provider = models.StorageProvider(\n type='s3',\n name=name,\n config=config,\n )\n\n repository = self.build_repository(repositories.CreateStorageProvider)\n return repository.create(storage_provider)",
"def create_bucket(self):\n # Cohesity doesn't allow to create a bucket natively from s3 client.\n # response = s3_client.create_bucket(Bucket='my-bucket')\n\n # We create a view with s3Only access, since if it's multiprotocol,\n # bucket becomes readonly access for s3.\n body = View()\n body.view_box_id = self._get_storage_domain_id()\n body.name = BUCKET_NAME\n body.protocol_access = ProtocolAccessEnum.KS3ONLY\n self.cohesity_client.views.create_view(body)\n print(\"Bucket %s created on Cohesity.\" % BUCKET_NAME)",
"def configure_s3_bucket(self):\n AWSApi.instance().s3.put_bucket_versioning(bucket_name=self.name, configuration={\"Status\": \"Enabled\"})\n AWSApi.instance().s3.put_bucket_encryption(\n bucket_name=self.name,\n configuration={\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"AES256\"}}]},\n )\n deny_http_policy = (\n '{{\"Id\":\"DenyHTTP\",\"Version\":\"2012-10-17\",\"Statement\":[{{\"Sid\":\"AllowSSLRequestsOnly\",\"Action\":\"s3:*\",'\n '\"Effect\":\"Deny\",\"Resource\":[\"arn:{partition}:s3:::{bucket_name}\",\"arn:{partition}:s3:::{bucket_name}/*\"],'\n '\"Condition\":{{\"Bool\":{{\"aws:SecureTransport\":\"false\"}}}},\"Principal\":\"*\"}}]}}'\n ).format(bucket_name=self.name, partition=self.partition)\n AWSApi.instance().s3.put_bucket_policy(bucket_name=self.name, policy=deny_http_policy)",
"def set_bucket_policy(self, bucket_name, policy):\n is_valid_policy_type(policy)\n\n check_bucket_name(bucket_name)\n\n headers = {\n 'Content-Length': str(len(policy)),\n 'Content-Md5': get_md5_base64digest(policy)\n }\n content_sha256_hex = get_sha256_hexdigest(policy)\n self._url_open(\"PUT\",\n bucket_name=bucket_name,\n query={\"policy\": \"\"},\n headers=headers,\n body=policy,\n content_sha256=content_sha256_hex)",
"def test_s3_bucket_creation():\n s3 = boto3.resource(\"s3\") # Will use Localstack\n assert len(list(s3.buckets.all())) == 0\n bucket = s3.Bucket(\"foobar\")\n bucket.create()",
"def get_s3_policy(bucket, video):\n now = timezone.now()\n stamp = str(to_timestamp(now))\n key = video.get_source_s3_key(stamp=stamp)\n\n expires_at = now + timedelta(seconds=AWS_UPLOAD_EXPIRATION_DELAY)\n acl = \"private\"\n x_amz_algorithm = \"AWS4-HMAC-SHA256\"\n x_amz_credential = \"{key:s}/{date:%Y%m%d}/{region:s}/s3/aws4_request\".format(\n date=now, key=settings.AWS_ACCESS_KEY_ID, region=settings.AWS_DEFAULT_REGION\n )\n x_amz_date = now.strftime(\"%Y%m%dT%H%M%SZ\")\n\n policy = {\n \"expiration\": expires_at.strftime(\"%Y-%m-%dT%H:%M:%S.000Z\"),\n \"conditions\": [\n {\"bucket\": bucket},\n {\"key\": key},\n {\"acl\": acl},\n [\"starts-with\", \"$Content-Type\", \"video/\"],\n [\"content-length-range\", 0, VIDEO_SOURCE_MAX_SIZE],\n {\"x-amz-credential\": x_amz_credential},\n {\"x-amz-algorithm\": x_amz_algorithm},\n {\"x-amz-date\": x_amz_date},\n [\"starts-with\", \"$x-amz-meta-jwt\", \"\"],\n ],\n }\n\n policy_b64 = b64encode(\n json.dumps(policy).replace(\"\\n\", \"\").replace(\"\\r\", \"\").encode()\n )\n\n signature_key = get_signature_key(\n settings.AWS_SECRET_ACCESS_KEY,\n now.strftime(\"%Y%m%d\"),\n settings.AWS_DEFAULT_REGION,\n \"s3\",\n )\n\n signature = hmac.new(signature_key, policy_b64, hashlib.sha256).hexdigest()\n\n return {\n \"acl\": acl,\n \"bucket\": bucket,\n \"stamp\": stamp,\n \"key\": key,\n \"max_file_size\": VIDEO_SOURCE_MAX_SIZE,\n \"policy\": policy_b64,\n \"s3_endpoint\": get_s3_endpoint(settings.AWS_DEFAULT_REGION),\n \"x_amz_algorithm\": x_amz_algorithm,\n \"x_amz_credential\": x_amz_credential,\n \"x_amz_date\": x_amz_date,\n \"x_amz_expires\": AWS_UPLOAD_EXPIRATION_DELAY,\n \"x_amz_signature\": signature,\n }",
"def build_policy(bucket, src_policy, ids):\n if not src_policy:\n src_policy = '{ \"Version\" : \"2012-10-17\", \"Statement\" : [] }'\n jpolicy = json.loads(src_policy)\n\n for aid in ids:\n stmt = {\n \"Sid\" : aid,\n \"Action\" : \"s3:ListBucket\",\n \"Effect\" : \"Deny\",\n \"Resource\" : \"arn:aws:s3:::\" + bucket,\n \"Principal\" : { \"AWS\" : [ aid ] }\n }\n jpolicy[\"Statement\"].append(stmt.copy())\n\n if DEBUG:\n print(\"--\", \"Constructed policy:\", jpolicy)\n\n return json.dumps(jpolicy)",
"def Create(iam,name: str,purpose: str,statements: list,tag='/',version='2012-10-17'):\n\n\t\t\t\tif len(statements) <= 0: raise ValueError('Must provide atleast 1 valid policy statement')\n\n\t\t\t\ttag = AWS.preptag(tag)\n\n\t\t\t\t#build policy document\n\t\t\t\tpolicyDoc = {\n\t\t\t\t\t'Statement': statements,\n\t\t\t\t\t'Version': version\n\t\t\t\t\t}\n\n\t\t\t\t#transform policy document into json\n\t\t\t\tjsonPolicyDocument = json.dumps(policyDoc)\n\n\t\t\t\t#use client to submit\n\t\t\t\tresponse = iam.resource.meta.client.create_policy(\n\t\t\t\t\tPolicyName=name,\n\t\t\t\t\tPath=tag,\n\t\t\t\t\tPolicyDocument=jsonPolicyDocument,\n\t\t\t\t\tDescription=purpose\n\t\t\t\t\t)\n\n\t\t\t\treturn response",
"def create_bucket(bucket, bucket_name):\n return _objstore_backend.create_bucket(bucket, bucket_name)",
"def test_s3_bucket_creation(self, noobaa_obj, created_buckets):\n\n bucketname = create_unique_resource_name(self.__class__.__name__.lower(), 's3-bucket')\n logger.info(f'Creating new bucket - {bucketname}')\n created_buckets.append(noobaa_obj.s3_create_bucket(bucketname=bucketname))",
"def create_bucket(self, bucket_name, description, org_id, retention_rules=None):\n return self.client.buckets_api().create_bucket(\n bucket_name=bucket_name, description=description, org_id=org_id, retention_rules=None\n )",
"def create_bucket(bucket_name):\n print('Creating artifacts bucket {}'.format(bucket_name))\n if bucket_exists(bucket_name):\n print('Bucket {} already exists'.format(bucket_name))\n return\n try:\n if args.region is None or args.region == \"us-east-1\":\n s3_client.create_bucket(Bucket=bucket_name)\n else:\n location = {'LocationConstraint': args.region}\n s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=location)\n except Exception as e:\n print('Failed to create artifacts bucket\\nException: {}'.format(e))\n sys.exit(1)\n print('Successfully created artifacts bucket')",
"def create(self, bucket_name):\n bucket = self.gcs_client.get_bucket(bucket_name)\n print('Bucket {} created'.format(bucket.name))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deletes an existing AWS S3 Bucket Policy.
|
def delete(self, params=None):
self.logger.debug('Deleting %s with parameters: %s'
% (self.type_name, params))
self.client.delete_bucket_policy(**params)
|
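Likewise, a hedged sketch of the direct boto3 call behind the delete() document above, assuming the params it receives carry at least the bucket name.

import boto3

def delete_example_policy(bucket_name):
    s3 = boto3.client("s3")
    # Removes the bucket policy attached to the bucket, if any.
    s3.delete_bucket_policy(Bucket=bucket_name)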
[
"def delete_bucket():\n\n s3 = session.resource('s3')\n\n try:\n bucket = s3.Bucket(f\"lambda-source-{os.environ['AWS_ACCOUNT']}\")\n bucket.objects.all().delete()\n bucket.delete()\n print('Deleted S3 bucket!')\n\n except Exception as e:\n print(f\"Error deleting S3 bucket. Exception: {e}.\")",
"def bucket_delete():\r\n if not confirm(\"Are you sure you want to delete the bucket %r?\" % BUCKET_NAME):\r\n abort('Aborting at user request.')\r\n conn = connect_s3()\r\n conn.delete_bucket(BUCKET_NAME)\r\n print 'Bucket %r deleted.' % BUCKET_NAME",
"def delete_bucket(self):\n self.s3_client.delete_bucket(Bucket=BUCKET_NAME)\n print(\"Deleted Bucket: %s\" % BUCKET_NAME)",
"def delete(self):\n\n # TODO: Make sure the proper exceptions are raised.\n\n return self.connection.delete_bucket(self.name)",
"def rm_in_bucket(s3, bucket):\n bucket = s3.Bucket(bucket)\n bucket.objects.all().delete()",
"def delete_s3_buckets():\n s3_resource = boto3.resource('s3')\n print('Deleting S3 Buckets')\n for bucket in s3_resource.buckets.all():\n print('Starting object deletion for S3 Bucket {}'.format(bucket.name))\n bucket.object_versions.delete()\n print('Deleting S3 Bucket {}'.format(bucket.name))\n bucket.delete()\n print('S3 Buckets deleted')",
"def handle_DELETE(request):\n if boto:\n bucket_name = request.REQUEST.get('bucket')\n key_name = request.REQUEST.get('key')\n s3_delete(key_name)\n return make_response(200)\n else:\n return make_response(500)",
"def delete_file( s3_path ):\n\n return _get_bucket().delete_key(s3_path)",
"def delete_policy(self, policy):\r\n return self.manager.delete_policy(scaling_group=self, policy=policy)",
"def delete_files(bucket_name):\n s3 = boto3.resource(\"s3\")\n\n bucket = s3.Bucket(bucket_name)\n for key in bucket.objects.all():\n key.delete()\n # Delete the bucket if we want to \n #bucket.delete()",
"def delete_s3_objects(self, bucketName):\n\n s3ObjectList = AWSSetup._list_s3_objects(bucketName, self._s3Client, self.config)\n self._s3Client.delete_objects(\n Bucket = bucketName,\n Delete = {\n 'Objects' : s3ObjectList\n }\n )\n\n self._s3Client.delete_bucket(\n Bucket = bucketName\n )",
"def delete_file_from_bucket(self):\n self.s3_client.delete_object(Bucket=BUCKET_NAME, Key=FILENAME)\n print(\"File %s deleted from Bucket: %s\" % (FILENAME, BUCKET_NAME))",
"def test_put_get_delete_bucket_policy(make_stubber, make_unique_name, make_bucket):\n stubber = make_stubber(bucket_wrapper, 'get_s3')\n bucket_name = make_unique_name('bucket')\n\n make_bucket(stubber, bucket_wrapper, bucket_name, stubber.region_name)\n\n policy_id = uuid.uuid1()\n\n put_policy = {\n 'Version': '2012-10-17',\n 'Id': str(policy_id),\n 'Statement': [{\n 'Effect': 'Allow',\n 'Principal': {'AWS': 'arn:aws:iam::111122223333:user/Martha'},\n 'Action': [\n 's3:GetObject',\n 's3:ListBucket'\n ],\n 'Resource': [\n f'arn:aws:s3:::{bucket_name}/*',\n f'arn:aws:s3:::{bucket_name}'\n ]\n }]\n }\n\n stubber.stub_put_bucket_policy(bucket_name, put_policy)\n stubber.stub_get_bucket_policy(bucket_name, put_policy)\n stubber.stub_delete_bucket_policy(bucket_name)\n stubber.stub_get_bucket_policy_error(bucket_name, 'NoSuchBucketPolicy')\n\n bucket_wrapper.put_policy(bucket_name, put_policy)\n policy = bucket_wrapper.get_policy(bucket_name)\n assert put_policy == policy\n bucket_wrapper.delete_policy(bucket_name)\n with pytest.raises(ClientError) as exc_info:\n _ = bucket_wrapper.get_policy(bucket_name)\n assert exc_info.value.response['Error']['Code'] == 'NoSuchBucketPolicy'",
"def delete_file(s3_file):\n try:\n s3_file.delete()\n except botocore.exceptions.BotoCoreError as ex:\n logger.exception(f\"S3-SFTP: Error deleting '{ s3_file.key }' from S3.\")\n else:\n logger.info(f\"S3-SFTP: Deleted '{ s3_file.key }' from S3\")",
"def deleteS3files(self):\n s3 = boto3.resource('s3',\n aws_access_key_id=self.s3_key,\n aws_secret_access_key=self.s3_secret)\n bucket = s3.Bucket(self.s3_bucket)\n bucket_files = [x.key for x in bucket.objects.all()]\n delete_objects = []\n if bucket_files:\n for s3_file in bucket_files:\n delete_objects.append({'Key': s3_file})\n try:\n response = bucket.delete_objects(Delete={ 'Objects': delete_objects} )\n except botocore.exceptions.ClientError as e:\n self.logger.error(e)\n self.logger.error(delete_objects)\n return False",
"def delete_file(key):\n try:\n s3_bucket.Object(key).delete()\n except Exception as e:\n print(e)",
"def delete_policy(self, scaling_group, policy):\r\n uri = \"/%s/%s/policies/%s\" % (self.uri_base,\r\n utils.get_id(scaling_group), utils.get_id(policy))\r\n resp, resp_body = self.api.method_delete(uri)",
"def delete_policy(self, scaling_group, policy):\r\n return self._manager.delete_policy(scaling_group=scaling_group,\r\n policy=policy)",
"def delete_policy(self):\n response=self.client.list_policy_versions(PolicyArn=self.PolicyArn)\n for Version in response[\"Versions\"]:\n if not(Version[\"IsDefaultVersion\"]):\n self.client.client.delete_policy_version(\n PolicyArn=self.PolicyArn,\n VersionId=Version[\"Version\"]\n )\n self.client.delete_policy(PolicyArn=self.PolicyArn)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if a user is logged in, else false.
|
def is_logged_in():
return 'username' in session
|
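The is_logged_in() document above checks Flask's session mapping; below is a minimal, self-contained sketch showing where such a 'username' key would typically be set and read. The route names and secret key are illustrative assumptions, not taken from the dataset row.

from flask import Flask, session

app = Flask(__name__)
app.secret_key = "change-me"  # required before the session can be used

def is_logged_in():
    return 'username' in session

@app.route("/login/<name>")
def login(name):
    session['username'] = name          # mark the user as logged in
    return "ok"

@app.route("/status")
def status():
    return "logged in" if is_logged_in() else "anonymous"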
[
"def is_logged_in():\n return 'user_id' in session",
"def is_logged_in(self):\n return self.get_secure_cookie(\"userid\") != None",
"def is_loggedin():\n \n if \"user_id\" in session:\n loggedin = \"True\"\n else:\n loggedin = \"False\"\n return loggedin",
"def checkUserAuthenticated(self,user):\n return user.is_authenticated()",
"def IsAuthenticated(self):\n return self._user_cookie is not None",
"def is_authenticated(request):\n if not request:\n return False\n if not request.user:\n return False\n if callable(request.user.is_authenticated):\n return request.user.is_authenticated()\n return request.user.is_authenticated",
"def check_session():\n session = request.cookies.get(\"session\")\n user_obj = User.get_by_session(session)\n return bool(user_obj)",
"def has_user(self):\n return self.user is not None",
"def is_authenticated(self, request, **kwargs):\n session_ok = super(StaffSessionAuthentication, self).is_authenticated(request, **kwargs)\n\n if session_ok and (request.user.is_staff or request.user.is_superuser):\n return True\n else:\n return False",
"def can_sign_in(self):\n if (self.event.use_sign_in and\n not self.is_full() and\n self.is_now()):\n return True\n return False",
"def is_logged_in(self):\n self.refresh()\n \n s = self.selenium\n if s.is_element_present(self.username_loc,timeout =0.1) and s.is_element_present(self.password_loc,timeout =0.1):\n logging.info(\"zd web is not login, because username and password xpath are present\")\n return False\n\n ret = s.is_element_present(self.logout_loc, 2)\n if not ret:\n logging.info(\"zd web is not login, because logout xpath is not present\")\n return ret",
"def is_user_authorized(self, user_id):\n logger = logging.getLogger(\"UserSessionManager.is_user_authorized\")\n logger.debug(\"Entry. user_id: %s\" % (user_id, ))\n if self.r.exists(user_id):\n return_value = True\n else:\n return_value = False\n logger.debug(\"returning: %s\" % (return_value, ))\n return return_value",
"def validate_login(self):\n is_user_valid = None\n try:\n if self.redmine.auth():\n self.__valid_redmine_login = True\n is_user_valid = True\n except Exception as error:\n print(error)\n return is_user_valid",
"def is_member(self, user_login):\n try:\n self.get(\n 'orgs/%s/members/%s' % (self.org_id, user_login),\n access_token=self.admin_access_token,\n )\n return True\n except GitHubError:\n return False",
"def is_current_user_admin(self):\n\n # This env variable was set by GAE based on a GAE session cookie.\n # Using Sign-In With Google, it will probably never be present.\n # Hence, currently is always False.\n # We don't use this. We check a boolean in the AppUser model.\n return (os.environ.get('USER_IS_ADMIN', '0')) == '1'",
"def needs_auth(self, user, **kwargs):\n if self.auth_provider is None:\n return False\n\n if not user.is_authenticated():\n return True\n\n return not UserSocialAuth.objects.filter(\n user=user,\n provider=self.auth_provider,\n ).exists()",
"def user_exists():\n return 'adminId' in session and Admin.query.filter_by(\n id=session['adminId']).first() is not None",
"def check_login_info(self) -> bool:\n URL = self.API_URL + 'user'\n headers = {'X-Api-Key': self.API_KEY }\n try:\n r = requests.get(url=URL, headers=headers)\n if r.status_code == 200:\n self.USER = r.json()['id'] # sets user ID\n URL = self.API_URL + 'workspaces'\n r = requests.get(url=URL, headers=headers)\n if r.status_code == 200:\n wspace = [wspace['id'] for wspace in r.json() if wspace['name'].upper() in self.WSPACENAME.upper()]\n self.WSPACE = wspace[0] if wspace else 'False' # sets workspace ID\n\n return False if not self.USER or not self.WSPACE else True\n except:\n return False",
"def user_and_author(self, author):\n if self.logged_in() and self.account == author:\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build the elasticsearch mapping bits.
|
def elastic_mapping_builder(obj):
super(Citations, Citations).elastic_mapping_builder(obj)
obj['journal_id'] = obj['journal_volume'] = \
obj['journal_issue'] = {'type': 'integer'}
obj['abstract_text'] = obj['xml_text'] = \
obj['page_range'] = obj['release_authorization_id'] = \
{'type': 'text'}
obj['article_title'] = obj['encoding'] = \
obj['doi_reference'] = {'type': 'text', 'fields': {'keyword': {'type': 'keyword', 'ignore_above': 256}}}
|
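A short sketch of the property dict that the elastic_mapping_builder document above assembles, highlighting that the chained assignments make several keys share a single dict object; that sharing is harmless as long as the shared value is never mutated afterwards. The resulting dict would typically feed the 'properties' section of an index mapping.

# Hedged sketch, plain Python only; field names follow the document above.
obj = {}
obj['journal_id'] = obj['journal_volume'] = obj['journal_issue'] = {'type': 'integer'}
obj['article_title'] = {
    'type': 'text',
    'fields': {'keyword': {'type': 'keyword', 'ignore_above': 256}},
}
# Same object due to chained assignment, not three separate copies.
assert obj['journal_id'] is obj['journal_volume']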
[
"def generate_docs_mapping(self):\n m = {}\n m['n_words'] = {'type': 'integer'}\n m['n_sents'] = {'type': 'integer'}\n if len(self.settings['languages']) > 1:\n for lang in self.settings['languages']:\n m['n_words_' + lang] = {'type': 'integer'}\n m['n_sents_' + lang] = {'type': 'integer'}\n for meta in self.settings['viewable_meta']:\n if meta.startswith('year'):\n m[meta] = {'type': 'integer'}\n else:\n m[meta] = {'type': 'text',\n 'analyzer': 'lowercase_normalizer'}\n m[meta + '_kw'] = {'type': 'keyword'}\n return {'mappings': {'doc': {'properties': m}}, 'settings': self.docNormalizer}",
"def generate_mappings(self):\n mSentWord = self.generate_words_mapping(wordFreqs=False)\n mWord = self.generate_words_mapping()\n mSent = self.generate_sentences_mapping(mSentWord)\n mDoc = self.generate_docs_mapping()\n mappings = {'docs': mDoc,\n 'sentences': mSent,\n 'words': mWord}\n return mappings",
"def build_index(in_dataset, out_dict, out_postings):\n print('indexing...')\n\n documents = get_docs_from_csv(in_dataset)\n\n # create directory to store intermediate dictionaries\n setup_dirs(out_dict, out_postings)\n lengths_and_court_importance = create_blocks_and_find_lengths(documents)\n\n merge_blocks()\n \n encode_gap(out_dict, out_postings)\n save_dictionary_lengths_and_court(out_dict, lengths_and_court_importance)\n\n # remove directory that stores intermediate dictionaries\n remove_dirs()",
"def build_index(app, es, in_type, mapping, uuids_to_index, dry_run, check_first, index_diff=False,\n print_count_only=False, cached_meta=None, meta_bulk_actions=None):\n uuids_to_index[in_type] = set()\n if print_count_only:\n log.info('___PRINTING COUNTS___')\n check_and_reindex_existing(app, es, in_type, uuids_to_index, index_diff, True)\n return\n\n # combines mapping and settings\n this_index_record = build_index_record(mapping, in_type)\n\n # determine if index already exists for this type\n # probably don't need to do this as I can do it upstream... but passing in meta makes it\n # just a single if check\n this_index_exists = check_if_index_exists(es, in_type, cached_meta)\n\n # if the index exists, we might not need to delete it\n # otherwise, run if we are using the check-first or index_diff args\n if check_first or index_diff:\n prev_index_record = get_previous_index_record(this_index_exists, es, in_type)\n if prev_index_record is not None and this_index_record == prev_index_record:\n if in_type != 'meta':\n check_and_reindex_existing(app, es, in_type, uuids_to_index, index_diff)\n log.info('MAPPING: using existing index for collection %s' % (in_type), collection=in_type)\n return\n\n if dry_run or index_diff:\n return\n\n # delete the index\n if this_index_exists:\n res = es_safe_execute(es.indices.delete, index=in_type, ignore=[400,404])\n if res:\n log.info('MAPPING: index successfully deleted for %s' % in_type, collection=in_type)\n else:\n log.error('MAPPING: could not delete index for %s' % in_type, collection=in_type)\n\n # first, create the mapping. adds settings and mappings in the body\n res = es_safe_execute(es.indices.create, index=in_type, body=this_index_record, ignore=[400])\n if res:\n log.info('MAPPING: new index created for %s' % (in_type), collection=in_type)\n else:\n log.error('MAPPING: new index failed for %s' % (in_type), collection=in_type)\n\n # check to debug create-mapping issues and ensure correct mappings\n confirm_mapping(es, in_type, this_index_record)\n\n # we need to queue items in the index for indexing\n # if check_first and we've made it here, nothing has been queued yet\n # for this collection\n start = timer()\n coll_uuids = set(get_uuids_for_types(app.registry, types=[in_type]))\n end = timer()\n log.info('Time to get collection uuids: %s' % str(end-start), cat='fetch time',\n duration=str(end-start), collection=in_type)\n uuids_to_index[in_type] = coll_uuids\n log.info('MAPPING: will queue all %s items in the new index %s for reindexing' %\n (len(coll_uuids), in_type), cat='items to queue', count=len(coll_uuids), collection=in_type)\n\n # put index_record in meta\n if meta_bulk_actions is None:\n meta_bulk_actions = []\n # 1-2s faster to load in bulk if your doing more than one\n start = timer()\n res = es_safe_execute(es.index, index='meta', doc_type='meta', body=this_index_record, id=in_type)\n end = timer()\n log.info(\"Time to update metadata document: %s\" % str(end-start), duration=str(end-start),\n collection=in_type, cat='update meta')\n if res:\n log.info(\"MAPPING: index record created for %s\" % (in_type), collection=in_type)\n else:\n log.error(\"MAPPING: index record failed for %s\" % (in_type), collection=in_type)\n else:\n # create bulk actions to be submitted after all mappings are created\n bulk_action = {'_op_type': 'index',\n '_index': 'meta',\n '_type': 'meta',\n '_id': in_type,\n '_source': this_index_record\n }\n meta_bulk_actions.append(bulk_action)\n return meta_bulk_actions",
"def _expand_index_map(self, active_ctx, active_property, value, index_key, as_graph, options):\n rval = []\n for k, v in sorted(value.items()):\n ctx = JsonLdProcessor.get_context_value(\n active_ctx, k, '@context')\n if ctx:\n active_ctx = self._process_context(active_ctx, ctx, options)\n\n expanded_key = self._expand_iri(active_ctx, k, vocab=True)\n if index_key == '@id':\n # expand document relative\n k = self._expand_iri(active_ctx, k, base=True)\n elif index_key == '@type':\n k = expanded_key\n\n v = self._expand(\n active_ctx, active_property,\n JsonLdProcessor.arrayify(v),\n options, inside_list=False)\n for item in v:\n if as_graph and not _is_graph(item):\n item = {'@graph': [item]}\n if index_key == '@type':\n if expanded_key == '@none':\n # ignore @none\n item\n elif item.get('@type'):\n types = [k]\n types.extend(item['@type'])\n item['@type'] = types\n else:\n item['@type'] = [k]\n elif expanded_key != '@none' and index_key not in item:\n item[index_key] = k\n\n rval.append(item)\n return rval",
"def _buildIndexes(self):\n # delete any existing indexes\n for name in self.Catalog.indexes():\n self.Catalog.delIndex(name)\n\n # add the default indexes\n for (name, index_type) in [('meta_type', 'FieldIndex'),\n ('published', 'FieldIndex'),\n ('date', 'DateIndex'),\n ('tags', 'KeywordIndex'),\n ('yearmonth', 'KeywordIndex')]:\n self.Catalog.addIndex(name, index_type)",
"def mapping(self, source):",
"def index_body(doc_type, mapping=None, setting=None):\n body = {}\n if setting is not None:\n body['settings'] = setting\n if mapping is not None:\n d = {}\n d[doc_type] = mapping\n body['mappings'] = d\n\n return body",
"def createOutputFieldMappings(self, appendFields = None):\n\n #### Initialize Field Mapping ####\n fieldMappings = ARCPY.FieldMappings()\n\n #### Create Master Field Mapping ####\n if self.masterIsOID:\n masterFieldOutName = \"Source_ID\"\n masterFieldOutAlias = self.inName + \"_\" + masterFieldOutName\n else:\n masterFieldOutName = self.masterField\n masterFieldOutAlias = self.masterField\n masterMap = UTILS.createFieldMap",
"def _get_cube_mappings(self, ctx, mapper, base_mapper=None, parent_mapper=None):\n\n logger.debug(\"Exporting mappings: %s\", mapper)\n\n c_mappings = {}\n if base_mapper is None:\n base_mapper = mapper\n if parent_mapper is None:\n parent_mapper = mapper\n\n # Generate mappings for this mapper, possibly altering recursed mappings.\n\n mappings = mapper.sql_mappings(ctx)\n\n for mapping in mappings:\n #print(mapping)\n # Options are:\n # cube_name.detail = alias.column # for details\n # dimension.attribute = alias.column # for dimension attributes\n #c_mappings[mapping[\"entity\"].name + \".\" + mapping['field'] = mapping['alias'] + \".\" + mapping['sqlcol'].name\n try:\n\n # Flatten path to 2 levels as Cubes does not support nested dimensions\n if len(mapping.path) > 2:\n mapping_path = \"_\".join(mapping.path[:-1]) + \".\" + mapping.path[-1]\n else:\n mapping_path = \".\".join(mapping.path)\n\n if len(mapping.sqltable_alias) > 0:\n mapping_sqltable_alias = \"_\".join(mapping.sqltable_alias)\n else:\n mapping_sqltable_alias = mapping.sqltable.name\n\n c_mappings[mapping_path] = mapping_sqltable_alias + \".\" + mapping.sqlcolumn_alias\n if mapping.function:\n c_mappings[mapping_path] = {\n 'table': mapping_sqltable_alias,\n 'column': mapping.sqlcolumn_alias, # mapping_sqltable_alias + \".\" + ...\n 'extract': mapping.function\n }\n except:\n logger.error(\"Cannot export mapping: %s\", mapping)\n raise\n\n return c_mappings",
"def map_to_es(self):\n full_name = self.query_path\n return set_default(\n {\n c.names[full_name]: c.es_column\n for k, cs in self.lookup.items()\n # if startswith_field(k, full_name)\n for c in cs if c.jx_type not in STRUCT\n },\n {\n c.names[\".\"]: c.es_column\n for k, cs in self.lookup.items()\n # if startswith_field(k, full_name)\n for c in cs if c.jx_type not in STRUCT\n }\n )",
"def _assemble(self):\n self._filters.extend(list(self._default_filters.values()))\n if self._start is not None:\n self.es_query['from'] = self._start\n self.es_query['size'] = self._size if self._size is not None else SIZE_LIMIT\n if self._exclude_source:\n self.es_query['_source'] = False\n elif self._source is not None:\n self.es_query['_source'] = self._source\n if self._aggregations:\n self.es_query['aggs'] = {\n agg.name: agg.assemble()\n for agg in self._aggregations\n }",
"def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('SAM').get('abstractTypes')\n exolinks = globalMap.get('SAM').get('exolinks')\n\n # DataType AmountUnit\n currentMap = {}\n abstractTypes['AmountUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00006'] = currentMap\n loadMaps['SAM.AmountUnit'] = currentMap\n currentMap['tag'] = 'SAM.AmountUnit'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00006'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # DataType ConcentrationUnit\n currentMap = {}\n abstractTypes['ConcentrationUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00005'] = currentMap\n loadMaps['SAM.ConcentrationUnit'] = currentMap\n currentMap['tag'] = 'SAM.ConcentrationUnit'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00005'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # DataType Solvent\n currentMap = {}\n abstractTypes['Solvent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2009-11-19-14:50:32_00005'] = currentMap\n loadMaps['SAM.Solvent'] = currentMap\n currentMap['tag'] = 'SAM.Solvent'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2009-11-19-14:50:32_00005'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AbstractSample\n currentMap = {}\n abstractTypes['AbstractSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00007'] = currentMap\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00007'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.AbstractSample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AbstractSample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AbstractSample.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009'] = currentMap\n loadMaps['SAM.AbstractSample.details'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute AbstractSample.ionicStrength\n currentMap = {}\n contentMap['ionicStrength'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006'] = currentMap\n loadMaps['SAM.AbstractSample.ionicStrength'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.ionicStrength'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006'\n currentMap['name'] = 'ionicStrength'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute AbstractSample.isActive\n currentMap = {}\n contentMap['isActive'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008'] = 
currentMap\n loadMaps['SAM.AbstractSample.isActive'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.isActive'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008'\n currentMap['name'] = 'isActive'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AbstractSample.isHazard\n currentMap = {}\n contentMap['isHazard'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007'] = currentMap\n loadMaps['SAM.AbstractSample.isHazard'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.isHazard'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007'\n currentMap['name'] = 'isHazard'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AbstractSample.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004'] = currentMap\n loadMaps['SAM.AbstractSample.name'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AbstractSample.ph\n currentMap = {}\n contentMap['ph'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005'] = currentMap\n loadMaps['SAM.AbstractSample.ph'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.ph'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005'\n currentMap['name'] = 'ph'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute AbstractSample.solvent\n currentMap = {}\n contentMap['solvent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014'] = currentMap\n loadMaps['SAM.AbstractSample.solvent'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.solvent'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014'\n currentMap['name'] = 'solvent'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2009-11-19-14:50:32_00005')\n\n # Role AbstractSample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AbstractSample.hazardPhrases\n currentMap = {}\n contentMap['hazardPhrases'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013'] = currentMap\n loadMaps['SAM.AbstractSample.hazardPhrases'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.hazardPhrases'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013'\n currentMap['name'] = 'hazardPhrases'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('CLAS').get('exolinks')\n\n # Role AbstractSample.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003'] = currentMap\n 
loadMaps['SAM.AbstractSample.sampleCategories'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.sampleCategories'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('CLAS').get('exolinks')\n\n # Role AbstractSample.sampleComponents\n currentMap = {}\n contentMap['sampleComponents'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001'] = currentMap\n loadMaps['SAM.AbstractSample.sampleComponents'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.sampleComponents'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001'\n currentMap['name'] = 'sampleComponents'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('SAM').get('abstractTypes')\n # End of AbstractSample\n\n currentMap = abstractTypes.get('AbstractSample')\n aList = ['ionicStrength', 'isActive', 'isHazard', 'ph']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'solvent']\n currentMap['simpleAttrs'] = aList\n aList = ['sampleComponents', 'sampleCategories', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['sampleComponents']\n currentMap['children'] = aList\n\n # Class RefSampleSource\n currentMap = {}\n abstractTypes['RefSampleSource'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00008'] = currentMap\n loadMaps['SAM.RefSampleSource'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00008'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refSampleSources'\n currentMap['objkey'] = 'catalogNum'\n currentMap['class'] = ccp.api.lims.Sample.RefSampleSource\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefSampleSource.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefSampleSource.catalogNum\n currentMap = {}\n contentMap['catalogNum'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00017'] = currentMap\n loadMaps['SAM.RefSampleSource.catalogNum'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource.catalogNum'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00017'\n currentMap['name'] = 'catalogNum'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefSampleSource.dataPageUrl\n currentMap = {}\n contentMap['dataPageUrl'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00018'] = currentMap\n loadMaps['SAM.RefSampleSource.dataPageUrl'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource.dataPageUrl'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00018'\n currentMap['name'] = 'dataPageUrl'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Role RefSampleSource.access\n contentMap['access'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefSampleSource.supplier\n currentMap = {}\n contentMap['supplier'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00016'] = currentMap\n loadMaps['SAM.RefSampleSource.supplier'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource.supplier'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00016'\n currentMap['name'] = 'supplier'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('AFFI').get('exolinks')\n # End of RefSampleSource\n\n currentMap = abstractTypes.get('RefSampleSource')\n aList = ['catalogNum', 'dataPageUrl']\n currentMap['simpleAttrs'] = aList\n aList = ['supplier', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleComponent\n currentMap = {}\n abstractTypes['SampleComponent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00002'] = currentMap\n loadMaps['SAM.SampleComponent'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleComponents'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.lims.Sample.SampleComponent\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleComponent.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleComponent.concDisplayUnit\n currentMap = {}\n contentMap['concDisplayUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00004'] = currentMap\n loadMaps['SAM.SampleComponent.concDisplayUnit'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concDisplayUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00004'\n currentMap['name'] = 'concDisplayUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute SampleComponent.concentration\n currentMap = {}\n contentMap['concentration'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00001'] = currentMap\n loadMaps['SAM.SampleComponent.concentration'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concentration'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00001'\n currentMap['name'] = 'concentration'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute SampleComponent.concentrationError\n currentMap = {}\n contentMap['concentrationError'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00002'] = currentMap\n loadMaps['SAM.SampleComponent.concentrationError'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concentrationError'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00002'\n currentMap['name'] = 'concentrationError'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute SampleComponent.concentrationUnit\n currentMap = {}\n contentMap['concentrationUnit'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00003'] = currentMap\n loadMaps['SAM.SampleComponent.concentrationUnit'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concentrationUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00003'\n currentMap['name'] = 'concentrationUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00005')\n\n # Attribute SampleComponent.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00006'] = currentMap\n loadMaps['SAM.SampleComponent.details'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00006'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute SampleComponent.purity\n currentMap = {}\n contentMap['purity'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00005'] = currentMap\n loadMaps['SAM.SampleComponent.purity'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.purity'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00005'\n currentMap['name'] = 'purity'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute SampleComponent.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:43_00001'] = currentMap\n loadMaps['SAM.SampleComponent.serial'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:43_00001'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role SampleComponent.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role SampleComponent.container\n currentMap = {}\n contentMap['container'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00004'] = currentMap\n loadMaps['SAM.SampleComponent.container'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.container'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00004'\n currentMap['name'] = 'container'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role SampleComponent.contents\n currentMap = {}\n contentMap['contents'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00003'] = currentMap\n loadMaps['SAM.SampleComponent.contents'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.contents'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00003'\n currentMap['name'] = 'contents'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role SampleComponent.refComponent\n currentMap = {}\n contentMap['refComponent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00002'] = currentMap\n loadMaps['SAM.SampleComponent.refComponent'] = 
currentMap\n currentMap['tag'] = 'SAM.SampleComponent.refComponent'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00002'\n currentMap['name'] = 'refComponent'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('REFS').get('exolinks')\n # End of SampleComponent\n\n currentMap = abstractTypes.get('SampleComponent')\n aList = ['concDisplayUnit', 'concentration', 'concentrationError', 'concentrationUnit', 'purity', 'serial']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'contents']\n currentMap['simpleAttrs'] = aList\n aList = ['container']\n currentMap['optLinks'] = aList\n aList = ['refComponent', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleStore\n currentMap = {}\n abstractTypes['SampleStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-05-12:37:24_00008'] = currentMap\n loadMaps['SAM.SampleStore'] = currentMap\n currentMap['tag'] = 'SAM.SampleStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:24_00008'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.SampleStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute SampleStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute SampleStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute SampleStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute SampleStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00006'] = currentMap\n loadMaps['SAM.SampleStore.name'] = currentMap\n currentMap['tag'] = 'SAM.SampleStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00006'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role SampleStore.abstractSamples\n currentMap = {}\n contentMap['abstractSamples'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00005'] = currentMap\n loadMaps['SAM.SampleStore.abstractSamples'] = currentMap\n currentMap['tag'] = 'SAM.SampleStore.abstractSamples'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00005'\n currentMap['name'] = 'abstractSamples'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = 
globalMap.get('SAM').get('abstractTypes')\n\n # Role SampleStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of SampleStore\n\n currentMap = abstractTypes.get('SampleStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['abstractSamples', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['abstractSamples']\n currentMap['children'] = aList\n\n # Class RefSample\n currentMap = {}\n abstractTypes['RefSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00003'] = currentMap\n loadMaps['SAM.RefSample'] = currentMap\n currentMap['tag'] = 'SAM.RefSample'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00003'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.RefSample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefSample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefSample.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009')\n\n # Attribute RefSample.ionicStrength\n contentMap['ionicStrength'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006')\n\n # Attribute RefSample.isActive\n contentMap['isActive'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008')\n\n # Attribute RefSample.isHazard\n contentMap['isHazard'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007')\n\n # Attribute RefSample.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004')\n\n # Attribute RefSample.ph\n contentMap['ph'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005')\n\n # Attribute RefSample.solvent\n contentMap['solvent'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014')\n\n # Role RefSample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefSample.conformings\n currentMap = {}\n contentMap['conformings'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00002'] = currentMap\n loadMaps['SAM.RefSample.conformings'] = currentMap\n currentMap['tag'] = 'SAM.RefSample.conformings'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00002'\n currentMap['name'] = 'conformings'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role RefSample.hazardPhrases\n contentMap['hazardPhrases'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013')\n\n # Role RefSample.refSamplePositions\n currentMap = {}\n contentMap['refSamplePositions'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00010'] = currentMap\n loadMaps['SAM.RefSample.refSamplePositions'] = currentMap\n currentMap['tag'] = 'SAM.RefSample.refSamplePositions'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00010'\n currentMap['name'] = 'refSamplePositions'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = False\n currentMap['content'] = globalMap.get('HOLD').get('exolinks')\n\n # Role RefSample.refSampleSources\n currentMap = {}\n 
contentMap['refSampleSources'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00008'] = currentMap\n loadMaps['SAM.RefSample.refSampleSources'] = currentMap\n currentMap['tag'] = 'SAM.RefSample.refSampleSources'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00008'\n currentMap['name'] = 'refSampleSources'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('SAM').get('abstractTypes')\n\n # Role RefSample.sampleCategories\n contentMap['sampleCategories'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003')\n\n # Role RefSample.sampleComponents\n contentMap['sampleComponents'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001')\n # End of RefSample\n\n currentMap = abstractTypes.get('RefSample')\n aList = ['ionicStrength', 'isActive', 'isHazard', 'ph']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'solvent', 'conformings']\n currentMap['simpleAttrs'] = aList\n aList = ['sampleComponents', 'refSampleSources', 'sampleCategories', 'refSamplePositions', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['refSampleSources', 'sampleComponents']\n currentMap['children'] = aList\n\n # Class Sample\n currentMap = {}\n abstractTypes['Sample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00001'] = currentMap\n loadMaps['SAM.Sample'] = currentMap\n currentMap['tag'] = 'SAM.Sample'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.Sample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Sample.amountDisplayUnit\n currentMap = {}\n contentMap['amountDisplayUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00011'] = currentMap\n loadMaps['SAM.Sample.amountDisplayUnit'] = currentMap\n currentMap['tag'] = 'SAM.Sample.amountDisplayUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00011'\n currentMap['name'] = 'amountDisplayUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Sample.amountUnit\n currentMap = {}\n contentMap['amountUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00010'] = currentMap\n loadMaps['SAM.Sample.amountUnit'] = currentMap\n currentMap['tag'] = 'SAM.Sample.amountUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00010'\n currentMap['name'] = 'amountUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00006')\n\n # Attribute Sample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Sample.batchNum\n currentMap = {}\n contentMap['batchNum'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00013'] = currentMap\n loadMaps['SAM.Sample.batchNum'] = currentMap\n currentMap['tag'] = 'SAM.Sample.batchNum'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00013'\n currentMap['name'] = 'batchNum'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n 
currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Sample.colPosition\n currentMap = {}\n contentMap['colPosition'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00005'] = currentMap\n loadMaps['SAM.Sample.colPosition'] = currentMap\n currentMap['tag'] = 'SAM.Sample.colPosition'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00005'\n currentMap['name'] = 'colPosition'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Sample.currentAmount\n currentMap = {}\n contentMap['currentAmount'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00009'] = currentMap\n loadMaps['SAM.Sample.currentAmount'] = currentMap\n currentMap['tag'] = 'SAM.Sample.currentAmount'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00009'\n currentMap['name'] = 'currentAmount'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Sample.currentAmountFlag\n currentMap = {}\n contentMap['currentAmountFlag'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00012'] = currentMap\n loadMaps['SAM.Sample.currentAmountFlag'] = currentMap\n currentMap['tag'] = 'SAM.Sample.currentAmountFlag'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00012'\n currentMap['name'] = 'currentAmountFlag'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Sample.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009')\n\n # Attribute Sample.initialAmount\n currentMap = {}\n contentMap['initialAmount'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00008'] = currentMap\n loadMaps['SAM.Sample.initialAmount'] = currentMap\n currentMap['tag'] = 'SAM.Sample.initialAmount'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00008'\n currentMap['name'] = 'initialAmount'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Sample.ionicStrength\n contentMap['ionicStrength'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006')\n\n # Attribute Sample.isActive\n contentMap['isActive'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008')\n\n # Attribute Sample.isHazard\n contentMap['isHazard'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007')\n\n # Attribute Sample.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004')\n\n # Attribute Sample.ph\n contentMap['ph'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005')\n\n # Attribute Sample.rowPosition\n currentMap = {}\n contentMap['rowPosition'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00004'] = currentMap\n loadMaps['SAM.Sample.rowPosition'] = currentMap\n currentMap['tag'] = 'SAM.Sample.rowPosition'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00004'\n currentMap['name'] = 'rowPosition'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 
'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Sample.solvent\n contentMap['solvent'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014')\n\n # Attribute Sample.subPosition\n currentMap = {}\n contentMap['subPosition'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00006'] = currentMap\n loadMaps['SAM.Sample.subPosition'] = currentMap\n currentMap['tag'] = 'SAM.Sample.subPosition'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00006'\n currentMap['name'] = 'subPosition'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role Sample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Sample.hazardPhrases\n contentMap['hazardPhrases'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013')\n\n # Role Sample.holder\n currentMap = {}\n contentMap['holder'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:27_00003'] = currentMap\n loadMaps['SAM.Sample.holder'] = currentMap\n currentMap['tag'] = 'SAM.Sample.holder'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:27_00003'\n currentMap['name'] = 'holder'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('HOLD').get('exolinks')\n\n # Role Sample.refSample\n currentMap = {}\n contentMap['refSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00003'] = currentMap\n loadMaps['SAM.Sample.refSample'] = currentMap\n currentMap['tag'] = 'SAM.Sample.refSample'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00003'\n currentMap['name'] = 'refSample'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role Sample.sampleCategories\n contentMap['sampleCategories'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003')\n\n # Role Sample.sampleComponents\n contentMap['sampleComponents'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001')\n # End of Sample\n\n currentMap = abstractTypes.get('Sample')\n aList = ['amountDisplayUnit', 'amountUnit', 'batchNum', 'colPosition', 'currentAmount', 'currentAmountFlag', 'initialAmount', 'ionicStrength', 'isActive', 'isHazard', 'ph', 'rowPosition', 'subPosition']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'solvent']\n currentMap['simpleAttrs'] = aList\n aList = ['refSample']\n currentMap['optLinks'] = aList\n aList = ['sampleComponents', 'sampleCategories', 'holder', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['sampleComponents']\n currentMap['children'] = aList\n\n # Class CrystalSample\n currentMap = {}\n abstractTypes['CrystalSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00004'] = currentMap\n loadMaps['SAM.CrystalSample'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.CrystalSample\n contentMap = {}\n currentMap['content'] = 
contentMap\n\n # Attribute CrystalSample.a\n currentMap = {}\n contentMap['a'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00018'] = currentMap\n loadMaps['SAM.CrystalSample.a'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.a'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00018'\n currentMap['name'] = 'a'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.alpha\n currentMap = {}\n contentMap['alpha'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00003'] = currentMap\n loadMaps['SAM.CrystalSample.alpha'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.alpha'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00003'\n currentMap['name'] = 'alpha'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.amountDisplayUnit\n contentMap['amountDisplayUnit'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00011')\n\n # Attribute CrystalSample.amountUnit\n contentMap['amountUnit'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00010')\n\n # Attribute CrystalSample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute CrystalSample.b\n currentMap = {}\n contentMap['b'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00001'] = currentMap\n loadMaps['SAM.CrystalSample.b'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.b'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00001'\n currentMap['name'] = 'b'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.batchNum\n contentMap['batchNum'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00013')\n\n # Attribute CrystalSample.beta\n currentMap = {}\n contentMap['beta'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00004'] = currentMap\n loadMaps['SAM.CrystalSample.beta'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.beta'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00004'\n currentMap['name'] = 'beta'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.c\n currentMap = {}\n contentMap['c'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00002'] = currentMap\n loadMaps['SAM.CrystalSample.c'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.c'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00002'\n currentMap['name'] = 'c'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.colPosition\n contentMap['colPosition'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00005')\n\n # Attribute CrystalSample.colour\n currentMap = {}\n contentMap['colour'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00014'] = currentMap\n loadMaps['SAM.CrystalSample.colour'] = currentMap\n 
currentMap['tag'] = 'SAM.CrystalSample.colour'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00014'\n currentMap['name'] = 'colour'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute CrystalSample.crystalType\n currentMap = {}\n contentMap['crystalType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00013'] = currentMap\n loadMaps['SAM.CrystalSample.crystalType'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.crystalType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00013'\n currentMap['name'] = 'crystalType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute CrystalSample.currentAmount\n contentMap['currentAmount'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00009')\n\n # Attribute CrystalSample.currentAmountFlag\n contentMap['currentAmountFlag'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00012')\n\n # Attribute CrystalSample.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009')\n\n # Attribute CrystalSample.gamma\n currentMap = {}\n contentMap['gamma'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00005'] = currentMap\n loadMaps['SAM.CrystalSample.gamma'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.gamma'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00005'\n currentMap['name'] = 'gamma'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.initialAmount\n contentMap['initialAmount'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00008')\n\n # Attribute CrystalSample.ionicStrength\n contentMap['ionicStrength'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006')\n\n # Attribute CrystalSample.isActive\n contentMap['isActive'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008')\n\n # Attribute CrystalSample.isHazard\n contentMap['isHazard'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007')\n\n # Attribute CrystalSample.morphology\n currentMap = {}\n contentMap['morphology'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00012'] = currentMap\n loadMaps['SAM.CrystalSample.morphology'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.morphology'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00012'\n currentMap['name'] = 'morphology'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute CrystalSample.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004')\n\n # Attribute CrystalSample.ph\n contentMap['ph'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005')\n\n # Attribute CrystalSample.rowPosition\n contentMap['rowPosition'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00004')\n\n # Attribute CrystalSample.solvent\n contentMap['solvent'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014')\n\n # Attribute 
CrystalSample.spaceGroup\n currentMap = {}\n contentMap['spaceGroup'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00011'] = currentMap\n loadMaps['SAM.CrystalSample.spaceGroup'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.spaceGroup'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00011'\n currentMap['name'] = 'spaceGroup'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute CrystalSample.subPosition\n contentMap['subPosition'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00006')\n\n # Attribute CrystalSample.x\n currentMap = {}\n contentMap['x'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00015'] = currentMap\n loadMaps['SAM.CrystalSample.x'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.x'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00015'\n currentMap['name'] = 'x'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.y\n currentMap = {}\n contentMap['y'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00016'] = currentMap\n loadMaps['SAM.CrystalSample.y'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.y'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00016'\n currentMap['name'] = 'y'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.z\n currentMap = {}\n contentMap['z'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00017'] = currentMap\n loadMaps['SAM.CrystalSample.z'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.z'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00017'\n currentMap['name'] = 'z'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Role CrystalSample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role CrystalSample.hazardPhrases\n contentMap['hazardPhrases'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013')\n\n # Role CrystalSample.holder\n contentMap['holder'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:27_00003')\n\n # Role CrystalSample.refSample\n contentMap['refSample'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00003')\n\n # Role CrystalSample.sampleCategories\n contentMap['sampleCategories'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003')\n\n # Role CrystalSample.sampleComponents\n contentMap['sampleComponents'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001')\n # End of CrystalSample\n\n currentMap = abstractTypes.get('CrystalSample')\n aList = ['a', 'alpha', 'amountDisplayUnit', 'amountUnit', 'b', 'batchNum', 'beta', 'c', 'colPosition', 'currentAmount', 'currentAmountFlag', 'gamma', 'initialAmount', 'ionicStrength', 'isActive', 'isHazard', 'ph', 'rowPosition', 'spaceGroup', 'subPosition', 'x', 'y', 'z']\n currentMap['headerAttrs'] = aList\n aList = ['colour', 'crystalType', 'details', 'morphology', 'name', 'solvent']\n currentMap['simpleAttrs'] = aList\n aList = ['refSample']\n 
currentMap['optLinks'] = aList\n aList = ['sampleComponents', 'sampleCategories', 'holder', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['sampleComponents']\n currentMap['children'] = aList\n\n # Out-of-package link to RefSampleSource\n currentMap = {}\n exolinks['RefSampleSource'] = currentMap\n loadMaps['SAM.exo-RefSampleSource'] = currentMap\n currentMap['tag'] = 'SAM.exo-RefSampleSource'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00008'\n currentMap['name'] = 'RefSampleSource'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.RefSampleSource\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to SampleComponent\n currentMap = {}\n exolinks['SampleComponent'] = currentMap\n loadMaps['SAM.exo-SampleComponent'] = currentMap\n currentMap['tag'] = 'SAM.exo-SampleComponent'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00002'\n currentMap['name'] = 'SampleComponent'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.SampleComponent\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to SampleStore\n currentMap = {}\n exolinks['SampleStore'] = currentMap\n loadMaps['SAM.exo-SampleStore'] = currentMap\n currentMap['tag'] = 'SAM.exo-SampleStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:24_00008'\n currentMap['name'] = 'SampleStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.SampleStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to RefSample\n currentMap = {}\n exolinks['RefSample'] = currentMap\n loadMaps['SAM.exo-RefSample'] = currentMap\n currentMap['tag'] = 'SAM.exo-RefSample'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00003'\n currentMap['name'] = 'RefSample'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.RefSample\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Sample\n currentMap = {}\n exolinks['Sample'] = currentMap\n loadMaps['SAM.exo-Sample'] = currentMap\n currentMap['tag'] = 'SAM.exo-Sample'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00001'\n currentMap['name'] = 'Sample'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.Sample\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to CrystalSample\n currentMap = {}\n exolinks['CrystalSample'] = currentMap\n 
loadMaps['SAM.exo-CrystalSample'] = currentMap\n currentMap['tag'] = 'SAM.exo-CrystalSample'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00004'\n currentMap['name'] = 'CrystalSample'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.CrystalSample\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))",
"def build_maps(self):\n # Determine which match expressions are unique.\n expressions = {}\n for expr in (spec.matchex for spec in self.config['aliases']):\n if expr.expression_ in expressions:\n expressions[expr.expression_] += 1\n else:\n expressions[expr.expression_] = 1\n for expr in (spec.matchex for spec in self.config['aliases']):\n expr.unique = expressions[expr.expression_] == 1\n \n # Determine which accounts / aliases are referenced by which account declarations.\n self.accounts = {}\n self.aliases = {}\n self.alias_accounts = {}\n for spec in self.config['aliases']:\n for ident in spec.accounts:\n if ident in self.accounts:\n self.accounts[ident].append(spec)\n else:\n self.accounts[ident] = [spec]\n for ident in spec.aliases:\n if ident in self.aliases:\n self.aliases[ident].append(spec)\n self.alias_accounts[ident] |= set(spec.accounts)\n else:\n self.aliases[ident] = [spec]\n self.alias_accounts[ident] = set(spec.accounts)\n \n return self",
"def set_dynamic_mapping(self, collection):\n self.client.put_mapping(self.index, collection, {'dynamic': True})",
"def generate_mappings(self, use_all=False, regenerate=False):\n if (len(self.rel_to_idx) == 0 or len(self.ent_to_idx) == 0 or (regenerate is True)) \\\n and (not self.using_existing_db):\n from ..evaluation import create_mappings\n self._create_schema()\n if use_all:\n complete_dataset = []\n for key in self.dataset.keys():\n complete_dataset.append(self.dataset[key])\n self.rel_to_idx, self.ent_to_idx = create_mappings(np.concatenate(complete_dataset, axis=0))\n\n else:\n self.rel_to_idx, self.ent_to_idx = create_mappings(self.dataset[\"train\"])\n\n self._insert_entities_in_db()\n return self.rel_to_idx, self.ent_to_idx",
"def build_index(in_dir, out_dict, out_postings):\n print('indexing...')\n \n # get pathlist of documents folder\n pathlist = os.listdir(in_dir)\n \n # initialize variables\n termID = 1\n termdic = {} # format {term:termID}\n \n ps = PorterStemmer()\n\n # First create term-termID mapping dic\n for doc in pathlist:\n # open each document in folder\n f = open(os.path.join(in_dir, doc), 'r')\n print(\"doc: \"+doc)\n for line in f:\n # casefolding\n line = line.lower()\n \n # tokenize\n sent_line = nltk.sent_tokenize(line)\n for sent_tokens in sent_line:\n word_tokens = nltk.word_tokenize(sent_tokens)\n\n stemmed_tokens=[]\n for token in word_tokens:\n # stem tokens\n stemmed_word = ps.stem(token)\n # remove punctuations\n if stemmed_word not in list(string.punctuation):\n stemmed_tokens.append(stemmed_word)\n\n for stemmed_token in stemmed_tokens:\n if stemmed_token not in termdic.keys():\n termdic[stemmed_token] = termID\n termID += 1\n \n \n # blkSize = 10000\n # blkCount=1\n # pointer=1\n dic={} # format {term: docfreq,pointer}\n postings={} # format {term: postinglist}\n \n\n for doc in pathlist:\n f = open(os.path.join(in_dir, doc), 'r')\n print(\"doc: \"+doc)\n for line in f:\n # casefolding\n line = line.lower()\n \n # tokenize\n sent_line = nltk.sent_tokenize(line)\n for sent_tokens in sent_line:\n word_tokens = nltk.word_tokenize(sent_tokens)\n\n stemmed_tokens=[]\n for token in word_tokens:\n # stem tokens\n stemmed_word = ps.stem(token)\n # remove punctuations\n if stemmed_word not in list(string.punctuation):\n stemmed_tokens.append(stemmed_word)\n \n # update doc frequency and add posting to list\n for stemmed_token in stemmed_tokens:\n if termdic[stemmed_token] not in dic.keys():\n dic[termdic[stemmed_token]] = 1\n postings[termdic[stemmed_token]] = [int(doc)]\n if termdic[stemmed_token] in dic.keys() and int(doc) not in postings[termdic[stemmed_token]]:\n dic[termdic[stemmed_token]] +=1\n postings[termdic[stemmed_token]].append(int(doc))\n \n newdic={} # format {term: (docfreq,pointer)}\n \n # list of termdic keys -> terms\n termdiclist = list(termdic.keys())\n\n # dictionary to store in dictionary.txt\n for item in termdiclist:\n newdic[item] = (dic[termdic[item]],termdic[item])\n # print(newdic)\n with open (out_dict,'wb+') as fp:\n # for item in dic:\n # fp.write(str(termdiclist[item-1])+\" \"+str(dic[item])) \n # fp.write(\"\\n\")\n pickle.dump(newdic,fp)\n fp.close()\n \n # write out postings to postings file\n # if posting has skip pointer/ is tuple, separate by ','\n with open (out_postings,'w+') as fp:\n for posting in postings:\n postings[posting].sort()\n addSkipPointer(postings[posting])\n for item in postings[posting]:\n if type(item) is tuple:\n fp.write(str(item[0])+\",\"+str(item[1])+\" \")\n else:\n fp.write(str(item)+\" \")\n fp.write(\"\\n\")\n fp.close()\n\n # print(\"dic : \",dic)\n # print(\"postings : \",postings)\n \n return (dic,postings)",
"def prepare():\n client = elasticsearch.Elasticsearch(os.environ[\"ES_SOURCE\"])\n dirname = os.path.dirname(__file__)\n\n mapping = client.indices.get(\"_all\")\n with open(os.path.join(dirname, \"test_data_index.json\"), \"w\") as file:\n setting = next(iter(mapping.values()))\n setting[\"settings\"][\"index\"] = {\"analysis\": setting[\"settings\"][\"index\"][\"analysis\"]}\n json.dump(setting, file, indent=2)\n\n with open(os.path.join(dirname, \"test_data_query.json\"), \"r\") as file:\n query = json.load(file)\n docs = client.search(body=query, size=100)\n\n with open(os.path.join(dirname, \"test_data.ndjson\"), \"w\") as file:\n for hit in docs[\"hits\"][\"hits\"]:\n json.dump({\"index\": {\"_id\": hit[\"_id\"]}}, file)\n file.write(\"\\n\")\n json.dump(hit[\"_source\"], file)\n file.write(\"\\n\")",
"def buildIndex():\n\n covid_index = Index('covid_index')\n if covid_index.exists():\n covid_index.delete() # Overwrite any previous version\n covid_index.document(Article) # register the document mapping\n covid_index.create() # create index with specified mapping and document\n\n \n articles = list()\n # Open the json covid corpus\n with open('covid_comm_use_subset_meta.json', 'r', encoding='utf-8') as data_file:\n # load articles from json file into dictionary\n for line in data_file:\n try:\n articles.append(json.loads(line))\n except json.decoder.JSONDecodeError:\n continue \n\n size = len(articles)\n\n # Action series for bulk loading with helpers.bulk function.\n # Implemented as a generator, to return one movie with each call.\n # Note that we include the index name here.\n # The Document type is always 'doc'.\n # Every item to be indexed must have a unique key.\n def actions():\n # mid is movie id (used as key into movies dictionary)\n for mid in range(size):\n # handle NaN in author field\n author = str(articles[mid]['authors'])\n if author == \"NaN\":\n author = \"\"\n # handle NaN and missing month and day in publish_time field\n time = str(articles[mid]['publish_time'])\n # if NaN in publish_time let publish time be the date when index is run\n if time == \"NaN\":\n publish_time = date.today()\n # if month and day are missing in publish_time\n elif time == \"2020\":\n publish_time = date(2020, 1, 1)\n else:\n try:\n publish_time = datetime.strptime(time, '%Y %m %d').date()\n except Exception:\n publish_time = date.today()\n yield {\n \"_index\": \"covid_index\", \n \"_type\": '_doc',\n \"_id\": mid,\n \"title\": articles[mid]['title'],\n \"abstract\": articles[mid]['abstract'],\n \"author\": author,\n \"publish_time\": publish_time,\n \"suggestion\": articles[mid]['title']\n }\n\n helpers.bulk(es, actions())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert the citation fields to a serializable hash.
|
def to_hash(self, recursion_depth=1):
obj = super(Citations, self).to_hash(recursion_depth)
obj['_id'] = int(self.id)
obj['article_title'] = unicode_type(self.article_title)
obj['abstract_text'] = unicode_type(self.abstract_text)
obj['xml_text'] = unicode_type(self.xml_text)
# pylint: disable=no-member
obj['journal_id'] = int(self.journal.id)
# pylint: enable=no-member
obj['journal_volume'] = int(self.journal_volume)
obj['journal_issue'] = int(self.journal_issue)
obj['page_range'] = str(self.page_range)
obj['doi_reference'] = str(self.doi_reference)
obj['release_authorization_id'] = str(self.release_authorization_id)
obj['encoding'] = str(self.encoding)
return obj
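
For context, the method above follows a simple pattern: cast every model field to a JSON-serializable builtin (int/str) and return the resulting dict. The sketch below is a minimal, self-contained illustration of that pattern only; the stand-in Citation class, its fields, and the sample values are assumptions made for this example and are not taken from the record above.

import json


class Citation:
    """Minimal stand-in model used only for this sketch (assumed, not from the record above)."""

    def __init__(self, _id, article_title, journal_volume):
        self.id = _id
        self.article_title = article_title
        self.journal_volume = journal_volume

    def to_hash(self):
        # Cast each field to a JSON-serializable builtin before returning,
        # mirroring the int()/str() casts in the method above.
        return {
            '_id': int(self.id),
            'article_title': str(self.article_title),
            'journal_volume': int(self.journal_volume),
        }


citation = Citation(42, 'Example Title', 7)
print(json.dumps(citation.to_hash()))
# -> {"_id": 42, "article_title": "Example Title", "journal_volume": 7}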
|
[
"def from_hash(self, obj):\n super(Citations, self).from_hash(obj)\n self._set_only_if('_id', obj, 'id', lambda: int(obj['_id']))\n self._set_only_if('journal_id', obj, 'journal',\n lambda: Journals.get(Journals.id == int(obj['journal_id'])))\n for key in ['journal_volume', 'journal_issue']:\n self._set_only_if(key, obj, key, lambda k=key: int(obj[k]))\n for key in ['page_range', 'release_authorization_id', 'encoding',\n 'doi_reference']:\n self._set_only_if(key, obj, key, lambda k=key: str(obj[k]))\n for key in ['article_title', 'xml_text', 'abstract_text']:\n self._set_only_if(key, obj, key, lambda k=key: unicode_type(obj[k]))",
"def bibtex_value(self) -> Dict:\n pass",
"def serialize(self) -> Dict[str, str]:\n return {\n \"id\": f\"{self.dept}{self.course_num}{self.section}\",\n \"name\": self.name,\n \"dept\": self.dept,\n \"course_num\": self.course_num,\n \"section\": self.section,\n \"location\": self.location,\n \"start_time\": self.start_time.isoformat(),\n \"end_time\": self.end_time.isoformat(),\n \"start_date\": self.start_date.isoformat(),\n \"end_date\": self.end_date.isoformat(),\n \"days_of_week\": \", \".join(self.days_of_week),\n }",
"def review_as_dict(self):\n\n review = {r.name: getattr(self, r.name) for r in self.__table__.columns}\n review['author'] = self.author.first_name + ' ' + self.author.last_name\n return review",
"def hash_csv_row(csv_row: dict[str, Any]) -> str:\n row_str = str(csv_row).encode()\n return hashlib.sha256(row_str).hexdigest()",
"def asdict(self):",
"def citation(self) -> str:\n return self.collection.extra_fields.get(CITATION)",
"def to_bibtex(self):\n bibtexed = \"@{}{{{},\\n\".format(self.entry_type, self.key)\n for field in self.fields:\n bibtexed += \"\\t\" + field.to_bibtex() + \",\\n\"\n bibtexed += \"}\"\n\n return bibtexed",
"def to_dict(self):\n dict_rep = {}\n dict_rep[\"desc\"] = self.desc\n dict_rep[\"begin\"] = self.begin.isoformat()\n dict_rep[\"end\"] = self.end.isoformat()\n return dict_rep",
"def to_dict(self):\r\n if self.id:\r\n return {\"id\": self.id}\r\n return {\"type\": self.type, \"ipVersion\": self.ip_version}",
"def date_dict(self):\n dict_ret = {\n \"original\": self.pub_date,\n }\n if self.pub_date:\n dict_ret[\"year\"] = self.pub_date.year\n dict_ret[\"month\"] = self.pub_date.month\n dict_ret[\"month_string\"] = self.pub_date.strftime(\"%B\")\n else:\n dict_ret[\"year\"] = \"None\"\n dict_ret[\"month\"] = \"None\"\n dict_ret[\"month_string\"] = \"None\"\n return dict_ret",
"def as_dict(self):\n\n return super(CPE, self).__str__()",
"def to_dict(self, include_meta=False):\n result = super(JackalDoc, self).to_dict(include_meta=include_meta)\n if include_meta:\n source = result.pop('_source')\n return {**result, **source}\n else:\n return result",
"def __hash__(self):\n return hash(\"type: \" + self.question_type + \", text: \" + self.text + \", is_valid: \" + str(self.is_valid))",
"def fingerprint(self):\n # check whether the hash of this object is already known\n if self.attr_fingerprint[\"#\"] is not None:\n return self.attr_fingerprint[\"#\"]\n list_to_hash = []\n # Going through all fields that need to be taken into account\n for key in sorted(self.to_fingerprint):\n # Computing missing hashes\n if self.attr_fingerprint[key] is None:\n self.attr_fingerprint[key] = \\\n\t\t fingerprint([key, getattr(self, key)])\n # Building final string\n list_to_hash.append(self.attr_fingerprint[key])\n string = json.dumps(list_to_hash, separators=(',',':'))\n result = b64encode(crypthash(string).digest())\n self.attr_fingerprint[\"#\"] = result\n return result",
"def to_dict(self):\n out_dict = {\n \"qobj_id\": self.qobj_id,\n \"header\": self.header.to_dict(),\n \"config\": self.config.to_dict(),\n \"schema_version\": self.schema_version,\n \"type\": self.type,\n \"experiments\": [x.to_dict() for x in self.experiments],\n }\n return out_dict",
"def as_dict(self):\n return {\n 'entry_id': self.entry_id,\n 'version': self.version,\n 'domain': self.domain,\n 'title': self.title,\n 'data': self.data,\n 'source': self.source,\n }",
"def _citation_to_string(citation):\r\n output = ''\r\n sep = '. '\r\n\r\n if 'authors' in citation:\r\n authors = []\r\n for author in citation['authors']:\r\n if 'family_name' not in author:\r\n continue\r\n author_name = author['family_name']\r\n if 'given_name' in author:\r\n author_name += ' ' + author['given_name']\r\n authors.append(author_name)\r\n output += ', '.join(authors) + sep\r\n if 'year' in citation:\r\n output += str(citation['year']) + sep\r\n if 'title' in citation:\r\n output += citation['title'] + sep\r\n\r\n if 'journal' in citation:\r\n output += citation['journal'] + sep\r\n if 'volume' in citation:\r\n output += citation['volume']\r\n if 'issue' in citation:\r\n output += '(' + citation['issue'] + ')'\r\n if 'page_location' in citation:\r\n output += ':' + citation['page_location'] + sep\r\n else: # assume for now that the reference is to a book\r\n # TODO(Handle url case.)\r\n if 'edition' in citation:\r\n output += citation['edition'] + sep\r\n if 'publication_location' in citation:\r\n output += citation['publication_location']\r\n if 'publisher' in citation:\r\n output += ': ' + citation['publisher'] + sep\r\n elif 'publisher' in citation:\r\n output += citation['publisher'] + sep\r\n if 'extent' in citation:\r\n output += citation['extent'] + sep\r\n if 'notes' in citation:\r\n output += citation['notes'] + sep\r\n\r\n return output.strip()",
"def citizen_data_to_string(citizen_data, cur):\n data = []\n for field in FIELD_NAMES:\n value = citizen_data[field]\n if field == 'birth_date':\n value = convert_date_string(value)\n data.append(value)\n return (cur\n .mogrify(INSERT_DATA_PATTERN, tuple(data))\n .decode('utf-8'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert the hash object into the citation object fields.
|
def from_hash(self, obj):
super(Citations, self).from_hash(obj)
self._set_only_if('_id', obj, 'id', lambda: int(obj['_id']))
self._set_only_if('journal_id', obj, 'journal',
lambda: Journals.get(Journals.id == int(obj['journal_id'])))
for key in ['journal_volume', 'journal_issue']:
self._set_only_if(key, obj, key, lambda k=key: int(obj[k]))
for key in ['page_range', 'release_authorization_id', 'encoding',
'doi_reference']:
self._set_only_if(key, obj, key, lambda k=key: str(obj[k]))
for key in ['article_title', 'xml_text', 'abstract_text']:
self._set_only_if(key, obj, key, lambda k=key: unicode_type(obj[k]))
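
The deserializer above relies on a _set_only_if helper that assigns an attribute only when the corresponding key is present in the incoming hash, with a per-key lambda doing the type cast. The sketch below shows that pattern in isolation; the stand-in Citation class, the chosen keys, and the sample hash are illustrative assumptions, not part of the dataset record.

class Citation:
    """Minimal stand-in model used only for this sketch (assumed, not from the record above)."""

    def _set_only_if(self, key, obj, attr, func):
        # Assign the attribute only when the key exists in the incoming hash.
        if key in obj:
            setattr(self, attr, func())

    def from_hash(self, obj):
        # '_id' maps onto the 'id' attribute; the remaining keys map one-to-one.
        self._set_only_if('_id', obj, 'id', lambda: int(obj['_id']))
        for key in ['article_title', 'doi_reference']:
            # k=key binds the current key at definition time, avoiding the
            # late-binding pitfall of closures created inside a loop.
            self._set_only_if(key, obj, key, lambda k=key: str(obj[k]))


citation = Citation()
citation.from_hash({'_id': '42', 'article_title': 'Example Title'})
print(citation.id, citation.article_title)  # -> 42 Example Title

Note the lambda k=key default-argument idiom in the original code as well: without it, every closure created in the loop would see the final value of key.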
|
[
"def elastic_mapping_builder(obj):\n super(Citations, Citations).elastic_mapping_builder(obj)\n obj['journal_id'] = obj['journal_volume'] = \\\n obj['journal_issue'] = {'type': 'integer'}\n obj['abstract_text'] = obj['xml_text'] = \\\n obj['page_range'] = obj['release_authorization_id'] = \\\n {'type': 'text'}\n obj['article_title'] = obj['encoding'] = \\\n obj['doi_reference'] = {'type': 'text', 'fields': {'keyword': {'type': 'keyword', 'ignore_above': 256}}}",
"def register_citation(self, citation_object, submitter):\n invalid_metadata = False\n citations = []\n if citation_object is not None and isinstance(citation_object, dict):\n\n # validate the required fields\n if (\"source_id\" in citation_object and \"source_id\" is not None) and \\\n (\"related_identifiers\" in citation_object and len(citation_object[\"related_identifiers\"]) > 0):\n\n for related_id_object in citation_object[\"related_identifiers\"]:\n\n if \"identifier\" not in related_id_object or \\\n len(related_id_object[\"identifier\"]) < 1:\n print(\"1\")\n invalid_metadata=True\n\n if \"relation_type\" not in related_id_object or \\\n len(related_id_object[\"relation_type\"]) < 1:\n print(\"2\")\n invalid_metadata=True\n\n if invalid_metadata :\n\n response = {\n \"message\": \"Incomplete Metadata. source_id, and related_identifiers are both required fields\",\n \"status_code\": \"500\"\n }\n\n return response\n\n identifier = related_id_object[\"identifier\"]\n relation_type = (related_id_object[\"relation_type\"]).lower()\n source_id = citation_object[\"source_id\"]\n\n if relation_type in VALID_DATACITE_CITATION_TYPE:\n relation_type = VALID_DATACITE_CITATION_TYPE[relation_type]\n elif relation_type in VALID_DATACITE_CITATION_TYPE.values():\n pass\n else:\n response = {\n \"message\": \"Not a valid relation type\",\n \"status_code\": \"500\"\n }\n return response\n\n try:\n metrics_database = MetricsDatabase()\n metrics_database.connect()\n doi_pattern = \"^\\s*(http:\\/\\/|https:\\/\\/)?(doi.org\\/|dx.doi.org\\/)?(doi: ?|DOI: ?)?(10\\.\\d{4,}(\\.\\d)*)\\/(\\w+).*$\"\n doi_metadata = {}\n if (re.match(doi_pattern, source_id)):\n source_doi_index = source_id.index(\"10.\")\n source_doi = source_id[source_doi_index:]\n doi_metadata = metrics_database.getDOIMetadata(doi=source_doi)\n\n if (re.match(doi_pattern, identifier)):\n identifier_index = identifier.index(\"10.\")\n identifier = identifier[identifier_index:]\n\n citation_db_object = {}\n citation_db_object[\"source_id\"] = source_doi\n citation_db_object[\"target_id\"] = identifier\n citation_db_object[\"relation_type\"] = relation_type\n\n if submitter is not None:\n citation_db_object[\"reporter\"] = submitter\n\n if \"source_url\" in citation_object:\n citation_db_object[\"source_url\"] = citation_object[\"source_url\"]\n elif \"source_url\" in doi_metadata:\n citation_db_object[\"source_url\"] = doi_metadata[\"source_url\"]\n\n if \"link_publication_date\" in citation_object:\n citation_db_object[\"link_publication_date\"] = citation_object[\"link_publication_date\"]\n elif \"link_publication_date\" in doi_metadata:\n citation_db_object[\"link_publication_date\"] = doi_metadata[\"link_publication_date\"]\n\n if \"origin\" in citation_object:\n citation_db_object[\"origin\"] = citation_object[\"origin\"]\n elif \"origin\" in doi_metadata:\n citation_db_object[\"origin\"] = doi_metadata[\"origin\"]\n\n if \"title\" in citation_object:\n citation_db_object[\"title\"] = citation_object[\"title\"]\n elif \"title\" in doi_metadata:\n citation_db_object[\"title\"] = doi_metadata[\"title\"]\n\n if \"publisher\" in citation_object:\n citation_db_object[\"publisher\"] = citation_object[\"publisher\"]\n elif \"publisher\" in doi_metadata:\n citation_db_object[\"publisher\"] = doi_metadata[\"publisher\"]\n\n if \"journal\" in citation_object:\n citation_db_object[\"journal\"] = citation_object[\"journal\"]\n elif \"journal\" in doi_metadata:\n citation_db_object[\"journal\"] = doi_metadata[\"journal\"]\n\n if \"volume\" in 
citation_object:\n citation_db_object[\"volume\"] = citation_object[\"volume\"]\n elif \"volume\" in doi_metadata:\n citation_db_object[\"volume\"] = doi_metadata[\"volume\"]\n\n if \"page\" in citation_object:\n citation_db_object[\"page\"] = citation_object[\"page\"]\n elif \"page\" in doi_metadata:\n citation_db_object[\"page\"] = doi_metadata[\"page\"]\n\n if \"year_of_publishing\" in citation_object:\n citation_db_object[\"year_of_publishing\"] = citation_object[\"year_of_publishing\"]\n elif \"year_of_publishing\" in doi_metadata:\n citation_db_object[\"year_of_publishing\"] = doi_metadata[\"year_of_publishing\"]\n\n citations.append(citation_db_object)\n metrics_database.insertCitationObjects(citations_data=citations)\n response = {\n \"message\": \"Registered\",\n \"status_code\": \"202\"\n }\n return response\n\n except Exception as e:\n self.logger.error(e)\n return self.queue_citation_object(self.request)\n\n response = {\n \"message\": \"Cannot process this type of request\",\n \"status_code\": \"500\"\n }\n\n return response",
"def citation(self) -> str:\n return self.collection.extra_fields.get(CITATION)",
"def __init__(self, pubmed_entry):\n article_info = pubmed_entry['MedlineCitation']['Article']\n #author info\n authors = article_info['AuthorList']\n if \"Abstract\" in article_info:\n self.abstract = article_info['Abstract']\n else:\n self.abstract = ''\n if len(authors) < 3:\n self.first_author = ', '.join([parse_author(author) for author in authors]) + '.'\n else:\n self.first_author = parse_author(authors[0]) + ' et al.'\n #article info\n if len(article_info['ArticleDate']):\n self.year = article_info['ArticleDate'][0]['Year']\n else:\n try:\n self.year = article_info['Journal']['JournalIssue']['PubDate']['Year']\n except KeyError:\n self.year = ''\n self.title = article_info['ArticleTitle']\n pagination = False\n if \"Pagination\" in article_info:\n pagination = article_info['Pagination']\n self.journal_info = parse_journal(article_info['Journal'], pagination)\n if len(article_info[\"ELocationID\"]):\n self.link = 'https://doi.org/' + article_info['ELocationID'][0]\n else:\n self.link = ''\n print(self.first_author, self.title, self.link, self.journal_info)\n self.yml = \"- author: {}\\n title: '{} {}.'\\n alt_link: '{}'\\n year: {}\\n\\n\".format(self.first_author, self.title, self.journal_info, self.link, self.year)\n self.long_print = \"author: {}\\nyear: {}\\ntitle: '{} \\n{}.'\\nabstract: '{}'\\nDOI_link: '{}'\\n\\n\".format(self.first_author, self.year, self.title, self.journal_info, self.abstract, self.link, )",
"def textToAttributeValueList (\n\n self,\n text = None,\n bind = True\n ) :\n\n # there is some record in bib firmat in this text, parses it\n\n## if self.findBibPrefix( text ) >= 0 : return self.bibToAttributeValueList( text )\n##\n\n self.bind = bool( bind )\n\n # specific fields\n\n self.author = \"\"\n\n self.bibtex = \"\"\n\n self.description = \"\"\n\n self.key = \"\"\n\n self.title = \"\"\n\n self.year = \"\"\n \n # a text coming from a table file\n \n if utilities.isEmpty( text ) : return [], []\n \n lines = utilities.asciiToLines( text )\n\n if utilities.isEmpty( lines ) : return [], []\n\n attributes = []\n\n values = []\n\n for line in lines :\n\n attribute, value = self.lineToAttributeValue( line )\n\n attribute, value = self.normalizeAttributeValue( attribute, value )\n\n if utilities.isEmpty( attribute ) : continue\n\n if utilities.isEmpty( value ) : continue\n\n if attribute in attributes : continue\n\n attributes.append( attribute )\n\n values.append( value )\n\n # normalizations using multiple fields. Only now, when the attribute value list is completed\n\n\n volume = self.getValue( \"volume\", attributes, values )\n\n number = self.getValue( \"number\", attributes, values )\n\n if ( not utilities.isEmpty( volume ) ) or ( not utilities.isEmpty( number ) ) :\n\n volume, number = self.normalizeVolumeNumber( volume, number )\n\n self.setValue( \"volume\", volume, attributes, values )\n\n self.setValue( \"number\", number, attributes, values )\n\n\n year = self.getValue( \"year\", attributes, values )\n\n month = self.getValue (\"month\", attributes, values )\n\n if ( not utilities.isEmpty( year ) ) or ( not utilities.isEmpty( month ) ) :\n\n month, year = self.normalizeMonthYear( month, year )\n\n## print \" ->\", month, year\n\n self.setValue( \"month\", month, attributes, values )\n\n self.setValue( \"year\", year, attributes, values )\n\n\n author = self.getValue( \"author\", attributes, values )\n\n editor = self.getValue (\"month\", attributes, values )\n\n if ( not utilities.isEmpty( editor ) ) and ( editor == author ) : self.deleteValue( \"editor\" )\n\n # sets external variables\n \n self.setVariables()\n\n return attributes, values",
"def citation(**kwargs):\n print_citation()",
"def convert_citation_text_lines_to_info(text):\n lines = text.strip().split(\"\\n\")\n info = {\n \"_citation_id\": lines[0].strip(),\n }\n found = False\n other = []\n\n if lines[-1].strip().startswith(\">\") and len(lines) >= 2:\n # [N] > varname\n info[\"_pyref\"] = lines[-1][1:].strip()\n info[\"_work_type\"] = \"Ref\"\n found = True\n other = lines[1:-1]\n elif lines[-1].strip().startswith(\"http\") and len(lines) >= 3:\n # [N] WebName http://...\n info[\"title\"] = lines[1].strip()\n info[\"url\"] = lines[-1].strip()\n info[\"_work_type\"] = \"Site\"\n found = True\n other = lines[2:-1]\n elif len(lines) >= 5 and lines[-1].strip().isnumeric():\n # [N] author name place other year\n info[\"author\"] = lines[1].strip()\n info[\"title\"] = lines[2].strip()\n split = lines[3].strip().split(\"=\")\n if len(split) > 1:\n info[split[0]] = \"=\".join(split[1:])\n else:\n info[\"booktitle\"] = lines[3].strip()\n info[\"year\"] = int(lines[-1].strip())\n info[\"_work_type\"] = \"Work\"\n found = True\n other = lines[4:-1]\n if found:\n for num, line in zip(range(1, 10000), other):\n line = line.strip()\n split = line.split(\"=\")\n if len(split) > 1:\n info[split[0]] = \"=\".join(split[1:])\n else:\n info[\"_other{}\".format(num)] = line\n return info\n \n return \"Incomplete\"",
"def extractFromObject( self, object, recursive = True ):\r\n if self._caster is not None : object = self._caster(object)\r\n for member in self._members:\r\n if not recursive and member.isObject: continue\r\n self.__setattr__( member.name, member.get(object) )",
"def _transform(self, document):\n transformed = {\n \"@context\": \"http://schema.org\",\n \"@type\": \"JobPosting\",\n }\n basic_mappings = {\n 'title': 'PositionTitle',\n 'qualifications': 'QualificationSummary',\n 'url': 'PositionURI',\n }\n for target_key, source_key in basic_mappings.items():\n transformed[target_key] = document.get(source_key)\n\n # many of the fields we want are in UserArea->Details\n # sadly most of these never seem to show up in real data,\n # but they are mentioned in the API docs so they are worth checking for\n user_details = document.get('UserArea', {}).get('Details', {})\n transformed['description'] = user_details.get('JobSummary', None)\n transformed['educationRequirements'] = \\\n user_details.get('Education', None)\n transformed['responsibilities'] = user_details.get('MajorDuties', None)\n transformed['experienceRequirements'] = \\\n user_details.get('Requirements', None)\n transformed['jobBenefits'] = user_details.get('Benefits', None)\n\n # employment type, salary, and location are stored in lists;\n # pick the first one\n position_schedules = document.get('PositionSchedule', [])\n if len(position_schedules) > 0:\n transformed['employmentType'] = \\\n position_schedules[0].get('Name', None)\n\n remuneration = document.get('PositionRemuneration', [])\n if len(remuneration) > 0:\n transformed['baseSalary'] = {\n '@type': 'MonetaryAmount',\n 'minValue': float(remuneration[0].get('MinimumRange', None)),\n 'maxValue': float(remuneration[0].get('MaximumRange', None))\n }\n\n locations = document.get('PositionLocation', [])\n if len(locations) > 0:\n transformed['jobLocation'] = {\n '@type': 'Place',\n 'address': {\n '@type': 'PostalAddress',\n 'addressLocality': locations[0].get('CityName', ''),\n 'addressRegion': locations[0].get('CountrySubDivisionCode', ''),\n 'addressCountry': locations[0].get('CountryCode', ''),\n }\n }\n\n # both organization and the department within the org. are defined\n transformed['hiringOrganization'] = {\n '@type': 'Organization',\n 'name': document.get('OrganizationName')\n }\n department_name = document.get('DepartmentName', None)\n if department_name:\n transformed['hiringOrganization']['department'] = {\n '@type': 'Organization',\n 'name': department_name\n }\n\n if not document['PositionStartDate']:\n transformed['datePosted'] = None\n else:\n start = datetime.strptime(\n document['PositionStartDate'],\n self.DATE_FORMAT\n )\n transformed['datePosted'] = start.date().isoformat()\n if not document['PositionEndDate']:\n transformed['validThrough'] = None\n else:\n end = datetime.strptime(\n document['PositionEndDate'],\n self.DATE_FORMAT\n )\n transformed['validThrough'] = end.isoformat()\n\n return transformed",
"def _prepare_object_values(self, row):\n prepared_row = dict()\n prepared_row.update(row)\n self.prepare_object_values(prepared_row)\n return prepared_row",
"def enrich_citation_model_from_pmc(manager: Manager, citation: models.Citation, csl: Mapping[str, Any]) -> bool:\n citation.title = csl.get(\"title\")\n citation.journal = csl.get(\"container-title\")\n citation.volume = csl.get(\"volume\")\n # citation.issue = csl['issue']\n citation.pages = csl.get(\"page\")\n citation.article_type = csl.get(\"type\")\n\n for author in csl.get(\"author\", []):\n try:\n author_name = f'{author[\"given\"]} {author[\"family\"]}'\n except KeyError:\n print(f\"problem with author in pmc:{citation.db_id}\", author)\n continue\n author_model = manager.get_or_create_author(author_name)\n if author_model not in citation.authors:\n citation.authors.append(author_model)\n\n if citation.authors:\n citation.first = citation.authors[0]\n citation.last = citation.authors[-1]\n\n issued = csl.get(\"issued\")\n if issued is not None:\n date_parts = issued[\"date-parts\"][0]\n if len(date_parts) == 3:\n citation.date = date(year=date_parts[0], month=date_parts[1], day=date_parts[2])\n elif len(date_parts) == 2:\n citation.date = date(year=date_parts[0], month=date_parts[1], day=1)\n elif len(date_parts) == 1:\n citation.date = date(year=date_parts[0], month=1, day=1)\n else:\n logger.warning(\"not sure about date parts: %s\", date_parts)\n\n return True",
"def transformMARC(record, marcRels):\n doabID = record[0]\n dateIssued = record[1]\n marcRecord = record[2]\n logger.info('Transforming record {} into a SFR object'.format(doabID))\n\n work = WorkRecord()\n instance = InstanceRecord()\n item = Format(source='doab', contentType='ebook')\n\n # Add issued date to work record\n work.addClassItem('dates', Date, **{\n 'display_date': dateIssued,\n 'date_range': dateIssued,\n 'date_type': 'issued'\n })\n\n # All DOAB records have the same CreativeCommons license, assign this\n # to Instance/Item records\n rights = Rights(\n source='doab',\n license='https://creativecommons.org/licenses/by-nc-nd/4.0/',\n statement='Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International'\n )\n instance.rights.append(rights)\n item.rights.append(rights)\n\n # A single DOAB identifier can be assigned to the work/instance/item records\n doabIdentifier = Identifier(\n type='doab',\n identifier=doabID,\n weight=1\n )\n work.identifiers.append(doabIdentifier)\n instance.identifiers.append(doabIdentifier)\n item.identifiers.append(doabIdentifier)\n\n # Code Fields (Identifiers)\n logger.debug('Parsing 0X0-0XX Fields')\n controlData = [\n ('010', 'identifiers', 'a', 'lccn'),\n ('020', 'identifiers', 'a', 'isbn'),\n ('022', 'identifiers', 'a', 'issn'),\n ('050', 'identifiers', 'a', 'lcc'),\n ('082', 'identifiers', 'a', 'ddc'),\n ('010', 'identifiers', 'z', 'lccn'),\n ('020', 'identifiers', 'z', 'isbn'),\n ('022', 'identifiers', 'z', 'issn'),\n ('050', 'identifiers', 'z', 'lcc'),\n ('082', 'identifiers', 'z', 'ddc')\n ]\n for field in controlData:\n extractSubfieldValue(marcRecord, work, field)\n extractSubfieldValue(marcRecord, instance, field)\n\n # Author/Creator Fields\n logger.debug('Parsing 100, 110 & 111 Fields')\n agentData = ['100', '110', '111', '700', '710', '711']\n for agentField in agentData:\n extractAgentValue(marcRecord, work, agentField, marcRels)\n\n # Title Fields\n logger.debug('Parsing 21X-24X Fields')\n titleData = [\n ('210', 'alt_titles', 'a'),\n ('222', 'alt_titles', 'a'),\n ('242', 'alt_titles', 'a'),\n ('246', 'alt_titles', 'a'),\n ('247', 'alt_titles', 'a'),\n ('245', 'title', 'a'),\n ('245', 'sub_title', 'b')\n ]\n for field in titleData:\n extractSubfieldValue(marcRecord, work, field)\n extractSubfieldValue(marcRecord, instance, field)\n\n # Edition Fields\n logger.debug('Parsing Edition (250 & 260) Fields')\n editionData = [\n ('250', 'edition_statement', 'a'),\n ('250', 'edition_statement', 'b'),\n ('260', 'pub_place', 'a'),\n ('260', 'pub_date', 'c'),\n ('260', 'agents', 'b', 'publisher'),\n ('260', 'agents', 'f', 'manufacturer'),\n ('264', 'copyright_date', 'c')\n ]\n for field in editionData:\n extractSubfieldValue(marcRecord, instance, field)\n\n # Physical Details\n # TODO Load fields into items/measurements?\n logger.debug('Parsing Extent (300) Field')\n extentData = [\n ('300', 'extent', 'a'),\n ('300', 'extent', 'b'),\n ('300', 'extent', 'c'),\n ('300', 'extent', 'e'),\n ('300', 'extent', 'f')\n ]\n for field in extentData:\n extractSubfieldValue(marcRecord, instance, field)\n\n # Series Details\n logger.debug('Parsing Series (490) Field')\n seriesData = [\n ('490', 'series', 'a'),\n ('490', 'series_position', 'v')\n ]\n for field in seriesData:\n extractSubfieldValue(marcRecord, work, field)\n\n # Notes/Description details\n # TODO What fields should we bring in?\n logger.debug('Parsing TOC (505) Field')\n tocData = [\n ('505', 'table_of_contents', 'a'),\n ('520', 'summary', 'a')\n ]\n for field in tocData:\n 
extractSubfieldValue(marcRecord, instance, field)\n\n # Language Fields\n if len(marcRecord['546']) > 0:\n for lang in marcRecord['546'][0].subfield('a'):\n langs = re.split(r'/|\\|', lang.value)\n for language in langs:\n logger.debug('Adding language {} to work and instance'.format(language))\n langObj = pycountry.languages.get(name=language.strip())\n if langObj is None or langObj.alpha_3 == 'und':\n logger.warning('Unable to parse language {}'.format(language))\n continue\n sfrLang = Language(\n language=language,\n iso_2=langObj.alpha_2,\n iso_3=langObj.alpha_3\n )\n work.language.append(sfrLang)\n instance.language.append(sfrLang)\n\n # Subject Details\n logger.debug('Parsing 6XX Subject Fields')\n subjectData = ['600', '610', '648', '650', '651', '655', '656', '657']\n for subjectType in subjectData:\n extractSubjects(marcRecord, work, subjectType)\n\n # Eletronic Holding Details\n logger.debug('Parsing 856 (Electronic Holding) Field')\n extractHoldingsLinks(marcRecord['856'], instance, item)\n\n # TODO Load data for these fields\n # 76X-78X\n # 80X-83X\n instance.formats.append(item)\n work.instances.append(instance)\n return work, doabID",
"def to_as1(obj, type=None):\n if not obj:\n return {}\n\n type = obj.get('$type') or type\n if not type:\n raise ValueError('Bluesky object missing $type field')\n\n # TODO: once we're on Python 3.10, switch this to a match statement!\n if type in ('app.bsky.actor.defs#profileView',\n 'app.bsky.actor.defs#profileViewBasic'):\n images = [{'url': obj.get('avatar')}]\n banner = obj.get('banner')\n if banner:\n images.append({'url': obj.get('banner'), 'objectType': 'featured'})\n\n handle = obj.get('handle')\n did = obj.get('did')\n\n ret = {\n 'objectType': 'person',\n 'id': did,\n 'url': (Bluesky.user_url(handle) if handle\n else did_web_to_url(did) if did and did.startswith('did:web:')\n else None),\n 'displayName': obj.get('displayName'),\n 'summary': obj.get('description'),\n 'image': images,\n }\n\n elif type == 'app.bsky.feed.post':\n text = obj.get('text', '')\n\n # convert facets to tags\n tags = []\n for facet in obj.get('facets', []):\n tag = {}\n\n for feat in facet.get('features', []):\n if feat.get('$type') == 'app.bsky.richtext.facet#link':\n tag.update({\n 'objectType': 'article',\n 'url': feat.get('uri'),\n })\n elif feat.get('$type') == 'app.bsky.richtext.facet#mention':\n tag.update({\n 'objectType': 'mention',\n 'url': Bluesky.user_url(feat.get('did')),\n })\n\n index = facet.get('index', {})\n # convert indices from UTF-8 encoded bytes to Unicode chars (code points)\n # https://github.com/snarfed/atproto/blob/5b0c2d7dd533711c17202cd61c0e101ef3a81971/lexicons/app/bsky/richtext/facet.json#L34\n byte_start = index.get('byteStart')\n if byte_start is not None:\n tag['startIndex'] = len(text.encode()[:byte_start].decode())\n byte_end = index.get('byteEnd')\n if byte_end is not None:\n tag['displayName'] = text.encode()[byte_start:byte_end].decode()\n tag['length'] = len(tag['displayName'])\n\n tags.append(tag)\n\n in_reply_to = obj.get('reply', {}).get('parent', {}).get('uri')\n\n ret = {\n 'objectType': 'comment' if in_reply_to else 'note',\n 'content': text,\n 'inReplyTo': [{\n 'id': in_reply_to,\n 'url': at_uri_to_web_url(in_reply_to),\n }],\n 'published': obj.get('createdAt', ''),\n 'tags': tags,\n }\n\n elif type in ('app.bsky.feed.defs#postView', 'app.bsky.embed.record#viewRecord'):\n ret = to_as1(obj.get('record') or obj.get('value'))\n author = obj.get('author') or {}\n uri = obj.get('uri')\n ret.update({\n 'id': uri,\n 'url': (at_uri_to_web_url(uri, handle=author.get('handle'))\n if uri.startswith('at://') else None),\n 'author': to_as1(author, type='app.bsky.actor.defs#profileViewBasic'),\n })\n\n # convert embeds to attachments\n for embed in util.get_list(obj, 'embeds') + util.get_list(obj, 'embed'):\n embed_type = embed.get('$type')\n\n if embed_type == 'app.bsky.embed.images#view':\n ret.setdefault('image', []).extend(to_as1(embed))\n\n elif embed_type in ('app.bsky.embed.external#view',\n 'app.bsky.embed.record#view'):\n ret.setdefault('attachments', []).append(to_as1(embed))\n\n elif embed_type == 'app.bsky.embed.recordWithMedia#view':\n ret.setdefault('attachments', []).append(to_as1(\n embed.get('record', {}).get('record')))\n media = embed.get('media')\n media_type = media.get('$type')\n if media_type == 'app.bsky.embed.external#view':\n ret.setdefault('attachments', []).append(to_as1(media))\n elif media_type == 'app.bsky.embed.images#view':\n ret.setdefault('image', []).extend(to_as1(media))\n else:\n assert False, f'Unknown embed media type: {media_type}'\n\n elif type == 'app.bsky.embed.images#view':\n ret = [{\n 'url': img.get('fullsize'),\n 
'displayName': img.get('alt'),\n } for img in obj.get('images', [])]\n\n elif type == 'app.bsky.embed.external#view':\n ret = to_as1(obj.get('external'), type='app.bsky.embed.external#viewExternal')\n\n elif type == 'app.bsky.embed.external#viewExternal':\n ret = {\n 'objectType': 'link',\n 'url': obj.get('uri'),\n 'displayName': obj.get('title'),\n 'summary': obj.get('description'),\n 'image': obj.get('thumb'),\n }\n\n elif type == 'app.bsky.embed.record#view':\n record = obj.get('record')\n return to_as1(record) if record else None\n\n elif type == 'app.bsky.embed.record#viewNotFound':\n return None\n\n elif type in ('app.bsky.embed.record#viewNotFound',\n 'app.bsky.embed.record#viewBlocked'):\n return None\n\n elif type == 'app.bsky.feed.defs#feedViewPost':\n ret = to_as1(obj.get('post'), type='app.bsky.feed.defs#postView')\n reason = obj.get('reason')\n if reason and reason.get('$type') == 'app.bsky.feed.defs#reasonRepost':\n ret = {\n 'objectType': 'activity',\n 'verb': 'share',\n 'object': ret,\n 'actor': to_as1(reason.get('by'), type='app.bsky.actor.defs#profileViewBasic'),\n }\n\n elif type == 'app.bsky.graph.follow':\n ret = {\n 'objectType': 'activity',\n 'verb': 'follow',\n 'actor': {\n 'url': obj.get('subject'),\n },\n }\n\n elif type == 'app.bsky.feed.defs#threadViewPost':\n return to_as1(obj.get('post'), type='app.bsky.feed.defs#postView')\n\n elif type == 'app.bsky.feed.defs#generatorView':\n uri = obj.get('uri')\n ret = {\n 'objectType': 'service',\n 'id': uri,\n 'url': at_uri_to_web_url(uri),\n 'displayName': f'Feed: {obj.get(\"displayName\")}',\n 'summary': obj.get('description'),\n 'image': obj.get('avatar'),\n 'author': to_as1(obj.get('creator'), type='app.bsky.actor.defs#profileView'),\n }\n\n else:\n raise ValueError(f'Bluesky object has unknown $type: {type}')\n\n return util.trim_nulls(ret)",
"def _add_citation(self, object_id, citation_type):\n citations_el = self.root.find('citations')\n # We need to specify an id, so count how many we have and iterate one\n citation_id = len(citations_el.findall('citation')) + 1\n\n # One citation per figure\n citation_el = ET.SubElement(citations_el, 'citation', {\n 'id': str(citation_id)\n })\n self._add_elements(citation_el, ['object_id'], str(object_id))\n self._add_elements(citation_el, ['citation_type'], citation_type)\n return citation_id",
"def get_author(self, __data)->Author:\n author: Author = {'container_type': 'Author'}\n author['filled'] = []\n if isinstance(__data, str):\n author['scholar_id'] = __data\n author['source'] = AuthorSource.AUTHOR_PROFILE_PAGE\n else:\n author['source'] = AuthorSource.SEARCH_AUTHOR_SNIPPETS\n author['scholar_id'] = re.findall(_CITATIONAUTHRE, __data('a')[0]['href'])[0]\n\n pic = '/citations?view_op=medium_photo&user={}'.format(author['scholar_id'])\n author['url_picture'] = _HOST.format(pic)\n\n name_class = self._find_tag_class_name(__data, 'h3', 'name')\n author['name'] = __data.find('h3', class_=name_class).text\n\n aff_class = self._find_tag_class_name(__data, 'div', 'aff')\n affiliation = __data.find('div', class_=aff_class)\n if affiliation:\n author['affiliation'] = affiliation.text\n\n email_class = self._find_tag_class_name(__data, 'div', 'eml')\n email = __data.find('div', class_=email_class)\n if email:\n author['email_domain'] = re.sub(_EMAILAUTHORRE, r'@', email.text)\n\n int_class = self._find_tag_class_name(__data, 'a', 'one_int')\n if int_class:\n interests = __data.find_all('a', class_=int_class)\n author['interests'] = [i.text.strip() for i in interests]\n else:\n author['interests'] = []\n\n citedby_class = self._find_tag_class_name(__data, 'div', 'cby')\n citedby = __data.find('div', class_=citedby_class)\n if citedby and citedby.text != '':\n author['citedby'] = int(citedby.text[9:])\n\n return author",
"def bibtex_value(self) -> Dict:\n pass",
"def _prep_obj(obj, is_enc=True):\n\n if not isinstance(obj, Iterable):\n obj = [obj]\n return [o for o in obj if o is not None and hasattr(o, 'attr') and hasattr(o, 'rel') and o.is_enc == is_enc]",
"def get_citation_as_bibtex(self, newline=\"<br/>\", use_hyperlinks=True):\n bibtex = \"\"\n\n if self.pub_venue_type is self.JOURNAL or\\\n self.pub_venue_type is self.ARTICLE:\n bibtex += \"@article{\"\n else:\n bibtex += \"@inproceedings{\"\n\n\n bibtex += self.get_bibtex_id() + newline\n\n # start author block\n bibtex += \" author = {\"\n\n author_idx = 0\n num_authors = self.authors.count()\n for author in self.authors.all():\n citation_name = author.get_citation_name(full_name=True)\n bibtex += citation_name\n\n if (author_idx + 1) < num_authors:\n bibtex += \" and \"\n\n author_idx += 1\n bibtex += \"}\" + newline\n # end author block\n\n bibtex += \" title={{{}}},{}\".format(self.title, newline)\n bibtex += \" booktitle={{{}}},{}\".format(self.book_title, newline)\n bibtex += \" booktitleshort={{{}}},{}\".format(self.book_title_short, newline)\n\n if self.series:\n bibtex += \" series = {\" + self.series + \"},\"\n\n bibtex += \" year={{{}}},{}\".format(self.date.year, newline)\n\n if self.isbn:\n bibtex += \" isbn={{{}}},{}\".format(self.isbn, newline)\n\n if self.geo_location:\n bibtex += \" location={{{}}},{}\".format(self.geo_location, newline)\n\n if self.page_num_start and self.page_num_end:\n bibtex += \" pages={{{}--{}}},{}\".format(self.page_num_start, self.page_num_end, newline)\n\n if self.num_pages:\n bibtex += \" numpages={{{}}},{}\".format(self.num_pages, newline)\n\n if self.doi:\n if use_hyperlinks:\n bibtex += \" doi={{<a href='{}'>{}</a>}},{}\".format(self.doi, self.doi, newline)\n else:\n bibtex += \" doi={{{}}},{}\".format(self.doi, newline)\n\n if self.official_url:\n if use_hyperlinks:\n bibtex += \" url={{<a href='{}'>{}</a>}},{}\".format(self.official_url, self.official_url, newline)\n else:\n bibtex += \" url={{{}}},{}\".format(self.official_url, newline)\n\n if self.acmid:\n bibtex += \" acmid={{{}}},{}\".format(self.acmid, newline)\n\n if self.publisher:\n bibtex += \" publisher={{{}}},{}\".format(self.publisher, newline)\n\n bibtex += \"}\"\n return bibtex",
"def sol_attributes_from(opportunity, solicitation: Solicitation):\n solicitation.noticeData = datetime_to_string_in(opportunity)\n solicitation.noticeType = opportunity.get('notice type')\n solicitation.solNum = opportunity.get('solnbr')\n solicitation.agency = opportunity.get('agency')\n solicitation.date = opportunity.get('postedDate')\n solicitation.compliant = opportunity.get('compliant')\n solicitation.office = opportunity.get('office')\n # TODO: properly set estar category\n estar = \"yes\" if random() < .5 else \"no\"\n solicitation.category_list = {\"value\": \"yes\", \"it\": \"yes\", \"estar\": estar }\n solicitation.undetermined = False\n solicitation.title = opportunity.get('subject')\n solicitation.url = opportunity.get('url')\n solicitation.contactInfo = opportunity.get('emails')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
We set the adapted methods in the object's dict
|
def __init__(self, obj, adapted_methods):
    self.obj = obj
    # Merging the mapping into the instance __dict__ exposes each adapted
    # method under its new name directly on this object.
    self.__dict__.update(adapted_methods)
    for key in self.__dict__:
        print(key, self.__dict__[key])
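A minimal usage sketch, assuming the __init__ above belongs to an Adapter class; Dog, bark, and make_noise are hypothetical names, and the debug print loop is omitted here for brevity:

class Dog:
    def bark(self):
        return "woof"

class Adapter:
    def __init__(self, obj, adapted_methods):
        self.obj = obj
        self.__dict__.update(adapted_methods)

dog = Dog()
adapted = Adapter(dog, {"make_noise": dog.bark})
print(adapted.make_noise())  # "woof" -- the alias resolves through the instance __dict__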
|
[
"def __init__(self, obj, **adapted_methods):\n self.object = obj\n self.__dict__.update(adapted_methods)",
"def _include_redis_methods(self, redis):\n for attr in dir(redis):\n value = getattr(redis, attr)\n if attr.startswith('_') or not callable(value):\n continue\n self.__dict__[attr] = value",
"def __persistent_methods(self, obj):\n for name, method in inspect.getmembers(obj, inspect.ismethod):\n setattr(obj, name, self.__persist(method))\n if hasattr(obj.meta, 'client'):\n for name, method in inspect.getmembers(obj.meta.client, inspect.ismethod):\n setattr(obj.meta.client, name, self.__persist(method))\n return obj",
"def updateMethods(self, klass):\n self.generateMethods(klass, self.schema.fields())",
"def replace_instance_methods(self, instance):\n # Declare fit and predict methods in this way so that they\n # remain bound to the MLBlock instance's model.\n fit_method_name = self.block_json['fit']\n produce_method_name = self.block_json['produce']\n build_method = self.build_mlblock_model\n\n def fit(self, *args, **kwargs):\n # Only fit if fit method provided.\n if fit_method_name:\n getattr(self.model, fit_method_name)(*args, **kwargs)\n\n instance.fit = fit.__get__(instance, MLBlock)\n\n def produce(self, *args, **kwargs):\n # Every MLBlock needs a produce method.\n return getattr(self.model, produce_method_name)(*args, **kwargs)\n\n instance.produce = produce.__get__(instance, MLBlock)\n\n def update_model(self, fixed_hyperparams, tunable_hyperparams):\n self.model = build_method(fixed_hyperparams, tunable_hyperparams)\n\n instance.update_model = update_model.__get__(instance, MLBlock)",
"def _update_class_for_magic_builtins( self, obj, name):\r\n if not (name.startswith('__') and name.endswith('__') and len(name) > 4):\r\n return\r\n original = getattr(obj.__class__, name)\r\n def updated(self, *kargs, **kwargs):\r\n if (hasattr(self, '__dict__') and type(self.__dict__) is dict and\r\n name in self.__dict__):\r\n return self.__dict__[name](*kargs, **kwargs)\r\n else:\r\n return original(self, *kargs, **kwargs)\r\n setattr(obj.__class__, name, updated)\r\n if _get_code(updated) != _get_code(original):\r\n self._create_placeholder_mock_for_proper_teardown(\r\n obj.__class__, name, original)",
"def add_method(self, obj, met_name):\n if obj not in self:\n self[obj] = [met_name]\n else:\n self[obj].append(met_name)",
"def __init__(self):\n\n super(MethodDict, self).__init__()\n\n self.default = None",
"def proxyto(target, source, allowed_specials=[]):\n\n # set all non-hidden methods\n for k in dir(source):\n k_is_special = k.startswith(\"__\") and k.endswith(\"__\")\n copy_k = not k_is_special or k in allowed_specials\n if not hasattr(target, k) and copy_k:\n setattr(target, k, getattr(source, k))\n\n return target",
"def _hook_keras_methods(self, keras_type: type):\n\n for attr in self.to_auto_overload[keras_type]:\n # if we haven't already overloaded this function\n if f\"native_{attr}\" not in dir(keras_type):\n native_method = getattr(keras_type, attr)\n setattr(keras_type, f\"native_{attr}\", native_method)\n new_method = self._get_hooked_method(attr)\n setattr(keras_type, attr, new_method)",
"def _modify_state(self, method=None):\n for _, val in self.managed_objects.items():\n try:\n getattr(val, method)()\n except AttributeError:\n pass",
"def _insert_functions(cls, obj: CommonTypes.MLRunInterfaceableType):\n # Insert the functions / methods:\n for function_name in [*cls._METHODS, *cls._FUNCTIONS]:\n # Verify there is no function / method with the same name in the object:\n assert not hasattr(obj, function_name), (\n f\"Can't insert the function / method '{function_name}' as the object already have a function / method \"\n f\"with the same name. To replace a function / method, add the name of the function / method to the \"\n f\"'_REPLACED_METHODS' / '_REPLACED_METHODS' list and follow the instructions documented.\"\n )\n # Get the function / method:\n func = getattr(cls, function_name)\n # If the function is a method and not a function (appears in '_METHODS' and not '_FUNCTIONS'), set the\n # 'self' to the object:\n if function_name in cls._METHODS:\n func = MethodType(func, obj)\n # Insert the function / method to the object:\n setattr(obj, function_name, func)",
"def set_methods(self):\n for api_list in self.api_setting():\n api = {}\n api[\"method_name\"], api[\"path\"], api[\"http_method\"] = api_list\n\n def _method(api=api, id=\"\", **params):\n \"\"\" Check if the parameters include an ID\n This change the url construction\n \"\"\"\n if id:\n return getattr(self,\n api[\"http_method\"])(str.join('', (api[\"path\"], id)),\n params\n )\n else:\n return getattr(self,\n api[\"http_method\"])(api[\"path\"],\n params\n )\n\n setattr(self, api[\"method_name\"], _method)",
"def setAutoMethods(self):\n for m in IMultiEngine:\n IM = IMultiEngine[m]\n #first setup non-All methods\n if callable(IM) and m[-3:] != 'All'\\\n and getattr(self, m, None) is None:\n #only work on methods, not attributes, and only on methods\n #not already defined\n eSig = IEngineComplete[m].getSignatureString()\n defs = \"\"\"\ndef autoMethod(self, %s:\n '''%s'''\n log.msg('%s on %%s' %%targets)\n engines = self.engineList(targets)\n l = []\n for e in engines:\n l.append(e.%s%s)\n return gatherBoth(l)\n\"\"\"%(IM.getSignatureString()[1:], IM.getDoc(), IM.getName(), m, eSig)\n try:\n exec(defs)\n setattr(self, m, instancemethod(autoMethod, self, self.__class__))\n #del autoMethod\n except:\n log.msg(\"failed autogen method %s\" %m)\n raise\n addAllMethods(self)",
"def _weave_method(self, obj, met_name, aspect):\n weaved_methods = self.__woven_dict[obj]\n if met_name not in weaved_methods:\n self.__woven_dict[obj][met_name] = {\n 'original' : getattr(obj, met_name),\n 'aspects' : []\n }\n try:\n self._register_aspect(obj, met_name, aspect)\n except AlreadyAspectedError, excpt:\n print \"already aspected : \", excpt\n return\n wrap_method(aspect, obj, met_name)",
"def add_methods_tensor_syft() -> None:\n # third party\n import torch\n\n for method in tensor.METHODS_TO_ADD:\n if getattr(torch.Tensor, method.__name__, None) is not None:\n raise ValueError(f\"Method {method.__name__} already exists in tensor!\")\n setattr(torch.Tensor, method.__name__, method)",
"def extend(cls, api):\n if cls.EXTEND:\n for name, func in api.__dict__.iteritems():\n if name.startswith(\"_\"): continue\n setattr(cls, name, MethodType(func, None, cls))\n\n return cls.EXTEND",
"def __init__(self, methods , verbose = True ):\n self.methods = methods\n self.verbose = verbose",
"def addMethod(self, method):\r\n self.methods = self.methods + (method,)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the CryBlend properties of a material name as a dict, or None if the name is not a valid CryBlend material name.
|
def extract_cryblend_properties(materialname):
if is_cryblend_material(materialname):
groups = re.findall("(.+)__([0-9]+)__(.*)__(phys[A-Za-z0-9]+)", materialname)
properties = {}
properties["ExportNode"] = groups[0][0]
properties["Number"] = int(groups[0][1])
properties["Name"] = groups[0][2]
properties["Physics"] = groups[0][3]
return properties
return None
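An illustrative call against the parser above; is_cryblend_material is assumed to test the same four-part pattern, and the material names are made up:

import re

def is_cryblend_material(materialname):
    # assumed helper: true when the name matches ExportNode__Number__Name__physXxx
    return re.match("(.+)__([0-9]+)__(.*)__(phys[A-Za-z0-9]+)$", materialname) is not None

print(extract_cryblend_properties("Cube__3__Rock__physDefault"))
# {'ExportNode': 'Cube', 'Number': 3, 'Name': 'Rock', 'Physics': 'physDefault'}
print(extract_cryblend_properties("not_a_cryblend_name"))  # None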
|
[
"def material_name(self):\n try:\n self._material_name = self._layer.GetMaterial()\n except:\n pass\n return self._material_name",
"def export_material_property(self, name='', flags=0x0001,\n ambient=(1.0, 1.0, 1.0), diffuse=(1.0, 1.0, 1.0),\n specular=(0.0, 0.0, 0.0), emissive=(0.0, 0.0, 0.0),\n gloss=10.0, alpha=1.0, emitmulti=1.0):\n\n # create block (but don't register it yet in self.blocks)\n matprop = NifFormat.NiMaterialProperty()\n\n # list which determines whether the material name is relevant or not\n # only for particular names this holds, such as EnvMap2\n # by default, the material name does not affect rendering\n specialnames = (\"EnvMap2\", \"EnvMap\", \"skin\", \"Hair\",\n \"dynalpha\", \"HideSecret\", \"Lava\")\n\n # hack to preserve EnvMap2, skinm, ... named blocks (even if they got\n # renamed to EnvMap2.xxx or skin.xxx on import)\n if self.properties.game in ('OBLIVION', 'FALLOUT_3'):\n for specialname in specialnames:\n if (name.lower() == specialname.lower()\n or name.lower().startswith(specialname.lower() + \".\")):\n if name != specialname:\n self.warning(\"Renaming material '%s' to '%s'\"\n % (name, specialname))\n name = specialname\n\n # clear noname materials\n if name.lower().startswith(\"noname\"):\n self.warning(\"Renaming material '%s' to ''\" % name)\n name = \"\"\n\n matprop.name = name\n matprop.flags = flags\n matprop.ambient_color.r = ambient[0]\n matprop.ambient_color.g = ambient[1]\n matprop.ambient_color.b = ambient[2]\n matprop.diffuse_color.r = diffuse[0]\n matprop.diffuse_color.g = diffuse[1]\n matprop.diffuse_color.b = diffuse[2]\n matprop.specular_color.r = specular[0]\n matprop.specular_color.g = specular[1]\n matprop.specular_color.b = specular[2]\n matprop.emissive_color.r = emissive[0]\n matprop.emissive_color.g = emissive[1]\n matprop.emissive_color.b = emissive[2]\n matprop.glossiness = gloss\n matprop.alpha = alpha\n matprop.emit_multi = emitmulti\n\n # search for duplicate\n # (ignore the name string as sometimes import needs to create different\n # materials even when NiMaterialProperty is the same)\n for block in self.blocks:\n if not isinstance(block, NifFormat.NiMaterialProperty):\n continue\n\n # when optimization is enabled, ignore material name\n if self.EXPORT_OPTIMIZE_MATERIALS:\n ignore_strings = not(block.name in specialnames)\n else:\n ignore_strings = False\n\n # check hash\n first_index = 1 if ignore_strings else 0\n if (block.get_hash()[first_index:] ==\n matprop.get_hash()[first_index:]):\n self.warning(\n \"Merging materials '%s' and '%s'\"\n \" (they are identical in nif)\"\n % (matprop.name, block.name))\n return block\n\n # no material property with given settings found, so use and register\n # the new one\n return self.register_block(matprop)",
"def get_material_by_name(name):\n\n material = Material.query.filter(Material.name == name).one()\n\n return material",
"def filling_material_name(self):\n if self._layer_type == 0 or self._layer_type == 2:\n try:\n self._filling_material_name = self._layer.GetFillMaterial()\n except:\n pass\n return self._filling_material_name\n return \"\"",
"def getNamedMaterial(self, shaderNode):\n \n return '\\tNamedMaterial \"' + shaderNode.name() + '\"' #theMaterial.name()",
"def getMaskPlaneColor(name):\n\n if _maskPlaneColors.has_key(name):\n return _maskPlaneColors[name]\n else:\n return None",
"def makeMaterial(name, diffuse, specular, alpha):\n mat = bpy.data.materials.new(name)\n mat.diffuse_color = diffuse\n mat.diffuse_shader = 'LAMBERT'\n mat.diffuse_intensity = 1.0\n mat.specular_color = specular\n mat.specular_shader = 'COOKTORR'\n mat.specular_intensity = 0.5\n mat.alpha = alpha\n mat.use_transparency = True\n mat.ambient = 1\n return mat",
"def _convert_to_name_props(font):\n # `name` table should only store x.y version numbers\n # while font.revision could be any string\n try:\n version_number = to_number(font.revision)\n extra = ''\n except ValueError:\n version_number = 0.0\n extra = f'; {font.revision}'\n props = dict(\n # 0\n copyright=font.copyright,\n # 1\n familyName=font.family,\n # 2\n styleName=font.subfamily,\n # 3\n uniqueFontIdentifier=font.font_id or to_postscript_name(font.name),\n # 4\n fullName=font.name,\n # 5\n # must start with 'Version x.y'\n # but may contain additional info after `;`\n version=f'Version {version_number:1.1f}{extra}',\n # 6\n psName=to_postscript_name(font.name),\n # trademark (nameID 7)\n # 8\n manufacturer=font.foundry,\n # 9\n designer=font.author,\n # 10\n # description=font.description,\n # vendorURL (nameID 11)\n # designerURL (nameID 12)\n # 13\n licenseDescription=font.notice,\n # licenseInfoURL (nameID 14)\n # typographicFamily (nameID 16)\n # typographicSubfamily (nameID 17)\n # compatibleFullName (nameID 18)\n # sampleText (nameID 19)\n # postScriptCIDFindfontName (nameID 20)\n # wwsFamilyName (nameID 21)\n # wwsSubfamilyName (nameID 22)\n # lightBackgroundPalette (nameID 23)\n # darkBackgroundPalette (nameID 24)\n # variationsPostScriptNamePrefix (nameID 25)\n )\n return props",
"def makeMaterial(name, diffuse, specular, alpha,transpar=False):\n mat = bpy.data.materials.new(name)\n mat.diffuse_color = diffuse\n mat.diffuse_shader = 'LAMBERT' \n mat.diffuse_intensity = 1.0 \n mat.specular_color = specular\n mat.specular_shader = 'PHONG'\n mat.specular_intensity = 0.5\n mat.alpha = alpha\n mat.ambient = 1\n if transpar:\n mat.use_transparency = True\n return mat",
"def getColor(name):\n api_r_ = c_int()\n api_g_ = c_int()\n api_b_ = c_int()\n api_a_ = c_int()\n ierr = c_int()\n lib.gmshOptionGetColor(\n c_char_p(name.encode()),\n byref(api_r_),\n byref(api_g_),\n byref(api_b_),\n byref(api_a_),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshOptionGetColor returned non-zero error code: \",\n ierr.value)\n return (\n api_r_.value,\n api_g_.value,\n api_b_.value,\n api_a_.value)",
"def get_attrs(self, name=''):\n effects = self.get_compound_fx(name)\n fx_attrs = set()\n if self.exists(name) and 'attr' in self.map[name]:\n fx_attrs.add(self.map[name]['attr'])\n def simplify_transform_attr(attr_name):\n if 'location' in attr_name:\n return 'location'\n elif 'rotation' in attr_name:\n return 'rotation'\n elif 'scale' in attr_name:\n return 'scale'\n else:\n return ''\n if effects:\n for effect in effects:\n fx_attrs.add(simplify_transform_attr(effect['attr']))\n return list(fx_attrs)",
"def _get_materialProperties(self) -> \"adsk::core::Ptr< adsk::core::Properties >\" :\n return _core.Material__get_materialProperties(self)",
"def get_named_color(name: str) -> Optional[LinearColor]:\n blueprint = get_editor_blueprint()\n if blueprint:\n config = blueprint.get_config()\n color_config = config.get(\"colors\", {})\n hex_color = color_config.get(name)\n if hex_color:\n return LinearColor.from_hex(hex_color)",
"def get_materials_from_csv_file(file_name: str,\n throw_exception: bool = False) -> dict:\n\n materials_dictionary = {}\n try:\n with open(file_name, \"r\") as materials_file:\n csv_reader = csv.reader(materials_file)\n next(csv_reader, None) # skip first line (headers)\n for row in csv_reader:\n if row:\n name = row[0]\n material_properties = {}\n\n material_properties[\"rho\"] = float(row[2])\n material_properties[\"cp\"] = float(row[3])\n material_properties[\"lmbd\"] = float(row[4])\n\n materials_dictionary[name] = material_properties\n except FileNotFoundError:\n print(\"File not found - {}\".format(file_name))\n # Raising exception only when the data is really crucial\n # and we cannot afford to miss them\n if throw_exception:\n raise\n\n return materials_dictionary",
"def get_colormap(name=\"normal\"):\n name = __process_name(name)\n assert name in list_colorsets(), \"name should exist in \" + str(list_colorsets())\n\n return distinctipy.get_colormap(colors[name], name=\"distinctipy_\" + name)",
"def get_material_names (self, obj):\n index = 0\n mats = []\n for mat in obj.data.materials:\n mats.append (\"Material_%d\" % (index))\n return mats",
"def get_blend_info(self, blend):\n self.logger.debug(\"get_blend_info function was called\")\n\n blend_info = {} \n query=\"\"\"\n SELECT * FROM blends_metadata WHERE blend='{0}'\n \"\"\".format(blend)\n\n rows_generator = self.__execute_query(query)\n\n #get the blend info from the cursor\n info = rows_generator.next()\n\n if not info:\n self.logger.error(\"Blend: {0} not found, aborting\".format(blend))\n sys.exit(-1)\n\n #column name: 0 index of each desc list element\n desc = self.__get_cursor_description()\n\n for i, column in enumerate(desc):\n blend_info[column[0]] = info[i]\n\n return blend_info",
"def _nativeMaterial( self ):\r\n\t\treturn self._nativePointer.material",
"def get_blend_target_attribute(mesh_name=\"\", blend_node=\"\", target_name=\"\"):\n if not blend_node:\n blend_node = get_connected_blendshape_nodes(mesh_name)[0]\n return filter(lambda x: x[0].startswith(target_name), get_blendshape_targets(blend_node))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Simple SSE loss over the generated image and the content image
|
def content_loss(noise: torch.Tensor, image: torch.Tensor):
return 1/2. * torch.sum(torch.pow(noise - image, 2))
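A quick sanity check of the loss above; the tensor shape is an arbitrary assumption:

import torch

gen = torch.rand(1, 3, 64, 64)
print(content_loss(gen, gen))                    # tensor(0.) -- identical images give zero loss
print(content_loss(gen, torch.zeros_like(gen)))  # half the sum of squared pixel values, > 0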
|
[
"def get_loss(generation_img):\n generation_img = K.reshape(generation_img, [1, 300, 400, 3])\n return fn([generation_img])[0].astype('float64')",
"def mse_amm_img(args):\n x_, x_fake_, attn_map = args\n return K.mean(K.square((x_ - x_fake_) * attn_map), axis=[1,2,3], keepdims=False)",
"def _compute_loss(self, model_output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n pass",
"def total_variation_loss(x):\n a = K.square(x[:, :img_height - 1, :img_width - 1, :] - x[:, 1:, :img_width-1, :])\n b = K.square(x[:, :img_height - 1, :img_width - 1, :] - x[:, :img_height-1, 1:, :])\n return K.sum(K.pow(a + b, 1.25))",
"def _compute_loss(self, _block_id):\n raise NotImplementedError()",
"def hd_loss(seg_soft, gt, seg_dtm, gt_dtm):\n\n delta_s = (seg_soft[:,1,...] - gt.float()) ** 2\n s_dtm = seg_dtm[:,1,...] ** 2\n g_dtm = gt_dtm[:,1,...] ** 2\n dtm = s_dtm + g_dtm\n multipled = torch.einsum('bxy, bxy->bxy', delta_s, dtm)\n hd_loss = multipled.mean()\n\n return hd_loss",
"def construct_content_loss_fns(vgg_model, content_image, content_layers):\n \n trained = vgg_model(content_image, content_layers)\n n = [512]\n result = []\n for i in range(0,1):\n result.append(ContentLoss(trained[i].detach(), 1))\n return result",
"def mse(img1, img2): \n # TODO: implement this function.\n return np.mean(np.square(img1-img2))",
"def mse(img1, img2): \n # TODO: implement this function.\n err=np.square(np.subtract(img1,img2)).mean()\n return err",
"def compute_SSE(self):\n return np.linalg.norm(self.y - self.X.dot(self.beta), ord=2)**2",
"def mse_loss(\n input_: torch.FloatTensor,\n target: torch.FloatTensor,\n dispersion: torch.FloatTensor,\n) -> torch.FloatTensor:\n return torch.nn.functional.mse_loss(input_, target)",
"def restore_image_noise2self(model, x, bg_masks=None, kernel_size=3, bg_mat=None, is_train=False):\n y_pred = 0\n if bg_masks is None:\n size = x.shape[2:]\n if isinstance(kernel_size, int):\n kernel_size = [kernel_size]*len(size)\n masks = get_mask(size, kernel_size, start=None)\n bg_masks = [1-mask for mask in masks]\n with torch.set_grad_enabled(is_train):\n for bg_mask in bg_masks:\n input = x*bg_mask\n if bg_mat is not None:\n input += bg_mat * (1 - bg_mask)\n y_pred = y_pred + model(input) * (1-bg_mask)\n return y_pred",
"def loss_cal(self, S):\n N = self.opt.speaker_num\n M = self.opt.utter_num \n loss_type = self.opt.loss_type\n S_correct = torch.cat([S[i*M:(i+1)*M, i:(i+1)] for i in range(N)], dim=0) # colored entries in Fig.1\n \n if loss_type == \"ge2e_softmax\" or loss_type == \"seq_softmax\" or loss_type == \"union_softmax\":\n total = -torch.sum(S_correct-torch.log(torch.sum(torch.exp(S), dim=1, keepdim=True) + 1e-6))\n elif loss_type == \"ge2e_cosine_margin\" or loss_type == \"seq_cosine_margin\" or loss_type == \"union_cosine_margin\":\n S_correct_scale = torch.cat([S[i*M:(i+1)*M, i:(i+1)] * self.margin_s - self.margin_m for i in range(N)], dim=0) # colored entries in Fig.1\n S_all_scale = torch.cat([torch.cat([S[i*M:(i+1)*M, i:(i+1)] * self.margin_s - self.margin_m if i==j\n else S[i*M:(i+1)*M, j:(j+1)] * self.margin_s for j in range(N)], dim=1)\n for i in range(N)], dim=0)\n total = -torch.sum(S_correct_scale-torch.log(torch.sum(torch.exp(S_all_scale), dim=1, keepdim=True) + 1e-6))\n elif loss_type == \"ge2e_contrast\" or loss_type == \"seq_contrast\" or loss_type == \"union_contrast\":\n S_sig = torch.sigmoid(S)\n S_sig = torch.cat([torch.cat([0*S_sig[i*M:(i+1)*M, j:(j+1)] if i==j\n else S_sig[i*M:(i+1)*M, j:(j+1)] for j in range(N)], dim=1)\n for i in range(N)], dim=0)\n total = torch.sum(1-torch.sigmoid(S_correct)+torch.max(S_sig, dim=1, keepdim=True)[0])\n else:\n raise AssertionError(\"loss type should not be {} !\".format(loss_type))\n ##total = total / (N * M * N)\n return total",
"def reconstruct_loss_with_mse(assignment, labels, hard_assignment=None):\n reconstracted_labels = reconstruction(assignment, labels, hard_assignment)\n return torch.nn.functional.mse_loss(reconstracted_labels, labels)\n # return torch.nn.functional.l1_loss(reconstracted_labels, labels)",
"def style_reconstruction_loss(base, output, img_nrows, img_ncols):\n\tH, W, C = img_nrows, img_ncols, 3\n\tgram_base = gram_matrix(base)\n\tgram_output = gram_matrix(output)\n\tfactor = 1.0 / float((2*C*H*W)**2)\n\tout = factor * K.sum(K.square(gram_output - gram_base))\n\treturn out\n\t#return K.sum(K.square(gram_base - gram_output)) \n\tbase_mean = K.mean(gram_base)\n\toutput_mean = K.mean(gram_output)\n\tbase_sigma = K.sqrt(K.sum(K.square(gram_base)-K.square(base_mean)))\n\tprint(\"===========sigma\")\n\tprint(K.ndim(base_sigma))\n\tprint(\"===========sigma\")\n\toutput_sigma = K.sqrt(K.sum(K.square(gram_output)-K.square(output_mean)))\n\tmul = ((gram_base/255)*(gram_output/255)) *255*255\n\tcov = K.sum( mul - base_mean*output_mean)\n\t###### ziyu\n\tC1 = K.variable(0)\n\tC2 = K.variable(100)\n\tC3 = K.variable(1)\n\txm = base_mean\n\tym = output_mean\n\txs = base_sigma\n\tys = output_sigma\n\tlight = ( 2*xm*ym + C1 ) / ( K.square(xm) + K.square(ym) + C1)\n\tcontrast = ( 2*xs*ys + C2 ) / ( K.square(xs) + K.square(ys) + C2)\n\tstructure = ( cov + C3 ) / ( xs*ys + C3 )\n\n\tSSIM = (light**(1)) * (contrast**(1)) * (structure**(1))\n\tDSSIM = (1/SSIM)-1\n\n\t#out = factor * K.sum(K.square(gram_output - gram_base))",
"def loss(self, y_true: ndarray, y_pred: ndarray):\n \"\"\"\n ###########################\n Write here the PEGASOS loss.\n ###########################\n \"\"\"\n\n err = [np.max([0, 1-y_true[i]*y_pred[i]]) for i in range(0, y_true.size)]\n return np.sum(err) / y_true.size\n # return np.random.normal(loc=100.0, scale=5.0, size=(1,))[0]",
"def test_loss_opposite_images(self, size):\n tensor_shape = np.random.randint(size, 6, size=3).tolist()\n image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1])\n\n laplacian, _ = matting.build_matrices(image, size=size)\n loss = matting.loss(1.0 - image, laplacian)\n\n self.assertAllClose(loss, 0.0, atol=1e-4)",
"def loss_perceptual(self, vgg_out, vgg_gt):\r\n loss = 0\r\n for o, g in zip(vgg_out, vgg_gt):\r\n loss += self.l1(o, g)\r\n return loss",
"def mabse_amm_img(args):\n x_, x_fake_, attn_map = args\n return K.mean(K.abs((x_ - x_fake_) * attn_map), axis=[1,2,3], keepdims=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles app logic for a user leaving a room. Must be passed either a Room and Account object, or a RoomList object. Examples: leave_room(room=room_obj, account=account_obj) or leave_room(session=roomlist_obj)
|
def leave_room(room=None, account=None, session=None):
if room is not None and account is not None:
session = RoomList.all().filter('room =', room).filter('account =', account).get()
elif session is not None:
room = session.room
account = session.account
else:
raise TypeError("must specify either room and account, or session")
# clean up the session record
session.delete()
# send a message to the room about the part
timestamp = datetime.datetime.now()
message = Message( sender = account, room = room, timestamp = timestamp, type = 'part' )
message.put()
|
[
"def on_leave(data):\n username = session[\"login\"][0]\n room = find_room(data[\"bookgroup_id\"], data.get(\"chapter_number\"))\n leave_room(room)\n\n emit('leave_status', {'msg': username + \" has left room \" + str(room)}, room=room)",
"async def leave_room(self, chat_rooms: Union[List[str], str]):\n if isinstance(chat_rooms, str):\n chat_rooms = [chat_rooms]\n room_str = ','.join([f'#{c}'.lower() if c[0] != '#' else c.lower() for c in chat_rooms])\n target = [c[1:].lower() if c[0] == '#' else c.lower() for c in chat_rooms]\n for r in target:\n self._room_leave_locks.append(r)\n await self._send_message(f'PART {room_str}')\n for x in target:\n if x in self._join_target:\n self._join_target.remove(x)\n # wait to leave all rooms\n while any([r in self._room_leave_locks for r in target]):\n await asyncio.sleep(0.01)",
"async def leave(self, ctx):\n if not await self.check_pm(ctx.message):\n return\n if ctx.author not in self.players:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"You are not in a game..\"))\n return\n if self.game_status == 0:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"No game in progress.\"))\n return\n elif self.game_status == 2:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"You can not leave an in progress game.\"))\n return\n self.players.remove(ctx.author)\n await ctx.send(embed=self.make_embed(\"Avalon\", 0x77dd77, \"Avalon\", \"You have successfully left the game.\"))",
"def __del__(self):\n for user in self.activeUsers:\n user.leaveRoom(self)\n\n logger.info(\n \"Successfully deleted room %s\" % self.roomName\n )",
"def IgmpMldLeave(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"igmpMldLeave\", payload=payload, response_object=None)",
"def leavegamepost():\n\n\tpostdata = request.post_vars\n\tremovePlayerCheck(postdata['player_id'], auth.user)\n\treturn",
"def leaveGame(self):\n\n\t\tself.__serverHandler.leaveGame()",
"def handle_leave(_, event, destination):\n LINE.log_event(\"Bot left a group.\", event=event, dest=destination)\n ChannelManager.deregister(Platform.LINE, LineApiUtils.get_channel_id(event))",
"def delete_room(request):\n\tif request.method != 'POST' or not is_fields_in_dict(request.POST, 'room_pk'):\n\t\traise Http404\n\n\tuser = request.user\n\troom = get_object_or_404(Room, pk=request.POST['room_pk'])\n\n\tif room.owner == user:\n\t\tresponse_text = convert_room_info_to_dict(room)\n\t\tresponse_text['type'] = 'delete'\n\t\tpusher.trigger(make_room_channel_name(str(room.pk)), 'room_modified', response_text)\n\t\troom.delete()\n\t\tresponse_text = {'response': 'deleted room from database'}\n\telse:\n\t\tdelete_user_room_connection(user, room)\n\t\tuser_dict = convert_user_to_dict(user)\n\t\tchannel_name = make_room_channel_name(str(room.pk))\n\t\tpusher.trigger(channel_name, 'user_deleted', user_dict)\n\t\tresponse_text = {'response': 'deleted from visible_rooms'}\n\n\treturn HttpResponse(json.dumps(response_text), content_type='application/json')",
"async def ask_leave(self, ctx):\r\n await ctx.send(\"If you would like to leave the game, type the command .leave\")",
"def revoke_room_access():\n\tschema = {\n\t\t\"room_text_id\": {\"type\": \"string\"},\n\t\t\"email\": {\"type\": \"string\"}\n\t}\n\n\temail = request.json.get(\"email\")\n\troom_text_id = request.json.get(\"room_text_id\")\n\n\t# Checks if the request is a json\n\tif not request.is_json:\n\t\treturn bad_request(\"Missing JSON in request\")\n\n\t# Checks if any of the input is illegal\n\tif not validator(request.json, schema):\n\t\treturn bad_request(validator.errors)\n\n\t# Checks if the reader exists in the database\n\treader = Reader.query.filter_by(email=email).first()\n\tif not reader:\n\t\treturn bad_request(\"Reader does not exist!\")\n\n\thas_access = db.session.query(Room, CardReader, HasAccessTo).filter(\n\t\tRoom.text_id == room_text_id,\n\t\tor_(CardReader.room_b_id == Room.id, CardReader.room_a_id == Room.id),\n\t\tHasAccessTo.card_reader_id == CardReader.id,\n\t\tHasAccessTo.reader_id == reader.id\n\t).all()\n\n\tif not has_access:\n\t\treturn bad_request(\"The reader does not have access to this room\")\n\n\tfor a in has_access:\n\t\tcr_id = a.CardReader.id\n\t\t# Delete access\n\t\tHasAccessTo.query.filter_by(card_reader_id=cr_id, reader_id=reader.id).delete()\n\n\tdb.session.commit()\n\treturn ok(\"Access to {0} has been removed for {1}\".format(room_text_id, email))",
"def delete_control_room(event, context):\n \n site = event['pathParameters']['site']\n\n # Get room details from dynamodb\n room = Room.from_dynamodb(site)\n if room is not None:\n room.delete_room()\n return http_response(HTTPStatus.OK, 'room has been deleted')\n\n else:\n return http_response(HTTPStatus.OK, 'no such room found')",
"def leave_game():\n if not current_player:\n abort(400)\n current_player.game = None\n db.session.commit()\n return player_state()",
"def at_post_object_leave(self, obj):\n # Try removing the object from the coordinates system\n if loc := self.db.itemcoordinates.pop(obj, None):\n # The object was removed successfully\n # Make sure there was a room at that location\n if room := self.db.rooms.get(loc):\n # If so, try to clean up the room\n self._destroy_room(room)",
"def leave_schedule(request, schedule_pk):\n\trequested_schedule = ReadingSchedule.objects.get(pk = schedule_pk)\n\trequested_schedule.signed_up.remove(request.user)\n\t\n\treturn redirect(\"/schedule/\")",
"async def leave(self, data):\n ad = await self.config()\n smb_ha_mode = await self.middleware.call('smb.get_smb_ha_mode')\n\n ad['dstype'] = DSType.DS_TYPE_ACTIVEDIRECTORY.value\n ad['bindname'] = data.get(\"username\", \"\")\n ad['bindpw'] = data.get(\"password\", \"\")\n\n await self.middleware.call('kerberos.do_kinit', ad)\n\n netads = await run([SMBCmd.NET.value, '-U', data['username'], '-k', 'ads', 'leave'], check=False)\n if netads.returncode != 0:\n self.logger.warning(\"Failed to leave domain: %s\", netads.stderr.decode())\n\n if smb_ha_mode != 'LEGACY':\n krb_princ = await self.middleware.call(\n 'kerberos.keytab.query',\n [('name', '=', 'AD_MACHINE_ACCOUNT')]\n )\n if krb_princ:\n await self.middleware.call('kerberos.keytab.delete', krb_princ[0]['id'])\n\n await self.middleware.call('datastore.delete', 'directoryservice.kerberosrealm', ad['kerberos_realm'])\n await self.middleware.call('activedirectory.stop')\n if smb_ha_mode == 'LEGACY' and (await self.middleware.call('failover.status')) == 'MASTER':\n try:\n await self.middleware.call('failover.call_remote', 'activedirectory.leave', [data])\n except Exception:\n self.logger.warning(\"Failed to leave AD domain on passive storage controller.\", exc_info=True)\n\n self.logger.debug(\"Successfully left domain: %s\", ad['domainname'])",
"def leave_group(group_id_input):\n user_id = session['login'][1]\n user_usergroup = UserGroup.query.filter_by(user_id = user_id, group_id=group_id_input).one()\n db.session.delete(user_usergroup)\n db.session.commit()\n return redirect('/explore')",
"def _leave_cb(self):\n self.shared_activity.emit(\"joined\", False, \"left activity\")",
"def del_room(self, room_name):\n if room_name == \"Game Hall\":\n return\n del self.rooms[room_name]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create curriculum integration object.
|
def create_integration(self, topic, number, lessons=None, curriculum_areas=None):
integration = CurriculumIntegration(
topic=topic,
slug="integration-{}".format(number),
name="Integration {}".format(number),
number=number,
content="<p>Content for integration {}.</p>".format(number),
)
integration.save()
if lessons:
for lesson in lessons:
integration.prerequisite_lessons.add(lesson)
if curriculum_areas:
for curriculum_area in curriculum_areas:
integration.curriculum_areas.add(curriculum_area)
return integration
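An illustrative call; the topic, lesson, and area objects are assumed to come from surrounding test fixtures and are not created here:

integration = self.create_integration(topic, 2, lessons=[lesson], curriculum_areas=[area])
assert integration.slug == "integration-2"
assert integration.name == "Integration 2"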
|
[
"def new_curriculum(req):\r\n\treturn direct_to_template(req, 'lesson/curriculum.html')",
"def create_integrator(self, model, inputs, t_eval=None, use_event_switch=False):\n pybamm.logger.debug(\"Creating CasADi integrator\")\n\n # Use grid if t_eval is given\n use_grid = t_eval is not None\n if use_grid is True:\n t_eval_shifted = t_eval - t_eval[0]\n t_eval_shifted_rounded = np.round(t_eval_shifted, decimals=12).tobytes()\n # Only set up problem once\n if model in self.integrators:\n # If we're not using the grid, we don't need to change the integrator\n if use_grid is False:\n return self.integrators[model][\"no grid\"]\n # Otherwise, create new integrator with an updated grid\n # We don't need to update the grid if reusing the same t_eval\n # (up to a shift by a constant)\n else:\n if t_eval_shifted_rounded in self.integrators[model]:\n return self.integrators[model][t_eval_shifted_rounded]\n else:\n method, problem, options, time_args = self.integrator_specs[model]\n time_args = [t_eval_shifted[0], t_eval_shifted[1:]]\n integrator = casadi.integrator(\n \"F\", method, problem, *time_args, options\n )\n self.integrators[model][t_eval_shifted_rounded] = integrator\n return integrator\n else:\n rhs = model.casadi_rhs\n algebraic = model.casadi_algebraic\n\n options = {\n \"show_eval_warnings\": False,\n **self.extra_options_setup,\n \"reltol\": self.rtol,\n \"abstol\": self.atol,\n }\n\n # set up and solve\n t = casadi.MX.sym(\"t\")\n p = casadi.MX.sym(\"p\", inputs.shape[0])\n y0 = model.y0\n\n y_diff = casadi.MX.sym(\"y_diff\", rhs(0, y0, p).shape[0])\n y_alg = casadi.MX.sym(\"y_alg\", algebraic(0, y0, p).shape[0])\n y_full = casadi.vertcat(y_diff, y_alg)\n\n if use_grid is False:\n time_args = []\n # rescale time\n t_min = casadi.MX.sym(\"t_min\")\n t_max = casadi.MX.sym(\"t_max\")\n t_max_minus_t_min = t_max - t_min\n t_scaled = t_min + (t_max - t_min) * t\n # add time limits as inputs\n p_with_tlims = casadi.vertcat(p, t_min, t_max)\n else:\n time_args = [t_eval_shifted[0], t_eval_shifted[1:]]\n # rescale time\n t_min = casadi.MX.sym(\"t_min\")\n # Set dummy parameters for consistency with rescaled time\n t_max_minus_t_min = 1\n t_scaled = t_min + t\n p_with_tlims = casadi.vertcat(p, t_min)\n\n # define the event switch as the point when an event is crossed\n # we don't do this for ODE models\n # see #1082\n event_switch = 1\n if use_event_switch is True and not algebraic(0, y0, p).is_empty():\n for event in model.casadi_switch_events:\n event_switch *= event(t_scaled, y_full, p)\n\n problem = {\n \"t\": t,\n \"x\": y_diff,\n # rescale rhs by (t_max - t_min)\n \"ode\": (t_max_minus_t_min) * rhs(t_scaled, y_full, p) * event_switch,\n \"p\": p_with_tlims,\n }\n if algebraic(0, y0, p).is_empty():\n method = \"cvodes\"\n else:\n method = \"idas\"\n problem.update(\n {\n \"z\": y_alg,\n \"alg\": algebraic(t_scaled, y_full, p),\n }\n )\n integrator = casadi.integrator(\"F\", method, problem, *time_args, options)\n self.integrator_specs[model] = method, problem, options, time_args\n if use_grid is False:\n self.integrators[model] = {\"no grid\": integrator}\n else:\n self.integrators[model] = {t_eval_shifted_rounded: integrator}\n\n return integrator",
"def __init__(self):\n this = _coin.new_SoPendulum()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def create_tst_object() -> NJTransitAPI:\n njt = NJTransitAPI()\n njt.username = config.USERNAME\n njt.apikey = config.APIKEY\n return njt",
"def create_curriculum_area(self, number, parent=None):\n area = CurriculumArea(\n slug=\"area-{}\".format(number),\n name=\"Area {}\".format(number),\n colour=\"colour-{}\".format(number),\n number=number,\n parent=parent,\n languages=[\"en\"],\n )\n area.save()\n return area",
"def TrackerCSRT_create(parameters=...) -> retval:\n ...",
"def create_incorporation_object(inc: str):\n company_type, ID = get_company_type_and_id(inc)\n date = get_date(inc)\n address = get_address(inc)\n number = get_number(inc)\n incorp_object = Incorporation(ID,company_type,date,address,number)\n return(incorp_object)",
"def create_component() -> NewSimulationComponent:\n\n # Read the parameters for the component from the environment variables.\n # In this example the parameters are made to correspond to the example\n # parameters used in the NewSimulationComponent constructor\n # They should be changed to fit the actual component.\n environment_variables = load_environmental_variables(\n (COMPONENT_PARAMETER_1, int, 10), # required integer with the default value of 10\n (COMPONENT_PARAMETER_2, str, \"test\"), # required string with the default value of \"test\"\n (COMPONENT_PARAMETER_3, str) # optional string with the default value of None\n )\n\n # The cast function here is only used to help Python linters like pyright to recognize the proper type.\n # They are not necessary and can be omitted.\n parameter1 = cast(int, environment_variables[COMPONENT_PARAMETER_1])\n parameter2 = cast(str, environment_variables[COMPONENT_PARAMETER_2])\n parameter3 = environment_variables[COMPONENT_PARAMETER_3]\n if parameter3 is not None:\n parameter3 = cast(str, parameter3)\n\n # Create and return a new NewSimulationComponent object using the values from the environment variables\n return NewSimulationComponent(\n parameter1=parameter1,\n parameter2=parameter2,\n parameter3=parameter3\n )",
"def create_integration_role(input):\n assert isinstance(input, IntegrationInstall)\n if input.integration_arn is not None:\n role = _get_role(input.session, input.integration_arn)\n if role:\n success(\n \"Found existing AWS IAM role '%s', using it with the New Relic Lambda \"\n \"integration\" % input.integration_arn\n )\n return role\n failure(\n \"Could not find AWS IAM role '%s', please verify it exists and run this \"\n \"command again\" % input.integration_arn\n )\n return\n\n role_name = \"NewRelicLambdaIntegrationRole_%s\" % input.nr_account_id\n stack_name = \"NewRelicLambdaIntegrationRole-%s\" % input.nr_account_id\n role = _get_role(input.session, role_name)\n if role:\n success(\"New Relic AWS Lambda integration role '%s' already exists\" % role_name)\n return role\n stack_status = _get_cf_stack_status(input.session, stack_name)\n if stack_status is None:\n _create_role(input)\n role = _get_role(input.session, role_name)\n success(\"Created role [%s] in AWS account.\" % role_name)\n return role\n failure(\n \"Cannot create CloudFormation stack %s because it exists in state %s\"\n % (stack_name, stack_status)\n )",
"def curriculum_learning_initialize(self, curriculum_config):\n # Save config.\n self.curriculum_config = curriculum_config",
"def create_integrator(self):\n cparams = self.params[('forward_model', {}, 'settings for the forward model')]\n advectionMap = FM.AdvectMap(self.sz, self.spacing, compute_inverse_map=self.compute_inverse_map)\n return ODE.ODEWrapBlock(advectionMap, cparams, self.use_odeint, self.use_ode_tuple, self.tFrom, self.tTo)",
"def New(*args, **kargs):\n obj = itkStochasticFractalDimensionImageFilterID3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def create_integrator(self):\n cparams = self.params[('forward_model', {}, 'settings for the forward model')]\n epdiffScalarMomentumMap = FM.EPDiffScalarMomentumMap(self.sz, self.spacing, self.smoother, cparams,\n compute_inverse_map=self.compute_inverse_map)\n return ODE.ODEWrapBlock(epdiffScalarMomentumMap, cparams, self.use_odeint, self.use_ode_tuple, self.tFrom, self.tTo)",
"def New(*args, **kargs):\n obj = itkStochasticFractalDimensionImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def Add_Integration(apig,api_name: str,resource_id: str,http_method: str,lambda_arn: str,integration_type='AWS',enable_CORs=True):\n\n\t\t\t\t#get the id of the api by name\n\t\t\t\tapi_id = AWS.APIGateway.GetId(apig,api_name)\n\n\t\t\t\t#get the version to use for the method integration\n\t\t\t\t#version = apig.client.meta.service_model.api_version\n\t\t\t\tversion = '2015-03-31' #latest 2015-07-09 failed to properly invoke lambda\n\n\t\t\t\t#remove the latest alias funciton tag\n\t\t\t\t#TODO: why? - didnt work otherwise\n\t\t\t\tlambda_arn = lambda_arn.replace(':$LATEST','')\n\t\t\t\t\n\t\t\t\t#build the lambda uri\n\t\t\t\turi = 'arn:aws:apigateway:' + apig.region + ':lambda:path/' + version + '/functions/' + lambda_arn + '/invocations'\n\t\t\t\t#uri arn:aws:apigateway:$REGION:lambda:path/2015-03-31/functions/arn:aws:lambda:$REGION:$ACCOUNT:function:LambdaFunctionOverHttps/invocations\n\n\t\t\t\tif enable_CORs:\n\t\t\t\t\t\n\t\t\t\t\t#add integration\n\t\t\t\t\tadd_response = apig.client.put_integration(\n\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\thttpMethod=http_method,\n\t\t\t\t\t\tintegrationHttpMethod='POST',#http_method, #must change to POST as this is how lambda functions are invoked?\n\t\t\t\t\t\turi=uri,\n\t\t\t\t\t\ttype=integration_type)\n\n\t\t\t\t\t#add the method response\n\t\t\t\t\tmethod_response = apig.client.put_method_response(\n\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\thttpMethod=http_method,\n\t\t\t\t\t\tstatusCode='200',\n\t\t\t\t\t\tresponseParameters={\n\t\t\t\t\t\t\t'method.response.header.Access-Control-Allow-Origin': False\n\t\t\t\t\t\t},\n\t\t\t\t\t\tresponseModels={\n\t\t\t\t\t\t\t'application/json': 'Empty'\n\t\t\t\t\t\t})\n\n\t\t\t\t\t#add the integration response\n\t\t\t\t\tintegration_response = apig.client.put_integration_response(\n\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\thttpMethod=http_method,\n\t\t\t\t\t\tstatusCode='200',\n\t\t\t\t\t\tresponseParameters={\n\t\t\t\t\t\t\t'method.response.header.Access-Control-Allow-Origin': '\\'*\\''\n\t\t\t\t\t\t},\n\t\t\t\t\t\tresponseTemplates={\n\t\t\t\t\t\t\t'application/json': ''\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\n\t\t\t\t\t#add an OPTION method\n\t\t\t\t\toption_response = apig.client.put_method(\n\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\thttpMethod='OPTIONS',\n\t\t\t\t\t\tauthorizationType='NONE'\n\t\t\t\t\t)\n\n\t\t\t\t\t# Set the put integration of the OPTIONS method\n\t\t\t\t\topt_int_response = apig.client.put_integration(\n\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\thttpMethod='OPTIONS',\n\t\t\t\t\t\ttype='MOCK',\n\t\t\t\t\t\trequestTemplates={\n\t\t\t\t\t\t\t'application/json': '{\"statusCode\": 200}'\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\n\t\t\t\t\t# Set the put method response of the OPTIONS method\n\t\t\t\t\topt_resp_response = apig.client.put_method_response(\n\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\thttpMethod='OPTIONS',\n\t\t\t\t\t\tstatusCode='200',\n\t\t\t\t\t\tresponseParameters={\n\t\t\t\t\t\t\t'method.response.header.Access-Control-Allow-Headers': False,\n\t\t\t\t\t\t\t'method.response.header.Access-Control-Allow-Origin': False,\n\t\t\t\t\t\t\t'method.response.header.Access-Control-Allow-Methods': False\n\t\t\t\t\t\t},\n\t\t\t\t\t\tresponseModels={\n\t\t\t\t\t\t\t'application/json': 'Empty'\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\n\t\t\t\t\t# Set the put integration response of the OPTIONS 
method\n\t\t\t\t\topt_int_resp_response = apig.client.put_integration_response(\n\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\thttpMethod='OPTIONS',\n\t\t\t\t\t\tstatusCode='200',\n\t\t\t\t\t\tresponseParameters={\n\t\t\t\t\t\t\t'method.response.header.Access-Control-Allow-Headers': '\\'Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-api-key,X-Amz-Security-Token\\'',\n\t\t\t\t\t\t\t'method.response.header.Access-Control-Allow-Methods': '\\'' + http_method + ',OPTIONS\\'',\n\t\t\t\t\t\t\t'method.response.header.Access-Control-Allow-Origin': '\\'*\\''\n\t\t\t\t\t\t},\n\t\t\t\t\t\tresponseTemplates={\n\t\t\t\t\t\t\t'application/json': ''\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\n\t\t\t\telse:\n\n\t\t\t\t\tadd_response = apig.client.put_integration(\n\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\thttpMethod=http_method,\n\t\t\t\t\t\tintegrationHttpMethod=http_method,\n\t\t\t\t\t\turi=uri,\n\t\t\t\t\t\ttype=integration_type)\n\n\t\t\t\t\tresp_response = apig.client.put_integration_response(\n\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\thttpMethod=http_method,\n\t\t\t\t\t\tstatusCode='200',\n\t\t\t\t\t\tselectionPattern=''\n\t\t\t\t\t)\n\n\t\t\t\t\t# create POST method response\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmethod_response = apig.client.put_method_response(\n\t\t\t\t\t\t\trestApiId=api_id,\n\t\t\t\t\t\t\tresourceId=resource_id,\n\t\t\t\t\t\t\thttpMethod=http_method,\n\t\t\t\t\t\t\tstatusCode='200',\n\t\t\t\t\t\t\tresponseModels={\n\t\t\t\t\t\t\t\t'application/json': 'Empty' #TODO: make like in console\n\t\t\t\t\t\t\t})\n\t\t\t\t\texcept:\n\t\t\t\t\t\ta = 5\n\t\t\t\t\t\t#TODO: update because http_method could change?\n\n\t\t\t\treturn add_response",
"def create_integrator(self):\n cparams = self.params[('forward_model', {}, 'settings for the forward model')]\n advection = FM.AdvectImage(self.sz, self.spacing)\n return ODE.ODEWrapBlock(advection, cparams, self.use_odeint, self.use_ode_tuple, self.tFrom, self.tTo)",
"def New(*args, **kargs):\n obj = itkCyclicShiftImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def create_integrator(self):\n cparams = self.params[('forward_model', {}, 'settings for the forward model')]\n epdiffScalarMomentumImage = FM.EPDiffScalarMomentumImage(self.sz, self.spacing, self.smoother, cparams)\n return ODE.ODEWrapBlock(epdiffScalarMomentumImage, cparams, self.use_odeint, self.use_ode_tuple, self.tFrom, self.tTo)",
"def New(*args, **kargs):\n obj = itkStochasticFractalDimensionImageFilterIUL3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create curriculum area object.
|
def create_curriculum_area(self, number, parent=None):
area = CurriculumArea(
slug="area-{}".format(number),
name="Area {}".format(number),
colour="colour-{}".format(number),
number=number,
parent=parent,
languages=["en"],
)
area.save()
return area
|
[
"def new_area(self):\n pass",
"def __init__(self, area_points, reversed_counting_logic):\n super(AreaCounter, self).__init__(reversed_counting_logic)\n if len(area_points) > 2:\n self.area_polygon = area_points\n else:\n print(\"[Counter] Invalid counting area settings, the counter\" \\\n \" will use the bottom half of the image by default\")\n self.area_polygon = [[0, 30], [79, 30], [79, 59], [0, 59]]",
"def new_curriculum(req):\r\n\treturn direct_to_template(req, 'lesson/curriculum.html')",
"def Construct(self):\n # First get the materials\n rock = Geant4.gNistManager.FindOrBuildMaterial(\"G4_SILICON_DIOXIDE\") #Sandy rock\n air = Geant4.gNistManager.FindOrBuildMaterial(\"G4_AIR\")\n # World is a 60x60x60m box\n world_solid = Geant4.G4Box(\"world_solid\", 60.0 * Geant4.m, 60.0 * Geant4.m, 60.0 * Geant4.m)\n world_logical = Geant4.G4LogicalVolume(world_solid, rock, \"world_logical\")\n # Must be global in order not to be garbage collected\n global world\n world = Geant4.G4PVPlacement(Geant4.G4Transform3D(), world_logical, \"world\", None, False, 0)\n # The target volume is a 20x20m Cylinder in the centre\n target_solid = Geant4.G4Tubs(\"target_solid\", 0.0, target_radius, target_height, 0.0, 2.0 * Geant4.pi)\n global detector\n detector = TargetDetector()\n target_logical = Geant4.G4LogicalVolume(target_solid, air, \"target_logical\", None, detector)\n # Must be global in order not to be garbage collected\n global target\n target = Geant4.G4PVPlacement(Geant4.G4Transform3D(), target_logical, \"world\", world_logical, False, 0)\n return world",
"def create_graphic(self):\n x, y = self.coords\n self.graphic_id = self.world.create_arc(x - Entity.RADIUS, y - Entity.RADIUS,\n x + Entity.RADIUS, y + Entity.RADIUS,\n # A little mouth\n start=self.heading + self.mouth_angle / 2,\n extent= 360 - self.mouth_angle,\n fill=self.color, outline=self.outline)",
"def __init__(self, geometry = None):\n\n self.nangle = 16\n self.delta = 0.02\n self.boundary = 'vacuum'\n self.geometry = geometry\n\n self.polarquad = Quadrature()\n # self.polarquad.TabuchiYamamoto(3)\n self.polarquad.GaussLegendre(10)",
"def __init__(self):\n this = _coin.new_SoPendulum()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def get_side_area(self):\n\t\treturn PI * self.get_forming * (self.r_1 + self.r_2)",
"def add_area(self, top_left_location, width, height, name,\n customizable_properties=None,\n visualize_colour=None,\n visualize_opacity=None, **custom_properties):\n if customizable_properties is not None:\n warnings.warn(\n f\"Usage of customizable_properties is depreceated and can be removed. All properties are now customizable.\",\n DeprecationWarning,\n )\n\n # Check if width and height are large enough to make an actual room\n # (with content)\n if width < 1 or height < 1:\n raise Exception(f\"While adding area {name}; The width {width} \"\n f\"and/or height {height} should both be larger \"\n f\"than 0.\")\n\n # Get all locations in the rectangle\n locs = self.__list_area_locs(top_left_location, width, height)\n\n # Add all area objects\n self.add_multiple_objects(locations=locs, callable_classes=AreaTile,\n names=name,\n visualize_colours=visualize_colour,\n visualize_opacities=visualize_opacity,\n custom_properties=custom_properties)",
"def CreateAnalysisArea(Project_Area, parameter_values, out_name):\n in_features = Project_Area\n out_feature_class = out_name\n line_side = \"FULL\"\n line_end_type = \"ROUND\"\n dissolve_option = \"ALL\"\n\n # identify maximum indirect effect distance for buffer\n effect_distances = [row[0] for row in arcpy.da.SearchCursor(\n parameter_values, \"Distance\") if isinstance(row[0], (int, float))]\n buffer_distance = max(effect_distances)\n\n Analysis_Area = arcpy.Buffer_analysis(in_features, out_feature_class,\n buffer_distance, line_side,\n line_end_type, dissolve_option)\n\n return Analysis_Area",
"def area( self ):\n\n return self.__sideAB * self.__sideDA * math.sin(math.radians(self.__angleA))",
"def __init__(self, length, width, transform):\n self.length = length\n self.width = width\n self.area = length * width\n self.transform = transform\n bottom_left = (-length/2, -width/2)\n top_left = (-length/2, width/2)\n top_right = (length/2, width/2)\n bottom_right = (length/2, -width/2)\n ext = [bottom_left, top_left, top_right, bottom_right, bottom_left]\n\n polygon = Polygon(ext)\n super(Rectangle, self).__init__(polygon, transform, \"rectangle\")",
"def set_area(self):\n hectare = self.items[\"hectare\"]\n area_total = self.raw_data[\"AREA_HA\"]\n self.add_statement(\n \"area\", {\"quantity_value\": area_total,\n \"unit\": hectare})\n\n areas_parts = {\"SKOG_HA\": \"woods\",\n \"LAND_HA\": \"land\",\n \"VATTEN_HA\": \"water\"}\n for part, item in areas_parts.items():\n area_ha = self.raw_data[part]\n target_item = self.items[item]\n qualifier = self.make_qualifier_applies_to(target_item)\n self.add_statement(\n \"area\", {\"quantity_value\": area_ha,\n \"unit\": hectare},\n quals=qualifier)",
"def create_ride():",
"def __init__(self, radius: int, position: Vector, velocity: Vector,\n dob_timestamp: datetime, min_bound: float = None,\n max_bound: float = None):\n self.id = Asteroid._generate_unique_id()\n self.radius = radius\n self.position = position\n self.velocity = velocity\n self.dob_timestamp = dob_timestamp\n self.min_bound = min_bound\n self.max_bound = max_bound",
"def __init__(self, *args):\n this = _coin.new_SbSphereSectionProjector(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, side_length, transform):\n self.side_length = side_length\n self.length = side_length\n self.width = side_length\n self.area = side_length**2\n bottom_left = (-side_length/2, -side_length/2)\n top_left = (-side_length/2, side_length/2)\n top_right = (side_length/2, side_length/2)\n bottom_right = (side_length/2, -side_length/2)\n ext = np.array([bottom_left, top_left, top_right, bottom_right, bottom_left])\n\n polygon = Polygon(ext)\n super(Square, self).__init__(polygon, transform, \"square\")",
"def createSubdivRegion():\n pass",
"def test_area_studies(self):\n\n credits = [\n APCredit(AP_MUS, 5), # Humanities and Fine Arts - Music\n APCredit(AP_PSY, 5), # Social Sciences - Psychology\n ]\n\n college = init_college(WARREN_NAME)\n college.apply_credits(credits)\n\n # Unit calculation:\n # 4 units for Social Sciences\n # 4 units for Humanities and Fine Arts\n # = 8 units\n\n self.assertEqual(4, college.area_study_hum_credited_units)\n self.assertEqual(4, college.area_study_soc_credited_units)\n\n # Verify the SubRequirement names\n hum_condition = (WARREN_MUS_AS_NAME == college.area_study_hum.name)\n soc_condition = (WARREN_PSY_AS_NAME == college.area_study_soc.name)\n self.assertTrue(hum_condition and soc_condition)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create programming language object.
|
def create_programming_language(self, number):
language = ProgrammingChallengeLanguage(
slug="language-{}".format(number),
name="Language {}".format(number),
number=number,
languages=["en"],
)
language.save()
return language
|
[
"def create_test_language(name: str, code: str) -> Language:\r\n lang = Language(name=name, code=code)\r\n lang.full_clean()\r\n lang.save()\r\n return lang",
"def fol_language():\n def make_symbols(start):\n \"\"\"E.g., if start='a', then returns ['a1', ..., 'a9', 'b1', ..., 'c9'].\"\"\"\n return [chr(ord(start) + i) + str(n)\n for i in range(0, 3)\n for n in range(1, 10)]\n\n return Language(\n collections.OrderedDict([\n (IDENTITY_SYMBOL, 0),\n (NEGATION_SYMBOL, 1),\n (AND_SYMBOL, 2),\n (OR_SYMBOL, 2),\n (XOR_SYMBOL, 2),\n (IMPLIES_SYMBOL, 2),\n (FOR_ALL_SYMBOL, 2),\n (EXISTS_SYMBOL, 2),\n (RELATION_SYMBOL.format(1), 2), # unary-relation\n (RELATION_SYMBOL.format(2), 3), # binary-relation\n ]),\n predicates=make_symbols('p'),\n constants=make_symbols('a'),\n variables=make_symbols('x'),\n )",
"def get_lang_obj(name: str) -> AbstractLanguage:\n if name.lower() == \"abap\":\n return Abap()\n raise Exception(\"Unknown language: \" + name)",
"def propositional_language(num_variables=26):\n predicates = [chr(ord('a') + i) for i in xrange(num_variables)]\n\n return Language(\n collections.OrderedDict([\n (IDENTITY_SYMBOL, 0),\n (NEGATION_SYMBOL, 1),\n (AND_SYMBOL, 2),\n (OR_SYMBOL, 2),\n (XOR_SYMBOL, 2),\n (IMPLIES_SYMBOL, 2),\n ]),\n predicates=predicates,\n constants=[],\n variables=[],\n )",
"def __init__(self, language=\"en-GB\", lang_dir=None):\n lang_dirs = [\"/usr/share/pico/lang/\", _LANG_DIR]\n if lang_dir:\n lang_dirs.insert(0, lang_dir)\n\n self.__e = None\n for ldir in lang_dirs:\n try:\n self.__e = ctts.engine_create(language_dir=ldir, language=language)\n except RuntimeError as ex:\n pass # Try next directory to find language...\n if self.__e:\n break\n\n if self.__e is None:\n raise RuntimeError(\"Could not instantiate TTS engine with language \" + language)",
"def create_programming_challenge_implementation(self, topic,\n language,\n challenge,\n expected_result=\"<p>Example result.</p>\",\n hints=\"<p>Example hints.</p>\",\n solution=\"<p>Example solution.</p>\",\n ):\n implementation = ProgrammingChallengeImplementation(\n topic=topic,\n language=language,\n challenge=challenge,\n expected_result=expected_result,\n hints=hints,\n solution=solution,\n languages=[\"en\"],\n )\n implementation.save()\n return implementation",
"def __init__(self,\n languages: Optional[Languages] = None,\n operators: Optional[Operators] = None\n ) -> None:\n self.__languages = languages if languages else Languages()\n self.__operators = operators if operators else Operators()",
"def __init__(self, language):\n if language.lower() in self.languages_rev:\n self._language = language.lower()\n elif language.upper() in self.languages:\n self._language = self.languages[language.upper()]\n else:\n raise ValueError(\"No such language: %s\" % language)",
"def make_language_keyboard():\n return telegram.make_keyboard(\n globalvars.lang.text('SUPPORTED_LANGUAGES'),\n 2,\n '')",
"def createSyntaxFile():\n try:\n from . import Paths\n from .JSONFile import JSONFile\n except:\n from libs import Paths\n from libs.JSONFile import JSONFile\n\n keywords = getKeywords()\n\n LITERAL1s = []\n KEYWORD1s = []\n KEYWORD2s = []\n KEYWORD3s = []\n\n # set keywords\n for k in keywords:\n for w in k.get_keywords():\n if 'LITERAL1' in w.get_type():\n LITERAL1s.append(w.get_id())\n if 'KEYWORD1' in w.get_type():\n KEYWORD1s.append(w.get_id())\n if 'KEYWORD2' in w.get_type():\n KEYWORD2s.append(w.get_id())\n if 'KEYWORD3' in w.get_type():\n KEYWORD3s.append(w.get_id())\n\n # formating\n LITERAL1s = set(LITERAL1s)\n LITERAL1s = '|'.join(LITERAL1s)\n KEYWORD1s = set(KEYWORD1s)\n KEYWORD1s = '|'.join(KEYWORD1s)\n KEYWORD2s = set(KEYWORD2s)\n KEYWORD2s = '|'.join(KEYWORD2s)\n KEYWORD3s = set(KEYWORD3s)\n KEYWORD3s = '|'.join(KEYWORD3s)\n\n # get sintax preset\n sintax_path = Paths.getSyntaxPath()\n sintax_file = JSONFile(sintax_path)\n sintax = sintax_file.readFile()\n\n # replace words in sintax file\n sintax = sintax.replace('${LITERAL1}', LITERAL1s)\n sintax = sintax.replace('${KEYWORD1}', KEYWORD1s)\n sintax = sintax.replace('${KEYWORD2}', KEYWORD2s)\n sintax = sintax.replace('${KEYWORD3}', KEYWORD3s)\n\n # Save File\n file_path = Paths.getTmLanguage()\n language_file = JSONFile(file_path)\n language_file.writeFile(sintax)",
"def create_project(name: str, language: str) -> None:\n cli_config_manager = container.cli_config_manager()\n\n language = language if language is not None else cli_config_manager.default_language.get_value()\n if language is None:\n raise MoreInfoError(\n \"Please specify a language with --language or set the default language using `lean config set default-language python/csharp`\",\n \"https://www.lean.io/docs/lean-cli/tutorials/project-management\")\n\n full_path = Path.cwd() / name\n\n if not container.path_manager().is_path_valid(full_path):\n raise MoreInfoError(f\"'{name}' is not a valid path\",\n \"https://www.lean.io/docs/lean-cli/user-guides/troubleshooting#02-Common-errors\")\n\n is_library_project = False\n try:\n library_dir = container.lean_config_manager().get_cli_root_directory() / \"Library\"\n is_library_project = library_dir in full_path.parents\n except:\n # get_cli_root_directory() raises an error if there is no such directory\n pass\n\n if is_library_project and language == \"python\" and not full_path.name.isidentifier():\n raise RuntimeError(\n f\"'{full_path.name}' is not a valid Python identifier, which is required for Python library projects to be importable\")\n\n if full_path.exists():\n raise RuntimeError(f\"A project named '{name}' already exists, please choose a different name\")\n else:\n project_manager = container.project_manager()\n project_manager.create_new_project(full_path, QCLanguage.Python if language == \"python\" else QCLanguage.CSharp)\n\n # Convert the project name into a valid class name by removing all non-alphanumeric characters\n class_name = re.sub(f\"[^a-zA-Z0-9]\", \"\", \"\".join(map(_capitalize, full_path.name.split(\" \"))))\n\n if language == \"python\":\n main_name = \"main.py\"\n main_content = DEFAULT_PYTHON_MAIN if not is_library_project else LIBRARY_PYTHON_MAIN\n else:\n main_name = \"Main.cs\"\n main_content = DEFAULT_CSHARP_MAIN if not is_library_project else LIBRARY_CSHARP_MAIN\n\n with (full_path / main_name).open(\"w+\", encoding=\"utf-8\") as file:\n file.write(main_content.replace(\"$CLASS_NAME$\", class_name).replace(\"$PROJECT_NAME$\", full_path.name))\n\n with (full_path / \"research.ipynb\").open(\"w+\", encoding=\"utf-8\") as file:\n file.write(DEFAULT_PYTHON_NOTEBOOK if language == \"python\" else DEFAULT_CSHARP_NOTEBOOK)\n\n logger = container.logger()\n logger.info(f\"Successfully created {'Python' if language == 'python' else 'C#'} project '{name}'\")",
"def __init__(self):\n self.__language = build_pb2.BuildLanguage()\n self.title = \"\"\n self.description = \"\"",
"def from_map(name, inobj):\n obj = Language(\n name, inobj.pop('description', None), inobj.pop('owner', None),\n inobj.pop('privileges', []), inobj.pop('trusted', False))\n obj.fix_privileges()\n if '_ext' in inobj:\n obj._ext = inobj['_ext']\n obj.set_oldname(inobj)\n return obj",
"def my_language_model(self):\n try:\n return self._my_language_model\n except AttributeError:\n self._my_language_model = LanguageModel(\n parent_directory = self.directory,\n start_symbol = self.language_model_start_symbol,\n end_symbol = self.language_model_end_symbol,\n categorial = self.language_model_categorial\n )\n return self._my_language_model",
"def __init__(self, lm, corpus):\n self.languageModel = lm\n self.VietnameseDictionary = self.ReadDictionary()\n self.converter = TelexConverter(self.VietnameseDictionary)\n self.soundEx = SoundEx(self.VietnameseDictionary)",
"def PLATFORM_CREATE_OBJECTIVE(self):\n\t\treturn \"Here is how to create a new objective\"",
"def create_language(self):\n def on_eng(button):\n \"\"\"Handle pressing ENGLISH button.\"\"\"\n self.set_language('en')\n for b in self.language_buttons:\n self.objects.remove(b)\n self.mouse_handlers.remove(b.handle_mouse_event)\n self.language_buttons = []\n self.create_language()\n\n def on_rus(button):\n \"\"\"Handle pressing RUSSIAN button.\"\"\"\n self.set_language('ru')\n for b in self.language_buttons:\n self.objects.remove(b)\n self.mouse_handlers.remove(b.handle_mouse_event)\n self.language_buttons = []\n self.create_language()\n\n def on_back_from_language(button):\n \"\"\"Handle pressing RETURN from language settings button.\"\"\"\n for b in self.language_buttons:\n self.objects.remove(b)\n self.mouse_handlers.remove(b.handle_mouse_event)\n self.create_settings()\n\n # first rendering of settings buttons\n if self.lang_change:\n self.lang_change = False\n self.language_buttons = []\n if len(self.language_buttons) == 0:\n for i, (text, click_handler, language) in \\\n enumerate(((_(\"ENGLISH\"), on_eng, 'en'),\n (_(\"RUSSIAN\"), on_rus, 'ru'),\n (_(\"RETURN\"), on_back_from_language, ''))):\n if self.language_id == language:\n text_color = c.button_text_color_chosen\n else:\n text_color = c.button_text_color\n b = Button(c.settings_offset_x,\n c.settings_offset_y +\n (c.settings_button_h + 50) * i,\n c.settings_button_w,\n c.settings_button_h,\n text,\n click_handler,\n padding=5,\n text_color=text_color)\n self.objects.append(b)\n self.language_buttons.append(b)\n self.mouse_handlers.append(b.handle_mouse_event)\n # re-rendering of settings buttons\n else:\n for b in self.language_buttons:\n self.objects.append(b)\n self.mouse_handlers.append(b.handle_mouse_event)",
"def _create_dictionary(self) -> None:\n language = database.Language(language=self.language)\n self._language_mapper = language\n self._connection.add(language)\n self._connection.commit()",
"def init_for_language(language=None, language_model=None, **spacy_opts):\n if language is None and language_model is None:\n raise ValueError('either `language` or `language_model` must be given')\n\n if language_model is None:\n if not isinstance(language, str) or len(language) != 2:\n raise ValueError('`language` must be a two-letter ISO 639-1 language code')\n\n if language not in DEFAULT_LANGUAGE_MODELS:\n raise ValueError('language \"%s\" is not supported' % language)\n language_model = DEFAULT_LANGUAGE_MODELS[language] + '_sm'\n\n spacy_kwargs = dict(disable=['parser', 'ner'])\n spacy_kwargs.update(spacy_opts)\n\n global nlp\n nlp = spacy.load(language_model, **spacy_kwargs)\n\n return nlp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create programming challenge object.
|
def create_programming_challenge(self, topic, number,
difficulty,
challenge_set_number=1,
challenge_number=1,
content="<p>Example content.</p>",
testing_examples="<p>Testing example</p>",
extra_challenge="<p>Example challenge.</p>",
):
challenge = ProgrammingChallenge(
topic=topic,
slug="challenge-{}".format(number),
name="Challenge {}.{}: {}".format(
challenge_set_number,
challenge_number,
number,
),
challenge_set_number=challenge_set_number,
challenge_number=challenge_number,
content=content,
testing_examples=testing_examples,
extra_challenge=extra_challenge,
difficulty=difficulty,
languages=["en"],
)
challenge.save()
return challenge
|
[
"def create_programming_challenge_implementation(self, topic,\n language,\n challenge,\n expected_result=\"<p>Example result.</p>\",\n hints=\"<p>Example hints.</p>\",\n solution=\"<p>Example solution.</p>\",\n ):\n implementation = ProgrammingChallengeImplementation(\n topic=topic,\n language=language,\n challenge=challenge,\n expected_result=expected_result,\n hints=hints,\n solution=solution,\n languages=[\"en\"],\n )\n implementation.save()\n return implementation",
"def create_problem(self) -> \"MallooviaLp\":\n\n # Create the linear programming problem\n self.pulp_problem = LpProblem(self.system.name, LpMinimize)\n\n # Once we have the variables represented as tuples, we use\n # the tuples to create the linear programming variables for pulp\n self._create_variables()\n\n # Create the goal function\n self._cost_function()\n\n # Add all restrictions indicated with functions *_restriction\n # in this class\n self._add_all_restrictions()\n\n return self",
"def new(self):\n if not self.key or not self.keyLock:\n kt = tuple([i for i,m in enumerate([self.maj,self.min]) if m])\n i = random.randint(0,11)\n q = random.choice(kt)\n self.key = challenge.keys12[i][q]\n self.answerLab.config(text='')\n if not any([self.I,self.II,self.III,self.IV,self.V,self.VI,self.VII]):\n self.I_f()\n if not any([self.maj,self.min]):\n self.maj_f()\n self.challenge = challenge.Challenge(key=self.key,\n mode=self.mode.get(),\n validInts=self.intervals,\n nIntervals=int(self.Nint.get()),\n inversion=self.inversion,\n octave = self.oct)\n self.playB.config(state='normal')\n self.playChB.config(state='normal')\n self.revealB.config(state='normal')\n [b.config(state='normal') for b in self.playIntBs+self.playInt2Bs]",
"def create_challenge(self, data):\n if self.is_challenge_model(data):\n data = ChallengeModel.to_dict(data)\n response = self.post(\n endpoint=\"/challenge\",\n body=data)\n return response",
"def create_programming_language(self, number):\n language = ProgrammingChallengeLanguage(\n slug=\"language-{}\".format(number),\n name=\"Language {}\".format(number),\n number=number,\n languages=[\"en\"],\n )\n language.save()\n return language",
"def new():\n\n vector = Vector.new()\n fitness = vector.fitness()\n attempts = Solution.max_attempts\n return Solution(vector, fitness, attempts)",
"def create():\n data = request.form or request.get_json()\n team_id = str(data[\"team_id\"])\n force_new = data[\"force_new\"]\n\n if force_new:\n challenges[team_id] = \"CHALLENGE_DETAILS-\" + str(random.randint(0, 1000000000))\n\n try:\n challenges[team_id]\n except KeyError:\n challenges[team_id] = \"CHALLENGE_DETAILS-\" + str(random.randint(0, 1000000000))\n\n return challenges[team_id]",
"def create_virtual_challenge(self, data):\n # TODO: add model for virtual challenge to aid in posting\n response = self.post(\n endpoint=\"/virtualchallenge\",\n body=data)\n return response",
"def makePolicy(mdp,Q):\r\n # A policy is an action-valued dictionary P[s] where s is a state\r\n P = dict()\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n return P",
"def genproof(publickey, data, authenticators, challenge):\n pass",
"def test_challenge_obj():\n challenge_dict = {\n \"id\": \"challenge_1\",\n \"projectId\": \"project_1\",\n \"participantTeamId\": \"team_1\",\n \"etag\": \"etag_1\",\n }\n challenge_obj = Challenge(**challenge_dict)\n assert challenge_obj.to_dict() == challenge_dict",
"def _init_objective(self) -> None:\n raise NotImplementedError(\"You should implement this!\")",
"def PLATFORM_CREATE_OBJECTIVE(self):\n\t\treturn \"Here is how to create a new objective\"",
"def _create_practice_exam(self):\n return create_exam(\n course_id=self.course_id,\n content_id=self.content_id_practice,\n exam_name=self.exam_name,\n time_limit_mins=self.default_time_limit,\n is_practice_exam=True,\n is_proctored=True\n )",
"def create(self,*args,**kwargs):\n raise NotImplementedError(\"Each question must implement the create method\")",
"def newQuestion():\n x = random.randint(MIN_NUMBER,MAX_NUMBER)\n y = random.randint(MIN_NUMBER,MAX_NUMBER)\n correctResult = Operation(x,y)\n proposedResult = noisyOperation(x,y)\n question = str(x)+\" \"+operationLabel+\" \"+str(y)+\" = \"+str(proposedResult)+\" ?\"\n comment = str(x)+\" \"+operationLabel+\" \"+str(y)+\" = \"+str(correctResult)\n return (question,proposedResult==correctResult,comment)",
"def initializeProblem(self):\n self.replaceExternalFunctionsWithVariables()\n self.initial_decision_bounds = {}\n for var in self.decision_variables:\n self.initial_decision_bounds[var.name] = [var.lb, var.ub]\n self.createConstraints()\n self.data.basis_constraint.activate()\n objective_value, _, _ = self.solveModel()\n self.data.basis_constraint.deactivate()\n self.updateSurrogateModel()\n feasibility = self.calculateFeasibility()\n self.data.sm_constraint_basis.activate()\n return objective_value, feasibility",
"def create(question, answer):\n new_question = Question(question=question, answer = answer)\n new_question.save()\n print(\"Questão salva no banco de dados\")\n return new_question",
"def create_instance_wfg7(num_distance_params, num_position_params, num_objectives):\n return _wfg_problem_instance('wfg7', num_distance_params, num_position_params, num_objectives)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create programming challenge implementation object.
|
def create_programming_challenge_implementation(self, topic,
language,
challenge,
expected_result="<p>Example result.</p>",
hints="<p>Example hints.</p>",
solution="<p>Example solution.</p>",
):
implementation = ProgrammingChallengeImplementation(
topic=topic,
language=language,
challenge=challenge,
expected_result=expected_result,
hints=hints,
solution=solution,
languages=["en"],
)
implementation.save()
return implementation
|
[
"def create_programming_challenge(self, topic, number,\n difficulty,\n challenge_set_number=1,\n challenge_number=1,\n content=\"<p>Example content.</p>\",\n testing_examples=\"<p>Testing example</p>\",\n extra_challenge=\"<p>Example challenge.</p>\",\n ):\n challenge = ProgrammingChallenge(\n topic=topic,\n slug=\"challenge-{}\".format(number),\n name=\"Challenge {}.{}: {}\".format(\n challenge_set_number,\n challenge_number,\n number,\n ),\n challenge_set_number=challenge_set_number,\n challenge_number=challenge_number,\n content=content,\n testing_examples=testing_examples,\n extra_challenge=extra_challenge,\n difficulty=difficulty,\n languages=[\"en\"],\n )\n challenge.save()\n return challenge",
"def create_problem(self) -> \"MallooviaLp\":\n\n # Create the linear programming problem\n self.pulp_problem = LpProblem(self.system.name, LpMinimize)\n\n # Once we have the variables represented as tuples, we use\n # the tuples to create the linear programming variables for pulp\n self._create_variables()\n\n # Create the goal function\n self._cost_function()\n\n # Add all restrictions indicated with functions *_restriction\n # in this class\n self._add_all_restrictions()\n\n return self",
"def new(self):\n if not self.key or not self.keyLock:\n kt = tuple([i for i,m in enumerate([self.maj,self.min]) if m])\n i = random.randint(0,11)\n q = random.choice(kt)\n self.key = challenge.keys12[i][q]\n self.answerLab.config(text='')\n if not any([self.I,self.II,self.III,self.IV,self.V,self.VI,self.VII]):\n self.I_f()\n if not any([self.maj,self.min]):\n self.maj_f()\n self.challenge = challenge.Challenge(key=self.key,\n mode=self.mode.get(),\n validInts=self.intervals,\n nIntervals=int(self.Nint.get()),\n inversion=self.inversion,\n octave = self.oct)\n self.playB.config(state='normal')\n self.playChB.config(state='normal')\n self.revealB.config(state='normal')\n [b.config(state='normal') for b in self.playIntBs+self.playInt2Bs]",
"def new():\n\n vector = Vector.new()\n fitness = vector.fitness()\n attempts = Solution.max_attempts\n return Solution(vector, fitness, attempts)",
"def makePolicy(mdp,Q):\r\n # A policy is an action-valued dictionary P[s] where s is a state\r\n P = dict()\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n return P",
"def create_programming_language(self, number):\n language = ProgrammingChallengeLanguage(\n slug=\"language-{}\".format(number),\n name=\"Language {}\".format(number),\n number=number,\n languages=[\"en\"],\n )\n language.save()\n return language",
"def _init_objective(self) -> None:\n raise NotImplementedError(\"You should implement this!\")",
"def __init__(self, solver):\n self.solver = solver",
"def create_challenge(self, data):\n if self.is_challenge_model(data):\n data = ChallengeModel.to_dict(data)\n response = self.post(\n endpoint=\"/challenge\",\n body=data)\n return response",
"def __init__(self):\n\n self.code = []\n self.num_hints = 0\n self.game_mode = \"user_guess\"\n self.num_guesses = 8\n self.guess = []\n self.temp_index = [0, 1, 2, 3] # keep track of which index of the code has been \"hinted\"\n self.guess_set = []\n self.num_rounds = 1\n\n self.user_points = 0\n self.computer_points = 0\n\n self.UI = UI() # Initializes UI iterface",
"def __init__(self, optimizer):\n assert isinstance(optimizer, Optimizer)\n\n self.aborted = False\n self.best = Solution()\n self.elapsed = datetime.timedelta(seconds=0)\n self.exit_condition = None\n self.iteration = 0\n self.optimizer = optimizer\n self.particles = []\n self.start = datetime.datetime.now()",
"def solve_algorithm(self):\n self.algorithm.solve()",
"def __init__(self, algorithm: GeneratorAlgorithm) -> None:\n self.algorithm = algorithm",
"def create(self,*args,**kwargs):\n raise NotImplementedError(\"Each question must implement the create method\")",
"def test_interface():\n import pKaTool.pKa_calc\n X = pKaTool.pKa_calc.Monte_Carlo_Mult_CPP()\n\n X.intrinsic_pKa = {':0001:ASP': [0.0, 4.0, 5.0]}\n X.charged_state = {':0001:ASP': [0, 1, 1]}\n X.acid_base = {':0001:ASP': -1}\n X.intene_mult = {':0001:ASP': {':0001:ASP': [[0, 0, 0], [0, 0, 0], [0, 0, 0]]}}\n X._calc_pKas(0.0, 10.0, 0.5)\n return",
"def codeGeneration(chip):\n\n # Header of the class\n code = 'class ' + chip.get('name') + '():\\n\\n'\n\n\n # Initalize output variables\n for i in chip.get('returns'):\n if i.split(':')[1] == 'std_logic':\n code += '\\t' + i.split(':')[0] + ' = False\\n'\n\n\n for i in chip.get('logic'):\n i = i.split('<=')\n i[0] = i[0].rstrip()\n i[1] = i[1].lstrip()\n\n i[1] = toTokenCode(i[1])\n if type(i[1]) == str:\n i[1] = [i[1]]\n\n print(i[1])\n ##Pick up from here, you need to make it so the array that comes back\n ##All the chips are created like in example PreBuilt.py -> XORs = {}\n ##Then below at somepoint actually create the logic noob\n\n\n code += '\\n'\n\n\n # Init function\n code += '\\tdef __init__(self'\n for i in chip.get('params'):\n if i.split(':')[1] == 'std_logic':\n code += ', ' + i.split(':')[0]\n code += '):\\n'\n\n for i in chip.get('params'):\n if i.split(':')[1] == 'std_logic':\n code += '\\t\\tself.' + i.split(':')[0] + ' = ' + i.split(':')[0] + '\\n'\n code += '\\n'\n\n\n # Get for outputs\n code += '\\t def getOutputs(self):\\n'\n code += '\\t\\treturn {'\n for i in chip.get('returns'):\n if i.split(':')[1] == 'std_logic':\n code += '\\'' + i.split(':')[0] + '\\':self.' + i.split(':')[0] + ', '\n code = code[:-2] + '}\\n'\n\n return code",
"def __init__(self):\n self.schemes_ = {}\n self.addScheme(DataSchemeBase())\n self.addScheme(DataSchemeWennerAlpha())\n self.addScheme(DataSchemeWennerBeta())\n self.addScheme(DataSchemeDipoleDipole())\n self.addScheme(DataSchemeSchlumberger())\n self.addScheme(DataSchemePolePole())\n self.addScheme(DataSchemePoleDipole())\n self.addScheme(DataSchemeHalfWenner())\n self.addScheme(DataSchemeMultipleGradient())\n\n self.addScheme(DataSchemeBase(typ=Pseudotype.A_M, name='A_M'))\n self.addScheme(DataSchemeBase(typ=Pseudotype.AB_MN, name='AB_MN'))\n self.addScheme(DataSchemeBase(typ=Pseudotype.AB_M, name='AB_M'))\n self.addScheme(DataSchemeBase(typ=Pseudotype.AB_N, name='AB_N'))",
"def solution(data):\n\t\treturn data",
"def create_instance_wfg7(num_distance_params, num_position_params, num_objectives):\n return _wfg_problem_instance('wfg7', num_distance_params, num_position_params, num_objectives)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create learning outcome object.
|
def create_learning_outcome(self, number):
outcome = LearningOutcome(
slug="outcome-{}".format(number),
text="Outcome {}".format(number),
languages=["en"],
)
outcome.save()
return outcome
|
[
"def sample_outcome(self, state: State, action: Action):\n pass",
"def learn(self, reward, observation):",
"def set_outcome(self, outcome):\n self.outcome = outcome\n self.y = self._makey()",
"def add_outcomes(self):\n study_outcomes = StudyOutcomes()\n for outcome_type in (\"primary\", \"secondary\", \"other\"):\n for protocol_outcome in glom(\n self._data, \"{}_outcome\".format(outcome_type), default=[]\n ):\n study_outcomes.add_outcome(outcome_type, protocol_outcome)\n self._outcomes = study_outcomes",
"def __init__(self, num_outcomes=2, device=None):\n self.recorded = torch.zeros(num_outcomes * num_outcomes, dtype=torch.int32, device=device)\n self.num_outcomes = num_outcomes",
"def test_MLPGeneratorAgent():\n\n # Define variables\n num_actions = 3\n history_length = 3 \n hidden_sizes = [5,5]\n num_agents = 3\n\n mlp = MLPGeneratorAgent(num_actions, history_length, hidden_sizes, num_agents)\n print(\"\\nmlp: \", mlp)\n\n # Generate integer actions from input\n (logits_t0, action_t0) = mlp.act(t=0, inputs=[], num_actions=num_actions)\n print(\"\\nt = 0: logits_t0: \", logits_t0, \" action_t0: \", action_t0)\n\n # And again\n a1_action_t0 = 1\n a2_action_t0 = 2\n inputs = [action_t0, a1_action_t0, a2_action_t0]\n (logits_t1, action_t1) = mlp.act(t=1, inputs=inputs, num_actions=num_actions)\n print(\"\\nt = 0: logits_t1: \", logits_t1, \" action_t1: \", action_t1)\n\n # Check recorded interaction history\n print(\"\\nmlp history: \", mlp._interaction_history)",
"def sample_jointaction_outcome(self, state: State, jointaction: JointAction):\n pass",
"def learn(self, obs):\n pass",
"def test_learning_curve_output_with_objectives(self):\n # Test to validate learning curve output\n self.make_learning_curve_data()\n\n config_template_path = config_dir / \"test_learning_curve.template.cfg\"\n config_path = fill_in_config_paths(config_template_path)\n\n # run the learning curve experiment\n run_configuration(config_path, quiet=True, local=True)\n outprefix = \"test_learning_curve\"\n\n # make sure that the TSV file is created with the right columns\n output_tsv_path = output_dir / f\"{outprefix}_summary.tsv\"\n self.assertTrue(output_tsv_path.exists())\n with open(output_tsv_path) as tsvf:\n r = csv.reader(tsvf, dialect=csv.excel_tab)\n header = next(r)\n # make sure we have the expected number of columns\n self.assertEqual(len(header), 13)\n num_rows = len(list(r))\n # we should have 2 featuresets x 3 learners x 2 objectives x 5 (default)\n # training sizes = 60 rows\n self.assertEqual(num_rows, 60)\n\n # make sure that the four PNG files (two per featureset) are created\n for featureset_name in [\"test_learning_curve1\", \"test_learning_curve2\"]:\n path_score = output_dir / f\"{outprefix}_{featureset_name}.png\"\n path_time = output_dir / f\"{outprefix}_{featureset_name}_times.png\"\n self.assertTrue(path_score.exists())\n self.assertTrue(path_time.exists())",
"def generateoutcomes(X, beta, feat_spec, beta_spec, sigma=1, w=0.5, o=0, **args):\n # Initialize variables\n sigma = sigma\n w = w\n o = o\n spec_idx = [X.index.get_loc(feat) for feat in feat_spec if feat in X]\n X_spec = X[feat_spec]\n X_non_spec = X[set(range(len(X))) - set(spec_idx)]\n beta = np.array(beta)[list(set(range(len(X))) - set(spec_idx))]\n beat_spec = np.array(beta_spec)\n\n # Generate outcomes\n \n t0 = np.exp(np.matmul(X_non_spec + w, np.transpose(beta))+np.matmul(X_spec, np.transpose(beta_spec)))\n t1 = (np.matmul(X_non_spec + w, np.transpose(beta))+np.matmul(X_spec, np.transpose(beta_spec))) - o\n y_t0 = np.random.normal(t0, sigma, 1)[0]\n y_t1 = np.random.normal(t1, sigma, 1)[0]\n\n return (t0, t1, y_t0, y_t1)",
"def _create_learning_related_mechanisms(self,\n input_source,\n output_source,\n error_function,\n learning_function,\n learned_projection,\n learning_rate,\n learning_update):\n\n if isinstance(learning_function, type):\n if issubclass(learning_function, TDLearning):\n creation_method = self._create_td_related_mechanisms\n elif issubclass(learning_function, Reinforcement):\n creation_method = self._create_rl_related_mechanisms\n else:\n raise CompositionError(f\"'learning_function' argument for add_linear_learning_pathway \"\n f\"({learning_function}) must be a class of {LearningFunction.__name__}\")\n\n target_mechanism, objective_mechanism, learning_mechanism = creation_method(input_source,\n output_source,\n error_function,\n learned_projection,\n learning_rate,\n learning_update)\n\n elif is_function_type(learning_function):\n target_mechanism = ProcessingMechanism(name='Target')\n objective_mechanism = ComparatorMechanism(name='Comparator',\n sample={NAME: SAMPLE,\n VARIABLE: [0.], WEIGHT: -1},\n target={NAME: TARGET,\n VARIABLE: [0.]},\n function=error_function,\n output_ports=[OUTCOME, MSE],\n )\n learning_mechanism = LearningMechanism(\n function=learning_function(\n default_variable=[input_source.output_ports[0].value,\n output_source.output_ports[0].value,\n objective_mechanism.output_ports[0].value],\n learning_rate=learning_rate),\n default_variable=[input_source.output_ports[0].value,\n output_source.output_ports[0].value,\n objective_mechanism.output_ports[0].value],\n error_sources=objective_mechanism,\n learning_enabled=learning_update,\n in_composition=True,\n name=\"Learning Mechanism for \" + learned_projection.name)\n else:\n raise CompositionError(f\"'learning_function' argument of add_linear_learning_pathway \"\n f\"({learning_function}) must be a class of {LearningFunction.__name__} or a \"\n f\"learning-compatible function\")\n\n learning_mechanism.output_ports[ERROR_SIGNAL].parameters.require_projection_in_composition.set(False,\n override=True)\n return target_mechanism, objective_mechanism, learning_mechanism",
"def add_outcome(self, number, outcome):\n self.bins[number].add(outcome)\n self.all_outcomes[outcome.name] = outcome",
"def __init__(self, learning_rate = 0.05, training_proportion = 0.66):\n self.output_layer = []\n self.learning_rate = learning_rate\n self.training_proportion = training_proportion",
"def has_outcome(self, outcome, null=True):\n raise NotImplementedError",
"def create_training_example(background,activates,negatives):\n background=background-20\n #initialize y(label vector) of zeros\n y=np.zeros((1,Ty))\n #initialize segment times as empty list\n previous_segment=[]\n # Select 0-4 random \"activate\" audio clips from the entire list of \"activates\" recordings\n number_of_activates=np.random.randint(0,5)\n random_indices=np.random.randint(len(activates),size=number_of_activates)\n random_activates=[activates[i] for i in random_indices]\n \n for random_activate in random_activates:\n background,segment_time=insert_audio_clip(background,random_activate,previous_segments)\n segment_start,segment_end=segment_time\n y=insert_ones(y,segment_end)\n \n number_of_negatives=np.random.randint(0,3)\n random_indices=np.random.randint(len(negatives),size=number_of_negatives)\n random_negatives=[negatives[i] for i in random_indices]\n \n for random_negative in random_negatives:\n back_ground,_=insert_audio_clip(background,random_negative,previous_segments)\n \n # Standardize the volume of the audio clip \n background=match_target_amplitude(background,-20.0)\n file_handle=background.export(\"train\"+\".wav\",format=\"wav\")\n print(\"File (train.wav) was saved in your directory.\")\n x=graph_spectrogram(\"train.wav\")\n return x,y",
"def create_classifier():\n\n # Logistic Regression\n return LogisticRegression(penalty='l2', max_iter=1000, C=1, random_state=42)",
"def outcome(self, new_value):\n # XXX: it would be nice if we could not do this remapping.\n if new_value == \"none\":\n new_value = None\n self.native.outcome = new_value",
"def test_predict_future_reward(self):\n good_sequence = [\n ([0,0,0,0],1,[0,0,0,1]),\n ([0,0,0,1],0,[1,0,1,0]),\n ([1,0,1,0],1,[1,1,1,1]),\n ]\n bad_sequence = [\n ([0,0,0,0],0,[1,0,0,1]),\n ([1,0,0,1],1,[0,0,1,0]),\n ([0,0,1,0],1,[0,1,1,1]),\n ]\n def expand(r, final_reward):\n results = []\n for i,(state,action,new_state) in enumerate(r):\n record = {\n 'state': np.array(state,'f'),\n 'new_state': np.array(new_state,'f'),\n 'action': action,\n 'done': i >= len(r),\n 'reward': final_reward\n }\n results.append(record)\n assert results[-1]['reward'] == final_reward\n return results \n records = expand(good_sequence,1.0) + expand(bad_sequence,-1.0)\n print(records)\n records = records * 256\n model = main.build_model(env)\n main.train_model( model, records, env, batch_size=8)\n for (state,action,new_state) in good_sequence:\n prediction = main.predict(model,state)\n assert np.argmax(prediction) == action, (state,action,prediction)\n \n for (state,action,new_state) in bad_sequence:\n prediction = main.predict(model,state)\n assert np.argmax(prediction) != action, (state,action,prediction)",
"def predict(wf_obj):\n\n return {\n 'Action': pred([wf_obj.get_errors(True)])[0]\n }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create classroom resource object.
|
def create_classroom_resource(self, number):
resource = ClassroomResource(
slug="resource-{}".format(number),
description="Resource {}".format(number),
languages=["en"],
)
resource.save()
return resource
|
[
"def _create_new_classroom(\n classroom: classroom_config_domain.Classroom\n) -> None:\n classroom.validate()\n classroom_models.ClassroomModel.create(\n classroom.classroom_id,\n classroom.name,\n classroom.url_fragment,\n classroom.course_details,\n classroom.topic_list_intro,\n classroom.topic_id_to_prerequisite_topic_ids\n )",
"def create_resource(self, name):\n raise NotImplementedError",
"def create_resource(resource_id, resource_type, **kwargs):\n if resource_type not in _RESOURCE_TYPE_MAP:\n return None\n resource_type = _RESOURCE_TYPE_MAP[resource_type]\n if not resource_type.get('can_create_resource'):\n return None\n\n return resource_type.get('class')(\n resource_id, **kwargs)",
"def create(cls, **kwargs):",
"def create_resource(\n self,\n name: str,\n model: str,\n address: str,\n family: str = \"\",\n parent_path: str = \"\",\n create_new_resources_if_exists: bool = False,\n ) -> str:\n logger.info(f\"Creating the resource {name}\")\n logger.debug(f\"{name=}, {model=}, {address=}, {family=}, {parent_path=}\")\n while True:\n try:\n self._api.CreateResource(\n family, model, name, address, parentResourceFullPath=parent_path\n )\n except CloudShellAPIError as e:\n if str(e.code) != \"114\":\n raise\n if create_new_resources_if_exists:\n name = generate_new_resource_name(name)\n else:\n break\n else:\n break\n logger.debug(f\"Created the resource {name}\")\n return name",
"def create_resource(self, *args, **kwargs):\n target_uri = self._build_uri(*args, **kwargs)\n\n message, status_code = self.request(\n target_uri, POST, request_object=kwargs.get('payload'))\n\n if args:\n resource_type = args[2]\n elif not args and kwargs:\n resource_type = kwargs.get('resource_level')\n else:\n resource_type = None\n\n operation = 'Create {resource_type} resource'.format(\n resource_type=resource_type)\n\n self.check_status_code_success(\n operation, status_code, message)\n return message",
"def __new__(mcs, cls_name, superclasses, attributes):\n if hasattr(attributes, '__body__'):\n # Check that the body schema is valid\n try:\n Draft4Validator.check_schema(attributes['__body__'])\n except jsonschema.ValidationError:\n raise jsonschema.ValidationError(\n f'Invalid body schema declared for resource {cls_name}')\n\n if hasattr(attributes, '__params__'):\n # Check that the params schema is valid\n try:\n Draft4Validator.check_schema(attributes['__params__'])\n except jsonschema.ValidationError:\n raise jsonschema.ValidationError(\n f'Invalid params schema declared for resource {cls_name}')\n\n # Create the class\n return super(ResourceMeta, mcs).__new__(mcs, cls_name, superclasses,\n attributes)",
"def create_resource_from_json(resource_type, parent, json_string):\n if resource_type not in _RESOURCE_TYPE_MAP:\n return None\n resource_type = _RESOURCE_TYPE_MAP[resource_type]\n if not resource_type.get('can_create_resource'):\n return None\n\n return resource_type.get('class').from_json(parent, json_string)",
"def create_resource():\n schema = get_schema()\n deserializer = RequestDeserializer(schema)\n serializer = ResponseSerializer(schema)\n controller = TagsController()\n return wsgi.Resource(controller, deserializer, serializer)",
"def create_resource():\n # deserializer = ImageDeserializer()\n # serializer = ImageSerializer()\n func = lambda x: x\n return wsgi.Resource(RouteManager(), func)",
"def create_resource(conf):\n controller = RootController()\n return wsgi.Resource(controller)",
"def construct_resource(\n self, name: str, input_type: st.Type, output_type: st.Type\n ) -> st.Resource:\n if name in self._resource_cache:\n return self._resource_cache[name]\n\n machine_cls = type(\n PulumiResourceMachine.__name__,\n (PulumiResourceMachine,),\n {\"UP\": st.State(\"UP\", input_type, output_type)},\n )\n\n resource = machine_cls(name, self)\n self._resource_cache[name] = resource\n\n return resource",
"def room_create(self):\n\t\treturn self.app.put('/room/create')",
"def create_resource(options):\r\n deserializer = wsgi.JSONRequestDeserializer()\r\n return wsgi.Resource(StackController(options), deserializer)",
"def create_resource():\n deserializer = TemplateConfigSetDeserializer()\n serializer = TemplateConfigSetSerializer()\n return wsgi.Resource(Controller(), deserializer, serializer)",
"def _create(cls, model_class, *args, **kwargs):\n return model_class(*args, **kwargs)",
"def _create_target_resource(self, target_project, res_spec, res_id,\n res_parent_id=None, payload=None, key=None):\n project_id = target_project\n rid = res_id\n name = None\n\n # fetch IDs from payload if possible\n if payload:\n if isinstance(payload, dict):\n name = payload.get(res_spec.name_field)\n # some custom ID fields are no UUIDs/strings but just integers\n rid = rid or str(payload.get(res_spec.id_field))\n\n project_id = (target_project or payload.get('project_id')\n or payload.get('tenant_id'))\n else:\n project_id = target_project\n self._log.warning(\n \"mapping error, malformed resource payload %s (no dict) in bulk operation on resource: %s\",\n payload,\n res_spec)\n\n type_uri = res_spec.el_type_uri if rid else res_spec.type_uri\n rid = _make_uuid(rid or res_parent_id or taxonomy.UNKNOWN)\n target = OpenStackResource(project_id=project_id, id=rid,\n typeURI=type_uri, name=name)\n\n # provide name of custom keys in attachment of target\n if key:\n target.add_attachment(Attachment(typeURI=\"xs:string\",\n content=key, name='key'))\n\n return target",
"def test_create_resource(self):\r\n r = Resource.objects.create(\r\n name='Resource Model Test', slug='resource_model_test',\r\n i18n_type='PO', source_language=self.language_en,\r\n project=self.project\r\n )\r\n self.assertTrue(r)",
"def create_resource(options):\r\n deserializer = wsgi.JSONRequestDeserializer()\r\n serializer = serializers.JSONResponseSerializer()\r\n return wsgi.Resource(\r\n SoftwareConfigController(options), deserializer, serializer)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add relationship between challenge and lesson objects.
|
def add_challenge_lesson_relationship(self, challenge, lesson, set_number, number):
relationship = ProgrammingChallengeNumber(
programming_challenge=challenge,
lesson=lesson,
challenge_set_number=set_number,
challenge_number=number,
)
relationship.save()
|
[
"def add_lesson_resource_relationship(self, lesson, resource, number):\n relationship = ResourceDescription(\n lesson=lesson,\n resource=resource,\n description=\"Description {}\".format(number),\n )\n relationship.save()",
"def add_rel_person(self, added):\n self.rel.append(added)\n added.parent = self",
"def __add_related_objects(self, obj, idref, type_, relationship=\"Contained Within\"):\n related_object = cybox.RelatedObjectType(idref=idref, type_=type_, relationship=relationship)\n related_objects = obj.get_Related_Objects()\n if not related_objects:\n related_objects = cybox.RelatedObjectsType()\n obj.set_Related_Objects(related_objects)\n\n related_objects.add_Related_Object(related_object)",
"def add(self, lesson: Lesson):\n self.lessons.append(lesson)",
"def test_get_retest_problems(self):\n for missed in [\n {'operand1' : 1, 'operand2' : 4, 'answer' : 6},\n {'operand1' : 2, 'operand2' : 4, 'answer' : 5},\n {'operand1' : 2, 'operand2' : 4, 'answer' : 6},\n ]:\n problem = Problem(\n level=1, problem_type=ProblemType.ADDITION, operand1=missed['operand1'],\n operand2=missed['operand2'])\n result = Result()\n result.level = 1\n result.round = 1\n result.problemid = problem.id\n result.studentid = self.student.id\n result.answer = missed['answer']\n db.session.add(result)\n \n lesson = Lesson(self.student)",
"def addRelationship (self, relationship):\n node1 = list(self.graph_db.find(relationship['src_label'], 'node_id',\n relationship['src_id']))\n node2 = list(self.graph_db.find(relationship['dst_label'], 'node_id',\n relationship['dst_id']))\n\n if len(node1) > 0 and len(node2) > 0:\n\n rel = Relationship(node1[0], relationship['rel_type'], node2[0])\n for key, value in relationship.items():\n rel.properties[key] = value\n self.graph_db.create(rel)\n return True\n else:\n log.debug(\"nodes do not exist in the DB\")\n return False",
"def test_create_relationship():\n from FeedMitreAttackv2 import create_relationship\n relation = create_relationship(RELATION.get('response'), ID_TO_NAME)\n relation._entity_a = 'entity a'\n relation._entity_a_type = 'STIX Malware'\n relation._entity_b = 'entity b'\n relation._entity_b_type = 'STIX Attack Pattern'\n relation._name = 'uses'\n relation._relation_type = 'IndicatorToIndicator'\n relation._reverse_name = 'used-by'",
"def test_new_lesson_creation(self):\n local_user = self.create_and_return_local_user()\n course_id = new_course({\n 'teacher' : local_user.key.id(),\n 'title' : 'foo course',\n 'body' : 'hey look mom',\n })\n unit_id = new_unit({\n 'course' : course_id, \n 'title' : 'foo unit',\n 'body' : 'bla bla unit body',\n })\n lesson_id = new_lesson({\n 'unit' : unit_id, \n 'title' : 'foo lesson',\n 'body' : 'lesson about stuff'\n })\n unit = ndb.Key('Curriculum', unit_id).get()\n course = ndb.Key('Curriculum', course_id).get()\n lesson = ndb.Key('Curriculum', lesson_id).get()\n\n # check that the correct content properties were set\n self.assertEqual(lesson.content['title'], 'foo lesson')\n self.assertEqual(lesson.content['body'], 'lesson about stuff')\n # check that the correct inferred properties were set\n self.assertEqual(lesson.content['course'], course_id)\n self.assertEqual(lesson.content['unit'], unit_id)\n self.assertEqual(lesson.content['teacher'], int(local_user.key.id()))\n self.assertEqual(lesson.content_type, 'lesson')\n # check that the parent unit correctly had this new lesson appended\n self.assertIn(lesson_id, unit.content['lessons'])",
"def add_coach(self, other):\n if other in self.coaches:\n raise DuplicateCoachError(other.name + \" is already a coach of \" + self.name)\n other.students.append(self)\n self.coaches.append(other)",
"def adopt(self):\n valid_relationships = set(Relationship._instances.keys())\n\n relationships = [\n (parent, relation.complement(), term.id)\n for term in six.itervalues(self.terms)\n for relation in term.relations\n for parent in term.relations[relation]\n if relation.complementary\n and relation.complementary in valid_relationships\n ]\n\n relationships.sort(key=operator.itemgetter(2))\n\n for parent, rel, child in relationships:\n\t #print parent, rel, child\n if rel is None:\n break\n\n try:\n parent = parent.id\n except AttributeError:\n pass\n\n if parent in self.terms:\n try:\n if child not in self.terms[parent].relations[rel]:\n self.terms[parent].relations[rel].append(child)\n except KeyError:\n self[parent].relations[rel] = [child]\n\n del relationships",
"def _add_to_relation(self, query, relationname, toadd=None):\n submodel = get_related_model(self.model, relationname)\n if isinstance(toadd, dict):\n toadd = [toadd]\n for dictionary in toadd or []:\n subinst = get_or_create(self.session, submodel, dictionary)\n try:\n for instance in query:\n getattr(instance, relationname).append(subinst)\n except AttributeError as exception:\n current_app.logger.exception(str(exception))\n setattr(instance, relationname, subinst)",
"def _add_to_relation(self, query, relationname, toadd=None):\r\n submodel = get_related_model(self.model, relationname)\r\n if isinstance(toadd, dict):\r\n toadd = [toadd]\r\n for dictionary in toadd or []:\r\n subinst = get_or_create(self.session, submodel, dictionary)\r\n try:\r\n for instance in query:\r\n getattr(instance, relationname).append(subinst)\r\n except AttributeError as exception:\r\n current_app.logger.exception(str(exception))\r\n setattr(instance, relationname, subinst)",
"def link_housing(tx):\n tx.run(\n \"MATCH (p:people {name:'Snow White'}), (h:house {name:'Castle'}) \"\n \"create (p)-[r:LIVES_IN]->(h) \"\n )\n tx.run(\n \"MATCH (p:people {gender:'M'}), (h:house {name:'Dwarf House'}) \"\n \"create (p)-[r:LIVES_IN]->(h) \"\n )\n tx.run(\n \"MATCH (p:people {gender:'F'}), (h:house {name:'Dwarf House'}) \"\n \"create (p)-[r:WORKS_IN]->(h) \"\n )\n tx.run(\n \"MATCH (p:people {gender:'M'}), (h:house {name:'Mine'}) \"\n \"create (p)-[r:WORKS_IN]->(h) \"\n )",
"def create_relationship(client: Client, indicator: str, threats: List, entity_a_type: str) -> List:\n relationships = []\n if client.create_relationships:\n for threat in threats:\n for block in threat.get('blockSet', {}):\n relationships.append(\n EntityRelationship(name='related-to',\n entity_a=indicator,\n entity_a_type=entity_a_type,\n entity_b=block.get('data'),\n entity_b_type=check_indicator_type(block.get('data')),\n brand=BRAND))\n for exec_set in threat.get('executableSet', {}):\n relationships.append(\n EntityRelationship(name='related-to',\n entity_a=indicator,\n entity_a_type=entity_a_type,\n entity_b=exec_set.get('md5Hex'),\n entity_b_type=FeedIndicatorType.File,\n brand=BRAND))\n return relationships",
"def test_element_add_relationship_twice_is_ok():\n element1 = ConcreteElement(name=\"elt1\")\n element2 = ConcreteElement(name=\"elt1\")\n model = MockModel()\n element1.set_model(model)\n relationship = element1.add_relationship(destination=element2)\n element1.add_relationship(relationship)\n assert element1.relationships == {relationship}",
"def test_add_coach_iterative(self):\n A = User()\n coaches = [User() for user in range(5)]\n A.add_coach(coaches)\n self.assertEqual(A._User__coached_by, set(coaches))\n for c in coaches:\n self.assertEqual(c.students(), set([A]))",
"def add_relation(self, relation):\n \n if self.has_id(relation.relation_id):\n raise KeyError(\"relation_id '%s' already seen before.\" % \\\n relation.relation_id)\n self._relation_ids.add(relation.relation_id)\n for entity in relation.entities:\n if not self.entity_matcher.has_id(entity.entity_id):\n self.entity_matcher.add_entity(entity)\n self._relations.append(relation)\n for entity in relation.entities:\n entity_id = entity.entity_id\n if entity_id not in self._relation_dict.keys():\n self._relation_dict[entity_id] = set()\n self._relation_dict[entity_id].add(relation)",
"def handle_interaction_add_relationship_type(\n self, module_num, relationship, object_name1, object_name2\n ):\n with DBContext(self) as (connection, cursor):\n return self.add_relationship_type(\n module_num, relationship, object_name1, object_name2, cursor\n )",
"def create_lesson():\n\n ### SAVE LESSON TO DATABASE ###\n # Set up default lesson data dict\n lesson_data = {\n 'title': 'Untitled', \n 'author_id': session['user_id'],\n 'overview': '', \n 'imgUrl': None,\n 'public': False,\n }\n\n ### UPLOAD PHOTO TO CLOUDINARY AND ATTACH URL ###\n if 'lesson-pic' not in request.files:\n lesson_data['imgUrl'] = \"/static/img/placeholder.png\"\n else: \n my_file = request.files['lesson-pic']\n result = cloudinary.uploader.upload(my_file, api_key=CLOUD_KEY, \n api_secret=CLOUD_SECRET,\n cloud_name='hackbright')\n lesson_data['imgUrl'] = result['secure_url']\n \n ### SAVE LESSON TO DATABASE ###\n lesson_data['title'] = request.form['title']\n lesson_data['overview'] = request.form['overview']\n db_lesson = crud.create_lesson(lesson_data)\n\n ### CREATE DB ASSOCIATION BETWEEN TAGS AND LESSON ###\n tags = request.form['tags'].split(',') # eg. '6th,science'\n # Right now, setting up new tag with id of \"tag\"\n for tag in tags:\n if tag in SUBJECTS: \n db_tag = crud.get_tag_by_name(tag)\n elif tag in GRADES: \n db_tag = crud.get_tag_by_name(tag)\n crud.assign_tag_to_lesson(db_tag, db_lesson)\n\n ### CREATE DB ASSOCIATION BETWEEN COMPONENTS AND LESSON ###\n if request.form['component-ids']:\n component_ids = request.form['component-ids'].split(',') # e.g. '30,31'\n for comp_id in component_ids:\n db_comp = crud.get_comp_by_id(int(comp_id))\n crud.assign_comp(db_comp, db_lesson)\n \n try: \n return {'success': True, 'lesson_id': db_lesson.lesson_id}\n except: \n print('Except something done broke')\n return {'success': False}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create glossary term object.
|
def create_glossary_term(self, number):
term = GlossaryTerm(
slug="term-{}".format(number),
term="Term {}".format(number),
definition="Defintion for term {}".format(number),
)
term.save()
return term
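        # A minimal usage sketch, not part of the original helper. Assumption:
        # this method lives on a Django test-data generator class (here called
        # "generator", a hypothetical name), as the companion helpers suggest.
        # term = generator.create_glossary_term(1)   # saves and returns "Term 1"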
|
[
"def test_glossary_term_create(self):\n pass",
"def _create_term_definition(self, active_ctx, local_ctx, term, defined):\n if term in defined:\n # term already defined\n if defined[term]:\n return\n # cycle detected\n raise JsonLdError(\n 'Cyclical context definition detected.',\n 'jsonld.CyclicalContext', {\n 'context': local_ctx,\n 'term': term\n }, code='cyclic IRI mapping')\n\n # now defining term\n defined[term] = False\n\n if _is_keyword(term):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; keywords cannot be overridden.',\n 'jsonld.SyntaxError', {'context': local_ctx, 'term': term},\n code='keyword redefinition')\n\n if term == '':\n raise JsonLdError(\n 'Invalid JSON-LD syntax; a term cannot be an empty string.',\n 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid term definition')\n\n # remove old mapping\n if term in active_ctx['mappings']:\n del active_ctx['mappings'][term]\n\n # get context term value\n value = local_ctx[term]\n\n # clear context entry\n if (value is None or (\n _is_object(value) and '@id' in value and\n value['@id'] is None)):\n active_ctx['mappings'][term] = None\n defined[term] = True\n return\n\n # convert short-hand value to object w/@id\n _simple_term = False\n if _is_string(value):\n _simple_term = True\n value = {'@id': value}\n\n if not _is_object(value):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context property values must be '\n 'strings or objects.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid term definition')\n\n # create new mapping\n mapping = active_ctx['mappings'][term] = {'reverse': False}\n\n # make sure term definition only has expected keywords\n valid_keys = ['@container', '@id', '@language', '@reverse', '@type']\n if self._processing_mode(active_ctx, 1.1):\n valid_keys.extend(['@context', '@nest', '@prefix'])\n for kw in value:\n if kw not in valid_keys:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; a term definition must not contain ' + kw,\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid term definition')\n\n # always compute whether term has a colon as an optimization for _compact_iri\n _term_has_colon = ':' in term\n\n if '@reverse' in value:\n if '@id' in value:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; an @reverse term definition must '\n 'not contain @id.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid reverse property')\n if '@nest' in value:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; an @reverse term definition must '\n 'not contain @nest.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid reverse property')\n reverse = value['@reverse']\n if not _is_string(reverse):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @reverse value must be '\n 'a string.', 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid IRI mapping')\n\n # expand and add @id mapping\n id_ = self._expand_iri(\n active_ctx, reverse, vocab=True, base=False,\n local_ctx=local_ctx, defined=defined)\n if not _is_absolute_iri(id_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @reverse value must be '\n 'an absolute IRI or a blank node identifier.',\n 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid IRI mapping')\n mapping['@id'] = id_\n mapping['reverse'] = True\n elif '@id' in value:\n id_ = value['@id']\n if not _is_string(id_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @id value must be a '\n 'string.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid IRI mapping')\n if id_ != term:\n # add @id to mapping\n id_ = self._expand_iri(\n active_ctx, 
id_, vocab=True, base=False,\n local_ctx=local_ctx, defined=defined)\n if not _is_absolute_iri(id_) and not _is_keyword(id_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @id value must be '\n 'an absolute IRI, a blank node identifier, or a '\n 'keyword.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid IRI mapping')\n mapping['@id'] = id_\n mapping['_prefix'] = (\n not _term_has_colon\n and re.match('.*[:/\\?#\\[\\]@]$', id_)\n and (_simple_term or self._processing_mode(active_ctx, 1.0)))\n if '@id' not in mapping:\n # see if the term has a prefix\n colon = term.find(':')\n if colon != -1:\n prefix = term[0:colon]\n if prefix in local_ctx:\n # define parent prefix\n self._create_term_definition(\n active_ctx, local_ctx, prefix, defined)\n\n # set @id based on prefix parent\n if active_ctx['mappings'].get(prefix) is not None:\n suffix = term[colon + 1:]\n mapping['@id'] = (\n active_ctx['mappings'][prefix]['@id'] + suffix)\n # term is an absolute IRI\n else:\n mapping['@id'] = term\n else:\n # non-IRIs MUST define @ids if @vocab not available\n if '@vocab' not in active_ctx:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context terms must define '\n 'an @id.', 'jsonld.SyntaxError', {\n 'context': local_ctx,\n 'term': term\n }, code='invalid IRI mapping')\n # prepend vocab to term\n mapping['@id'] = active_ctx['@vocab'] + term\n\n # IRI mapping now defined\n defined[term] = True\n\n if '@type' in value:\n type_ = value['@type']\n if not _is_string(type_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @type value must be '\n 'a string.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid type mapping')\n if type_ != '@id' and type_ != '@vocab':\n # expand @type to full IRI\n type_ = self._expand_iri(\n active_ctx, type_, vocab=True,\n local_ctx=local_ctx, defined=defined)\n if not _is_absolute_iri(type_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; an @context @type value must '\n 'be an absolute IRI.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid type mapping')\n if type_.startswith('_:'):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; an @context @type values '\n 'must be an IRI, not a blank node identifier.',\n 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid type mapping')\n # add @type to mapping\n mapping['@type'] = type_\n\n if '@container' in value:\n container = JsonLdProcessor.arrayify(value['@container'])\n valid_containers = ['@list', '@set', '@index', '@language']\n is_valid = True\n has_set = '@set' in container\n\n if self._processing_mode(active_ctx, 1.1):\n valid_containers.extend(['@graph', '@id', '@type'])\n\n # check container length\n if '@list' in container:\n if len(container) != 1:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @container with @list must have no other values.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid container mapping')\n elif '@graph' in container:\n _extra_keys = [kw for kw in container if kw not in ['@graph', '@id', '@index', '@set']]\n if _extra_keys:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @container with @graph must have no other values ' +\n 'other than @id, @index, and @set',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid container mapping')\n else:\n is_valid = is_valid and (len(container) <= (2 if has_set else 1))\n else: # json-ld-1.0\n is_valid = is_valid and _is_string(value['@container'])\n\n # check against valid containers\n is_valid = is_valid and not [kw for kw in container if kw 
not in valid_containers]\n\n # @set not allowed with @list\n is_valid = is_valid and not (has_set and '@list' in container)\n\n if not is_valid:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @container value '\n 'must be one of the following: ' + ', '.join(valid_containers) + '.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid container mapping')\n _extra_reverse_keys = [kw for kw in container if kw not in ['@index', '@set']]\n if (mapping['reverse'] and _extra_reverse_keys):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @container value for '\n 'an @reverse type definition must be @index or @set.',\n 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid reverse property')\n\n # add @container to mapping\n mapping['@container'] = container\n\n # scoped contexts\n if '@context' in value:\n mapping['@context'] = value['@context']\n\n if '@language' in value and '@type' not in value:\n language = value['@language']\n if not (language is None or _is_string(language)):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @language value must be '\n 'a string or null.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid language mapping')\n # add @language to mapping\n if language is not None:\n language = language.lower()\n mapping['@language'] = language\n\n # term may be used as prefix\n if '@prefix' in value:\n if _term_has_colon:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @prefix used on a compact IRI term.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid term definition')\n if not _is_bool(value['@prefix']):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context value for @prefix must be boolean.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid @prefix value')\n mapping['_prefix'] = value['@prefix']\n\n # nesting\n if '@nest' in value:\n nest = value['@nest']\n if not _is_string(nest) or (nest != '@nest' and nest[0] == '@'):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @nest value must be ' +\n 'a string which is not a keyword other than @nest.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid @nest value')\n mapping['@nest'] = nest\n\n # disallow aliasing @context and @preserve\n id_ = mapping['@id']\n if id_ == '@context' or id_ == '@preserve':\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context and @preserve '\n 'cannot be aliased.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid keyword alias')",
"def makeGlossary():\n return render_template('maker/glossary.html', title=\"Maker - Glossary\", year=year)",
"async def pglossary_add(self, ctx, term, *, definition):\n await self._pglossary_add(ctx, term, definition, True)",
"def from_terms(cls, terms, name, description, refgen):\n self = cls.create(name, description, refgen)\n self.log(\"Adding {} terms to the database.\", len(terms))\n self.add_terms(terms, overwrite=False)\n # Build the indices\n self.log(\"Building the indices.\")\n self._build_indices()\n\n self.log(\"Your gene ontology is built.\")\n return self",
"def create_term(name, code, start):\n return Term.objects.create(name=name, code=code, start=start, end=start + datetime.timedelta(7*20-1))",
"def load_glossary_words(parser, token):\n class GlossaryWords(Node):\n def __init__(self, context_var):\n self.context_var = context_var\n\n def render(self, context):\n try:\n context[self.context_var] = GlossaryWord.objects.all()\n except:\n pass\n\n return \"\"\n\n\n try:\n _, context_var = token.split_contents()[1:]\n except ValueError:\n raise TemplateSyntaxError(_('tag requires 2 arguments'))\n\n return GlossaryWords(context_var)",
"async def pglossary_edit(self, ctx, term, *, definition):\n await self._pglossary_add(ctx, term, definition, False)",
"def term(template):\n class ConcreteTerm(AbstractTerm):\n _template = template\n if '{' in template:\n return ConcreteTerm\n else:\n # Primitive terms (i.e. terms with no subterms) can be singletons so we\n # return the object here rather than the class. Doing this reduces the\n # number of parentheses needed when building terms.\n return ConcreteTerm()",
"def test_glossary_term_update(self):\n pass",
"def define(word: str):\n dictionary = PyDictionary()\n meanings = dictionary.meaning(word)\n return meanings",
"def generate_glossary_json(app, doctree, docname):\n current_builder = app.builder.name\n if current_builder == 'html' or current_builder == 'readthedocs':\n glossary_data = {}\n data_dir = app.outdir + '/_static/data'\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n if os.path.exists(data_dir + '/glossary.json'):\n with open(data_dir + '/glossary.json', 'r') as existing_glossary:\n glossary_data = json.loads(existing_glossary.read())\n for node in doctree.traverse(glossary):\n for definition_list in node.children:\n for definition_list_item in definition_list.children:\n term = definition_list_item.children[0].attributes['ids'][0][5:]\n definition = ''\n for paragraphs in definition_list_item.children[1].children:\n definition += paragraphs.rawsource + '\\n'\n definition = definition[:-2]\n glossary_data[term] = definition\n glossary_json = json.dumps(glossary_data)\n glossary_json_file = open(data_dir + '/glossary.json', 'w')\n glossary_json_file.write(glossary_json)\n glossary_json_file.close()",
"def test_glossary_terms_list(self):\n pass",
"def add_entry(self, signature, **kwargs):\n if \"label\" in kwargs:\n label = kwargs['label']\n name = f\"glossary.{label}\"\n anchor = f\"glossary:{label}\"\n else:\n name = f\"glossary.{signature}\"\n anchor = f\"glossary-{signature}\"\n\n self.data['entries'].append(\n (name, signature, \"Glossary\", self.env.docname, anchor, 0)\n )\n\n self.data['names'][name] = signature\n \n if \"abbreviation\" in kwargs:\n self.data['abbreviation'][kwargs['abbreviation']] = signature\n self.data['abbreviation-name'][name] = kwargs['abbreviation']\n if \"abbreviationpl\" in kwargs:\n self.data['abbreviation-plural'][name] = kwargs['abbreviationpl']",
"def test_glossary_term_show(self):\n pass",
"def build_json():\n\n species_labels = get_species_labels()\n\n with gzip.open(download_fn, \"rt\") as fi, gzip.open(resource_fn, \"wt\") as fo:\n\n # Header JSONL record for terminology\n metadata = get_metadata(namespace_def)\n fo.write(\"{}\\n\".format(json.dumps({\"metadata\": metadata})))\n\n orig_data = json.load(fi)\n\n for doc in orig_data:\n\n id = doc[\"CHANGEME\"]\n\n term = Term(\n key=f\"{namespace}:{id}\",\n namespace=namespace,\n id=id,\n # label=doc[\"symbol\"],\n # name=doc[\"name\"],\n # species_key=species_key,\n # species_label=species_labels[species_key],\n )\n\n term.alt_ids = [\"NS:1\"]\n\n # Synonyms\n term.synonyms.extend([\"one\", \"two\"])\n\n # Equivalences\n term.equivalence_keys.append(\"NS:1\")\n\n # Entity types\n term.entity_types = []\n\n # Obsolete Namespace IDs\n term.obsolete_keys.append(\"NS:1\")\n\n # Add term to JSONL\n fo.write(\"{}\\n\".format(json.dumps({\"term\": term.dict()})))",
"def create_term(self):\n curterm = self.get_curterm()\n if self.show:\n self.show = False\n curterm.close_popup()\n term = Fterm(self)\n self.cur_termnr += 1\n self.term_list.insert(self.cur_termnr, term)\n term.create_popup()\n self.show = True",
"def add_term(mytrie, word, weight):\r\n assert isinstance(word, str), \"The word to be added should be a string.\"\r\n assert isinstance(weight, int), \"The weight of the word should be an integer\"\r\n mytrie.insertWord(weight, word)",
"def test_term_creation(self):\n term = create_term(name=\"Fall 2012\", code=\"Fa12\", start=datetime.date(2012, 8, 13))\n self.assertEqual(\"Fall 2012\", term.name)\n self.assertEqual(\"Fa12\", term.code)\n self.assertEqual(datetime.date(2012, 8, 13), term.start)\n self.assertEqual((datetime.date(2012, 8, 13)+datetime.timedelta(7*20-1)), term.end)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add relationship between lesson and resource objects.
|
def add_lesson_resource_relationship(self, lesson, resource, number):
relationship = ResourceDescription(
lesson=lesson,
resource=resource,
description="Description {}".format(number),
)
relationship.save()
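        # A minimal usage sketch, not part of the original helper. Assumptions:
        # "generator" is a hypothetical name for the test-data generator object,
        # and lesson/resource are model instances created by its other helpers.
        # generator.add_lesson_resource_relationship(lesson, resource, 1)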
|
[
"def add_challenge_lesson_relationship(self, challenge, lesson, set_number, number):\n relationship = ProgrammingChallengeNumber(\n programming_challenge=challenge,\n lesson=lesson,\n challenge_set_number=set_number,\n challenge_number=number,\n )\n relationship.save()",
"def add_rel_person(self, added):\n self.rel.append(added)\n added.parent = self",
"def __add_related_objects(self, obj, idref, type_, relationship=\"Contained Within\"):\n related_object = cybox.RelatedObjectType(idref=idref, type_=type_, relationship=relationship)\n related_objects = obj.get_Related_Objects()\n if not related_objects:\n related_objects = cybox.RelatedObjectsType()\n obj.set_Related_Objects(related_objects)\n\n related_objects.add_Related_Object(related_object)",
"def add(self, lesson: Lesson):\n self.lessons.append(lesson)",
"def test_get_retest_problems(self):\n for missed in [\n {'operand1' : 1, 'operand2' : 4, 'answer' : 6},\n {'operand1' : 2, 'operand2' : 4, 'answer' : 5},\n {'operand1' : 2, 'operand2' : 4, 'answer' : 6},\n ]:\n problem = Problem(\n level=1, problem_type=ProblemType.ADDITION, operand1=missed['operand1'],\n operand2=missed['operand2'])\n result = Result()\n result.level = 1\n result.round = 1\n result.problemid = problem.id\n result.studentid = self.student.id\n result.answer = missed['answer']\n db.session.add(result)\n \n lesson = Lesson(self.student)",
"def addRelationship (self, relationship):\n node1 = list(self.graph_db.find(relationship['src_label'], 'node_id',\n relationship['src_id']))\n node2 = list(self.graph_db.find(relationship['dst_label'], 'node_id',\n relationship['dst_id']))\n\n if len(node1) > 0 and len(node2) > 0:\n\n rel = Relationship(node1[0], relationship['rel_type'], node2[0])\n for key, value in relationship.items():\n rel.properties[key] = value\n self.graph_db.create(rel)\n return True\n else:\n log.debug(\"nodes do not exist in the DB\")\n return False",
"def test_create_relationship():\n from FeedMitreAttackv2 import create_relationship\n relation = create_relationship(RELATION.get('response'), ID_TO_NAME)\n relation._entity_a = 'entity a'\n relation._entity_a_type = 'STIX Malware'\n relation._entity_b = 'entity b'\n relation._entity_b_type = 'STIX Attack Pattern'\n relation._name = 'uses'\n relation._relation_type = 'IndicatorToIndicator'\n relation._reverse_name = 'used-by'",
"def test_new_lesson_creation(self):\n local_user = self.create_and_return_local_user()\n course_id = new_course({\n 'teacher' : local_user.key.id(),\n 'title' : 'foo course',\n 'body' : 'hey look mom',\n })\n unit_id = new_unit({\n 'course' : course_id, \n 'title' : 'foo unit',\n 'body' : 'bla bla unit body',\n })\n lesson_id = new_lesson({\n 'unit' : unit_id, \n 'title' : 'foo lesson',\n 'body' : 'lesson about stuff'\n })\n unit = ndb.Key('Curriculum', unit_id).get()\n course = ndb.Key('Curriculum', course_id).get()\n lesson = ndb.Key('Curriculum', lesson_id).get()\n\n # check that the correct content properties were set\n self.assertEqual(lesson.content['title'], 'foo lesson')\n self.assertEqual(lesson.content['body'], 'lesson about stuff')\n # check that the correct inferred properties were set\n self.assertEqual(lesson.content['course'], course_id)\n self.assertEqual(lesson.content['unit'], unit_id)\n self.assertEqual(lesson.content['teacher'], int(local_user.key.id()))\n self.assertEqual(lesson.content_type, 'lesson')\n # check that the parent unit correctly had this new lesson appended\n self.assertIn(lesson_id, unit.content['lessons'])",
"def add_coach(self, other):\n if other in self.coaches:\n raise DuplicateCoachError(other.name + \" is already a coach of \" + self.name)\n other.students.append(self)\n self.coaches.append(other)",
"def adopt(self):\n valid_relationships = set(Relationship._instances.keys())\n\n relationships = [\n (parent, relation.complement(), term.id)\n for term in six.itervalues(self.terms)\n for relation in term.relations\n for parent in term.relations[relation]\n if relation.complementary\n and relation.complementary in valid_relationships\n ]\n\n relationships.sort(key=operator.itemgetter(2))\n\n for parent, rel, child in relationships:\n\t #print parent, rel, child\n if rel is None:\n break\n\n try:\n parent = parent.id\n except AttributeError:\n pass\n\n if parent in self.terms:\n try:\n if child not in self.terms[parent].relations[rel]:\n self.terms[parent].relations[rel].append(child)\n except KeyError:\n self[parent].relations[rel] = [child]\n\n del relationships",
"def _add_to_relation(self, query, relationname, toadd=None):\n submodel = get_related_model(self.model, relationname)\n if isinstance(toadd, dict):\n toadd = [toadd]\n for dictionary in toadd or []:\n subinst = get_or_create(self.session, submodel, dictionary)\n try:\n for instance in query:\n getattr(instance, relationname).append(subinst)\n except AttributeError as exception:\n current_app.logger.exception(str(exception))\n setattr(instance, relationname, subinst)",
"def _add_to_relation(self, query, relationname, toadd=None):\r\n submodel = get_related_model(self.model, relationname)\r\n if isinstance(toadd, dict):\r\n toadd = [toadd]\r\n for dictionary in toadd or []:\r\n subinst = get_or_create(self.session, submodel, dictionary)\r\n try:\r\n for instance in query:\r\n getattr(instance, relationname).append(subinst)\r\n except AttributeError as exception:\r\n current_app.logger.exception(str(exception))\r\n setattr(instance, relationname, subinst)",
"def link_housing(tx):\n tx.run(\n \"MATCH (p:people {name:'Snow White'}), (h:house {name:'Castle'}) \"\n \"create (p)-[r:LIVES_IN]->(h) \"\n )\n tx.run(\n \"MATCH (p:people {gender:'M'}), (h:house {name:'Dwarf House'}) \"\n \"create (p)-[r:LIVES_IN]->(h) \"\n )\n tx.run(\n \"MATCH (p:people {gender:'F'}), (h:house {name:'Dwarf House'}) \"\n \"create (p)-[r:WORKS_IN]->(h) \"\n )\n tx.run(\n \"MATCH (p:people {gender:'M'}), (h:house {name:'Mine'}) \"\n \"create (p)-[r:WORKS_IN]->(h) \"\n )",
"def create_relationship(client: Client, indicator: str, threats: List, entity_a_type: str) -> List:\n relationships = []\n if client.create_relationships:\n for threat in threats:\n for block in threat.get('blockSet', {}):\n relationships.append(\n EntityRelationship(name='related-to',\n entity_a=indicator,\n entity_a_type=entity_a_type,\n entity_b=block.get('data'),\n entity_b_type=check_indicator_type(block.get('data')),\n brand=BRAND))\n for exec_set in threat.get('executableSet', {}):\n relationships.append(\n EntityRelationship(name='related-to',\n entity_a=indicator,\n entity_a_type=entity_a_type,\n entity_b=exec_set.get('md5Hex'),\n entity_b_type=FeedIndicatorType.File,\n brand=BRAND))\n return relationships",
"def test_element_add_relationship_twice_is_ok():\n element1 = ConcreteElement(name=\"elt1\")\n element2 = ConcreteElement(name=\"elt1\")\n model = MockModel()\n element1.set_model(model)\n relationship = element1.add_relationship(destination=element2)\n element1.add_relationship(relationship)\n assert element1.relationships == {relationship}",
"def test_add_coach_iterative(self):\n A = User()\n coaches = [User() for user in range(5)]\n A.add_coach(coaches)\n self.assertEqual(A._User__coached_by, set(coaches))\n for c in coaches:\n self.assertEqual(c.students(), set([A]))",
"def add_relation(self, relation):\n \n if self.has_id(relation.relation_id):\n raise KeyError(\"relation_id '%s' already seen before.\" % \\\n relation.relation_id)\n self._relation_ids.add(relation.relation_id)\n for entity in relation.entities:\n if not self.entity_matcher.has_id(entity.entity_id):\n self.entity_matcher.add_entity(entity)\n self._relations.append(relation)\n for entity in relation.entities:\n entity_id = entity.entity_id\n if entity_id not in self._relation_dict.keys():\n self._relation_dict[entity_id] = set()\n self._relation_dict[entity_id].add(relation)",
"def handle_interaction_add_relationship_type(\n self, module_num, relationship, object_name1, object_name2\n ):\n with DBContext(self) as (connection, cursor):\n return self.add_relationship_type(\n module_num, relationship, object_name1, object_name2, cursor\n )",
"def create_lesson():\n\n ### SAVE LESSON TO DATABASE ###\n # Set up default lesson data dict\n lesson_data = {\n 'title': 'Untitled', \n 'author_id': session['user_id'],\n 'overview': '', \n 'imgUrl': None,\n 'public': False,\n }\n\n ### UPLOAD PHOTO TO CLOUDINARY AND ATTACH URL ###\n if 'lesson-pic' not in request.files:\n lesson_data['imgUrl'] = \"/static/img/placeholder.png\"\n else: \n my_file = request.files['lesson-pic']\n result = cloudinary.uploader.upload(my_file, api_key=CLOUD_KEY, \n api_secret=CLOUD_SECRET,\n cloud_name='hackbright')\n lesson_data['imgUrl'] = result['secure_url']\n \n ### SAVE LESSON TO DATABASE ###\n lesson_data['title'] = request.form['title']\n lesson_data['overview'] = request.form['overview']\n db_lesson = crud.create_lesson(lesson_data)\n\n ### CREATE DB ASSOCIATION BETWEEN TAGS AND LESSON ###\n tags = request.form['tags'].split(',') # eg. '6th,science'\n # Right now, setting up new tag with id of \"tag\"\n for tag in tags:\n if tag in SUBJECTS: \n db_tag = crud.get_tag_by_name(tag)\n elif tag in GRADES: \n db_tag = crud.get_tag_by_name(tag)\n crud.assign_tag_to_lesson(db_tag, db_lesson)\n\n ### CREATE DB ASSOCIATION BETWEEN COMPONENTS AND LESSON ###\n if request.form['component-ids']:\n component_ids = request.form['component-ids'].split(',') # e.g. '30,31'\n for comp_id in component_ids:\n db_comp = crud.get_comp_by_id(int(comp_id))\n crud.assign_comp(db_comp, db_lesson)\n \n try: \n return {'success': True, 'lesson_id': db_lesson.lesson_id}\n except: \n print('Except something done broke')\n return {'success': False}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
=========== This is for reading in apr3 hdf (HDF5 updated 2/21/18) files from OLYMPEX and returning them all in one dictionary =========== filename = filename of the apr3 file
|
def apr3read(filename):
apr = {}
flag = 0
    ##Radar variables in the hdf file (see hdf['lores'].keys() for the full list)
radar_freq = 'zhh14' #Ku
radar_freq2 = 'zhh35' #Ka
radar_freq3 = 'z95s' #W
radar_freq4 = 'ldr14' #LDR
vel_str = 'vel14' #Doppler
##
hdf = h5py.File(filename,"r")
listofkeys = hdf['lores'].keys()
alt = hdf['lores']['alt3D'][:]
lat = hdf['lores']['lat'][:]
lon = hdf['lores']['lon'][:]
time = hdf['lores']['scantime'][:]
surf = hdf['lores']['surface_index'][:]
isurf = hdf['lores']['isurf'][:]
plane = hdf['lores']['alt_nav'][:]
radar = hdf['lores'][radar_freq][:]
radar2 = hdf['lores'][radar_freq2][:]
radar4 = hdf['lores'][radar_freq4][:]
vel = hdf['lores']['vel14c'][:]
lon3d = hdf['lores']['lon3D'][:]
lat3d = hdf['lores']['lat3D'][:]
alt3d = hdf['lores']['alt3D'][:]
#see if there is W band
if 'z95s' in listofkeys:
if 'z95n' in listofkeys:
radar_nadir = hdf['lores']['z95n']
radar_scanning = hdf['lores']['z95s']
radar3 = radar_scanning
            ##uncomment if you want the high-sensitivity beam as the nadir scan (WARNING, CALIBRATION)
#radar3[:,12,:] = radar_nadir[:,12,:]
else:
radar3 = hdf['lores']['z95s']
print('No vv, using hh')
else:
radar3 = np.ma.array([])
flag = 1
print('No W band')
##convert time to datetimes
time_dates = np.empty(time.shape,dtype=object)
for i in np.arange(0,time.shape[0]):
for j in np.arange(0,time.shape[1]):
tmp = datetime.datetime.utcfromtimestamp(time[i,j])
time_dates[i,j] = tmp
#Create a time at each gate (assuming it is the same down each ray, there is a better way to do this)
time_gate = np.empty(lat3d.shape,dtype=object)
for k in np.arange(0,550):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,time_dates.shape[1]):
time_gate[k,i,j] = time_dates[i,j]
#Quality control (masked where invalid)
radar = np.ma.masked_where(radar <= -99,radar)
radar2 = np.ma.masked_where(radar2 <= -99,radar2)
radar3 = np.ma.masked_where(radar3 <= -99,radar3)
radar4 = np.ma.masked_where(radar4 <= -99,radar4)
    #Get rid of NaNs (the new HDF files have NaN fill values built in)
radar = np.ma.masked_where(np.isnan(radar),radar)
radar2 = np.ma.masked_where(np.isnan(radar2),radar2)
radar3 = np.ma.masked_where(np.isnan(radar3),radar3)
radar4 = np.ma.masked_where(np.isnan(radar4),radar4)
apr['Ku'] = radar
apr['Ka'] = radar2
apr['W'] = radar3
apr['DFR_1'] = radar - radar2 #Ku - Ka
if flag == 0:
apr['DFR_3'] = radar2 - radar3 #Ka - W
apr['DFR_2'] = radar - radar3 #Ku - W
apr['info'] = 'The shape of these arrays are: Radar[Vertical gates,Time/DistanceForward]'
else:
apr['DFR_3'] = np.array([]) #Ka - W
apr['DFR_2'] = np.array([]) #Ku - W
apr['info'] = 'The shape of these arrays are: Radar[Vertical gates,Time/DistanceForward], Note No W band avail'
apr['ldr'] = radar4
apr['vel'] = vel
apr['lon'] = lon
apr['lat'] = lat
apr['alt_gate'] = alt3d
apr['alt_plane'] = plane
apr['surface'] = isurf
apr['time']= time
apr['timedates']= time_dates
apr['time_gate'] = time_gate
apr['lon_gate'] = lon3d
apr['lat_gate'] = lat3d
# fileheader = hdf.select('fileheader')
roll = hdf['lores']['roll']
pitch = hdf['lores']['pitch']
drift = hdf['lores']['drift']
ngates = alt.shape[0]
apr['ngates'] = ngates
apr['roll'] = roll
apr['pitch'] = pitch
apr['drift'] = drift
_range = np.arange(15,550*30,30)
_range = np.asarray(_range,float)
ind = np.where(_range >= plane.mean())
_range[ind] = np.nan
apr['range'] = _range
return apr
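# A minimal usage sketch, not part of the original routine. Assumptions:
# 'olympex_apr3.h5' is a hypothetical file name, and the radar arrays are
# shaped (gate, scan angle, along-track ray) with the nadir-pointing beam at
# scan index 12, as the commented-out nadir substitution above suggests.
#
# apr = apr3read('olympex_apr3.h5')
# ku_nadir = apr['Ku'][:, 12, :]         # Ku-band reflectivity, nadir beam
# alt_nadir = apr['alt_gate'][:, 12, :]  # gate altitudes for the same beam
# dfr = apr['DFR_1'][:, 12, :]           # Ku - Ka dual-frequency ratio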
|
[
"def apr3read(filename):\n \n apr = {}\n flag = 0\n ##Radar varibles in hdf file found by hdf.datasets\n radar_freq = 'zhh14' #Ku\n radar_freq2 = 'zhh35' #Ka\n radar_freq3 = 'zvv95' #W\n radar_freq4 = 'ldr14' #LDR\n vel_str = 'vel14' #Doppler\n ##\n\n hdf = SD(filename, SDC.READ)\n \n listofkeys = hdf.datasets().keys()\n if 'zvv95' in listofkeys:\n radar3 = hdf.select(radar_freq3)\n radar_n3 = radar3.get()\n radar_n3 = radar_n3/100.\n else:\n radar_n3 = np.array([])\n flag = 1\n print('No W band')\n \n \n alt = hdf.select('alt3D')\n lat = hdf.select('lat')\n lon = hdf.select('lon')\n time = hdf.select('scantime').get()\n surf = hdf.select('surface_index').get()\n isurf = hdf.select('isurf').get()\n plane = hdf.select('alt_nav').get()\n radar = hdf.select(radar_freq)\n radar2 = hdf.select(radar_freq2)\n radar4 = hdf.select(radar_freq4)\n vel = hdf.select(vel_str)\n lon3d = hdf.select('lon3D')\n lat3d = hdf.select('lat3D')\n alt3d = hdf.select('alt3D')\n lat3d_scale = hdf.select('lat3D_scale').get()[0][0]\n lon3d_scale = hdf.select('lon3D_scale').get()[0][0]\n alt3d_scale = hdf.select('alt3D_scale').get()[0][0]\n lat3d_offset = hdf.select('lat3D_offset').get()[0][0]\n lon3d_offset = hdf.select('lon3D_offset').get()[0][0]\n alt3d_offset = hdf.select('alt3D_offset').get()[0][0]\n \n alt = alt.get()\n ngates = alt.shape[0]\n #alt = alt[:,scan,:]\n lat = lat.get()\n #lat = lat[scan,:]\n lon = lon.get()\n #lon = lon[scan,:]\n \n lat3d = lat3d.get()\n lat3d = (lat3d/lat3d_scale) + lat3d_offset\n lon3d = lon3d.get()\n lon3d = (lon3d/lon3d_scale) + lon3d_offset\n alt3d = alt3d.get()\n alt3d = (alt3d/alt3d_scale) + alt3d_offset\n \n #time = time[scan,:]\n #surf = surf[scan,:]\n #isurf = isurf[scan,:]\n #plane = plane[scan,:]\n radar_n = radar.get()\n radar_n = radar_n/100.\n radar_n2 = radar2.get()\n radar_n2 = radar_n2/100.\n\n radar_n4 = radar4.get()\n radar_n4 = radar_n4/100.\n vel_n = vel.get()\n vel_n = vel_n/100.\n\n ##convert time to datetimes\n time_dates = np.empty(time.shape,dtype=object)\n for i in np.arange(0,time.shape[0]):\n for j in np.arange(0,time.shape[1]):\n tmp = datetime.datetime.utcfromtimestamp(time[i,j])\n time_dates[i,j] = tmp\n\n apr['Ku'] = radar_n\n apr['Ka'] = radar_n2\n apr['W'] = radar_n3\n apr['DFR_1'] = radar_n - radar_n2 #Ku - Ka\n \n if flag == 0:\n apr['DFR_2'] = radar_n2 - radar_n3 #Ka - W\n apr['DFR_3'] = radar_n - radar_n3 #Ku - W\n apr['info'] = 'The shape of these arrays are: Radar[Vertical gates,Time/DistanceForward]'\n else:\n apr['DFR_2'] = np.array([]) #Ka - W\n apr['DFR_3'] = np.array([]) #Ku - W\n apr['info'] = 'The shape of these arrays are: Radar[Vertical gates,Time/DistanceForward], Note No W band avail'\n \n apr['ldr'] = radar_n4\n apr['vel'] = vel_n\n apr['lon'] = lon\n apr['lat'] = lat\n apr['alt_gate'] = alt3d\n apr['alt_plane'] = plane\n apr['surface'] = isurf \n apr['time']= time\n apr['timedates']= time_dates\n apr['lon_gate'] = lon3d\n apr['lat_gate'] = lat3d\n \n fileheader = hdf.select('fileheader')\n roll = hdf.select('roll').get()\n pitch = hdf.select('pitch').get()\n drift = hdf.select('drift').get()\n \n apr['fileheader'] = fileheader\n apr['ngates'] = ngates\n apr['roll'] = roll\n apr['pitch'] = pitch\n apr['drift'] = drift\n \n _range = np.arange(15,550*30,30)\n _range = np.asarray(_range,float)\n ind = np.where(_range >= plane.mean())\n _range[ind] = np.nan\n apr['range'] = _range\n \n return apr",
"def read2dict(filename):\n \n if not os.path.isfile(filename):\n logger.error('The file you try to read does not exist!')\n raise IOError\n \n def read_rec(hdf):\n \"\"\" recusively read the hdf5 file \"\"\"\n res = {}\n for name,grp in hdf.items():\n #-- read the subgroups and datasets\n if hasattr(grp, 'items'):\n # in case of a group, read the group into a new dictionary key\n res[name] = read_rec(grp)\n else:\n # in case of dataset, read the value\n res[name] = grp.value\n \n #-- read all the attributes\n for name, atr in hdf.attrs.iteritems():\n res[name] = atr\n \n return res\n \n hdf = h5py.File(filename, 'r')\n result = read_rec(hdf)\n hdf.close()\n \n return result",
"def gather_file_info (apath):\n file_info = dict()\n file_info['file_name'] = os.path.basename(apath)\n file_info['file_path'] = os.path.abspath(apath)\n file_info['file_size'] = os.path.getsize(apath)\n return file_info",
"def retrieve_data_from_hdf_suitcase(fpath):\n data_dict = {}\n with h5py.File(fpath, \"r+\") as f:\n other_data_list = [v for v in f.keys() if v != \"xrfmap\"]\n if len(other_data_list) > 0:\n f_hdr = f[other_data_list[0]].attrs[\"start\"]\n if not isinstance(f_hdr, str):\n f_hdr = f_hdr.decode(\"utf-8\")\n start_doc = ast.literal_eval(f_hdr)\n other_data = f[other_data_list[0] + \"/primary/data\"]\n\n if start_doc[\"beamline_id\"] == \"HXN\":\n current_dir = os.path.dirname(os.path.realpath(__file__))\n config_file = \"hxn_pv_config.json\"\n config_path = sep_v.join(current_dir.split(sep_v)[:-2] + [\"configs\", config_file])\n with open(config_path, \"r\") as json_data:\n config_data = json.load(json_data)\n extra_list = config_data[\"other_list\"]\n fly_type = start_doc.get(\"fly_type\", None)\n subscan_dims = start_doc.get(\"subscan_dims\", None)\n\n if \"dimensions\" in start_doc:\n datashape = start_doc[\"dimensions\"]\n elif \"shape\" in start_doc:\n datashape = start_doc[\"shape\"]\n else:\n logger.error(\"No dimension/shape is defined in hdr.start.\")\n\n datashape = [datashape[1], datashape[0]] # vertical first, then horizontal\n for k in extra_list:\n # k = k.encode('utf-8')\n if k not in other_data.keys():\n continue\n _v = np.array(other_data[k])\n v = _v.reshape(datashape)\n if fly_type in (\"pyramid\",):\n # flip position the same as data flip on det counts\n v = flip_data(v, subscan_dims=subscan_dims)\n data_dict[k] = v\n return data_dict",
"def multiFileData():\n data={}\n names = Utilities.createRandomStrings(3,6)\n reps = range(1,3)\n for name in names:\n for rep in reps:\n for i in np.arange(2,10,1.0):\n key = name+'_ph'+str(i)+'_rep'+str(rep)+'.txt'\n val = 1/(1+exp((i-5)/1.2))\n data[key] = {}\n data[key]['ph'] = tempData(val)\n #print data\n return data",
"def files_to_dictionary(files):\r\n mydict = {}\r\n for x in files:\r\n pair = file_to_data(x)\r\n mydict[pair[0]] = pair[1:]\r\n return mydict",
"def get_hpdict(infilename):\n hp_dict = defaultdict(list)\n with open(infilename) as file:\n for line in file:\n if line.startswith('#'):\n continue\n pix = int(line.split()[-1])\n hp_dict[pix].append(line)\n return hp_dict",
"def read_metadata(input_file):\n metadata = {'includes': []}\n while True:\n if py3:\n line = input_file.readline().decode('utf-8')\n else:\n line = input_file.readline()\n\n # The end ?\n if line == '\\n':\n return metadata\n\n header_name, header_body = line.split(':', 1)\n header_name = header_name.strip()\n header_body = header_body.strip()\n\n if header_name.startswith('include-'):\n metadata['includes'].append( (header_name[8:], header_body) )\n else:\n metadata[header_name] = header_body",
"def read_hdf5(filename):\n import h5py as hp\n hfile = hp.File(filename, 'r')\n lenk = len(hfile.keys())\n if lenk == 1:\n data = hfile[hfile.keys()[0]].value\n else:\n data = {}\n for k in hfile.iterkeys():\n # The straight code gives ustrings, which I don't like.\n# data[k] = hfile[k].value\n exec(\"data['\" + k + \"'] = hfile['\" + k + \"'].value\")\n hfile.close()\n return data",
"def get_info_from_files() -> List[Dict[str, int]]:\n arr = []\n for i in os.listdir(\"module\"):\n data = {'title': i, 'size': os.path.getsize(f\"module/{i}\")}\n arr.append(data)\n return arr",
"def get_logo_file_dict(radiant_icd_path):\n try:\n fds = os.listdir(radiant_icd_path)\n except OSError:\n fds = list()\n abs_fds = [os.path.join(radiant_icd_path, item) for item in fds]\n return dict(zip(map(_map_logo, abs_fds), map(yose.win2unix, abs_fds)))",
"def medline_parser(filename):\n pmid_abstract_dict = {}\n with open(filename) as handle:\n for record in Medline.parse(handle):\n if 'TI' in record.keys():\n pmid, title = record['PMID'], record['TI']\n pmid_abstract_dict[pmid] = title\n return pmid_abstract_dict",
"def get_gwas_data(assoc_file):\n header_dict = {}\n data = {}\n with open(assoc_file) as fobj:\n header = fobj.readline().strip().split()\n if len(header) == 14:\n pass\n else:\n print(\"That doesnt look like a PLINK .assoc file\")\n sys.exit()\n for col in header:\n header_dict[col] = header.index(col)\n for line in fobj:\n snp_data = line.strip().split()\n data[snp_data[header.index(\"rs\")]] = snp_data\n name = get_simple_filename(assoc_file)\n print(f\"\\nThere are {len(data)} SNPs in {name}\")\n return data, header_dict",
"def from_hdf(filename):\n # Function for iteratively parsing the file to create the dictionary\n def visit_group(obj, sdict):\n name = obj.name.split('/')[-1]\n #indent = len(obj.name.split('/'))-1\n #print \" \"*indent,name, obj.value if (type(obj) == h5py.Dataset) else \":\"\n if type(obj) in [ h5py.Dataset ]:\n sdict[name] = obj.value\n if type(obj) in [ h5py.Group, h5py.File ]:\n sdict[name] = {}\n for sobj in obj.values():\n visit_group(sobj, sdict[name])\n\n data = {}\n h5file = h5py.File(os.path.expandvars(filename), 'r')\n try:\n # Run over the whole dataset\n for obj in h5file.values():\n visit_group(obj, data)\n finally:\n h5file.close()\n\n return data",
"def readDatasetMap(mappath, parse_extra_fields=False):\n datasetMap = {}\n extraFieldMap = {}\n mapfile = open(mappath)\n for line in mapfile.readlines():\n if line[0]=='#' or line.strip()=='':\n continue\n\n if parse_extra_fields:\n fields = splitLine(line)\n versionName, path, size = fields[0:3]\n datasetName,versionno = parseDatasetVersionId(versionName)\n if len(fields)>3:\n for field in fields[3:]:\n efield, evalue = field.split('=')\n extraFieldMap[(datasetName, versionno, path, efield.strip())] = evalue.strip()\n if datasetMap.has_key((datasetName, versionno)):\n datasetMap[(datasetName, versionno)].append((path, size))\n else:\n datasetMap[(datasetName, versionno)] = [(path, size)]\n else:\n datasetId, path, size = splitLine(line)[0:3]\n versionId = parseDatasetVersionId(datasetId)\n if datasetMap.has_key(versionId):\n datasetMap[versionId].append((path, size))\n else:\n datasetMap[versionId] = [(path, size)]\n\n mapfile.close()\n\n for value in datasetMap.values():\n value.sort()\n \n if parse_extra_fields:\n return (datasetMap, extraFieldMap)\n else:\n return datasetMap",
"def parse_stats_file(filename):\n ms_dict = dict()\n key = ''\n\n f = file(filename)\n for line in f.readlines():\n words = line.split()\n if len(words) == 0:\n continue\n if words[0] == 'device':\n key = words[4]\n new = [ line.strip() ]\n elif 'nfs' in words or 'nfs4' in words:\n key = words[3]\n new = [ line.strip() ]\n else:\n new += [ line.strip() ]\n ms_dict[key] = new\n f.close\n\n return ms_dict",
"def _read(self, file_path: str) -> Iterable[Dict[str, Any]]:\n pass",
"def read_montage_table():\n files_dict = {'u':[],'g':[],'r':[],'i':[],'z':[]}\n files = sp.check_output(\"awk '{print $NF}' *.imglist | grep _st\",shell=True).decode(\"UTF-8\").strip().split('\\n')\n for i in files:\n _dict = parse_path(i)\n files_dict[_dict[\"filter\"]].append(_dict['file'])\n\n\n return files_dict",
"def fetch_har_entry_pairs(har_file, no_proxy=False):\n for g in glob(har_file):\n with open(g, 'r') as f:\n har = json.loads(f.read())\n entries = []\n for e in har['log']['entries']:\n url = e['request']['url']\n if no_proxy:\n # try to remove playback app portion of the URL\n url = extract_archived_url(url)\n if url is None:\n # could be an asset of playback app\n continue\n entries.append({\n 'url': url,\n 'status': e['response']['status'],\n 'mime_type': e['response']['content']['mimeType'],\n 'size': e['response']['content']['size'],\n })\n return entries"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This is a function to load and combine the PSD into 1 matrix with dimensions (time,bin)
|
def PSD_load(psd_filename_2DS,psd_filename_HVPS,day=1,month=12):
##Load in UIOPS
#Select bins for each probe
#2DS
index1 = 4
index2 = 18
#
#HVPS
index3 = 5
index4 = 28
#
#2DS
data = netCDF4.Dataset(psd_filename_2DS,'r')
time1 = data['time'][:]
ND1 = data['conc_minR'][:,index1:index2]
midpoints1 = data['bin_mid'][index1:index2]
bin_width1 = data['bin_dD'][index1:index2]
#HVPS
data2 = netCDF4.Dataset(psd_filename_HVPS,'r')
time2 = data2['time'][:]
ND2 = data2['conc_minR'][:,index3:index4]
midpoints2 = data2['bin_mid'][index3:index4]
bin_width2 = data2['bin_dD'][index3:index4]
#Combine the 2 probes.
ND = np.zeros([ND1.shape[0],ND1.shape[1]+ND2.shape[1]])
ND[:,:ND1.shape[1]] = ND1
ND[:,ND1.shape[1]:] = ND2
binwidth = np.append(bin_width1,bin_width2)
midpoints = np.append(midpoints1,midpoints2)
#Use 2ds time (should be the same)
time = time1
dtime = np.array([],dtype=object)
flag1 = 0
for i in np.arange(0,time.shape[0]):
if len(str(int(time[i]))) < 6:
hour = int(str(int(time[i]))[0:1])
mins = int(str(int(time[i]))[1:3])
sec = int(str(int(time[i]))[3:5])
else:
hour = int(str(int(time[i]))[0:2])
mins = int(str(int(time[i]))[2:4])
sec = int(str(int(time[i]))[4:6])
if sec >= 60:
sec = sec - 60
mins = mins + 1
if mins >= 60:
mins = mins - 60
hour = hour + 1
if hour >= 24:
hour = hour - 24
if flag1 == 0:
day = day + 1
flag1 = 1
try:
temp = datetime.datetime(2015,month,day,hour,mins,sec)
except:
print('error in dtime maker')
print(month,day,hour,mins,sec)
temp = datetime.datetime(2015,month,day,hour,mins,sec)
dtime = np.append(dtime,temp)
return dtime,ND,binwidth,midpoints
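# A minimal usage sketch, not part of the original routine. Assumptions:
# the two netCDF file names are hypothetical UIOPS output files, and ND is a
# per-bin size distribution so that summing ND * bin width approximates a
# total concentration.
#
# dtime, ND, binwidth, midpoints = PSD_load('olympex_2DS.nc',
#                                           'olympex_HVPS.nc', day=1, month=12)
# N_total = np.nansum(ND * binwidth[np.newaxis, :], axis=1)  # one value per time step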
|
[
"def load_p_beam_2s():\n # get start time\n start_time = get_start_time()\n \n # instantiate array to hold the resulting data, empty and single column \n # at first, for data to be successively stacked\n p_beam_data = np.empty((0,2), float)\n \n # loop through the files and load the data\n for filename in os.listdir('../data_p_beam/2_second'):\n \n # all of the csv file is converted to a list of strings for extracting\n # the time data\n f = open('../data_p_beam/2_second/' + filename)\n lines = f.readlines()\n f.close()\n \n # instantiate an array to hold the measurement times\n arr = np.zeros((np.shape(lines)[0] - 1, 2))\n \n # loop over every row in the csv file, skipping line 1\n for i in range(0, np.shape(arr)[0]):\n \n # convert the measurement time to epoch time\n date_time = lines[i + 1][0:10] + ' ' + lines[i + 1][11:19]\n # print(date_time)\n pattern = '%Y-%m-%d %H:%M:%S'\n measurement_time = int(\n time.mktime(time.strptime(date_time, pattern)))\n \n # save the elapsed time to the times array\n arr[i, 0] = measurement_time - start_time\n\n # the current data is loaded into a numpy array\n arr[:,1] = np.loadtxt('../data_p_beam/2_second/' + filename, \n delimiter = ',', \n skiprows=1, \n usecols=(1));\n \n # removing the 0 values\n for i in range(0,np.shape(arr)[0]):\n\n if (arr[i,1] == 0):\n\n arr[i,1] = float('nan')\n\n\n # append the time and count data to the array\n p_beam_data = np.append(p_beam_data, \n arr, axis = 0)\n\n return p_beam_data",
"def load_p_beam_10s():\n \n # get start time\n start_time = get_start_time()\n\n # instantiate array to hold the resulting data, empty and single column \n # at first, for data to be successively stacked\n p_beam_data = np.empty((0,3), float)\n \n # loop through the files and load the data\n for filename in os.listdir('../data_p_beam/10_second'):\n \n # all of the csv file is converted to a list of strings for extracting\n # the time data\n f = open('../data_p_beam/10_second/' + filename)\n lines = f.readlines()\n f.close()\n \n # instantiate an array to hold the measurement times\n arr = np.zeros((np.shape(lines)[0] - 1, 3))\n \n # loop over every row in the csv file, skipping line 1\n for i in range(0, np.shape(arr)[0]):\n \n # convert the measurement time to epoch time\n date_time = lines[i + 1][0:10] + ' ' + lines[i + 1][11:19]\n # print(date_time)\n pattern = '%d.%m.%Y %H:%M:%S'\n measurement_time = int(\n time.mktime(time.strptime(date_time, pattern)))\n \n # save the elapsed time to the times array\n arr[i, 0] = measurement_time - start_time\n\n # the current data is loaded into a numpy array\n arr[:,1:3] = np.loadtxt('../data_p_beam/10_second/' + filename, \n delimiter = ';', \n skiprows=1, \n usecols=(86,88));\n \n # removing the 0 values\n for i in range(0,np.shape(arr)[0]):\n\n if (arr[i,1] == 0):\n\n arr[i,1] = float('nan')\n\n if (arr[i,2] == 0):\n\n arr[i,2] = float('nan')\n\n\n # append the time and count data to the array\n p_beam_data = np.append(p_beam_data, \n arr, axis = 0)\n\n return p_beam_data",
"def processBinFile(OpenedFile):\n raw_data = np.fromfile(OpenedFile, dtype = np.uint8)\n bin_file_size = len(raw_data) \n ii = np.zeros((1,128), dtype=np.int)\n start_byte = 0\n rp_i = 0\n rp_locs = np.zeros(6240, dtype='int') \n for i in range(1, int(bin_file_size/32096) + 1):\n raw_fire_time = raw_data[start_byte + 24:start_byte + 32]\n roll_b = raw_data[start_byte + 16:start_byte + 18].view('int16')\n pitch_b = raw_data[start_byte + 18:start_byte + 20].view('int16')\n if((roll_b != 8224) | (pitch_b != 8224)):\n rp_locs[rp_i] = i\n ROLL_R[rp_i] = roll_b\n rp_i = rp_i + 1\n \n for k in range(0, 8):\n raw_signal = raw_data[start_byte + k * 4008 + 40 : start_byte + k * 4008 + 4040].view('uint16')\n raw_signal = np.float16((raw_signal.astype(\"double\")-32768)/32768)\n raw_signal = np.asmatrix(raw_signal)\n #raw_first_ref = raw_data[start_byte+k*4008+32:start_byte +k*4008+34]\n #first_ref = raw_first_ref.view('uint16')\n channel_index = raw_data[start_byte + k*4008 + 38].astype(\"int\")\n SIGNAL_MATRICES[channel_index, ii[0,channel_index], :] = raw_signal\n ii[0,channel_index] = ii[0,channel_index] + 1\n start_byte = start_byte +32096\n return SIGNAL_MATRICES, ROLL_R",
"def binFile2DataAle(fname, storePickle=False):\n # Open .bin file\n print(\"Opening .bin file.\")\n with open(fname, mode='rb') as file:\n rawdata = list(file.read())\n print(\"Binfile opened.\")\n \n # Number of bytes per line\n NB = 4\n \n # Number of addresses\n NA = 8\n \n # number of data points\n N = len(rawdata) / NB # 4 bytes per 32 bit number\n\n # convert list into list of lists with each sublist containing 4 numbers\n # rawdata2 = [rawdata[i:i+NB] for i in range(0, len(rawdata), NB)]\n\n # reverse order of the bytes in each list item\n # rawdata = [list(reversed(rawdata[i])) for i in range(0, len(rawdata))]\n\n # Create vector data with the macro arrival times\n data = arrivalTimes()\n for i in range(NA):\n for j in range(NB):\n setattr(data, \"pixel\" + str(i*NB + j), [])\n \n # go through data set\n oldTimeArr = np.zeros(NA) # keep track of time\n k = np.zeros(NA) # keep track of number of macrotimes of each address\n for i in range(round(N)):\n if np.mod(i, 10000) == 0:\n print(\"{0:.3f} %\".format(100 * i/N))\n start = 4 * i\n newRawDataPoint = rawdata[start:start+NB]\n newRawDataPoint = list(reversed(newRawDataPoint))\n address = getAddress(newRawDataPoint)\n time = getTime(newRawDataPoint)\n ch = getChannel(newRawDataPoint)\n # print(\"time: \" + str(time) + \" - oldTime: \" + str(oldTime))\n # print(ch)\n if time < oldTimeArr[address]:\n k[address] += 1\n oldTimeArr[address] = time\n time = int(k[address] * 4096 + time)\n if ch > 0 and address >= 0:\n pixel = address * 4 + ch - 1\n getattr(data, \"pixel\" + str(pixel)).append(time)\n \n if storePickle:\n print(\"Storing .pickle file\")\n savevar(data, fname[0:-4] + \"_data\")\n print(\".pickle file stored\")\n \n return(data)",
"def dataset_to_tensor(input_folder, output_folder, files):\n dataset_tensor = np.empty([45253,37,0])\n for file in files[:-1]:\n \n file_path = os.path.join(input_folder, file)\n np_table = np.load(file_path, allow_pickle=True)\n np_table = np.expand_dims(np_table, axis=-1) \n dataset_tensor = np.concatenate((dataset_tensor,np_table), axis=-1)\n\n # Process the timestamp\n doy, hod = process_timestamp(dataset_tensor[:, 0, 0])\n\n # Clean the time stamp:\n dataset_tensor = dataset_tensor[:,1:,:]\n \n # Add city attributes:\n city_att_path = os.path.join(input_folder, files[-1])\n city_att = np.load(city_att_path, allow_pickle=True)\n dataset_tensor = np.concatenate((dataset_tensor, city_att), axis=-1)\n # Add hour of the day\n hod = np.broadcast_to(hod, (dataset_tensor.shape[0], dataset_tensor.shape[1], 1))\n dataset_tensor = np.concatenate((hod, dataset_tensor), axis=-1)\n # Add day of the year\n doy = np.broadcast_to(doy, (dataset_tensor.shape[0], dataset_tensor.shape[1], 1))\n dataset_tensor = np.concatenate((doy, dataset_tensor), axis=-1)\n\n # Save dataset_tensor:\n new_filename = 'dataset_tensor' \n new_filepath = os.path.join(output_folder, new_filename)\n np.save(new_filepath, dataset_tensor)",
"def initData():\n global timeBinary\n global specBinary\n global nrbdBinary\n\n rawData = dataArray(sampleSize)\n\n timeBinary = packTimeData(rawData)\n specBinary = packSpecData(rawData)\n nrbdBinary = packNrbdData(rawData)",
"def get_data(tstart,tstop,binsperdec = 4,data_dir = '/phys/groups/tev/scratch1/users/Fermi/data'):\n start_date = MyDate(*MET(tstart).time.timetuple()[:6])\n stop_date = MyDate(*MET(tstop).time.timetuple()[:6])\n files = dict(monthly=dict(bpd=None,lt=None),weekly=dict(bpd=None,lt=None),daily=dict(bpd=None,lt=None))\n for t in ['monthly','weekly','daily']:\n files[t]['bpd'] = np.asarray(sorted(glob(os.path.join(data_dir,t,'bpd','%s_*_%ibpd.fits'%(t[:-2].replace('i','y'),binsperdec)))))\n files[t]['lt'] = np.asarray(sorted(glob(os.path.join(data_dir,t,'lt','%s_*_lt.fits'%(t[:-2].replace('i','y'))))))\n month_mask,gti = accept_files(files['monthly']['bpd'],start_date,stop_date,months = True)\n week_mask,gti = accept_files(files['weekly']['bpd'],start_date,stop_date,gti=gti)\n day_mask,gti = accept_files(files['daily']['bpd'],start_date,stop_date,gti=gti)\n bpds = np.append(files['monthly']['bpd'][month_mask],np.append(files['weekly']['bpd'][week_mask],files['daily']['bpd'][day_mask]))\n lts = np.append(files['monthly']['lt'][month_mask],np.append(files['weekly']['lt'][week_mask],files['daily']['lt'][day_mask]))\n return bpds,lts",
"def _create_array(self,path):\n #load data\n x = np.genfromtxt(path,delimiter = ',')\n #cut unnecessary parts (the stimuli was present 5-20 seconds)\n x = x[5*self.freq:20*self.freq]\n\n re_sult = re.search('(?P<freq>\\d+)Hz',path)\n f = re_sult.group('freq')\n target = self._target[f]\n t = [target for n in range(x.shape[0])]\n return np.column_stack((x,t))",
"def next_batch_packed(self):\n if self.batch >= self.num_batches:\n self.new_epoch()\n \n pack_ids = self.batches_pack[self.batch]\n\n if len(pack_ids) > 1:\n where = \"id in {}\".format(tuple(pack_ids))\n else:\n where = \"id={}\".format(pack_ids[0])\n\n sql = \"\"\"\n select data\n from {}\n where\n {}\n\n \"\"\".format(self.packed_table, where)\n\n s_sql = datetime.datetime.now()\n dat = pd.read_sql_query(sql, self.conn)\n\n\n e_sql = datetime.datetime.now()\n\n buff_list = dat.data.values\n\n x, y, n_obs = pickle.load(StringIO.StringIO(buff_list[0]))\n for i in range(1,len(buff_list)):\n x_,y_,n_obs_ = pickle.load(StringIO.StringIO(buff_list[i]))\n x = np.append(x,x_, axis=0)\n y = np.append(y, y_, axis=0)\n n_obs = np.append(n_obs, n_obs_, axis=0)\n\n e_pkl = datetime.datetime.now()\n if self.debug:\n dt_sql = e_sql - s_sql\n dt_pkl = e_pkl - e_sql\n dt_total = e_pkl - s_sql\n print(\"next_batch time summary:\")\n msg = \"total time elapsed: %d ms (sql: %d ms, unpickle %d ms)\" % (\n dt_total.total_seconds() * 1000, dt_sql.total_seconds() * 1000,\n dt_pkl.total_seconds() * 1000)\n print(msg)\n\n self.batch += 1\n return x, y, n_obs",
"def GetData(month, day): ## Input Weiyi-formatted Data\n #root = \"/mnt/rips2/2016\" #for AWS\n root = \"/home/rmendoza/Documents/Data/DataXGB_jul28\" #for local maschine\n p0 = \"0\" + str(month)\n p1 = str(day).rjust(2,'0')\n #dataroot = os.path.join(root,p0,p1,\"day_samp_bin.npy\") # for AWS\n #binName = 'day_samp_bin'+p0+p1+'.npy' #for local maschine #old data\n binName = 'day_samp_new_'+p0+p1+'.npy'## New data\n dataroot = os.path.join(root,binName) #for local maschine\n print \"Reading Data...\"\n train_data, train_label = format_data(dataroot)\n\n #temp = np.load(dataroot) #old code\n #Data = csr_matrix(( temp['data'], temp['indices'], temp['indptr']),shape = temp['shape'], dtype=float).toarray()\n\n print \"Finished reading data file\"\n return train_data, train_label",
"def get_stream_data(stream_date,\n stream_time,\n file_name_prefix='gui_episode_data',\n file_path='data/streams/XRPEUR/',\n half=False):\n quote_array = np.load(\n '{}{}_{}_{}_{}_quotes_{}_{}_hours.npy'.format(\n file_path, file_name_prefix, *stream_date, *stream_time\n )\n )\n \n image_array = np.load(\n '{}{}_{}_{}_{}_images_{}_{}_hours.npy'.format(\n file_path, file_name_prefix, *stream_date, *stream_time\n )\n )\n \n if half:\n image_array = image_array[:, :, int(image_array.shape[2]/2):-40, :]\n\n return quote_array, image_array",
"def load_monitor(): \n\n # get the start time\n start_time = get_monitor_start_time()\n\n # initialize an array to hold the data\n monitor_data = np.empty((0,4), float)\n\n # loop through the files and load the data\n for filename in os.listdir('../data_ucn/monitor_detector'):\n \n # get the time stamp from the txt file and the counts from the tof file\n # but we only check for one, so that we don't do each twice.\n if(filename[0] == 'T' and 'tof' in filename):\n \n # print(filename[0:12])\n\n # grab from the text file associated with the run\n f = open('../data_ucn/monitor_detector/' \n + filename[0:12] \n + '.txt') \n\n lines = f.readlines()\n f.close()\n\n # grab the epoch time for run start\n date_time = filename[1:3].zfill(2) + '.12.2017 '\\\n + lines[26][15:23]\n \n pattern = '%d.%m.%Y %H:%M:%S'\n run_time = int(time.mktime(\n time.strptime(date_time, pattern)))\n\n # reset the run_start_time with reference to the\n # t = 0 time\n # !!! temporarily use the raw UNIX epoch time stamp\n# run_time = run_time - start_time\n\n # load the monitor count data\n arr = np.loadtxt('../data_ucn/monitor_detector/' + filename,\n usecols = (1))\n\n # sum the counts\n counts = np.sum(arr)\n\n # saving the [day].[run number] can be useful for debugging\n day_run_no = int(filename[1:3]) + (0.001\n * int(filename[9:12]))\n\n # the current data is appended to the existing data array\n monitor_data = np.append(monitor_data, [[run_time, \n counts, \n np.sqrt(counts),\n day_run_no]], axis = 0)\n \n return monitor_data[monitor_data[:,0].argsort()]",
"def reorg_array(self, header, data, rbtime=1):\n\n # Makes sure rbtimes is an integer greater than 1\n assert rbtime >= 1\n assert rbtime % 1 == 0\n\n # Make complex array of voltages\n data_c = data[:, ::2] + 1j * data[:, 1::2]\n\n del data\n\n data_c = data_c.reshape(-1, 625, 8)\n\n seq_list = list(set(header[:, -1]))\n seq_list.sort()\n seq_list_zero = seq_list - seq_list[0]\n\n # Get total number of packets between first and last\n npackets = (seq_list[-1] - seq_list[0] + self.nperpacket) \n\n seq_f = np.arange(seq_list[0], seq_list[-1])\n# Arr = np.zeros([625*len(seq_list), self.npol, self.nfreq], np.complex64)\n Arr = np.zeros([npackets, self.npol, self.nfreq], np.complex64)\n\n for pp in xrange(self.npol):\n for qq in xrange(self.nfr):\n for ii in xrange(16):\n for ss in xrange(len(seq_list)):\n seq=seq_list[ss] \n ind = np.where((header[:, 0]==pp) & (header[:, 1]==qq) & \\\n (header[:, 2]==ii) & (header[:, -1]==seq))[0]\n \n fin = ii + 16 * qq + 128 * np.arange(8)\n\n if len(ind) != 1:\n continue\n\n tti = seq_list_zero[ss]\n\n Arr[tti:tti+625, pp, fin] = data_c[ind[0]]\n\n del data_c, header\n\n if rbtime != 1:\n Arr = np.abs(Arr)**2\n\n Arrt = Arr[:len(Arr)//rbtime*rbtime].reshape(-1, rbtime, 2, self.nfreq)\n nnonz = np.where(Arrt!=0, 1, 0)[0].sum(1)\n\n print Arrt.shape, nnonz.shape\n Arrt /= nnonz[:, None]\n Arr[np.isnan(Arr)] = 0.0\n \n return Arr\n else:\n return Arr",
"def load_day(self, day) :\n col = []\n bs = 0\n if self.has_day(day) :\n bs = self.idx['daily'][day]['bar_sec']\n col= list(copy.deepcopy(self.idx['daily'][day]['cols']))\n try :\n bfn = self.path+'/daily/'+day+'/bar.npz'\n bar = np.load(bfn, allow_pickle=True)['bar']\n except KeyboardInterrupt as e :\n print ('Keyboard interrupt!')\n raise e\n except :\n print (bfn+' not found but is in the repo index')\n self.remove_day(day, check=False)\n return [], [], 0\n else :\n return [], [], 0\n\n #assert self._get_totalbars(bs) == len(bar), bfn + ' wrong size: '+str(len(bar)) + ' should be ' + str(self._get_totalbars(bs))\n if self._get_totalbars(bs) != len(bar) :\n print (bfn + ' wrong size: '+str(len(bar)) + ' should be ' + str(self._get_totalbars(bs)))\n utc=bar[:, ci(col,utcc)]\n u0 = self._make_daily_utc(day, bs)\n ix0, zix = ix_by_utc(u0, utc, verbose=False)\n bar = bar[zix, :]\n if len(zix) != len(u0) :\n bar0 = np.zeros((len(u0), len(col)))\n bar0[:, ci(col, utcc)] = u0\n bar0[ix0, :] = bar[:, :]\n # fill forward and backward for ltt, lpx, ism1, spd\n for i, c in enumerate(col) :\n if c in [lttc, lpxc] + col_idx(['ism1', 'spd']) :\n repo.fwd_bck_fill(bar0[:,i], v=0)\n bar = bar0\n return bar, col, bs",
"def importDCAM(filename, dims, timepoints):\n with open(filename, 'rb') as fid:\n fid.seek(233)\n A = np.fromfile(fid, dtype='>u2')\n# A = np.fromfile(fid, dtype=np.uint16).byteswap()\n # TODO: consider using np.memmap here\n A = A[:dims[0]*dims[1]*timepoints]\n assert(len(A)==(dims[0]*dims[1]*timepoints))\n mov = np.fliplr(A.reshape([dims[0], dims[1], timepoints], order='F'))\n # hack to remove strange pixels with very high intensity\n mov[np.where(mov > 60000)] = 0\n return mov",
"def loadData(path = \"../data/\"):\n\n I = None\n L = None\n s = None \n images = None\n \n for i in range(7):\n j = i+1\n temp = imread(path + 'input_' + str(j) + '.tif')\n temp = rgb2xyz(temp)\n fors = np.copy(temp)\n temp = temp[:,:,1] #Just take luminance (Y)\n ipyn = np.copy(temp)\n print(ipyn.shape)\n temp = np.reshape(temp, (temp.shape[0]*temp.shape[1]))\n \n \n if i == 0:\n I = np.copy(temp)\n images = np.copy(ipyn)\n else:\n I = np.vstack((I, temp))\n images = np.vstack((images, ipyn))\n \n sources = np.load(path + 'sources.npy')\n L = np.copy(sources)\n L = L.T\n \n # s = (431, 369, 3)\n s = (fors.shape[0], fors.shape[1])\n \n print(L.shape, temp.shape, I.shape, s)\n \n return I, L, s, images",
"def extract_obsmode_data(files, bin_data=True, bin_res=0.125, label_only=False, labels=\"clean\"):\n\n if labels == \"clean\":\n belloni_turned = convert_belloni.convert_belloni_clean()\n else:\n belloni_states = convert_belloni.main()\n belloni_turned = convert_belloni.turn_states(belloni_states)\n\n\n d_all = []\n for f in files:\n fstring = f.split(\"_\")[1]\n if fstring in belloni_turned:\n state = belloni_turned[fstring]\n else:\n state = None\n if label_only:\n continue\n\n d = np.loadtxt(f)\n dt_data = d[1:,0]-d[:-1,0]\n\n dt_min = np.min(dt_data)\n\n ## compute nbins, if nbins is <=1, don't bin\n ## because target resolution is smaller than\n ## native resolution, and we don't resample.\n nbins = int(bin_res/dt_min)\n if nbins <= 1:\n print(\"Target resolution smaller than native time resolution. Not binning!\")\n bin_data=False\n\n ### split data with breaks\n breaks = np.where(dt_data > 0.008)[0]\n if len(breaks) == 0:\n dtemp = d\n if bin_data:\n dshort = bin_lightcurve(dtemp, nbins)\n else:\n dshort = dtemp\n d_all.append([dshort, state, fstring])\n else:\n for i,b in enumerate(breaks):\n if i == 0:\n if b == 0:\n continue\n else:\n dtemp = d[:b]\n if bin_data:\n dshort = bin_lightcurve(dtemp, nbins)\n else:\n dshort = dtemp\n\n else:\n dtemp = d[breaks[i-1]+1:b]\n if bin_data:\n dshort = bin_lightcurve(dtemp, nbins)\n else:\n dshort = dtemp\n\n d_all.append([dshort, state, fstring])\n\n ## last segment\n dtemp = d[b+1:]\n if bin_data:\n dshort = bin_lightcurve(dtemp, nbins)\n else:\n dshort = dtemp\n\n d_all.append([dshort, state, fstring])\n\n return d_all",
"def readData(period):\n if period == 'future':\n directory = '/volumes/eas-shared/ault/ecrl/spring-indices/LENS_springonset/data/cesm1.lens.1920-2005.cvdp_data/'\n NAO = []\n PDO = []\n NINO = []\n ens = list(xrange(2,31))\n for i in xrange(len(ens)):\n files = 'CESM1-CAM5-BGC-LE_%s.cvdp_data.2013-2100.nc' % ens[i]\n filename = directory + files\n values = Dataset(filename)\n time = values.variables['time'][:]\n pdo = values.variables['pdo_timeseries_mon'][:]\n nao = values.variables['nao_pc_mon'][:]\n nino = values.variables['nino34'][:]\n values.close()\n \n NAO.append(nao)\n PDO.append(pdo)\n NINO.append(nino)\n time = np.asarray(time)\n PDO = np.asarray(PDO)\n NINO = np.asarray(NINO)\n NAO = np.asarray(NAO)\n PDOyr = np.reshape(PDO,(PDO.shape[0],PDO.shape[1]/12.,12.))\n PDOave = np.nanmean(PDOyr,axis=2)\n NAOyr = np.reshape(NAO,(NAO.shape[0],NAO.shape[1]/12.,12.))\n NAOave = np.nanmean(NAOyr,axis=2)\n NINOyr = np.reshape(NINO,(NINO.shape[0],NINO.shape[1]/12.,12.))\n NINOave = np.nanmean(NINOyr,axis=2)\n \n leafmean, latmean, lstfrz, lat, lon = SIx() \n leafmean = leafmean[:,7:,:,:]\n latmean = latmean[:,7:,:,:]\n PDOave = PDOave[:,:-20]\n NAOave = NAOave[:,:-20]\n NINOave = NINOave[:,:-20]\n return PDOyr,PDOave,NAOyr,NAOave,NINOyr,NINOave,leafmean,latmean,lat,lon\n elif period == 'historical':\n directory = '/volumes/eas-shared/ault/ecrl/spring-indices/LENS_springonset/data/cesm1.lens.1920-2005.cvdp_data/'\n NAO = []\n PDO = []\n NINO = []\n ens = list(xrange(2,31))\n for i in xrange(len(ens)):\n files = 'CESM1-CAM5-BGC-LE_%s.cvdp_data.1920-2005.nc' % ens[i]\n filename = directory + files\n values = Dataset(filename)\n time = values.variables['time'][:]\n pdo = values.variables['pdo_timeseries_mon'][:]\n nao = values.variables['nao_pc_mon'][:]\n nino = values.variables['nino34'][:]\n values.close()\n \n NAO.append(nao)\n PDO.append(pdo)\n NINO.append(nino)\n time = np.asarray(time)\n PDO = np.asarray(PDO)\n NINO = np.asarray(NINO)\n NAO = np.asarray(NAO)\n PDOyr = np.reshape(PDO,(PDO.shape[0],PDO.shape[1]/12.,12.))\n PDOave = np.nanmean(PDOyr,axis=2)\n NAOyr = np.reshape(NAO,(NAO.shape[0],NAO.shape[1]/12.,12.))\n NAOave = np.nanmean(NAOyr,axis=2)\n NINOyr = np.reshape(NINO,(NINO.shape[0],NINO.shape[1]/12.,12.))\n NINOave = np.nanmean(NINOyr,axis=2)\n \n leafmean, latmean, lat, lon = SIxHistorical()\n return PDOyr,PDOave,NAOyr,NAOave,NINOyr,NINOave,leafmean,latmean,lat,lon",
"def load_points():\n lst = []\n f3 = open(\"calibration_data/psm1_calibration_line_cutting.p\", \"rb\")\n pos1 = pickle.load(f3)\n lst.append(pos1)\n while True:\n try:\n pos1 = pickle.load(f3)\n lst.append(pos1)\n except EOFError:\n f3.close()\n return np.matrix(lst)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the density of the ice sphere/spheroid. return_ice = bool; if True, the iwc is returned along with the rho.
|
def rho_e(midpoints,binwidth,ND,MD,aspect,mass,twc,return_ice=False):
flag1 = False
##Determine Volume function based on input
if aspect == 1:
def volume(D,N,dD):
vol = vol_1(D,N,dD)
return vol
elif aspect == 2:
def volume(D,N,dD):
vol = vol_2(D,N,dD)
return vol
elif aspect == 3:
def volume(D,N,dD):
vol = vol_3(D,N,dD)
return vol
##
##Determine Mass function following m=aD^b and based on input
if mass == 1:
def Mass(D,N,M,dD):
m = M*dD #g/cm^3
return m
elif mass == 2:
def Mass(D,N,M,dD):
#Heymsfield 2004
a = 0.0061
b = 2.05
m = N*dD*a*D**b #g/cm^3
return m
elif mass == 3:
def Mass(D,N,M,dD):
#BF 1995 (Hogan adaption)
a = 0.0121
b = 1.9
D = D / 100. #convert cm to m
m = N*dD*1e3*(a *(D)**b); #g/cm^3
return m
elif mass ==4:
flag1 = True
##
rho_array = np.array([],dtype=float)
ice_array = np.array([],dtype=float)
for j in np.arange(0,ND.shape[0]):
NumD = ND[j,:]
MasD = np.zeros(NumD.shape)
rho_tot = 0.
bucket=0.
bucket2 =0.
for i in np.arange(3,midpoints.shape[0]):
if flag1:
iwc = twc[j]/1e6 #convert g/m^3 to g/cm^3
bucket = iwc
vol = volume(midpoints[i],NumD[i],binwidth[i]) #cm^3/cm^3
bucket2 = np.ma.sum([bucket2,vol])
else:
if np.ma.is_masked(NumD[i]):
continue
else:
iwc = Mass(midpoints[i],NumD[i],MasD[i],binwidth[i]) # g/cm^3
bucket = np.ma.sum([bucket,iwc])
vol = volume(midpoints[i],NumD[i],binwidth[i]) #cm^3/cm^3
bucket2 = np.ma.sum([bucket2,vol])
if bucket==0. and bucket2 == 0.:
den_b = np.nan
else:
den_b = bucket/bucket2 #bulk density of particles (g/cm^3)
#adjust den in the event it exceeds the maxium density of solid ice
if den_b >= 0.9167:
den_b = 0.9167
rho_array = np.ma.append(rho_array,den_b)
ice_array = np.ma.append(ice_array,bucket)
if return_ice:
return rho_array,ice_array*100**3
else:
return rho_array
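A minimal usage sketch, not part of the source: per time step the bulk density is total mass over total volume, rho = sum(m_i) / sum(V_i), capped at 0.9167 g/cm^3 (solid ice). The vol_1 helper below is an assumed spherical stand-in for the real vol_1/vol_2/vol_3 functions, and the array shapes are illustrative; only the call signature mirrors rho_e above.

import numpy as np

def vol_1(D, N, dD):
    # assumed stand-in: spherical volume fraction contributed by one size bin (cm^3/cm^3)
    return N * dD * (np.pi / 6.0) * D ** 3

ntimes, nbins = 5, 20
midpoints = np.linspace(0.02, 1.0, nbins)                   # particle diameters, cm
binwidth = np.full(nbins, midpoints[1] - midpoints[0])      # bin widths, cm
ND = np.ma.masked_less(np.random.rand(ntimes, nbins), 0.3)  # N(D) per bin, masked where no data
MD = np.zeros((ntimes, nbins))                              # only consulted when mass == 1
twc = np.random.rand(ntimes)                                # g/m^3, only consulted when mass == 4

rho = rho_e(midpoints, binwidth, ND, MD, aspect=1, mass=2, twc=twc)
rho, iwc = rho_e(midpoints, binwidth, ND, MD, aspect=1, mass=2, twc=twc, return_ice=True)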
|
[
"def rho(self):\n\n if \"rho\" not in self.ds:\n var = xroms.density(self.ds.temp, self.ds.salt, self.ds.z_rho)\n self.ds[\"rho\"] = var\n\n return self.ds.rho",
"def resistivity(self):\n A = self. w * self.t # Cross-sectional area of cpw\n rho = self.resistance() * (A / self.l)\n return rho",
"def atmospheric_density(self, height):\n return (self.atmospheric_pressure(height) * self.atmospheric_weight)/(GAS_CONSTANT * self.mean_surface_temperature)",
"def number_density_z_coarse(inter):\n dens_re = inter.volume # equal to number_density_field_reshape\n tmp = np.sum(dens_re,axis=0)\n tmp2 = np.sum(tmp,axis=0) #un-normalized density along z\n volume = inter.box[0]*inter.box[1]*inter.box[2] #A^3 \n volume0 = volume/inter.ngrid[-1]\n rho_av = inter.universe.atoms.n_atoms/volume \n norm_factor = inter.universe.atoms.n_atoms/volume0/tmp2.sum()\n return tmp2*norm_factor,rho_av",
"def _virial_overdensity(self):\n Om_mz = self.cosmology._Omega_m()\n x = Om_mz-1.;\n Dv0 = 18.*pow(np.pi,2);\n Dv = (Dv0+82.*x-39.*pow(x,2))/Om_mz;\n\n return Dv;",
"def rho(self, d):\n\t\tif (self.distribution == 'spherical'):\n\t\t\t# log(rho) = (13.86 +/- 0.47) - (3.34 +/- 0.11)*log(R) [Galactocentric distance] (Wetterer 1991)\n\t\t\trho = 10**(13.86 - 3.34*log10(d*1e3))\n\t\t\terr = 10**(0.47 - 0.11*log10(d*1e3))\n\t\t\t\n\t\telif (self.distribution == 'ellipsoidal'):\n\t\t\t# log(rho) = (15.71 +/- 0.56) - (3.76 +/- 0.13)*log(a) [Galactocentric semimajor distance] (Wetterer 1991)\n\t\t\trho = 10**(15.71 - 3.76*log10(d*1e3))\n\t\t\terr = 10**(0.56 - 0.13*log10(d*1e3))\n\t\telse:\traise TypeError, 'Spatial density distribution unknown, only spherical or ellipsoidal available'\n\n\t\t\n\t\treturn [rho, err]",
"def sigma_ice(depth, age, temp, thinning, dt, ice_diffusivity = \"ramseier\"):\n age_dt = np.arange(age[0], age[-1], dt)\n depth_dt = np.interp(age_dt, age, depth)\n temp_dt = np.interp(age_dt, age, temp)\n thinning_dt = np.interp(age_dt, age, thinning)\n rho_ice_dt = 0.9165*(1-1.53e-4*(temp_dt - 273.15)) ##Ice density Bader 1964\n\n if ice_diffusivity != \"ramseier\":\n try:\n ice_diffusivity_dt = eval(\"diffusivity.IceDiffusivity(temp_dt).\"\\\n + ice_diffusivity + \"()*3600*24*365.25 #m2yr-1\")\n print(ice_diffusivity)\n except:\n print(\"wrong diffusivity code\")\n\n else:\n ice_diffusivity_dt = diffusivity.IceDiffusivity(temp_dt).ramseier()*3600*24*365.25 #m2yr-1\n\n\n sigma_ice_dt = \\\n np.sqrt(thinning_dt[0:-1]**2*sp.integrate.cumtrapz(2*ice_diffusivity_dt*thinning_dt**(-2), age_dt))\n\n sigma_ice = np.interp(age, age_dt[0:-1], sigma_ice_dt)\n\n return sigma_ice",
"def density(alt):\r\n rho = surface_rho*np.exp(-beta*alt)\r\n return rho",
"def _calc_density(self, EigenVecs, num_electrons): \n density = 0\n\n for i in range (0, len(self.occupation_list)):\n #print(\"orbital number - {0} adding occupation: {1}\".format(i, self.occupation_list[i]))\n #density += self.occupation_list[i] * np.power(np.abs(EigenVecs[:, i]), 2)\n density += self.occupation_list[i] * np.abs(EigenVecs[:, i])**2 \n\n self._check_density(density, num_electrons)\n return density",
"def test_propensity_return_value_desert(self):\n i = Island()\n des_loc = (5,9)\n h = Herbivore(i, des_loc)\n\n assert h.propensity(des_loc) == 1",
"def calculate_co_column_density():\n # Build up all the constants\n # Already defined in astropy.constants\n # const.k_B, const.eps0, const.h\n #\n B0 = 55101.01 * u.MHz\n Eu = 5.28880 * u.K\n mu = 0.11046 * u.Debye\n nu = 110.20135400 * u.GHz\n Ju = 1.\n g = 2.*Ju + 1\n S = Ju/g\n # Prefactors (after cancelling a factor of 4pi from top and bottom)\n prefactor_numerator = const.eps0 * 3 * const.k_B\n prefactor_denominator = 2 * np.pi**2 * nu * S * mu**2\n # Load in Tex and integrated intensity\n Tex_unitless, Texhdr = fits.getdata(catalog.utils.search_for_file(\"bima/12co10_19-27.3_peak.fits\"), header=True)\n err_Tex = u.Quantity(extract_noise_from_hdr(Texhdr))\n # Tex more often used as kTex (and put units)\n Tex = Tex_unitless*u.K\n\n fn_13co = catalog.utils.search_for_file(\"bima/13co10_19-27.integrated.marcs_version.fits\")\n\n\n integrated_intensity_unitless, intT_hdr = fits.getdata(fn_13co, header=True)\n beam_13co = cube_utils.Beam.from_fits_header(intT_hdr)\n err_intT = u.Quantity(extract_noise_from_hdr(intT_hdr))\n integrated_intensity = integrated_intensity_unitless*u.K*kms\n # Rotational partition function\n Qrot = (const.k_B * Tex / (const.h * B0)).decompose() + (1./3.)\n err_Qrot = (const.k_B * err_Tex / (const.h * B0)).decompose() # constant falls off from derivative\n # exponential term\n exp_term = np.exp(Eu / Tex)\n err_exp_term = err_Tex * exp_term * Eu/(Tex**2) # d(e^(a/x)) = (a dx / x^2) e^(a/x)\n # All together\n N13CO = ((prefactor_numerator/prefactor_denominator) * (Qrot/g) * exp_term * integrated_intensity).to(u.cm**-2)\n # Uncertainty! d(cxyz) = cyz dx + cxz dy + cxy dz. But you gotta do quadrature sum instead of regular sum\n # Collected all constants (prefactor_numerator/prefactor_denominator and 1/g) at the end, outside the derivatives and quad sum\n helper_1 = (Qrot * exp_term * err_intT)**2\n helper_2 = (Qrot * err_exp_term * integrated_intensity)**2\n helper_3 = (err_Qrot * exp_term * integrated_intensity)**2\n err_N13CO = (np.sqrt(helper_1 + helper_2 + helper_3) * (prefactor_numerator / prefactor_denominator) / g).to(u.cm**-2)\n\n\n # Mask on integrated intensity error\n masking_by_error = True\n if masking_by_error:\n unmasked_N13CO = N13CO.copy()\n unmasked_err_N13CO = err_N13CO.copy()\n masking_by_error_coeff = 1.\n N13CO[integrated_intensity_unitless < masking_by_error_coeff*err_intT.to_value()] = np.nan\n err_N13CO[integrated_intensity_unitless < masking_by_error_coeff*err_intT.to_value()] = np.nan\n else:\n unmasked_N13CO = None\n\n\n N12CO = N13CO * ratio_12co_to_13co\n NH2 = N12CO / ratio_12co_to_H2\n\n err_N12CO = err_N13CO * ratio_12co_to_13co\n err_NH2 = err_N12CO / ratio_12co_to_H2\n\n if unmasked_N13CO is not None:\n unmasked_NH2 = unmasked_N13CO * ratio_12co_to_13co / ratio_12co_to_H2\n unmasked_err_NH2 = unmasked_err_N13CO * ratio_12co_to_13co / ratio_12co_to_H2\n else:\n unmasked_NH2 = None\n unmasked_err_NH2 = None\n\n if False:\n crop = { # i, j\n 'p1a': ((378, 478), (227, 355)),\n 'p1b': ((260, 371), (117, 246)),\n 'p2_head': ((276, 343), (278, 388)),\n 'p3_head': ((196, 245), (329, 378)),\n 'blob': ((170, 293), (381, 487)),\n 'full': ((None, None), (None, None)),\n }\n selected_cutout = 'p1a'\n cutout = (slice(*crop[selected_cutout][0]), slice(*crop[selected_cutout][1]))\n NH2_cropped = NH2[cutout]\n wcs_cropped = WCS(intT_hdr)[cutout]\n elif False:\n selected_box_type = 'threads' # or pillars\n if selected_box_type == 'pillars':\n boxes_reg_list = regions.Regions.read(catalog.utils.search_for_file(\"catalogs/p123_boxes.reg\"))\n 
selected_box = 'Pillar 1'\n elif selected_box_type == 'threads':\n boxes_reg_list = regions.Regions.read(catalog.utils.search_for_file(\"catalogs/thread_boxes.reg\"))\n selected_box = 'western'\n boxes_reg_dict = {reg.meta['text']: reg for reg in boxes_reg_list}\n box_mask = boxes_reg_dict[selected_box].to_pixel(WCS(intT_hdr)).to_mask().to_image(NH2.shape)\n NH2_cropped = NH2.copy()\n NH2_cropped[(box_mask < 1)] = np.nan\n if selected_box_type == 'pillars' and selected_box[-1] == '3':\n NH2_cropped[178:235, 379:413] = np.nan\n wcs_cropped = WCS(intT_hdr)\n\n # from .dust_mass import get_physical_area_pixel\n # pixel_area = get_physical_area_pixel(NH2, wcs_object, los_distance_M16.to(u.pc).to_value())\n # This and the method we use below (misc_utils.get_pixel_scale) are the same within 1e-16\n \"\"\"\n Save a FITS file of:\n 13CO column density\n 12CO column density implied from that\n H2 column density implied from that\n H2 mass per pixel\n \"\"\"\n wcs_object = WCS(intT_hdr)\n\n pixel_scale = misc_utils.get_pixel_scale(wcs_object)\n pixel_area = (pixel_scale * (los_distance_M16/u.radian))**2\n err_pixel_area = 2 * (pixel_scale/u.radian)**2 * los_distance_M16 * err_los_distance_M16\n\n particle_mass = 2*mean_molecular_weight_neutral*Hmass # molecular H; 2*mu*mH\n mass_per_pixel_map = (pixel_area * NH2 * particle_mass).to(u.solMass)\n # Include both error from column density as well as from LOS distance\n err_mass_per_pixel_raw = np.sqrt((pixel_area * err_NH2 * particle_mass)**2 + (err_pixel_area * NH2 * particle_mass)**2).to(u.solMass)\n pixels_per_beam = (beam_13co.sr / pixel_scale**2).decompose()\n # sqrt(oversample_factor) to correct for correlated pixels\n err_mass_per_pixel = np.sqrt(pixels_per_beam) * err_mass_per_pixel_raw\n\n def make_and_fill_header():\n # fill header with stuff, make it from WCS\n hdr = wcs_object.to_header()\n hdr['DATE'] = f\"Created: {datetime.datetime.now(datetime.timezone.utc).astimezone().isoformat()}\"\n hdr['CREATOR'] = f\"Ramsey, {__file__}.calculate_co_column_density\"\n hdr['HISTORY'] = f\"12CO/H2 = {ratio_12co_to_H2:.2E}\"\n hdr['HISTORY'] = f\"12C/13C = {ratio_12co_to_13co:.2f}\"\n hdr['HISTORY'] = f\"Hmass = {Hmass:.3E}\"\n hdr['HISTORY'] = f\"mean molecular weight = {mean_molecular_weight_neutral:.2f}\"\n hdr['HISTORY'] = f\"adopted particle mass = {particle_mass:.2E}\"\n hdr['HISTORY'] = f\"pixel scale = {pixel_scale.to(u.arcsec):.3E}\"\n hdr['HISTORY'] = f\"pixel area = {pixel_area.to(u.pc**2):.3E}\"\n hdr['HISTORY'] = f\"sqrt(pixels/beam) oversample = {np.sqrt(pixels_per_beam):.2f}\"\n hdr['HISTORY'] = f\"LOS distance = {los_distance_M16.to(u.pc):.2f}\"\n hdr['HISTORY'] = \"Using Marcs 13co10 moment, which is less noisy\"\n hdr['HISTORY'] = \"Also using Marcs channel RMS values for 12 and 13CO\"\n if masking_by_error:\n hdr['HISTORY'] = f\"Masking by {masking_by_error_coeff:.1f} X integrated intensity error\"\n return hdr\n\n savedir = os.path.dirname(catalog.utils.search_for_file(\"bima/13co10_19-27.3_integrated.fits\"))\n savename = os.path.join(savedir, \"13co10_column_density_and_more_with_uncertainty_v3.fits\")\n\n phdu = fits.PrimaryHDU()\n\n header1 = make_and_fill_header()\n header1['EXTNAME'] = \"13COcoldens\"\n header1['BUNIT'] = str(N13CO.unit)\n hdu_13co = fits.ImageHDU(data=N13CO.to_value(), header=header1)\n\n header2 = make_and_fill_header()\n header2['EXTNAME'] = \"12COcoldens\"\n header2['BUNIT'] = str(N12CO.unit)\n hdu_12co = fits.ImageHDU(data=N12CO.to_value(), header=header2)\n\n header3 = make_and_fill_header()\n 
header3['EXTNAME'] = \"H2coldens\"\n header3['BUNIT'] = str(NH2.unit)\n header3['COMMENT'] = \"This is MOLECULAR hydrogen (H2)\"\n hdu_H2 = fits.ImageHDU(data=NH2.to_value(), header=header3)\n\n header4 = make_and_fill_header()\n header4['EXTNAME'] = \"mass\"\n header4['BUNIT'] = str(mass_per_pixel_map.unit)\n header4['COMMENT'] = \"mass is per pixel on this image\"\n hdu_mass = fits.ImageHDU(data=mass_per_pixel_map.to_value(), header=header4)\n\n\n header5 = make_and_fill_header()\n header5['EXTNAME'] = \"err_13COcoldens\"\n header5['BUNIT'] = str(err_N13CO.unit)\n hdu_e13co = fits.ImageHDU(data=err_N13CO.to_value(), header=header5)\n\n header6 = make_and_fill_header()\n header6['EXTNAME'] = \"err_12COcoldens\"\n header6['BUNIT'] = str(err_N12CO.unit)\n hdu_e12co = fits.ImageHDU(data=err_N12CO.to_value(), header=header6)\n\n header7 = make_and_fill_header()\n header7['EXTNAME'] = \"err_H2coldens\"\n header7['BUNIT'] = str(err_NH2.unit)\n header7['COMMENT'] = \"This is MOLECULAR hydrogen (H2)\"\n hdu_eH2 = fits.ImageHDU(data=err_NH2.to_value(), header=header7)\n\n header8 = make_and_fill_header()\n header8['EXTNAME'] = \"err_mass\"\n header8['BUNIT'] = str(err_mass_per_pixel.unit)\n header8['COMMENT'] = \"mass is per pixel on this image\"\n hdu_emass = fits.ImageHDU(data=err_mass_per_pixel.to_value(), header=header8)\n\n\n\n list_of_hdus = [phdu, hdu_13co, hdu_12co, hdu_H2, hdu_mass,\n hdu_e13co, hdu_e12co, hdu_eH2, hdu_emass]\n\n if masking_by_error:\n header1a = make_and_fill_header()\n header1a['EXTNAME'] = \"13COcoldens_all\"\n header1a['BUNIT'] = str(unmasked_N13CO.unit)\n header1a['COMMENT'] = \"all values\"\n hdu_13co_all = fits.ImageHDU(data=unmasked_N13CO.to_value(), header=header1a)\n\n header2a = make_and_fill_header()\n header2a['EXTNAME'] = \"H2coldens_all\"\n header2a['BUNIT'] = str(unmasked_NH2.unit)\n header2a['COMMENT'] = \"all values\"\n hdu_H2_all = fits.ImageHDU(data=unmasked_NH2.to_value(), header=header2a)\n\n header3a = make_and_fill_header()\n header3a['EXTNAME'] = \"err_H2coldens_all\"\n header3a['BUNIT'] = str(unmasked_err_NH2.unit)\n header3a['COMMENT'] = \"all values\"\n hdu_eH2_all = fits.ImageHDU(data=unmasked_err_NH2.to_value(), header=header3a)\n\n list_of_hdus.extend([hdu_13co_all, hdu_H2_all, hdu_eH2_all])\n\n\n hdul = fits.HDUList(list_of_hdus)\n hdul.writeto(savename, overwrite=True)\n\n # plt.show()",
"def get_density(self, state):\n return self.get_population(state)/self.get_area(state)",
"def ion_density(self, r):\n\n r = np.asarray(r)\n if np.any(r < 0):\n raise ValueError(\"Minor radius must not be negative\")\n\n if self.mode == \"L\":\n density = (\n self.ion_density_centre\n * (1 - (r / self.major_radius) ** 2) ** self.ion_density_peaking_factor\n )\n elif self.mode in [\"H\", \"A\"]:\n density = np.where(\n r < self.pedestal_radius,\n (\n (self.ion_density_centre - self.ion_density_pedestal)\n * (1 - (r / self.pedestal_radius) ** 2)\n ** self.ion_density_peaking_factor\n + self.ion_density_pedestal\n ),\n (\n (self.ion_density_pedestal - self.ion_density_separatrix)\n * (self.major_radius - r)\n / (self.major_radius - self.pedestal_radius)\n + self.ion_density_separatrix\n ),\n )\n return density",
"def compute_virial_quantities(dsname, wdir = './', *args, **kwargs):\n data_ds = yt.load(wdir + dsname + '/' + dsname)\n halos_ds = yt.load(wdir + ROCKSTAR_OUTPUT_PREFIX + dsname + '/halos_0.0.bin')\n\n hc = HaloCatalog(data_ds = data_ds, halos_ds = halos_ds,\n output_dir = wdir + HALOCATALOG_PREFIX + str(data_ds))\n hc.add_filter('quantity_value', 'particle_mass', '>', 1E4, 'Msun')\n\n if ('enzo','Density') in data_ds.field_list:\n mass_field = 'matter_mass'\n radius_field = \"radius\"\n else:\n # DM only simulation\n mass_field = ('all',\"particle_mass\")\n radius_field = ('all','particle_radius')\n \n hc.add_recipe(\"my_calculate_virial_quantities\", [radius_field, mass_field ], radius_field=radius_field)\n hc.create()\n\n return",
"def rho_self(data):\n tdata = dc(data)\n\n try:\n por = np.array(tdata['por'], dtype=float, copy=True, ndmin=1)\n rho_s = a_if_b_scal(tdata['rho_s'], por)\n rho_f = a_if_b_scal(tdata['rho_f'], por)\n # if 's_w' in tdata:\n # s_w = a_if_b_scal(tdata['s_w'], por)\n except NameError:\n raise\n m_e = a_if_b_scal(tdata.get('m_e', np.array(2.)), por)\n # if 's_w' in tdata:\n # n_e = a_if_b_scal(tdata.get('n_e', np.array(2.)), por)\n # rho_f = rho_f/(s_w**n_e)\n\n rho_b = np.array([brentq(lambda x: por_r_self({'rho_f':rho_f[x_i],\n 'rho_s':rho_s[x_i], 'rho_b':x, 'm_e':m_e[x_i]}) - por[x_i],\n min(rho_s[x_i], rho_f[x_i]), max(rho_s[x_i], rho_f[x_i]))\n for x_i in range(len(por))])\n\n return rho_b",
"def rho(self):\r\n return self._rho",
"def get_final_density(input_linear_field, output_resolution=None, time=0.025):\n assert input_linear_field.shape[0]==input_linear_field.shape[1]\n N=len(input_linear_field)\n if not output_resolution:\n output_resolution = N\n x,y = get_evolved_particle_positions(input_linear_field,time)\n f = pynbody.new(len(x))\n f['x']=x-N/2\n f['y']=y-N/2\n f['mass']=1.0\n f['mass'].units=\"kg\"\n f['x'].units=\"cm\"\n return pynbody.plot.sph.image(f,width=N,resolution=output_resolution,units=\"kg cm^-2\")",
"def _check_density(self,density, num_electrons):\n\n FLOAT_PRECISION = 0.01\n #integrate the density over the spherical space\n #s = float(np.sum(density))\n #s = 4*np.pi * float(np.sum(density * self.grid.gridvec**2 ))\n s = 4*np.pi * integrate.simps(density * self.grid.gridvec**2 ,self.grid.gridvec)\n print(\"the density sums to \",s)\n assert (abs(s - num_electrons) < FLOAT_PRECISION), \\\n \"density should sum to {0} ! got prob={1} instead\".format(num_electrons, s)",
"def _critical_rho(n, alpha = 0.05):\n\n df = n - 2\n t_crit = spt.ppf(alpha, df)\n r_crit = np.sqrt( (t_crit**2) / ( (t_crit**2) + df ) )\n\n return r_crit"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function provides a filter for APR3 data to determine whether a precip echo exists in the column. It only performs the check on one scan at a time, but could easily be extended to all scans.
|
def precip_echo_filt(ku):
precip_yn = np.zeros(ku.shape[1])
for j in np.arange(0,ku.shape[1]):
flag1 = 0
c1 = -1
i = -1
start_flag = 0
while flag1 == 0:
i = i + 1
if c1 >= 3:
precip_yn[j] = 1
break
if i == 550:
precip_yn[j] = 0
break
t = ku[i,j]
if start_flag ==0:
if np.ma.is_masked(t):
continue
else:
start_flag = 1
c1 = c1 + 1
else:
if np.ma.is_masked(t):
start_flag = 0
c1 = -1
continue
else:
c1 = c1 + 1
return precip_yn
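A hedged usage sketch (the array size and masking rule are assumptions): the filter walks down each column of a masked Ku-band reflectivity array and flags a profile once it finds at least four consecutive unmasked range gates, giving up at gate 550.

import numpy as np

# hypothetical reflectivity field: 600 range gates x 25 profiles, clear-air gates masked out
ku = np.ma.masked_less(np.random.uniform(-20.0, 40.0, (600, 25)), 0.0)

echo_mask = precip_echo_filt(ku)   # 1 where the column contains a contiguous precip echo
print(echo_mask.shape, int(echo_mask.sum()))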
|
[
"def filter_pfcp_ngap(imsi,file_name):\r\n\tfilter_patten = '\\\"pfcp && e212.imsi == ' +imsi+ '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.seqno'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\tprint(\"\\n\",cmd,\"\\n\")\r\n\r\n\ttmp_list = []\r\n\tfor x in set(subprocess.getoutput( cmd ).split(\"\\n\")):\r\n\t\tif(len(x)>0):\r\n\t\t\ttmp_list.append( 'pfcp.seqno == ' + x )\r\n\r\n\tif(len(tmp_list)<=0):\r\n\t\tprint(\"imsi %s not found in pfcp\" %imsi);\r\n\t\treturn \"\"\r\n\r\n\t\"\"\"\r\n\t2. search pfcp.teid used in ngap by pfcp.seqno\r\n\t\"\"\"\t\r\n\tfilter_pfcp = \"||\".join(tmp_list)\r\n\t#print(\"filter_pfcp= \",filter_pfcp)\r\n\r\n\tfilter_patten = '\\\"' + filter_pfcp + '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.f_teid.teid'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\t#print(\"\\n\",cmd,\"\\n\")\r\n\r\n\ttmp_list = []\r\n\tfor x in set(subprocess.getoutput( cmd ).split(\"\\n\")):\r\n\t\tif len(x) > 0:\r\n\t\t\ttmp_list.append( 'ngap.gTP_TEID == ' + teid2str(x) )\r\n\r\n\t\"\"\"\r\n\t3. search ngap id by teid\r\n\t\"\"\"\t\r\n\tif( len(tmp_list)<1 ):\r\n\t\tprint(\"no gtp teid found in pfcp.\");\r\n\t\treturn filter_pfcp\r\n\t\r\n\tprint(\"Searching in ngap...\");\r\n\tfilter_ngap = '\\\"' + \" || \".join(tmp_list) + '\\\"'\r\n\t#print(filter_ngap)\r\n\r\n\tfilter_patten = filter_ngap\r\n\tTfield = ' -Tfields -e ngap.RAN_UE_NGAP_ID -e ngap.AMF_UE_NGAP_ID'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\tprint(\"\\n\",cmd,\"\\n\")\r\n\r\n\tset_ranid = set()\r\n\tset_amfid = set()\r\n\ttmp_set = set(subprocess.getoutput( cmd ).split('\\n'))\r\n\ttmp_set.discard('')\r\n\t\r\n\tif(len(tmp_set)==0):\r\n\t\treturn \"\"\r\n\t\r\n\tfor x in tmp_set:\r\n\t\ty = x.split('\\t')\r\n\t\tset_ranid = set_ranid | {y[0]}\r\n\t\tset_amfid = set_amfid | {y[1]}\r\n\r\n\tset_ranid.discard('')\r\n\tset_amfid.discard('')\r\n\t\r\n\tif( len(set_ranid)>0 ):\r\n\t\ttmp_set = set()\r\n\t\tfor x in set_ranid:\r\n\t\t\ttmp_set = tmp_set | { 'ngap.RAN_UE_NGAP_ID=='+x }\r\n\t\tset_ranid = tmp_set\r\n\r\n\tif( len(set_amfid)>0 ):\r\n\t\ttmp_set = set()\r\n\t\tfor x in set_amfid:\r\n\t\t\ttmp_set = tmp_set | { 'ngap.AMF_UE_NGAP_ID=='+x }\r\n\t\tset_amfid = tmp_set\r\n\t\r\n\ttmp_set = set_ranid | set_amfid\r\n\ttmp_set.discard('')\r\n\treturn \"||\".join( tmp_set ) +\"||\"+filter_pfcp",
"def filter_pfcp(imsi,file_name):\r\n\tfilter_patten = '\\\"pfcp && e212.imsi == ' +imsi+ '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.seqno'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\tprint(\"\\n\",cmd,\"\\n\")\r\n\r\n\ttmp_list = []\r\n\tfor x in set(subprocess.getoutput( cmd ).split(\"\\n\")):\r\n\t\tif(len(x)>0):\r\n\t\t\ttmp_list.append( 'pfcp.seqno == ' + x )\r\n\r\n\tif(len(tmp_list)<=0):\r\n\t\tprint(\"imsi %s not found in pfcp\" %imsi);\r\n\t\treturn \"\"\r\n\r\n\t\"\"\"\r\n\t2. search pfcp.seid by pfcp.seqno\r\n\t\"\"\"\t\r\n\tfilter_pfcp = \"||\".join(tmp_list)\r\n\t#print(\"filter_pfcp= \",filter_pfcp)\r\n\r\n\tfilter_patten = '\\\"' + filter_pfcp + '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.seid'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\t#print(\"\\n\",cmd,\"\\n\")\r\n\r\n\ttmp_set = set(subprocess.getoutput( cmd ).replace('\\n',',').split(\",\"))\r\n\ttmp_set.discard('0x0000000000000000')\r\n\ttmp_set.discard('')\r\n\t\r\n\tset_pfcp_seid = set()\r\n\tfor x in tmp_set:\r\n\t\tset_pfcp_seid = set_pfcp_seid | { 'pfcp.seid==' + x }\r\n\r\n\treturn \"||\".join( set_pfcp_seid )",
"def precip_echo_filt3D(ku,thresh=5):\n precip_yn = np.zeros([ku.shape[1],ku.shape[2]])\n for k in np.arange(0,ku.shape[1]):\n for j in np.arange(0,ku.shape[2]):\n flag1 = 0 \n c1 = -1\n i = -1\n start_flag = 0\n while flag1 == 0:\n i = i + 1\n\n if c1 >= thresh:\n precip_yn[k,j] = 1\n break\n if i == 550: \n precip_yn[k,j] = 0\n break\n\n t = ku[i,k,j]\n\n if start_flag ==0:\n if np.ma.is_masked(t):\n continue\n else:\n start_flag = 1\n c1 = c1 + 1\n else:\n if np.ma.is_masked(t):\n start_flag = 0\n c1 = -1\n continue\n else:\n c1 = c1 + 1\n return precip_yn",
"def CLASH_3UTR_filter (clash_file, output_file):\n # Read clash data and print its summary information\n human_clash_data = pd.read_csv(clash_file, sep=\"\\t\", skiprows=30)\n print (\"The CLASH file contains {} rows\".format(human_clash_data.shape[0]))\n\n # filtter out all the rows without 3'UTR\n human_clash_data_utr3 = human_clash_data[human_clash_data['3\\'UTR'].notnull()]\n human_clash_data_utr3['ensg'] = human_clash_data_utr3.apply(lambda row: ENSG(row['mRNA_name'], \"_\"), axis=1)\n human_clash_data_utr3['enst'] = human_clash_data_utr3.apply(lambda row: ENST(row['mRNA_name'], \"_\"), axis=1)\n print (\"After filtering all the non 3utr, the CLASH file contains {} rows\".format(human_clash_data_utr3.shape[0]))\n\n human_clash_data_utr3.to_csv(output_file)",
"def _filter_prevalence(self, df = None, min_prev = None):\n if df is None:\n df = self.rpkm_piv_df\n if min_prev is None:\n min_prev = self.min_prev_default\n min_val = df.min().min()\n to_keep = (df > min_val).mean() >= min_prev\n print(\"Features passing a {} prevalence threshold: {:,} / {:,}\".format(\n min_prev ,\n to_keep.sum(),\n to_keep.shape[0]\n ))\n self.rpkm_piv_df_filt = df.loc[:, to_keep]\n return self.rpkm_piv_df_filt.copy()",
"def _filter_prevalence(self, df, min_prev = None):\n if min_prev is None:\n min_prev = self.min_prev_default\n min_val = df.min().min()\n to_keep = (df > min_val).mean() >= min_prev\n print(\"Features passing a {} prevalence threshold: {:,} / {:,}\".format(\n min_prev ,\n to_keep.sum(),\n to_keep.shape[0]\n ))\n return df.loc[:, to_keep]",
"def increaseThresholdFilter(record):\n increase = record.split(';')[1].split(',')[1].strip('%').strip()\n if float(increase) > 1000.0:\n return record",
"def data_extractor():\n\n# Opens the csv file and read it in\n with open('NYPD_Arrests_Data__Historic_.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n\n# Create four different counters for the variables that are needed\n total_arrests2012_2013 = 0\n total_arrests2016_2017 = 0\n marijuana_arrests2012_2013 = 0\n marijuana_arrests2016_2017 = 0\n\n# Create the regular expression patterns that allow us to search trough the data\n p = re.compile('.*201[23]')\n p2 = re.compile('.*201[67]')\n pm = re.compile('MARIJUANA, POSSESSION.*')\n\n# Loop trough the data once to count all the required data using conditions\n for row in csv_reader:\n if p.match(row[1]) and pm.match(row[3]):\n marijuana_arrests2012_2013 += 1\n total_arrests2012_2013 += 1\n elif p.match(row[1]):\n total_arrests2012_2013 += 1\n elif p2.match(row[1]) and pm.match(row[3]):\n marijuana_arrests2016_2017 += 1\n total_arrests2016_2017 += 1\n elif p2.match(row[1]):\n total_arrests2016_2017 += 1\n\n# Print out the results of the countings in a formatted way\n print(\"Total arrests made for marijuana related crimes in the period 2012-2013: {0}\".format(marijuana_arrests2012_2013))\n print(\"Total arrests made for marijuana related crimes in the period 2016-2017: {0}\".format(marijuana_arrests2016_2017))\n print(\"Total arrests made in the period 2012-2013: {0}\".format(total_arrests2012_2013))\n print(\"Total arrests made in the period 2016-2017: {0}\".format(total_arrests2016_2017))",
"def extract_uncertain(): #Add function strings\n sudan_processed = remove_stopwords()\n return [row for row in sudan_processed if bool(re.search(\"reportedly\", row[-5]))]",
"def check_record( record ):\n if record.QUAL < args.QUAL:\n return( False )\n if record.FILTER:\n return( False )\n return( True )",
"def replicate_filter(compiled, replicate_threshold):\n df = compiled.copy()\n replicates = df.groupby('Sequence').count()['Proteins']\n rep_sequences = replicates[replicates == replicate_threshold].reset_index()['Sequence']\n return df[df['Sequence'].isin(rep_sequences)]",
"def mass_conservation_filter(data, qc_thresh=1e-6):\n if \"mass_diff\" not in data.columns:\n mass_columns(data)\n mass_filter = ((data[\"mass_diff\"] == 0) &\n (data[\"NC_TAU_out_v2\"] > 0) &\n (data[\"NR_TAU_out_v2\"] > 0) &\n (data[\"QC_TAU_out_v2\"] > 0) &\n (data[\"QR_TAU_out_v2\"] > 0) &\n (data[\"QC_TAU_in_v2\"] >= qc_thresh))\n return data.loc[mass_filter].reset_index()",
"def filter_packet(p):\n return p.haslayer(IP) and p.haslayer(TCP) and p[TCP].seq in sequence_numbers",
"def dataWNfilter(df):\n\t\n\t#WIDE\n\tdf = wide_filter(df,'combinado')\n\n\t#NARROW\n\tdf = narrow_filter(df,\"mb_total_qt\")\n\tdf = narrow_filter(df,\"arpu_negocio_promedio\")\n\n\treturn df",
"def extractCanton(dataframe, q, raw = None):\n if raw == None: # if raw is not set, it is equal to q\n raw = q\n param = {'q': q, 'country': 'CH', 'type': 'json' ,'username': username}\n \n res = requests.get(url, params=param)\n df_ = pd.read_json(res.content)\n if df_.geonames.count() > 0 :\n df_.geonames[0]['adminCode1']\n dataframe['Canton'][(dataframe['Canton'] == \"\") & (dataframe['University'] == raw)] = df_.geonames[0]['adminCode1']\n return",
"def is_preamble(data) -> bool:\n if len(data) < 16:\n return False\n # set cut-off for 0/1 between minimum and maximum values in data\n thresh = min(data) + ((max(data) - min(data)) / 2)\n normed = [1 if b >= thresh else 0 for b in data]\n # print(f'NORMED PREAMB: {normed}')\n for i, b in enumerate(Radio.PREAMB_KEY):\n if normed[i] != b:\n return False\n return True",
"def is_papua(self, df):\n df['is_papua'] = df.apply(\n lambda x: (\n x['induk_provinsi'] == 'Prov. Papua'\n ) | (\n x['induk_provinsi'] == 'Prov. Papua Barat'\n ), axis=1\n )\n\n return df",
"def __cloud_cover(input_csv: DataFrame, cloud_cover: int) -> Filter:\n to_keep: Filter\n if cloud_cover:\n print(f'Removing entries with more than {cloud_cover}% CC')\n to_keep = input_csv['cloudCover'] <= cloud_cover\n else:\n to_keep = [True] * input_csv.shape[0]\n return to_keep",
"def filter_flux(fluxes, allowance):\n #YOUR CODE HERE\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Overload so len() simply returns the number of nucleotides stored within the instance of the class.
|
def __len__(self):
return(len(self.nucleotides))
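A tiny illustration (the Sequence class name and constructor are hypothetical; only the __len__ behaviour matches the document):

class Sequence:
    def __init__(self, nucleotides):
        self.nucleotides = nucleotides   # e.g. a string or list of bases

    def __len__(self):
        return len(self.nucleotides)

print(len(Sequence("ACGTACGT")))         # 8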
|
[
"def __len__(self):\n return len(self.organisms)",
"def __len__(self):\n\t\tret = 0\n\t\tfor elem in self:\n\t\t\tret += 1\n\t\treturn ret",
"def __len__(self) -> \"int\":\n return _coin.SoMField___len__(self)",
"def __len__(self) -> \"int\":\n return _coin.SoGroup___len__(self)",
"def length(self):\n return len(self.chord_list)",
"def __len__(self):\n totalLength = 0\n for node in self.grid.iter():\n totalLength += len(node.get('grid'))\n\n return totalLength",
"def __len__(self) -> int:\n return len(self.groups[0])",
"def __len__(self):\n return self.GetNumberOfArrays()",
"def __len__(self: bitlist) -> int:\n return len(self.bits)",
"def __len__(self):\n\t\ttotal=0\n\t\tfor ns in self.namespace:\n\t\t\tif ns is None:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\ttotal=total+len(ns)\n\t\treturn total",
"def __len__(self) -> int:\n return len(self.datasets[0])",
"def __len__(self) -> int:\n return self.num_images",
"def __len__(self):\r\n\t\treturn len(self.features)",
"def length(self, index) -> int:\n raise NotImplementedError",
"def __len__(self):\n assert (len(self.item_list) == len(self.item_set))\n return len(self.item_list)",
"def get_length(self):\n return len(self._iupac)",
"def __len__(self):\n return len(self.__neighbours)",
"def __len__(self):\n return len(self._dic) + len(self._lazyload)",
"def __len__(self):\n return sum(len(r) for r in self.ranges)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This is a private method that, given a string of nucleotides, calls the current BIGSI test site and parses the results, given the target species_name and species_min_amount.
|
def _interrogate_bigsi(self,nucleotides_string):
# create an empty list to store the list of sample numbers from the Short Read Archive
sra_samples={}
# define the URL of the BIGSI instance
url_front="http://api.bigsi.io/search?seq="
url_end="&threshold=1.0"
query_string=nucleotides_string.upper()
# call the Web API
r=requests.get(url_front+query_string+url_end)
# parse the returned data
result=json.loads(r.text)
# loop through the samples
for sra_sample_number in result[query_string]['results']:
# pull out the list of predicted species, and their predicted amounts
predicted_species_list=result[query_string]['results'][sra_sample_number]["species"].split("; ")
print(predicted_species_list)
record_sample=False
if ":" in predicted_species_list[0]:
predicted_species_name_0 = predicted_species_list[0].split(" : ")[0]
predicted_species_amount_0 = float(predicted_species_list[0].split(" : ")[1].split("%")[0])
if len(predicted_species_list)>2:
predicted_species_name_1 = predicted_species_list[1].split(" : ")[0]
predicted_species_amount_1 = float(predicted_species_list[1].split(" : ")[1].split("%")[0])
if self.species_name is not None:
if predicted_species_name_0==self.species_name:
if (predicted_species_amount_0/100.)>=self.species_min_amount:
record_sample=True
elif len(predicted_species_list)>2:
if predicted_species_name_1==self.species_name:
if (predicted_species_amount_1/100.)>=self.species_min_amount:
record_sample=True
else:
record_sample=True
if record_sample:
if sra_sample_number not in sra_samples.keys():
if len(predicted_species_list)>2:
sra_samples[sra_sample_number]=[(predicted_species_name_0,predicted_species_amount_0),(predicted_species_name_1,predicted_species_amount_1)]
else:
sra_samples[sra_sample_number]=[(predicted_species_name_0,predicted_species_amount_0),(None,None)]
return(sra_samples)
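A hedged usage sketch: the wrapper class, its constructor, and the query sequence below are assumptions; only the method call mirrors the document. It also needs live network access to the BIGSI endpoint hard-coded above, which may no longer be available.

import json
import requests   # the method above assumes requests and json are importable

class BigsiSearch:
    # hypothetical holder for the two attributes the method reads
    def __init__(self, species_name=None, species_min_amount=0.9):
        self.species_name = species_name
        self.species_min_amount = species_min_amount

# attach the method defined above to the hypothetical class
BigsiSearch._interrogate_bigsi = _interrogate_bigsi

searcher = BigsiSearch(species_name="Mycobacterium tuberculosis", species_min_amount=0.9)
hits = searcher._interrogate_bigsi("GACGGCATCAAGGTCAACTTCAAGATCCGC")  # example query sequence
for sra_sample, predicted_species in hits.items():
    print(sra_sample, predicted_species)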
|
[
"def test_species(self):\n\n # test node.species\n\n species_tree = PhyloTree(\n \"((Felis_catus_1:1, (Homo_sapiens_1:1, Pan_troglodytes_1:1), Saccharomyces_cerevisiae_1:1));\",\n format=1)\n species_tree.set_species_naming_function(lambda n: n.name.split(\"_\")[1] if \"_\" in n.name else '')\n\n pattern0 = \"\"\"((('@.species in (\"sapiens\",\"pygmaeus\") '))' \"Pan_troglodytes_1\" ');\"\"\"\n pattern0 = TreePattern(pattern0, format=8, quoted_node_names=True)\n root = species_tree.get_tree_root()\n\n self.assertEqual(list(pattern0.find_match(species_tree, None)), [root])\n\n # test ncbi taxonomy\n\n ncbi = NCBITaxa()\n taxonomy_tree = PhyloTree(\"((9598, 9606), 10090);\", sp_naming_function=lambda name: name)\n taxonomy_tree.annotate_ncbi_taxa()\n root = taxonomy_tree.get_tree_root()\n\n pattern1 = \"\"\" ' @.sci_name == \"Euarchontoglires\" ';\"\"\"\n pattern2 = \"\"\"\n (( '@.sci_name==\"Homo sapiens\" , 9526 in @.lineage ' )' @.rank==\"subfamily\" and @.taxid == 207598 ')\n ' @.sci_name == \"Euarchontoglires\" and \"cellular organisms\" in @.named_lineage';\n \"\"\"\n\n pattern1 = TreePattern(pattern1, format=1, quoted_node_names=True)\n pattern2 = TreePattern(pattern2, format=1, quoted_node_names=True)\n match1 = pattern1.find_match(taxonomy_tree, None, maxhits=None)\n match2 = pattern2.find_match(taxonomy_tree, None, maxhits=1)\n\n self.assertEqual(len(list(match1)), 5)\n self.assertEqual(list(match2), [root])",
"def test_bioentity_gene_function_taxon_endpoint(self):\n for go_id in go_ids:\n data = {\"taxon\": \"NCBITaxon:9606\"}\n response = test_client.get(f\"/api/bioentity/function/{go_id}/genes\", params=data)\n self.assertEqual(response.status_code, 200)\n self.assertGreaterEqual(len(response.json()), 4)",
"def test_search_species(self):\n resp = self.request({\n 'version': '1.1',\n 'method': 'taxonomy_re_api.search_species',\n 'params': [{\n 'ns': 'ncbi_taxonomy',\n 'search_text': 'prefix:rhodobact',\n 'limit': 10,\n 'offset': 20\n }]\n })\n self.assertTrue(resp.ok, resp.text)\n body = resp.json()\n result = body['result'][0]\n ranks = set(r['rank'] for r in result['results'])\n self.assertEqual(len(result['results']), 10)\n self.assertEqual(ranks, {'species'})\n for result in result['results']:\n self.assertTrue('rhodobact' in result['scientific_name'].lower())",
"def test_ncbi_references(parser, description):\n parser(description)",
"def test_species_name_tag_precedence(self):\n schema = self.schema_with_genus_and_species_name_no_required()\n # remove biosys tag for Genus and Species\n for field in schema['fields']:\n if field['name'] in ['Genus', 'Species']:\n del field['biosys']\n dataset = self.assert_create_dataset(schema)\n records = [\n ['Genus', 'Species', 'SpeciesName', 'When', 'Latitude', 'Longitude'],\n ['Pteropyus', 'vampyrus', 'Canis lupus', '2018-01-25', -32.0, 115.75],\n ]\n expected_species_name = 'Canis lupus'\n\n resp = self._upload_records_from_rows(records, dataset_pk=dataset.pk, strict=False)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n received = resp.json()\n rec_id = received[0]['recordId']\n record = Record.objects.filter(pk=rec_id).first()\n self.assertEqual(record.species_name, expected_species_name)",
"def getAssemblyinfo(speciesName):\n\n#---------------Create e-search URL & send request to API-----------------------\n base_url = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/\"\n \n url = base_url + \"esearch.fcgi?db=assembly&term=(%s[All Fields])&usehistory=y&api_key=f1e800ad255b055a691c7cf57a576fe4da08\" % speciesName # creates e-search url\n\n api_request = requests.get(url) #sends request to api\n \n # grab the response content \n xml_content = api_request.content \n \n # parse with beautiful soup \n soup = BeautifulSoup(xml_content, 'xml')\n#--------------Get Query Key & Web Environments from xml------------------------ \n query_str = soup.find('QueryKey') #finds query key tag from xml\n \n querykey = str(query_str) #converts result to string variable\n \n querykey_num = querykey[10:len(querykey)-11] #parses out query key from string\n \n web_env_str = soup.find('WebEnv') #finds web environment tag from xml\n \n web_env = str(web_env_str) #converts result to string variable\n \n web_env_num = web_env[8:len(web_env)-9] #parses out web environment from string\n \n#-----------------Create e-summary URL and send request to API------------------\n summary_url = base_url + \"esummary.fcgi?db=assembly&query_key=%s&WebEnv=%s&api_key=f1e800ad255b055a691c7cf57a576fe4da08\" % (querykey_num, web_env_num)\n \n api_request_summary = requests.get(summary_url) #sends request to api\n \n # grab the response content \n xml_content_summary = api_request_summary.content\n \n # parse with beautiful soup \n soup_summary = BeautifulSoup(xml_content_summary, 'xml')\n#------------Gets desired information from Assembly database--------------------\n accession_str = soup_summary.find('AssemblyAccession') #finds Assembly accession number tag from xml\n \n accession = str(accession_str) #converts result to string variable\n \n accession_num = accession[19:len(accession)-20] #parses out accession number from string\n \n bioproject_str = soup_summary.find('BioprojectAccn') #finds bioproject tag from xml\n \n bioproject = str(bioproject_str) #converts result to string variable\n \n bioproject_num = bioproject[16:len(bioproject)-17] #parses out bioproject number from string\n \n pubdate_str = soup_summary.find('AsmReleaseDate_GenBank') #finds Assembly publication date tag from xml\n \n pubdate = str(pubdate_str) #converts result to string variable\n \n pubdate_num = pubdate[24:len(pubdate)-37] #parses out assembly publication date from string\n \n return accession_num, bioproject_num, pubdate_num",
"def test_simple_parse(self):\n for file in self.test_files:\n h = open(file, \"r\")\n PrimerSearch.read(h)\n h.close()",
"def collect_all_genomes():\n\n def str2num(s,cat=False,force=True):\n \"\"\"\n Converts string to integer\n eg. ensembl92 to 92\n\n :param s: string\n :param cat: Whether to concatenate detected integers. eg. 20,23 to 2023\n :param force: If True, ignores decimal point error. \n \"\"\"\n import re \n if '.' in s and not force:\n raise ValueError(f\"A string can only be converted to integeres, found a '.' in {s}\")\n n=re.findall(r'\\d+',s)\n if len(n)==0:\n raise ValueError(\"No digits found in string {}\".format(s)) \n elif len(n)==1:\n return int(n[0])\n else:\n if cat:\n return int(''.join(n))\n else:\n return n\n\n from glob import glob\n from os.path import dirname,basename,exists\n import numpy as np\n import pandas as pd\n from pyensembl.species import normalize_species_name,Species\n \n # here's how I get the .cache directory eg. '/home/user/.cache/pyensembl'\n import datacache\n pyensembl_cache_dir=f\"{dirname(datacache.get_data_dir())}/pyensembl\" #FIXME if genomes are installed at other places than .cache\n\n # all the assemblies\n assemblies=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/*\")]\n # dataframe that contains all the info (and can be exported as a tsv).\n dspecies=pd.DataFrame(columns=['latin name','release','synonymn','assembly'])\n # assempy to release min max dict needed as an input to create Species object\n assembly2releasesminmax={}\n # following loop populates the dataframe \n genomei=0\n for assembly in assemblies:\n releases=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/{assembly}/*\")]\n for release in releases:\n releasei=str2num(release) #FIXME is realease is a float\n genome_dir=f\"{pyensembl_cache_dir}/{assembly}/{release}\"\n genome_files=glob(f\"{genome_dir}/*\")\n is_genome_installed=True if len(genome_files)>4 else False #FIXME need more than 4 (.gz) files to be strict\n if is_genome_installed:\n dspecies.loc[genomei,'assembly']=assembly\n dspecies.loc[genomei,'release']=releasei\n dspecies.loc[genomei,'synonymn']=basename(genome_files[0]).split('.')[0]\n dspecies.loc[genomei,'latin name']=normalize_species_name(dspecies.loc[genomei,'synonymn'])\n genomei+=1\n # following loop generates the Species object\n for spc in dspecies['latin name'].unique():\n assembly2releases={}\n for assembly in dspecies.loc[(dspecies['latin name']==spc),'assembly'].unique():\n d=dspecies.loc[((dspecies['latin name']==spc) & (dspecies['assembly']==assembly)),:]\n assembly2releases[assembly]=d['release'].min(),d['release'].max() #FIXME if MAX_ENSEMBL_RELEASE very important and has to be used\n Species.register(\n latin_name=spc,\n synonyms=dspecies.loc[(dspecies['latin name']==spc),'synonymn'].unique().tolist(),\n reference_assemblies=assembly2releases)\n Species.dspecies=dspecies\n return Species",
"def start_requests(self):\n with open('examples.csv', 'r') as file:\n fieldnames = []\n for i, l in enumerate(file):\n fieldnames.append(i)\n with open('examples.csv') as csv_file:\n reader = csv.DictReader(csv_file)\n urls = []\n baseurl = 'https://' + 'где-ударение.рф/в-слове-'\n for row in reader:\n sentence = row['example']\n sentence_list.append(sentence.lower())\n sentence = sentence.replace(',', '')\n sentence = sentence.replace('.', '')\n sentence = sentence.replace('!', '')\n sentence = sentence.replace('?', '')\n sentence = sentence.replace('—', '')\n sentence = sentence.replace('«', '')\n sentence = sentence.replace('»', '')\n sentence = sentence.replace(':', '')\n sentence = sentence.replace(';', '')\n sentence = sentence.replace('(', '')\n sentence = sentence.replace(')', '')\n sentence = sentence.replace('[', '')\n sentence = sentence.replace(']', '')\n sentence = sentence.replace('/', '')\n sentence = sentence.lower()\n words = sentence.split()\n # create list of only words that need stress\n targetwords = [word for word in words if needs_stress(word)]\n targetwords = set(targetwords)\n urls += [baseurl + word + '/' for word in targetwords]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)",
"def add_singlesample_parser(subparsers):\n\n argparser_gsea = subparsers.add_parser(\"ssgsea\", help=\"Run Single Sample GSEA.\")\n\n # group for input files\n group_input = argparser_gsea.add_argument_group(\"Input files arguments\")\n group_input.add_argument(\n \"-d\",\n \"--data\",\n dest=\"data\",\n action=\"store\",\n type=str,\n required=True,\n help=\"Input gene expression dataset file in txt format. Same with GSEA.\",\n )\n group_input.add_argument(\n \"-g\",\n \"--gmt\",\n dest=\"gmt\",\n action=\"store\",\n type=str,\n required=True,\n help=\"Gene set database in GMT format. Same with GSEA.\",\n )\n # group for output files\n group_output = argparser_gsea.add_argument_group(\"Output arguments\")\n add_output_option(group_output)\n\n # group for General options.\n group_opt = argparser_gsea.add_argument_group(\n \"Single Sample GSEA advanced arguments\"\n )\n group_opt.add_argument(\n \"--sn\",\n \"--sample-norm\",\n dest=\"norm\",\n action=\"store\",\n type=str,\n default=\"rank\",\n metavar=\"normalize\",\n choices=(\"rank\", \"log\", \"log_rank\", \"custom\"),\n help=\"Sample normalization method. Choose from {'rank', 'log', 'log_rank','custom'}. Default: rank\",\n )\n\n group_opt.add_argument(\n \"-c\",\n \"--correl-type\",\n dest=\"correl\",\n action=\"store\",\n type=str,\n default=\"rank\",\n metavar=\"transform\",\n choices=(\"rank\", \"symrank\", \"zscore\"),\n help=\"Input data transformation after sample normalization. Choose from {'rank','symrank', 'zscore'}. Default: rank\",\n )\n group_opt.add_argument(\n \"--ns\",\n \"--no-scale\",\n action=\"store_false\",\n dest=\"scale\",\n default=True,\n help=\"If the flag was set, don't normalize the enrichment scores by number of genes.\",\n )\n group_opt.add_argument(\n \"-n\",\n \"--permu-num\",\n dest=\"n\",\n action=\"store\",\n type=int,\n default=0,\n metavar=\"nperm\",\n help=\"Number of random permutations. For calculating esnulls. Default: 0\",\n )\n group_opt.add_argument(\n \"--min-size\",\n dest=\"mins\",\n action=\"store\",\n type=int,\n default=15,\n metavar=\"int\",\n help=\"Min size of input genes presented in Gene Sets. Default: 15\",\n )\n group_opt.add_argument(\n \"--max-size\",\n dest=\"maxs\",\n action=\"store\",\n type=int,\n default=2000,\n metavar=\"int\",\n help=\"Max size of input genes presented in Gene Sets. Default: 2000\",\n )\n group_opt.add_argument(\n \"-w\",\n \"--weight\",\n action=\"store\",\n dest=\"weight\",\n default=0.25,\n type=float,\n metavar=\"weight\",\n help=\"Weighted_score of rank_metrics. For weighting input genes. Default: 0.25\",\n )\n group_opt.add_argument(\n \"-a\",\n \"--ascending\",\n action=\"store_true\",\n dest=\"ascending\",\n default=False,\n help=\"Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. Default: False.\",\n )\n group_opt.add_argument(\n \"-s\",\n \"--seed\",\n dest=\"seed\",\n action=\"store\",\n type=int,\n default=123,\n metavar=\"\",\n help=\"Number of random seed. Default: 123\",\n )\n group_opt.add_argument(\n \"-p\",\n \"--threads\",\n dest=\"threads\",\n action=\"store\",\n type=int,\n default=4,\n metavar=\"procs\",\n help=\"Number of Processes you are going to use. Default: 4\",\n )\n\n return",
"def main():\n import argparse\n\n parser = argparse.ArgumentParser(\n \"Scrape standard names from a file or URL\")\n parser.add_argument('file', nargs='+', metavar='FILE',\n help=\"URL or file to scrape\")\n parser.add_argument('--reader', choices=SCRAPERS.keys(),\n default='url',\n help=\"Name of reader\")\n parser.add_argument('--regex', default=_DEFAULT_SEARCH,\n help='Regular expression describing '\n 'a standard name (%s)' % _DEFAULT_SEARCH)\n\n args = parser.parse_args()\n\n kwds = dict(format=args.reader)\n if args.regex:\n kwds['regex'] = args.regex\n\n docs = {}\n for file_name in args.file:\n docs[file_name] = scrape(file_name, **kwds)\n\n documents = []\n for (name, name_list) in docs.items():\n documents.append(\n _AS_TXT(name_list, sorted=True, heading='Scraped from %s' % name),\n )\n print(os.linesep.join(documents))",
"def main():\n defaults = utils.default_config()\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--config_file\",\n dest=\"config_file\",\n help=(\n \"File containing configuration for the spider script. \"\n \"Config file values will be overridden by commandline arguments.\"\n ),\n )\n parser.add_argument(\n \"--collectors\",\n dest=\"collectors\",\n help=\"Comma-separated list of Collector addresses used to locate Schedds/Startds\",\n )\n parser.add_argument(\n \"--schedds\",\n dest=\"schedds\",\n help=(\n \"Comma-separated list of Schedd names to process \"\n \"[default is to process all Schedds located by Collectors]\"\n ),\n )\n parser.add_argument(\n \"--startds\",\n dest=\"startds\",\n help=(\n \"Comma-separated list of Startd machines to process \"\n \"[default is to process all Startds located by Collectors]\"\n ),\n )\n parser.add_argument(\n \"--process_schedd_history\",\n action=\"store_const\",\n const=False,\n dest=\"process_schedd_history\",\n help=\"Process Schedd history\"\n )\n parser.add_argument(\n \"--process_schedd_queue\",\n action=\"store_const\",\n const=True,\n dest=\"process_schedd_queue\",\n help=\"Process Schedd queue (Running/Idle/Pending jobs)\",\n )\n parser.add_argument(\n \"--process_startd_history\",\n action=\"store_const\",\n const=True,\n dest=\"process_startd_history\",\n help=\"Process Startd history\"\n )\n parser.add_argument(\n \"--process_max_documents\",\n type=int,\n dest=\"process_max_documents\",\n help=(\n \"Abort after this many documents (per Schedd). \"\n f\"[default: {defaults['process_max_documents']} (process all)]\"\n ),\n )\n parser.add_argument(\n \"--process_parallel_queries\",\n type=int,\n dest=\"process_parallel_queries\",\n help=(\n \"Number of parallel processes for querying \"\n f\"[default: {defaults['process_parallel_queries']}]\"\n ),\n )\n parser.add_argument(\n \"--es_host\",\n dest=\"es_host\",\n help=(\n \"Host of the Elasticsearch instance to be used \"\n f\"[default: {defaults['es_host']}]\"\n ),\n )\n parser.add_argument(\n \"--es_port\",\n type=int,\n dest=\"es_port\",\n help=(\n \"Port of the Elasticsearch instance to be used \"\n f\"[default: {defaults['es_port']}]\"\n )\n )\n parser.add_argument(\n \"--es_bunch_size\",\n type=int,\n dest=\"es_bunch_size\",\n help=(\n \"Send docs to ES in bunches of this number \"\n f\"[default: {defaults['es_bunch_size']}]\"\n )\n )\n parser.add_argument(\n \"--es_feed_schedd_history\",\n action=\"store_const\",\n const=True,\n dest=\"es_feed_schedd_history\",\n help=(\n \"Feed Schedd history to Elasticsearch \"\n f\"[default: {defaults['es_feed_schedd_history']}]\"\n )\n )\n parser.add_argument(\n \"--es_feed_schedd_queue\",\n action=\"store_const\",\n const=True,\n dest=\"es_feed_schedd_queue\",\n help=(\n \"Feed Schedd queue to Elasticsearch \"\n f\"[default: {defaults['es_feed_schedd_queue']}]\"\n )\n )\n parser.add_argument(\n \"--es_feed_startd_history\",\n action=\"store_const\",\n const=True,\n dest=\"es_feed_startd_history\",\n help=(\n \"Feed Startd history to Elasticsearch \"\n f\"[default: {defaults['es_feed_startd_history']}]\"\n )\n )\n parser.add_argument(\n \"--es_index_name\",\n dest=\"es_index_name\",\n help=(\n \"Trunk of Elasticsearch index name \"\n f\"[default: {defaults['es_index_name']}]\"\n ),\n )\n parser.add_argument(\n \"--es_index_date_attr\",\n dest=\"es_index_date_attr\",\n help=(\n \"Job attribute to use as date for Elasticsearch index name \"\n \"[default: {defaults['es_index_date_attr']}]\"\n ),\n )\n\n parser.add_argument(\n 
\"--log_dir\",\n default=\"log/\",\n type=str,\n dest=\"log_dir\",\n help=(\n \"Directory for logging information \"\n \"[default: %(default)s]\"\n ),\n )\n parser.add_argument(\n \"--log_level\",\n default=\"WARNING\",\n type=str,\n dest=\"log_level\",\n help=(\n \"Log level (CRITICAL/ERROR/WARNING/INFO/DEBUG) \"\n \"[default: %(default)s]\"\n ),\n )\n parser.add_argument(\n \"--email_alerts\",\n default=[],\n action=\"append\",\n dest=\"email_alerts\",\n help=(\n \"Email addresses for alerts \"\n \"[default: none]\"\n ),\n )\n parser.add_argument(\n \"--read_only\",\n action=\"store_true\",\n dest=\"read_only\",\n help=\"Only read the info, don't submit it.\",\n )\n parser.add_argument(\n \"--dry_run\",\n action=\"store_true\",\n dest=\"dry_run\",\n help=(\n \"Don't even read info, just pretend to. (Still \"\n \"query the collector for the Schedds though.)\"\n ),\n )\n\n args = parser.parse_args()\n args = utils.load_config(args)\n utils.set_up_logging(args)\n\n # --dry_run implies read_only\n args.read_only = args.read_only or args.dry_run\n\n main_driver(args)",
"def test_parse_text_examples(self):\n examples = pd.read_csv('chant21/examples/cantus-volpiano-examples.csv', index_col=0)\n for idx, data in examples.iterrows():\n parser = ParserCantusText()\n parse = parser.parse(data['full_text_manuscript'])\n self.assertTrue(True)",
"def test_species(pakuri: Pakuri, EXPECTED: str):\n assert pakuri.species == EXPECTED",
"def Q2_query(QDrug, QDisease, options):\n\n # Pre-process query (get medic ids)\n drug = QDrug.strip().lower()\n drug_id = GNBR_api.get_MEDICID(drug)\n\n\n # Generate prefix for output file, is drug_disease\n if QDisease is not None:\n disease = QDisease.strip().lower()\n disease_id = GNBR_api.get_MEDICID(disease)\n out_name = drug + \"_\" + disease.replace(\" \", \"-\").lower()\n\n\n # Get list of genes causally annotated to a disease\n if options.verbose and options.batchFile is None:\n print(\"Querying GNBR for disease genes...\")\n dis_gene_list = GNBR_api.get_disease_gene_list(disease, freq_correct=True)\n\n # If first query did not work, try getting nearest matches for the query and try again\n if dis_gene_list is None:\n\n # Get matches\n disease2, match_type = GNBR_api.query_term_for_matches(disease)\n\n # If not fuzzy matching, but simple order matching, use the new disease query and proceed\n if match_type == 0:\n dis_gene_list = GNBR_api.get_disease_gene_list(disease2, freq_correct=True)\n # \n elif len(options.gene_list) >0:\n out_name = drug + \"__\"\n disease = None\n disease_id = None\n dis_gene_list = [GNBR_api.resolve_gene_to_EntrezGeneID(gene) for gene in options.gene_list]\n\n\n\n\n\n # Get list of drug targets from Pharos\n if options.verbose and options.batchFile is None:\n print(\"Querying Pharos for drug targets...\")\n drug_genes = pharos_api.get_ligand_targets(drug)\n\n # If Pharos did not return targets, pull them from the literature\n if drug_genes is None:\n if options.verbose and options.batchFile is None:\n print(\"Pharos did not contain drug target information, querying GNBR for drug targets...\")\n # Search GNBR for a drug target, via binding annotations\n drug_gene_list = GNBR_api.query_drug_target(drug)\n\n else:\n # If targets are from Pharos, map them to their Uniprot IDs\n drug_gene_list = []\n print(\"If targets are from Pharos, map them to their Uniprot IDs\")\n for gene in drug_genes:\n drug_gene_list.append(GNBR_api.resolve_gene_to_EntrezGeneID(gene))\n\n # If either disease or drug list comes up empty\n # the query has failed and we return a statement to that effect\n\n if dis_gene_list is None and drug_gene_list is None:\n print(\"ERROR: Drug and disease not recognized\")\n return \"ERROR: Drug and disease not recognized\"\n\n elif dis_gene_list is None:\n print(\"ERROR: Disease not recognized\")\n return \"ERROR: Disease not recognized\"\n\n elif drug_gene_list is None:\n print(\"ERROR: No drug targets found\")\n return \"ERROR: No drug targets found\"\n\n # If we have targets\n else:\n print(\"drug (%s) and disease (%s) found. 
Processing...\" % (drug, disease) )\n # Generate output directory\n overall_path = os.path.abspath(os.path.dirname(__file__))\n results_dir = os.path.join(*[overall_path, options.outPath, out_name])\n\n # filter out None\n dis_gene_list = [gene for gene in dis_gene_list if gene is not None]\n drug_gene_list = [gene for gene in drug_gene_list if gene is not None]\n\n if len(dis_gene_list) < 1:\n print(\"ERROR: No disease targets found\")\n return \"ERROR: No disease targets found\"\n\n if len(drug_gene_list) < 1:\n print(\"ERROR: No drug targets found\")\n return \"ERROR: No drug targets found\"\n\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n \n # Select the top 25 genes from the disease and then drug gene list for GO enrichment\n print(\"Getting disease genes Calling resolve_EntrezGeneID_to_NCBIGeneName\")\n dis_genes = pd.DataFrame([[GNBR_api.resolve_EntrezGeneID_to_NCBIGeneName(str(x)),x] for x in dis_gene_list], columns=[\"Gene\", \"Entrez ID\"])\n dis_genes_short = dis_genes[:min(len(dis_genes), 5)]\n dis_gene_list = list(map(int, dis_gene_list))\n\n \n print(\"Getting drug genes Calling resolve_EntrezGeneID_to_NCBIGeneName\")\n drug_genes = [[GNBR_api.resolve_EntrezGeneID_to_NCBIGeneName(x), x] for x in drug_gene_list]\n drug_genes = pd.DataFrame(drug_genes, columns=[\"Gene\", \"Entrez ID\"])\n drug_genes_short = drug_genes[:min(5, len(drug_genes))]\n\n\n # Get the GO terms for the drug targets\n drug_gene_list = list(map(int,drug_gene_list))\n drug_targets = GO_API.get_GO_terms(drug_gene_list)\n\n # Get GO Enrichment statistics then saving those to csv\n print(\"Getting Go Enrichment statistics\")\n if options.gen_image:\n go_result = GO_API.calc_GO_enrichment(dis_gene_list, os.path.join(results_dir, out_name), target_list=drug_targets, gen_image=True)\n else:\n go_result = GO_API.calc_GO_enrichment(dis_gene_list, \"\",target_list=drug_targets)\n\n go_result['gene_target'] = go_result['term'].isin(drug_targets)\n\n go_result = go_result.loc[go_result['rejected'] == 1.0, ['name', 'term', 'p', 'q', 'gene_target']]\n go_result = go_result.sort_values(by=['gene_target', 'q'], ascending=[False, True])\n go_result.to_csv(os.path.join(results_dir, out_name + \"_GO_pathway_enrichment.csv\"), mode=\"w+\", index_label=False, index=False)\n \n\n # Get GO Enrichment statistics\n go_result_short = go_result[:min(5, len(go_result))]\n\n # Start saving results\n result = {\"GOENRICH\":go_result, \"drug_genes\":drug_genes, \"disease_genes\":dis_genes, \"drug_id\": drug_id, \"disease_id\": disease_id,\n \"GOENRICH_short\":go_result_short, \"drug_genes_short\":drug_genes_short, \"disease_genes_short\":dis_genes_short,\n }\n \n\n # Get tissue information\n print(\"Getting tissue information resolved to disease via: resolve_EntrezGeneID_to_NCBIGeneName\")\n tissue_df_dis = TiGER_api.get_tissue_counts([GNBR_api.resolve_EntrezGeneID_to_NCBIGeneName(str(x)) for x in dis_gene_list])\n if tissue_df_dis is not None:\n tissue_df_dis_short = tissue_df_dis[:min(5, len(tissue_df_dis))]\n result[\"tissue_df_dis\"] = tissue_df_dis\n result[\"tissue_df_dis_short\"] = tissue_df_dis_short\n\n # Generate image\n print('Generating Image')\n if options.gen_image:\n file_name = os.path.join(results_dir, out_name + '.png')\n if os.path.exists(os.path.join(results_dir, out_name + '.dot')):\n subprocess.check_call(['dot', '-Tpng', os.path.join(results_dir, out_name + '.dot'), '-o', file_name])\n result[\"image_file\"] = file_name\n\n # Get Pubmed id, then get top 10 publication titles \n print(\"Getting 
Pubmed IDs and Titles\")\n if options.gen_pubmed and QDisease is not None:\n PMIDs = GNBR_api.query_chemical_disease(drug, disease, get_PMIDs=True)\n if len(PMIDs) > 0:\n PMID_df = pd.DataFrame([[x, get_PMID(x)] for x in PMIDs[:min(10, len(PMIDs))]], columns=[\"PMIDS\", \"Title\"])\n \n # will show top 5\n PMID_df_short = PMID_df[:min(5, len(PMID_df))]\n result[\"pubmed\"] = PMID_df\n \n result[\"pubmed_short\"] = PMID_df_short\n\n print('Saving pubmed PMIDs to ', results_dir, out_name,\"_PMIDs.csv\")\n PMID_df.to_csv(os.path.join(results_dir, out_name + \"_PMIDs.csv\"), mode=\"w+\",\n index_label=False, index=False, header=False)\n \n else:\n result[\"pubmed\"] = 'no PMIDs found'\n\n return(result)",
"def test_big_number(self):\n feed = \"i bought 10,000 cookies\"\n expected = \"i bought cookies\"\n\n result = Parser().parse_numbers(feed)\n self.assertEqual(expected, result)",
"def test_thorough_parse(self):\n self._test_thorough(pulldom.parse(None, parser=SAXExerciser()))",
"async def test_search_systems_by_name(galaxy_fx):\n nearest = await galaxy_fx.search_systems_by_name(\"Fualun\")\n assert 'FOLNA' in nearest\n assert 'FEI LIN' in nearest\n assert 'FEI LIAN' in nearest",
"def test_show_gpus():\n cli_runner = cli_testing.CliRunner()\n result = cli_runner.invoke(cli.show_gpus, [])\n assert not result.exit_code\n\n result = cli_runner.invoke(cli.show_gpus, ['--all'])\n assert not result.exit_code\n\n result = cli_runner.invoke(cli.show_gpus, ['V100:4'])\n assert not result.exit_code\n\n result = cli_runner.invoke(cli.show_gpus, [':4'])\n assert not result.exit_code\n\n result = cli_runner.invoke(cli.show_gpus, ['V100:0'])\n assert isinstance(result.exception, SystemExit)\n\n result = cli_runner.invoke(cli.show_gpus, ['V100:-2'])\n assert isinstance(result.exception, SystemExit)\n\n result = cli_runner.invoke(cli.show_gpus,\n ['--cloud', 'aws', '--region', 'us-west-1'])\n assert not result.exit_code\n\n for cloud in CLOUDS_TO_TEST:\n result = cli_runner.invoke(cli.show_gpus, ['--cloud', cloud])\n assert not result.exit_code\n\n result = cli_runner.invoke(cli.show_gpus, ['--cloud', cloud, '--all'])\n assert not result.exit_code\n\n result = cli_runner.invoke(cli.show_gpus, ['V100', '--cloud', cloud])\n assert not result.exit_code\n\n result = cli_runner.invoke(cli.show_gpus, ['V100:4', '--cloud', cloud])\n assert not result.exit_code\n\n result = cli_runner.invoke(cli.show_gpus,\n ['V100:4', '--cloud', cloud, '--all'])\n assert isinstance(result.exception, SystemExit)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a Pandas dataframe that lists all the mutations and their occurrences, one per line
|
def _create_dataframe(self):
data_dict={}
# create a Boolean array of only those positions where sequences have been identified
positive_elements=self.arrays["number_genomes"]>0
for key in ['amino_acid_position','original_triplet','new_triplet','number_nucleotide_changes','mutation','number_genomes','original_amino_acid','new_amino_acid','synonymous','non_synonymous']:
data_dict[key]=(self.arrays[key][positive_elements]).tolist()
self.df=pandas.DataFrame(data=data_dict)
self.df["number_nucleotide_changes"]=self.df["number_nucleotide_changes"].astype("int8")
|
[
"def get_mutation_records(self):\n\n yield from self._mutation_records",
"def extract_avenio_mutations(\n columns=[\"Allele Fraction\", \"No. Mutant Molecules per mL\", \"CNV Score\"]\n) -> Dict[str, pd.DataFrame]:\n # Load data from spreadsheet.\n patient_mutations = pd.read_excel(\"variant_list_20200730.xlsx\", sheet_name=1)\n\n # Add pathway or network annotations.\n patient_mutations = _annotate_genes(patient_mutations, \"gene_annotation.xlsx\")\n\n # Combine the T0 and T1 measurements in a single record.\n patient_mutations[\"chromosome\"] = patient_mutations[\"Genomic Position\"].apply(\n lambda x: x.split(\":\")[0] if isinstance(x, str) else x\n )\n # Make seperate timepoint columns for the following columns in the\n # spreadsheet.\n\n spread_sheet = _merge_mutation_spreadsheet_t0_with_t1(patient_mutations, columns)\n\n cleaned_sheets = {}\n # Make a different document for each of the column pairs.\n for column in columns:\n column_pair = [f\"T0: {column}\", f\"T1: {column}\"]\n # Filter out the column pairs, and remove empty fields.\n mutations = spread_sheet[column_pair].dropna(how=\"all\").fillna(0)\n # Repair dirty cells (with per cent signs etc.).\n clean_mutation_sheet = _clean_and_verify_avenio_sheet(mutations, column_pair)\n cleaned_sheets[column] = clean_mutation_sheet\n\n return cleaned_sheets",
"def as_dataframe(self):\n chains = []\n for ch in self.chains:\n chains.append({\n 'pdb_id': self.pdb_id,\n 'biol_unit_nr': self.nr,\n 'letter': ch.letter,\n 'trans_nr': ch.trans_nr,\n 'protein_id': ch.protein,\n 'seq_start': ch.seq_start,\n 'seq_end': ch.seq_end,\n 'complex_stoichiometry': self.stoich_string\n })\n return pd.DataFrame(chains)",
"def get_empty_perSVade_insertions_df():\n\n return pd.DataFrame(columns=[\"ChrA\", \"StartA\", \"EndA\", \"ChrB\", \"StartB\", \"EndB\", \"Copied\", \"ID\"])",
"def __init__(self):\r\n \r\n #Instance variable for storing the occurences of corresponding characters\r\n self.char_occ = pd.DataFrame({'character': self.characters,\r\n 'occurence': [0]*len(self.characters)})",
"def get_genomic_info(self):\n cosmic = []\n features = []\n alterations = [\"METHYLATION\", \"DELETION\", \"GENETIC_VARIATION\", \"AMPLIFICATION\"]\n for alteration in alterations:\n this = self.genomic_df.loc[self.genomic_df.ALTERATION_TYPE == alteration]\n N = len(this.COSMIC_ID.unique())\n cosmic.append(N)\n\n this = self.genomic_df.loc[self.genomic_df.ALTERATION_TYPE == alteration]\n features.append(len(this))\n\n df = pd.DataFrame({\"features\": features, \"cosmic\": cosmic})\n df.index = alterations\n try:\n print(\n \"Number of unique genes: {}\".format(len(self.genomic_df.GENE.unique()))\n )\n except:\n print(\n \"Number of unique genes: {}\".format(\n len(self.genomic_df.IDENTIFIER.unique())\n )\n )\n print(\n \"Number of unique COSMIC ID: {}\".format(\n len(self.genomic_df.COSMIC_ID.unique())\n )\n )\n return df",
"def fusion_df(self):\n a = self.scrap_foxton()\n b = self.scrap_dexters()\n c = self.scrap_hamptons()\n \n return pd.concat([a,b,c], ignore_index=True)",
"def count_df (self):\n df = pd.DataFrame (self.abundance_dict.most_common(), columns=[\"transcript_name\",\"raw\"])\n df.set_index(\"transcript_name\", inplace=True, drop=True)\n df[\"est_count\"] = df[\"raw\"]*len(self.read_dict)\n df[\"tpm\"] = df[\"est_count\"] * 1000000\n return df",
"def get_mutation_info_for_index(self) -> List:\n mutations = []\n for f in self.get_mutant_files():\n try:\n with open(f, 'r') as data_file:\n mutant_obj = json.load(data_file)\n data_file.closed\n except:\n raise Exception(\"Error parsing %s\" % f)\n\n mutations.append({\n \"number\" : mutant_obj[\"number\"],\n \"mutations\" : mutant_obj[\"mutations\"],\n \"module\" : mutant_obj[\"module\"],\n \"status\" : mutant_obj[\"status\"],\n \"time\" : mutant_obj[\"time\"],\n \"killer\" : mutant_obj[\"killer\"],\n \"tests_run\" : mutant_obj[\"tests_run\"]\n })\n return mutations",
"def create_commits_dataframe_lines(self):\n\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n columns.append(commit.hash)\n\n pbar.update(1)\n pbar.close()\n\n\n dataframe_list = []\n index = []\n\n\n cwd = os.getcwd()\n os.chdir(self.repo_folder)\n\n\n # Print analyzing all the lines of the repo\n print('Print analyzing all the lines of the repo')\n file_lines = []\n \n\n for file_path in tqdm.tqdm(self.repo_files_path):\n\n # Get path to file and count number of lines\n complete_file_path = self.repo_folder + '\\\\' + file_path\n linenumber = self.get_file_number_of_lines(complete_file_path)\n\n for i in range(1, linenumber):\n file_lines.append((file_path, i))\n\n line_to_commits = {}\n with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:\n future_to_line = {executor.submit(self.analyze_line, file_line): file_line for file_line in file_lines}\n\n pbar = tqdm.tqdm(total=len(file_lines))\n for future in concurrent.futures.as_completed(future_to_line):\n file_line = future_to_line[future]\n try:\n \n modified_in_commits = future.result()\n modified_in_commits = [commit[1:-1] for commit in modified_in_commits]\n index.append(f'{file_line[0]}:{file_line[1]}')\n file_line_commits = []\n for commit in columns:\n if commit in modified_in_commits:\n file_line_commits.append(1)\n else:\n file_line_commits.append(0)\n dataframe_list.append(file_line_commits)\n except Exception as exc:\n print(f'Error during execution : {exc}')\n pbar.update(1)\n pbar.close()\n\n\n os.chdir(cwd)\n\n return pd.DataFrame(dataframe_list, index=index, columns=columns)",
"def test_permutation(self):\n file_path = os.path.normpath('{}/{}'.format(BASE_DIR,\n 'test_data/entrez_sample.txt'))\n f = open(file_path, 'r')\n\n sample = pandas.read_csv(f, header='infer', sep='\\t')\n sample = sample.set_index(keys=sample.columns[0])\n f.close()\n\n test_data = os.path.normpath('{}/{}'.format(BASE_DIR,\n 'output/permd.txt'))\n df = pandas.read_csv(test_data, header='infer', sep='\\t')\n df = df.rename(columns={'Unnamed: 0': None})\n df = df.set_index(keys=df.columns[0])\n df.index = range(10)\n\n assert_frame_equal(permutate(sample=sample, n_up=50, n_down=50,\n reps=10),df)",
"def get_observation_list(self):\n ob_names = [\"line switches\", \"loads supplied\", \"gen power\" , \"pv scaling\", \n \"pv powered\", \"wind scaling\", \"wind powered\", \"storage powered\", \n \"storage SOCs\"]\n ob_n = [self.n_line, self.n_varloads, self.n_gen, self.n_pv, self.n_pv, \n self.n_wind, self.n_wind, self.n_storage, self.n_storage]\n df = pd.DataFrame(list(zip(ob_names, ob_n)), columns=[\"name\", \"number\"])\n return df",
"def _make_df(recipe: MyRecipe) -> pd.DataFrame:\n df = pd.DataFrame()\n res = recipe.res = FitResults(recipe)\n df[\"name\"] = [\"Rw\", \"half_chi2\"] + res.varnames\n df[\"val\"] = [res.rw, res.chi2 / 2] + res.varvals.tolist()\n df[\"std\"] = [0, 0] + res.varunc\n df = df.set_index(\"name\")\n return df",
"def mutual_information(self):\n mutual_information = pd.DataFrame(np.zeros((len(self.md_sa_seq[0]),\\\n len(self.md_sa_seq[0]))))\n\n mat_p_a = self.matrice_p_a()\n mat_p_ab = self.matrice_p_ab()\n\n for i in range(len(self.md_sa_seq[0])):\n for j in range(0, len(self.md_sa_seq[0])):\n mutual_information[i][j] = self.mutual_information_i(\\\n i, j, mat_p_a, mat_p_ab)\n return mutual_information",
"def get_dataframe(self):\n df = pd.DataFrame()\n for result in self.get_results():\n if (result.get(\"type\") != \"word\" and result.get(\"exact\") is not None):\n df = df.append(pd.DataFrame.from_dict(result, \"index\").T)\n return df",
"def _make_srl_df(text):\n sentences = sent_tokenize(text)\n\n srl = []\n for sent in sentences:\n result = pipeline['srl_en'].predict(sentence=sent)\n srl.append(result)\n\n dfs_by_sent = []\n for sentence in srl:\n tags_by_frame = pd.DataFrame(columns=sentence[\"words\"])\n for i, frame in zip(np.arange(len(sentence[\"verbs\"])), sentence[\"verbs\"]):\n tags_by_frame.loc[i] = frame[\"tags\"]\n\n if tags_by_frame.shape[0] != 0:\n dfs_by_sent.append(tags_by_frame)\n\n return dfs_by_sent",
"def create_commits_dataframe(self):\n\n files_commits = {}\n current_length = 0\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n current_length += 1\n columns.append(commit.hash)\n\n for modification in commit.modified_files:\n\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n \n if current_path is not None:\n\n if current_path in files_commits:\n\n while len(files_commits[current_path]) < current_length - 1:\n files_commits[current_path].append(0)\n files_commits[current_path].append(1)\n \n else:\n files_commits[current_path] = [0 for _ in range(current_length-1)]\n files_commits[current_path].append(1)\n\n pbar.update(1)\n pbar.close()\n\n dataframe_list = []\n index = []\n for key, value in files_commits.items():\n\n if len(value) < current_length:\n\n while len(files_commits[key]) < current_length:\n files_commits[key].append(0)\n\n index.append(key)\n dataframe_list.append(value)\n\n return pd.DataFrame(dataframe_list, index=index, columns=columns)",
"def get_action_df(self):\n action_list = []\n action_names = [\"activate line {}\", \"deactivate line {}\", \"activate load {}\", \"deactivate load {}\", \n \"activate gen {}\", \"deactivate gen {}\", \"activate PV power plant {}\", \n \"deactivate PV power plant {}\", \"activate wind power plant {}\", \"deactivate wind power plant {}\", \n \"activate storage {}\", \"deactivate storage {}\"]\n\n for m in range(len(self.action_categories)):\n for n in self._get_action_matrix()[m]: \n action_list.append([action_names[m].format(n), self.action_duration[m]]) \n \n df = pd.DataFrame(action_list, columns=[\"name\", \"duration\"]) \n return df",
"def stats(self) -> pd.DataFrame:\n cumul = []\n for f in self:\n info = {\n \"flight_id\": f.flight_id,\n \"callsign\": f.callsign,\n \"origin\": f.origin,\n \"destination\": f.destination,\n \"duration\": f.stop - f.start,\n }\n cumul.append(info)\n\n return (\n pd.DataFrame.from_records(cumul)\n .set_index(\"flight_id\")\n .sort_values(\"duration\", ascending=False)\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns data including only lines containing SNPs.
|
def vcf_snp_prune(self, in_data=None):
snps_data = []
print "\nPruning non-SNP entries..."
bar = progressbar.ProgressBar(redirect_stdout=True)
for i in bar(range(len(in_data))):
file_line = in_data[i]
cols = file_line.split('\t')
        # If the line starts with '#' (meta-info line) or is blank, skip it
if (file_line[:1] == "#") or (cols[self.vcf_chrom] == '\n') or len(file_line) < 1:
continue
cols[self.vcf_ref] = cols[self.vcf_ref].upper()
cols[self.vcf_alt] = cols[self.vcf_alt].upper()
if len(cols[self.vcf_ref]) > 1: # if not a snp
continue
elif (cols[self.vcf_ref] not in self.acgt) and (cols[self.vcf_ref] not in self.missing): # if not a snp
continue
else:
alt_alleles = cols[self.vcf_alt].split(",") # List of ALT alleles for this row
goodalt = True
for allele_pos, chk_allele in enumerate(alt_alleles): # Iterates through the alleles
if len(chk_allele) > 1:
goodalt = False
if chk_allele in self.missing:
alt_alleles[allele_pos] = "."
if goodalt:
cols[self.vcf_alt] = ",".join(alt_alleles)
clean_file_line = "\t".join(cols)
snps_data.append(clean_file_line)
return snps_data
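# Standalone Python 3 sketch of the same SNP filter as above: skip header and blank
# lines, keep rows whose REF is a single base (A/C/G/T or a missing code) and whose
# ALT alleles are all single bases, normalising missing ALT alleles to ".".
# The hard-coded column indices (3 = REF, 4 = ALT) follow the VCF layout and are an
# assumption here, not the self.vcf_* attributes used by the class above.
ACGT = {"A", "C", "G", "T"}
MISSING = {".", "N"}

def snp_prune(lines):
    snps = []
    for line in lines:
        if line.startswith("#") or not line.strip():
            continue
        cols = line.rstrip("\n").split("\t")
        ref = cols[3].upper()
        alts = [a.upper() for a in cols[4].split(",")]
        if len(ref) != 1:
            continue                                   # indel, not a SNP
        if ref not in ACGT and ref not in MISSING:
            continue                                   # unrecognised REF base
        if any(len(a) != 1 for a in alts):
            continue                                   # multi-base ALT, not a SNP
        cols[3] = ref
        cols[4] = ",".join("." if a in MISSING else a for a in alts)
        snps.append("\t".join(cols))
    return snps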
|
[
"def filter_snps( snp_list ):\n return [s for s in snp_list if s.S1 != 'N']",
"def all_snpedia_rsids():\n snpedia = MediaWiki(url=\"https://bots.snpedia.com/api.php\")\n\n # pull list of all available SNP markers\n all_snps = snpedia.categorymembers(\"Is a snp\", results=None, subcategories=False)\n all_snps = [s.lower() for s in all_snps] # convert to lowercase\n\n # save to file\n with open(\"./data/SNPedia_ids.txt\", \"wb\") as f:\n pickle.dump((all_snps), f)\n\n return all_snps",
"def srt_data():\n \n logging.info(\"loading coordinates from SRT file\")\n\n with open('../videos/DJI_0301.SRT') as file:\n res = [list(g)\n for b, g in groupby(file, lambda x: bool(x.strip())) if b]\n\n return res",
"def snps(self):\n data = self.tokenize(self.data)\n\n self.allele1_aligned = [] # Will be joined into string later.\n self.allele2_aligned = []\n snps = []\n\n idx = 0 # Current index on the SNP sequence.\n pos = 0 # Position relative to the first allele\n while idx < len(data):\n c = data[idx]\n if c in 'ACGTN':\n self.allele1_aligned.append(c)\n self.allele2_aligned.append(c)\n elif c == '[':\n self.allele1_aligned.append(data[idx+1])\n self.allele2_aligned.append(data[idx+3])\n if data[idx+1] == '-':\n # Insertion SNP\n descriptor = f'.{pos}ins{data[idx+3]}'\n snp = Snp(descriptor)\n elif data[idx+3] == '-':\n # Deletion SNP\n descriptor = f'.{pos}del'\n snp = Snp(descriptor)\n snp.ref_nucleotide = data[idx+1]\n else:\n # Substitution SNP\n descriptor = f'.{pos}{data[idx+1]}>{data[idx+3]}'\n snp = Snp(descriptor)\n \n\n snps.append(snp)\n\n # Place the idx on the ']' so the next increment reads the next token.\n idx += 4\n else:\n raise StarpError(('Invalid characters in Snp Sequence. The accepted alphabet '\n 'is {A, C, G, T, -, /, [, ]}.'))\n\n idx += 1\n pos += 1\n\n self.allele1_aligned = ''.join(self.allele1_aligned)\n self.allele2_aligned = ''.join(self.allele2_aligned)\n self.allele1 = self.allele1_aligned.replace('-', '')\n self.allele2 = self.allele2_aligned.replace('-', '')\n\n return snps",
"def makePrivateLines(self, p):\n at = self.c.atFileCommands\n # A hack: we want to suppress gnx's *only* in @+node sentinels,\n # but we *do* want sentinels elsewhere.\n at.at_shadow_test_hack = True\n try:\n s = at.atFileToString(p, sentinels=True)\n finally:\n at.at_shadow_test_hack = False\n return g.splitLines(s)",
"def get_snps(self):\n\n sheet_name = self.get_sheet_name()\n\n df_snps = pd.read_excel(self.excel_file, header=0, parse_cols='A:C,O:X', skiprows=2,\n names=['Gene', 'Exon', 'Direction', 'SNPCheck_build', 'Total_SNPs', 'dbSNP_rs', 'HGVS',\n 'Frequency', 'ss_refs', 'ss_projects', 'Other_info', 'Action_required',\n 'Checked_by'],\n index_col=False, sheetname=sheet_name)\n\n for col in ['Gene', 'Exon', 'Direction']:\n df_snps[col] = df_snps[col].fillna(method='ffill') # These fields usually contain merged cells.\n\n df_snps = df_snps.where((pd.notnull(df_snps)), None) # Easier to check than NaN.\n snp_faults = sum(CheckSNPs(df_snps).check_all())\n\n return df_snps, snp_faults",
"def load_sentences(self):\n if self.print_only:\n infile = 'data/sentences_clean.txt'\n with open(infile) as infile:\n lines = infile.readlines()\n sentences = [l.lower().strip() for l in lines]\n else:\n infile = resource_filename('typer_game', 'data/audio_lookup_subset.txt')\n sentences = pickle.load(open(infile, 'rb'))\n return sentences",
"def _get_lines_for_protein(self):\n self.lines = []\n self.res_nbr = 1\n self.atom_nbr = 1\n mapping_coords = zip(self.mapping, self._coord_generator())\n for (res_name, atom_names), res_coords in mapping_coords:\n self.lines.extend(self._get_lines_for_residue(res_name, atom_names, res_coords))\n self.res_nbr += 1\n return self.lines",
"def script(self):\n return [\n p.text.strip()\n for p in self.xml.findall('p')\n if p.text and p.text.strip() and not _is_technical_note(p)\n ]",
"def extract_lncrna_only(input_file, output_file):\n\n ids = []\n for entry in entries:\n type = re.findall(\"^ENSG\\d+\\.\\d+:(.+)\", entry[3])\n # if the type exists\n if len(type) != 0:\n splits = type[0].split(\",\")\n # and if there is only 1 entry\n if len(splits) == 1:\n # and that entry is lncRNA\n if splits[0] == \"lncRNA\":\n ids.append(entry[1])\n with open(output_file, \"w\") as outfile:\n outfile.write(\"{0}\\n\".format(\"\\t\".join(sorted(ids))))",
"def init_data(self) -> list[str]:\n x = self\n lines = x.old_sent_lines\n # The sentinels preceding each non-sentinel line,\n # not including @verbatim sentinels.\n sentinels: list[str] = []\n # A list of all non-sentinel lines found. Should match x.a.\n new_lines: list[str] = []\n # A list of lists of sentinels preceding each line.\n x.sentinels = []\n i = 0\n while i < len(lines):\n line = lines[i]\n i += 1\n if x.marker.isVerbatimSentinel(line):\n # Do *not* include the @verbatim sentinel.\n if i < len(lines):\n line = lines[i]\n i += 1\n x.sentinels.append(sentinels)\n sentinels = []\n new_lines.append(line)\n else:\n x.verbatim_error()\n elif x.marker.isSentinel(line):\n sentinels.append(line)\n else:\n x.sentinels.append(sentinels)\n sentinels = []\n new_lines.append(line)\n x.trailing_sentinels = sentinels\n return new_lines",
"def pull_points_from_nyc_data(data):\n points = []\n for row in data:\n p = [int(row[19]),int(row[20])]\n if not p in points:\n points.append(p)\n \n return points",
"def _non_empty_lines(output):\n return [line for line in output.splitlines() if line.strip()]",
"def get_snps_occurences(self, plink_geno_file, snp_indices, number_of_samples, min_missing_values):\n relevant_snp_occurrences = [] # will hold ndarray of occurences per sample for each snp\n # na_count_per_sample = zeros(number_of_samples) #missing samples handling\n\n if type(plink_geno_file) != file:\n samples_snps_f = open(plink_geno_file, 'r')\n else:\n samples_snps_f = plink_geno_file\n\n snps_missing_values_counter = 0\n\n # iterate over each snp\n next_index = 0\n for i, samples_snp in enumerate(samples_snps_f):\n if (next_index < len(snp_indices)) and (i == snp_indices[next_index]):\n\n snp_occurrences = self.convert_012_string_to_ndarray(samples_snp[:-1])\n na_indices = snp_occurrences[where(snp_occurrences == self.NA_VALUE)[0]] #samples indices where this snp is missing\n # na_count_per_sample[na_indices] += 1 #missing samples handling\n\n na_count = len(na_indices)\n na_percentage = float(na_count) / number_of_samples\n if na_percentage > min_missing_values:\n # too many missing values in this snp: ignore this snp for all samples, and dont use it to impute site methylation level\n snp_indices[next_index] = -1 # mark as not relevant\n snps_missing_values_counter += 1\n else:\n if na_count != 0 :\n # impute snp occurences - relate the mean of non-missing samples in this snp\n non_na_indices = delete(range(len(snp_occurrences)), na_indices)\n snp_occurrences[na_indices] = mean(snp_occurrences[non_na_indices])\n\n relevant_snp_occurrences.append(snp_occurrences)\n\n next_index += 1\n\n samples_snps_f.close()\n \n logging.info(\"Removing %d SNPs with more than %f missing values...\" %(snps_missing_values_counter, min_missing_values))\n relevant_snps_indices = snp_indices[where(snp_indices != -1)[0]]\n \n # # find samples with too many missing values #missing samples handling\n # missing_sampels_indices = where(na_count_per_sample > number_of_samples * min_missing_values)[0] #missing samples handling\n # non_missing_sampels_indices = delete(range(number_of_samples), missing_sampels_indices) #missing samples handling\n\n # remove missing samples from snp occurences\n relevant_snp_occurrences = vstack(tuple(relevant_snp_occurrences))\n\n # relevant_snp_occurrences = relevant_snp_occurrences[:, non_missing_sampels_indices] #missing samples handling\n # missing_sampels_indices = where(na_count_per_sample >= number_of_samples * 0)[0] #missing samples handling\n # logging.info(\"removing %d samples with more than %f missing values...\" %(len(missing_sampels_indices), min_missing_values)) #missing samples handling\n\n return relevant_snp_occurrences, relevant_snps_indices#, missing_sampels_indices, non_missing_sampels_indices #missing samples handling",
"def lines(self):\n if self.__all:\n return self.__fea.signlines\n else:\n return self.__slines",
"def parse_nozzlecheck(data):\n m = re.search('@BDC PS\\r?\\nnc:((\\d\\d,)+(\\d\\d));', data)\n return map(lambda x: int(x)>0, m.group(1).split(','))",
"def query_for_lines(self, table_name):\n return []",
"def non_comment_lines(self):\n return [_ for _ in self.stripped_whole_lines() if not _.startswith(\"#\")]",
"async def get_spn_list(self):\n spnlist = []\n netads = await run([SMBCmd.NET.value, '-k', 'ads', 'setspn', 'list'], check=False)\n if netads.returncode != 0:\n raise CallError(\n f\"Failed to generate SPN list: [{netads.stderr.decode().strip()}]\"\n )\n\n for spn in netads.stdout.decode().splitlines():\n if len(spn.split('/')) != 2:\n continue\n spnlist.append(spn.strip())\n\n return spnlist"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a list of variants from a reference sequence. Intended for use with FASTA input, but will work with any AlignIO object or list of sequence data.
|
def variants_from_sequence(self):
print "\nSetting up sequence difference sets..."
firstseq = self.sequence[0]
diffsets = []
for i in range(len(firstseq)):
diffsets.append(set())
bar = progressbar.ProgressBar(redirect_stdout=True)
for i in bar(range(len(self.sequence))):
seq_line = self.sequence[i]
if len(seq_line) != len(firstseq):
print "Error! A sequence line is a different size", len(seq_line), "than the first sequence!", len(
firstseq)
print seq_line
exit()
for j in range(len(seq_line)):
diffsets[j].add(seq_line[j])
print "\nGenerating variants from sequence..."
bar = progressbar.ProgressBar(redirect_stdout=True)
for i in bar(range(len(self.sequence))):
seq_line = self.sequence[i]
if len(seq_line) != len(firstseq):
print "Error! A sequence line is a different size", len(seq_line), "than the first sequence!", len(
firstseq)
print seq_line
return
diffs = [i for i in xrange(len(firstseq)) if firstseq[i] != seq_line[i]]
curdiffs = []
for diff_pos in diffs:
self.variantset.add(diff_pos)
curdiffs.append(diff_pos)
self.variants[seq_line.name] = curdiffs
return
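# Hedged, minimal sketch of the core idea above: compare every sequence against the
# first one and record the positions where they differ. Plain strings stand in for
# the AlignIO records used by the class; the function and variable names here are
# illustrative, not part of the original code.
def variants_from_strings(names, sequences):
    first = sequences[0]
    variantset = set()
    variants = {}
    for name, seq in zip(names, sequences):
        if len(seq) != len(first):
            raise ValueError("sequence %s has length %d, expected %d"
                             % (name, len(seq), len(first)))
        diffs = [i for i in range(len(first)) if first[i] != seq[i]]
        variantset.update(diffs)
        variants[name] = diffs
    return variantset, variants

# Example: the second sequence differs from the first at positions 1 and 3.
vset, vmap = variants_from_strings(["ref", "s1"], ["ACGT", "AGGA"])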
|
[
"def get_bam_ref_sequences(filename, headsize=64000):\n with open(filename, 'rb') as bamfile:\n head = bamfile.read(headsize)\n\n # read a bunch of fields, where the final field is the size of the block\n # minus 1, then decompress the block\n fields = struct.unpack_from('<4BI2BH2B2H', head)\n block_size = fields[-1] + 1\n bam_data = decompress_block(head[:block_size])\n\n offset = 0 # keep track how much data we've read\n magic, l_text = struct.unpack_from('<4si', bam_data)\n if magic != b'BAM\\x01': # magic string\n raise RuntimeError('{} appears to not be a BAM file'.format(filename))\n offset += 8 + l_text\n n_ref, = struct.unpack_from('<i', bam_data, offset)\n offset += 4\n ref_sequences = list()\n for _ in range(n_ref):\n l_name, = struct.unpack_from('<i', bam_data, offset)\n offset += 4\n name, = struct.unpack_from('<{:d}s'.format(l_name), bam_data, offset)\n ref_sequences.append(name[:-1]) # sequence names are NUL terminated\n offset += l_name + 4 # skipping l_ref\n return ref_sequences",
"def resolveVariant(self,ref,alt,start,chrom):\n\n ref_end = start + len(ref)\n alt_end = start + len(alt)\n\n if chrom[start:ref_end] == ref: \n return [True,ref,alt,'',0]\n elif chrom[start:alt_end] == alt:\n return [True,alt,ref,'ref_alt_rev',1] \n elif ref != '-' and chrom[start:ref_end] == Seq(ref).reverse_complement().tostring():\n ref = Seq(ref).reverse_complement().tostring()\n alt = Seq(alt).reverse_complement().tostring()\n return [True,ref,alt,'neg_strand',2] \n elif alt != '-' and chrom[start:alt_end] == Seq(alt).reverse_complement().tostring():\n ref = Seq(ref).reverse_complement().tostring()\n alt = Seq(alt).reverse_complement().tostring()\n return [True,alt,ref,'neg_strand_ref_alt_rev',3] \n else:\n refStart = start - 5\n refStop = max(ref_end,alt_end) + 6\n print ref+'\\t'+alt+'\\t'+str(start)+'\\t'+ str(ref_end)+'\\t'+str(alt_end)+'\\t'\n #return [False,ref,alt,'unknown - ref:'+chrom[start:max(ref_end,alt_end)],-1 ]\n return [False,ref,alt,'unknown - ref:'+chrom[start-5:start+6],-1 ]",
"def differentiate_sequences(seq, ref=None, verbose=True):\n\n if ref is None:\n ref = seq\n\n verboseprint = print if verbose else lambda *a, **k: None\n\n if len(seq) % 3 != 0:\n raise ValueError(\"Sequence length must be divisible by 3\")\n\n if len(seq) != len(ref):\n raise ValueError(\"`seq` and `ref` must be the same length\")\n\n if set(seq + ref) - {\"A\", \"C\", \"G\", \"T\"} != set():\n raise ValueError(\"The sequences must contain only A,T,G,C\")\n\n seq_codons = genealloy.convert_seq_to_codons(seq)\n seq_aa = [genealloy.codon_to_aa[triplet] for triplet in seq_codons]\n\n verboseprint(\"The aa sequence is:\", \"\".join(seq_aa))\n\n aa_to_codon = generate_aa_to_codon(genealloy.codon_to_aa)\n\n modified_seq_codons = []\n for i, triplet in enumerate(seq_codons):\n ref_triplet = ref[i * 3 : i * 3 + 3]\n if triplet != ref_triplet:\n replacement = [triplet]\n else:\n replacement = []\n aa = genealloy.codon_to_aa[triplet]\n for codon in aa_to_codon[aa]:\n if codon != ref_triplet:\n replacement.append(codon)\n if len(replacement) == 0:\n replacement = [triplet] # no alternative codon\n modified_seq_codons.append(replacement)\n\n modified_seq = \"\".join([codons[0] for codons in modified_seq_codons])\n\n if _has_rapidfuzz:\n verboseprint(\n \"Levenshtein distance (seq vs ref):\",\n rapidfuzz.levenshtein.distance(seq, ref),\n )\n verboseprint(\n \"Levenshtein distance (modified seq vs ref):\",\n rapidfuzz.levenshtein.distance(modified_seq, ref),\n )\n else:\n verboseprint(\"Levenshtein distance requires the rapidfuzz package\")\n\n return {\"solution\": modified_seq, \"codons\": modified_seq_codons}",
"def query_variants(self, chrom, start, end):\n variant_list = []\n req_body = {\n 'datasetId' : self.dataset_id,\n 'start': start,\n 'end': end,\n 'referenceName': chrom\n }\n r = requests.post('%s%s' % (self.host_url, 'variants/search'), json=req_body).json()\n for variant in r['results']['variants']:\n variant_list.append(':'.join([chrom, variant['start'], variant['end']]))\n return variant_list",
"def get_reference_seqs(args, len_reads):\n # generate reference sequence with N's\n if args.ref_mode == \"N\":\n\n print(\"Generating reference sequence with all Ns...\")\n num_ref_seqs = 1\n ref_samples = np.zeros((num_ref_seqs, len_reads, 4))\n\n # create reference sequences with same GC content as the training data set\n elif args.ref_mode == \"GC\":\n\n print(\"Generating reference sequences with same GC-content as training data set...\")\n train_samples = np.load(args.train_data, mmap_mode='r')\n num_ref_seqs = 5\n ref_seqs = [0]*num_ref_seqs\n # calculate frequency of each nucleotide (A,C,G,T,N) in the training data set\n probs = np.mean(np.mean(train_samples, axis=1), axis=0).tolist()\n probs.append(1-sum(probs))\n # generate reference seqs\n for i in range(num_ref_seqs):\n ref_seqs[i] = np.random.choice([0, 1, 2, 3, 4], p=probs, size=len_reads, replace=True)\n ref_samples = to_categorical(ref_seqs, num_classes=5)\n # remove channel of N-nucleotide\n ref_samples = ref_samples[:, :, 0:4]\n nc_dict = {0: 'A', 1: 'C', 2: 'G', 3: 'T', 4: 'N'}\n train_data_set_name = os.path.splitext(os.path.basename(args.train_data))[0]\n # save reference sequences\n with open(args.out_dir + '/' + train_data_set_name + '_references.fasta', 'w') as csv_file:\n file_writer = csv.writer(csv_file)\n for seq_id in range(num_ref_seqs):\n file_writer.writerow([\">\"+train_data_set_name+\"_ref_\"+str(seq_id)])\n file_writer.writerow([\"\".join([nc_dict[base] for base in ref_seqs[seq_id]])])\n del train_samples\n\n # load own reference sequences (args.ref_mode == \"own_ref_file\")\n else:\n\n print(\"Loading reference sequences...\")\n tokenizer = Tokenizer(char_level=True)\n tokenizer.fit_on_texts('ACGT')\n ref_reads = list(SeqIO.parse(args.ref_seqs, \"fasta\"))\n ref_samples = np.array([np.array([tokenizer.texts_to_matrix(read)]) for read in ref_reads])\n # remove unused character\n if not np.count_nonzero(ref_samples[:, :, :, 0]):\n ref_samples = ref_samples[:, :, :, 1:5]\n ref_samples = ref_samples.squeeze(1)\n # num_ref_seqs = ref_samples.shape[0]\n\n return ref_samples",
"def find_region_variants(my_bg, ref, supercontig, start, end,\n min_overlap=70, max_anchors=10000, max_steps=100000,\n skip_ambiguous=True):\n if start >= end:\n raise RuntimeError(\"start must be < end\")\n\n all_variants = []\n\n # For all contigs within the region...\n for ref_range in ref.find_ranges(supercontig, start, end):\n\n # Collect the forward and reverse anchors\n from biograph.internal import find_anchors, assemble\n fwd = find_anchors(\n my_bg, ref_range, True, min_overlap, max_anchors)\n rev = find_anchors(\n my_bg, ref_range, False, min_overlap, max_anchors)\n\n # Assemble between anchors. Returns [variants, anchor coverage]\n results = assemble(\n fwd, rev, min_overlap, max_steps, skip_ambiguous, my_bg.readmap)\n\n # Add in reference coverage for anchors. Start with reference coverage.\n fixed_coverage = my_bg.seq_coverage(ref_range.sequence)\n\n # Add hom-reference object (no variants)\n if not results:\n all_variants.append(\n Assembly(ref_range=ref_range, variants=[], coverage=fixed_coverage))\n continue\n\n # Coverage entries are of the format:\n # ['scaffold', position, [25,25,26,26,26...]]\n for cov in results[1]:\n if cov[0] != ref_range.scaffold:\n continue\n\n for i in range(len(cov[2])):\n mod_pos = cov[1] + i - ref_range.start\n if 0 <= mod_pos < ref_range.size:\n fixed_coverage[mod_pos] += cov[2][i]\n\n all_variants.append(\n Assembly(ref_range=ref_range, variants=sorted(results[0]), coverage=fixed_coverage))\n\n return all_variants",
"def parseRefSeq(RefseqToEntrez):\n\tconvert = []\n\tx = open(RefseqToEntrez)\n\tfor line in x:\n\t\tline = line.rstrip(\"\\n\")\n\t\tline = line.split(\"\\t\")\n\t\tif (line[0] == \"#RefSeq_ID\"):\n\t\t\tcontinue\n\t\tconvert.append(line)\n\tx.close()\n\treturn (convert)",
"def get_ref_and_filter(input_alignment):\n #Get reference strain:\n ref_seq = []\n for seq_record in SeqIO.parse(input_alignment, \"fasta\"):\n ref_seq = list(seq_record.seq)\n break\n #Creating the initial state of the filter.\n filter_seq = [0 for i in xrange(len(ref_seq))]\n return ref_seq, filter_seq",
"def variant_matches_reference_sequence(variant, ref_seq_on_transcript, strand):\n if strand == \"-\":\n ref_seq_on_transcript = reverse_complement_dna(ref_seq_on_transcript)\n return ref_seq_on_transcript == variant.ref",
"def _read_variants(pipeline, known_args):\n # type: (beam.Pipeline, argparse.Namespace) -> pvalue.PCollection\n representative_header_lines = None\n if known_args.representative_header_file:\n representative_header_lines = vcf_header_parser.get_metadata_header_lines(\n known_args.representative_header_file)\n\n if known_args.optimize_for_large_inputs:\n variants = (pipeline\n | 'InputFilePattern' >> beam.Create([known_args.input_pattern])\n | 'ReadAllFromVcf' >> vcfio.ReadAllFromVcf(\n representative_header_lines=representative_header_lines,\n allow_malformed_records=(\n known_args.allow_malformed_records)))\n else:\n variants = pipeline | 'ReadFromVcf' >> vcfio.ReadFromVcf(\n known_args.input_pattern,\n representative_header_lines=representative_header_lines,\n allow_malformed_records=known_args.allow_malformed_records)\n return variants",
"def getChromosomes(refGenome):\r\n refIndex = refGenome + \".fai\"\r\n if os.path.exists(refIndex) == False:\r\n exitStatus = subprocess.call(\"samtools faidx \" + refIndex)\r\n if exitStatus == 1:\r\n print(\"ERROR: Failed to create a fai index file for the reference genome, do I have permissions to write in the directory of the reference genome?\")\r\n exit(1)\r\n chromosomes = []\r\n with open(refIndex) as indexFile:\r\n indexReader = csv.reader(indexFile, delimiter=\"\\t\")\r\n for line in indexReader:\r\n chromosomes.append(line[0])\r\n return chromosomes",
"def get_variants(self):\n return self.variants or []",
"def references(self):\n out = []\n fields = 'position id doi title authors sourcetitle publicationyear '\\\n 'volume issue first last text fulltext'\n ref = namedtuple('Reference', fields)\n items = self._tail.get('bibliography', {}).get('reference', [])\n if not isinstance(items, list):\n items = [items]\n for item in items:\n info = item['ref-info']\n volisspag = info.get('ref-volisspag', {})\n try:\n auth = info['ref-authors']['author']\n if not isinstance(auth, list):\n auth = [auth]\n authors = [', '.join([d['ce:surname'], d['ce:initials']])\n for d in auth]\n except KeyError: # No authors given\n authors = None\n ids = info['refd-itemidlist']['itemid']\n if not isinstance(ids, list):\n ids = [ids]\n try:\n doi = [d['$'] for d in ids if d['@idtype'] == 'DOI'][0]\n except IndexError:\n doi = None\n new = ref(position=item.get('@id'),\n id=[d['$'] for d in ids if d['@idtype'] == 'SGR'][0],\n doi=doi, authors=authors,\n title=info.get('ref-title', {}).get('ref-titletext'),\n sourcetitle=info.get('ref-sourcetitle'),\n publicationyear=info.get('ref-publicationyear', {}).get('@first'),\n volume=volisspag.get('voliss', {}).get('@volume'),\n issue=volisspag.get('voliss', {}).get('@issue'),\n first=volisspag.get('pagerange', {}).get('@first'),\n last=volisspag.get('pagerange', {}).get('@last'),\n text=info.get('ref-text'),\n fulltext=item.get('ref-fulltext'))\n out.append(new)\n return out or None",
"def VCFtoFASTA(member):\n\tref = open(member[0], \"r\")\n\twindow = open(member[1], \"r\")\n\tvcffile = open(member[2], \"r\")\n\tfasta1 = open(member[3], \"w\")\n\tfasta2 = open(member[4], \"w\")\n\t\n\tchrom = \">\"+window.readline() #in the FASTA genome file, every chromosome is a header and starts with \">\"\n\tstart = int(window.readline()) #this is the start of the window (included in the reference window)\n\tfinish = int(window.readline())-1 #this is the end of the window (not included in the reference window)\n\n\trefwin = [] #a list that corresponds to the reference window between the start and the end\n\n\n\t#first, we get the FASTA sequence between positions start and finish\n\tchrnum = ref.readline()\n\n\twhile not chrnum == chrom: #make sure we're at the right chromosome\n\t\tchrnum = ref.readline()\n\n\tposskipped = 0\n\n\tfor line in ref:\n\t\tline_length = len(line.strip())\n\t\tposskipped+=line_length #this is how long we assume that a FASTA line is\n\t\tif posskipped<start:\n\t\t\tcontinue\n\t\trefwin+=list(line.strip().upper())\n\t\tif posskipped>finish:\n\t\t\tbreak\n\trefwin = refwin[(start-1)%line_length:] #trim chars before the start position\n\trefwin = refwin[:-(line_length-finish%line_length)] #trim chars after the finish; turns out that if finish%line_length==0, then refwin has +line_length chars\n\n\tsequence1 = list(refwin) \n\tsequence2 = list(refwin) \n\tused1 = [0]*len(refwin) #indicates if an index is used to check for overlapping variants. It uses variant_id to asociate the position to a specific variant.\n\tused2 = [0]*len(refwin) #see above\n\tind1 = [0]*len(refwin) #indicates where are insertions/deletions\n\tind2 = [0]*len(refwin)\n\tvar_map1 = [0]*len(refwin)\n\tvar_map2 = [0]*len(refwin)\n\n\thomozygous = False #a homozygous variant is applied to both sequences\n\n\t#second, we insert the changes from the VCF into the sequences\n\tvariant_counter = 0\n\thetero_counter = 0\n\n\tfor line in vcffile:\n\t\tif line[0]=='#': #header line\n\t\t\tcontinue\n\t\tif not doesLineCount(line):\n\t\t\tcontinue\n\t\tinfo = line.split()\n\t\tpos = int(info[1])\n\t\tif not info[0] == chrom[1:-1]: #wrong chromosome\n\t\t\tcontinue\n\t\t\n\t\trecord_id = int(info[2].strip())\n\t\tref_seq = list(info[3].strip())\n\t\talt_seq = list(info[4].strip())\n\t\tif pos<start: #haven't reached the window yet\n\t\t\tcontinue\n\t\tif pos>finish: #went past the window\n\t\t\tbreak\n\t\tif pos + len(ref_seq) > finish: #variant will go out of the window\n\t\t\tbreak\n\t\tif ',' in alt_seq:\n\t\t\tcontinue #we don't want to see the commas for now TODO: divide into two lines, check VCF to make sure these aren't phased\n\t\t#we are surely in the window\n\t\t#now we check whether the variant is homozygous or heterozygous\n\t\t\n\t\t#print \"processing the following line:\"+int_to_alpha(record_id)\n\t\t#print line\n\t\t\n\t\tvariant_counter = variant_counter + 1;\n\t\tvariant_id = record_id \n\t\thomozygous = True if info[9][0]=='1' and info[9][2]=='1' else False\n\t\tif homozygous == False:\n\t\t\thetero_counter += 1\n\t\twinpos = pos-start\n\t\tassert(ref_seq == refwin[winpos:winpos+len(ref_seq)])\n\t\toffset_1 = sum(ind1[:winpos]) #what is the difference in positions between ref and sequence1\n\t\toffset_2 = sum(ind2[:winpos]) #what is the difference in positions between ref and sequence2\n\t\tuscore1 = sum(used1[winpos:winpos+len(ref_seq)]) #is there anything used in the positions of the variant\n\t\tuscore2 = sum(used2[winpos:winpos+len(ref_seq)])\n\t\tif uscore1 == 0: #we can add 
this variant to the 1st FASTA file\t\n\t\t\tsequence1[winpos+offset_1:winpos+len(ref_seq)+offset_1] = list(alt_seq)\n\t\t\tvar_map1[winpos+offset_1:winpos+len(ref_seq)+offset_1] = [variant_id]*len(alt_seq)\n\t\t\tind1[winpos] = len(alt_seq)-len(ref_seq)\n\t\t\tused1[winpos:winpos+len(ref_seq)] = [variant_id]*len(ref_seq) # val uses ref_seq. alt_seq keeps used1 aligned to seequence1\n\t\t\tassert(variant_id > 0)\n\t\t\tif not homozygous: #we wouldn't want to add to the second sequence\n\t\t\t\tcontinue\n\t\tif uscore2 == 0:\n\t\t\tsequence2[winpos+offset_2:winpos+len(ref_seq)+offset_2] = list(alt_seq)\n\t\t\tvar_map2[winpos+offset_2:winpos+len(ref_seq)+offset_2] = [variant_id]*len(alt_seq)\n\t\t\tind2[winpos] = len(alt_seq)-len(ref_seq)\n\t\t\tused2[winpos:winpos+len(ref_seq)] = [variant_id]*len(ref_seq) # same as above\n\t\t\tassert(variant_id > 0)\n\n\tprint \"********\"\n\tprint \"We processed :\"+str(variant_counter)+\" variants\"\n\tprint \"We processed :\"+str(hetero_counter)+\" heterozygous variants\"\n\tprint \"********\"\n\t#print \"\".join(map(str,used1))\n\t#print \"\".join(map(str,used2))\n\t#print \"********\"\n\t#print \"\".join(sequence1)\n\t#print \"\".join(sequence2)\n\t#print \"********\"\n\t#print \"\".join(map(str,ind1))\n\t#print \"\".join(map(str,ind2))\n\t#print \"********\"\n\tgappedsequence1 = list(sequence1) #in these we put the aligned sequences\n\tgappedsequence2 = list(sequence2)\n\thappening = 0\n\tgaps1 = 0\n\tgaps2 = 0\n\t#now, we build the alignment of the sequences (in fact, only semi-align: put gaps where there are INDELS)\n\t#put gaps ('-'s) with respect to the first sequence\n\tfor nucind in xrange(len(ind1)):\n\t\tgaplen = ind1[nucind]*((-1) if ind1[nucind]<0 else 1)\n\t\tif ind1[nucind]>0: #there was an insertion\n\t\t\t#add gaps to sequence2 and make sure ind is OK too\n\t\t\tgappedsequence2[nucind+gaps2:nucind+gaps2+1] = gappedsequence2[nucind+gaps2:nucind+gaps2+1] + ['-']*gaplen\n\t\t\tvar_map2[nucind+gaps2:nucind+gaps2+1] = var_map2[nucind+gaps2:nucind+gaps2+1] + [0]*gaplen\n\t\t\t\n\t\t\tgaps1+=gaplen\n\t\t\tgaps2+=gaplen\n\t\t\thappening+=1\n\t\tif ind1[nucind]<0: #there was a deletion\n\t\t\t#add gaps to sequence1 and make sure ind is OK too\n\t\t\tgappedsequence1[nucind+gaps1:nucind+gaps1+1] = gappedsequence1[nucind+gaps1:nucind+gaps1+1] + ['-']*gaplen\n\t\t\tvar_id = var_map1[nucind+gaps1]\n\t\t\tassert(var_id != 0)\n\t\t\tvar_map1[nucind+gaps1:nucind+gaps1+1] = var_map1[nucind+gaps1:nucind+gaps1+1] + [var_id]*gaplen #TODO\n\t\t\thappening+=1\n\t# put gaps with respect to the second sequence\n\t# NEEDS TO BE DONE IN THE SAME CYCLE TO KEEP THE GAPS CORRECT\n\t\tgaplen = ind2[nucind]*((-1) if ind2[nucind]<0 else 1)\n\t\tif ind2[nucind]>0: #there was an insertion\n\t\t\t#add gaps to sequence1 and make sure ind is OK too\n\t\t\tgappedsequence1[nucind+gaps1:nucind+gaps1+1] = gappedsequence1[nucind+gaps1:nucind+gaps1+1] + ['-']*gaplen\n\t\t\tvar_map1[nucind+gaps1:nucind+gaps1+1] = var_map1[nucind+gaps1:nucind+gaps1+1] + [0]*gaplen\n\t\t\t\n\t\t\tgaps1+=gaplen\n\t\t\tgaps2+=gaplen\n\t\t\thappening+=1\n\t\tif ind2[nucind]<0: #there was a deletion\n\t\t\t#add gaps to sequence2 and make sure ind is OK too\n\t\t\tgappedsequence2[nucind+gaps2:nucind+gaps2+1] = gappedsequence2[nucind+gaps2:nucind+gaps2+1] + ['-']*gaplen\n\t\t\tvar_map2[nucind+gaps2:nucind+gaps2+1] = var_map2[nucind+gaps2:nucind+gaps2+1] + [0]*gaplen\n\t\t\thappening+=1\n\n\t#lastly, we format the aligned sequences as FASTA (60 characters per line)\n\tfasta1.write(\">hg19|chromosome 
\"+chrom[1:-1]+\"|start pos \"+str(start)+\"|end pos \"+str(finish)+\"|variants from \"+member[2]+\"|first sequence\\n\")\n\tfasta2.write(\">hg19|chromosome \"+chrom[1:-1]+\"|start pos \"+str(start)+\"|end pos \"+str(finish)+\"|variants from \"+member[2]+\"|second sequence\\n\")\n\tfasta1.write(\"\\n\".join(\"\".join(gappedsequence1[i:i+60]) for i in xrange(0, len(gappedsequence1), 60))+\"\\n\")\n\tfasta2.write(\"\\n\".join(\"\".join(gappedsequence2[i:i+60]) for i in xrange(0, len(gappedsequence2), 60))+\"\\n\")\n\t\n\t\n\t#print \"\".join(gappedsequence1)\n\t#print \"\".join(gappedsequence2)\n\t#print \"______\"\n\t#print \"\".join(map(str,str_to_alpha(var_map1)))\n\t#print \"\".join(map(str,str_to_alpha(var_map2)))\n\t#print \"______\"\n\n\n\tvcffile.close()\n\tref.close()\n\twindow.close()\n\n\tfasta1.close()\n\tfasta2.close()\n\n\t#here we return the two ind lists\n\t#return [ind1, ind2]\n\treturn [var_map1, var_map2, hetero_counter]",
"def matchreads(refseq,refbase1num,firstbases,seqs,quals):\n ## by python numbering the first base in refseq is at position 0\n ## need to renumber of firstbases[] values, so the base positions line up\n numbases = len(refseq)\n r = [[i,refseq[i],[],[]] for i in range(numbases)] # make a structure to hold everything we need for each base\n numreads = len(firstbases)\n for j in range(numreads):\n k = firstbases[j]\n for ci,c in enumerate(seqs[j]):\n renum1 = (k+ci) - refbase1num\n if 0 <= renum1 < numbases:\n r[renum1][2].append(c)\n r[renum1][3].append(quals[j][ci])\n return r",
"def extract_sub_alignment_read_seq(aln, ref_start, ref_end):\n # TODO TODO TODO implement this!",
"def fasta2vcf(f):\r\n\tmy_dict = {}\r\n\tfor r in SeqIO.parse(f, \"fasta\"):\r\n\t\tmy_dict[r.id] = str(r.seq).upper()\r\n\tprint (my_dict)\r\n\tvcf = pd.DataFrame()\r\n\tindex_list = []\r\n\tchr_list = []\r\n\tpos_list = []\r\n\tref_list = []\r\n\talt_list = []\r\n\tseq_list = []\r\n\tfor k in my_dict:\r\n\t\tif not \"_ref\" in k:\r\n\t\t\tcontinue\r\n\t\tname = k.replace(\"_ref\",\"\")\r\n\t\tif not name+\"_alt\" in my_dict:\r\n\t\t\tprint (k,\"alt sequence not found. Please use _ref and _alt keywords. Skip...\")\r\n\t\t\tcontinue\r\n\t\tref_seq,alt_seq = my_dict[k],my_dict[name+\"_alt\"]\r\n\t\tif len(ref_seq) < 30:\r\n\t\t\tprint (k,\"Please input sequence length at least 30bp. Skip...\")\r\n\t\t\tcontinue\r\n\t\tif ref_seq == alt_seq:\r\n\t\t\tprint (k,\"Ref and Alt sequence is the same. Please check. Skip...\")\r\n\t\t\tcontinue\r\n\t\tpos,ref,alt = find_pos_ref_alt(ref_seq,alt_seq)\r\n\t\tindex_list.append(name)\r\n\t\tchr_list.append(k)\r\n\t\tseq_list.append(ref_seq)\r\n\t\tpos_list.append(pos)\r\n\t\tref_list.append(ref)\r\n\t\talt_list.append(alt)\r\n\tvcf[0] = chr_list\r\n\tvcf[1] = pos_list\r\n\tvcf[2] = index_list\r\n\tvcf[3] = ref_list\r\n\tvcf[4] = alt_list\r\n\tvcf[5] = seq_list\r\n\tvcf = vcf[vcf[1]!=-1]\r\n\tif vcf.shape[0] == 0:\r\n\t\tprint (\"no valid sequences in:\",f)\r\n\t\tprint (\"Exit...\")\r\n\t\tsys.exit(1)\r\n\r\n\treturn vcf",
"def get_de_novo_variants(sample_alignment, sv_methods=CUSTOM_SV_METHODS):\n\n # Check to see if this reference genome has ever had structural variants\n # called against it. If not, return empty list.\n ref_genome = sample_alignment.alignment_group.reference_genome\n if 'INFO_METHOD' not in ref_genome.variant_key_map[MAP_KEY__COMMON_DATA]:\n return []\n\n # Otherwise fetch all variants corresponding to SVs called by our custom\n # methods. We do this in two steps.\n\n # First, fetch all VCCD for this object, so we can inspect INFO_METHODs.\n all_sample_vccd_list = (\n VariantCallerCommonData.objects.filter(\n variantevidence__experiment_sample=\n sample_alignment.experiment_sample))\n\n # Now filter, keeping only the ones that match one of our sv_methods.\n filtered_variant_list = []\n for vccd in all_sample_vccd_list:\n if ('INFO_METHOD' in vccd.data and\n vccd.data.get('INFO_METHOD') in sv_methods):\n filtered_variant_list.append(vccd.variant)\n\n return filtered_variant_list",
"def get_subarray(self, ref, slc):\n\n refsub = ref.copy()\n # If the reference file data might have a dimension greater than\n # two, use this syntax:\n # refsub.data = ref.data[..., slc[0], slc[1]].copy()\n refsub.data = ref.data[slc[0], slc[1]].copy()\n if hasattr(ref, \"dq\"):\n refsub.dq = ref.dq[slc[0], slc[1]].copy()\n\n return refsub"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Strips out non-segregating sites from a sequence alignment. Uses self.variantset, which must be filled first.
|
def prune_non_seg(self):
self.fullsequence = self.sequence # First back up the original sequence
self.fullvariantset = self.variantset
self.fullvariants = self.variants
self.sequence = MultipleSeqAlignment([]) # Blank the sequence to be worked on
print "\nPruning non-segregating sites..."
locs = []
for curvar in self.variantset:
locs.append(curvar)
locs.sort()
stripped = {}
seqnames = []
for seq in self.fullsequence:
stripped[seq.name] = []
seqnames.append(seq.name)
for i in xrange(len(locs)):
loc = locs[i]
self.pruned_to_full.append(loc)
seqbits = self.fullsequence[:, loc]
name = 0
for seqbit in seqbits:
stripped[seqnames[name]].append(seqbit)
name += 1
for strip in stripped.keys():
self.sequence.append(SeqRecord(Seq(''.join(stripped[strip])), name=strip, id=strip))
self.variantset = set()
self.variants = {}
self.variants_from_sequence() # Re-run on stripped sequence
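# Minimal sketch of the column-pruning step above, using plain strings instead of a
# Biopython MultipleSeqAlignment: keep only the columns listed in the variant set
# (sorted, as above) and rebuild each sequence from those columns. The helper name
# and the example data are illustrative, not part of the original class.
def prune_columns(seqs_by_name, variantset):
    locs = sorted(variantset)
    pruned_to_full = list(locs)    # maps pruned column index back to the original position
    pruned = {name: "".join(seq[loc] for loc in locs)
              for name, seq in seqs_by_name.items()}
    return pruned, pruned_to_full

# Example: only columns 1 and 3 are segregating.
pruned, mapping = prune_columns({"ref": "ACGT", "s1": "AGGA"}, {1, 3})
# pruned == {"ref": "CT", "s1": "GA"}; mapping == [1, 3]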
|
[
"def remove_unalignable(self):\n if self.unalignable is None or self.data is None:\n self.logger.warn(\"Both unalignable regions and RE data must be \"\n \"loaded prior to running this function\")\n return None\n self.logger.info(\"Removing unalignable regions\")\n data = [self.data]\n indices = [self.chr_indices]\n if self.binned is not None:\n data.append(self.binned)\n indices.append(self.bin_indices)\n for h in range(len(data)):\n for i in range(self.chroms.shape[0]):\n if indices[h][i] == indices[h][i + 1]:\n continue\n start = indices[h][i]\n stop = indices[h][i + 1]\n where = numpy.where(self.unalignable['chr'] == i)[0]\n if where.shape[0] == 0:\n continue\n mids = (data[h]['coords'][start:stop, 0]\n + data[h]['coords'][start:stop, 1]) // 2\n starts = numpy.searchsorted(\n mids, self.unalignable['coords'][where, 0]) + start\n stops = numpy.searchsorted(\n mids, self.unalignable['coords'][where, 1]) + start\n for j in range(starts.shape[0]):\n data[h]['alignable'][starts[j]:stops[j]] = False\n data[h]['score'][starts[j]:stops[j]] = numpy.nan\n # If counts are present for both treatment and control,\n # (re)calculate scores\n if (numpy.amax(self.data['treatment']) > 0\n and numpy.amax(self.data['control']) > 0):\n self.calculate_scores()",
"def remove_nonvariable_sites(baselist):\n\tif unique_list_size(non_gap_bases(baselist)) == 1:\n\t\treturn [''] * len(baselist)\n\telse:\n\t\treturn baselist",
"def eliminate_site(self, protein, sequence, site_index,\n escore_threshold=0.3):\n maxescore = 1 # just to initialize with a large value\n prevmax = -1 # just to avoid infinite loop\n # list of mutated sites\n site_mutpos = []\n # sequence\n full_sequence = str(sequence)\n\n flag = True\n while flag and prevmax != maxescore:\n prevmax = float(maxescore)\n maxepreds = self.get_max_non_intersecting_escore(protein=protein,\n full_seq=full_sequence,\n site_index=site_index)\n maxescore = maxepreds['score']\n # if the max non intersecting escore is below the threshold, nothing to mutate\n if maxescore < escore_threshold:\n flag = False\n else:\n # return immediately if the site can't be mutated\n if maxescore == float(\"inf\"): # no e-score that can be chosen\n # since there is no site to be mutated, then just use empty list\n return full_sequence, []\n seq_tomutate = maxepreds[\"escore_seq\"]\n midpos = len(seq_tomutate) // 2\n # get new mutated sequence\n mutated_escore_seq = self.mutate_escore_seq_at_pos(seq_tomutate, midpos, escore_threshold)\n if mutated_escore_seq != \"\":\n # mutate the sequence\n mut_start = self.bsites[protein][site_index].site_start + maxepreds[\"start_idx\"]\n mut_end = mut_start + len(mutated_escore_seq)\n full_sequence = full_sequence[:mut_start] + mutated_escore_seq + full_sequence[mut_end:]\n site_mutpos.append(mut_start + midpos)\n else:\n full_sequence = \"\"\n site_mutpos = []\n # return the new mutated sequence and the positions mutated\n return full_sequence, site_mutpos",
"def trim(self, taxon_missingness=0.75):\n # debug('in trim')\n i = 0\n seqlen = len(self.aln[i])\n while seqlen == 0:\n i = i + 1\n seqlen = len(self.aln[i])\n for tax in self.aln:\n if len(self.aln[tax]) != seqlen:\n sys.stderr.write(\"can't trim un-aligned inputs, moving on\")\n return\n start = 0\n stop = seqlen\n cutoff = len(self.aln) * taxon_missingness\n for i in range(seqlen):\n counts = {\"?\": 0, \"-\": 0}\n for tax in self.aln:\n call = self.aln[tax][i].label\n if call in [\"?\", \"-\"]:\n counts[call] += 1\n if counts[\"?\"] + counts[\"-\"] <= cutoff:\n start = i\n break\n for i in range(seqlen, 0, -1):\n counts = {\"?\": 0, \"-\": 0}\n for tax in self.aln:\n call = self.aln[tax][i - 1].label\n if call in [\"?\", \"-\"]:\n counts[call] += 1\n if counts[\"?\"] + counts[\"-\"] <= cutoff:\n stop = i\n break\n aln_ids = set()\n for taxon in self.aln:\n self.aln[taxon] = self.aln[taxon][start:stop]\n aln_ids.add(taxon.label)\n assert aln_ids.issubset(self.otu_dict.keys())\n treed_taxa = set()\n for leaf in self.tre.leaf_nodes():\n treed_taxa.add(leaf.taxon)\n for leaf in self.tre.leaf_nodes():\n if leaf.taxon not in aln_ids:\n self.tre.prune_taxa([leaf])\n self.tre.prune_taxa_with_labels([leaf.taxon])\n self.tre.prune_taxa_with_labels([leaf])\n treed_taxa.remove(leaf.taxon)\n assert treed_taxa.issubset(aln_ids)\n if _VERBOSE:\n sys.stdout.write(\"trimmed alignment ends to < {} missing taxa, \"\n \"start {}, stop {}\\n\".format(taxon_missingness, start, stop))\n return",
"def remove_duplicate_seqs(alignment):\n\tnewseqs = list()\n\treturnseqs = list()\n\tfor record in alignment:\n\t\tsequence = record.seq\n\t\tif sequence not in newseqs:\n\t\t\tnewseqs.append(record.seq)\n\t\t\t# record.seq = Seq(str(record.seq).replace('N','-'), record.seq.alphabet)\n\t\t\treturnseqs.append(record)\n\treturn MultipleSeqAlignment(returnseqs)",
"def TrimEdges(alignment):\n codons = [alignment[:,x:x+3] for x in range(0, alignment.get_alignment_length(), 3)]\n percentages = []\n goodCodonsIndices = []\n for codon in codons:\n gapPerc = float(codon[:,0].count(\"-\")+codon[:,1].count(\"-\")+codon[:,2].count(\"-\"))/(len(codon)*3)\n percentages.append(gapPerc)\n for i in range(0,len(percentages)-1):\n if percentages[i] < args.trim:\n goodCodonsIndices.append(i)\n goodCodons = codons[goodCodonsIndices[0]:goodCodonsIndices[len(goodCodonsIndices)-1]]\n cleanedAlignment = alignment[:,0:0]\n for codon in goodCodons:\n cleanedAlignment = cleanedAlignment+codon\n return(cleanedAlignment)",
"def omit_gap_sequences(self, maximum_gap_frequency):\n # handle empty Alignment case\n if self.is_empty():\n return self.__class__([])\n\n base_frequencies = self.k_word_frequencies(k=1)\n gap_alphabet = self[0].gap_alphabet()\n seqs_to_keep = []\n for seq, f in izip(self, base_frequencies):\n gap_frequency = sum([f[c] for c in gap_alphabet])\n if gap_frequency <= maximum_gap_frequency:\n seqs_to_keep.append(seq.identifier)\n return self.subalignment(seqs_to_keep=seqs_to_keep)",
"def clear_bilingual_alignments(inst: Instance):\n for trans_word in inst.trans:\n for aligned_item in list(trans_word.alignments): # type: Union[Word, SubWord]\n trans_word.remove_alignment(aligned_item)",
"def trim(aligned_headers_seqs):\n if not (isinstance(aligned_headers_seqs, list) and len(aligned_headers_seqs) >= 2):\n raise ValueError, \"Input does not specify at least two aligned sequences.\"\n ref_seq = aligned_headers_seqs[0].seq# str yields the sequence\n #print(ref_seq)\n # Getting the positions to strip from the start\n go=True\n i=0\n start_excess=0\n while (go==True):\n if (ref_seq[i]=='-'):\n start_excess=i # strip 0 to i\n else:\n go=False\n i=i+1\n # Getting the posisiton to remove from the end\n start_excess=start_excess+1 # slicing is inclusive on this end\n end=True\n i=len(ref_seq)-1\n end_excess=i\n print(i)\n while (end==True):\n if (ref_seq[i]=='-'):\n end_excess=i # strip 0 to i\n else:\n end=False\n i=i-1\n\n print \"%s bases taken off the 5' end\" % str(start_excess)\n print \"%s bases taken off the 3' end \" % str(len(ref_seq)-1-end_excess)\n\n\n\n samp_seq=aligned_headers_seqs[1]\n samp_seq.seq=samp_seq.seq[start_excess:end_excess]\n\n return([samp_seq,start_excess,end_excess+1]) # In a 1 base system (like R) The start will be the last base to not be exclued on the 5' and end is the last base off the end to be included.",
"def remove_identical_seqs(self):\n debug(\"remove identical seqs\")\n if len(self.new_seqs_otu_id) > 0:\n if _DEBUG:\n sys.stdout.write(\"running remove identical twice in a row\"\n \"without generating new alignment will cause errors. skipping\\n\")\n return\n tmp_dict = dict((taxon.label, self.data.aln[taxon].symbols_as_string()) for taxon in self.data.aln)\n old_seqs = tmp_dict.keys()\n # Adding seqs that are different, but needs to be maintained as diff than aln that the tree has been run on\n avg_seqlen = sum(self.data.orig_seqlen) / len(self.data.orig_seqlen) # HMMMMMMMM\n assert self.config.seq_len_perc <= 1\n seq_len_cutoff = avg_seqlen * self.config.seq_len_perc\n for gb_id, seq in self.new_seqs.items():\n if gb_id.split(\".\") == 1:\n debug(gb_id)\n if self.blacklist is not None and gb_id in self.blacklist:\n debug(\"gb_id in blacklist, not added\")\n pass\n elif gb_id in self.newseqs_acc: # added to increase speed. often seq was found in another blast file\n debug(\"passed, was already added\")\n pass\n else:\n if len(seq.replace(\"-\", \"\").replace(\"N\", \"\")) > seq_len_cutoff:\n if self.config.blast_loc != \"remote\":\n tax_name = None\n # ######################################################\n # ### new implementation of rank for delimitation\n if type(self.mrca_ncbi) is int:\n mrca_ncbi = self.mrca_ncbi\n elif len(self.mrca_ncbi) == 1:\n mrca_ncbi = list(self.mrca_ncbi)[0]\n else:\n debug(self.mrca_ncbi)\n debug(\"think about something to do!\")\n rank_mrca_ncbi = self.ids.ncbi_parser.get_rank(mrca_ncbi)\n # get rank to delimit seq to ingroup_mrca\n # get name first\n if gb_id[:6] == \"unpubl\":\n debug(\"unpubl data\")\n debug(self.data.gb_dict[gb_id])\n tax_name = self.data.gb_dict[gb_id][u\"^ot:ottTaxonName\"]\n ncbi_id = self.data.gb_dict[gb_id][u\"^ncbi:taxon\"]\n if tax_name is None:\n tax_name = self.data.gb_dict[gb_id][u'^user:TaxonName']\n if ncbi_id is None:\n debug(tax_name.split(\" \")[0])\n tax_lin_name = tax_name.split(\" \")[0]\n tax_lin_name = tax_lin_name.split(\"_\")[0]\n print(tax_lin_name)\n ncbi_id = self.ids.ncbi_parser.get_id_from_name(tax_lin_name)\n # ncbi_id = 00000\n elif len(gb_id.split(\".\")) >= 2:\n if gb_id in self.data.gb_dict.keys() and 'staxids' in self.data.gb_dict[gb_id].keys():\n tax_name = self.data.gb_dict[gb_id]['sscinames']\n ncbi_id = self.data.gb_dict[gb_id]['staxids']\n else:\n tax_name = self.ids.find_name(acc=gb_id)\n if tax_name is None:\n sys.stderr.write(\"no species name returned for {}\".format(gb_id))\n ncbi_id = self.ids.map_acc_ncbi(gb_id)\n assert tax_name is not None\n assert ncbi_id is not None\n tax_name = str(tax_name).replace(\" \", \"_\")\n input_rank_id = self.ids.ncbi_parser.get_downtorank_id(ncbi_id, rank_mrca_ncbi)\n # #######################################################\n if input_rank_id == mrca_ncbi: # belongs to ingroup mrca -> add to data, if not, leave it out\n # debug(\"input belongs to same mrca\")\n self.newseqs_acc.append(gb_id)\n otu_id = self.data.add_otu(gb_id, self.ids)\n self.seq_dict_build(seq, otu_id, tmp_dict)\n else:\n self.newseqs_acc.append(gb_id)\n otu_id = self.data.add_otu(gb_id, self.ids)\n self.seq_dict_build(seq, otu_id, tmp_dict)\n old_seqs_ids = set()\n for tax in old_seqs:\n old_seqs_ids.add(tax)\n assert old_seqs_ids.issubset(tmp_dict.keys())\n for tax in old_seqs:\n del tmp_dict[tax]\n self.new_seqs_otu_id = tmp_dict # renamed new seq to their otu_ids from GI's, but all info is in self.otu_dict\n debug(\"len new seqs dict after remove identical\")\n 
debug(len(self.new_seqs_otu_id))\n with open(self.logfile, \"a\") as log:\n log.write(\"{} new sequences added from genbank after removing identical seq, \"\n \"of {} before filtering\\n\".format(len(self.new_seqs_otu_id), len(self.new_seqs)))\n self.data.dump()",
"def TrimEdges(alignment):\n percentages = []\n goodColumnsIndices = []\n for x in range(0,alignment.get_alignment_length()):\n column = alignment[:,x]\n gapPerc = float(column.count(\"-\"))/(len(alignment))\n percentages.append(gapPerc)\n for i in range(0,len(percentages)-1):\n if percentages[i] < args.trim:\n goodColumnsIndices.append(i)\n cleanedAlignment = alignment[:,goodColumnsIndices[0]:goodColumnsIndices[len(goodColumnsIndices)-1]]\n return(cleanedAlignment)",
"def omit_gap_positions(self, maximum_gap_frequency):\n # handle empty Alignment case\n if self.is_empty():\n return self.__class__([])\n\n position_frequencies = self.position_frequencies()\n gap_alphabet = self[0].gap_alphabet()\n\n positions_to_keep = []\n for i, f in enumerate(position_frequencies):\n gap_frequency = sum([f[c] for c in gap_alphabet])\n if gap_frequency <= maximum_gap_frequency:\n positions_to_keep.append(i)\n return self.subalignment(positions_to_keep=positions_to_keep)",
"def removeGapsFromAlign(sscons, align, gapCharacter=\".\"):\n\n assert len(sscons) == len(align[0]), \"different lengths!\\n sscons w gaps:{}, align:{}\".format(\n (len(sscons)), len(align[0])\n )\n\n cleanAlign = [\"\".join([a for ss, a in zip(sscons, seq) if ss != gapCharacter]) for seq in align]\n if (len(sscons) - sscons.count(gapCharacter)) != len(cleanAlign[0]):\n raise ValueError('gaps removal error!')\n\n return cleanAlign",
"def remove_stagnating_species(self):\n for s in self.species:\n imp = False \n\n for o in s.organisms:\n if o.fitness > s.max_fitness:\n imp = True\n\n s.max_fitness = o.fitness\n\n s.age_since_imp = 0\n\n if not imp:\n s.age_since_imp += 1\n\n if s.age_since_imp >= self.conf.stagnation_threshold:\n s.marked = True\n\n self.log.info('gen %d removing species %d, %d days since improvement',\n self.generation,\n s.species_id,\n s.age_since_imp)",
"def _trim_dict(self):\n\t\tfor celltype, fastq_list in self.cell_fastq_dict.iteritems():\n\t\t\tself.cell_dict[celltype] = [x.replace(\"fastq.gz\",\"\") for x in fastq_list]",
"def drop_sites_using_binary_markers(aln, marker_ids, inverse=False,\n match_prefix=False, match_suffix=False,\n copy=False):\n aln = aln.copy() if copy else aln\n # Get marker alignments and turn into a numpy array\n marker_matrix = np.array(\n [list(map(int, m))\n for m in aln.get_markers(marker_ids,\n match_prefix=match_prefix,\n match_suffix=match_suffix)\n .sequences])\n # Sum the values down each column\n # Columns whose sum is less than the number of rows have failed\n # one or more filters\n summed = np.sum(marker_matrix, axis=0)\n remove_list = np.where(summed == len(marker_matrix))[0] if inverse else \\\n np.where(summed < len(marker_matrix))[0]\n\n # Edit alignment inplace\n aln.remove_sites(remove_list)\n\n if copy:\n return aln",
"def __remove_duplicates_fast_memory_heavy(self):\n for (item, start_node) in self.distinct_map.items():\n current = start_node.next_alike\n while current is not None:\n self.__remove_node(current) \n current = current.next_alike",
"def _g_nasality_assimilation(self):\n out_phones = self.phones\n target = Phone(\"ŋ\")\n for n in range(len(self.phones)):\n p = self.phones[n]\n if p.ipa == \"g\" and p.right.nas:\n out_phones[n] = target\n self.phones = out_phones\n self._refresh()",
"def __trim_generation(self, should_trim=True):\n self.__compute_fitness()\n\n if should_trim:\n self.generation = sorted(self.generation,\n key=lambda chromosomerep:\n chromosomerep['fitness'])\n while len(self.generation) > self.max_gen_size:\n self.generation.pop()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Outputs a REJECTOR2 input file.
|
def rej_infile(self):
rejfilename = self.filebase + "-rej.txt"
rejfile = open(rejfilename, 'w')
rejfile.write("/--Data\n")
rejfile.write("Vnaught 0\n\n")
rejfile.write("Loci\tSNP\n")
rejfile.write("Ancestral\t-1\n")
rejfile.write("RecombRt\t0\n")
rejfile.write("NumLoci\t")
rejfile.write(str(len(self.sequence[0].seq)))
rejfile.write("\n")
rejfile.write("Length\t1\n")
rejfile.write("\n")
rejfile.write("\n")
rejfile.write("Tag\t")
rejfile.write("Population\n")
outseq = {}
for seq in self.sequence:
outseq[seq.id] = str(seq.seq)
for x in sorted(outseq.keys()):
rejfile.write(str(x))
rejfile.write("\t")
rejfile.write("X")
rejfile.write("\t")
for y in list(outseq[x]):
rejfile.write(y)
rejfile.write("\t")
rejfile.write("\n")
rejfile.close()
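
A compact sketch of the same writer for a plain dict of sequences. The header constants and the tab-separated row layout are copied from the method above rather than from REJECTOR2's documentation, so treat the format details as assumptions; write_rej and pop_label are illustrative names.

def write_rej(filename, seqs, pop_label="X"):
    # seqs: dict mapping sample id -> aligned sequence string (equal lengths).
    nloci = len(list(seqs.values())[0])
    out = open(filename, 'w')
    out.write("/--Data\nVnaught 0\n\n")
    out.write("Loci\tSNP\nAncestral\t-1\nRecombRt\t0\n")
    out.write("NumLoci\t%d\nLength\t1\n\n\n" % nloci)
    out.write("Tag\tPopulation\n")
    for name in sorted(seqs):
        out.write("%s\t%s\t" % (name, pop_label))
        out.write("\t".join(seqs[name]) + "\t\n")
    out.close()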
|
[
"def output(self, file: 'FILE *const') -> \"void\":\n return _coin.SoNotRec_output(self, file)",
"def check_2bit(self):\n print(self.temp_file_dir)\n print(self.temp_file)\n print(self.output_file)\n if self.target[-4:] != \"2bit\" or self.query[-4:] != \"2bit\":\n print(\"Error: input files must be in .2bit format.\")\n exit()",
"def phase_remove_bad_values(phase_filename: str, output_filename: str) -> str:\n try:\n phase_filepath = Path(phase_filename)\n output_filepath = Path(output_filename)\n except Exception:\n raise Exception(\"Inputs must be absolute paths to files.\")\n\n with open(phase_filepath, \"r\") as phase_data:\n lines = [line for line in phase_data]\n filtered_phase = [line for line in lines if \"******\" not in line]\n\n if (len(lines)) == len(filtered_phase):\n return phase_filename\n else:\n # Write out to temporary file and return that\n with open(output_filepath, \"w\") as filtered_phase_file:\n filtered_phase_file.writelines(filtered_phase)\n return output_filename",
"def final_output(file_name):\n # calling the asm file\n asm_file = open(file_name, \"r\")\n parsed_file = parser.Parser(asm_file)\n parsed_lines = parsed_file.get_list()\n code = coder.Coder(parsed_lines)\n coded_lines = code.get_list()\n asm_file.close()\n # writing the complied lines into the new file (.hack)\n new_file = open(file_name[:-3]+\"hack\", \"w\")\n new_file.writelines(coded_lines)\n new_file.close()",
"def write_input(infile,tkin,nh2,cdmol=cdmol_default):\n infile.write(mole+'.dat\\n')\n infile.write('radex.out\\n')\n infile.write(str(flow*(1-bw))+' '+str(fupp/(1-bw))+'\\n')\n infile.write(str(tkin)+'\\n')\n infile.write('1\\n')\n infile.write('H2\\n')\n infile.write(str(nh2)+'\\n')\n infile.write(str(tbg)+'\\n')\n infile.write(str(cdmol)+'\\n')\n infile.write(str(dv)+'\\n')",
"def stop(self, filename):\n # Flush the stream to make sure all our data goes in before\n # the escape character.\n self.origstream.flush()\n # Print the escape character to make the readOutput method stop\n self.origstream.write(self.escape_char)\n self.readOutput()\n # Close the pipe\n os.close(self.pipe_out)\n # Restore the original stream\n os.dup2(self.streamfd, self.origstreamfd)\n # Write to file filename\n f = open(filename, \"a\")\n f.write(self.capturedtext)\n f.close()\n pass",
"def remove_secondary_mapping_bit(sam, sam_parsed):\n lines = iter(fileinput.input([sam]))\n sam_parsed_file = open(sam_parsed, \"w\")\n headers = []\n body = []\n\n for line in lines:\n if line.startswith('@'):\n sam_parsed_file.write(line)\n else:\n # chomp line\n line = line.rstrip('\\n')\n details = line.split(\"\\t\")\n flag = int(details[1])\n if flag > 256:\n details[1] = str(flag - 256)\n print >> sam_parsed_file, '\\t'.join(details)\n sam_parsed_file.close()",
"def setup_r2(file_path):\n r2 = r2pipe.open(file_path)\n r2.cmd('e asm.arch=m7700')\n r2.cmd('e anal.limits=true')\n r2.cmd('e anal.from=0x8000')\n r2.cmd('e anal.to=0xffd0')\n\n return r2",
"def readPersona_role(file,output):\n personas = []\n num_of_conversation = 0\n persona_tmp = []\n current_role = \"\"\n with open(file,'r') as f:\n for line in f.readlines():\n line = line.split()\n if line[0] == '1':\n num_of_conversation += 1\n current_role = \"new\"\n if line[2] == \"persona:\":\n role = line[1]\n if (role != current_role) & (current_role != \"\"):\n personas.append(\" \".join(persona_tmp) + '\\n')\n persona_tmp = []\n persona_tmp.append(\" \".join(line[3:]))\n current_role = role\n\n with open(output,'w') as op:\n op.writelines(personas)",
"def rspace_killer ( fname, fout = None ) :\n\n import sys\n \n fin = open(source,\"r\")\n fout = source + '_wk.txt' if ( fout == None ) else fout\n dest = open(fout,\"w\")\n\n print(\"%s starting with %s. Output is %s.\" % \n (sys._getframe(0).f_code.co_name , fname, fout) )\n \n for line in fin :\n fout.write( line.rstrip() )\n \n print( \"%s Compeleted!\" % sys._getframe(0).f_code.co_name )",
"def outputExcludedFiles(self):\n outputFile = open(self.fileExcOutput,\"w\",-1,\"utf-8\")\n for file in self.filesExcluded:\n outputFile.write(str(file) + \"\\n\")\n outputFile.close()",
"def _filter_out_neg(self, sample):\n negative_index = os.path.join(self.negative_index_dir, os.path.basename(self.negative_index_dir))\n\n message = '{}: Filtering negative RNA species'.format(sample.basename)\n command = 'bowtie -p 18 -q {} {} --un {}'.format(negative_index, sample.trimmed, sample.filtered)\n if os.path.exists(sample.filtered):\n self._log_message(message, command_status=self.FILE_ALREADY_EXISTS)\n else:\n self._run_command(command, message, log_stderr=True)\n self._get_bowtie_summary(self.log_file, 'filtering')",
"def removeUnkFromNgramsFile(ngrams_file, output):\n\tf = open(ngrams_file)\n\to = open(output, 'w')\n\tc = 0\n\tfor line in f:\n\t\tc += 1\n\t\tif c % 1000000==0:\n\t\t\tprint(str(c) + ' tokens filtered.')\n\t\tif '<unk>' not in line:\n\t\t\to.write(line)\n\tf.close()\n\to.close()",
"def presubmission(self, corr2path=\".\", use_weights=False):\n\n\t\tpresubdir = os.path.join(os.path.dirname(__file__), \"presubmission_script\")\n\t\tpresubscriptpath = os.path.join(presubdir, \"presubmission.py\")\n\t\tcatpath = os.path.join(self.outdir(), \"*.cat\")\n\t\tbranchcode = \"-\".join(self.branch) # Not required given our filenames, but just to be sure.\n\t\toutfilepath = self.subfilepath()\n\t\tcorr2path = os.path.join(corr2path, 'corr2')\n\t\t\n\t\tif use_weights:\n\t\t\tcmd = \"python %s %s -b %s -w 3 -c2 %s -o %s\" % (presubscriptpath, catpath, branchcode, corr2path, outfilepath)\n\t\telse:\n\t\t\tprint \"I am NOT using weights !\"\n\t\t\tcmd = \"python %s %s -b %s -c2 %s -o %s\" % (presubscriptpath, catpath, branchcode, corr2path, outfilepath)\n\t\tos.system(cmd)",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n dest=\"verbosity\",\n action=\"count\",\n default=0,\n help=\"set verbosity level\",\n )\n parser.add_argument(\n \"-f\",\n \"--file\",\n dest=\"forbidden\",\n action=\"store\",\n default=\"forbidden-violations.txt\",\n help=\"List of forbidden rules (one rule per line)\",\n )\n parser.add_argument(\n \"report\",\n action=\"store\",\n default=None,\n help=\"Axivion report file\",\n )\n args = parser.parse_args()\n\n if args.verbosity == 1:\n logging.basicConfig(level=logging.INFO)\n elif args.verbosity > 1:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.ERROR)\n\n forbidden_rules = []\n\n with open(args.forbidden, mode=\"r\", encoding=\"utf-8\") as forbidden:\n lines = forbidden.readlines()\n lines = [rule.rstrip() for rule in lines]\n for rule in lines:\n if not rule:\n continue\n if not rule.lstrip().startswith(\"#\"):\n forbidden_rules.append(rule)\n logging.info(f\"loaded the following rules as forbidden: {forbidden_rules}\")\n\n with open(args.report, mode=\"r\", encoding=\"utf-8\") as report:\n report_json = json.load(report)\n logging.info(\"successfully loaded a JSON report file\")\n\n fatal_violations_found = False\n for violation in report_json:\n error_number = violation[\"errorNumber\"]\n logging.debug(f\"checking violation {error_number}\")\n if error_number in forbidden_rules:\n suppressed = violation[\"suppressed\"]\n violation_text = (\n f\"found forbidden violation {error_number} in \"\n f\"{violation['path']}:{violation['line']}:{violation['column']}\"\n f\" with justification: {violation['justification']}\"\n f\" suppressed: {suppressed}\"\n )\n if suppressed:\n logging.info(violation_text)\n else:\n logging.fatal(violation_text)\n fatal_violations_found = True\n\n if fatal_violations_found:\n sys.exit(1)\n else:\n logging.info(\"no forbidden violations found\")",
"def remove_capping_hidrogens(output_template_file, nstd):\n\t\n with open(output_template_file, \"r\") as f:\n lines = f.readlines()\n \n #Create Variables\n new_lines = lines\n atom_index = [line[0:5].strip() for line in lines if line[21:25] in nstd]\n index_to_remove = []\n fields = {\n \"NAME\" : False,\n \"NBON\" : False,\n \"BOND\" : False,\n \"THET\" : False,\n \"PHI\" : False,\n \"IPHI\" : False\n }\n\n #Remove lines from capping atoms\n for i, line in enumerate(lines):\n found=False\n for value in [\"NAME\", \"NBON\", \"BOND\", \"THET\", \"PHI\", \"IPHI\"]:\n if line.strip(\"\\n\") == value:\n fields[value] = True\n found=True\n if found:\n found=False\n continue\n\n if i<=2 and not fields[\"NBON\"] and not fields[\"BOND\"] and not fields[\"THET\"] and not fields[\"PHI\"] and not fields[\"IPHI\"]:\n if i == 2:\n new_lines[i] = line[0:9] + str(int(line[9:11])-len(atom_index)) + line[11:]\n else:\n pass\t\n elif i>2 and not fields[\"NBON\"] and not fields[\"BOND\"] and not fields[\"THET\"] and not fields[\"PHI\"] and not fields[\"IPHI\"]:\n if line[21:25].strip() in nstd:\n index_to_remove.append(i)\n else:\n pass\n\n elif fields[\"NBON\"] and not fields[\"BOND\"] and not fields[\"THET\"] and not fields[\"PHI\"] and not fields[\"IPHI\"]:\n if line[0:6].strip() in atom_index:\n index_to_remove.append(i)\n else:\n pass\n elif fields[\"NBON\"] and fields[\"BOND\"] and not fields[\"THET\"] and not fields[\"PHI\"] and not fields[\"IPHI\"]:\n atom_1, atom_2 = line.split()[0:2]\n if atom_1 in atom_index or atom_2 in atom_index:\n index_to_remove.append(i)\n else:\n pass\n elif fields[\"NBON\"] and fields[\"BOND\"] and fields[\"THET\"] and not fields[\"PHI\"] and not fields[\"IPHI\"]:\n atom_1, atom_2, atom_3 = line.split()[0:3]\n if atom_1 in atom_index or atom_2 in atom_index or atom_3 in atom_index:\n index_to_remove.append(i)\n else:\n pass\n elif fields[\"NBON\"] and fields[\"BOND\"] and fields[\"THET\"] and fields[\"PHI\"] and not fields[\"IPHI\"]:\n atom_1, atom_2, atom_3, atom_4 = line.split()[0:4]\n if atom_1 in atom_index or atom_2 in atom_index or atom_3 in atom_index or atom_4 in atom_index:\n index_to_remove.append(i)\n else:\n pass\n elif fields[\"NBON\"] and fields[\"BOND\"] and fields[\"THET\"] and fields[\"PHI\"] and fields[\"IPHI\"] and line != \"END\":\n atom_1, atom_2, atom_3, atom_4 = line.split()[0:4]\n if atom_1 in atom_index or atom_2 in atom_index or atom_3 in atom_index or atom_4 in atom_index:\n index_to_remove.append(i)\n else:\n pass\n #Write back\n template = [line for i, line in enumerate(new_lines) if i not in index_to_remove]\n with open(output_template_file, \"w\") as f:\n f.write(\"\".join(template))",
"def endFile(file) :\n log.run(\"addhis in=%s comment='Data reduction by CARMA pipeline version %s completed at %s'\" % (file,version.VERSION,time.ctime()),[],logit=False)",
"def fix_receptor(input_file: str, output_file: str, pH: float = 7.0):\n fixer = PDBFixer(filename=input_file)\n fixer.findMissingResidues()\n fixer.findMissingAtoms()\n fixer.addMissingAtoms()\n fixer.addMissingHydrogens(pH)\n app.PDBFile.writeFile(fixer.topology, fixer.positions, open(output_file, \"w\"))",
"def erase_files(self):\n print('\\n\\n\\n We are erasing files!!! ')\n try:\n writeable_file = open('scrape-html-max/scrape.txt', 'w')\n writeable_file.close()\n print('\\n\\n opened file to erase and closed file.... ')\n writeable_file_2 = open('final-report/report.txt', 'w')\n writeable_file_2.close()\n except:\n print('\\n\\n Could not open file to erase')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads the input tree from a file when one is supplied, otherwise builds a tree from the sequence data.
|
def input_tree(self):
if self.starttreename:
if self.starttreename[-3:] == 'xml':
self.starttree = Phylo.read(self.starttreename, "phyloxml")
elif self.starttreename[-6:] == 'newick':
self.starttree = Phylo.read(self.starttreename, "newick")
print "Generating phylogenetic tree..."
if self.treetype[-3:] == 'xml':
self.tree = Phylo.read(self.treetype, "phyloxml")
elif self.treetype[-3:] == 'nwk':
self.tree = Phylo.read(self.treetype, "newick")
elif self.treetype == 'pars':
self.parsimony_tree()
elif self.treetype == 'PhyML':
self.phyml_tree()
else:
self.raxml_tree()
self.tree.collapse_all(lambda c: c.branch_length <= 0.0)
self.treeparents = self.all_parents(self.tree)
for btree in self.btrees:
btree.collapse_all(lambda c: c.branch_length <= 0.0)
self.btreeparents.append(self.all_parents(btree))
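
The tree-reading and branch-collapsing steps above reduce to a couple of Bio.Phylo calls. The sketch below uses a hypothetical file name and adds a None guard on branch_length as a defensive tweak the method does not have.

from Bio import Phylo

tree = Phylo.read("example.nwk", "newick")   # hypothetical input file
# Collapse internal branches of non-positive length, as input_tree() does,
# so that zero-length splits are not treated as resolved clades.
tree.collapse_all(lambda c: c.branch_length is not None and c.branch_length <= 0.0)
print("terminals: %d" % tree.count_terminals())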
|
[
"def parse_input():\n\n with open('input.txt', 'r') as txt:\n tree = txt.read().strip().split(' ')\n\n return tree",
"def parse_files(preorder=None, postorder=None, inorder=None):\n data_is_sufficient = (preorder and inorder) or (\n postorder and inorder)\n if not data_is_sufficient:\n raise InsufficientTraversalInformation(\n \"Specify at least 2 of the three modes to load a \"\n \"unique Binary Tree\")\n if preorder:\n with open(preorder, \"rb+\") as f:\n preorder_traversal = [\n int(l.decode(\"ascii\").strip()) for l in f.readlines()\n ]\n\n if inorder:\n with open(inorder, \"rb+\") as f:\n inorder_traversal = [\n int(l.decode(\"ascii\").strip()) for l in f.readlines()\n ]\n\n if postorder:\n with open(postorder, \"rb+\") as f:\n postorder_traversal = [\n int(l.decode(\"ascii\").strip()) for l in f.readlines()\n ]\n if preorder and inorder:\n return TreeNode.load(\n preorder=preorder_traversal,\n inorder=inorder_traversal)\n elif postorder and inorder:\n return TreeNode.load(\n postorder=postorder_traversal,\n inorder=inorder_traversal)",
"def fa2tree(args):\n for fasta in args.input_files:\n logging.info('fa2tree input: {}'.format(fasta))\n fasta = abspath(fasta)\n if args.bootstrap:\n fa2tree_bs(args, fasta)\n elif bio.fasta_record_count(fasta) >= bio.SEQ_COUNT_CUTOFF:\n fa2tree_big(args, fasta)\n else:\n fa2tree_default(args, fasta)",
"def buildtree(filename):\n \n #FIXME\n pass",
"def create_tree():\n global stream\n global stream_rshift\n\n stream = sys.stdin.read().rstrip().split(\"\\n\")\n\n stream_rshift = valid_indent(stream)\n\n if stream_rshift == -1:\n print(\"[ERROR] Detected alternate indentation levels\")\n quit(1)\n\n grow_tree()",
"def read_tree(tree_file):\n\ttree = Phylo.read(tree_file, 'newick')\n\ttree.root_at_midpoint()\n\treturn tree",
"def __init__(self, input_data=None):\n self.data = input_data\n self.children = []",
"def read_process_tree(file_path):\n from pm4py.objects.process_tree.importer import importer as tree_importer\n tree = tree_importer.apply(file_path)\n return tree",
"def read_tiered_data(filenames):\n initial = None\n kdtree = None\n for f in filenames:\n if kdtree is None:\n initial,kdtree = read_data(f)\n else:\n (d,t) = read_data(f)\n (initial,kdtree) = merge(initial,d)\n return (initial, kdtree)",
"def finish_tree(self, tree, filename):\r\n pass",
"def deserialize(self, data):\n\n\n # Your Codec object will be instantiated and called as such:\n # ser = Codec()\n # deser = Codec()\n # ans = deser.deserialize(ser.serialize(root))\n tree = data.split()\n print(tree)\n if(tree[0] == \"n\"):\n return None\n queue = []\n root = TreeNode(tree[0])\n\n queue.append(root)\n i = 1\n while queue:\n cur = queue.pop(0)\n if cur == None:\n break\n cur.left = TreeNode(int(tree[i])) if tree[i] != \"n\" else None\n cur.right = TreeNode(int(tree[i+1])) if tree[i+1] != \"n\" else None\n i += 2\n queue.append(cur.left)\n queue.append(cur.right)\n\n return root",
"def read_hartree():\n# TODO: write a function to read the parameters from not ad-hoc files\n import numpy as np;\n if isfile(\"hartree.dat\"):\n print(\" Reading file hartree.dat... \",end=\"\")\n hartreefile = open(\"hartree.dat\");\n hartree = [];\n for line in hartreefile.readlines():\n hartree.append(map(float,line.split()));\n hartreefile.close()\n print(\"Done.\")\n hartree = np.array(hartree);\n elif isfile(\"E_lda.dat\") and isfile(\"Vxc.dat\"):\n print(\"Auxiliary file (hartree.dat) not found.\")\n print(\"Reading files E_lda.dat and Vxc.dat... \",end=\"\")\n Eldafile = open(\"E_lda.dat\");\n Vxcfile = open(\"Vxc.dat\");\n elda = [];\n vxc = [];\n for line in Eldafile.readlines():\n elda.append(map(float,line.split()));\n Eldafile.close()\n for line in Vxcfile.readlines():\n vxc.append(map(float,line.split()));\n Vxcfile.close()\n print(\"Done.\")\n elda = np.array(elda);\n vxc = np.array(vxc);\n hartree = elda - vxc\n else : \n print(\"Auxiliary file not found (hartree/E_lda/Vxc). Impossible to continue.\")\n sys.exit(1)\n return hartree",
"def process_trees(tree):\n name_target = tree[:-9].replace('trees/all_', '').replace('trees/pure_', '').replace('trees/recomb_', '')\n\n with open(tree, 'r') as check_tree:\n tree_txt = check_tree.read() \n\n if (tree_txt == 'not enough genomic information\\n'): \n return [name_target, np.NaN, 0]\n\n else:\n t = Tree(tree)\n t.set_outgroup('CONSENSUS_CPZ')\n t.ladderize()\n target_node = t.search_nodes(name=name_target)[0]\n\n result = []\n for node in target_node.get_ancestors():\n subtypes_in_node = [leaf.split('-')[0] for leaf in node.get_leaf_names() if leaf != name_target]\n if len(set(subtypes_in_node)) == 1:\n result = [name_target, subtypes_in_node[0], node.support]\n break\n else:\n pass \n if result == []:\n result = [name_target, np.NaN, 0]\n else:\n pass\n \n return result",
"def loadTree(filepath):\r\n return pickle.load(open(filepath, \"rb\"))",
"def load_tree(files, tree, branches, nmax = -1, selection=''):\n\n import ROOT\n ROOT.PyConfig.IgnoreCommandLineOptions = True\n ROOT.gROOT.SetBatch(True)\n chain = ROOT.TChain(tree)\n for f in files: chain.Add(f)\n\n from root_numpy import tree2array\n return tree2array(chain, branches = branches, selection = selection, start = 0, stop = nmax)",
"def convert(tree,fileName=None):\n simulation = tree.getroot()\n if simulation.tag!='Simulation' and simulation.tag!='ExternalModel': return tree #this isn't an input file\n extmod = None\n if simulation.tag=='Simulation':\n models = simulation.find('Models')\n if models is not None:\n extmod = models.find('ExternalModel')\n elif simulation.tag=='ExternalModel': #externalNode case\n extmod = simulation\n if extmod is not None:\n vars = []\n toRemove = []\n for child in extmod:\n if child.tag=='variable':\n vars.append(child.text)\n toRemove.append(child)\n for child in toRemove:\n extmod.remove(child)\n if len(vars)>0:\n variables = ET.Element('variables')\n extmod.append(variables)\n variables.text = ','.join(vars)\n return tree",
"def load_tree(files, tree, branches, nmax = -1, selection=''):\n\n ROOT.PyConfig.IgnoreCommandLineOptions = True\n ROOT.gROOT.SetBatch(True)\n chain = ROOT.TChain(tree)\n for f in files: chain.Add(f)\n\n from root_numpy import tree2array\n return tree2array(chain, branches = branches, selection = selection, start = 0, stop = nmax)",
"def test_write_tree(self):\n\n newick = '''(\n (\n a:1.000000,\n b:2.000000\n )x:3.000000,\n (\n c:4.000000,\n d:5.000000\n )y:6.000000\n)rra:0.000000;\n'''\n infile = StringIO(newick)\n tree = read_tree(infile)\n\n out = StringIO()\n tree.write(out, rootData=True)\n self.assertEqual(newick, out.getvalue())",
"def build(self):\n text = open(self.fname).read()\n self.tree = buildtree(text,self.fname)\n self.tree = binarizetree(self.tree)\n self.tree = backprop(self.tree,self.fname)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find all parents of all children in the tree.
|
def all_parents(tree):
parents = {}
for clade in tree.find_clades(order='level'):
for child in clade:
parents[child] = clade
return parents
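
A short usage sketch, assuming tree is a Bio.Phylo tree loaded elsewhere: the root never appears as a key, so walking the mapping upward terminates there.

parents = all_parents(tree)
leaf = tree.get_terminals()[0]        # any terminal clade
path_to_root = [leaf]
node = leaf
while node in parents:                # the root has no parent entry
    node = parents[node]
    path_to_root.append(node)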
|
[
"def walk_parents(self):\n active = self.parent_datasets[:]\n while active:\n d = active.pop()\n yield d\n active += d.parent_datasets",
"def parents(self, rev):\n self._scanparents(rev)\n return [r for _c, r in sorted(self._parents.get(rev, []))]",
"def parents(self):\n return (Commit(self._repo, parent) for parent in self._commit.parents)",
"def get_parents(self):\n return []",
"def iter_parents(node):\n parent = node.parent\n\n while parent:\n yield parent\n\n parent = parent.parent",
"def parents(self, term):\n for parent_term in term.is_a:\n yield self[parent_term]\n for grand_parent in self.parents(self[parent_term]):\n yield grand_parent",
"def get_parents_recursive( self, item ):\n\n\t\tparents = [ ]\n\n\t\tparent = self.get_parent( item )\n\t\tif parent:\n\t\t\tparents.append( parent )\n\t\t\tparents.extend( self.get_parents_recursive( parent ) )\n\n\t\treturn parents",
"def get_parents(self, person):\n for f in self.families:\n if person in f.children:\n for p in f.parents:\n yield p",
"def all_parents(self, obj):\n # Check the memoization cache first.\n if obj in self.parent_cache:\n return self.parent_cache[obj]\n\n if not isinstance(obj, Expr):\n raise Error('%s must be an Expr.' % (obj,))\n var = expr('?x')\n query = expr('ISA')(obj, var)\n solutions = self.ask_all(query)\n parents = map(lambda b: b[var], solutions)\n self.parent_cache[obj] = parents\n return parents",
"def ancestors(self):\n stack = deque([self])\n parent = self.parent\n while parent:\n stack.appendleft(parent)\n parent = parent.parent\n return list(stack)",
"def get_parents(self, ns, id):\n return self.descendants_rel(ns, id, {'isa', 'partof'})",
"def parents(self, nodename):\n parents = set(self.node_dict[nodename].parents.values())\n return parents",
"def parents(self, host):\n return list(self.iter_parents(host))",
"def get_parents(self):\n _p = {}\n for s, children in self.store.items():\n for (_, child) in children:\n assert not child in _p # Each core has only one parent\n _p[child] = s\n return _p",
"def children_recursive(self):\n for node in self.children():\n yield node\n for sub_node in node.children_recursive():\n yield sub_node",
"def parents(self):\n # Sort here for determinism\n # return sorted(self._parents.values(), key=lambda edge: str(edge))\n return list(self._parents.values())",
"def get_parent_paths(self, depth=None, hints=None):\n #pylint:disable=too-many-nested-blocks\n if depth is not None and depth == 0:\n return [[self]]\n results = []\n parents = PageElement.objects.filter(\n pk__in=RelationShip.objects.filter(\n dest_element=self).values('orig_element_id'))\n if not parents:\n return [[self]]\n if hints:\n for parent in parents:\n if parent.slug == hints[-1]:\n # we found a way to cut the search space early.\n parents = [parent]\n hints = hints[:-1]\n break\n for parent in parents:\n grandparents = parent.get_parent_paths(\n depth=(depth - 1) if depth is not None else None,\n hints=hints)\n if grandparents:\n for grandparent in grandparents:\n term_index = 0\n if hints:\n for node in grandparent:\n if node.slug == hints[term_index]:\n term_index += 1\n if term_index >= len(hints):\n break\n if not hints or term_index >= len(hints):\n # we have not hints or we consumed all of them.\n results += [grandparent + [self]]\n return results",
"def get_parents(self, tag):\n families = self.get_parent_tags()\n try:\n parents = families[tag]\n for parent in parents:\n parents.extend(self.get_parents(parent))\n return parents\n except Exception:\n return []",
"def get_parents_recursively(client, runs, only_finished=True, parent_tag=\"parent\", verbose=0):\n\n if not isinstance(runs, list):\n runs = [runs]\n\n new_nodes = {}\n all_nodes = {}\n root_nodes = {}\n\n _build_tree_add_runs_to_data_structures(\n runs, root_nodes, all_nodes, new_nodes, only_finished=only_finished)\n\n level = 0\n while len(new_nodes) > 0:\n if verbose > 0:\n print(f\"Processing level {level}: {len(new_nodes)} nodes\")\n\n child_nodes = new_nodes\n new_nodes = {}\n parents = []\n\n if verbose > 0:\n print(f\"* collecting parents for child nodes: {len(child_nodes)}\")\n\n for run_id, run_wrapper in child_nodes.items():\n\n tags = run_wrapper[\"run\"].data.tags\n if \"parent\" in tags:\n parent = client.get_run(tags[parent_tag])\n parents.append(parent)\n\n if verbose > 0:\n print(f\"* found parents: {len(parents)}\")\n\n _build_tree_add_runs_to_data_structures(\n parents, root_nodes, all_nodes, new_nodes, only_finished=only_finished)\n\n level -= 1\n\n return root_nodes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Collect neighbors from the minimum-sized monophyletic clade that includes term.
|
def neighbors_by_mono(self, term, ctree, parents, tsize):
neighbors = set()
monn = set()
monn.add(term)
curnode = term
while len(neighbors) < tsize:
if curnode not in parents: # will not go past the root
break
curparent = parents[curnode]
allkids = self.collect_all_kids(curparent, [])
for kid in allkids:
if kid is not term:
neighbors.add(kid)
monn.add(kid)
if len(neighbors) >= tsize and ctree.is_monophyletic(monn):
return neighbors
if len(neighbors) > self.maxn:
return set()
curnode = curparent
return set()
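
The stopping test above leans on Bio.Phylo's is_monophyletic(), which returns the common ancestor clade when the given tips form a complete subtree and False otherwise. A brief sketch of that call in isolation, with a hypothetical set of tips and tree assumed to be a Bio.Phylo tree:

tips = tree.get_terminals()[:3]           # hypothetical candidate neighborhood
mrca = tree.is_monophyletic(tips)
if mrca:
    print("tips form a clade; its MRCA spans %d terminals" % mrca.count_terminals())
else:
    print("tips are not monophyletic")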
|
[
"def neighbors_by_hops(self, term, ctree, parents, tsize):\n workneighbors = set()\n neighbors = set()\n monn = set()\n monn.add(term)\n\n height = 0\n while len(workneighbors) <= tsize:\n curnode = term\n for i in xrange(height):\n if curnode not in parents: # will not go past the root\n break\n curnode = parents[curnode]\n allkids = self.collect_kids(curnode, [], 0, height + 1)\n for kid in allkids:\n if kid is not term:\n workneighbors.add(kid)\n height += 1\n ndist = {}\n for neb in workneighbors:\n if len(ctree.trace(term, neb)) <= maxhops:\n ndist[neb] = ctree.distance(term, neb)\n sorted_neb = sorted(ndist.items(), key=operator.itemgetter(1))\n for i in xrange(len(sorted_neb)):\n if i >= tsize:\n break\n monn.add(sorted_neb[i][0])\n neighbors.add(sorted_neb[i][0])\n return neighbors",
"def GetAtomHeavyNeighbors(atom):\n res = []\n for nbr in atom.GetNeighbors():\n if nbr.GetAtomicNum() != 1:\n res.append(nbr)\n return res",
"def known_mines(self):\n return {cell for cell in self.cells if len(self.cells)==self.count}",
"def ContructLFromGraphSearch(mol):\r\n\r\n AtomIndex = []\r\n Hmol = Chem.RemoveHs(mol)\r\n for atom in Hmol.GetAtoms():\r\n temp = []\r\n if atom.GetAtomicNum() == 6:\r\n for neighatom in atom.GetNeighbors():\r\n if neighatom.GetAtomicNum() == 6:\r\n temp.append(0)\r\n elif neighatom.GetAtomicNum() == 1:\r\n continue\r\n else:\r\n temp.append(1)\r\n if sum(temp) == 0:\r\n AtomIndex.append(atom.GetIdx())\r\n\r\n return AtomIndex",
"def min_hamming_dist(ns):\n return min(starmap(hamming_distance, combinations(ns, 2)))",
"def number_of_neighbors(self):\n return len(self.indices_of_atoms_connecting)",
"def compute_nearest_neighbors(submatrix, balltree, k, row_start):\n\n nn_dist, nn_idx = balltree.query(submatrix, k=k+1)\n\n # Remove the self-as-neighbors\n nn_idx = nn_idx[:,1:]\n nn_dist = nn_dist[:,1:]\n\n # Construct a COO sparse matrix of edges and distances\n i = np.repeat(row_start + np.arange(nn_idx.shape[0]), k)\n j = nn_idx.ravel().astype(int)\n return (i, j, nn_dist.ravel())",
"def fwd(self, x):\n\n # Check for consistency\n errstring = self.consist('som', x)\n if errstring != None:\n raise Exception(errstring)\n\n # Turn nodes into matrix of centres\n nodes = np.reshape(self.map, (self.nin, self.num_nodes), order='F').T\n # Compute squared distance matrix\n d2 = dist2(x, nodes)\n # Find winning node for each pattern: minimum value in each row\n win_nodes = np.argmin(d2, 1)\n w = np.min(d2, 1)\n return d2, win_nodes",
"def _precompute_distances(self, state):\n theGhosts = api.ghosts(state)\n\n distances = [[float(\"inf\") for col in range(len(self.map[0]))] for row in range(len(self.map))]\n\n theGhosts = api.ghosts(state)\n for ghost in theGhosts:\n self._flood_fill(distances, int(ghost[1]), int(ghost[0]), 0)\n\n return distances",
"def neighbour_nodes_generate(s, current_node, TreeConn):\r\n neigh_radius = 1\r\n neighbour_nodes = []\r\n for k in range(s): \r\n dist = np.sqrt((TreeConn[k][0] - current_node[0])**2 + ((TreeConn[k][1] - current_node[1])**2))\r\n if dist <= neigh_radius:\r\n neighbour_nodes.append(k)\r\n return neighbour_nodes",
"def CalcWeightedNeighborsFromMatrix(factor_M):\n factor_mat_no_selfconnect = np.copy(factor_M)\n np.fill_diagonal(factor_mat_no_selfconnect, 0)\n weighted_neighbors = np.sum(factor_mat_no_selfconnect, axis=1, keepdims=True)\n return weighted_neighbors",
"def sweep_mines(bm: BoardManager) -> List[List[int]]:",
"def lemmings(num_holes, cafes):\n # Create list of lemmings and cafes (l)\n l = [0 for i in range(0, num_holes)]\n for cafe in cafes:\n l[cafe] = 1\n\n print('l', l)\n\n\n # Iterate through it, saving max of all min distances\n max_of_min_dist = 0\n\n for lem1 in l:\n\n # For each lemming, find the closest cafe:\n\n for lem2 in l:\n\n if lem2 == 1:\n \n dist = abs(lem1 - lem2)\n print('dist', dist)\n if dist > min_dist:\n min_dist = dist \n\n print('new min_dist', min_dist)\n\n print('Overall min_dist', min_dist)\n\n\n if min_dist > max_of_min_dist:\n max_of_min_dist = min_dist\n\n print('new max_dist', max_of_min_dist)\n\n\n\n\n return max_of_min_dist",
"def get_neighbourhood(self):\n\n mu, var = np.random.normal(loc=0,scale=4,size=2)\n var = np.abs(var)\n self.neigh_feats = np.random.normal(loc=mu,scale=var,size=(self.n_neighbours,self.n_ftrs))\n #self.neigh_feats = np.random.normal(loc=mu,scale=var,size=(np.random.uniform(int(self.n_neighbours*.8),int(self.n_neighbours*1.2),self.n_ftrs)))\n #conn_str = lambda x,y",
"def compute_neighbors(self):\n for img in self.images:\n self.images_superpixels_neighbours[img] = [set() for sp in self.images_superpixels[img]]\n for row in range(len(self.images_segmented[img]) - 1):\n for column in range(len(self.images_segmented[img][0]) - 1):\n current = self.images_segmented[img][row][column] # superpixel label of current pixel\n right = self.images_segmented[img][row][column + 1] # superpixel label of pixel right of current\n below = self.images_segmented[img][row + 1][column] # superpixel label of pixel below current\n if current != right:\n self.images_superpixels_neighbours[img][current].add(right)\n self.images_superpixels_neighbours[img][right].add(current)\n if current != below:\n self.images_superpixels_neighbours[img][current].add(below)\n self.images_superpixels_neighbours[img][below].add(current)",
"def neighboors(data_frame_matrix):\n\n \tTHRESOLD = 1.4 + VDW_RADIUS['S'] * 2\n \trow_matrix =[]\n \ti_matrix = []\n \tprog = ProgressBar()\n\n \tfor index, row in data_frame_matrix.iterrows():\n \t\trow_matrix.append(row)\n \t\ti_matrix.append(index)\n\n \tatom_neighboors = {}\n \tfor i in prog(range(len(i_matrix))):\n \t\tneighboors = []\n \t\tfor j in range(len(row_matrix[i]) - 1):\n \t\t\tif(row_matrix[i][j] < THRESOLD) & (i != j):\n \t\t\t\tneighboors.append(j)\n \t\tatom_neighboors[i] = neighboors\n\n \treturn atom_neighboors",
"def getNeighbors(self):\n neighbors = []\n for edge in self.edges:\n neighbors.append(edge.toUnit)\n return neighbors",
"def neighbor_indices(self):",
"def neighbors(self):\n hood = (self.x, self.y, self.neighborhood_radius) # neighborhood\n n = collide_single(hood, self.others)\n return n"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Collect neighbors within a maximum number of hops along the tree.
|
def neighbors_by_hops(self, term, ctree, parents, tsize):
workneighbors = set()
neighbors = set()
monn = set()
monn.add(term)
height = 0
while len(workneighbors) <= tsize:
curnode = term
for i in xrange(height):
if curnode not in parents: # will not go past the root
break
curnode = parents[curnode]
allkids = self.collect_kids(curnode, [], 0, height + 1)
for kid in allkids:
if kid is not term:
workneighbors.add(kid)
height += 1
ndist = {}
for neb in workneighbors:
if len(ctree.trace(term, neb)) <= maxhops:
ndist[neb] = ctree.distance(term, neb)
sorted_neb = sorted(ndist.items(), key=operator.itemgetter(1))
for i in xrange(len(sorted_neb)):
if i >= tsize:
break
monn.add(sorted_neb[i][0])
neighbors.add(sorted_neb[i][0])
return neighbors
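
The hop filter relies on two Bio.Phylo calls, shown in isolation below (tree is assumed to be a Bio.Phylo tree; maxhops in the method above is taken to be a module-level setting):

a, b = tree.get_terminals()[:2]
hops = len(tree.trace(a, b))    # clades on the path between a and b
dist = tree.distance(a, b)      # summed branch lengths along that path
print("%d hops, patristic distance %.3f" % (hops, dist))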
|
[
"def get_neighbours(self):\n return self.__neighbours",
"def _calculate_nb_neighbors(self, target_node):\n # if number of neighbors was calculated at least once\n # skips calculating the distance\n if target_node.nb_neighbors != -1:\n # only check if there are dead nodes\n all_neighbors = target_node.neighbors\n nb_dead_neighbors = sum(1 for x in all_neighbors if not x.alive)\n target_node.neighbors[:] = [x for x in all_neighbors if x.alive]\n return target_node.nb_neighbors - nb_dead_neighbors\n\n nb_neighbors = 0\n shortest_distance = cf.COVERAGE_RADIUS*2\n for node in self.get_alive_nodes():\n if node == target_node:\n continue\n distance = calculate_distance(target_node, node)\n if distance <= cf.COVERAGE_RADIUS:\n nb_neighbors += 1\n target_node.neighbors.append(node) \n if distance < shortest_distance:\n shortest_distance = distance\n\n if shortest_distance != cf.INFINITY:\n exclusive_radius = shortest_distance - cf.COVERAGE_RADIUS\n if exclusive_radius < 0:\n exclusive_radius = 0.0\n \n node.nb_neighbors = nb_neighbors\n node.exclusive_radius = exclusive_radius",
"def findEdges(self):\n for nc in self.nodes:\n x = nc[0]\n y = nc[1]\n nc_neighbours = self.nodes.get(nc).neighbours\n # Check for adjacent nodes in all directions\n if (x - self.x_div_len, y) in self.nodes:\n nc_neighbours.append(self.nodes.get((x - self.x_div_len, y)))\n if (x + self.x_div_len, y) in self.nodes:\n nc_neighbours.append(self.nodes.get((x + self.x_div_len, y)))\n if (x, y - self.y_div_len) in self.nodes:\n nc_neighbours.append(self.nodes.get((x, y - self.y_div_len)))\n if (x, y + self.y_div_len) in self.nodes:\n nc_neighbours.append(self.nodes.get((x, y + self.y_div_len)))",
"def num_neighbours(lag=1):\n win_size = 2*lag + 1\n neighbours = win_size**2 - (2*(lag-1) + 1)**2\n \n return neighbours",
"def getNeighbours(self, user=None, limit=None):\n pass",
"def get_neighbors(self):\n\n # create an empty list for neighbors.\n neighbors = []\n\n # go through all of the rows in the map array\n for j in range(0, self.height):\n \n row = []\n \n # go through the items in the row, and add the \n # amount of neighbors that item has to the \n # array.\n for i in range(0, self.width):\n row += [self.get_point_neighbor(j, i)]\n\n # add the row into the neighbors array.\n neighbors += [row]\n\n # check the map, and return the neighbors array.\n self.assert_array_size('get_neighbors', self.neighbors)\n return neighbors",
"def create_neighbors(self):\n for row in self._currentGrid:\n for cell in row:\n row = cell.get_row()\n column = cell.get_column()\n if row == 0:\n # 1. upper left corner (3 neighbors)\n if column == 0:\n #print('upper left')\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # 2. rest of the top row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # upper right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n # middle row\n elif row < (self._rows - 1):\n #print('middle')\n # 1. middle left edge (5 neighbors)\n if column == 0:\n #print('middle left edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n # 2. rest of the middle row (8 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n # 3. 
middle right edge (5 neighbors)\n else:\n #print('middle right edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n # bottom row\n else:\n #print('lower')\n # 1. bottom left corner (3 neighbors)\n if column == 0:\n #print('lower left')\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # 2. rest of the bottom row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # bottom right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[row - 1][0])\n cell.add_neighbor(self._currentGrid[row][0])",
"def compute_neighbours(self, nns):\n self.NNS = []\n for i in range(len(self.embeds)):\n start_time = time.clock()\n write(\"Computing nearest neighbours for embedding no = %d ...\" % i)\n nbrs = NearestNeighbors(n_neighbors=nns, algorithm='ball_tree').fit(self.embeds[i].T)\n distances, indices = nbrs.kneighbors(self.embeds[i].T)\n self.NNS.append(indices[:,1:])\n end_time = time.clock()\n write(\"Done (%s sec.)\\n\" % str(end_time - start_time))\n pass",
"def neighbors(self):\n hood = (self.x, self.y, self.neighborhood_radius) # neighborhood\n n = collide_single(hood, self.others)\n return n",
"def max_n_edges(self):\n m = 0\n for n in self.nodes:\n k = self.edges_connected(n)\n print(k)\n if k > m:\n m = k\n return k",
"def add_neighbors(self):\n\n self.neighbors = []\n\n if self.row > 0 and not grid[self.row - 1][self.col].is_wall():\n self.neighbors.append(grid[self.row - 1][self.col])\n\n if self.row < rows - 1 and not grid[self.row + 1][self.col].is_wall():\n self.neighbors.append(grid[self.row + 1][self.col])\n\n if self.col > 0 and not grid[self.row][self.col - 1].is_wall():\n self.neighbors.append(grid[self.row][self.col - 1])\n\n if self.col < cols - 1 and not grid[self.row][self.col + 1].is_wall():\n self.neighbors.append(grid[self.row][self.col + 1])\n\n return self.neighbors",
"def compute_neighbors(self):\n for img in self.images:\n self.images_superpixels_neighbours[img] = [set() for sp in self.images_superpixels[img]]\n for row in range(len(self.images_segmented[img]) - 1):\n for column in range(len(self.images_segmented[img][0]) - 1):\n current = self.images_segmented[img][row][column] # superpixel label of current pixel\n right = self.images_segmented[img][row][column + 1] # superpixel label of pixel right of current\n below = self.images_segmented[img][row + 1][column] # superpixel label of pixel below current\n if current != right:\n self.images_superpixels_neighbours[img][current].add(right)\n self.images_superpixels_neighbours[img][right].add(current)\n if current != below:\n self.images_superpixels_neighbours[img][current].add(below)\n self.images_superpixels_neighbours[img][below].add(current)",
"def neighbors_of_feasible(self):\n return self._neighbors_of_feasible",
"def __add_neighbours(self):\n calculate_cell_neighbour_coordinates = self._neighbourhood.calculate_cell_neighbour_coordinates\n coordinates = self._current_state.keys()\n for coordinate, cell_c, cell_n in zip(coordinates, self._current_state.values(), self._next_state.values()):\n n_coord = calculate_cell_neighbour_coordinates(\n coordinate, self._dimension)\n cell_c.neighbours = list([self._current_state[nc]\n for nc in n_coord])\n cell_n.neighbours = list([self._next_state[nc] for nc in n_coord])",
"def _get_neighboring_edges(self, g, edge):\n x = [(edge[0], v, _) for v in nx.neighbors(g, edge[0]) for _\n in range(g.number_of_edges(edge[0], v)) if v != edge[1]] +\\\n [(edge[0], edge[1], tag) for tag in\n range(g.number_of_edges(edge[0], edge[1])) if tag != edge[2]]\n if edge[0] != edge[1]: # not a self-loop\n x += [(edge[1], v, _) for v in nx.neighbors(g, edge[1]) for _\n in range(g.number_of_edges(edge[1], v)) if v != edge[0]]\n return set([order(e) for e in x])",
"def test_total_neighbors(st: SpaceTime):\n # This is actually only true if the space_time is large enough. WHen it is small enough one node may be two different neighors reducing the total number of neighbors.\n for n in events(st):\n assert len(n.neighbors) >= 4",
"def CountNeighbours(self):\r\n idx = lambda c: -int(c==0) + int(c==self.size-1)\r\n deltas = { -1:[0, 1], 0:[-1, 0, 1], 1:[-1, 0] }\r\n ## localize instance vars local for speed (gains +1 fps)\r\n size = self.size\r\n grid = self.grid\r\n count = self.count\r\n ## #\r\n for y in xrange(size):\r\n for x in xrange(size):\r\n count[y][x] = 0\r\n vertical = deltas[idx(y)]\r\n horizontal = deltas[idx(x)]\r\n for dy in vertical:\r\n for dx in horizontal:\r\n if (dy != 0) or (dx != 0):\r\n count[y][x] += grid[y+dy][x+dx]",
"def number_of_neighbors(self):\n return len(self.indices_of_atoms_connecting)",
"def _count_neighbours(self):\n for point in self._points:\n self._neighbour_counter[point] += len(point.cluster.points)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Constructs a tree via maximum parsimony by invoking RAxML through Biopython's RaxmlCommandline wrapper.
|
def parsimony_tree(self):
print "Generating maximum parsimony tree.."
if runs > 0 or boot > 0:
print "ERROR: Bootstrap and multiple runs not compatible with -tree pars option."
exit()
cpus = multiprocessing.cpu_count()
if cpus > maxthreads:
cpus = maxthreads
# Erase RAxML intermediate files from previous runs
raxml_glob = glob.glob('RAxML_*')
for delfile in raxml_glob:
os.remove(delfile)
# Output sequence to a temp FASTA file
tempfastafile = self.indata.filebase + self.impname + "_fastatmp.fasta"
reducedtempfastafile = self.indata.filebase + self.impname + "_fastatmp.fasta.reduced"
AlignIO.write(self.indata.sequence, tempfastafile, "fasta")
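# Assemble the RAxML options: parsimony-only search, a random parsimony seed, and the thread count capped above.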
raxml_args = {"sequences": tempfastafile, "model": self.rmodel, "name": self.impname,
"parsimony_seed": rng.randint(0, sys.maxint), "threads": cpus, "parsimony": True,
"algorithm": "d"}
raxmlstarttreename = "RAxML_" + self.impname + "_starttree.newick"
if self.starttree:
Phylo.write(self.starttree, raxmlstarttreename, "newick")
raxml_args["starting_tree"] = raxmlstarttreename
if exlocal:
raxml_cline = RaxmlCommandline(cmd='./raxmlHPC', **raxml_args)
else:
raxml_cline = RaxmlCommandline(**raxml_args)
print "Invoking RAxML with ", raxml_cline
out_log, err_log = raxml_cline()
if verbose:
print err_log
print out_log
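# RAxML writes its parsimony result to RAxML_parsimonyTree.<run name>; read it back in as the working tree.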
raxmlparstreename = "RAxML_parsimonyTree." + self.impname
self.tree = Phylo.read(raxmlparstreename, "newick")
# Erase RAxML intermediate files
if not verbose:
raxml_glob = glob.glob('RAxML_*')
for delfile in raxml_glob:
os.remove(delfile)
try:
os.remove(tempfastafile)
except OSError:
pass
try:
os.remove(reducedtempfastafile)
except OSError:
pass
|
[
"def construct_maxheap_recursive(arr):\n if len(arr) > 0:\n v = arr.pop()\n heap = BTNode(v)\n else:\n return None\n heap.left = construct_maxheap_recursive(arr)\n heap.right = construct_maxheap_recursive(arr)\n fix_maxheap(heap, heap.value)\n return heap",
"def __init__(self, maxnodepts = 64, initsize = 4):\n this = _coin.new_SbBSPTree(maxnodepts, initsize)\n try: self.this.append(this)\n except: self.this = this",
"def tree_max(t):\n if is_leaf(t):\n return label(t)\n else:\n return max(label(b) for b in branches(t))",
"def get_max_node(self) -> BSTNode[T]:\n \n return self.tree_maximum(self.root)\n ...",
"def make_master_tree(\n n,\n method,\n names=None,\n inner_edge_params=(1, 1),\n leaf_params=(1, 1),\n distribution_func=np.random.gamma,\n ):\n\n if method == 'random_topology':\n master_topology = Tree.new_random_topology(n,\n names=names, rooted=True)\n master_tree = \\\n master_topology.randomise_branch_lengths(inner_edges=inner_edge_params,\n leaves=leaf_params,\n distribution_func=branch_length_func)\n master_tree.newick = '[&R] ' + master_tree.newick\n elif method == 'random_yule':\n master_tree = Tree.new_random_yule(n, names=names)\n elif method == 'random_coal':\n master_tree = Tree.new_random_coal(n, names=names)\n return master_tree",
"def __init__(self, max_depth: Optional[int] = None):\n\n self.__max_depth: int = max_depth\n self.__root: Optional[Node] = None\n self.__branches: Optional[str] = None",
"def __init__(self, maxnodepts: 'int const'=64, initsize: 'int const'=4):\n this = _coin.new_SbBSPTree(maxnodepts, initsize)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def get_random_tagged_tree(number_leafnodes, percentage_parasites, percentage_unknown, p_multifurcation, beta_distribution_parameters):\n # Arguments:\n # number_leafnodes - needed for randomized function\n # percentage_parasites\n # percentage_unknown - proportion of unknown leafnodes\n # percentage_multifurcation\n # beta_distribution_parameters - [A_FL, B_FL, A_P, B_P]\n\n global percentage_multifurcation\n percentage_multifurcation = p_multifurcation\n\n START_TIME = datetime.datetime.now().replace(microsecond=0)\n CURRENT_TIME = datetime.datetime.now().replace(microsecond=0)\n print(\"---- randomized tree ----\")\n current_percentage_parasites = 0\n # randomized(cls, taxa, branch_length=1.0, branch_stdev=None) \n # Create a randomized bifurcating tree given a list of taxa.\n # https://github.com/biopython/biopython/blob/master/Bio/Phylo/BaseTree.py\n randomized_tree = Phylo.BaseTree.Tree.randomized(number_leafnodes)\n randomized_tree.clade.name = 'root'\n boolean = True\n CURRENT_TIME = print_time(START_TIME)\n print(\"---- tag tree ----\")\n while boolean:\n current_tree = deepcopy(randomized_tree)\n result = tag_tree(current_tree.clade, [], 0, [0, 0], percentage_parasites, percentage_unknown, beta_distribution_parameters) # father_tag = 0 -> free living\n nodelist = result[1]\n leaf_distr = result[2]\n # child_depth = child_depth + result[3]\n # %P = #FL / (#P + #FL) * 100\n current_percentage_parasites = leaf_distr[1] / (leaf_distr[0] + leaf_distr[1]) \n print(\"tried\", current_percentage_parasites*100, \"% of parasites\") # 40% parasites?\n if (percentage_parasites - permitted_deviation) < current_percentage_parasites < (percentage_parasites + permitted_deviation):\n boolean = False\n print(\"----\")\n CURRENT_TIME = print_time(CURRENT_TIME)\n print(\"----\")\n # print(current_percentage_parasites, '% parasites,', 100 - current_percentage_parasites, '% free-living')\n return [current_tree, nodelist]",
"def buildTree(self):\n tree = owyl.parallel(\n owyl.limit(\n owyl.repeatAlways(self.clearMemoes(), debug=True),\n limit_period=0.4),\n\n ### Velocity and Acceleration\n #############################\n owyl.repeatAlways(owyl.sequence(self.hasCloseNeighbors(),\n self.accelerate(rate=-.01),\n ),\n ),\n self.move(),\n self.matchSpeed(match_speed=300, rate=.01),\n\n ### Steering\n ############\n self.seek(goal=(0, 0), rate=5),\n self.steerToMatchHeading(rate=2),\n self.steerForSeparation(rate=5),\n self.steerForCohesion(rate=2),\n\n policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL\n )\n return owyl.visit(tree, blackboard=self.bb)",
"def run_max_product(self, tree, N):\n # initialize max_up_belief (will replace up_belief in computation)\n\n # backtracking?\n\n # most likely state for each nonroot node\n\n # ###### up_propagate:\n # \"\"\"\n #compute upward belief at each node (function of incoming msgs and node potential) and\n #send the message to the parent\n #\"\"\"\n root = tree.get_root()\n\n active_nodes = tree.get_leaves()\n\n while active_nodes:\n curr_node = active_nodes.pop()\n #compute max belief if it doesn't exist\n if curr_node.max_up_belief is None:\n curr_node.max_up_belief = self.compute_max_belief(curr_node, tree)\n if curr_node != root:\n self.pass_max_msg_up(tree, curr_node, curr_node.get_parent(), N)\n if curr_node.get_parent().is_ready_decoding(tree):\n active_nodes.append(curr_node.get_parent())\n\n # Backtrack\n max_states = {}\n active_edges = tree.get_edges_to_root()\n while active_edges:\n curr_edge = active_edges.pop()\n curr_child = curr_edge.get_child()\n if curr_edge in tree.get_edges_to_root():\n curr_child.max_state = curr_edge.max_paths # scalar\n max_states[curr_child.index] = curr_child.max_state\n else:\n curr_child.max_state = curr_edge.max_paths[curr_edge.get_parent().max_state]\n max_states[curr_child.index] = curr_child.max_state\n active_edges.extend(tree.get_edges_where_parent(curr_child))\n\n return max_states",
"def create_tree(p_0, theta_0, max_layers, curr_layer):\n if curr_layer >= max_layers:\n return None\n else:\n z = gen_z_val()\n theta = gen_theta_val()\n phi = gen_phi_val()\n p_rad, p_f = split(p_0, z, theta_0, theta, phi)\n curr = Parton(p_0)\n curr.left_child = create_tree(p_rad, theta_0 + theta, max_layers, curr_layer + 1)\n curr.right_child = create_tree(p_f, theta_0 - theta, max_layers, curr_layer + 1)\n return curr",
"def __init__(self, min_height=2, max_height=4, search_space_obj=None, tree_args=None):\n nodes = None\n if tree_args is not None:\n nodes = tree_args.get(\"nodes\")\n tree_fitness = tree_args.get(\"fitness\", 0)\n tree_avg_epoch_time = tree_args.get(\"avg_epoch_time\", None)\n id = tree_args.get(\"id\", None)\n\n TreeConstruction.__init__(self, min_height, max_height, search_space_obj, nodes)\n LinearTree.__init__(self, self.root, nodes)\n\n self.symbolic_expression = None\n # A flag set after validation to mark if this tree is working or not. None represents\n # that it has not yet been validated.\n self.working = None\n\n if tree_args is None:\n self.fitness = 0 # The fitness of the tree\n self.avg_epoch_time = None # If the NN is a fitness function, then the time for each Epoch.\n self.id = None\n # Construct the Expression and the Linear Tree\n self.initialize_parents()\n self.assign_level_order_id()\n self.linearize_tree()\n try:\n self.construct_symbolic_expression()\n self.validate_working()\n except:\n self.working = False\n else:\n self.fitness = tree_fitness # The fitness of the tree\n self.avg_epoch_time = tree_avg_epoch_time # If the NN is a fitness function, then the time for each Epoch.\n self.id = id\n self.reset_tree()",
"def max_children(self, num_children):\n nq = self._clone()\n try:\n num_children = int(num_children)\n except ValueError:\n num_children = 15\n\n nq.num_children = num_children\n return nq",
"def build_trees(alns, trees):\n # prepare calculator and constructor\n calculator = DistanceCalculator('blosum62')\n constructor = DistanceTreeConstructor()\n for aln, tree in zip(alns, trees):\n print(aln, tree)\n processes = []\n for method in phylip_symb:\n processes.append(subprocess.Popen([\n method,\n '-auto',\n '-sequence',\n aln,\n '-outtreefile',\n tree.format(method)\n ]))\n # nj + upgma\n with open(aln) as fin:\n alnr = AlignIO.read(fin, 'fasta')\n dm = calculator.get_distance(alnr)\n Phylo.write(\n constructor.upgma(dm),\n tree.format('upgma'),\n 'newick'\n )\n Phylo.write(\n constructor.nj(dm),\n tree.format('nj'),\n 'newick'\n )\n for process in processes:\n print(process.wait())",
"def initializeTrees(self):\n #Initialize tree value for parentless particles (initial partons should always be linked here)\n if self.verbose:\n print(\"MCTree is initializing the tree with empty arrays\")\n self.tree[-1] = []\n for Idx in xrange(len(self.gens)):\n self.tree[Idx] = []\n self.treeElectron[Idx] = []\n self.treeMuon[Idx] = []\n self.treeTau[Idx] = []\n self.treeJet[Idx] = []\n self.treeGenJet[Idx] = []\n self.treeFatJet[Idx] = []\n self.treeGenJetAK8[Idx] = []\n #self.treeSubJet[Idx] = []\n #self.treeSubGenJetAK8[Idx] = []\n self.treeJetTuple[Idx] = []\n self.treeGenJetTuple[Idx] = []\n self.treeFatJetTuple[Idx] = []\n self.treeGenJetAK8Tuple[Idx] = []\n #self.treeSubJetTuple[Idx] = []\n #self.treeSubGenJetAK8Tuple[Idx] = []",
"def _build_tree_dynamic(self):\n node_id = 0\n fractions = dice_fractions(self.fixed_k)\n\n c = Components(self.proc_affinity_matrix)\n #Build the bottom level\n components, comp_mat = c.get_components(fractions.next(), \n self.proc_affinity_matrix,\n strongly_connected=True)\n for component in components:\n base_mat = self.base_affinity_matrix[component,:][:,component]\n proc_mat = self.proc_affinity_matrix[component,:][:,component]\n keys = self.key_list[component]\n n = Node(component, keys, base_mat, proc_mat, node_id)\n self.nodes[node_id] = n\n node_id += 1\n\n node_offset = temp_offset = 0\n for fraction in fractions:\n temp_offset += len(components) \n c = Components(comp_mat)\n components, comp_mat = c.get_components(fraction, comp_mat, True)\n for component in components:\n instances = []\n for instance in component:\n idx = instance + node_offset\n self.nodes[idx]._parent = node_id\n instances += self.nodes[idx].list_of_instances\n base_mat = self.base_affinity_matrix[instances,:][:,instances]\n proc_mat = self.proc_affinity_matrix[instances,:][:,instances]\n \n keys = self.key_list[instances]\n n = Node(instances, keys, base_mat, proc_mat, node_id)\n n._children = list(asanyarray(component) + node_offset) \n self.nodes[node_id] = n\n node_id += 1\n \n node_offset = temp_offset\n self.root_id = node_id - 1",
"def fit(dataset, max_depth, feature_ratio, min_leaves, size_param, emphasize):\n\n\n tree = Tree(dataset, max_depth, feature_ratio, min_leaves, size_param, emphasize)\n return tree",
"def run_max_posterior(self, tree, N, cont, ignore_rel=None):\n # upward and downward propagation\n self.up_propagate(tree, N)\n tree.set_ll(tree.get_root().up_belief)\n ll = tree.get_ll()\n\n self.down_propagate(tree, N)\n best_states = {}\n # get posteriors\n for node in tree.get_nonroots():\n if node.rel == ignore_rel:\n continue\n parent = node.get_parent()\n curr_edge = tree.get_edge_by_nodes(parent, node)\n node.posterior = self.compute_node_posterior(node, curr_edge, ll)\n # assert len(node.posterior.shape) == 1\n if cont:\n node.max_state = node.posterior\n else:\n node.max_state = node.posterior.argmax()\n best_states[node.index] = node.max_state\n\n return best_states",
"def __farTree(tau,topology,ratio):\n \n # calculating p and q from the parameters ratio and tau\n p=round((tau*ratio)/(3+(2*ratio)),3)\n q=round(p/ratio,3)\n half_p=round(p/2,3)\n half_q=round(q/2,3)\n \n # Creating trees\n tree1=['((']\n tree1.append(topology[0]+':'+str(p)+',')\n tree1.append(topology[1]+':'+str(p)+'):'+str(half_q)+',(')\n tree1.append(topology[2]+':'+str(q)+',')\n tree1.append(topology[3]+':'+str(q)+'):'+str(half_q)+');') \n nwkTree1=''.join(tree1)\n \n tree2=['((('] \n tree2.append(topology[0]+':'+str(p)+',')\n tree2.append(topology[1]+':'+str(p)+'):'+str(q)+',')\n tree2.append(topology[2]+':'+str(q)+'):'+str(half_q)+',')\n tree2.append(topology[3]+':'+str(half_q)+');') \n nwkTree2=''.join(tree2) \n \n tree3=['('] \n tree3.append(topology[0]+':'+str(half_p)+',(')\n tree3.append(topology[1]+':'+str(p)+',(')\n tree3.append(topology[2]+':'+str(q)+',')\n tree3.append(topology[3]+':'+str(q)+'):'+str(q)+'):'+str(half_p)+');') \n nwkTree3=''.join(tree3)\n \n return [nwkTree1,nwkTree2,nwkTree3]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Constructs a tree via maximum likelihood by invoking external software PhyML. See docs for PhyML installation and setup.
|
def phyml_tree(self):
print "Invoking PhyML..."
if runs > 0 or boot > 0:
print "ERROR: Bootstrap and multiple runs not yet implemented for PhyML."
print "Try using RAxML."
exit()
# Output sequence to a temp FASTA file
tempfastafile = self.indata.filebase + "_" + self.impname + "_fastatmp.fasta"
AlignIO.write(self.indata.sequence, tempfastafile, "fasta")
tempphyfile = self.indata.filebase + "_" + self.impname + "_phytmp.phy"
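# PhyML expects PHYLIP input, so convert the temporary FASTA alignment before invoking it.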
AlignIO.convert(tempfastafile, "fasta", tempphyfile, "phylip-relaxed")
phyml_args = {"input": tempphyfile, "alpha": "e"}
phystarttreename = "PhyML_imp", self.impname, "starttree.newick"
if self.starttree:
Phylo.write(self.starttree, phystarttreename, "newick")
phyml_args["input_tree"] = phystarttreename
if exlocal:
cmdline = PhymlCommandline(cmd='./PhyML', **phyml_args)
else:
cmdline = PhymlCommandline(**phyml_args)
print "Commandline for PhyML: " + str(cmdline)
out_log, err_log = cmdline()
if verbose:
print err_log
print out_log
phytreefile = tempphyfile + "_phyml_tree.txt"
self.tree = Phylo.read(phytreefile, "newick")
if not verbose:
phyml_globname = self.indata.filebase + "_" + self.impname + "*"
phyml_glob = glob.glob(phyml_globname)
for delfile in phyml_glob:
os.remove(delfile)
|
[
"def max_tree(max_depth = None, out_file = None):\n\n data = np.loadtxt(\"fourier/energy.txt\", delimiter=\",\")\n\n X = []\n y = []\n for row in data:\n y.append(int(row[1]))\n X.append(map(int,row[2:]))\n\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=.4, random_state=0)\n\n print X_train\n\n clf = tree.DecisionTreeClassifier(max_depth=max_depth)\n clf = clf.fit(X_train, y_train)\n\n print \"trained tree of depth %s\" % (max_depth) \n print \"training error: %f\" % (1-clf.score(X_train, y_train)) \n print \"testing error: %f\" % (1-clf.score(X_test, y_test)) \n\n\n if out_file:\n with open(out_file+\".dot\", 'w') as f:\n f = tree.export_graphviz(clf, out_file=f)\n\n graph = pydot.graph_from_dot_file(out_file+\".dot\")\n graph.write_png(out_file+'.png')",
"def input_tree(self):\n\n if self.starttreename:\n if self.starttreename[-3:] == 'xml':\n self.starttree = Phylo.read(self.starttreename, \"phyloxml\")\n elif self.starttreename[-6:] == 'newick':\n self.starttree = Phylo.read(self.starttreename, \"newick\")\n\n print \"Generating phylogenetic tree...\"\n\n if self.treetype[-3:] == 'xml':\n self.tree = Phylo.read(self.treetype, \"phyloxml\")\n elif self.treetype[-3:] == 'nwk':\n self.tree = Phylo.read(self.treetype, \"newick\")\n elif self.treetype == 'pars':\n self.parsimony_tree()\n elif self.treetype == 'PhyML':\n self.phyml_tree()\n else:\n self.raxml_tree()\n\n self.tree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.treeparents = self.all_parents(self.tree)\n for btree in self.btrees:\n btree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.btreeparents.append(self.all_parents(btree))",
"def parsimony_tree(self):\n print \"Generating maximum parsimony tree..\"\n if runs > 0 or boot > 0:\n print \"ERROR: Bootstrap and multiple runs not compatible with -tree pars option.\"\n exit()\n cpus = multiprocessing.cpu_count()\n if cpus > maxthreads:\n cpus = maxthreads\n # Erase RaXML intermediate files from previous runs\n raxml_glob = glob.glob('RAxML_*')\n for delfile in raxml_glob:\n os.remove(delfile)\n\n # Output sequence to a temp FASTA file\n tempfastafile = self.indata.filebase + self.impname + \"_fastatmp.fasta\"\n reducedtempfastafile = self.indata.filebase + self.impname + \"_fastatmp.fasta.reduced\"\n AlignIO.write(self.indata.sequence, tempfastafile, \"fasta\")\n\n raxml_args = {\"sequences\": tempfastafile, \"model\": self.rmodel, \"name\": self.impname,\n \"parsimony_seed\": rng.randint(0, sys.maxint), \"threads\": cpus, \"parsimony\": True,\n \"algorithm\": \"d\"}\n\n raxmlstarttreename = \"RAxML_\" + self.impname + \"_starttree.newick\"\n if self.starttree:\n Phylo.write(self.starttree, raxmlstarttreename, \"newick\")\n raxml_args[\"starting_tree\"] = raxmlstarttreename\n\n if exlocal:\n raxml_cline = RaxmlCommandline(cmd='./raxmlHPC', **raxml_args)\n else:\n raxml_cline = RaxmlCommandline(**raxml_args)\n\n print \"Invoking RAxML with \", raxml_cline\n\n out_log, err_log = raxml_cline()\n if verbose:\n print err_log\n print out_log\n raxmlparstreename = \"RAxML_parsimonyTree.\" + self.impname\n self.tree = Phylo.read(raxmlparstreename, \"newick\")\n\n # Erase RaXML intermediate files\n if not verbose:\n raxml_glob = glob.glob('RAxML_*')\n for delfile in raxml_glob:\n os.remove(delfile)\n\n try:\n os.remove(tempfastafile)\n except OSError:\n pass\n\n try:\n os.remove(reducedtempfastafile)\n except OSError:\n pass",
"def phyloxml(self):\n # Load Tree with addition information\n tree = newick.loads(self.data_phyloxml)[0]\n\n # Load Additional information from the database\n clades = Clade.query.all()\n id_to_clade = {c.id: c.name for c in clades}\n seq_to_species = {}\n seq_to_id = {}\n species = []\n\n for s in self.sequences.all():\n seq_to_id[s.name] = s.id\n seq_to_species[s.name] = s.species.code\n if s.species not in species:\n species.append(s.species)\n\n csep = CrossSpeciesExpressionProfile()\n csep_data = csep.get_data(*seq_to_id.values())\n\n has_heatmap = False\n heatmap_order = []\n for cd in csep_data:\n if \"profile\" in cd.keys() and \"order\" in cd[\"profile\"].keys():\n has_heatmap = True\n heatmap_order = cd[\"profile\"][\"order\"]\n break\n\n # Start constructing PhyloXML\n doc, tag, text, line = Doc().ttl()\n with tag(\"phyloxml\"):\n with tag(\"phylogeny\", rooted=\"True\"):\n # line('name', self.label)\n # line('description', \"PlaNet 2.0 PhyloXML tree\")\n Tree.__yattag_node(\n tree, tag, text, line, id_to_clade, seq_to_species, seq_to_id\n )\n\n with tag(\"graphs\"):\n if has_heatmap:\n with tag(\"graph\", type=\"heatmap\"):\n line(\"name\", \"Heatmap\")\n with tag(\"legend\", show=1):\n for label in heatmap_order:\n with tag(\"field\"):\n line(\"name\", label)\n with tag(\"gradient\"):\n line(\"name\", \"YlGnBu\")\n line(\"classes\", len(heatmap_order))\n with tag(\"data\"):\n for cd in csep_data:\n if (\n \"profile\" in cd.keys()\n and \"data\" in cd[\"profile\"].keys()\n ):\n with tag(\n \"values\", **{\"for\": str(cd[\"sequence_id\"])}\n ):\n for label in heatmap_order:\n if cd[\"profile\"][\"data\"][label] is not None:\n line(\n \"value\",\n cd[\"profile\"][\"data\"][label],\n )\n else:\n line(\"value\", \"\")\n\n with tag(\"graph\", type=\"binary\"):\n line(\"name\", \"Low Expression\")\n with tag(\"legend\", show=1):\n with tag(\"field\"):\n line(\"name\", \"Low expression\")\n line(\"color\", \"0xf03b20\")\n line(\"shape\", \"circle\")\n\n with tag(\"data\"):\n for cd in csep_data:\n if \"low_expressed\" in cd.keys():\n with tag(\"values\", **{\"for\": str(cd[\"sequence_id\"])}):\n line(\"value\", cd[\"low_expressed\"])\n\n with tag(\"graph\", type=\"multibar\"):\n line(\"name\", \"Expression Range\")\n with tag(\"legend\", show=1):\n with tag(\"field\"):\n line(\"name\", \"Max. Expression (TPM)\")\n line(\"color\", \"0x664977\")\n\n with tag(\"data\"):\n for cd in csep_data:\n if \"max_expression\" in cd.keys():\n with tag(\"values\", **{\"for\": str(cd[\"sequence_id\"])}):\n line(\"value\", cd[\"max_expression\"])\n\n with tag(\"taxonomies\"):\n for s in species:\n with tag(\"taxonomy\", code=s.code):\n line(\"color\", s.color.replace(\"#\", \"0x\"))\n line(\"name\", s.name)\n line(\n \"url\",\n url_for(\n \"species.species_view\", species_id=s.id, _external=True\n ),\n )\n\n for c in clades:\n with tag(\"taxonomy\", code=c.name):\n line(\"color\", \"0x000000\")\n line(\"name\", c.name)\n line(\n \"url\",\n url_for(\"clade.clade_view\", clade_id=c.id, _external=True),\n )\n\n return indent(doc.getvalue())",
"def build_trees(alns, trees):\n # prepare calculator and constructor\n calculator = DistanceCalculator('blosum62')\n constructor = DistanceTreeConstructor()\n for aln, tree in zip(alns, trees):\n print(aln, tree)\n processes = []\n for method in phylip_symb:\n processes.append(subprocess.Popen([\n method,\n '-auto',\n '-sequence',\n aln,\n '-outtreefile',\n tree.format(method)\n ]))\n # nj + upgma\n with open(aln) as fin:\n alnr = AlignIO.read(fin, 'fasta')\n dm = calculator.get_distance(alnr)\n Phylo.write(\n constructor.upgma(dm),\n tree.format('upgma'),\n 'newick'\n )\n Phylo.write(\n constructor.nj(dm),\n tree.format('nj'),\n 'newick'\n )\n for process in processes:\n print(process.wait())",
"def get_random_tagged_tree(number_leafnodes, percentage_parasites, percentage_unknown, p_multifurcation, beta_distribution_parameters):\n # Arguments:\n # number_leafnodes - needed for randomized function\n # percentage_parasites\n # percentage_unknown - proportion of unknown leafnodes\n # percentage_multifurcation\n # beta_distribution_parameters - [A_FL, B_FL, A_P, B_P]\n\n global percentage_multifurcation\n percentage_multifurcation = p_multifurcation\n\n START_TIME = datetime.datetime.now().replace(microsecond=0)\n CURRENT_TIME = datetime.datetime.now().replace(microsecond=0)\n print(\"---- randomized tree ----\")\n current_percentage_parasites = 0\n # randomized(cls, taxa, branch_length=1.0, branch_stdev=None) \n # Create a randomized bifurcating tree given a list of taxa.\n # https://github.com/biopython/biopython/blob/master/Bio/Phylo/BaseTree.py\n randomized_tree = Phylo.BaseTree.Tree.randomized(number_leafnodes)\n randomized_tree.clade.name = 'root'\n boolean = True\n CURRENT_TIME = print_time(START_TIME)\n print(\"---- tag tree ----\")\n while boolean:\n current_tree = deepcopy(randomized_tree)\n result = tag_tree(current_tree.clade, [], 0, [0, 0], percentage_parasites, percentage_unknown, beta_distribution_parameters) # father_tag = 0 -> free living\n nodelist = result[1]\n leaf_distr = result[2]\n # child_depth = child_depth + result[3]\n # %P = #FL / (#P + #FL) * 100\n current_percentage_parasites = leaf_distr[1] / (leaf_distr[0] + leaf_distr[1]) \n print(\"tried\", current_percentage_parasites*100, \"% of parasites\") # 40% parasites?\n if (percentage_parasites - permitted_deviation) < current_percentage_parasites < (percentage_parasites + permitted_deviation):\n boolean = False\n print(\"----\")\n CURRENT_TIME = print_time(CURRENT_TIME)\n print(\"----\")\n # print(current_percentage_parasites, '% parasites,', 100 - current_percentage_parasites, '% free-living')\n return [current_tree, nodelist]",
"def construct_decision_tree(self, data, outputs):\n self.features = data.shape[1]\n self.classes = max(outputs)\n if self.decisionTree is None:\n self.decisionTree = DecisionTreeClassifier()\n self.decisionTree.fit(data, outputs)",
"def build_tree(data):\n attributes = list(data.columns.values)\n target = attributes[-1]\n return create_decision_tree(data,attributes,target,IG)",
"def generate_tree_sequence_network():\r\n from xgboost import XGBClassifier\r\n\r\n model = XGBClassifier(n_estimators=600, verbosity=training_verbosity, n_jobs=n_threads)\r\n\r\n return model",
"def __init__(self, min_height=2, max_height=4, search_space_obj=None, tree_args=None):\n nodes = None\n if tree_args is not None:\n nodes = tree_args.get(\"nodes\")\n tree_fitness = tree_args.get(\"fitness\", 0)\n tree_avg_epoch_time = tree_args.get(\"avg_epoch_time\", None)\n id = tree_args.get(\"id\", None)\n\n TreeConstruction.__init__(self, min_height, max_height, search_space_obj, nodes)\n LinearTree.__init__(self, self.root, nodes)\n\n self.symbolic_expression = None\n # A flag set after validation to mark if this tree is working or not. None represents\n # that it has not yet been validated.\n self.working = None\n\n if tree_args is None:\n self.fitness = 0 # The fitness of the tree\n self.avg_epoch_time = None # If the NN is a fitness function, then the time for each Epoch.\n self.id = None\n # Construct the Expression and the Linear Tree\n self.initialize_parents()\n self.assign_level_order_id()\n self.linearize_tree()\n try:\n self.construct_symbolic_expression()\n self.validate_working()\n except:\n self.working = False\n else:\n self.fitness = tree_fitness # The fitness of the tree\n self.avg_epoch_time = tree_avg_epoch_time # If the NN is a fitness function, then the time for each Epoch.\n self.id = id\n self.reset_tree()",
"def fit(dataset, max_depth, feature_ratio, min_leaves, size_param, emphasize):\n\n\n tree = Tree(dataset, max_depth, feature_ratio, min_leaves, size_param, emphasize)\n return tree",
"def build(self):\n text = open(self.fname).read()\n self.tree = buildtree(text,self.fname)\n self.tree = binarizetree(self.tree)\n self.tree = backprop(self.tree,self.fname)",
"def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([2] * 2, activate_final=True),\n snt.LayerNorm()\n ])",
"def setup(job, args, input_file_ids):\n # create a file with the phylogenetic tree in NEWICK format\n tree = write_tree(job, input_file_ids)\n\n # construct all MAF chunks\n chrom_sizes = job.fileStore.readGlobalFile(input_file_ids.chrom_sizes)\n # 4G buffer for MAF chunk, should be more than enough (famous last words)\n hal2maf_usage = tools.toilInterface.find_total_disk_usage(input_file_ids.hal)\n\n # TODO: do not split within genic regions of the reference genome\n maf_chunks = [] # list of lists [chrom, start, chunksize, fileID]\n for chrom, chrom_size in tools.fileOps.iter_lines(chrom_sizes):\n chrom_size = int(chrom_size)\n for start in range(0, chrom_size, args.chunksize - args.overlap):\n chunksize = args.chunksize if start + args.chunksize <= chrom_size else chrom_size - start\n j = job.addChildJobFn(hal2maf, input_file_ids, args.genomes, args.ref_genome, args.annotate_ancestors,\n chrom, start, chunksize, memory='8G', disk=hal2maf_usage)\n maf_chunks.append([chrom, start, chunksize, j.rv()])\n\n # if we have no params, time to train\n if input_file_ids.cgp_param is None:\n du = tools.toilInterface.find_total_disk_usage([input_file_ids.hints_db], buffer='40G')\n results = job.addFollowOnJobFn(cgp_training_wrapper, maf_chunks, tree, args, input_file_ids, memory='8G',\n disk=du).rv()\n else:\n results = job.addFollowOnJobFn(cgp_wrapper, maf_chunks, tree, args, input_file_ids, disk='4G').rv()\n return results",
"def make_master_tree(\n n,\n method,\n names=None,\n inner_edge_params=(1, 1),\n leaf_params=(1, 1),\n distribution_func=np.random.gamma,\n ):\n\n if method == 'random_topology':\n master_topology = Tree.new_random_topology(n,\n names=names, rooted=True)\n master_tree = \\\n master_topology.randomise_branch_lengths(inner_edges=inner_edge_params,\n leaves=leaf_params,\n distribution_func=branch_length_func)\n master_tree.newick = '[&R] ' + master_tree.newick\n elif method == 'random_yule':\n master_tree = Tree.new_random_yule(n, names=names)\n elif method == 'random_coal':\n master_tree = Tree.new_random_coal(n, names=names)\n return master_tree",
"def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),\n snt.LayerNorm()\n ])",
"def tree_max(t):\n if is_leaf(t):\n return label(t)\n else:\n return max(label(b) for b in branches(t))",
"def __init__(self, y, Z, config, name=name, long_name=__doc__):\n\n super(SelectorGmlMl, self).__init__(y, Z, config, name=name, long_name=long_name)\n\n # add modules\n self.py_wrapper = wrapper.selector_glm_ml()\n self.pp_modules += ('binary.selector_glm_ml',)\n\n # determine criterion\n if config['prior/criterion'].lower() == 'bic':\n self.SIZE_PENALTY = 0.5 * numpy.log(self.n)\n if config['prior/criterion'].lower() in ['aic', 'aicc']:\n self.SIZE_PENALTY = 1\n\n # use AIC with correction\n self.AICc = False\n if config['prior/criterion'].lower() == 'aicc':\n self.AICc = True",
"def run_max_product(self, tree, N):\n # initialize max_up_belief (will replace up_belief in computation)\n\n # backtracking?\n\n # most likely state for each nonroot node\n\n # ###### up_propagate:\n # \"\"\"\n #compute upward belief at each node (function of incoming msgs and node potential) and\n #send the message to the parent\n #\"\"\"\n root = tree.get_root()\n\n active_nodes = tree.get_leaves()\n\n while active_nodes:\n curr_node = active_nodes.pop()\n #compute max belief if it doesn't exist\n if curr_node.max_up_belief is None:\n curr_node.max_up_belief = self.compute_max_belief(curr_node, tree)\n if curr_node != root:\n self.pass_max_msg_up(tree, curr_node, curr_node.get_parent(), N)\n if curr_node.get_parent().is_ready_decoding(tree):\n active_nodes.append(curr_node.get_parent())\n\n # Backtrack\n max_states = {}\n active_edges = tree.get_edges_to_root()\n while active_edges:\n curr_edge = active_edges.pop()\n curr_child = curr_edge.get_child()\n if curr_edge in tree.get_edges_to_root():\n curr_child.max_state = curr_edge.max_paths # scalar\n max_states[curr_child.index] = curr_child.max_state\n else:\n curr_child.max_state = curr_edge.max_paths[curr_edge.get_parent().max_state]\n max_states[curr_child.index] = curr_child.max_state\n active_edges.extend(tree.get_edges_where_parent(curr_child))\n\n return max_states"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Runs the imputation procedure over all terminal nodes of the tree, with or without bootstrap replicates.
|
def impute(self):
if ncollect == 'hops':
print "HOPS"
elif ncollect == 'distance':
print "DISTANCE"
else:
print "ROOTWARD"
terms = self.phytree.tree.get_terminals() # Get all terminal nodes on the tree. These are the ones with samples.
if boot > 0 or runs > 0: # Bootstrap replicates or multiple runs
for i in range(passes):
print "\nPass", i
bpar = iter(phytree.btreeparents)
bar = progressbar.ProgressBar(redirect_stdout=True)
for j in bar(range(len(self.phytree.btrees))):
# for btree in self.phytree.btrees:
btree = self.phytree.btrees[j]
terms = btree.get_terminals()
random.shuffle(terms)
bparents = next(bpar)
for term in terms:
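# Collect two neighbor sets for each terminal (of sizes nsize and msize) using the chosen collection strategy.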
if ncollect == 'hops':
nneighbors = phytree.neighbors_by_hops(term, btree, bparents,
nsize)
mneighbors = phytree.neighbors_by_hops(term, btree, bparents,
msize)
elif ncollect == 'distance':
nneighbors = phytree.neighbors_by_distance(term, terms, nsize)
mneighbors = phytree.neighbors_by_distance(term, terms, msize)
elif ncollect == 'mono':
nneighbors = phytree.neighbors_by_mono(term, btree, bparents,
nsize)
mneighbors = phytree.neighbors_by_mono(term, btree, bparents,
msize)
else:
nneighbors = phytree.neighbors_by_rootward(term, bparents, 0, nsize, btree)
mneighbors = phytree.neighbors_by_rootward(term, bparents, 0, msize, btree)
self.impute_bootstrap(term, bparents, str(i + 1), mneighbors, nneighbors)
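# Tally the bootstrap votes: an imputation is accepted only if the fraction of replicates supporting it exceeds the threshold.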
for bootrep in self.bootreps:
newimpute = bootrep.split(".")
impratio = float(self.bootreps[bootrep][0]) / float(self.bootreps[bootrep][1])
newimpute.append(str(impratio))
if verbose:
if impratio > threshold:
newimpute.append("T")
self.workseq[newimpute[0]][int(newimpute[1])] = newimpute[3]
self.indivimputes[newimpute[0]].append(newimpute[1])
else:
newimpute.append("F")
self.imputelist.append(newimpute)
else:
if impratio > threshold:
newimpute.append("T")
self.imputelist.append(newimpute)
self.workseq[newimpute[0]][int(newimpute[1])] = newimpute[3]
self.indivimputes[newimpute[0]].append(newimpute[1])
else:
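# No bootstrap replicates: impute directly on the single main tree, one pass at a time.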
for i in range(passes):
print "\nPass", i
random.shuffle(terms)
bar = progressbar.ProgressBar(redirect_stdout=True)
for p in bar(range(len(terms))):
term = terms[p]
# for term in terms:
if ncollect == 'hops':
nneighbors = phytree.neighbors_by_hops(term, self.phytree.tree, self.phytree.treeparents, nsize)
mneighbors = phytree.neighbors_by_hops(term, self.phytree.tree, self.phytree.treeparents, msize)
elif ncollect == 'distance':
nneighbors = phytree.neighbors_by_distance(term, terms, nsize)
mneighbors = phytree.neighbors_by_distance(term, terms, msize)
elif ncollect == 'mono':
nneighbors = phytree.neighbors_by_mono(term, self.phytree.tree, self.phytree.treeparents,
nsize)
mneighbors = phytree.neighbors_by_mono(term, self.phytree.tree, self.phytree.treeparents,
msize)
else:
nneighbors = phytree.neighbors_by_rootward(term, self.phytree.treeparents, 0, nsize)
mneighbors = phytree.neighbors_by_rootward(term, self.phytree.treeparents, 0, msize)
self.impute_threshold(term, self.phytree.treeparents, str(i + 1),
mneighbors, nneighbors)
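# Apply every imputation that passed the threshold test ("T") to the working sequences.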
for newimpute in self.imputelist:
if newimpute[6] == "T":
self.indivimputes[newimpute[0]].append(newimpute[1])
self.workseq[newimpute[0]][newimpute[1]] = newimpute[3]
self.process_imputed()
|
[
"def initialize(self):\n\n for n in self.nodes():\n # if self.nodes[n].get(\"init_fn\") is not None:\n if n in self.input_functions:\n self.nodes[n][\"value\"] = self.nodes[n][\"init_fn\"]()\n self.update()",
"def imputation(self):\n return self._imputation",
"def impute_missing_values(\n self,\n imputation_method,\n log_transform=True,\n min_observations_per_peptide=1,\n min_observations_per_allele=1):\n if isinstance(imputation_method, string_types):\n imputation_method = imputer_from_name(imputation_method)\n\n X_incomplete, peptide_list, allele_list = self.to_dense_pMHC_affinity_matrix(\n min_observations_per_peptide=min_observations_per_peptide,\n min_observations_per_allele=min_observations_per_allele)\n\n if imputation_method is None:\n logging.warn(\"No imputation method given\")\n # without an imputation method we should leave all the values\n # incomplete and return an empty dataset\n X_complete = np.ones_like(X_incomplete) * np.nan\n else:\n if log_transform:\n X_incomplete = np.log(X_incomplete)\n\n if np.isnan(X_incomplete).sum() == 0:\n # if all entries in the matrix are already filled in then don't\n # try using an imputation algorithm since it might raise an\n # exception.\n logging.warn(\"No missing values, using original data instead of imputation\")\n X_complete = X_incomplete\n else:\n X_complete = imputation_method.complete(X_incomplete)\n\n if log_transform:\n X_complete = np.exp(X_complete)\n\n allele_to_peptide_to_affinity_dict = dense_pMHC_matrix_to_nested_dict(\n X=X_complete,\n peptide_list=peptide_list,\n allele_list=allele_list)\n return self.from_nested_dictionary(allele_to_peptide_to_affinity_dict)",
"def reset_imp(self) -> None:\n for unit in self.mutable_units:\n unit.reset_fisher_info()",
"def impute_apply(data, n_iter, to_nan=0.2, fast_impute=False):\n output = impute_optimizer(data, n_iter=n_iter, to_nan=0.2, fast_impute=False)\n imputer, param = output.iloc[0,:].name.split(\"__\")\n param = param.replace(\":\", \"\")\n\n if imputer == \"SimpleImputer\":\n ix = data.index.copy()\n data = (SimpleImputer(strategy=param)\n .fit_transform(np.asarray(data).reshape(-1, 1)))\n data = pd.Series(data.flatten(), index=ix)\n del ix\n\n elif imputer == \"KNNImputer\":\n ix = data.index.copy()\n data = (KNNImputer(weights=param)\n .fit_transform(np.asarray(data).reshape(-1, 1)))\n data = pd.Series(data.flatten(), index=ix)\n del ix\n\n elif imputer == \"Interpolate\":\n if param == \"time\":\n data.index = pd.to_datetime(pd.to_timedelta(data.index, unit=\"days\"))\n data = data.interpolate(method=param, limit_direction=\"both\")\n else:\n data = data.interpolate(method=param, limit_direction=\"both\")\n\n elif imputer == \"Interpolate_with_order\":\n # Order can be tweaked (default quadratic)\n data = data.interpolate(method=param, limit_direction=\"both\", order=2)\n \n elif imputer == \"TimeSeries_LOCF\":\n ix = data.index.copy()\n data = locf(np.asarray(data).reshape(1, -1))\n data = pd.Series(data.flatten(), index=ix)\n del ix\n \n elif imputer == \"Moving_Win_Imputer\":\n ix = data.index.copy()\n param = int(param)\n remainder = -(len(data) % param)\n data = np.asarray(list(zip(*[iter(data)] * param)))\n data = np.asarray(moving_window(data, wsize=param))\n if remainder != 0:\n data = pd.Series(data.flatten(),\n index=ix[:remainder])\n else:\n data = pd.Series(data.flatten(), index=ix)\n del ix\n else:\n raise Exception\n print(\"Imputer passed through \\\"impute_optimize\\\" cannot be applied\")\n print(f\"Value passed: {impter}\")\n \n return data, imputer, param",
"def update_inverse_temperature(self):\n\n if settings.annealing_method == \"multiplication\":\n self.inverse_temperature *= self.inverse_temperature_increment\n elif settings.annealing_method == \"addition\":\n self.inverse_temperature += self.inverse_temperature_increment\n else:\n eprint(1, \"ERROR: Annealing method %s not recognized\" % settings.annealing_method)\n quit()",
"def __init__(\n self,\n num_imputer=InterpolateImputer,\n cat_imputer=ModeImputer,\n num_kwgs={\"fill_strategy\": \"linear\"},\n cat_kwgs={\"fill_strategy\": \"random\"}\n ):\n DefaultBaseImputer.__init__(\n self,\n num_imputer=num_imputer,\n cat_imputer=cat_imputer,\n num_kwgs=num_kwgs,\n cat_kwgs=cat_kwgs\n )",
"def _impute_from_job_type(self):\n for column_name in self.categorical_variables:\n method = (lambda x: x.mode()[0])\n self._impute_single_column_from_job_type(column_name, method)\n for column_name in self.continuous_variables:\n method = (lambda x: x.median())\n self._impute_single_column_from_job_type(column_name, method)\n return self",
"def _impute(self, examples\n ):\n\n for feature, feature_values in examples.items():\n if schema_util.is_categorical_feature(\n schema_util.get_feature(self._schema, feature)):\n imputation_fill_value = CATEGORICAL_FEATURE_IMPUTATION_FILL_VALUE\n else:\n imputation_fill_value = max(\n value for value in feature_values if value is not None) * 10\n examples[feature] = [\n value if value is not None else imputation_fill_value\n for value in feature_values\n ]\n return examples",
"def execute(self, nodenet, nodes, netapi):\n for uid, node in nodes.items():\n node.reset_slots()\n\n # propagate activation\n for uid, node in nodes.items():\n for gate_type in node.get_gate_types():\n gate = node.get_gate(gate_type)\n for link in gate.get_links():\n link.target_slot.add_activation(float(gate.activation) * float(link.weight)) # TODO: where's the string coming from?",
"def __init__(\n self,\n num_imputer=PMMImputer,\n cat_imputer=MultinomialLogisticImputer,\n num_kwgs=None,\n cat_kwgs=None\n ):\n # delegate to DefaultBaseImputer\n DefaultBaseImputer.__init__(\n self,\n num_imputer=num_imputer,\n cat_imputer=cat_imputer,\n num_kwgs=num_kwgs,\n cat_kwgs=cat_kwgs\n )",
"def update_imp(self) -> None:\n for unit in self.mutable_units:\n unit.update_fisher_info()",
"def impute(data):\n\n for i in range(len(data)): # every person\n data[i] = data[i].fillna(0)\n\n return data",
"def set_input(self, inputs):\n for i, node in enumerate(self.input_nodes):\n node.activation_level = inputs[i]",
"def reset_assignments(self):\n for node in self.all_nodes:\n node.set_assignment(None)\n #for node -ends",
"def impute_by_regression(target, df, impute_method=\"mean\"):\n if target.name in df.columns:\n df = df[~target.name]\n reg_imp = MiceImputer(seed_strategy=impute_method, target=target.name, group=[])\n reg_imp.fit(pd.concat([df, target], axis=0))\n return reg_imp",
"def launch(self):\n self.target_node.activation += self.activation_to_add\n for n in self.target_node.linksOut.keys():\n Worker.pushRandom(Compute(n))\n super().launch()",
"def init_attn(self):\n self.ph_attn = attention.GlobalAttention(query_dim=self.hidden_size,\n key_dim=self.word_emb_size)\n return",
"def initialize_variables(self, sess: tf.Session) -> None:\n raise NotImplementedError"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Outputs the imputed sequences and auxiliary files.
|
def output_imputed(self, limpout):
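# Map internal variant indices back to the original VCF positions where they are available.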
for imputed in self.imputelist:
if indata.orig_vcf_pos:
imputed[1] = str(indata.orig_vcf_pos[int(imputed[1])])
else:
imputed[1] = str(imputed[1])
if verbose:
if len(self.imputelist) > 0:
print "Imputed Mutations"
print "SUBJECTID | VAR | FROM | TO | TYPE | IMPUTED | PASS"
for imputed in sorted(self.imputelist):
print " | ".join(imputed)
print "\n"
print impute.imputedseq
if limpout:
impoutfilename = indata.filebase + "-impout.txt"
impoutfile = open(impoutfilename, 'w')
if boot > 0 or runs > 0:
impoutfile.write("SUBJECTID\t VAR\t FROM\t TO\t PASS\tRATIO\tIMPUTED\n")
else:
impoutfile.write("SUBJECTID\t VAR\t FROM\t TO\t TYPE\tPASS\tIMPUTED\n")
for imputed in self.imputelist:
impoutfile.write("\t".join(imputed))
impoutfile.write("\n")
impoutfile.close()
indivoutfilename = indata.filebase + "-indivout.txt"
indivoutfile = open(indivoutfilename, 'w')
indivoutfile.write("SUBJECTID\tNUM\tVARS\n")
for indiv in sorted(self.indivimputes.keys()):
indivoutfile.write(indiv)
indivoutfile.write("\t")
indivoutfile.write(str(len(self.indivimputes[indiv])))
indivoutfile.write("\t")
for indivar in self.indivimputes[indiv]:
indivoutfile.write(str(indivar))
indivoutfile.write(",")
indivoutfile.write("\n")
indivoutfile.close()
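# Write the imputed sequences themselves, either as a minimal VCF or (the default) as FASTA.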
if outtype == "vcf":
outseqfile = indata.filebase + "-out.vcf"
outfile = open(outseqfile, 'w')
outfile.write("##fileformat=VCFv4.1\n")
outfile.write("##source=IMPUTORv1.0\n")
outfile.write("#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT ")
for seq in self.imputedseq:
outfile.write(str(seq.name))
outfile.write("\t")
outfile.write("\n")
for i in xrange(0, len(self.newvariants)):
if len(self.newvariants[i]) > 1:
outfile.write(indata.chroms[i])
outfile.write("\t")
outfile.write(indata.orig_vcf_pos[i])
outfile.write("\t.\t")
outfile.write(self.newvariants[i][0])
outfile.write("\t")
for j in xrange(1, len(self.newvariants[i])):
if j > 1:
outfile.write(",")
outfile.write(self.newvariants[i][j])
outfile.write("\t.\t.\t.\tGT\t")
for seq in self.imputedseq:
outfile.write(str(self.newvariants[i].index(seq.seq[i])))
outfile.write("\t")
outfile.write("\n")
else: # default to fasta
outseqfile = indata.filebase + "-seqout.fasta"
outfile = open(outseqfile, 'w')
outseq = {}
for seq in self.imputedseq:
outseq[seq.id] = str(seq.seq)
for x in sorted(outseq.keys()):
outfile.write(">")
outfile.write(str(x))
outfile.write("\n")
outfile.write(outseq[x])
outfile.write("\n")
outfile.close()
# bmfile = open("backmut.txt", 'w')
# bmfile.write("term\tvar\torigseq\torgseqchk\torigneighbors\tneighborseq\tbmkid\tkidseq\t\T/F\n")
# for bmchk in self.backmutchks:
# bmfile.write("\t".join(bmchk))
# bmfile.write("\n")
#
# nbfile = open("neighbors.txt", 'w')
# for nb in self.neighbors.keys():
# nbfile.write(str(nb))
# nbfile.write("\t:\t")
# for nbb in self.neighbors[nb]:
# nbfile.write(str(nbb))
# nbfile.write("\t")
# nbfile.write("\n")
|
[
"def _gen_aux(self, ssym):\n\n for asym in self.aux:\n if self.auxout > 0:\n self.hmm_file_ofp.write(\"%d\\t%d\\t%s\\t%s\\n\" % (ssym, ssym, asym, asym))\n else:\n self.hmm_file_ofp.write(\"%d\\t%d\\t%s\\t%s\\n\" % (ssym, ssym, self.eps, asym))\n return",
"def anotation(output):\r\n\r\n vcfs = obtener_nombre_ficheros(output + '/pileup/', 'vcf')\r\n for fichero in vcfs:\r\n os.system(\"awk '{{print $1, $2, $4, $5, $10}}' {0}/pileup/{1} > {0}/annotate/{1}\".format(output, fichero))\r\n os.system(\"sed -i 's/chr//g' {0}/annotate/{1}\".format(output, fichero))\r\n os.system(\"awk '{{print $1{2}$2{2}$2{2}$3{2}$4{2}$5}}' {0}/annotate/{1} > {0}/annotate/{1}_awk.vcf\".format(output, fichero,'\"\\\\t\"'))\r\n os.system(\"grep -v '#' {0}/annotate/{1}_awk.vcf > {0}/annotate/{1}_grep.vcf\".format(output,fichero))\r\n os.system(\"python genotipo.py -i {0}/annotate/{1}_grep.vcf -o {0}/annotate/{1}\".format(output,fichero))\r\n os.system(\"rm {0}/annotate/{1}_awk.vcf\".format(output,fichero))\r\n os.system(\"rm {0}/annotate/{1}_grep.vcf\".format(output,fichero))\r\n os.system(\"perl annovar/table_annovar.pl {0}/annotate/{1} annovar/humandb/ -buildver hg19 -out {0}/annotate/{1} -remove -protocol refGene,cytoBand,gnomad_exome,clinvar_20131105,exac03,avsnp147,dbnsfp30a -operation g,r,f,f,f,f,f -nastring . -csvout -polish -xref annovar/example/gene_fullxref.txt\".format(output,fichero))\r\n os.system(\"awk -f filtro_awk {0}/annotate/{1}.{2}_multianno.csv > {0}/annotate/{1}.{2}_multianno_filtrado.csv\".format(output,fichero,\"hg19\")\r\n os.system(\"python multianno_vcf_annot.py -i {0}/annotate/{1}.{2}_multianno_filtrado.csv -o {0}/annotate/{1}.{2}_multianno_filtrado_genot.csv -v {0}/annotate/{1}\".format(output,fichero,\"hg19\"))\r\n \r\ndef main():\r\n \"\"\"\r\n Funcion que ejecuta el programa.\r\n \"\"\"\r\n\r\n ext = \"fastq\"\r\n argum = argumentos()\r\n crear_directorios(argum.output)\r\n ficheros = obtener_nombre_ficheros(argum.input, ext)\r\n calidad_fichero(ficheros, argum.input, argum.output)\r\n trimming(ficheros, argum.input, argum.output, argum.type)\r\n alineamiento(argum.reference, argum.input, argum.output, argum.type, ext, argum.amplicon)\r\n variant_calling(argum.reference, argum.input, argum.output)\r\n anotation(argm.output)",
"def incrementOutputFiles(self):\n self.closeOutputFiles()\n \n self.output_file_count+=1\n \n self.createOutputFiles(self.output_tag)",
"def anarci_output(numbered, sequences, alignment_details, outfile, sequence_id=None, domain_id=None): \n assert (sequence_id is not None) or (sequence_id is None and domain_id is None), \"If domain_id is specified, sequence_id must also be specified.\"\n for i in range(len(numbered)):\n if sequence_id is None:\n outfile.write(\"# %s\\n\"%sequences[i][0]) # print the name\n if numbered[i] is not None:\n if sequence_id is not None:\n if i != sequence_id: continue\n outfile.write(\"# ANARCI numbered\\n\")\n for j in range( len(numbered[i])): # Iterate over domains\n if domain_id is not None:\n if j != domain_id: continue\n outfile.write(\"# Domain %d of %d\\n\"%(j+1, len(numbered[i]) ))\n outfile.write(\"# Most significant HMM hit\\n\")\n outfile.write(\"#|species|chain_type|e-value|score|seqstart_index|seqend_index|\\n\")\n alignment_details[i][j][\"evalue\"] = str( alignment_details[i][j][\"evalue\"] )\n outfile.write(\"#|%s|%s|%s|%.1f|%d|%d|\\n\"%tuple( [alignment_details[i][j][field] for field in \n [\"species\",\"chain_type\",\"evalue\",\"bitscore\"]] \n +[ numbered[i][j][1], numbered[i][j][2]] ))\n \n if 'germlines' in alignment_details[i][j]:\n outfile.write('# Most sequence-identical germlines\\n')\n outfile.write('#|species|v_gene|v_identity|j_gene|j_identity|\\n')\n (species, vgene), vid =alignment_details[i][j]['germlines'].get('v_gene', [['','unknown'],0])\n if vgene is None:\n vgene, vid = 'unknown', 0\n (_,jgene), jid =alignment_details[i][j]['germlines'].get('j_gene', [['','unknown'],0])\n if jgene is None:\n jgene, jid = 'unknown', 0\n outfile.write('#|%s|%s|%.2f|%s|%.2f|\\n'%(species, vgene, vid, jgene, jid )\t)\n chain_type = chain_type_to_class[ alignment_details[i][j][\"chain_type\"] ]\n outfile.write(\"# Scheme = %s\\n\"%alignment_details[i][j][\"scheme\"])\n if len( numbered[i][j][0] ) == 0:\n outfile.write(\"# Warning: %s scheme could not be applied to this sequence.\\n\"%alignment_details[i][j][\"scheme\"])\n for (index, insertion), aa in numbered[i][j][0]:\n outfile.write( \"%s %s %s %s\\n\" % (chain_type, (\"%d\"%index).ljust(5), insertion, aa))\n outfile.write(\"//\\n\")",
"def writeParAndInputFiles(self):\n pass",
"def main():\n # get arguments\n args = sys.argv[1:]\n if not args or len(args) == 3 or len(args) > 4:\n print(\"usage: n (-rand or -seq) [-mode filename]\", file=sys.stderr)\n sys.exit(1)\n\n n = int(args[0])\n hmm = True\n\n if args[1] == \"-rand\":\n hmm = False\n elif args[1] != \"-seq\":\n print(\"Error! Second argument must be -rand or -seq.\", file=sys.stderr)\n sys.exit(1)\n\n mode = 'w+'\n if len(args) == 4: \n mode_flag = args[2]\n mode = mode_flag[1:]\n output_filename = args[3]\n else:\n # filename format is: rand/seq-number_of_lines-date.txt \n output_filename = create_output_filename(hmm, str(n))\n\n\n\n # create list of vectors (stimuli to print to file)\n filenames = os.listdir(\"input-files/vecs\")\n vecs = []\n vecnames = []\n\n for f in filenames:\n match = re.search(\"vec[1-9]\\.txt\", f)\n if match:\n fullname = \"input-files/vecs/\" + match.group()\n vecnames.append(fullname)\n \n vecnames.sort()\n \n for v in vecnames:\n stim_vec = open(v, 'r').readline()\n vecs.append(stim_vec)\n \n # write to file\n out = open(output_filename, mode=mode)\n\n # if \"-rand\" provided, write n random stimuli\n if not hmm: \n write_random(out, n, vecs)\n # otherwise write using hidden markov model with temporal sequences\n else: \n t = 0\n while t < n:\n isSequence = random.choice([True, False, False, False])\n if isSequence:\n linesLeft = n - t\n if linesLeft > 3:\n linesLeft = 3\n\n write_seq(out, vecs, linesLeft)\n t += linesLeft \n else:\n write_random(out, 1, vecs)\n t += 1\n\n\n out.close()\n\n print(output_filename, file=sys.stdout)",
"def generate_output_file(final_model,out_name):\n\n\tout_name = str(out_name.strip())\n\t# If the output file is too big, we save it in \".mmcif\" format\n\tif len(list(final_model[0].get_atoms())) > 99999 or len(list(final_model[0].get_chains())) > 62:\n\t\tmmcif_IO = MMCIFIO()\n\t\tmmcif_IO.set_structure(final_model[0])\n\t\tmmcif_IO.save(out_name + \".cif\")\n\t# Otherwise, save it \".pdb\" format\n\telse:\n\t\tpdb_IO = PDBIO()\n\t\tpdb_IO.set_structure(final_model[0])\n\t\tpdb_IO.save(out_name + \".pdb\")",
"def create_forna_file(output_folder, origin, name, seq, structure):\n if origin == \"Real\":\n forna_file = '{}/{}_(Real).txt'.format(output_folder, name)\n else:\n forna_file = '{}/{}_({}_predicted).txt'.format(output_folder, name, origin)\n with open(forna_file, 'w') as output:\n if origin == \"Real\":\n output.write('>{}_Real'.format(name))\n else:\n output.write('>{}_{}_predicted'.format(name, origin))\n output.write('\\n')\n output.write(seq)\n output.write('\\n')\n output.write(structure)",
"def individual_seqs_and_outnames(self):\n\n from imgaug import augmenters as iaa\n\n augmentation_tasks = []\n augmenters = self.augmenters\n for name, augmentation in self.augmenters.items():\n augmentation_tasks.append([augmentation, name])\n\n return augmentation_tasks",
"def generate_testfiles(self):\n print(\"Opening files...\")\n data = self.open_test_files()\n print(\"Assemble and concat...\")\n testdata, labels = self.assemble_and_concat(**data)\n print(\"Removing nans and saving...\")\n self.remove_nans(testdata, labels)\n data = None\n labels = None",
"def annotate_iob(text_file, output_file):\n init_data = read_data('lexicon.tsv')\n data = select_data(init_data)\n text_dataframe = lemma_posttag(text_file)\n annotate(data, text_dataframe)\n annotation = construct_annotated_text(text_dataframe)\n iob_text = tagging_IOB(annotation)\n with open(output_file, 'w') as f:\n f.write(iob_text)\n print(\"Your file has been annotated and IOB_tagged.\")",
"def make_all_asciifiles():\n \n unicorn.catalogs.read_catalogs()\n from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit, zsp\n \n os.chdir('/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS_v1.6')\n \n fields = np.unique(phot.field)\n for field in fields:\n try:\n os.mkdir('ASCII/%s' %(field))\n except:\n pass\n \n fp = open('ASCII/failed.log','w')\n for i in range(len(zout.z_peak[0::3])):\n object = zout.id[0::3][i]\n field = phot.field[phot.idx][i]\n print unicorn.noNewLine+object\n try:\n unicorn.analysis.make_eazy_asciifiles(object=object, eazy_output='./OUTPUT/', savepath='./ASCII/%s' %(field))\n except:\n fp.write(object+'\\n')\n #\n fp.close()",
"def output(self, eta, percent, file, filenum=1, total_files=1):\n pass",
"def write_output(options, models):\n writer = select_writer(options.aprolog)\n map(writer, models)",
"def create_parallel_files(infilename, outfile_prefix, output_blank_for_failure=False):\n data = load_serialized_from_file(infilename)\n sys.stderr.write('Deserializing and processing {} graphs.'.format(len(data)))\n sys.stderr.write('Using Moses tokenization from the nltk package.\\n')\n with io.open(get_src_filename(outfile_prefix), 'w', encoding='utf8') as outfile_src, \\\n io.open(get_tgt_filename(outfile_prefix), 'w', encoding='utf8') as outfile_tgt, \\\n io.open(get_anon_filename(outfile_prefix), 'w', encoding='utf8') as outfile_anon, \\\n io.open(get_orig_filename(outfile_prefix), 'w', encoding='utf8') as outfile_orig:\n sys.stderr.write(\n 'Writing serialized graphs to {}.\\n'.format(os.path.abspath(outfile_src.name)))\n sys.stderr.write(\n 'Writing tokenized sentences to {}.\\n'.format(os.path.abspath(outfile_tgt.name)))\n sys.stderr.write(\n 'Writing anonymization map to {}.\\n'.format(os.path.abspath(outfile_anon.name)))\n sys.stderr.write(\n 'Writing original sentences to {}.\\n'.format(os.path.abspath(outfile_orig.name)))\n num_written = 0\n num_skipped = 0\n for label, penman_serialized in data:\n try:\n # treat unknowns same as named tokens so they'll be copied exactly\n penman_serialized = re.sub(r'_([^\\s]+)\\/(.*?_unknown)', r'UNK\\1 :carg \"\\1\"', penman_serialized)\n # simplify, linearize, and anonymize graphs\n linearized, anon_map = preprocess_penman(penman_serialized)\n # tokenize and anonymize sentences (assumes last comment is sentence)\n sentence = label.split('# ::snt ')[-1].strip()\n outfile_tgt.write('{}\\n'.format(preprocess_sentence(sentence, anon_map))) # modifies anon_map\n outfile_src.write('{}\\n'.format(linearized))\n # store anonymization info for use in postprocessing\n outfile_anon.write('{}\\n'.format(json.dumps(anon_map)))\n # also write original sentence, which will be compared against during eval\n outfile_orig.write('{}\\n'.format(_normalize_sentence(sentence)))\n num_written += 1\n except Exception as e:\n sys.stderr.write(\n 'Deserialization failed for {}, skipping. Error was: {}\\n'.format(label, e))\n num_skipped += 1\n if output_blank_for_failure:\n outfile_src.write('\\n')\n outfile_tgt.write('\\n')\n outfile_anon.write('[]\\n')\n outfile_orig.write('\\n')\n ratio_skipped = float(num_skipped) / num_written\n sys.stderr.write(\n 'Linearized {} graphs. Skipped {} due to deserialization errors ({}).\\n'.format(\n num_written, num_skipped, ratio_skipped))",
"def run(inputFile,outputDirectory):\n structures=compute(inputFile)\n\n family=inputFile.split('/').pop().split('.')[0]+'.fasta'\n outfile = os.path.join(outputDirectory,family)\n with open (outfile,'w') as OUTPUT:\n for name,sequences in structures.items():\n print >>OUTPUT,name\n print >>OUTPUT,sequences[0].strip()\n print >>OUTPUT,sequences[1].strip()\n OUTPUT.closed\n return outfile",
"def generate_mps_files(self):\n print(\"starting mps generation\")\n # setting antares options\n print(\"-- pre antares\")\n self.pre_antares()\n # launching antares\n print(\"-- launching antares\")\n antares_output_name = self.launch_antares()\n # writting things\n print(\"-- post antares\")\n lp_path = self.post_antares(antares_output_name)\n return lp_path",
"def write_exams(output_directory,base_filename, separate_files,library, K, Q, V, A, min_pages, one_page_per_q): \n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n \n output_filename = os.path.join(output_directory, base_filename )\n \n log_and_print(\"Writing randomized exams : %s\" % output_filename)\n (N_e, N_Q, N_A) = A.shape\n\n flat_questions = list(itertools.chain.from_iterable([zone.questions for zone in library.zones]))\n\n #Todo: This code would be tidier if we assigned the numerous output lines to strings first . \n #Then you could see the iteration logic below compressed into 10 lines instead of 100\n \n for ei in range(N_e):\n if separate_files :\n # exam.tex -> exam_1.tex\n output_filename_full = (\"_\"+str(ei+1)+\".\").join( output_filename.rsplit(\".\") )\n\n elif ei == 0 :\n output_filename_full = output_filename\n \n if(output_filename_full) :\n out_f = open( output_filename_full ,\"w\")\n output_filename_full = \"\"\n \n out_f.write(library.preamble + \"\\n\\n\")\n\n out_f.write(r\"\\begin{document}\" + \"\\n\")\n out_f.write(\"\\n\")\n \n out_f.write(r\"\\newcount\\examnumber\" + \"\\n\") # set in write_one_student_exam\n out_f.write(r\"\\newcount\\questioncountall\" + \"\\n\")\n out_f.write(r\"\\questioncountall=%d\" % len(flat_questions))\n out_f.write(\"\\n\")\n \n# Can't declare Tex variables with numbers in , perhaps someone will continue to develop this...\n# for (i_zone, zone) in enumerate(library.zones): \n# out_f.write(r\"\\newcount\\questioncountzone%d\" % (i_zone + 1) )\n# out_f.write(\"\\n\")\n# out_f.write(r\"\\questioncountzone%d=%d\" % ( i_zone + 1 ,len(zone.questions) ))\n# out_f.write(\"\\n\") \n\n out_f.write(r\"\\newcount\\maxrawpages\" + \"\\n\")\n out_f.write(r\"\\newcount\\maxpadpages\" + \"\\n\")\n out_f.write(r\"\\newcount\\minpadpages\" + \"\\n\") \n out_f.write(r\"\\maxrawpages=0\" + \"\\n\")\n out_f.write(r\"\\maxpadpages=0\" + \"\\n\")\n out_f.write(r\"\\minpadpages=1000000\" + \"\\n\")\n out_f.write(r\"\\newcount\\padcount\" + \"\\n\")\n\n \n \n \n write_one_student_exam(out_f,ei,library,flat_questions, K, Q, V, A, one_page_per_q)\n \n\n out_f.write(\"\\n\")\n if(separate_files) :\n out_f.write(r\"\\end{document}\" + \"\\n\")\n out_f.close()\n out_f = None\n else: \n # Ensure all exams are even in length and have padding pages if necessary\n out_f.write(r\"\\ifnum\\maxrawpages<\\thepage \\maxrawpages=\\thepage\\fi\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\ifnum\\thepage<%d\" % min_pages + \"\\n\")\n out_f.write(r\"\\padcount=\\thepage\" + \"\\n\")\n out_f.write(r\"\\loop\" + \"\\n\")\n out_f.write(r\"\\newpage \\ \\par \\vspace*{\\fill}\\centerline{This page is intentionally left blank.}\\vspace*{\\fill}\" + \"\\n\")\n out_f.write(r\"\\advance \\padcount 1\" + \"\\n\")\n out_f.write(r\"\\ifnum \\padcount<%d\" % min_pages + \"\\n\")\n out_f.write(r\"\\repeat\" + \"\\n\")\n out_f.write(r\"\\fi\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\ifnum\\maxpadpages<\\thepage \\maxpadpages=\\thepage\\fi\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\ifnum\\minpadpages>\\thepage \\minpadpages=\\thepage\\fi\" + \"\\n\")\n out_f.write(\"\\n\")\n\n if(not(separate_files)) :\n out_f.write(\"\\n\")\n out_f.write(r\"\\cleardoublepage\" + \"\\n\")\n out_f.write(r\"\\message{Max raw (unpadded) length: \\the\\maxrawpages.}\" + \"\\n\")\n out_f.write(r\"\\message{Max padded length: \\the\\maxpadpages.}\" + \"\\n\")\n out_f.write(r\"\\message{Min padded length: \\the\\minpadpages.}\" + \"\\n\")\n 
out_f.write(r\"\\ifnum\\maxpadpages>\\minpadpages \\message{WARNING: exams are not all the same length.}\" + \"\\n\")\n out_f.write(r\"\\else\\message{Exams are all the same length.}\\fi\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\end{document}\" + \"\\n\")\n out_f.close()\n out_f = None\n \n log(\"Successfully completed writing randomized exams\")",
"def generate_imi(out, ll) :\n out.write(\"var\\n\")\n out.write(\" \")\n if print_list(out, lambda x : x.gen_clocks(), ll) : out.write(\" : clock;\\n \")\n if print_list(out, lambda x : x.gen_discrete(), ll) : out.write(\" : discrete;\\n \")\n if print_list(out, lambda x : x.gen_parameters(), ll) : out.write(\" : parameter;\\n \")\n out.write(\"\\n\\n\")\n\n for x in ll :\n out.write(x.gen_automaton())\n out.write(\"\\n\\n\")\n\n out.write(\"var init: region;\\n\\n\")\n out.write(\"init := \")\n\n for x in ll :\n out.write(x.gen_init())\n out.write(x.gen_init_param())\n out.write(\" True;\\n\")\n out.write(\"property := unreachable loc[OBS_dline] = dline_loc_miss\\n\")\n out.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build path with endpoint and args
|
def _build_path(self, *args):
return '/'.join(chain((self.endpoint,), map(str, args)))
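A hedged usage sketch of the builder above; the Api class and its endpoint value are hypothetical and exist only to give self.endpoint something to join onto. Every positional argument is stringified and joined with '/'.

from itertools import chain

class Api:
    endpoint = 'https://example.com/v1'

    def _build_path(self, *args):
        return '/'.join(chain((self.endpoint,), map(str, args)))

# -> 'https://example.com/v1/users/42/posts'
print(Api()._build_path('users', 42, 'posts'))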
|
[
"def __url_builder(self, endpoint: str, **kwargs: dict) -> str:\n\n endpoint = self.__clean_endpoints_string(endpoint)\n if kwargs != {}:\n endpoint = endpoint.format(**kwargs)\n elif type(endpoint) == tuple:\n endpoint = endpoint[0]\n endpoint += \"&api_key={}\".format(API_KEY)\n return endpoint",
"def _api_path(self, endpoint=None):\n path = ''\n if endpoint is not None:\n path += '{0}'.format(endpoint.lstrip('/'))\n path = path.replace('api/tm/', '')\n if not path.startswith(self.api_version):\n path = '/'.join([self.api_version, path])\n\n return path",
"def _make_path(*parts):\n # TODO: maybe only allow some parts to be lists/tuples ?\n return \"/\" + \"/\".join(\n # preserve ',' and '*' in url for nicer URLs in logs\n quote(_escape(p), b\",*\")\n for p in parts\n if p not in SKIP_IN_PATH\n )",
"def build_url(self, endpoint: str, use_api: bool = False) -> str:\n\n if use_api:\n url = self.api_resource + endpoint\n else:\n url = self.resource + endpoint\n\n return url",
"def construct_path(path, format, *args):\n\treturn os.path.join(path.format(**format), *args)",
"def _ger_full_url(self, endpoint):\r\n return '{}{}{}'.format(self.url, self._base_path, endpoint)",
"def build_url(self, template: str, **kwargs) -> str:\n quoted = {k: quote(v) for k, v in kwargs.items()}\n suffix = template.format(**quoted).lstrip(\"/\")\n return \"{prefix}/{suffix}\".format(prefix=self.api_root, suffix=suffix)",
"def _generate_url(endpoint):\n\n if is_collection(endpoint):\n resource = map(str, endpoint)\n resource = '/'.join(endpoint)\n else:\n resource = endpoint\n\n return (settings.base_url + resource)",
"def test_build_uri_slashs(self):\n iiq = insightiq_api.InsightiqApi(username='pat', password='a')\n\n value = iiq._build_uri('/someEndpoint')\n expected = 'https://localhost/someEndpoint'\n\n self.assertEqual(value, expected)",
"def uri_path(suffix, *versions):\n prefix = uri_path_prefix(*versions)\n return '{0}/{1}'.format(prefix, suffix)",
"def make_api_url(args, settings):\n base_url=\"http://api.wunderground.com/api/%s/\" % settings.api_key\n\n # Create a location string, or use autoip\n query=\"q/%s.json\"\n if args.location:\n query = query % \"_\".join(args.location);\n else:\n query = query % \"autoip\"\n\n return base_url + make_query_path(args) + query",
"def path(self, *args) -> str:\n path = self.base_folder\n for arg in args:\n path = path / arg\n return str(path.absolute())",
"def render_path(path: str, args: ArgsDict) -> str:\n LOG.debug('RENDERING PATH FROM: %s, %s', path, args)\n result = path\n matches = re.search(r'{([^}.]*)}', result)\n while matches:\n path_token = matches.group(1)\n if path_token not in args:\n raise ValueError(\"Missing argument %s in REST call.\" % path_token)\n result = re.sub('{%s}' % path_token, str(args[path_token]), result)\n matches = re.search(r'{([^}.]*)}', result)\n return result",
"def make_uri(base, *args, **kwargs):\r\n\r\n # get encoding parameters\r\n charset = kwargs.pop(\"charset\", \"utf-8\")\r\n safe = kwargs.pop(\"safe\", \"/:\")\r\n encode_keys = kwargs.pop(\"encode_keys\", True)\r\n\r\n base_trailing_slash = False\r\n if base and base.endswith(\"/\"):\r\n base_trailing_slash = True\r\n base = base[:-1]\r\n retval = [base]\r\n\r\n # build the path\r\n _path = []\r\n trailing_slash = False\r\n for s in args:\r\n if s is not None and isinstance(s, six.string_types):\r\n if len(s) > 1 and s.endswith('/'):\r\n trailing_slash = True\r\n else:\r\n trailing_slash = False\r\n _path.append(url_quote(s.strip('/'), charset, safe))\r\n\r\n path_str =\"\"\r\n if _path:\r\n path_str = \"/\".join([''] + _path)\r\n if trailing_slash:\r\n path_str = path_str + \"/\"\r\n elif base_trailing_slash:\r\n path_str = path_str + \"/\"\r\n\r\n if path_str:\r\n retval.append(path_str)\r\n\r\n params_str = url_encode(kwargs, charset, encode_keys)\r\n if params_str:\r\n retval.extend(['?', params_str])\r\n\r\n return ''.join(retval)",
"def build_path(self, *p):\n return self.env_path('build', 's2e', *p)",
"def api_path(base_url, path):\n if base_url.endswith('/'):\n base_url = base_url[:-1]\n\n if path.startswith('/'):\n path = path[1:]\n\n return \"{}/{}\".format(base_url, path)",
"def build_url(server_context, controller, action, container_path=None):\n return server_context.build_url(controller, action, container_path=container_path)",
"def _build_url(self, route):\n return \"{0}/{1}\".format(self.base_url, route)",
"def create_api_invocation_uri(self):\n self.constants[\"API_INVOCATION_URI\"] = (\n \"arn:aws:apigateway:%s:lambda:\"\n \"path/2015-03-31/functions/%s/invocations\"\n ) % (self.region, self.constants[\"LAMBDA_FUNCTION_ARN\"])",
"def create_path(stack):\n return '/' + u'/'.join(reversed(stack))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a set of pings, calculate the echometrics values based on the depth bin provided.
|
def calculate_echometrics(frame_buffers, depth_to_bin):
depth_to_bin = int(depth_to_bin)
    logger.info('depth to bin is %s', depth_to_bin)
num_beams = len(frame_buffers)
num_samples = frame_buffers[0].num_samples[0]
max_range = frame_buffers[0].range_max_m[0]
min_range = frame_buffers[0].range_min_m[0]
first_ping = frame_buffers[0].ping_num[0]
last_ping = frame_buffers[-1].ping_num[0]
freq_khz = frame_buffers[0].freq_hz[0] / 1000.0
pulse_len = frame_buffers[0].pulselen_microsec[0] * 10**-6
echogram = np.zeros((len(frame_buffers), num_samples))
for i in range(len(frame_buffers)):
echogram[i] = frame_buffers[i].image
echogram = echogram.T
bg_removed_echogram = background_removal(np.copy(echogram), freq_khz, min_range, \
max_range, pulse_len, 1486.0)
#logger.info(' -- Generating New Echogram')
    # bin the water column into depth_to_bin-metre intervals (TODO: drive the bin size from an env variable)
samples_per_meter = num_samples / (max_range - min_range)
start_sample = 0
sample_extent = int(np.round(depth_to_bin * samples_per_meter))
stop_sample = num_samples - 1 - sample_extent
intervals = range(start_sample, stop_sample, sample_extent)
logger.info(' -- Calculating Metrics')
index = np.arange(num_beams) + 1 # TODO: change to ping bins
range_step = np.absolute(max_range - min_range) / num_samples
metrics = dict(depth_integral=[],
avg_sv=[],
center_of_mass=[],
inertia=[],
proportion_occupied=[],
aggregation_index=[],
equivalent_area=[])
for interval in intervals:
depths = np.array([((i * range_step) + min_range) for i in range(interval, interval + sample_extent)])
subbg = bg_removed_echogram[interval: interval+sample_extent]
echo = echometrics.Echogram(subbg, depth=depths, index=index)
        metrics['depth_integral'].append(np.average(echometrics.depth_integral(echo)))
        metrics['avg_sv'].append(np.average(echometrics.sv_avg(echo)))
        metrics['center_of_mass'].append(np.average(echometrics.center_of_mass(echo)))
        metrics['inertia'].append(np.average(echometrics.inertia(echo)))
        metrics['proportion_occupied'].append(np.average(echometrics.proportion_occupied(echo)))
        metrics['aggregation_index'].append(np.average(echometrics.aggregation_index(echo)))
        metrics['equivalent_area'].append(np.average(echometrics.equivalent_area(echo)))
    # average each metric across all depth-bin intervals before returning
    for k in metrics.keys():
        metrics[k] = np.mean(metrics[k])
return metrics
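A hedged, standalone sketch of the depth-binning arithmetic used above, with made-up numbers (1000 samples spanning 1-51 m, 5 m bins); it only illustrates how samples_per_meter, sample_extent and the interval start samples are derived.

import numpy as np

num_samples, min_range, max_range, depth_to_bin = 1000, 1.0, 51.0, 5
samples_per_meter = num_samples / (max_range - min_range)        # 20 samples per metre
sample_extent = int(np.round(depth_to_bin * samples_per_meter))  # 100 samples per depth bin
stop_sample = num_samples - 1 - sample_extent
intervals = range(0, stop_sample, sample_extent)
print(list(intervals))  # [0, 100, 200, ..., 800]: start sample of each 5 m bin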
|
[
"def sumBinEnergies( binnedPoints, factor=1000 ):\n\n energies = []\n\n for tBin in binnedPoints:\n if tBin.size > 0:\n energies.append(sum(tBin[:,3]*factor))\n else:\n energies.append(0)\n\n return energies",
"def calc_phase_hist_data(property, snap, bin_nums=100):\n\n\t# Set up x and y data, limits, and bins\n\tG = snap.loadpart(0)\n\tnH_data = G.get_property('nH')\n\tT_data = G.get_property('T')\n\tnH_bin_lims = config.PROP_INFO['nH'][1]\n\tT_bin_lims = config.PROP_INFO['T'][1]\n\tif config.PROP_INFO['nH'][2]:\n\t\tnH_bins = np.logspace(np.log10(nH_bin_lims[0]),np.log10(nH_bin_lims[1]),bin_nums)\n\telse:\n\t\tnH_bins = np.linspace(nH_bin_lims[0], nH_bin_lims[1], bin_nums)\n\tif config.PROP_INFO['T'][2]:\n\t\tT_bins = np.logspace(np.log10(T_bin_lims[0]),np.log10(T_bin_lims[1]),bin_nums)\n\telse:\n\t\tT_bins = np.linspace(T_bin_lims[0], T_bin_lims[1], bin_nums)\n\n\tif property in ['M_H2','M_gas']:\n\t\tfunc = np.sum\n\telse:\n\t\tfunc = np.mean\n\tbin_data = G.get_property(property)\n\tret = binned_statistic_2d(nH_data, T_data, bin_data, statistic=func, bins=[nH_bins, T_bins])\n\t# Need to catch case were np.sum is given empty array which will return zero\n\tif property in ['M_H2','M_gas']:\n\t\tret.statistic[ret.statistic<=0] = np.nan\n\n\treturn ret",
"def evaluate_performance_penetrance_bins(controls_file, df, identifier, output):\n\n bin_df = pd.read_csv(controls_file)\n bin_df = lower_column_names(bin_df)\n\n if 'bin' in bin_df.columns.values:\n # Remove strains not screened\n mask = np.array([1 if (x in df[identifier]) else False for x in bin_df[identifier]])\n bin_df = bin_df[mask == 1]\n bin_df = bin_df.reset_index(drop=True)\n\n # Initialize output dataframe for penetrance bins\n bin_df_out = pd.DataFrame(columns=bin_df.columns.values.tolist() + ['penetrance', 'predicted_bin', 'p_value',\n 'num_cells', 'num_wells'])\n this_row = 0\n for i in range(len(bin_df)):\n # Extract strain/condition information and calculated penetrance (1 - negative%)\n strain = bin_df.iloc[i, bin_df.columns.get_loc(identifier)]\n penetrance = df[df[identifier] == strain]['penetrance'].values[0]\n\n # Predict penetrance bin\n if (penetrance > 80) and (penetrance <= 100):\n predicted_bin = 1\n elif (penetrance > 60) and (penetrance <= 80):\n predicted_bin = 2\n elif (penetrance > 40) and (penetrance <= 60):\n predicted_bin = 3\n elif (penetrance > 20) and (penetrance <= 40):\n predicted_bin = 4\n else:\n predicted_bin = 0\n\n # Gather predicted penetrance bin and other information\n line = bin_df.iloc[i, :].tolist()\n line.append(penetrance)\n line.append(predicted_bin)\n line.append(df[df[identifier] == strain]['p_value'].values[0])\n line.append(df[df[identifier] == strain]['num_cells'].values[0])\n line.append(df[df[identifier] == strain]['num_wells'].values[0])\n bin_df_out.loc[this_row, ] = line\n this_row += 1\n\n # Save results\n bin_df_out.to_csv(path_or_buf=output['PenetranceBins'], index=False)\n\n else:\n print('\\nNo penetrance bins for this screen!')\n\n print('\\n\\n')",
"def bins_to_depth(depth_bin):\n if type(depth_bin).__module__ != torch.__name__:\n depth_bin = torch.tensor(depth_bin, dtype=torch.float32).cuda()\n depth_bin = depth_bin.permute(0, 2, 3, 1) #[b, h, w, c]\n if type(cfg.DATASET.DEPTH_BIN_BORDER).__module__ != torch.__name__:\n cfg.DATASET.DEPTH_BIN_BORDER = torch.tensor(cfg.DATASET.DEPTH_BIN_BORDER, dtype=torch.float32).cuda()\n depth = depth_bin * cfg.DATASET.DEPTH_BIN_BORDER\n depth = torch.sum(depth, dim=3, dtype=torch.float32, keepdim=True)\n depth = 10 ** depth\n depth = depth.permute(0, 3, 1, 2) # [b, 1, h, w]\n return depth",
"def analyze_depths(eq_dict):\r\n\r\n # Put depths into list\r\n line = []\r\n depths = []\r\n for i in range(len(eq_dict)):\r\n line = eq_dict[i + 1]\r\n depths.append(line[3])\r\n\r\n # Mean\r\n fmt = \"Mean depth = {:.1f} Mi\"\r\n print(fmt.format(data_mean(depths)))\r\n\r\n # Median\r\n fmt = \"Median depth = {} Mi\"\r\n print(fmt.format(data_median(depths)))\r\n\r\n # Standard Deviation\r\n variance = data_mean_variance(depths)\r\n standard_deviation = math.sqrt(variance[1])\r\n fmt = \"Standard deviation = {:.2f} Mi\"\r\n print(fmt.format(standard_deviation))\r\n\r\n # Data Frequency Table\r\n data_freq_table(depths)",
"def FindRisingEdge(floor, numevent, file, verbosity = 0):\n\n left_hand = []\n\n for j in range(0, NUMCHANS):\n h = file.Get(\"PedSubWvfms/hPedSubWvfm_ch{chan:02d}_ev{ne:06d}\".format(chan=j,ne=numevent))\n hists[j]=h\n\n for ch,h in hists.iteritems():\n numofbins = h.GetNbinsX()\n X = INVALID\n for k in range(1, numofbins+1):\n amp = h.GetBinContent(k)\n befk = k-1\n x1offset = 0\n if k == 1 : #compensates for pulses that begin in the first bin\n befk = numofbins\n x1offset = numofbins\n if (amp > floor and h.GetBinContent(befk) < floor): #the second parameter avoids the code getting confused with fluctuations in the pulse peak\n x1 = h.GetBinCenter(befk) - x1offset\n x2 = h.GetBinCenter(k)\n y1 = h.GetBinContent(befk)\n y2 = h.GetBinContent(k)\n m = (y2 - y1) / (x2 - x1)\n if verbosity > 10 :\n print \"ch= \", ch, \": x1=\", x1, \";y1=\", y1, \";x2 =\", x2, \";y2 =\",y2, \";m=\", m, \";k= \", k\n if (m > 0): #the is statement passes only a positive slope so that the correct risingedge is found\n X = (floor + m*x1 - y1) / m\n \n break\n left_hand.append(X)\n return left_hand",
"def calc_binned_property_vs_property(property1, property2, snap, bin_nums=50, prop_lims=None):\n\n\tif property1 in ['sigma_star','sigma_stellar','stellar_Z','age'] or \\\n\t property2 in ['sigma_star','sigma_stellar','stellar_Z','age']:\n\t\tptype = 4\n\telse:\n\t\tptype = 0\n\n\tP = snap.loadpart(ptype)\n\t# Get property data\n\tdata = np.zeros([2,P.npart])\n\tweights = P.get_property('M')\n\tfor i, property in enumerate([property1,property2]):\n\t\tdata[i] = P.get_property(property)\n\n\tif prop_lims is None:\n\t\tprop_lims = config.PROP_INFO[property2][1]\n\t\tlog_bins = config.PROP_INFO[property2][2]\n\telse:\n\t\tif prop_lims[1] > 30*prop_lims[0]: \tlog_bins=True\n\t\telse:\t\t\t\t\t\t\t\tlog_bins=False\n\n\tbin_vals, mean_DZ, std_DZ = math_utils.bin_values(data[1], data[0], prop_lims, bin_nums=bin_nums, weight_vals=weights,\n\t\t\t\t\t\t\t\t\t\t\t\t log=log_bins)\n\n\treturn bin_vals, mean_DZ, std_DZ",
"def pigeonhole_bounded(num_balls, num_bins, max_balls_per_bin):\n lim = int(min(num_bins, 1.*num_balls / (max_balls_per_bin+1)))\n val = 0\n for q in range(lim+1):\n sgn = (-1)**q\n term_1 = comb(num_bins, q)\n term_2 = comb(num_balls - q*(max_balls_per_bin + 1) + num_bins - 1, num_bins - 1)\n val += sgn * term_1 * term_2\n return val",
"def shell_pressure_stats(self, elements, dr, normal=False):\n self.stats(dr, normal=normal)\n\n print( \"NNNNNumber of bins: {}\".format(self.nbins) )\n\n if not normal:\n # atom.stress has 3 elements, xx yy zz components\n if self.use_atomic_volume:\n if self.average_on_atom:\n # atomic volume is used, pressure is calculated for each atom and then averaged together\n stress = []\n for idx, shell_atoms in enumerate(self._shell_atom_objs):\n pressure_raw = {}\n for element, atoms in shell_atoms.iteritems():\n if element in elements:\n # P = -(S_xx + S_yy + S_zz)/3/V\n pressure_raw[element] = [ - sum(atom.stress)/atom.voro_volume/3.0 for atom in atoms ]\n # Average pressure = sum(Pressure)/n_atoms\n n_atoms = sum( len(_ele) for _ele in pressure_raw.values() )\n if n_atoms != 0:\n pressure_ave = sum( sum(_ele) for _ele in pressure_raw.values() ) / n_atoms\n else:\n pressure_ave = 0\n stress.append(pressure_ave)\n return stress\n else:\n # pressure is calculated as sum(atom stress in a shell) / sum(atom volume in a shell)\n stress = []\n for idx, shell_atoms in enumerate( self._shell_atom_objs ):\n stress_all = 0\n volume_all = 0\n for element, atoms in shell_atoms.iteritems():\n if element in elements:\n stress_all += sum( sum(atom.stress[:3]) for atom in atoms )\n volume_all += sum( atom.voro_volume for atom in atoms )\n if volume_all != 0:\n pressure_ave = - stress_all / 3.0 / volume_all\n else:\n pressure_ave = 0\n stress.append( pressure_ave )\n return stress\n else:\n # use shell volume\n stress = [ ]\n for idx, shell_atoms in enumerate( self._shell_atom_objs ):\n r_min, r_max = idx * dr, (idx + 1)*dr\n stress_all = 0\n volume_all = self.vol_sphere(r_max) - self.vol_sphere(r_min)\n for element, atoms in shell_atoms.iteritems():\n if element in elements:\n stress_all += sum( sum( atom.stress[:3] ) for atom in atoms )\n pressure_ave = - stress_all / 3.0 / volume_all\n stress.append( pressure_ave )\n return stress\n else:\n # normal pressure, atom.spherical_stress has 6 items: xx, yy, zz, xy, xz, yz.\n stress_r = []\n stress_theta = []\n stress_phi = []\n\n if self.use_atomic_volume:\n\n if self.average_on_atom:\n # Pressure is calculate as average of pressure on each atom\n for idx, shell_atoms in enumerate( self._shell_atom_objs ):\n pressure_r_raw = {}\n pressure_theta_raw = {}\n pressure_phi_raw = {}\n for element, atoms in shell_atoms.iteritems():\n if element in elements:\n pressure_r_raw[element] = [ - atom.spherical_stress[0][0] / atom.voro_volume for atom in atoms ]\n pressure_theta_raw[element] = [ - atom.spherical_stress[1][1] / atom.voro_volume for atom in atoms ]\n pressure_phi_raw[element] = [ - atom.spherical_stress[2][2] / atom.voro_volume for atom in atoms ]\n\n n_atoms = sum( len( _ele ) for _ele in pressure_r_raw.values() )\n if n_atoms != 0:\n pressure_r_ave = sum( sum(_ele) for _ele in pressure_r_raw.values() ) / n_atoms\n pressure_theta_ave = sum( sum(_ele) for _ele in pressure_theta_raw.values() ) / n_atoms\n pressure_phi_ave = sum( sum(_ele) for _ele in pressure_phi_raw.values() ) / n_atoms\n else:\n pressure_r_ave = pressure_theta_ave = pressure_phi_ave = 0\n\n stress_r.append( pressure_r_ave )\n stress_theta.append( pressure_theta_ave )\n stress_phi.append( pressure_phi_ave )\n return { 'r': stress_r, 'theta': stress_theta, 'phi': stress_phi, }\n\n else:\n # Pressure is calculated as sum(stress)/sum(atomic_volume)\n for idx, shell_atoms in enumerate( self._shell_atom_objs ):\n stress_r_all = 0\n stress_theta_all = 0\n stress_phi_all = 0\n volume_all = 0\n\n for 
element, atoms in shell_atoms.iteritems():\n if element in elements:\n stress_r_all += sum( atom.spherical_stress[0][0] for atom in atoms )\n stress_theta_all += sum( atom.spherical_stress[1][1] for atom in atoms )\n stress_phi_all += sum( atom.spherical_stress[2][2] for atom in atoms )\n volume_all += sum( atom.voro_volume for atom in atoms )\n if volume_all != 0:\n pressure_r_ave = - stress_r_all / volume_all\n pressure_theta_ave = - stress_theta_all / volume_all\n pressure_phi_ave = - stress_phi_all / volume_all\n else:\n pressure_r_ave = pressure_theta_ave = pressure_phi_ave = 0\n\n stress_r.append( pressure_r_ave )\n stress_theta.append( pressure_theta_ave )\n stress_phi.append( pressure_phi_ave )\n return { 'r': stress_r, 'theta': stress_theta, 'phi': stress_phi, }\n else:\n # Use shell volume\n for idx, shell_atoms in enumerate( self._shell_atom_objs ):\n r_min, r_max = idx * dr, (idx+1) * dr\n stress_r_all = 0\n stress_theta_all = 0\n stress_phi_all = 0\n volume_all = self.vol_sphere(r_max) - self.vol_sphere(r_min)\n\n for element, atoms in shell_atoms.iteritems():\n if element in elements:\n stress_r_all += sum( atom.spherical_stress[ 0 ][ 0 ] for atom in atoms )\n stress_theta_all += sum( atom.spherical_stress[ 1 ][ 1 ] for atom in atoms )\n stress_phi_all += sum( atom.spherical_stress[ 2 ][ 2 ] for atom in atoms )\n\n pressure_r_ave = - stress_r_all / volume_all\n pressure_theta_ave = - stress_theta_all / volume_all\n pressure_phi_ave = - stress_phi_all / volume_all\n\n stress_r.append( pressure_r_ave )\n stress_theta.append( pressure_theta_ave )\n stress_phi.append( pressure_phi_ave )\n return { 'r': stress_r, 'theta': stress_theta, 'phi': stress_phi, }",
"def volume_bins(photon_paths, bin_range, shape, \n bin_type = 'fluence', smooth = False, mu_a = 1.0, mu_s = 0.0, \n event_type = 1, \n detector_perp = (1.0, 0.0, 0.0), detector_para = (0.0, 1.0, 0.0), detector_normal = (0.0, 0.0, 1.0), \n minAcceptanceCosine = 0.0, maxAcceptanceCosine = 1.0,\n count_event_number = False):\n \n def bin_index(pos, bin_range, shape):\n # calculate ND index mapping position pos into an ND-array with specified coordinate ranges and shape.\n # notes: \n # -- bin_range tuples shall be ordered\n # -- out-of-range positions return \"None\"\n \n nx = [int(floor((pos[n] - bin_range[n][0])/(bin_range[n][1] - bin_range[n][0]) * float(shape[n]))) for n in range(len(shape))]\n\n if (any([n < 0 for n in nx]) or any([nx[n] >= shape[n] for n in range(len(nx))])):\n nx = None\n else:\n nx = tuple(nx) # support index \"by-tuple\" (i.e. for many containers: \"by-list\" indexing doesn't work)\n \n return nx\n \n if (bin_type == 'fluence'):\n bFluence = True\n elif (bin_type == 'energy'):\n bFluence = False\n else:\n raise RuntimeError, 'photon_path: volume_bins: unknown accumulation type %s requested' % bin_type\n \n # order the acceptance-cosine tuple:\n acceptanceCosine = (min(minAcceptanceCosine, maxAcceptanceCosine), max(minAcceptanceCosine, maxAcceptanceCosine))\n \n # extinction coefficient:\n mu_t = mu_a + mu_s \n \n # ----------- return values: ---------------------------------------------- \n bins = zeros(shape,dtype=float) \n \n # detector_fluence is in \"polmc_data\" \"Stokes_data\" format (but containing scalar entries instead of arrays):\n if not count_event_number:\n detector_fluence = {'I':0.0, 'Q':0.0, 'U':0.0, 'V':0.0, \n 'minAcceptanceCosine':minAcceptanceCosine, \n 'maxAcceptanceCosine':maxAcceptanceCosine} \n else:\n detector_fluence = \n [ {'I':0.0, 'Q':0.0, 'U':0.0, 'V':0.0, \n 'minAcceptanceCosine':minAcceptanceCosine, \n 'maxAcceptanceCosine':maxAcceptanceCosine} for N_events in range(MAX_NUMBER_EVENTS) ] \n # ------------------------------------------------------------------------- \n \n for photon in photon_paths:\n # ignore empty photon_paths\n if not photon:\n continue\n\n transmit_event = photon[-1]\n if (transmit_event.event_type != 3):\n print 'photon_path: volume_bins: WARNING: last event:%d in the photon path is not \\\"kInterfaceTransmitEvent\\\":3'\\\n % transmit_event.event_type\n\n # ignore photons outside of the acceptance-angle:\n # cosign at the detector surface (s.t. 
antiparallel is 1.0):\n cosine = -normalized_dot_product(photon[-1].dir, detector_normal)\n if ((acceptanceCosine[0] > cosine) or (cosine > acceptanceCosine[1])):\n continue\n \n N_events = 0 \n for event in photon:\n if (event.event_type != event_type): # ignore photon events not of the specified type\n continue\n \n N_events += 1 \n if not smooth:\n nx = bin_index(event.pos, bin_range, shape)\n if (nx != None):\n # note: index will be None if pos is out-of-range, \n # which is not an error here (bins are accumulated only for the region of interest specified via bin_range).\n if bFluence:\n bins[nx] += event.W\n else:\n bins[nx] += event.W * (mu_a / mu_t)\n else:\n pass\n \n # accumulate the detector fluence: (important: see notes at \"StokesV: transformToLabFrame\" above):\n localStokes = transmit_event.Stokes.normalized()\n localStokes.transformSelfToLabFrame(detector_perp, detector_para, detector_normal)\n \n if not count_event_number: \n detector_fluence['I'] += localStokes.I * transmit_event.W \n detector_fluence['Q'] += localStokes.Q * transmit_event.W\n detector_fluence['U'] += localStokes.U * transmit_event.W\n detector_fluence['V'] += localStokes.V * transmit_event.W \n else:\n if N_events >= MAX_NUMBER_EVENTS:\n raise RuntimeError: 'photon_path: volume_bins: too many events of specified type in the path'\n detector_fluence[N_events]['I'] += localStokes.I * transmit_event.W \n detector_fluence[N_events]['Q'] += localStokes.Q * transmit_event.W\n detector_fluence[N_events]['U'] += localStokes.U * transmit_event.W\n detector_fluence[N_events]['V'] += localStokes.V * transmit_event.W \n \n return bins, detector_fluence",
"def compute_prototypical_distribution(idea_edge_weights, BUCKETS):\n \n # initialize container for computing average cdf for each x\n all_cdfs = {}\n # BUCKETS = np.linspace(0, 1, num_bins)\n for b in BUCKETS:\n all_cdfs[b] = []\n\n # compute ecdf for each idea\n idea_edge_weights['cdf'] = \"\"\n for index, idea in idea_edge_weights.iterrows():\n # print idea['edge_weights']\n if len(idea['edge_weights']) > 0:\n # create the ecdf for the idea\n ecdf = ECDF(idea['edge_weights'])\n this_cdf = []\n for b in BUCKETS:\n this_cdf.append(ecdf(b))\n all_cdfs[b] += [ecdf(b)]\n idea_edge_weights.set_value(index, 'cdf', this_cdf)\n\n # compute the prototypical distribution by averaging across cdfs for each bucket\n proto_cdf = []\n for b in BUCKETS:\n proto_cdf.append(np.mean(all_cdfs[b]))\n \n return proto_cdf, idea_edge_weights",
"def calculate_maximum_potential_edge_counts(channel_composition, N, max_ble_span):\n\n back_dir = {'L' : 'R', 'R' : 'L', 'U' : 'D', 'D' : 'U'}\n\n counts = {}\n for src_ble in range(0, N):\n for sink_ble in range(max(0, src_ble - max_ble_span),\\\n min(N - 1, src_ble + max_ble_span) + 1):\n for w_src in channel_composition:\n src_dirs = ('L', 'R')\n if w_src[0] == 'V':\n src_dirs = ('U', 'D')\n for src_dir in src_dirs:\n for w_sink in channel_composition:\n sink_dirs = ('L', 'R')\n if w_sink[0] == 'V':\n sink_dirs = ('U', 'D')\n for sink_dir in sink_dirs:\n if sink_dir == back_dir[src_dir]:\n continue\n inc = channel_composition[w_src] * channel_composition[w_sink]\n try:\n counts[(w_src, w_sink)] += inc \n except:\n counts.update({(w_src, w_sink) : inc})\n\n e_str = lambda e : \"potential_edge__%s%s__%s%s\"\\\n % (e[0], \"_tap_0\" if e[0][0] == 'V' else '',\\\n e[1], \"_tap_0\" if e[1][0] == 'V' else '')\n\n return {e_str(e) : counts[e] for e in counts}",
"def _derive_cable_points(self, cable_id, depth=2):\n endpts = self._derive_cable_endpoints(cable_id)\n midpt = meanOf(endpts)\n lo_pt = meanOf([ endpts[0], midpt ])\n hi_pt = meanOf([ midpt, endpts[1] ])\n return [endpts[0], lo_pt, midpt, hi_pt, endpts[1]]",
"def depth_value_extraction(self, dmap_list, pts_list):\n\n updated_pts = [[] for i in range(len(pts_list))]\n pts_depth = [[] for i in range(len(pts_list))]\n\n for idx in range(len(pts_list[0])): # Check all matched points\n depth = np.zeros(len(pts_list))\n valid = True\n for i in range(len(pts_list)): # Check depth of current point in each view\n if pts_list[i][idx] != []:\n (u,v) = pts_list[i][idx]\n neighborhood = tools.get_neighborhood(round(u), round(v), self.depth_neighborhood_radius, dmap_list[i])\n nonzero = neighborhood[np.nonzero(neighborhood)]\n count = len(nonzero)\n if count > 0: # and (max(nonzero) - min(nonzero)) < 100:\n depth[i] = sorted(nonzero)[count//2] #Take median value\n else:\n valid = False\n break\n if valid: # If there is valid depth information in all views we keep the point\n for i in range(len(pts_list)):\n pts_depth[i].append(depth[i])\n updated_pts[i].append(pts_list[i][idx])\n\n return pts_depth, updated_pts",
"def calc_binned_obs_property_vs_property(property1, property2, snap, r_max=20, pixel_res=2, bin_nums=10, prop_lims=None,\n\t\t\t\t\t\t\t\t\t\t mask_prop=None):\n\n\tif prop_lims is None:\n\t\tprop_lims = config.PROP_INFO[property2][1]\n\t\tlog_bins = config.PROP_INFO[property2][2]\n\telse:\n\t\tif prop_lims[1] > 30*prop_lims[0]:\tlog_bins=True\n\t\telse:\t\t\t\t\t\t\t\tlog_bins=False\n\n\n\n\tpixel_bins = int(np.ceil(2*r_max/pixel_res))\n\tx_bins = np.linspace(-r_max,r_max,pixel_bins)\n\ty_bins = np.linspace(-r_max,r_max,pixel_bins)\n\tx_vals = (x_bins[1:] + x_bins[:-1]) / 2.\n\ty_vals = (y_bins[1:] + y_bins[:-1]) / 2.\n\tpixel_area = pixel_res**2 * 1E6 # area of pixel in pc^2\n\n\tpixel_data = np.zeros([2,(pixel_bins-1)**2])\n\tfor i, property in enumerate([property1,property2]):\n\t\tif property in ['sigma_sfr','sigma_stellar','sigma_star']:\n\t\t\tptype = 4\n\t\telse:\n\t\t\tptype = 0\n\n\t\tP = snap.loadpart(ptype)\n\t\tx = P.p[:,0]; y = P.p[:,1];\n\n\t\tif property == 'sigma_dust':\n\t\t\tbin_data = P.get_property('M_dust')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tdust_pixel = ret.flatten()/pixel_area\n\t\t\tpixel_data[i] = dust_pixel\n\t\telif property=='sigma_gas':\n\t\t\tbin_data = P.get_property('M_gas')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tM_pixel = ret.flatten()/pixel_area\n\t\t\tpixel_data[i] = M_pixel\n\t\telif property in ['sigma_stellar','sigma_star']:\n\t\t\tbin_data = P.get_property('M_stellar')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tM_pixel = ret.flatten()/pixel_area\n\t\t\tpixel_data[i] = M_pixel\n\t\telif property=='sigma_sfr':\n\t\t\tbin_data = P.get_property('M_sfr')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tM_pixel = ret.flatten()/pixel_area\n\t\t\tpixel_data[i] = M_pixel\n\t\telif property=='sigma_gas_neutral':\n\t\t\tbin_data = P.get_property('M_gas_neutral')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tM_pixel = ret.flatten()/pixel_area\n\t\t\tpixel_data[i] = M_pixel\n\t\telif property=='sigma_H2':\n\t\t\tbin_data = P.get_property('M_H2')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tMH2_pixel = ret.flatten()/pixel_area\n\t\t\tpixel_data[i] = MH2_pixel\n\t\telif property == 'sigma_Z' or property == 'sigma_metals':\n\t\t\tbin_data = P.get_property('M_metals')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tZ_pixel = ret.flatten()/pixel_area\n\t\t\tpixel_data[i] = Z_pixel\n\t\telif property == 'Z':\n\t\t\tbin_data = [P.get_property('M_metals'),P.get_property('M_gas')]\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tZ_pixel = ret[0].flatten()/(ret[1].flatten())/config.SOLAR_Z\n\t\t\tpixel_data[i] = Z_pixel\n\t\telif property == 'O/H':\n\t\t\tbin_data = P.get_property('O/H')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.mean, bins=[x_bins,y_bins]).statistic\n\t\t\tOH_pixel = ret.flatten()\n\t\t\tpixel_data[i] = OH_pixel\n\t\telif property == 'O/H_gas_offset':\n\t\t\tbin_data = P.get_property('O/H_gas_offset')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.mean, bins=[x_bins,y_bins]).statistic\n\t\t\tOH_pixel = ret.flatten()\n\t\t\tpixel_data[i] 
= OH_pixel\n\t\telif property == 'O/H_gas':\n\t\t\tbin_data = P.get_property('O/H_gas')\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.mean, bins=[x_bins,y_bins]).statistic\n\t\t\tOH_pixel = ret.flatten()\n\t\t\tpixel_data[i] = OH_pixel\n\t\telif property == 'O/H_ionized':\n\t\t\tbin_data = P.get_property('O/H')\n\t\t\tnH = P.get_property('nH')\n\t\t\tT = P.get_property('T')\n\t\t\tmask = (nH>=0.5) & (T>=7000) & (T<=15000)\n\t\t\tret = binned_statistic_2d(x[mask], y[mask], bin_data[mask], statistic=np.mean, bins=[x_bins,y_bins]).statistic\n\t\t\tOH_pixel = ret.flatten()\n\t\t\tpixel_data[i] = OH_pixel\n\t\telif property == 'O/H_gas_ionized':\n\t\t\tbin_data = P.get_property('O/H_gas')\n\t\t\tnH = P.get_property('nH')\n\t\t\tT = P.get_property('T')\n\t\t\tmask = (nH>=0.5) & (T>=7000) & (T<=15000)\n\t\t\tret = binned_statistic_2d(x[mask], y[mask], bin_data[mask], statistic=np.mean, bins=[x_bins,y_bins]).statistic\n\t\t\tOH_pixel = ret.flatten()\n\t\t\tpixel_data[i] = OH_pixel\n\t\telif property == 'O/H_gas_ionized_offset':\n\t\t\tbin_data = P.get_property('O/H_gas_offset')\n\t\t\tnH = P.get_property('nH')\n\t\t\tT = P.get_property('T')\n\t\t\tmask = (nH>=0.5) & (T>=7000) & (T<=15000)\n\t\t\tret = binned_statistic_2d(x[mask], y[mask], bin_data[mask], statistic=np.mean, bins=[x_bins,y_bins]).statistic\n\t\t\tOH_pixel = ret.flatten()\n\t\t\tpixel_data[i] = OH_pixel\n\t\telif property == 'fH2':\n\t\t\tbin_data = [P.get_property('M_H2'),P.get_property('M_gas')]\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tfH2_pixel = ret[0].flatten()/(ret[1].flatten())\n\t\t\tpixel_data[i] = fH2_pixel\n\t\telif property in ['r','r25']:\n\t\t\tr_conversion = 1.\n\t\t\tif property == 'r25':\n\t\t\t\ttry:\n\t\t\t\t\tr25 = snap.calc_stellar_scale_r()/0.2\n\t\t\t\t\tr_conversion = 1./r25\n\t\t\t\texcept NameError:\n\t\t\t\t\tprint(\"Using r25 only works for disk galaxy objects. Will default to galactocentric radius.\")\n\t\t\t# Get the average r coordinate for each pixel in kpc\n\t\t\tpixel_r_vals = np.array([np.sqrt(np.power(np.abs(y_vals),2) + np.power(np.abs(x_vals[k]),2))*r_conversion for k in range(len(x_vals))]).flatten()\n\t\t\tpixel_data[i] = pixel_r_vals\n\t\telif property == 'D/Z':\n\t\t\tbin_data = [P.get_property('M_dust'),P.get_property('M_metals')]\n\t\t\tret = binned_statistic_2d(x, y, bin_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t\tDZ_pixel = np.divide(ret[0].flatten(),ret[1].flatten(),where=ret[1].flatten()!=0)\n\t\t\tpixel_data[i] = DZ_pixel\n\t\telse:\n\t\t\tprint(\"Property given to calc_binned_obs_property_vs_property() is not supported:\",property)\n\t\t\treturn None,None,None\n\n\n\tif mask_prop == 'fH2':\n\t\tP = snap.loadpart(0)\n\t\tx = P.p[:,0]; y = P.p[:,1];\n\t\tmask_data = [P.get_property('M_H2'),P.get_property('M_gas')]\n\t\tret = binned_statistic_2d(x, y, mask_data, statistic=np.sum, bins=[x_bins,y_bins]).statistic\n\t\t# Need to be a bit careful to not divide by zero here\n\t\tfH2_pixel= np.zeros(len(ret[0].flatten()))\n\t\tH2_pixel=ret[0].flatten(); gas_pixel = ret[1].flatten()\n\t\tfH2_pixel= np.zeros(len(gas_pixel))\n\t\tfH2_pixel[gas_pixel>0] =H2_pixel[gas_pixel>0]/gas_pixel[gas_pixel>0]\n\t\tmask = fH2_pixel>0.1\n\telse:\n\t\tmask = np.ones(len(pixel_data[0]), dtype=bool)\n\n\tbin_vals, mean_vals, std_vals = math_utils.bin_values(pixel_data[1][mask], pixel_data[0][mask], prop_lims, bin_nums=bin_nums, weight_vals=None, log=log_bins)\\\n\n\treturn bin_vals, mean_vals, std_vals",
"def _calc_trsp_at_density_levels(trsp_ds, rho_w, rho_s, ufld, vfld, lat=None):\n\n # Determine density level and sum\n for kr in trsp_ds['k_rho'].values:\n\n if kr == 0:\n # First bin\n bin_condition_w = rho_w < trsp_ds['rho_f'][kr+1].values\n bin_condition_s = rho_s < trsp_ds['rho_f'][kr+1].values\n\n elif kr == trsp_ds['k_rho'].values[-1]:\n # Last bin\n bin_condition_w = rho_w >= trsp_ds['rho_f'][kr].values\n bin_condition_s = rho_s >= trsp_ds['rho_f'][kr].values\n\n else:\n # all others\n bin_condition_w = (rho_w < trsp_ds['rho_f'][kr+1].values) & \\\n (rho_w >= trsp_ds['rho_f'][kr].values)\n bin_condition_s = (rho_s < trsp_ds['rho_f'][kr+1].values) & \\\n (rho_s >= trsp_ds['rho_f'][kr].values)\n \n # Compute transport within this density bin\n trsp_x = ufld.where( \n bin_condition_w,0).sum(dim=['i_g','j','tile','k'])\n trsp_y = vfld.where( \n bin_condition_s,0).sum(dim=['i','j_g','tile','k'])\n\n if lat is not None:\n trsp_ds['trsp'].loc[{'lat':lat,'k_rho':kr}] = trsp_x + trsp_y\n else:\n trsp_ds['trsp'].loc[{'k_rho':kr}] = trsp_x + trsp_y\n\n return trsp_ds",
"def get_stats(gene_df, df_path, criteria, cell_type, bin=3000, df_function=df_to_index_danpos):\n # print 'get stats', len(gene_df['gene'].unique())\n if criteria != 'skewness' and criteria != 'kurtosis':\n table_dict = df_function(df_path)\n else:\n df_function = df_to_index_sk\n table_dict = df_function(df_path)\n\n results = defaultdict(float)\n\n for k in range(gene_df.shape[0]):\n gene_name = gene_df.iloc[k, 0]\n\n chr_name, start, end, length = gene_df.iloc[k, 1], gene_df.iloc[k, 2], gene_df.iloc[k, 3], gene_df.iloc[k, 4]\n ## Here is the problem, danpos selector will consider the entire overlapped peaks\n ## The other approach is using self designed peak calling, to make sure each parameter will return different value\n cur_table = set()\n\n if end < start:\n mid = (start + end) / 2\n start = mid\n end = mid\n\n for i in range(int(start/bin), int(end/bin) + 1):\n if chr_name in table_dict and i in table_dict[chr_name]:\n table = table_dict[chr_name][i]\n cur_table = cur_table.union(table)\n\n if len(cur_table) == 0:\n continue\n\n selected_table = []\n for t in cur_table:\n if start < t[1] < end:\n selected_table.append(t)\n elif start < t[2] < end:\n selected_table.append(t)\n elif t[1] <= start and end <= t[2]:\n selected_table.append(t)\n\n if len(selected_table) == 0:\n continue\n\n cur_df = pd.DataFrame(list(selected_table))\n\n if cur_df.shape[1] == 6:\n cur_df.columns = ['chr',\n 'start',\n 'end',\n 'width_above_cutoff',\n 'total_signal',\n 'height',]\n else:\n cur_df.columns = ['chr',\n 'start',\n 'end',\n 'width_above_cutoff',\n 'total_signal',\n 'height',\n 'skewness',\n 'kurtosis']\n\n if criteria == 'total_width':\n cur_col = cur_df['end'] - cur_df['start']\n cur_value = cur_col.sum()\n elif criteria == 'height':\n cur_value = cur_df['height'].max()\n elif criteria == 'single_width':\n cur_col = cur_df['end'] - cur_df['start']\n cur_value = cur_col.max()\n elif criteria == 'total_signal':\n cur_value = cur_df['total_signal'].sum()\n elif criteria == 'single_signal':\n cur_value = cur_df['total_signal'].max()\n elif criteria == 'coverage':\n cur_value = (cur_df['end'] - cur_df['start']).sum()*1.0/length\n\n\n #\n # # This is for kurtosis and skewness\n elif cur_df.shape[0] > 0 and criteria == 'skewness' and 'skewness' in cur_df.columns:\n cur_value = cur_df.ix[cur_df['total_signal'].argmax(),'skewness']\n elif cur_df.shape[0] > 0 and criteria == 'kurtosis' and 'kurtosis' in cur_df.columns:\n cur_value = cur_df.ix[cur_df['total_signal'].argmax(), 'kurtosis']\n\n\n if cur_value > results[gene_name] and criteria != 'skewness' and criteria != 'kurtosis':\n results[gene_name] = cur_value\n # this is for kurtosis and skewness\n\n elif criteria == 'kurtosis':\n if abs(cur_value) > abs(results[gene_name]):\n results[gene_name] = cur_value\n elif criteria == 'skewness':\n if abs(cur_value) > results[gene_name]:\n results[gene_name] = abs(cur_value)\n\n final = []\n\n for gene_name in gene_df['gene'].unique():\n final.append((gene_name, results[gene_name], cell_type))\n print len(final)\n return final",
"def _calc_bin_edges(self, binning_scheme='generator'):\n pt_bin_edges = self.pt_bin_edges(binning_scheme)\n num_pt_bins = self.num_pt_bins(binning_scheme)\n bins = []\n for ibin_pt, pt in enumerate(pt_bin_edges[:-1]):\n lambda_bin_edges = self.binning_handler.get_variable_bins(pt=pt,\n binning_scheme=binning_scheme)\n if self.plot_with_bin_widths:\n offset = ibin_pt * math.ceil(lambda_bin_edges[-1])\n bins.extend(lambda_bin_edges[:-1] + offset)\n if ibin_pt == (num_pt_bins - 1):\n bins.append(lambda_bin_edges[-1] + offset)\n else:\n start = 0 if len(bins) == 0 else (bins[-1] + 1)\n end = start + len(lambda_bin_edges) - 1\n # -1 to account for first bin having lower edge of last bin\n bins.extend(list(range(start, end)))\n return array('d', bins)",
"def compute_depths(ps_volume, inv_depths):\n\n inv_depth_image = np.zeros(ps_volume.shape[1:], dtype=np.float64)\n\n \"\"\" YOUR CODE STARTS HERE \"\"\"\n for x in range(ps_volume.shape[2]):\n for y in range(ps_volume.shape[1]):\n volumes = ps_volume[:, y, x]\n inv_depth_image[y, x] = inv_depths[np.argmin(volumes)]\n\n \"\"\" YOUR CODE ENDS HERE \"\"\"\n\n return inv_depth_image"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add command line options
|
def add_command_line_options():
AddOption('--preprocess',
dest='preprocess',
action='store_true',
default=False,
help='Preprocess selected files for profiling')
AddOption('--no-rpath',
dest='no_rpath',
action='store_true',
default=False,
help='Disable rpath')
AddOption('--analyze-stack',
dest='analyze_stack',
metavar='ARGSTRING',
default=None,
help='Gather stack usage statistics after build')
    # We sometimes need to use alternate tools for building, and they must be
    # added to the PATH in the environment.
AddOption('--prepend-path',
dest='prepend_path',
default=None,
help="String to prepend to PATH environment variable.")
# Allow specifying the locale to be used. Default "en_US.UTF8"
AddOption('--locale-name',
dest='locale_name',
default='en_US.UTF8',
help='locale to use for building. [%default]')
AddOption('--require-optional',
dest='require_optional',
action='store_true',
default=False,
help='Fail the build if check_component fails')
AddOption('--build-deps',
dest='build_deps',
type='choice',
choices=['yes', 'no', 'only', 'build-only'],
default='no',
help="Automatically download and build sources. (yes|no|only|build-only) [no]")
    # We want to be able to check which dependencies are needed without
    # doing a build, similar to --dry-run. We cannot use --dry-run on the
    # command line because it disables running the tests for the
    # dependencies, so we need a new option.
AddOption('--check-only',
dest='check_only',
action='store_true',
default=False,
help="Check dependencies only, do not download or build.")
# Need to be able to look for an alternate build.config file.
AddOption('--build-config',
dest='build_config',
default=os.path.join(Dir('#').abspath, 'utils', 'build.config'),
help='build config file to use. [%default]')
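A hedged sketch of how these options would typically be read back elsewhere in the same SCons build, assuming the standard AddOption/GetOption pairing (the dest name is the lookup key). The import is normally implicit inside SConstruct/SConscript files, and this only runs under scons.

from SCons.Script import GetOption

if GetOption('check_only'):
    print('Checking dependencies only; nothing will be downloaded or built.')
build_deps = GetOption('build_deps')    # one of: yes, no, only, build-only
locale_name = GetOption('locale_name')  # defaults to en_US.UTF8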
|
[
"def add_opts(self, optparser):\n return",
"def _setCommandLineOptions(self):\n self._commandLineParser.add_option(\"-f\", \"--file\", dest=\"filename\", help=\"open a FILE\", metavar=\"FILE\")\n self._commandLineParser.add_option(\"-l\", \"--loglevel\", dest=\"loglevel\", help=\"set LOGLEVEL to 10=DEBUG, 20=INFO, 30=WARNING, 40=ERROR, 50=CRITICAL\", metavar=\"LOGLEVEL\", type=\"int\")",
"def add_custom_cli_args(self, cli_parser):\n pass",
"def define_command_line_options(self):\n\n self.OptionParser.add_option(\"-f\", \"--filename\", action=\"store\", type=\"string\",\n dest=\"filename\", default=\"foamcutter\",\n help=(\"Basename of the generated G-CODE file (will have .nc \"\n \"extension and will be saved on Desktop\"))\n self.OptionParser.add_option(\"-x\", \"--dim-x\", action=\"store\", type=\"float\", dest=\"dim_x\",\n default=200.0, help=\"Plane X dimension in mm\")\n self.OptionParser.add_option(\"-y\", \"--dim-y\", action=\"store\", type=\"float\", dest=\"dim_y\",\n default=200.0, help=\"Plane Y dimension in mm\")\n self.OptionParser.add_option(\"-s\", \"--speed\", action=\"store\", type=\"float\",\n dest=\"speed\", default=100.0, help=\"Cutting speed in mm/min\")\n self.OptionParser.add_option(\"-t\", \"--temperature\", action=\"store\", type=\"int\",\n dest=\"temperature\", default=25, help=\"Wire temperature in percentual\")\n self.OptionParser.add_option(\"-b\", \"--flatness\", action=\"store\", type=\"float\",\n dest=\"flatness\", default=1.0,\n help=\"Flatness (for bezier curves)\")\n\n # This is here so we can have tabs - but we do not use it for the moment.\n # Remember to use a legitimate default\n self.OptionParser.add_option(\"\", \"--active-tab\", action=\"store\", type=\"string\",\n dest=\"active_tab\", default='setup', help=\"Active tab.\")",
"def add_simple_args(self):\n self.ctrl_parser.add_argument(\"-V\", \"--version\", action=\"version\", version='0.1.0',\n help='Provides the version of the tool')\n self.ctrl_parser.add_argument(\"-v\", \"--verbosity\", action=\"count\", help=\"increase output verbosity\")\n self.ctrl_parser.add_argument(\"-i\", action=InteractiveCli, nargs=0, help=\"Start in interactive mode\")\n self.ctrl_parser.add_argument(\"-t\", \"--timeout\", type=float,\n help=\"Provides a timeout for the command\")",
"def addArgs(self):\n \n self.createArgument('--fork', self.fork, 1, 'Fork to background', action='store_true')\n self.createArgument('--run', self.run, 1, 'Execute run on remote server (to be used with --client argument)', action='store_true')\n self.createArgument('--stop', self.stop, 1, 'Stop previous job', action='store_true')\n self.createArgument('--debug', self.debug, 1, 'Debugging mode', action='store_true')\n self.createArgument('--socket', self.setSocket, '', 'use TCP or UDP connection over ethernet/wireless, default TCP, available TCP, UDP, RFC (bluetooth)')\n self.createArgument('--client', self.client, 1, 'Connect to comma separated client addresses')\n self.createArgument('--server', self.bindMode, 1, 'turn into a server mode that handles instructions', action='store_true')\n self.createArgument('--target', self.selectTarget, '', 'target adress (bluetooth mac or ip adress over ethernet/wireless)')\n self.createArgument('--port', self.selectPort, 80, 'destination port')\n self.createArgument('--bytes', self.packetSize, 80, 'number of bytes to send in one packet')",
"def add_core_options(parser):\n parser.add_option(\"-d\", \"--directory\", dest=\"directory\",\n default=getcwd(),\n help=\"directory from which to load configuration [default: %default]\")\n\n parser.add_option(\"-e\", \"--environment\", dest=\"environment\",\n default=\"local\",\n help=\"environment to operate on [default: %default]\")\n\n parser.add_option(\"-H\", \"--hosts\", dest=\"hosts\",\n default=\"\",\n help=\"comma-separated list of hosts to operate on\")\n\n parser.add_option(\"-q\", \"--quiet\", dest=\"quiet\",\n action=\"store_true\",\n default=False,\n help=\"minimize output verbosity\")\n\n parser.add_option(\"-R\", \"--roles\", dest=\"roles\",\n default=\"\",\n help=\"comma-separated list of roles to operate on\")\n\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbosity\",\n action=\"count\",\n default=0,\n help=\"control confab verbosity; by default, confab suppresses\"\n \"most output. Additional -v flags increase verbosity.\")",
"def add_standard_args(self):\n self.add_argument(\"-v\", \"--verbose\",\n help=\"Set log verbosity to True, nominal debug level.\", action=\"store_true\")\n self.add_argument(\"--verbosity\",\n help=\"Set log verbosity to a specific level: 0..100.\", type=int, default=0)\n self.add_argument(\"--dump-cmdline\", action=\"store_true\",\n help=\"Dump the command line parameters used to start the script to the log.\")\n self.add_argument(\"-R\", \"--readonly-cache\", action=\"store_true\",\n help=\"Don't modify the CRDS cache. Not compatible with options which implicitly modify the cache.\")\n self.add_argument('-I', '--ignore-cache', action='store_true', dest=\"ignore_cache\",\n help=\"Download required files even if they're already in the cache.\")\n self.add_argument(\"-V\", \"--version\",\n help=\"Print the software version and exit.\", action=\"store_true\")\n self.add_argument(\"-J\", \"--jwst\", dest=\"jwst\", action=\"store_true\",\n help=\"Force observatory to JWST for determining header conventions.\"\"\")\n self.add_argument(\"-H\", \"--hst\", dest=\"hst\", action=\"store_true\",\n help=\"Force observatory to HST for determining header conventions.\"\"\")\n self.add_argument(\"--roman\", dest=\"roman\", action=\"store_true\",\n help=\"Force observatory to Roman for determining header conventions.\"\"\")\n self.add_argument(\"--stats\", action=\"store_true\",\n help=\"Track and print timing statistics.\")\n self.add_argument(\"--profile\",\n help=\"Output profile stats to the specified file.\", type=str, default=\"\")\n self.add_argument(\"--log-time\", action=\"store_true\",\n help=\"Add date/time to log messages.\")\n self.add_argument(\"--pdb\",\n help=\"Run under pdb.\", action=\"store_true\")\n self.add_argument(\"--debug-traps\",\n help=\"Bypass exception error message traps and re-raise exception.\", action=\"store_true\")",
"def cli_options():\n parser = argparse.ArgumentParser(description='Barcoding gap inference tool wrapper for Galaxy')\n\n parser.add_argument(\"-d\", \"--distance_folder\", type=str, help=\"folder in which are stored the distances in family specific files\", action=\"store\", required=True)\n parser.add_argument(\"-f\", \"--all_taxa_file_list\", type=str, help=\"file containing all the taxon list\", action=\"store\", required=True)\n parser.add_argument(\"-n\", \"--taxon_name\", type=str, help=\"taxon name\", action=\"store\", required=True)\n parser.add_argument(\"-r\", \"--taxonomic_rank\", type=str, help=\"taxonomic_rank\", action=\"store\", required=True)\n parser.add_argument(\"-o\", \"--outfile\", type=str, help=\"output file\", action=\"store\", required=True)\n return parser.parse_args()",
"def _add_arguments(self):\n #the base arguments\n self.parser.add_argument(\"-d\", \"--debug\",\n help=\"Display debugging messages.\",\n action=\"store_true\",\n default=False, dest=\"debug\")\n \n self.parser.add_argument(\"--pudb\",\n help=\"Enable pudb interactive debugging.\",\n action=\"store_true\",\n default=False, dest='pudb')\n\n self.parser.add_argument(\"--pdb\",\n help=\"Enable python's debugger\",\n action=\"store_true\",\n default=False, dest='pdb')\n \n\n self.parser.add_argument(\"-s\", \"--silent\",\n help=\"Turn off screen output.\",\n action=\"store_true\", default=False,\n dest='silent')\n return",
"def add_options(parser):\n parser.add_option(\"\", \"--excess-bw\", type=\"float\", default=_def_excess_bw,\n help=\"set RRC excess bandwith factor [default=%default]\")\n parser.add_option(\"\", \"--no-gray-code\", dest=\"gray_code\",\n action=\"store_false\", default=True,\n help=\"disable gray coding on modulated bits (PSK)\")",
"def add_options(config):\n return config[\"module\"][\"application\"].add_options(config)",
"def prtOptions():\n\n print(\"The command-line options are:\")\n print(\" --version (print the version number and exit)\")\n print(\" -r (print the full version string and exit)\")\n print(\" -v (verbose)\")\n print(\" -t (print timestamps)\")\n print(\"\")\n print(\"Following the options, list one or more input files\")\n print(\" (enclosed in quotes if more than one file name is specified\")\n print(\" and/or if wildcards are used) and one output file name.\")",
"def addPyfeynOptions(parser):\n parser.add_option(\"-V\", \"--visual-debug\", dest=\"VDEBUG\", action = \"store_true\",\n default = False, help=\"produce visual debug output\")\n parser.add_option(\"-D\", \"--debug\", dest=\"DEBUG\", action = \"store_true\",\n default = False, help=\"produce debug output\")\n parser.add_option(\"-d\", \"--draft\", dest=\"DRAFT\", action = \"store_true\",\n default = False, help=\"produce draft output, skipping time-consuming calculations\")\n return parser",
"def add_argument_cmd(self, *args, **kwargs):\n pass",
"def add_arguments(self, parser):\n parser.add_argument(\n \"--datetime\",\n action=\"store\",\n help=\"ISO datetime used for calculating eligibility. Defaults to now. Currently only used for backdating command runs in tests.\",\n )\n parser.add_argument(\n \"--global_userinfo\",\n action=\"store\",\n help=\"specify Wikipedia global_userinfo data. Defaults to fetching live data. Currently only used for faking command runs in tests.\",\n )",
"def setup_main_options(args):\n # Set up environment based on args.\n tty.set_verbose(args.verbose)\n tty.set_debug(args.debug)\n # tty.set_trace(args.trace)\n\n # debug must be set first so that it can even affect behavior of\n # errors raised by pymod.config.\n if args.debug:\n # pymod.error.debug = True\n pymod.config.set(\"debug\", True, scope=\"command_line\")\n\n if args.dryrun:\n pymod.config.set(\"dryrun\", True, scope=\"command_line\")\n\n if args.shell != pymod.config.get(\"default_shell\"):\n pymod.shell.set_shell(args.shell)\n\n # when to use color (takes always, auto, or never)\n color.set_color_when(args.color)",
"def add_arguments(self, parser):\r\n parser.add_argument(\"digcoll_retriever_host\",\r\n help=\"The host of the digcoll_retriever\"),\r\n parser.add_argument(\"project_api\",\r\n help=\"\", type=str)\r\n parser.add_argument(\"import_data_file\",\r\n help=\"An identifier for a particular MVol issue\", type=str)",
"def add_args(parser):\n # fmt: off\n parser.add_argument('--varscale-beta', default=0.9, type=float,\n help='betas for LaProp optimizer')\n parser.add_argument('--momentum', default=0.9, type=float, metavar='WD',\n help='weight decay')\n parser.add_argument('--beta-min', default=0.5, type=float, metavar='WD',\n help='weight decay')\n parser.add_argument('--varscale-eps', type=float, default=1e-15, metavar='D',\n help='epsilon for LaProp optimizer')\n parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',\n help='weight decay')\n parser.add_argument('--use-adam', default=False, action=\"store_true\")\n parser.add_argument('--eps-schedule', default=False, action=\"store_true\")\n parser.add_argument('--nesterov', default=False, action=\"store_true\")\n # fmt: on"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the version (and release) in the RPM spec file
|
def update_rpm_version(version, tag):
# pylint: disable=consider-using-f-string
spec = open("utils/rpms/daos.spec", "r").readlines() # pylint: disable=consider-using-with
current_version = 0
release = 0
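    # Scan the spec file line by line, bumping Version:/Release: and inserting a changelog entry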
for line_num, line in enumerate(spec):
if line.startswith("Version:"):
current_version = line[line.rfind(' ') + 1:].rstrip()
if version < current_version:
print("You cannot create a new version ({}) lower than the RPM "
"spec file has currently ({})".format(version,
current_version))
return False
if version > current_version:
spec[line_num] = "Version: {}\n".format(version)
if line.startswith("Release:"):
if version == current_version:
current_release = int(line[line.rfind(' ') + 1:line.find('%')])
release = current_release + 1
else:
release = 1
spec[line_num] = "Release: {}%{{?relval}}%{{?dist}}\n".\
format(release)
if line == "%changelog\n":
cmd = 'rpmdev-packager'
try:
# pylint: disable=consider-using-with
pkg_st = subprocess.Popen(cmd, stdout=subprocess.PIPE) # nosec
packager = pkg_st.communicate()[0].strip().decode('UTF-8')
except OSError:
print("You need to have the rpmdev-packager tool (from the "
"rpmdevtools RPM on EL7) in order to make releases.\n\n"
"Additionally, you should define %packager in "
"~/.rpmmacros as such:\n"
"%packager John A. Doe <john.doe@intel.com>"
"so that package changelog entries are well defined")
return False
date_str = time.strftime('%a %b %d %Y', time.gmtime())
spec.insert(line_num + 1, "\n")
spec.insert(line_num + 1,
"- Version bump up to {}\n".format(tag))
spec.insert(line_num + 1,
'* {} {} - {}-{}\n'.format(date_str,
packager,
version,
release))
break
open("utils/rpms/daos.spec", "w").writelines(spec) # pylint: disable=consider-using-with
return True
|
[
"def _update_version_file(self):\n file_path = os.path.join(self.repo.working_tree_dir, self.version_file)\n self._update_version_numbers(file_path)\n return self._commit_file(\n self.version_file,\n 'Version updated for release {}.{}.{}{}.'.format(\n self.major, self.minor, self.patch, self.release\n ),\n )",
"def update_fetch(self):\n Popen([\"mount\", \"-t\", \"devfs\", \"devfs\",\n \"{}/releases/{}/root/dev\".format(self.iocroot,\n self.release)]).communicate()\n copy(\"/etc/resolv.conf\",\n \"{}/releases/{}/root/etc/resolv.conf\".format(self.iocroot,\n self.release))\n\n # TODO: Check for STABLE/PRERELEASE/CURRENT/BETA if we support those.\n # TODO: Fancier.\n self.lgr.info(\"\\n* Updating {} to the latest patch level... \".format(\n self.release))\n\n os.environ[\"UNAME_r\"] = self.release\n os.environ[\"PAGER\"] = \"/bin/cat\"\n new_root = \"{}/releases/{}/root\".format(self.iocroot, self.release)\n if os.path.isfile(\"{}/etc/freebsd-update.conf\".format(new_root)):\n # 10.1-RELEASE and under have a interactive check\n if float(self.release.partition(\"-\")[0][:5]) <= 10.1:\n with NamedTemporaryFile(delete=False) as tmp_conf:\n conf = \"{}/usr/sbin/freebsd-update\".format(new_root)\n with open(conf) as update_conf:\n for line in update_conf:\n tmp_conf.write(re.sub(\"\\[ ! -t 0 \\]\", \"false\",\n line))\n\n os.chmod(tmp_conf.name, 0o755)\n Popen([tmp_conf.name, \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n os.remove(tmp_conf.name)\n else:\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"install\"], stdout=PIPE, stderr=PIPE).communicate()\n\n try:\n # Why this sometimes doesn't exist, we may never know.\n os.remove(\"{}/releases/{}/root/etc/resolv.conf\".format(\n self.iocroot, self.release))\n except OSError:\n pass\n\n Popen([\"umount\", \"{}/releases/{}/root/dev\".format(\n self.iocroot, self.release)]).communicate()",
"def update_version_file(self) -> \"ProductionPrep\":\n\n PyFunceble.facility.Logger.info(\n \"Started to update version file.\",\n )\n\n if self.should_be_deprecated(self.previous_version):\n to_append = \".\".join(\n self.version_utility.get_splitted(self.version_utility.local_version)[0]\n )\n\n if to_append not in self.version_file_content[\"deprecated\"]:\n self.version_file_content[\"deprecated\"].append(to_append)\n\n self.version_file_content[\n \"current_version\"\n ] = PyFunceble.storage.PROJECT_VERSION\n\n self.dict_helper.set_subject(self.version_file_content).to_yaml_file(\n self.VERSION_FILE_PATH\n )\n\n PyFunceble.facility.Logger.info(\n \"Finished to update version file.\",\n )\n\n return self",
"def update_control_version(file_path, version):\n for line in fileinput.input(file_path, inplace=1):\n if 'Version: ' in line:\n old_ver = line.split(' ')[1]\n line = line.replace(old_ver, version) + '\\n'\n sys.stdout.write(line)",
"def bump_version(version, filename):\n latest_tag = run_command(['git', 'describe', '--abbrev=0'])\n with open(filename, 'r', encoding='utf-8') as file:\n content = file.read()\n\n # Maybe use re.sub in case the version appears a second time in the spec file\n content = content.replace(latest_tag.replace(\"v\", \"\"), str(version))\n\n with open(filename, 'w', encoding='utf-8') as file:\n file.write(content)",
"def bump_version():\n cmd_args = sys.argv[1:]\n\n current_version = __version__.__version__\n is_rc = False\n major = 0\n minor = 0\n patch = 0\n release = RELEASE__RELEASE_CANDIDATE\n build = 0\n base_matches = re.findall(r\"^(\\d+)\\.(\\d+)\\.(\\d+)$\", current_version)\n if base_matches:\n major, minor, patch = base_matches[0]\n new_patch = str(int(patch) + 1)\n patch = new_patch\n\n rc_matches = re.findall(r\"^(\\d+)\\.(\\d+)\\.(\\d+)-([a-z]+)\\.(\\d+)$\", current_version)\n if rc_matches:\n major, minor, patch, release, build = rc_matches[0]\n if release == RELEASE__RELEASE_CANDIDATE:\n is_rc = True\n new_build = str(int(build) + 1)\n build = new_build\n else:\n is_rc = False\n release = RELEASE__RELEASE_CANDIDATE\n build = 0\n\n if PART__PRE_RELEASE in cmd_args:\n cmd_args = [\n \"--new-version\",\n f\"{major}.{minor}.{patch}-{release}.{build}\",\n ] + cmd_args\n\n elif is_rc and PART__BUILD not in cmd_args:\n # warn about deviating from release flow\n if PART__RELEASE not in cmd_args:\n print(\n \"\\033[93mCurrent version is marked as a release candidate and must be bumped as a either a build or release for targeted version. \"\n \"Run command with part 'release' to bump as release.\\033[0m\"\n )\n return False\n\n cmd_args = [\"--new-version\", f\"{major}.{minor}.{patch}\"] + cmd_args\n\n completed_process = subprocess.run([\"bump2version\"] + cmd_args, capture_output=True)\n\n if completed_process.returncode != 0:\n print(\n f\"Failed to bump version: {completed_process.stdout.decode('ascii')} {completed_process.stderr.decode('ascii')}\"\n )\n return False\n\n print(completed_process.stderr.decode(\"ascii\"))\n print(completed_process.stdout.decode(\"ascii\"))\n return True",
"def test_edit_both_versions(self):\n self.instance.package = self.input_ovf\n self.instance.version = \"5.2.0.01I\"\n self.instance.full_version = \"Cisco IOS XRv, Version 5.2\"\n self.instance.run()\n self.instance.finished()\n self.check_diff(\"\"\"\n <ovf:Vendor>Cisco Systems, Inc.</ovf:Vendor>\n- <ovf:Version>DEV</ovf:Version>\n- <ovf:FullVersion>DEVELOPMENT IMAGE</ovf:FullVersion>\n+ <ovf:Version>5.2.0.01I</ovf:Version>\n+ <ovf:FullVersion>Cisco IOS XRv, Version 5.2</ovf:FullVersion>\n <ovf:ProductUrl>http://www.cisco.com/en/US/products/ps12559/index.html\\\n</ovf:ProductUrl>\n\"\"\")",
"def __upgrade_install__(self, name):\n install = su.Popen([name, \"-b\", self.path, \"-d\",\n f\"{self.path}/var/db/freebsd-update/\",\n \"-f\",\n f\"{self.path}/etc/freebsd-update.conf\",\n \"-r\",\n self.new_release, \"install\"], stderr=su.PIPE)\n install.communicate()\n\n return install.returncode",
"def test_edit_full_version(self):\n self.instance.package = self.input_ovf\n self.instance.full_version = \"Some arbitrary product, version 3.14159\"\n self.instance.run()\n self.instance.finished()\n self.check_diff(\"\"\"\n <ovf:Version>DEV</ovf:Version>\n- <ovf:FullVersion>DEVELOPMENT IMAGE</ovf:FullVersion>\n+ <ovf:FullVersion>Some arbitrary product, version 3.14159\\\n</ovf:FullVersion>\n <ovf:ProductUrl>http://www.cisco.com/en/US/products/ps12559/index.html\\\n</ovf:ProductUrl>\n\"\"\")",
"def version(self, value):\n self.set(\"ver\", value)",
"def test03_update_gppkg_lower(self):\n #Use the gppkg from previous test\n update_main_rpm_spec = RPMSpec(\"A\", \"1\", \"2\", [\"B = 1-2\"])\n update_dep_rpm_spec = RPMSpec(\"B\", \"1\", \"2\")\n update_gppkg_spec = GppkgSpec(\"alpha\", \"1.1\")\n self.install(update_gppkg_spec.get_filename())\n \n #Original gppkg with a lower gppkg, main and deps version\n with self.assertRaisesRegexp(ExecutionError, \"A newer version of %s is already installed\" % self.alpha_spec.get_filename()):\n self.update(self.alpha_spec.get_filename())",
"def _version_man(self, strversion, isdev, revision):\n import datetime\n\n shortversion = '.'.join(strversion.split('.')[:2])\n for filename in self.manpages:\n fp = textopen(filename)\n try:\n initlines = fp.readlines()\n finally:\n fp.close()\n fp = textopen(filename, 'w')\n replaced = 0\n try:\n for line in initlines:\n if line.startswith('.TH'):\n splitted = line.split('\"')\n splitted[3] = str(datetime.date.today().year)\n\n splitted[5] = ' '.join(\n splitted[5].split(' ')[:-1] + [shortversion]\n )\n line = '\"'.join(splitted)\n replaced = 1\n fp.write(line)\n finally:\n fp.close()\n assert replaced, \".TH line not found in %r\" % (filename,)",
"def fix_spec(self, archive: str, version: str, commit: str):\n self._fix_spec_source(archive)\n self._fix_spec_prep(version)\n\n # we only care about the first number in the release\n # so that we can re-run `packit srpm`\n git_des_command = [\n \"git\",\n \"describe\",\n \"--tags\",\n \"--long\",\n \"--match\",\n \"*\",\n ]\n try:\n git_des_out = run_command(\n git_des_command, output=True, cwd=self.local_project.working_dir\n ).strip()\n except PackitCommandFailedError as ex:\n # probably no tags in the git repo\n logger.info(f\"Exception while describing the repository: {ex!r}\")\n git_desc_suffix = \"\"\n else:\n # git adds various info in the output separated by -\n # so let's just drop version and reuse everything else\n g_desc_raw = git_des_out.rsplit(\"-\", 2)[1:]\n # release components are meant to be separated by \".\", not \"-\"\n git_desc_suffix = \".\" + \".\".join(g_desc_raw)\n # the leading dot is put here b/c git_desc_suffix can be empty\n # and we could have two subsequent dots - rpm errors in such a case\n current_branch = self.local_project.ref\n # rpm is picky about release: hates \"/\" - it's an error\n # also prints a warning for \"-\"\n sanitized_current_branch = current_branch.replace(\"/\", \"\").replace(\"-\", \".\")\n original_release_number = self.specfile.get_release_number().split(\".\", 1)[0]\n current_time = datetime.datetime.now().strftime(DATETIME_FORMAT)\n release = (\n f\"{original_release_number}.{current_time}.\"\n f\"{sanitized_current_branch}{git_desc_suffix}\"\n )\n\n last_tag = self.get_last_tag()\n msg = \"\"\n if last_tag:\n # let's print changes b/w the last tag and now;\n # ambiguous argument '0.1.0..HEAD': unknown revision or path not in the working tree.\n # Use '--' to separate paths from revisions, like this\n cmd = [\n \"git\",\n \"log\",\n \"--pretty=format:- %s (%an)\",\n f\"{last_tag}..HEAD\",\n \"--\",\n ]\n msg = run_command(\n cmd, output=True, cwd=self.local_project.working_dir\n ).strip()\n if not msg:\n # no describe, no tag - just a boilerplate message w/ commit hash\n # or, there were no changes b/w HEAD and last_tag, which implies last_tag == HEAD\n msg = f\"- Development snapshot ({commit})\"\n logger.debug(f\"Setting Release in spec to {release!r}.\")\n # instead of changing version, we change Release field\n # upstream projects should take care of versions\n self.specfile.set_spec_version(\n version=version,\n release=release,\n changelog_entry=msg,\n )",
"def update_version(self):\n if not hasattr(self, 'versions') and not hasattr(self, 'version_number'):\n self.version_number = 1\n \n if hasattr(self, 'version_number') and self.version_number < 2:\n try:\n if 'short_desc' in self.__dict__:\n self._short_desc = self.short_desc\n del self.__dict__['short_desc']\n if 'long_desc' in self.__dict__:\n self._long_desc = self.long_desc\n del self.__dict__['long_desc']\n self.version_number = 2\n except KeyError:\n self.log.error('Error updating object %s in Thing.update_version()' % self)\n \n if hasattr(self, 'version_number'):\n # Changing to dictionary-based versioning system\n self.versions[gametools.findGamePath(__file__)] = 3\n del self.__dict__['version_number']\n \n if self.versions[gametools.findGamePath(__file__)] <= 5:\n self.adjectives = set(self.adjectives)\n self.versions[gametools.findGamePath(__file__)] = 6",
"def update_pkg_dep_file( filePath, oldMacroVersions, newMacroVersions, verbose=False ):\n using_MODULE_VERSION = {}\n definedModules = {}\n using_BASE_MODULE_VERSION = False\n using_EPICS_BASE_VER = False\n modified = False\n lineCache = []\n in_file = open( filePath, \"r\" )\n for line in in_file:\n strippedLine = line.strip()\n if len(strippedLine) == 0:\n lineCache += line\n continue\n\n # XXX_MODULE_VERSION = YYYYYYYYY\n match = moduleVersionRegExp.search( line )\n if match:\n macroName = match.group(1)\n oldVersion = match.group(2)\n if macroName in newMacroVersions:\n newVersion = newMacroVersions[macroName]\n if newVersion != oldVersion:\n print(\"Old: %s\" % line, end=' ')\n line = string.replace( line, oldVersion, newMacroVersions[macroName] )\n print(\"New: %s\" % line, end=' ')\n modified = True\n if macroName == \"BASE\":\n using_BASE_MODULE_VERSION = True\n else:\n using_MODULE_VERSION[macroName] = True\n lineCache += line\n continue\n\n # #* XXX = YYYYYYYYYYYYYYYYYYYYYYYYYYYY\n # Matches any macro definition, even if commented out\n match = condMacroRegExp.search( line )\n if not match:\n lineCache += line\n continue\n\n # Parse the macro match\n originalLine = match.group(0)\n commentedOut = match.group(1).startswith('#')\n macroName = match.group(2)\n oldVersionPath = match.group(3)\n\n # Is this macro related to the base version\n #isMacroBaseRelated = False\n #if macroName in [ \"EPICS_BASE\", \"EPICS_BASE_VER\", \"EPICS_MODULES\", \"MODULES_SITE_TOP\" ]:\n #\tisMacroBaseRelated = True\n\n if macroName in newMacroVersions:\n pkgName = macroNameToPkgName(macroName)\n if not pkgName:\n continue\n if pkgName == 'base':\n if 'BASE_MODULE_VERSION' in oldMacroVersions:\n newVersionPath = \"$(EPICS_SITE_TOP)/base/$(BASE_MODULE_VERSION)\"\n else:\n newVersionPath = \"$(EPICS_SITE_TOP)/base/%s\" % ( newMacroVersions[macroName] )\n #print '1. newVersionPath = %s' % newVersionPath\n elif using_MODULE_VERSION.get( macroName, False ):\n newVersionPath = \"$(EPICS_MODULES)/%s/$(%s_MODULE_VERSION)\" % ( pkgName, macroName )\n #print '2. newVersionPath = %s' % newVersionPath\n else:\n newVersionPath = \"$(EPICS_MODULES)/%s/%s\" % ( pkgName, newMacroVersions[macroName] )\n #print '3. 
newVersionPath = %s' % newVersionPath\n if macroName in definedModules:\n # We've already defined this macroName\n if not commentedOut:\n # Comment out subsequent definitions\n print(\"Old: %s\" % line, end=' ')\n line = string.replace( line, originalLine, '#' + originalLine )\n print(\"New: %s\" % line, end=' ')\n modified = True\n else:\n definedModules[macroName] = newVersionPath\n if commentedOut:\n # Uncomment the line\n print(\"Old: %s\" % line, end=' ')\n line = string.strip( line, '# ' )\n print(\"New: %s\" % line, end=' ')\n modified = True\n if oldVersionPath != newVersionPath:\n print(\"Old: %s\" % line, end=' ')\n line = string.replace( line, oldVersionPath, newVersionPath )\n print(\"New: %s\" % line, end=' ')\n modified = True\n\n if not \"BASE\" in newMacroVersions:\n lineCache += line\n continue\n\n # Handle BASE related macros\n #if not isMacroBaseRelated:\n if macroName in [ \"EPICS_BASE\", \"EPICS_BASE_VER\", \"EPICS_MODULES\", \"MODULES_SITE_TOP\" ]:\n lineCache += line\n continue\n\n newBaseVersion = newMacroVersions[\"BASE\"]\n oldBaseVersion = oldMacroVersions[\"BASE\"]\n if oldBaseVersion == newBaseVersion:\n lineCache += line\n continue\n\n if VersionToRelNumber(newBaseVersion) < 3.141205:\n baseDirName = \"base-%s\" % newBaseVersion\n else:\n baseDirName = newBaseVersion\n\n if VersionToRelNumber(oldBaseVersion) >= 3.141205:\n # For these, just replace all old instances of base version w/ new version\n oldLine = line\n line = string.replace( line, oldBaseVersion, newBaseVersion )\n if newBaseVersion in line:\n print(\"Old: %s\" % oldLine, end=' ')\n print(\"New: %s\" % line, end=' ')\n modified = True\n lineCache += line\n continue\n\n if\t \"EPICS_BASE_VER\" in oldVersionPath \\\n or \"BASE_MODULE_VERSION\" in oldVersionPath:\n lineCache += line\n continue\n\n # Handle fixing unusual paths\n if macroName == \"EPICS_BASE_VER\":\n oldLine = line\n #line = string.replace( line, oldBaseVersion, newBaseVersion )\n #line = string.replace( line, oldVersionPath, baseDirName )\n if True or newBaseVersion in line:\n print(\"Old: %s\" % oldLine, end=' ')\n print(\"New: %s\" % line, end=' ')\n modified = True\n\n if macroName == \"EPICS_BASE\":\n if \"BASE_MODULE_VERSION\" in oldVersionPath:\n newVersionPath = \"$(EPICS_SITE_TOP)/base/$(BASE_MODULE_VERSION)\"\n elif \"EPICS_BASE_VER\" in oldVersionPath:\n newVersionPath = \"$(EPICS_SITE_TOP)/base/$(EPICS_BASE_VER)\"\n else:\n newVersionPath = \"$(EPICS_SITE_TOP)/base/%s\" % baseDirName \n if oldVersionPath != newVersionPath:\n print(\"Old: %s\" % line, end=' ')\n line = string.replace( line, oldVersionPath, newVersionPath )\n print(\"New: %s\" % line, end=' ')\n modified = True\n\n if macroName == \"EPICS_MODULES\" or macroName == \"MODULES_SITE_TOP\":\n if \"BASE_MODULE_VERSION\" in oldVersionPath:\n newVersionPath = \"$(EPICS_SITE_TOP)/$(BASE_MODULE_VERSION)/modules\"\n else:\n newVersionPath = \"$(EPICS_SITE_TOP)/%s/modules\" % newBaseVersion\n if oldVersionPath != newVersionPath:\n print(\"Old: %s\" % line, end=' ')\n line = string.replace( line, oldVersionPath, newVersionPath )\n print(\"New: %s\" % line, end=' ')\n modified = True\n\n lineCache += line\n continue\n\n in_file.close()\n if not modified:\n if verbose:\n print(\"%s, No change\" % filePath)\n return 0\n\n # Replace prior version w/ updates\n try:\n os.remove( filePath )\n out_file = open( filePath, 'w' )\n out_file.writelines( lineCache )\n out_file.close()\n except OSError as e:\n sys.stderr.write( 'Could not remove \"%s\": %s\\n' % ( filePath, e.strerror 
) )\n return 0\n except IOError as e:\n sys.stderr.write( 'Could not replace \"%s\": %s\\n' % ( filePath, e.strerror ) )\n return 0\n print(\"%s, UPDATED\" % filePath)\n return 1",
"def update_version(filepath, version):\n\n filepath = re_version_take.sub('_v{version:02}_t{take:02}'.format(\n **{\n 'version': version.version,\n 'take': version.take\n }\n ), filepath)\n\n return filepath",
"def update_release(\n self,\n chart,\n dry_run,\n name='',\n disable_hooks=False,\n values=None\n ):\n\n values = Config(raw=yaml.safe_dump(values or {}))\n\n # build release install request\n stub = ReleaseServiceStub(self.channel)\n release_request = UpdateReleaseRequest(\n chart=chart,\n dry_run=dry_run,\n disable_hooks=disable_hooks,\n values=values,\n name=name\n )\n\n stub.UpdateRelease(\n release_request,\n self.timeout,\n metadata=self.metadata\n )",
"def inc_packageversion(self):\n match = re.compile(r\"^(\\d*)\\.?((\\d*)|(corr\\d*corr))$\")\n m = match.search(self.packageversion)\n #print(m.group(0))\n #print(m.group(1)[4:-4])\n #print(m.group(2))\n if m:\n if \"corr\" in m.group(2):\n pre_inc = int(m.group(2)[4:-4]) + 1\n elif m.group(2) == \"\":\n pre_inc = 1\n else:\n pre_inc = int(m.group(2)) + 1\n self.packageversion = m.group(1) + \".corr\" + str(pre_inc) + \"corr\"\n else:\n self.packageversion = \"99999error\"",
"def test02_update_gppkg_higher(self):\n #Use gppkg from previous test\n self.install(self.alpha_spec.get_filename())\n \n #New gppkg with higher gppkg, main and deps version\n update_main_rpm_spec = RPMSpec(\"A\", \"1\", \"2\", [\"B = 1-2\"])\n update_dep_rpm_spec = RPMSpec(\"B\", \"1\", \"2\")\n update_gppkg_spec = GppkgSpec(\"alpha\", \"1.1\")\n update_gppkg_file = self.build(update_gppkg_spec, update_main_rpm_spec, [update_dep_rpm_spec]) \n \n self.update(update_gppkg_file)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function for importing custom scons file. Making this a function allows us to export 'env' without namespace pollution in the parent.
|
def load_local(env_script, env):
# pylint: disable=unused-argument
SConscript(env_script, exports=['env'])
|
[
"def get_env():\n env.output_prefix = False\n run('export | sed -e \"s/declare -x/export/g\"')",
"def environInject(shellName):",
"def extend_env(extra_env):\n env = os.environ.copy()\n env.update(extra_env)\n return env",
"def get_environment(py_src):\n env_src = \".\".join((os.path.splitext(py_src)[0], 'env', 'yml'))\n return read_configuration_file(env_src)",
"def expanded_env_dict():\n return generate_expanded_env_dict()",
"def shell_source(script):\n pipe = subprocess.Popen(\". %s; env\" % script, stdout=subprocess.PIPE, shell=True)\n output = pipe.communicate()[0]\n env = dict((line.split(\"=\", 1) for line in output.splitlines()))\n os.environ.update(env)",
"def get_env_under(env):\n fname = env.config['tripleo']['undercloud_env']\n return util.parse_env_file(fname, '^OS_|_VERSION=')",
"def extension_environ(env_config_path, monkeypatch):\n monkeypatch.setattr(serverextension, \"ENV_CONFIG_PATH\", [str(env_config_path)])",
"def load_env() -> None:\n for file in find_env():\n with file.open(\"r\") as f:\n for line in f.readlines():\n key, value = line.strip().rstrip().split(\"=\")\n key = re.sub(r\"[^A-Za-z0-9_]\", \"_\", key).upper()\n os.environ[key] = value",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def get_env_over(env):\n fname = env.config['tripleo']['overcloud_env']\n return util.parse_env_file(fname, '^OS_|_VERSION=')",
"def setup_environment():",
"def shell_source(script):\n \n pipe = subprocess.Popen(\". %s; env\" % script, stdout=subprocess.PIPE, shell=True)\n output = pipe.communicate()[0]\n env = dict((line.split(\"=\", 1) for line in output.splitlines()))\n return env",
"def load_env():\n project_dir = dirname(dirname(__file__))\n dotenv.read_dotenv(join(project_dir, '.env'))",
"def get_config_file(env):\n return f'config/config_{env.lower()}.py'",
"def BuildEnv(env=None, inherit_env=[]):\n if env == None:\n env = {}\n fixed_env = env.copy()\n for varname in inherit_env:\n fixed_env[varname] = os.environ[varname]\n if sys.platform == \"win32\":\n # Win32 requires certain environment variables be present\n for k in (\"COMSPEC\", \"SystemRoot\"):\n if k in os.environ and k not in fixed_env:\n fixed_env[k] = os.environ[k]\n return fixed_env",
"def set_env():\n from kolibri import dist as kolibri_dist # noqa\n\n monkey_patch_collections()\n\n monkey_patch_translation()\n\n sys.path = [os.path.realpath(os.path.dirname(kolibri_dist.__file__))] + sys.path\n\n # Add path for c extensions to sys.path\n prepend_cext_path(os.path.realpath(os.path.dirname(kolibri_dist.__file__)))\n\n # This was added in\n # https://github.com/learningequality/kolibri/pull/580\n # ...we need to (re)move it /benjaoming\n # Force python2 to interpret every string as unicode.\n if sys.version[0] == \"2\":\n reload(sys) # noqa\n sys.setdefaultencoding(\"utf8\")\n\n # Dynamically add the path of `py2only` to PYTHONPATH in Python 2 so that\n # we only import the `future` and `futures` packages from system packages when\n # running with Python 3. Please see `build_tools/py2only.py` for details.\n sys.path = sys.path + [\n os.path.join(\n os.path.realpath(os.path.dirname(kolibri_dist.__file__)), \"py2only\"\n )\n ]\n\n # Set default env\n for key, value in ENVIRONMENT_VARIABLES.items():\n if \"default\" in value:\n os.environ.setdefault(key, value[\"default\"]())",
"def __init__(self, case_root=None, infile=\"env_archive.xml\", read_only=False):\n schema = os.path.join(utils.get_schema_path(), \"env_archive.xsd\")\n EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only)",
"def load_config_from_env(self):\n app_envs = filter(\n lambda s: s.startswith(\n '{}_'.format(self.name.upper())), os.environ.keys())\n for env_key in app_envs:\n if os.environ[env_key]:\n self.config[env_key] = os.environ[env_key]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the user's timeline with its list of tweets and aggregate the tweet text into one document.
|
def aggregate_tweets(self, timeline, lang=None):
if lang is None:
twt_doc = ' '.join([t['text'] for t in timeline['tweets']])
else:
twt_doc = ' '.join([t['text'] for t in timeline['tweets'] if t['lang'] == lang])
return {'user_id': timeline['user_id'], 'all_tweets': twt_doc}
|
[
"def get_user_tweets(self):\n tweets = []\n for status in tweepy.Cursor(self.api.user_timeline).items():\n tweets.append(status)\n return tweets",
"def tweeterise_timeline(self, request, blasts):\n tweets = []\n for blast in blasts:\n tweets.append(self.tweeterise_blast(request, blast))\n return tweets",
"def get_tweets_from_timeline(self):\n tweets = []\n for status in tweepy.Cursor(self.api.home_timeline).items(200):\n tweets.append(status)\n return tweets",
"def preprocess_raw_tweets(self, raw_tweets):\n\n def wait_for_awhile():\n wait = 10\n time.sleep(wait)\n\n twts = list()\n for user_data in raw_tweets:\n try:\n recent_tweets = [twt for twt in user_data['tweets']]\n\n # Aggregate the tweets to create the document\n text = ' '.join([tw['text'] for tw in recent_tweets])\n\n item = {\n 'raw_text': text,\n 'user_id': user_data['id'],\n 'len_text': len(text),\n 'n_tweets': len(recent_tweets),\n 'screen_name': user_data['screen_name'],\n 'lang': user_data['lang'],\n 'parent': self.account_name,\n }\n\n # do we already have this account in the db?\n # twt = db.tweets.find({'user_id': id, 'parent': screen_name})\n\n # if we do, update the data else create a new entry\n # if twt.count() == 0:\n # store document\n print(\"New account:\", user_data['screen_name'],\n user_data['id'], len(recent_tweets), user_data['lang'])\n twts.append(item)\n # else:\n # # update the existing account record\n # res = db.tweets.replace_one(\n # {'user_id': id, 'parent': screen_name}, item\n # )\n # # result of the update\n # if res.matched_count == 0:\n # print(\"no match for id: \", id)\n # elif res.modified_count == 0:\n # print(\"no modification for id: \", id)\n # else:\n # print(\"replaced \", timeline[0]['user']['screen_name'],\n # id, len(recent_tweets), timeline[0]['lang'])\n except TwythonRateLimitError as e:\n wait_for_awhile()\n except TwythonAuthError as e:\n print(e)\n except:\n # Keep track of the ID that errored out\n print(\" FAILED:\", id)\n print(\"Unexpected error:\", sys.exc_info()[0])\n pass\n return twts",
"def list_timeline(user, list_id, owner_id, since_id, count):\n try:\n t = TwitterUser(user.access_token, user.access_token_secret)\n return t.get_list_timeline(list_id, owner_id, since_id, count)\n except twitter.TwitterError as e:\n from random import choice, randint\n if e.message[0]['code'] == 88:\n user = list(User.objects(access_token_active = True).skip(randint(0,1)).limit(10))\n user = choice(user)\n return list_timeline(user, list_id, owner_id, since_id, count)\n else:\n raise e",
"def userTweets(username):\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n user_tweet = api.GetUserTimeline(screen_name=username)\n for tweet in user_tweet:\n util.safe_print(tweet.GetText())",
"def tweets(self):\n tweet=[] # creating a list to add all of the tweets text to\n for json_file in self.data:\n tweet.append(json_file[\"text\"])# adding the text of the tweets to the list\n return tweet # returning the list of tweets so that I can use this function tweets and apply it",
"def analyze_timeline(self, timeline):\n\t\t# TODO should be no different if timeline or just an array of tweets\n\t\t# TODO should the crawler do the looping before so that analyze only gets tweets?\n\n\t\tnum_retweets = 0\n\t\tretweets_age = []\n\t\tnum_replys = 0\n\t\treplys_age = []\n\t\tnum_tweets = 0\n\t\ttweets_age = []\n\t\thash_dict = {}\n\n\t\tfor tweet in timeline:\n\t\t\tif helper.is_retweet(tweet):\n\t\t\t\tretweets_age.append(helper.days_until(tweet.created_at))\n\t\t\t\tnum_retweets += 1\n\t\t\telif helper.is_reply(tweet):\n\t\t\t\treplys_age.append(helper.days_until(tweet.created_at))\n\t\t\t\tnum_replys += 1\n\t\t\telse:\n\t\t\t\tnum_tweets += 1\n\t\t\t\ttweets_age.append(helper.days_until(tweet.created_at))\n\t\t\t\thash_dict = update_hash_dict(hash_dict, tweet.entities['hashtags'])\n\n\t\tout = {\n\t\t\t\"counts\": {\n\t\t\t\t\"retweets\": num_retweets,\n\t\t\t\t\"replys\": num_replys,\n\t\t\t\t\"tweets\": num_tweets\n\t\t\t},\n\t\t\t\"ages\": {\n\t\t\t\t\"retweets\": retweets_age,\n\t\t\t\t\"replys_age\": replys_age,\n\t\t\t\t\"tweets_age\": tweets_age\n\t\t\t},\n\t\t\t\"hash_dict\": hash_dict\n\t\t}\n\n\t\treturn out",
"def get_tweets(self, account, number=MAX_TWEETS, since_id=None, max_id=None):\n import twitter\n\n all_tweets = []\n while number > 0:\n try:\n tweets = self.api.GetUserTimeline(\n screen_name=account,\n include_rts=False,\n exclude_replies=True,\n count=min(number, CHUNK_SIZE),\n since_id=since_id,\n max_id=max_id,\n )\n except twitter.TwitterError as e:\n raise plugin.PluginError(f'Unable to fetch timeline {account} for {e}')\n\n if not tweets:\n break\n\n all_tweets += tweets\n number -= len(tweets)\n max_id = tweets[-1].id - 1\n\n return all_tweets",
"def get_tweets_for(user, ntweets=200, max_id=None, since_id=None):\r\n params = {}\r\n if max_id:\r\n params['max_id'] = max_id\r\n if since_id:\r\n params['since_id'] = since_id\r\n user_tweets, iters = [], 0\r\n while len(user_tweets) < ntweets and iters < MAX_REQUESTS_PER_15_MIN:\r\n nrequested = min(200, ntweets - len(user_tweets))\r\n tweets = twitter_api.statuses.user_timeline(screen_name=user,\r\n count=nrequested, include_rts=0, **params)\r\n user_tweets.extend(tweets)\r\n iters += 1\r\n if len(tweets) == 0:\r\n ## got no results: maybe hit limit, or ran out of tweets, or error\r\n break\r\n params['max_id'] = tweets[-1]['id']\r\n return user_tweets",
"def _get_all_timeline(self, screen_name, since_id=None):\n if since_id is not None:\n data = self._twitter_instance.statuses.user_timeline(\n screen_name=screen_name, count=200, trim_user=True, since_id=since_id)\n else:\n data = self._twitter_instance.statuses.user_timeline(\n screen_name=screen_name, count=200, trim_user=True)\n while len(data) >= 200:\n print(\"For user {0} we are at {1} tweets\".format(screen_name, str(len(data))))\n last_id = data[-1][\"id\"]\n if since_id is not None:\n _ = self._twitter_instance.statuses.user_timeline(\n screen_name=screen_name, count=200, trim_user=True,\n max_id=last_id, since_id=since_id)\n else:\n _ = self._twitter_instance.statuses.user_timeline(\n screen_name=screen_name, count=200, trim_user=True,\n max_id=last_id)\n if len(_) == 1:\n break\n data += _\n return data",
"def analyze_tweet(tweet, results):\n\n ######################################\n # fields that are relevant for user-level and tweet-level analysis\n # count the number of valid Tweets here\n # if it doesn't have at least a body and an actor, it's not a tweet\n try: \n body = tweet[\"body\"]\n userid = tweet[\"actor\"][\"id\"].split(\":\")[-1]\n results[\"tweet_count\"] += 1\n except (ValueError, KeyError):\n if \"non-tweet_lines\" in results:\n results[\"non-tweet_lines\"] += 1\n return\n\n # count the number of tweets from each user\n if \"tweets_per_user\" in results:\n results[\"tweets_per_user\"][tweet[\"actor\"][\"id\"][15:]] += 1\n \n #######################################\n # fields that are relevant for the tweet-level analysis\n # ------------------> term counts\n # Tweet body term count\n if \"body_term_count\" in results:\n results[\"body_term_count\"].add(tweet[\"body\"])\n\n # count the occurences of different hashtags\n if \"hashtags\" in results:\n if \"hashtags\" in tweet[\"twitter_entities\"]:\n for h in tweet[\"twitter_entities\"][\"hashtags\"]:\n results[\"hashtags\"][h[\"text\"].lower()] += 1\n \n try:\n # count the occurences of different top-level domains\n if (\"urls\" in results) and (\"urls\" in tweet[\"gnip\"]):\n for url in tweet[\"gnip\"][\"urls\"]:\n try:\n results[\"urls\"][url[\"expanded_url\"].split(\"/\")[2]] += 1\n except (KeyError,IndexError,AttributeError):\n pass\n # and the number of links total\n if (\"number_of_links\" in results) and (\"urls\" in tweet[\"gnip\"]):\n results[\"number_of_links\"] += len(tweet[\"gnip\"][\"urls\"])\n except KeyError:\n pass\n \n # -----------> timelines\n # make a timeline of UTC day of Tweets posted\n if \"utc_timeline\" in results:\n date = tweet[\"postedTime\"][0:10]\n results[\"utc_timeline\"][date] += 1\n\n # make a timeline in normalized local time (poster's time) of all of the Tweets\n if \"local_timeline\" in results:\n utcOffset = tweet[\"actor\"][\"utcOffset\"]\n if utcOffset is not None:\n posted = tweet[\"postedTime\"]\n hour_and_minute = (datetime.datetime.strptime(posted[0:16], \"%Y-%m-%dT%H:%M\") + \n datetime.timedelta(seconds = int(utcOffset))).time().strftime(\"%H:%M\")\n results[\"local_timeline\"][hour_and_minute] += 1\n \n # ------------> mention results\n # which users are @mentioned in the Tweet\n if \"at_mentions\" in results:\n for u in tweet[\"twitter_entities\"][\"user_mentions\"]:\n # update the mentions with weight + 1 and \n # list all of the screennames (in case a name changes)\n if u[\"id_str\"] is not None:\n results[\"at_mentions\"][u[\"id_str\"]][\"weight\"] += 1 \n results[\"at_mentions\"][u[\"id_str\"]][\"screennames\"].update([u[\"screen_name\"].lower()])\n \n # count the number of times each user gets replies\n if (\"in_reply_to\" in results) and (\"inReplyTo\" in tweet):\n results[\"in_reply_to\"][tweet[\"inReplyTo\"][\"link\"].split(\"/\")[3].lower()] += 1\n\n # --------------> RTs and quote Tweet\n # count share actions (RTs and quote-Tweets)\n # don't count self-quotes or self-RTs, because that's allowed now\n if ((\"quote_of_user\" in results) or (\"RT_of_user\" in results)) and (tweet[\"verb\"] == \"share\"):\n # if it's a quote tweet\n if (\"quote_of_user\" in results) and (\"twitter_quoted_status\" in tweet[\"object\"]):\n quoted_id = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"id\"][15:]\n quoted_name = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"preferredUsername\"]\n if quoted_id != tweet[\"actor\"][\"id\"]:\n 
results[\"quote_of_user\"][quoted_id][\"weight\"] += 1 \n results[\"quote_of_user\"][quoted_id][\"screennames\"].update([quoted_name])\n # if it's a RT\n elif (\"RT_of_user\" in results):\n rt_of_name = tweet[\"object\"][\"actor\"][\"preferredUsername\"].lower()\n rt_of_id = tweet[\"object\"][\"actor\"][\"id\"][15:]\n if rt_of_id != tweet[\"actor\"][\"id\"]:\n results[\"RT_of_user\"][rt_of_id][\"weight\"] += 1 \n results[\"RT_of_user\"][rt_of_id][\"screennames\"].update([rt_of_name])\n\n # Tweet expended url content term count\n if \"url_content\" in results:\n try:\n urls = tweet[\"gnip\"][\"urls\"]\n except KeyError:\n urls = []\n url_content = \"\"\n for url in urls:\n try:\n expanded_url_title = url[\"expanded_url_title\"]\n if expanded_url_title is None:\n expanded_url_title = \"\"\n except KeyError:\n expanded_url_title = \"\"\n try:\n expanded_url_description = url[\"expanded_url_description\"]\n if expanded_url_description is None:\n expanded_url_description = \"\"\n except KeyError:\n expanded_url_description = \"\"\n url_content = url_content + \" \" + expanded_url_title + \" \" + expanded_url_description\n results[\"url_content\"].add(url_content)\n \n ############################################\n # actor-property qualities\n # ------------> bio terms\n if \"bio_term_count\" in results:\n if tweet[\"actor\"][\"id\"][:15] not in results[\"tweets_per_user\"]:\n try:\n if tweet[\"actor\"][\"summary\"] is not None:\n results[\"bio_term_count\"].add(tweet[\"actor\"][\"summary\"])\n except KeyError:\n pass\n \n # ---------> profile locations\n if \"profile_locations_regions\" in results:\n # if possible, get the user's address\n try:\n address = tweet[\"gnip\"][\"profileLocations\"][0][\"address\"]\n country_key = address.get(\"country\", \"no country available\")\n region_key = address.get(\"region\", \"no region available\")\n except KeyError:\n country_key = \"no country available\"\n region_key = \"no region available\"\n results[\"profile_locations_regions\"][country_key + \" , \" + region_key] += 1",
"def get_all_tweets(screen_name):\n #Twitter only allows access to a users most recent 3240 tweets with this method\n \n #authorize twitter, initialize tweepy\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n \n #initialize a list to hold all the tweepy Tweets\n alltweets = [] \n \n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name = screen_name, count=200, include_rts = True)\n \n #only do this for users that have actually tweeted\n if len(new_tweets) > 0:\n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n \n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name = screen_name,count=200, max_id=oldest, include_rts = True)\n \n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n print \"...%s tweets downloaded so far\" % (len(alltweets))\n \n # Save tweets for user in a json file\n fname = \"tweets/\"+str(screen_name)+\".jsonl\"\n with open(fname, 'w') as f:\n for status in alltweets:\n f.write(json.dumps(status._json)+\"\\n\")\n \n #close the file\n print \"Done with \" + str(screen_name)\n time.sleep(60)\n print \"Sleeping for one minute\"",
"def get_n_tweets(self, username, last_n_tweets=1):\n req = requests.get(url=\"https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s&count=%d\" % (username, last_n_tweets), auth=self.twitter_oauth)\n return [tweet['text'] for tweet in req.json()]",
"def scrape_tweet(tweet):\n\n\n dateUntil = tweet.created_at + timedelta(1)\n tweetCriteria = got.manager.TweetCriteria().setUsername(tweet.author.screen_name).setSince(\n tweet.created_at.strftime(\"%Y-%m-%d\")).setUntil(dateUntil.strftime(\"%Y-%m-%d\")).setMaxTweets(-1)\n found = False\n tweets = got.manager.TweetManager.getTweets(tweetCriteria)\n for tw in tweets:\n if tw.id == tweet.id_str:\n tweet.reply_count = tw.replies\n break;\n return tweet",
"def get_tweets(self):\n\t\treturn self.tweets",
"def timeline():\n from app.api.data.friend import get_friends\n\n username = get_jwt_identity()\n # Check if user exists.\n if not users.exists(username=username):\n return bad_json_response('user not found')\n\n # Get the user's own posts.\n posts_array = get_posts(username)\n\n # Get the user's friends.\n friends = get_friends(username)\n\n for i in range(len(friends)):\n try:\n friend = friends[i]['username']\n friend_address = get_user_ip(friend)\n # Get the posts of the friend.\n response = requests.get(\n friend_address + '/api/user/posts',\n params={\n 'username': friend\n },\n headers=request.headers\n ).json()\n if response['success']:\n posts = response['data']['posts']\n posts_array = posts_array + posts\n except BaseException:\n continue\n\n posts_array = sorted(\n posts_array,\n key=lambda k: datetime.datetime.strptime(k['creation_date'],\n '%Y-%m-%d %H:%M:%S'),\n reverse=True\n )\n\n return good_json_response({\n 'posts': posts_array\n })",
"def get_timeline(username, since_id=None, count=0):\n twitter = OAuth1Session(client_key=settings.CLIENT_KEY, client_secret=settings.CLIENT_SECRET,\n resource_owner_key=settings.ACCESS_TOKEN_KEY,\n resource_owner_secret=settings.ACCESS_TOKEN_SECRET)\n url = 'https://api.twitter.com/1.1/statuses/user_timeline.json'\n params = {\n 'screen_name': username,\n }\n if since_id:\n params.update(since_id=since_id)\n if count:\n params.update(count=count)\n r = twitter.get(url, params=params)\n return r.json()",
"def timeline():\n if not g.user:\n return redirect(url_for('public_timeline'))\n users = [u.username for u in g.user.followers] or []\n users.append(g.user.username)\n messages = Message.objects.filter(author__in=users)\\\n .order_by('-pub_date').limit(PER_PAGE)\n return render_template('timeline.html', messages=messages)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get timelines for all friends (following) for this twitter account and return tweets aggregated for each user.
|
def get_timelines_for_parent(self, parent_name):
db = self.db
cursor = db.tweets.find({'parent_account': parent_name})
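    # Each cursor document holds one friend's timeline; aggregate its tweets into a single text blob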
friends_tweets = []
for tl in range(cursor.count()):
friends_tweets.append(self.aggregate_tweets(cursor.next()))
return friends_tweets
|
[
"def get_friends_tweets(self):\n tweets = []\n for friend in self.friends:\n for tweet in tweepy.Cursor(self.API.user_timeline).items():\n tweets.append(tweet._json)\n print(tweets,\"\\n\")\n \n return tweets",
"def get_user_tweets(self):\n tweets = []\n for status in tweepy.Cursor(self.api.user_timeline).items():\n tweets.append(status)\n return tweets",
"def get_tweets(self, account, number=MAX_TWEETS, since_id=None, max_id=None):\n import twitter\n\n all_tweets = []\n while number > 0:\n try:\n tweets = self.api.GetUserTimeline(\n screen_name=account,\n include_rts=False,\n exclude_replies=True,\n count=min(number, CHUNK_SIZE),\n since_id=since_id,\n max_id=max_id,\n )\n except twitter.TwitterError as e:\n raise plugin.PluginError(f'Unable to fetch timeline {account} for {e}')\n\n if not tweets:\n break\n\n all_tweets += tweets\n number -= len(tweets)\n max_id = tweets[-1].id - 1\n\n return all_tweets",
"def _get_all_timeline(self, screen_name, since_id=None):\n if since_id is not None:\n data = self._twitter_instance.statuses.user_timeline(\n screen_name=screen_name, count=200, trim_user=True, since_id=since_id)\n else:\n data = self._twitter_instance.statuses.user_timeline(\n screen_name=screen_name, count=200, trim_user=True)\n while len(data) >= 200:\n print(\"For user {0} we are at {1} tweets\".format(screen_name, str(len(data))))\n last_id = data[-1][\"id\"]\n if since_id is not None:\n _ = self._twitter_instance.statuses.user_timeline(\n screen_name=screen_name, count=200, trim_user=True,\n max_id=last_id, since_id=since_id)\n else:\n _ = self._twitter_instance.statuses.user_timeline(\n screen_name=screen_name, count=200, trim_user=True,\n max_id=last_id)\n if len(_) == 1:\n break\n data += _\n return data",
"def get_tweets_for(user, ntweets=200, max_id=None, since_id=None):\r\n params = {}\r\n if max_id:\r\n params['max_id'] = max_id\r\n if since_id:\r\n params['since_id'] = since_id\r\n user_tweets, iters = [], 0\r\n while len(user_tweets) < ntweets and iters < MAX_REQUESTS_PER_15_MIN:\r\n nrequested = min(200, ntweets - len(user_tweets))\r\n tweets = twitter_api.statuses.user_timeline(screen_name=user,\r\n count=nrequested, include_rts=0, **params)\r\n user_tweets.extend(tweets)\r\n iters += 1\r\n if len(tweets) == 0:\r\n ## got no results: maybe hit limit, or ran out of tweets, or error\r\n break\r\n params['max_id'] = tweets[-1]['id']\r\n return user_tweets",
"def list_timeline(user, list_id, owner_id, since_id, count):\n try:\n t = TwitterUser(user.access_token, user.access_token_secret)\n return t.get_list_timeline(list_id, owner_id, since_id, count)\n except twitter.TwitterError as e:\n from random import choice, randint\n if e.message[0]['code'] == 88:\n user = list(User.objects(access_token_active = True).skip(randint(0,1)).limit(10))\n user = choice(user)\n return list_timeline(user, list_id, owner_id, since_id, count)\n else:\n raise e",
"def timeline():\n from app.api.data.friend import get_friends\n\n username = get_jwt_identity()\n # Check if user exists.\n if not users.exists(username=username):\n return bad_json_response('user not found')\n\n # Get the user's own posts.\n posts_array = get_posts(username)\n\n # Get the user's friends.\n friends = get_friends(username)\n\n for i in range(len(friends)):\n try:\n friend = friends[i]['username']\n friend_address = get_user_ip(friend)\n # Get the posts of the friend.\n response = requests.get(\n friend_address + '/api/user/posts',\n params={\n 'username': friend\n },\n headers=request.headers\n ).json()\n if response['success']:\n posts = response['data']['posts']\n posts_array = posts_array + posts\n except BaseException:\n continue\n\n posts_array = sorted(\n posts_array,\n key=lambda k: datetime.datetime.strptime(k['creation_date'],\n '%Y-%m-%d %H:%M:%S'),\n reverse=True\n )\n\n return good_json_response({\n 'posts': posts_array\n })",
"def last_tweets(self):\n last_tweets = []\n\n for user in self.Users:\n last = self.get_last(user)\n last_tweets.append(last)\n\n return last_tweets",
"def get_all_tweets(usernames):\n length = len(usernames)\n # For each username, get the tweets\n for i in range(length):\n # Creating dataframe if first user\n if i == 0:\n tweets = get_tweets(usernames[i])\n else:\n new_tweets = get_tweets(usernames[i])\n # Appending the tweets to current dataframe\n tweets = pd.concat([tweets, new_tweets])\n return tweets",
"def get_all_tweets(screen_name,keys=keys,filter=True):\n\t\n\tconsumer_key,consumer_secret,access_key,access_secret = keys\n\n\t#re\n\trt = r'^RT'\n\tlink = r'https?:\\/\\/([\\w\\.-]+)\\/([\\w\\.-]+)'\n\tmention = r'^\\@'\n\n\t#authorize twitter, initialize tweepy\n\tauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_key, access_secret)\n\tapi = tweepy.API(auth)\n\t\n\t#initialize a list to hold all the tweepy Tweets\n\talltweets = []\t\n\t\n\t#make initial request for most recent tweets (200 is the maximum allowed count)\n\tnew_tweets = api.user_timeline(screen_name = screen_name,count=200,tweet_mode='extended')\n\t\n\t#save most recent tweets\n\talltweets.extend(new_tweets)\n\t\n\t#save the id of the oldest tweet less one\n\toldest = alltweets[-1].id - 1\n\t\n\t#keep grabbing tweets until there are no tweets left to grab\n\twhile len(new_tweets) > 0:\n\t\tprint(\"getting tweets before {}\".format(oldest))\n\t\t\n\t\t#all subsiquent requests use the max_id param to prevent duplicates\n\t\tnew_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest,tweet_mode='extended')\n\t\t\n\t\t#save most recent tweets\n\t\talltweets.extend(new_tweets)\n\t\t\n\t\t#update the id of the oldest tweet less one\n\t\toldest = alltweets[-1].id - 1\n\t\t\n\t\tprint(\"...{} tweets downloaded so far\".format(len(alltweets)))\n\t\n\t#transform the tweepy tweets into a 2D array that will populate the csv\t\n\tif filter: \n\t\touttweets = [tweet.full_text for tweet in alltweets if not re.match(rt, tweet.full_text) and not re.match(mention, tweet.full_text)]\n\t\tpreproc = [re.sub(link, \"\", tweet)+\"\\n\" for tweet in outtweets][::-1]\n\telse: \n\t\touttweets = [tweet.full_text for tweet in alltweets]\n\t\n\t#write the csv\t\n\twith open('tweets/{}_tweets.txt'.format(screen_name), 'w', encoding='utf-8') as f:\n\t\tf.writelines(preproc)\n\t\tprint('tweets/{}_tweets.txt was successfully created.'.format(screen_name))\n\tpass",
"def get_followers(auth):\r\n\r\n # Authorize API\r\n api = tweepy.API(auth, wait_on_rate_limit = True, wait_on_rate_limit_notify = True)\r\n\r\n congress = tweepy.Cursor(api.list_members, \"cspan\", \"members-of-congress\").items()\r\n governors = tweepy.Cursor(api.list_members, \"cspan\", \"governors\").items()\r\n\r\n all_accounts = itertools.chain(congress, governors)\r\n\r\n # Get followers for each account; write to CSV\r\n for i, acct in enumerate(all_accounts):\r\n user_id = acct.id_str\r\n username = acct.name\r\n\r\n try:\r\n # Keep those with at least 5000 followers\r\n if acct.followers_count < 5000:\r\n print(\"{0}: Skipped {1} / {2} (too few)\".format(i, user_id, username))\r\n continue\r\n\r\n # For time purposes, ignore those with over 100k followers\r\n # This cuts the runtime by 75% (due to rate limiting)\r\n if acct.followers_count > 100000:\r\n print(\"{0}: Skipped {1} / {2} (too many)\".format(i, user_id, username))\r\n continue\r\n\r\n # If we already made the followers list, skip it (because script may\r\n # be restarted occasionally)\r\n fname = \"followers_lists/{}.csv\".format(user_id)\r\n possible_file = pathlib.Path(fname)\r\n if possible_file.is_file():\r\n print(\"{0}: Skipped {1} / {2} (already have)\".format(i, user_id, username))\r\n continue\r\n\r\n print(\"{0}: Processing {1} / {2}\".format(i, user_id, username))\r\n\r\n # Collect list of followers\r\n followers = []\r\n for page in tweepy.Cursor(api.followers_ids, id = user_id).pages():\r\n followers.extend(page)\r\n time.sleep(60)\r\n\r\n # Write list of followers to CSV\r\n fname = \"followers_lists/{}.csv\".format(user_id)\r\n with open(fname, \"w\") as outfile:\r\n writer = csv.writer(outfile)\r\n for follower in followers:\r\n writer.writerow([follower])\r\n\r\n print(\"{0}: Got followers for {1} / {2}\".format(i, user_id, username))\r\n except UnicodeEncodeError:\r\n print(\"{0}: Skipped {1} (unicode)\".format(i, user_id))\r\n continue",
"def userTweets(username):\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n user_tweet = api.GetUserTimeline(screen_name=username)\n for tweet in user_tweet:\n util.safe_print(tweet.GetText())",
"def get_tweets_from_timeline(self):\n tweets = []\n for status in tweepy.Cursor(self.api.home_timeline).items(200):\n tweets.append(status)\n return tweets",
"def get_all_tweets(screen_name):\n #Twitter only allows access to a users most recent 3240 tweets with this method\n \n #authorize twitter, initialize tweepy\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n \n #initialize a list to hold all the tweepy Tweets\n alltweets = [] \n \n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name = screen_name, count=200, include_rts = True)\n \n #only do this for users that have actually tweeted\n if len(new_tweets) > 0:\n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n \n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name = screen_name,count=200, max_id=oldest, include_rts = True)\n \n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n print \"...%s tweets downloaded so far\" % (len(alltweets))\n \n # Save tweets for user in a json file\n fname = \"tweets/\"+str(screen_name)+\".jsonl\"\n with open(fname, 'w') as f:\n for status in alltweets:\n f.write(json.dumps(status._json)+\"\\n\")\n \n #close the file\n print \"Done with \" + str(screen_name)\n time.sleep(60)\n print \"Sleeping for one minute\"",
"def tweeterise_timeline(self, request, blasts):\n tweets = []\n for blast in blasts:\n tweets.append(self.tweeterise_blast(request, blast))\n return tweets",
"def get_tweets_for_user(self, user_id):\n tweets = [tweet for tweet in self.tweets if tweet.user.id == user_id]\n # print(tweets)\n return tweets",
"def get_tracked_twitter_tl_users():\n eleanor_logger.debug('Getting listing of tracked twitter users')\n tracked_users = []\n with GetDBSession() as db_session:\n tracked_users_query = db_session.query(\n twitter_models.PolledTimelineUsers\n )\n for user in tracked_users_query:\n tracked_users.append(user.user_name)\n return tracked_users",
"def get_timeline(username, since_id=None, count=0):\n twitter = OAuth1Session(client_key=settings.CLIENT_KEY, client_secret=settings.CLIENT_SECRET,\n resource_owner_key=settings.ACCESS_TOKEN_KEY,\n resource_owner_secret=settings.ACCESS_TOKEN_SECRET)\n url = 'https://api.twitter.com/1.1/statuses/user_timeline.json'\n params = {\n 'screen_name': username,\n }\n if since_id:\n params.update(since_id=since_id)\n if count:\n params.update(count=count)\n r = twitter.get(url, params=params)\n return r.json()",
"def get_n_tweets(self, username, last_n_tweets=1):\n req = requests.get(url=\"https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s&count=%d\" % (username, last_n_tweets), auth=self.twitter_oauth)\n return [tweet['text'] for tweet in req.json()]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Base class for an initialiser with a distribution between [low,high]
|
def __init__(self, low=0, high=1):
self.low = low
self.high = high
|
[
"def __init__(self, a, b, the_seed=None):\n super(UniformRNS, self).__init__(the_seed)\n self.upper_bound = a\n self.lower_bound = b\n self.width = self.upper_bound - self.lower_bound",
"def __init__(self) :\n\t\t#later may want to pass in something that effects the probability of certain events\n\t\tself._rand = Random()\n\t\tself._seed = self._rand.seed()\n\t\tself._randNum = self._rand.randint(0,100)\n\t\tself._noEventRange = range(0,50)\n\t\tself._goodEventRange = range(50,75)\n\t\tself._badEventRange = range(75,101)",
"def __init__(self, base=2, *percentages):\n\n self.values = range(base)\n self.percentages = percentages\n self.make_percentages_cumulative(percentages)",
"def __init__(self, p, f, a, b):\n self.f = f\n self.p = p #error allowed.\n self.lim = a, b #lower and upper limits.\n self.bins = 10",
"def uniform(self, lower, upper):\n # adjust the bins\n gsl.histogram_uniform(self.data, lower, upper)\n # and return\n return self",
"def create(initSampleCount=..., initSeedCount=..., pointDistribution=...) -> retval:\n ...",
"def __init__(self) -> None:\n super(NormalRandomizer, self).__init__()\n self._log_probability_sum = 0.0",
"def __init__(self, bound_list=[], oversampling=[]):\r\n self.bound_list = bound_list\r\n self.oversampling = oversampling",
"def __init__(self, minimum_value=None, maximum_value=None):\n if minimum_value is None and maximum_value is None:\n raise ValueError(\n \"Either 'minimum_value' or 'maximum_value' must be specified.\"\n )\n\n self.minimum_value = minimum_value\n self.maximum_value = maximum_value",
"def __init__(self, max_resource: int, min_resource: int):\n self.min_resource = min_resource\n self.max_resource = max_resource",
"def init_uniform(self, ns, lb, ub):\n return np.random.uniform(lb, ub, (ns, ub.shape[0]))",
"def conditional(self, lower, upper):\r\n if lower > self.max_range:\r\n raise Exception(\"Conditioning not allowed, lower bound exceeds distribution range\")\r\n if lower == 0 and upper == np.inf:\r\n self.probabilities = self.counts / self.total\r\n else:\r\n mask = np.zeros(self.max_range + 1)\r\n for i in range(lower, upper + 1):\r\n mask[i] = 1\r\n self.probabilities = self.counts * mask / np.sum(self.counts * mask)",
"def __init__(self, p_min, p_max, domains):\n \n if not check(p_min, p_max, domains):\n raise Exception(\"some constraint is violated!\") \n \n self._p_min = p_min\n self._p_max = p_max\n self._domains = domains",
"def __init__(self, maxNumbers):\n self.ns = range(maxNumbers + 1)\n self.ns[0] = float(\"-inf\")",
"def __init__(self, shard_mapping_id, lower_bound, shard_id):\n super(RangeShardingSpecification, self).__init__()\n self.__shard_mapping_id = shard_mapping_id\n self.__lower_bound = lower_bound\n self.__shard_id = shard_id",
"def test_hist_range():\n vals = np.array([20, 40, 60])\n C = classifier.HistogramClassifier(vals, 2.0, range=(0, 100))\n assert np.all(C.classified == np.array([1, 1, 2]))\n assert C.range == (0, 100)",
"def __init__(self, domain, range, reduce=True):\n\t\t#The boring checks\n\t\tif len(domain) != len(range):\n\t\t\traise TypeError(\"Domain basis has {} elements, but range basis has {} elements.\".format(\n\t\t\t len(domain), len(range)))\n\n\t\tif domain.signature.arity != range.signature.arity:\n\t\t\traise TypeError(\"Domain arity {} does not equal the range arity {}.\".format(\n\t\t\t domain.signature.arity, range.signature.arity))\n\n\t\t#Expand any non-simple words\n\t\tHomomorphism._expand(domain, range)\n\t\tdomain, range = Generators.sort_mapping_pair(domain, range)\n\n\t\t#Remove any redundancy---like reducing tree pairs.\n\t\t#How do you know that you've found the smallest possible nice basis if you haven't kept everything as small as possible throughout?\n\t\tif reduce:\n\t\t\tHomomorphism._reduce(domain, range)\n\n\t\t#Check that domain is a free generating set\n\t\ti, j = domain.test_free()\n\t\tif not(i == j == -1):\n\t\t\traise ValueError(\"Domain is not a free generating set. Check elements at indices {} and {}.\".format(\n\t\t\t i, j))\n\n\t\t#Check to see that the domain generates all of V_{n,r}\n\t\tmissing = domain.test_generates_algebra()\n\t\tif len(missing) > 0:\n\t\t\traise ValueError(\"Domain {} does not generate V_{}. Missing elements are {}.\".format(\n\t\t\t domain, domain.signature, [format(x) for x in missing]))\n\n\t\tself.domain = domain\n\t\tself.range = range\n\t\tself.domain_relabeller = None\n\t\tself.range_relabeller = None\n\n\t\t#Setup the mapping cache\n\t\tself._map = {}\n\t\tfor d, r in zip(self.domain, self.range):\n\t\t\tself._set_image(d, r, self.domain.signature, self.range.signature, self._map)\n\n\t\t#Compute and cache the images of any simple word above self.domain.\n\t\tfor root in Generators.standard_basis(domain.signature):\n\t\t\tself._image_simple_above_domain(root, self.domain.signature, self.range.signature, self._map)",
"def __init__(self, dim, distribution, proposal_distribution): \n self.dim = dim\n self.distribution = distribution\n self.proposal_distribution = proposal_distribution\n self.x = None\n self.init_x()",
"def __init__(self, datasets, bins=10):\n\n super(EqualIntervalClassifier, self).__init__()\n\n # Find the absolute min and max of all datasets. Assume we have\n # VariableVW instances and catch any array-like objects\n\n try:\n abs_min = min(datasets[0].values)\n abs_max = max(datasets[0].values)\n except AttributeError:\n abs_min = min(datasets[0])\n abs_max = max(datasets[0])\n\n for ds in datasets:\n try:\n if min(ds.values) < abs_min:\n abs_min = min(ds.values)\n if max(ds.values) > abs_max:\n abs_max = max(ds.values)\n except AttributeError:\n if min(ds) < abs_min:\n abs_min = min(ds)\n if max(ds) > abs_max:\n abs_max = max(ds)\n\n # Make sure bins can be cast as an integer\n try:\n bins = int(bins)\n except ValueError:\n raise ValueError('Bins must be a number')\n\n # Catch negative bins - otherwise they silently succeed\n if bins <= 0:\n raise ValueError('Number of bins must be a positive integer')\n\n # Create the bins based on these values\n self.edges = np.linspace(abs_min, abs_max, num=bins + 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A common function which ensures that two observables contain derivatives with respect to the same force field parameters, and refactors these derivatives into more easily manipulable dictionaries.
|
def _compatible_gradients(
self, other: T
) -> Tuple[
Dict[ParameterGradientKey, ParameterGradient],
Dict[ParameterGradientKey, ParameterGradient],
]:
self_gradients = {gradient.key: gradient for gradient in self._gradients}
other_gradients = {gradient.key: gradient for gradient in other._gradients}
if {*self_gradients} != {*other_gradients}:
raise ValueError(
"Two observables can only be operated on if they contain gradients "
"with respect to the same force field parameters."
)
return self_gradients, other_gradients
|
[
"def compute_observations_and_derivatives(self, state1, state2, dt): \n data1 = self.compute_observations(state1)\n data2 = self.compute_observations(state2)\n \n # For each sensor type (e.g., sensor_type = 'rangefinder')\n for sensor_type in self.config.sensor_types.keys():\n datas1 = getattr(data1, sensor_type)\n datas2 = getattr(data2, sensor_type)\n # for each sensor output\n for i, data in enumerate(datas1):\n # for each output of the sensor (e.g., obs = 'luminance') \n for obs in data.__dict__.keys():\n y1 = datas1[i].__dict__[obs]\n y2 = datas2[i].__dict__[obs]\n if not isinstance(y1, ndarray):\n # ignore spurious members in the response\n # TODO: make this check more strict\n continue\n average = (y1 + y2) / 2.0\n derivative = (y1 - y2) / dt\n datas1[i].__dict__[obs] = average\n datas1[i].__dict__[obs + '_dot'] = derivative\n # finally for all the sensels\n average = (data1.sensels + data2.sensels) / 2\n derivative = (data2.sensels - data1.sensels) / dt\n data1.sensels = average\n data1.sensels_dot = derivative \n return data1",
"def fix_dihedrals_by_backbone_mapping(spc_1: ARCSpecies,\n spc_2: ARCSpecies,\n backbone_map: Dict[int, int],\n ) -> Tuple[ARCSpecies, ARCSpecies]:\n if not spc_1.rotors_dict or not spc_2.rotors_dict:\n spc_1.determine_rotors()\n spc_2.determine_rotors()\n spc_1_copy, spc_2_copy = spc_1.copy(), spc_2.copy()\n torsions = get_backbone_dihedral_angles(spc_1, spc_2, backbone_map)\n deviations = [get_backbone_dihedral_deviation_score(spc_1, spc_2, backbone_map, torsions=torsions)]\n # Loop while the deviation improves by more than 1 degree:\n while len(torsions) and (len(deviations) < 2 or deviations[-2] - deviations[-1] > 1):\n for torsion_dict in torsions:\n angle = 0.5 * sum([torsion_dict['angle 1'], torsion_dict['angle 2']])\n spc_1_copy.set_dihedral(scan=convert_list_index_0_to_1(torsion_dict['torsion 1']),\n deg_abs=angle, count=False, chk_rotor_list=False, xyz=spc_1_copy.get_xyz())\n spc_2_copy.set_dihedral(scan=convert_list_index_0_to_1(torsion_dict['torsion 2']),\n deg_abs=angle, count=False, chk_rotor_list=False, xyz=spc_2_copy.get_xyz())\n spc_1_copy.final_xyz, spc_2_copy.final_xyz = spc_1_copy.initial_xyz, spc_2_copy.initial_xyz\n torsions = get_backbone_dihedral_angles(spc_1_copy, spc_2_copy, backbone_map)\n deviations.append(get_backbone_dihedral_deviation_score(spc_1_copy, spc_2_copy, backbone_map, torsions=torsions))\n return spc_1_copy, spc_2_copy",
"def test_disambiguation(cl_and_vals_a, cl_and_vals_b):\n cl_a, vals_a, kwargs_a = cl_and_vals_a\n cl_b, vals_b, kwargs_b = cl_and_vals_b\n\n req_a = {a.name for a in attr.fields(cl_a)}\n req_b = {a.name for a in attr.fields(cl_b)}\n\n assume(len(req_a))\n assume(len(req_b))\n\n assume((req_a - req_b) or (req_b - req_a))\n for attr_name in req_a - req_b:\n assume(getattr(attr.fields(cl_a), attr_name).default is NOTHING)\n for attr_name in req_b - req_a:\n assume(getattr(attr.fields(cl_b), attr_name).default is NOTHING)\n\n fn = create_uniq_field_dis_func(cl_a, cl_b)\n\n assert fn(attr.asdict(cl_a(*vals_a, **kwargs_a))) is cl_a",
"def process_dual_diagrams(self):\n ags_net=self.dic_attr['ags_net']\n form_orig_net=self.dic_attr['form_orig_net']\n force_orig_net=self.dic_attr['force_orig_net']\n map_edg_orig_dic=self.dic_attr['map_edg_orig_dic']\n q_c=self.dic_attr['q_c'] # force_densities, based on dic_attr['edg_dic'] indeces(indeces of original ags_net)\n edg_dic=self.dic_attr['edg_dic'] # the dictionary with original indeces\n\n # map the original edges to their forces\n old_edg_f_dic={} # {old_edg:f}\n for ind, edg in edg_dic.items():\n old_q=round(q_c[ind][0], 1)\n old_len=hf.edge_length(ags_net, edg)\n old_edg_f_dic[edg]=(old_q*old_len).item() # .item() to make it reabale in ironpytho (numpyfloat64>>float)\n \n # update the dual edge mapping (removing repetative vertices of force)\n map_edg_temp_dic=hf.update_dual_mapping_1(force_orig_net, map_edg_orig_dic)\n\n # update the dual edge mapping\n map_edg_dic, new_edg_f_dic=hf.update_dual_mapping_2(form_orig_net, map_edg_temp_dic, old_edg_f_dic)\n\n # make a new form_net (without aligned edges)\n form_net=hf.make_new_network(form_orig_net, list(map_edg_dic.keys()))\n\n # make a new dual (force) network without repetative egdes and vertices\n force_net=hf.make_new_network(force_orig_net, list(map_edg_dic.values()))\n\n # rotate force_net 90 degrees\n ANG=np.pi/2.0\n force_90_net=hf.rotate_dual(force_net , ANG)\n\n # dictionary of dual vertices\n dual_ver_dic={}\n for key in force_net.nodes():\n dual_ver_dic[key]=force_net.node_coordinates(key)\n\n # ### save the data to draw form and force diagrams in Rhino ###\n with open(os.path.join(BASEDIR, 'map_edg_dic.p'), 'wb') as fp:\n pickle.dump(map_edg_dic, fp, protocol=2)\n with open(os.path.join(BASEDIR, 'new_edg_f_dic.p'), 'wb') as fp:\n pickle.dump(new_edg_f_dic, fp, protocol=2)\n with open(os.path.join(BASEDIR, 'dual_ver_dic.p'), 'wb') as fp:\n pickle.dump(dual_ver_dic, fp, protocol=2) \n\n self.dic_attr['map_edg_dic']=map_edg_dic\n self.dic_attr['form_net']=form_net\n self.dic_attr['force_net']=force_net\n self.dic_attr['force_90_net']=force_90_net\n self.dic_attr['new_edg_f_dic']=new_edg_f_dic # {new_edg:f} ",
"def test_dictcopy(self):\n copy = dict(Address._restrictions)\n for key in copy:\n Restriction.legacy(copy[key])",
"def test_invalid_dict_comparison(self):\n d = {1: np.arange(8), 2: np.arange(2), 3: np.arange(3)}\n d2 = {1: np.arange(8), 2: np.arange(2), 3: np.arange(3)}\n\n self.assertEqual(self.assert_callback_count, 0)\n # Set the dictproperty to a dictionary with numpy arrays as values\n self.dispatcher.p1 = d\n # Set an equivalent numpy array and check that it still dispatches\n # The individual numpy array elements equate to a non-scalar boolean. In that case we just assume they are !=\n self.dispatcher.p1 = d2\n self.assertEqual(self.assert_callback_count, 2)\n\n # Check it still works for actually different arrays\n self.dispatcher.p1 = {'one': np.arange(8), 'two': np.arange(20), 'three': np.arange(3)}\n self.assertEqual(self.assert_callback_count, 3)\n\n # Check setting individual elements to equivalent numpy value still dispatches\n self.dispatcher.p1['two'] = self.dispatcher.p1['two'].copy()\n self.assertEqual(self.assert_callback_count, 4)\n\n # Check updating elements to equivalent numpy value still dispatches\n self.dispatcher.p1.update({'one': np.arange(8), 'two': np.arange(2), 'three': np.arange(3)})\n self.assertEqual(self.assert_callback_count, 5)\n # Check updating elements (via keyword) to equivalent numpy value still dispatches\n self.dispatcher.p1.update(one=np.arange(8), two=np.arange(2), three=np.arange(3))\n self.assertEqual(self.assert_callback_count, 6)",
"def merge_dicts(d1: Dict[A, B], d2: Dict[A, B], f: Callable[[B, B], B]) -> Dict[A, B]:\n d1_keys = d1.keys()\n d2_keys = d2.keys()\n shared = d1_keys & d2_keys\n d1_exclusive = d1_keys - d2_keys\n d2_exclusive = d2_keys - d1_keys\n new_dict = {k: f(d1[k], d2[k]) for k in shared}\n new_dict.update({k: d1[k] for k in d1_exclusive})\n new_dict.update({k: d2[k] for k in d2_exclusive})\n return new_dict",
"def _safe_cross_reference_optimization(self) -> None:\n #self._cross_reference_optimization()\n #return\n xref_errors = defaultdict(list)\n for unused_key, deqatn in self.dequations.items():\n deqatn.safe_cross_reference(self)\n\n for unused_key, dresp in self.dresps.items():\n dresp.safe_cross_reference(self, xref_errors)\n\n for unused_key, dconstrs in self.dconstrs.items():\n for dconstr in dconstrs:\n if hasattr(dconstr, 'safe_cross_reference'):\n dconstr.safe_cross_reference(self)\n else: # pragma: no cover\n dconstr.cross_reference(self)\n\n for unused_key, dvcrel in self.dvcrels.items():\n if hasattr(dvcrel, 'safe_cross_reference'):\n dvcrel.safe_cross_reference(self)\n else: # pragma: no cover\n dvcrel.cross_reference(self)\n\n for unused_key, dvmrel in self.dvmrels.items():\n if hasattr(dvmrel, 'safe_cross_reference'):\n dvmrel.safe_cross_reference(self)\n else: # pragma: no cover\n dvmrel.cross_reference(self)\n\n for unused_key, dvprel in self.dvprels.items():\n if hasattr(dvprel, 'safe_cross_reference'):\n dvprel.safe_cross_reference(self)\n else: # pragma: no cover\n dvprel.cross_reference(self)\n\n for unused_key, desvar in self.desvars.items():\n desvar.safe_cross_reference(self)\n\n for unused_key, topvar in self.topvar.items():\n topvar.safe_cross_reference(self)",
"def calc_force(a, b, dt):\n\n r = ((b['x'] - a['x']) ** 2 + (b['y'] - a['y']) ** 2 + (b['z']\n - a['z']) ** 2) ** 0.5\n a['vx'] += G * a['m'] * b['m'] / r ** 2 * ((b['x'] - a['x']) / r) \\\n / a['m'] * dt\n a['vy'] += G * a['m'] * b['m'] / r ** 2 * ((b['y'] - a['y']) / r) \\\n / a['m'] * dt\n a['vz'] += G * a['m'] * b['m'] / r ** 2 * ((b['z'] - a['z']) / r) \\\n / a['m'] * dt",
"def _equivalent_for_plotgroup_update(p1,p2):\n\n attrs_to_check = ['pre_plot_hooks','keyname','sheet','x','y','projection','input_sheet','density','coords']\n\n for a in attrs_to_check:\n if hasattr(p1,a) or hasattr(p2,a):\n if not (hasattr(p1,a) and hasattr(p2,a) and getattr(p1,a)== getattr(p2,a)):\n return False\n\n return True",
"def _fixed_dict(self):\n\n _fixed_dict = {}\n for key, val in self.defdict.items():\n if val[0] is False:\n # treat parameters that are intended to be constants\n # if value is provided as a scalar, insert it in the definition\n if isinstance(val[1], str) and val[1] == 'auxiliary':\n _fixed_dict[key] = 'auxiliary'\n else:\n _fixed_dict[key] = val[1]\n\n return _fixed_dict",
"def differentiate_one_step_(\n self: \"Variables\", independent_var: \"Variables\", required_derivatives: List[str]\n ):\n\n required_derivatives = [d for d in required_derivatives if d not in self]\n required_derivatives_set = set(\n tuple(required_derivative.split(DIFF_SYMBOL))\n for required_derivative in required_derivatives\n )\n dependent_var_set = set(tuple(dv.split(DIFF_SYMBOL)) for dv in self.keys())\n computable_derivative_dict = defaultdict(set)\n for dv, rd in itertools.product(dependent_var_set, required_derivatives_set):\n if (\n len(rd) > len(dv)\n and rd[: len(dv)] == dv\n and rd[: len(dv) + 1] not in dependent_var_set\n ):\n computable_derivative_dict[rd[len(dv)]].add(DIFF_SYMBOL.join(dv))\n derivative_variables = Variables()\n for key, value in computable_derivative_dict.items():\n for v in value:\n f__x = torch.autograd.grad(\n self[v],\n independent_var[key],\n grad_outputs=torch.ones_like(self[v]),\n retain_graph=True,\n create_graph=True,\n allow_unused=True,\n )[0]\n if f__x is not None:\n f__x.requires_grad_()\n else:\n f__x = torch.zeros_like(self[v], requires_grad=True)\n derivative_variables[DIFF_SYMBOL.join([v, key])] = f__x\n self.update(derivative_variables)",
"def delta(dict1,dict2):\n res = {}\n for k,v in dict1.iteritems():\n if k not in dict2:\n res[k] = v\n elif dict2[k] != v:\n if isinstance(dict2[k],dict) and isinstance(v,dict):\n res[k] = delta(v,dict2[k])\n else:\n res[k] = v\n return res",
"def __sub__(self, other):\n if self == other:\n return None\n else:\n # Need to test not only if fields are new but if fields\n # have changed. Does not test removed fields.\n # Find new fields.\n new = set(self.field_dict.keys()) - set(other.field_dict.keys())\n # Find common fields whose values have changed.\n common = set(self.field_dict.keys()) & set(other.field_dict.keys())\n changed = [field for field in common if\n self.field_dict[field] != other.field_dict[field]]\n # Attributes for Fields object containing the changes\n updated_fields = set(changed) | new\n updated_dict = {k: self.field_dict[k] for k in updated_fields}\n # Create Fields object containing the changes.\n dif = Fields()\n dif.field_dict = updated_dict\n return dif",
"def get_blindfactors(refdict,shiftdict):\n ratiodict = {}\n for key in refdict: #all2ptdat:\n if ('_ell' in key) or ('_theta' in key) or ('_l' in key) :\n #'is x data'\n ratiodict[key] = refdict[key]\n else:\n ratiodict[key] = shiftdict[key]/refdict[key]\n return ratiodict",
"def equalObjs(obj1,obj2,allowedDiff,ignore=[], where=None ):\n if type(obj1) in [ float, int ] and type ( obj2) in [ float, int ]:\n obj1,obj2=float(obj1),float(obj2)\n \n if type(obj1) != type(obj2):\n logger.warning(\"Data types differ (%s,%s)\" %(type(obj1),type(obj2)))\n return False\n \n if isinstance(obj1,unum.Unum):\n if obj1 == obj2:\n return True\n diff = 2.*abs(obj1-obj2)/abs(obj1+obj2)\n return diff.asNumber() < allowedDiff\n elif isinstance(obj1,float):\n if obj1 == obj2:\n return True\n diff = 2.*abs(obj1-obj2)/abs(obj1+obj2)\n return diff < allowedDiff\n elif isinstance(obj1,str):\n return obj1 == obj2\n elif isinstance(obj1,dict):\n for key in obj1:\n if key in ignore: continue\n if not key in obj2:\n logger.warning(\"Key %s missing\" %key)\n return False\n if not equalObjs(obj1[key],obj2[key],allowedDiff, ignore=ignore, where=key ):\n logger.warning('Objects differ in %s:\\n %s\\n and\\n %s' %(where, str(obj1[key]),str(obj2[key])))\n #s1,s2 = str(obj1[key]),str(obj2[key]) \n #if False: # len(s1) + len(s2) > 200:\n # logger.warning ( \"The values are too long to print.\" )\n #else:\n # logger.warning( 'The values are: >>%s<< (this run) versus >>%s<< (default)'%\\\n # ( s1[:20],s2[:20] ) )\n return False\n elif isinstance(obj1,list):\n if len(obj1) != len(obj2):\n logger.warning('Lists differ in length:\\n %i (this run)\\n and\\n %i (default)' %\\\n (len(obj1),len(obj2)))\n return False\n for ival,val in enumerate(obj1):\n if not equalObjs(val,obj2[ival],allowedDiff):\n logger.warning('Lists differ:\\n %s (this run)\\n and\\n %s (default)' %\\\n (str(val),str(obj2[ival])))\n return False\n else:\n return obj1 == obj2\n \n return True",
"def update_dynamics(self):\n\n raise NotImplementedError",
"def test_same_coords_diff_data_var(self):\n\n # get the ddis\n orig, cppy = self.helper_get_joinable_ddis(cppy_change_times=False)\n\n # find a nan-index\n cppy.EXPOSURE.load()\n i, j = self.helper_get_nonnan_index(cppy.EXPOSURE)\n\n # change one of the data_vars values in the copy\n cppy.EXPOSURE[i][j] += 1\n self.assertNotEqual(orig.EXPOSURE[i][j], cppy.EXPOSURE[i][j])\n\n # try to merge\n with self.assertRaises(Exception, msg=\"ddi_join should not allow datasets with a different value for their data_var to be merged\"):\n cngi.vis.ddijoin(orig, cppy)",
"def _validate_plot_observables(mass_solution, observable, **kwargs):\n time_vector = _validate_time_vector(kwargs.get(\"time_vector\"), mass_solution.time)\n # Return all items in the solution if no observables are provided.\n if observable is None:\n observable = list(iterkeys(mass_solution))\n else:\n # Otherwise ensure observable is iterable\n observable = ensure_iterable(observable)\n\n # Replace mass objects with their identifiers.\n observable = [getattr(x, \"id\", x) for x in observable]\n\n # Check to ensure specified observables are in the MassSolution\n if not set(observable).issubset(set(iterkeys(mass_solution))):\n raise ValueError(\"`observable` must keys from the mass_solution.\")\n\n # Turn solutions into interpolating functions if the timepoints provided\n # are not identical to those used in the simulation.\n if not isinstance(time_vector, np.ndarray):\n time_vector = np.array(time_vector)\n\n # Turn observable into a copy of the MassSolution containing only\n # the observable values.\n observable = mass_solution.__class__(\n id_or_model=mass_solution.id,\n solution_type=mass_solution.solution_type,\n data_dict={x: mass_solution[x] for x in observable},\n time=mass_solution.time,\n interpolate=mass_solution.interpolate,\n initial_values={\n x: mass_solution.initial_values.get(x)\n for x in observable\n if mass_solution.initial_values.get(x, None) is not None\n },\n )\n\n # Change the time points and solutions if the time_vector has changed\n if not np.array_equal(observable.time, time_vector):\n observable.interpolate = True\n observable.time = time_vector\n\n # Turn interpolating functions into solutions.\n observable.interpolate = False\n\n if kwargs.get(\"deviation\"):\n observable = _calculate_deviation_solutions(observable, **kwargs)\n\n return observable"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
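A standalone sketch of the compatibility check above, using a plain named tuple in place of the library's `ParameterGradient` / `ParameterGradientKey` objects (the `Gradient` tuple and the parameter names below are stand-ins introduced only for illustration):

from collections import namedtuple
from typing import Dict, List, Tuple

# Hypothetical stand-in for ParameterGradient keyed by a ParameterGradientKey.
Gradient = namedtuple("Gradient", ["key", "value"])


def compatible_gradients(
    self_gradients: List[Gradient], other_gradients: List[Gradient]
) -> Tuple[Dict[str, Gradient], Dict[str, Gradient]]:
    """Re-key two gradient lists by parameter and require identical key sets."""
    self_by_key = {gradient.key: gradient for gradient in self_gradients}
    other_by_key = {gradient.key: gradient for gradient in other_gradients}

    if {*self_by_key} != {*other_by_key}:
        raise ValueError(
            "Two observables can only be operated on if they contain gradients "
            "with respect to the same force field parameters."
        )

    return self_by_key, other_by_key


a = [Gradient("vdW/epsilon", 1.0), Gradient("vdW/sigma", -0.5)]
b = [Gradient("vdW/sigma", 0.25), Gradient("vdW/epsilon", 2.0)]
lhs, rhs = compatible_gradients(a, b)  # succeeds: both carry the same parameter keys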
Clears all gradient information.
|
def clear_gradients(self):
self._gradients = []
|
[
"def clear_gradients(self):\n for observable in self._observables.values():\n observable.clear_gradients()",
"def clear(self):\n # Clear all terms!\n self.set_point = 0.0\n self.Pterm = 0.0\n self.Iterm = 0.0\n self.Dterm = 0.0\n self.last_error = 0.0\n self.control_variable = 0.0",
"def clear_data(self):\n self.__vertices = None\n self.__dofs = None \n self.__jacobi_dets = None \n self.__ijacobis = None\n self.__initialized = False",
"def clear_degradations(self):\n self.degradations = []",
"def zero_grad(self):\n\t\tfor i in range(len(self.Layers)):\n\t\t\tself.Layers[i].zero_grad()",
"def clear_gradient_clipping(self):\n self.nn_estimator.clearGradientClipping()\n self.estimator.clear_gradient_clipping()",
"def clear_all_traces(self):\n self.write(\"CALC:MEAS:DEL:ALL\")",
"def clear(self):\n\n for node in self._nodes:\n node.clear()",
"def clear_pixel_data(self):\n for field in PIXEL_FIELDS:\n self.delete_field(field)",
"def clearAll(self):\n\t\tself.faceSnapShot = None #This is the state of the HappyFace to which all the expressions are compared\n\t\tself.expressionLibrary = []",
"def clear(self):\n self._init_pixels(color=PixelColor(0, 0, 0, 0))",
"def reset(self):\n self.x_mean_pr, self.x_cov_pr = None, None\n self.x_mean_sm, self.x_cov_sm = None, None\n self.xx_cov, self.xy_cov = None, None\n self.pr_mean, self.pr_cov, self.pr_xx_cov = None, None, None\n self.fi_mean, self.fi_cov = None, None\n self.sm_mean, self.sm_cov = None, None\n self.D, self.N = None, None\n self.flags = {'filtered': False, 'smoothed': False}",
"def clear_data(self):\n self.strategy = None\n self.tick = None\n self.bar = None\n self.datetime = None\n\n self.algo_count = 0\n self.algos.clear()\n self.active_algos.clear()\n\n self.trade_count = 0\n self.trades.clear()\n\n self.logs.clear()\n self.daily_results.clear()",
"def _clear_grad(grad_sum, zero):\n\n return assign(grad_sum, zero)",
"def clear(self) -> None:\n\n self.render_list.point_lights = list()\n self.render_list.geometry = list()\n self.children = list()",
"def clear(self):\n del self._elements[:]\n del self.trig_waits[:]\n del self.nreps[:]\n del self.goto_states[:]\n del self.jump_tos[:]\n self.name = None\n self.variable = None\n self.variable_unit = None\n self._start = None\n self._stop = None\n self._step = None",
"def clear_summaries(self):\n\n\t\tself.count = 0\n\t\tmemset(self.counts, 0, self.n*sizeof(double))",
"def clear(self):\n\n self._check_init() # Check for delayed init\n for pt, fm in self._factmaps.items(): fm.clear()",
"def clear(self):\n self.actualBackwardActivation = None\n self.actualActivation = None\n self.inputNeuron.clear()\n self.outputNeuron.clear()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extracts the subset of the values stored for this observable at the specified indices.
|
def subset(self, indices: Iterable[int]) -> "ObservableArray":
return self.__class__(
value=self._value[indices],
gradients=[
ParameterGradient(key=gradient.key, value=gradient.value[indices])
for gradient in self._gradients
],
)
|
[
"def get_subset(self, indices):\n return MLData(self.data[indices], self.target[indices])",
"def slice(self, indices):\n indices = np.asarray(indices)\n max_index = indices.max()\n n_total = len(self)\n if max_index >= len(self):\n raise ValueError(\"Invalid index %d for Dataset of size %d\" % (\n max_index, n_total))\n\n df = self.to_dataframe()\n df_subset = pd.DataFrame()\n for column_name in df.columns:\n df_subset[column_name] = np.asarray(df[column_name].values)[indices]\n return self.__class__(df_subset)",
"def restrict_to_subset(self, indices):\n lon = [self.drifter_longitudes[i] for i in indices]\n lat = [self.drifter_latitudes[i] for i in indices]\n return trajectory_data(drifter_longitudes = lon, drifter_latitudes=lat, \n domain_name=self.domain_name)",
"def slice(self, indices):\r\n for index in xrange(*indices): #loop over range of indices\r\n yield self[index]",
"def subset(self, indices):\n ip = IndividualParameters()\n\n for idx in indices:\n if idx not in self._indices:\n raise ValueError(f'The index {0} is not in the indices'.format(idx))\n p = self[idx].copy()\n ip.add_individual_parameters(idx, p)\n\n return ip",
"def subset(self, subset_keys: List[str]) -> \"Variables\":\n\n return Variables({name: self[name] for name in subset_keys if name in self})",
"def select_indices(tensor, indices):\n return tensor.gather(1, indices.unsqueeze(1)).squeeze()",
"def get_values(t, col, indexes):\n return [t[col][i] for i in range(len(t[col])) if i in indexes]",
"def slice_select(x,dim,ind,return_indices=False):\n \n indices = (slice(None),) * dim + ((ind),)\n return x[indices] if not return_indices else indices # return indices if requested",
"def __get_result(bins: List[List[Any]], indices: List[int]) -> List[Any]:\n return [bins[i][index] for i, index in enumerate(indices)]",
"def _make_subset(self, indices: np.ndarray, name: str) -> \"PathologyDataset\":\n data = copy.deepcopy(self)\n data.X = data.X[indices]\n if data.has_y:\n data.y = data.y[indices]\n data.ninstances = len(data.X)\n data.name = name\n data.to_intermediate()\n return data",
"def select_idx(self, indices):\n\n assert isinstance(indices, list), \"Error: indices must a list\"\n\n if isinstance(indices, int):\n indices = [indices]\n\n self._df = self._df.select(*(self._df.columns[i] for i in indices))\n\n return self",
"def select_by_index(self, val, level=0, squeeze=False, filter=False, return_mask=False):\n try:\n level[0]\n except:\n level = [level]\n try:\n val[0]\n except:\n val = [val]\n\n remove = []\n if len(level) == 1:\n try:\n val[0][0]\n except:\n val = [val]\n if squeeze and not filter and len(val) == 1:\n remove.append(level[0])\n else:\n for i in range(len(val)):\n try:\n val[i][0]\n except:\n val[i] = [val[i]]\n if squeeze and not filter and len(val[i]) == 1:\n remove.append(level[i])\n\n if len(level) != len(val):\n raise ValueError(\"List of levels must be same length as list of corresponding values\")\n\n p = product(*val)\n selected = set([x for x in p])\n\n masks, ind = self._makemasks(index=self.index, level=level)\n nmasks = len(masks)\n masks = array([masks[x] for x in range(nmasks) if tuple(ind[x]) in selected])\n\n final_mask = masks.any(axis=0)\n if filter:\n final_mask = logical_not(final_mask)\n\n indFinal = array(self.index)\n if len(indFinal.shape) == 1:\n indFinal = array(indFinal, ndmin=2).T\n indFinal = indFinal[final_mask]\n\n if squeeze:\n indFinal = delete(indFinal, remove, axis=1)\n\n if len(indFinal[0]) == 1:\n indFinal = ravel(indFinal)\n\n elif len(indFinal[1]) == 0:\n indFinal = arange(sum(final_mask))\n\n result = self.map(lambda v: v[final_mask], index=indFinal)\n\n if return_mask:\n return result, final_mask\n else:\n return result",
"def idxs_take(idxs, vals, which):\r\n # TODO: consider insisting on sorted idxs\r\n # TODO: use np.searchsorted instead of dct\r\n assert len(idxs) == len(vals)\r\n table = dict(zip(idxs, vals))\r\n return np.asarray([table[w] for w in which])",
"def _resample_subset_indices(self):\n start = time.time()\n self.logger.debug(\"Iteration: {0:d}, requires subset selection. \".format(self.cur_iter))\n logging.debug(\"Random budget: %d\", self.budget)\n subset_indices, _ = self.strategy.select(self.budget)\n end = time.time()\n self.logger.info(\"Iteration: {0:d}, subset selection finished, takes {1:.2f}. \".format(self.cur_iter, (end - start)))\n return subset_indices",
"def select(sequences, indices):\n\n assert len(indices) == sequences.shape[0]\n\n # shape indices properly\n indices_shaped = indices[:, jnp.newaxis, jnp.newaxis]\n\n # select element\n selected_elements = jnp.take_along_axis(sequences, indices_shaped, axis=1)\n\n # remove sequence dimension\n selected_elements = jnp.squeeze(selected_elements, axis=1)\n\n return selected_elements",
"def subset(self, x):\n if x not in self._indices:\n raise KeyError(x)\n\n result = [x]\n nxt = self._nbrs[x]\n while self._indices[nxt] != self._indices[x]:\n result.append(nxt)\n nxt = self._nbrs[nxt]\n return set(result)",
"def get_data_subset(self, cell_ids):\n data_subset = self.data_sampled_all_genes[:, cell_ids]\n return sparse.csc_matrix(data_subset)",
"def getDataIndices(self, featureIndex, featureValue, subsetIndices):\n indices = []\n\n if len(subsetIndices) == 0:\n subsetIndices = list(range(0, len(self.__matrix)))\n\n for dataPointIndex in subsetIndices:\n dataPoint = self.__matrix[dataPointIndex]\n if dataPoint[featureIndex] == featureValue:\n indices.append(dataPointIndex)\n\n return indices"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
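The same index-based subsetting can be sketched with bare NumPy arrays: the value and every per-parameter gradient are sliced with one shared set of indices. The dictionary of gradient arrays below is an assumption standing in for the `ParameterGradient` list used above:

import numpy

value = numpy.arange(10.0)
gradients = {"vdW/epsilon": numpy.arange(10.0) * 0.1}  # hypothetical parameter key

indices = [0, 2, 4]

value_subset = value[indices]
gradient_subset = {key: grad[indices] for key, grad in gradients.items()}

assert value_subset.shape == (3,)
assert gradient_subset["vdW/epsilon"].shape == (3,)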
Validates whether a key is either an `ObservableType` or a string representation of an `ObservableType`. A `KeyError` is raised if any other type is passed as a key, or if the `str` cannot be converted to an `ObservableType`.
|
def _validate_key(key: Union[str, ObservableType]) -> ObservableType:
key_error_message = (
"The key must either be an `ObservableType` object or a "
"string representation of an `ObservableType` object."
)
if isinstance(key, str):
try:
key = ObservableType(key)
except ValueError:
raise KeyError(key_error_message)
elif not isinstance(key, ObservableType):
raise KeyError(key_error_message)
return key
|
[
"def _validate_key(key):\n key_error_message = (\n \"The key must either be an ObservableType or a \"\n \"string representation of an ObservableType\"\n )\n\n if isinstance(key, str):\n\n try:\n key = ObservableType(key)\n except ValueError:\n raise KeyError(key_error_message)\n\n elif not isinstance(key, ObservableType):\n raise KeyError(key_error_message)\n\n return key",
"def _type_check(self, key):\n if self._type == \"I\" and isinstance(key,str):\n raise TypeError(\"STDict keys is set as type int()\")\n\n elif self._type == \"S\" and isinstance(key,int):\n raise TypeError(\"STDict keys is set as type str()\")\n else:\n return",
"def _check_valid_key(self, key):\n if not isinstance(key, key_type):\n raise ValueError('%r is not a valid key type' % key)\n if not VALID_KEY_RE.match(key):\n raise ValueError('%r contains illegal characters' % key)",
"def input_type_check(data: object) -> None:\n if not isinstance(data, str):\n raise TypeError(\"Input data must be a 'str' object.\")",
"def check_str(cls, **kwargs):\r\n for value in kwargs:\r\n if not isinstance(kwargs[value], str):\r\n raise TypeError(value+' must be of type string')",
"def check_type(type_: Union[str, IdentifierType]) -> None:\n if str(type_) not in IdentifierType.__members__:\n raise ConversionError(f\"Invalid type {type_!r}!\")",
"def change_type(self, type_):\n if type_ != \"S\" and type_ != \"I\":\n raise TypeError(\"Error: Type: str(\"+str(type_)+\") not valid, str(S)=string and str(I)=integes.\")\n elif self._size == 0 or self._type == type_:\n self._type = type_\n else:\n raise TypeError(\"Can't change type to str(\"+str(type_)+\") when keys already in STDict has type str(\"+str(self._type)+\")\")",
"def _check_type(self, allowedTypes, key, value):\n\n # check the type\n correctType = isinstance(value, allowedTypes)\n\n # format the error message\n if isinstance(allowedTypes, (list, tuple)):\n allowedString = ' or '.join(i.__name__ for i in allowedTypes)\n else:\n allowedString = allowedTypes.__name__\n\n # throw an informative error if it fails\n if not correctType:\n actualString = type(value).__name__\n message = '%s attribute must be a %s (got %s instead)' % \\\n (key, allowedString, actualString)\n raise TypeError(message)",
"def _is_type(self, key, value, etype, none=False):\n if not isinstance(value, etype):\n if none and value is None:\n return value\n self._error(\"{} must be {}, not {}\", self._keyname(key), etype, type(value))\n return value",
"def _validate_lookup(lookup):\n if not isinstance(lookup, str):\n raise TypeError(\"Lookup value must be string. Given type {0}.\".format(type(lookup)))",
"def test_builtin_key_type():\n assert all(type(k) == str for k in dir(builtins))",
"def get_prop_type(value, key=None):\n \"\"\"\n if isinstance(key, unicode):\n # Encode the key as ASCII\n key = key.encode('ascii', errors='replace')\n \"\"\"\n\n \"\"\"\n elif isinstance(value, unicode):\n tname = 'string'\n value = value.encode('ascii', errors='replace')\n \"\"\"\n\n # Deal with the value\n if isinstance(value, bool):\n tname = 'bool'\n\n elif isinstance(value, int):\n tname = 'float'\n value = float(value)\n\n elif isinstance(value, float):\n tname = 'float'\n\n elif isinstance(value, dict):\n tname = 'object'\n\n else:\n tname = 'string'\n value = str(value)\n\n return tname, value, key",
"def can_to_str(_type):\n return isinstance(_type, String)",
"def _convert_type(doc, key_or_keys, converter):\n if isinstance(key_or_keys, str):\n doc[key_or_keys] = converter(doc[key_or_keys])\n else:\n for key in key_or_keys:\n doc[key] = converter(doc[key])",
"def _kv_to_str(self, value):\n if isinstance(value, str):\n return value\n elif isinstance(value, bool):\n return str(value).lower()\n elif isinstance(value, Number):\n return str(value)\n else:\n # don't coerce unrecognized types, TypeError will be raised later\n return value",
"def __validateDictionary(dictionary: Dict[str, Any], *, keyType: type = str, dictionaryName: str = \"argument\") -> None:\n if dictionary is None:\n return\n if not type(dictionary) is dict:\n raise TypeError(f\"Provided {dictionaryName} '{dictionary}' is of type {type(dictionary).__name__}, it needs to be of type dict\")\n for key in dictionary.keys():\n if not type(key) is keyType:\n raise TypeError(f\"Key '{key}' in dictionary '{dictionaryName}' is of type {type(key).__name__}, it needs to be of type {keyType.__name__}\")",
"def _assert_type_string(self, name, val):\n self._assert_type(name, val, basestring)",
"def test_ex_type_str():\n assert putil.exh._ex_type_str(RuntimeError) == 'RuntimeError'\n assert putil.exh._ex_type_str(OSError) == 'OSError'",
"def test_setitem_check_new_valid_type(dictionary):\n\n val = list(dictionary.values())[0]\n matching = BaseMatching(dictionary)\n assert matching._check_new_valid_type(val, str) is None\n\n with pytest.raises(ValueError):\n matching._check_new_valid_type(val, float)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
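A minimal, self-contained sketch of the same coercion pattern, using a plain `enum.Enum` as a stand-in for `ObservableType` (the `Color` enum and its members are made up for illustration):

from enum import Enum
from typing import Union


class Color(Enum):  # hypothetical stand-in for ObservableType
    Red = "red"
    Blue = "blue"


def validate_key(key: Union[str, Color]) -> Color:
    message = "The key must be a Color or the string value of a Color."
    if isinstance(key, str):
        try:
            key = Color(key)  # coerce the string to an enum member
        except ValueError:
            raise KeyError(message)
    elif not isinstance(key, Color):
        raise KeyError(message)
    return key


assert validate_key("red") is Color.Red
assert validate_key(Color.Blue) is Color.Blue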
Creates an observable frame from the CSV output of an OpenMM simulation.
|
def from_openmm(
cls, file_path: str, pressure: unit.Quantity = None
) -> "ObservableFrame":
with open(file_path, "r") as file:
file_contents = file.read()
if len(file_contents) < 1:
return cls()
file_contents = file_contents[1:]
file_contents = re.sub("#.*\n", "", file_contents)
string_object = StringIO(file_contents)
data_array = pandas.read_csv(string_object)
observable_to_openmm_header = {
ObservableType.PotentialEnergy: "Potential Energy (kJ/mole)",
ObservableType.KineticEnergy: "Kinetic Energy (kJ/mole)",
ObservableType.TotalEnergy: "Total Energy (kJ/mole)",
ObservableType.Temperature: "Temperature (K)",
ObservableType.Volume: "Box Volume (nm^3)",
ObservableType.Density: "Density (g/mL)",
}
openmm_header_to_unit = {
"Potential Energy (kJ/mole)": unit.kilojoules / unit.mole,
"Kinetic Energy (kJ/mole)": unit.kilojoules / unit.mole,
"Total Energy (kJ/mole)": unit.kilojoules / unit.mole,
"Temperature (K)": unit.kelvin,
"Box Volume (nm^3)": unit.nanometer**3,
"Density (g/mL)": unit.gram / unit.milliliter,
}
observables = {
observable_type: ObservableArray(
value=numpy.array(data_array[header]) * openmm_header_to_unit[header]
)
for observable_type, header in observable_to_openmm_header.items()
if header in data_array
}
if pressure is not None:
observables[ObservableType.Enthalpy] = ObservableArray(
value=(
observables[ObservableType.TotalEnergy].value
+ observables[ObservableType.Volume].value
* pressure
* unit.avogadro_constant
)
)
return cls(observables)
|
[
"def format_bom_mjo(src_file, target_file):\n\n cols = [0, 1, 2, 3, 4, 5, 6]\n names = ['year', 'month', 'day', 'RMM1', 'RMM2', 'phase', 'amplitude']\n na_values = ['1E+36', '1.E+36', '999', 999,\n '9.99999962E+35', 9.99999962E+35]\n\n logging.info('Reading source file: %s', src_file)\n\n input_data = pd.read_csv(src_file, skiprows=[0, 1], na_values=na_values,\n usecols=cols, names=names, delim_whitespace=True)\n\n n_samples = input_data.shape[0]\n\n logging.info('Number of samples: %d', n_samples)\n\n header = 'year,month,day,value'\n fmt = '%d,%d,%d,%16.8e'\n\n for field in target_file:\n\n data = np.ones((n_samples, 4))\n data[:, 0] = input_data['year']\n data[:, 1] = input_data['month']\n data[:, 2] = input_data['day']\n data[:, 3] = input_data[field]\n\n logging.info('Writing formatted data to: %s', target_file[field])\n np.savetxt(target_file[field], data, header=header, fmt=fmt)",
"def get_observation_list(self):\n ob_names = [\"line switches\", \"loads supplied\", \"gen power\" , \"pv scaling\", \n \"pv powered\", \"wind scaling\", \"wind powered\", \"storage powered\", \n \"storage SOCs\"]\n ob_n = [self.n_line, self.n_varloads, self.n_gen, self.n_pv, self.n_pv, \n self.n_wind, self.n_wind, self.n_storage, self.n_storage]\n df = pd.DataFrame(list(zip(ob_names, ob_n)), columns=[\"name\", \"number\"])\n return df",
"def flights_observable(self, year):\n def emit_flights(observer):\n reader = csv.reader(self.flights_file(year))\n reader.__next__() # skip the header\n for row in reader:\n observer.on_next(Flight(*row))\n observer.on_completed()\n\n return Observable.create(emit_flights)",
"def model(model, directory):\n return pandas.read_csv(csv_path(directory, model))",
"def stream_ol_data(csv_file):\n # Read in our logfile into the completed list\n with open(csv_file, 'r') as working_file:\n print(f\"Working on file {csv_file}\")\n items = csv.DictReader(working_file)\n for item in items:\n if csv_file.find('entities') > 1 and \\\n (csv_file == ['incorporation_date'] == \"\" or len(item['incorporation_date']) > 10):\n item['incorporation_date'] = None\n yield {\n \"_index\": f\"ol{csv_file[csv_file.rfind('_'):-4]}\",\n \"_source\": item\n }",
"def csvgenerator():\n fileManager = session_functions.loadFileManager()\n\n if request.method == \"GET\":\n # \"GET\" request occurs when the page is first loaded.\n if 'csvoptions' not in session:\n session['csvoptions'] = constants.DEFAULT_CSV_OPTIONS\n\n labels = fileManager.getActiveLabels()\n return render_template('csvgenerator.html', labels=labels)\n\n if 'get-csv' in request.form:\n #The 'Generate and Download Matrix' button is clicked on csvgenerator.html.\n session_functions.cacheCSVOptions()\n\n savePath, fileExtension = fileManager.generateCSV()\n\n session_functions.saveFileManager(fileManager)\n return send_file(savePath, attachment_filename=\"frequency_matrix\"+fileExtension, as_attachment=True)",
"def to_observable(r):\n\n id = str(uuid.uuid4())\n created = datetime.datetime.now()\n modified = created\n # protocol = int(r[\"protocol\"])\n temp_first = float(r[\"first_observed\"])\n temp_last = float(r[\"last_observed\"])\n first_observed = datetime.datetime.fromtimestamp(temp_first)\n last_observed = datetime.datetime.fromtimestamp(temp_last)\n number_observed = int(r[\"number_observed\"])\n\n # if protocol == 6:\n # protocol = \"tcp\"\n # elif protocol == 7:\n # protocol = \"udp\"\n # else:\n # protocol = \"unknown\"\n\n observed = stix2.ObservedData(\n id=\"observed-data--\" + id,\n created=created,\n modified=modified,\n first_observed=first_observed,\n last_observed=last_observed,\n number_observed=number_observed,\n objects=\n {\n \"0\": {\n \"type\": \"ipv4-addr\",\n \"value\": r[\"src_ip\"]\n },\n \"1\": {\n \"type\": \"ipv4-addr\",\n \"value\": r[\"dest_ip\"]\n },\n \"2\": {\n \"type\": \"network-traffic\",\n \"src_ref\": \"0\",\n \"dst_ref\": \"1\",\n \"src_port\": r[\"src_port\"],\n \"dst_port\": r[\"dest_port\"],\n \"protocols\": [\n \"ipv4\",\n r[\"protocol\"]\n ],\n }\n }\n )\n\n # di jadikan json\n observed_serialized = observed.serialize()\n observed_json = json.loads(observed_serialized)\n\n # dijadikan ke row rdd\n return _rowify(observed_json, prototypes.observable_prototype)",
"def generate_data():\n\n data = read_csv()\n data = compile_data(data)\n data.to_csv(\n r'C:\\Users\\cjros\\DIRECT\\Capstone\\Data\\Cleaned_Data\\energy_consumption.csv',\n index_label='UID')",
"def test_from_csv_condensed(self):\n\n file_path = pytest.helpers.data_path('matrix_ranged_condensed.tsv')\n\n matrix = GenomicMatrix.from_csv_condensed(file_path, sep='\\t')\n\n assert list(matrix.columns) == ['s1', 's2', 's3', 's4']\n assert list(matrix.gloc.chromosome) == ['1', '1', '2', '2']\n assert list(matrix.gloc.start) == [20, 30, 10, 50]\n assert list(matrix.gloc.end) == [30, 40, 25, 60]",
"def create_csv(topN):\n all_dates = os.listdir(articles_path)\n header = ['month', 'entities']\n output = []\n for date in all_dates[0:11]:\n output.append([date[:-3], get_entities(date, topN)])\n file = open(csv_path / 'entity_vectors.csv', mode='w+', newline='', encoding='utf-8')\n with file:\n write = csv.writer(file)\n write.writerow(header)\n write.writerows(output)",
"def init_csv(self):\n with open(self.csv_out_file, \"w\") as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(i for i in self.csv_headers)",
"def generate_csv():\n\tdata_frame = get_all_occupancy_data(False)\n\tdata_frame = resample_timestamp(data_frame)\n\tprint('Resample time stamp DONE')\n\tdata_frame = clean_data(data_frame)\n\tprint('Clean data DONE')\n\tdata_frame = add_public_holidays(data_frame)\n\tprint('Add holidays DONE')\n\tdata_frame = add_weather_info_to_data(data_frame)\n\tprint('Add weather DONE')\n\tdata_frame = add_lines_info_to_data(data_frame)\n\tprint('Add lines DONE')\n\tdata_frame = cut_weather(data_frame, True)\n\tprint('Cut weather DONE')\n\tdata_frame = cut_lines_reservation(data_frame)\n\tprint('Cut lines DONE')\n\tsave_data_to_csv(data_frame, DATASET_CSV_PATH)\n\t#split_csv(data_frame)",
"def __init__(self, events, csv_path, spread):\n self.events = events\n self.csv_path = csv_path\n self.spread = spread\n\n self.symbol_data = pd.DataFrame()\n self.latest_symbol_data = pd.DataFrame()\n self.continue_backtest = True\n\n self._open_convert_csv_files()\n self.length = 0\n self.current_idx = 1",
"def airports_observable(self):\n def emit_airports(observer):\n reader = csv.reader(self.airports_file())\n reader.__next__() # skip the header\n for row in reader:\n observer.on_next(Airport(*row))\n observer.on_completed()\n\n return Observable.create(emit_airports)",
"def add_reconstructed_observables(dataframe):\n \n dataframe = dataframe.copy()\n dataframe = add_angular_seperation(dataframe)\n dataframe = add_Z_bosons(dataframe)\n dataframe = add_higgs_mass(dataframe)\n dataframe = DWDF.df_move_to_last_col(dataframe, 'signal')\n return dataframe",
"def generate_csv():\n data = pd.DataFrame(\n columns=['sampleID', 'donor_sex', 'donor_age_at_diagnosis', 'histology_tier1', 'histology_tier2',\n 'tumor_stage1', 'tumor_stage2'])\n\n with open(METADATAPATH.replace('.csv','.txt')) as f:\n for l in f:\n words = l.split()\n id = words[0]\n sex = words[1]\n age = words[2]\n tier1 = words[3]\n tier2 = words[4]\n tumor_stage1 = '_'.join(words[5:7])\n tumor_stage2 = '_'.join(words[8:])\n data = data.append({'sampleID': id, 'donor_sex': sex, 'donor_age_at_diagnosis': age,\n 'histology_tier1': tier1, 'histology_tier2': tier2,\n 'tumor_stage1': tumor_stage1, 'tumor_stage2': tumor_stage2}, ignore_index=True)\n\n data = data.drop(data.index[0])\n\n data.to_csv(METADATAPATH, index=False)",
"def load_tick_sample() -> pd.DataFrame:\r\n\r\n devadarsh.track('load_tick_sample')\r\n\r\n project_path = os.path.dirname(__file__)\r\n tick_df = pd.read_csv(os.path.join(project_path, 'data/tick_data.csv'), index_col=0, parse_dates=[0])\r\n\r\n return tick_df",
"def create_dataset_LSTM(pattern):\r\n\r\n print('load vocabulary')\r\n with open('../vocabulary/' + pattern + 'voc.txt', 'r') as fv:\r\n vocabulary = json.load(fv)\r\n inv_voc = {v: k for k, v in vocabulary.items()}\r\n\r\n print('load training data')\r\n x_train = np.loadtxt('../splitted_data/' + pattern + 'x_train.txt')\r\n y_train = np.loadtxt('../splitted_data/' + pattern + 'y_train.txt')\r\n\r\n print('load testing data')\r\n x_test = np.loadtxt('../splitted_data/' + pattern + 'x_test.txt')\r\n y_test = np.loadtxt('../splitted_data/' + pattern + 'y_test.txt')\r\n\r\n with open(pattern + 'train.csv', 'w', newline='') as f2:\r\n writer = csv.writer(f2)\r\n for i in range(len(x_train)):\r\n s = ' '.join(inv_voc[k] for k in x_train[i]).replace(' <PAD/>', '')\r\n writer.writerow([int(y_train[i, 1]), s])\r\n\r\n with open(pattern + 'test.csv', 'w', newline='') as f2:\r\n writer = csv.writer(f2)\r\n for i in range(len(x_test)):\r\n s = ' '.join(inv_voc[k] for k in x_test[i]).replace(' <PAD/>', '')\r\n writer.writerow([int(y_test[i, 1]), s])",
"def csv(self, request):\n buffer = io.BytesIO()\n filename = 'all_covid_history_data_{date}.csv'.format(date=datetime.date.today())\n GeneralData.objects.to_csv(buffer)\n response = HttpResponse(\n content_type='text/csv',\n status=200,\n )\n response.write(buffer.getvalue())\n response['Content-Disposition'] = 'attachment; filename={name}'.format(name=filename)\n return response"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
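The parsing step above can be reproduced in a small self-contained sketch: OpenMM's `StateDataReporter` prefixes its header row with `#`, so the first character is dropped and any remaining comment lines are stripped before handing the text to pandas. The sample rows and column names below are illustrative only, not real simulation output:

import re
from io import StringIO

import pandas

# Illustrative reporter output; the leading '#' marks the header row.
raw = (
    '#"Potential Energy (kJ/mole)","Temperature (K)","Density (g/mL)"\n'
    "-1000.0,298.15,0.997\n"
    "-1001.5,299.02,0.996\n"
)

contents = raw[1:]                        # drop the leading '#'
contents = re.sub("#.*\n", "", contents)  # drop any other comment lines

frame = pandas.read_csv(StringIO(contents))
print(frame["Temperature (K)"].mean())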
Joins multiple observable frames together in the order that they appear in the args list.
|
def join(cls, *observable_frames: "ObservableFrame") -> "ObservableFrame":
if len(observable_frames) < 2:
raise ValueError("At least two observable frames must be provided.")
expected_observables: Set[ObservableType] = {*observable_frames[0]}
# Ensure the observable frames contain the same observables.
if not all(
{*observable_frame} == expected_observables
for observable_frame in observable_frames
):
raise ValueError(
"The observable frames must contain the same types of observable."
)
joined_observables = {
observable_type: ObservableArray.join(
*(
observable_frame[observable_type]
for observable_frame in observable_frames
)
)
for observable_type in expected_observables
}
return cls(joined_observables)
|
[
"def prep_animations(self,*animations,args=None):\n for animation in animations:\n if not args is None:\n animation.pass_obs=args\n self.held_animations.append(animation)\n animation.prep_queue()",
"def zip(*args: Union[Iterable[Any], ObservableBase], # pylint: disable=W0622\n result_mapper: Mapper = None) -> ObservableBase:\n\n if len(args) == 2 and isinstance(args[1], Iterable):\n return _zip_with_list(args[0], args[1], result_mapper=result_mapper)\n\n sources = list(args)\n result_mapper = result_mapper or list\n\n def subscribe(observer, scheduler=None):\n n = len(sources)\n queues = [[] for _ in range(n)]\n is_done = [False] * n\n\n def next(i):\n if all([len(q) for q in queues]):\n try:\n queued_values = [x.pop(0) for x in queues]\n res = result_mapper(*queued_values)\n except Exception as ex:\n observer.on_error(ex)\n return\n\n observer.on_next(res)\n elif all([x for j, x in enumerate(is_done) if j != i]):\n observer.on_completed()\n\n def done(i):\n is_done[i] = True\n if all(is_done):\n observer.on_completed()\n\n subscriptions = [None]*n\n\n def func(i):\n source = sources[i]\n sad = SingleAssignmentDisposable()\n source = Observable.from_future(source) if is_future(source) else source\n\n def on_next(x):\n queues[i].append(x)\n next(i)\n\n sad.disposable = source.subscribe_(on_next, observer.on_error, lambda: done(i), scheduler)\n subscriptions[i] = sad\n for idx in range(n):\n func(idx)\n return CompositeDisposable(subscriptions)\n return AnonymousObservable(subscribe)",
"def zip_(*args: Observable[Any]) -> Observable[Tuple[Any, ...]]:\n\n sources = list(args)\n\n def subscribe(\n observer: abc.ObserverBase[Any], scheduler: Optional[abc.SchedulerBase] = None\n ) -> CompositeDisposable:\n n = len(sources)\n queues: List[List[Any]] = [[] for _ in range(n)]\n lock = RLock()\n is_completed = [False] * n\n\n @synchronized(lock)\n def next_(i: int) -> None:\n if all(len(q) for q in queues):\n try:\n queued_values = [x.pop(0) for x in queues]\n res = tuple(queued_values)\n except Exception as ex: # pylint: disable=broad-except\n observer.on_error(ex)\n return\n\n observer.on_next(res)\n\n # after sending the zipped values, complete the observer if at least one\n # upstream observable is completed and its queue has length zero\n if any(\n (\n done\n for queue, done in zip(queues, is_completed)\n if len(queue) == 0\n )\n ):\n observer.on_completed()\n\n def completed(i: int) -> None:\n is_completed[i] = True\n if len(queues[i]) == 0:\n observer.on_completed()\n\n subscriptions: List[Optional[abc.DisposableBase]] = [None] * n\n\n def func(i: int) -> None:\n source: Observable[Any] = sources[i]\n if isinstance(source, Future):\n source = from_future(source)\n\n sad = SingleAssignmentDisposable()\n\n def on_next(x: Any) -> None:\n queues[i].append(x)\n next_(i)\n\n sad.disposable = source.subscribe(\n on_next, observer.on_error, lambda: completed(i), scheduler=scheduler\n )\n subscriptions[i] = sad\n\n for idx in range(n):\n func(idx)\n return CompositeDisposable(subscriptions)\n\n return Observable(subscribe)",
"def merge(*sources: Observable[Any]) -> Observable[Any]:\n from .observable.merge import merge_\n\n return merge_(*sources)",
"def push_many(self, *args):\n for i in args:\n self.push(i)",
"def combine_gen_sources(source_a, source_b, mask):\n animation = zip(source_a(), source_b(), mask())\n\n first_time = cv2.getTickCount()\n for frame_a, frame_b, frame_mask in animation:\n frame = primitives.mask_together(frame_a, frame_b, frame_mask)\n last_time = cv2.getTickCount()\n execution_time = (last_time - first_time) / cv2.getTickFrequency()\n write_on_frame(frame, str(execution_time))\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n first_time = cv2.getTickCount()\n\n cv2.destroyAllWindows()",
"def merge(self, integration):\n self.frames.merge(integration.frames)",
"def concat(*sources: Observable[_T]) -> Observable[_T]:\n\n from .observable.concat import concat_with_iterable_\n\n return concat_with_iterable_(sources)",
"def also(*args):\r\n return sequential(args)",
"def roll_many(self, frames: int, pins: int):\n for _ in range(frames):\n self.g.roll(pins)",
"def append_trajs(self, trajs):\n for traj in trajs:\n T = len(traj.obs)\n stacked_obs = np.stack(traj.obs, axis=0)\n actions = np.array(traj.actions)\n\n idxs = np.arange(self.t, self.t + T) % self.max_buffer_size\n frame_idxs = (np.arange(self.t, self.t + T) + self.n_frames -\n 1) % (self.max_buffer_size + self.n_frames - 1)\n # only most recent frames\n self.frame_buffer[frame_idxs] = stacked_obs[:, -1]\n self.action_buffer[idxs] = actions\n self.traj_end_buffer[idxs] = idxs[-1]\n self.t_buffer[idxs] = np.arange(T)\n self.t = (self.t + T) % self.max_buffer_size\n\n if self.current_buffer_size < self.max_buffer_size:\n self.current_buffer_size = min(\n self.current_buffer_size + T, self.max_buffer_size)",
"def concat(seqs): # real signature unknown; restored from __doc__\n pass",
"def join_flowables(\n *others: MultiCastOpMixin,\n):\n\n stack = get_stack_lines()\n\n def op_func(source: MultiCast):\n return source.join_flowables(list(others), stack=stack)\n\n return MultiCastOperator(op_func)",
"def emit(self, *args, **kwargs):\n for func in self._connections:\n func(*args, **kwargs)",
"def register_many(\n self,\n frames: Mapping[str, DataFrame | LazyFrame] | None = None,\n **named_frames: DataFrame | LazyFrame,\n ) -> Self:\n frames = dict(frames or {})\n frames.update(named_frames)\n for name, frame in frames.items():\n self.register(name, frame)\n return self",
"def test_on_multiple_handlers():\n\n obs = Observable()\n nose.assert_false(obs.events)\n\n results = []\n\n def some_test(*args, **kw):\n results.append(1)\n\n def some_test_2(*args, **kw):\n results.append(2)\n\n obs.on('some_test', some_test, some_test_2)\n nose.assert_equals(len(obs.events['some_test']), 2)\n\n obs.trigger('some_test')\n nose.assert_equals(results, [1,2])",
"def mix_fluids(\n cls, this_stack, other_stacks: list, env: FluidCombinationEnvironment\n ) -> typing.Tuple[list, FluidCombinationEnvironment]:\n return [this_stack] + other_stacks, env",
"def pack_args(self, args):\n\n return nest.pack_sequence_as(self._args, args)",
"def _fire_impl(self, *args):\r\n for x in list(self._listeners):\r\n x(*args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
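The join can be sketched with plain dictionaries of NumPy arrays: first check that every frame carries the same observable types, then concatenate each type in turn. The frame contents here are made up for illustration:

import numpy

frame_a = {"Temperature": numpy.array([298.0, 299.0]), "Density": numpy.array([0.99, 0.98])}
frame_b = {"Temperature": numpy.array([300.0]), "Density": numpy.array([0.97])}

frames = [frame_a, frame_b]
expected_types = {*frames[0]}

if not all({*frame} == expected_types for frame in frames):
    raise ValueError("The frames must contain the same types of observable.")

joined = {
    observable_type: numpy.concatenate([frame[observable_type] for frame in frames])
    for observable_type in expected_types
}

assert len(joined["Temperature"]) == 3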
Clears all gradient information for each observable in the frame.
|
def clear_gradients(self):
for observable in self._observables.values():
observable.clear_gradients()
|
[
"def clear_gradients(self):\n self._gradients = []",
"def zero_grad(self):\n\t\tfor i in range(len(self.Layers)):\n\t\t\tself.Layers[i].zero_grad()",
"def clearAll(self):\n\t\tself.faceSnapShot = None #This is the state of the HappyFace to which all the expressions are compared\n\t\tself.expressionLibrary = []",
"def reset_all(self):\n self.reset_memory()\n self.reset_traces()\n self.reset_tags()\n\n self.prev_obs = np.zeros(self.nx_inst)\n self.prev_qa = 0\n self.prev_max = 0.",
"def clear_crossfilter(self):\n print ('Trigger clear')\n self.struct_df = None\n self.elem_df = None\n self.prop_df = None\n self.code_df = None\n self.exchange_df = None\n self.plot_data = None\n layout.children[4] = self.create_figure(self.plot_data)",
"def clear_drawn_objects(self, view_manager):\n\n if self._gl_points_collection is not None:\n view_manager.get_view().removeItem(self._gl_points_collection)\n\n self._gl_points_collection = None\n self._points = None\n self._vals = None\n self._colors = None",
"def clearDeltas(self):\n\t\tfor expItem in self.expressionItemsData:\n\t\t\texpItem.clearDelta()",
"def clear(self) -> None:\n\n self.render_list.point_lights = list()\n self.render_list.geometry = list()\n self.children = list()",
"def clear_data(self):\n self.__vertices = None\n self.__dofs = None \n self.__jacobi_dets = None \n self.__ijacobis = None\n self.__initialized = False",
"def reset(self):\n self.x_mean_pr, self.x_cov_pr = None, None\n self.x_mean_sm, self.x_cov_sm = None, None\n self.xx_cov, self.xy_cov = None, None\n self.pr_mean, self.pr_cov, self.pr_xx_cov = None, None, None\n self.fi_mean, self.fi_cov = None, None\n self.sm_mean, self.sm_cov = None, None\n self.D, self.N = None, None\n self.flags = {'filtered': False, 'smoothed': False}",
"def clear_fn(self):\n self.x, self.y = [], []",
"def clear_gradient_clipping(self):\n self.nn_estimator.clearGradientClipping()\n self.estimator.clear_gradient_clipping()",
"def reset ( self ) :\n for h in self._histos : self._histos[h].reset()",
"def clear(self):\n # Clear all terms!\n self.set_point = 0.0\n self.Pterm = 0.0\n self.Iterm = 0.0\n self.Dterm = 0.0\n self.last_error = 0.0\n self.control_variable = 0.0",
"def clear_slctns(self):\n for mrkr in self.mrkrs: self.maparea.delete(mrkr)\n for line in self.lines: self.maparea.delete(line)\n for arr in self.clearables: arr = []\n self.navigator.waypoints.clear()\n self.navigator.next_tar()",
"def clear(self):\n\n # loop through all existing figures\n if self.figs is not None:\n self.figs.clear()\n self.repaint()",
"def clear(self):\n self.actualBackwardActivation = None\n self.actualActivation = None\n self.inputNeuron.clear()\n self.outputNeuron.clear()",
"def clear(self):\n self.shapes.clear()",
"def clear(self):\n for shape in self.__shapes:\n self.delete(shape)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Bootstraps a set of observables to compute the average value of the observables as well as the standard error in the average.
|
def bootstrap(
bootstrap_function: Callable,
iterations: int = 200,
relative_sample_size: float = 1.0,
sub_counts: Iterable[int] = None,
**observables: ObservableArray,
) -> Observable:
if len(observables) == 0:
raise ValueError("There are no observables to bootstrap")
# Ensure that the observables are all compatible.
data_size = len(observables[next(iter(observables))])
assert all(
isinstance(data_value, ObservableArray) for data_value in observables.values()
)
assert all(len(observables[key]) == data_size for key in observables)
# Make a copy of the observables so we don't accidentally destroy anything.
observables = copy.deepcopy(observables)
if sub_counts is None:
sub_counts = numpy.array([data_size])
assert numpy.sum(sub_counts) == data_size
# Compute the mean value (and gradients if present).
mean_observable = bootstrap_function(**observables)
# Bootstrap to compute the uncertainties
bootstrapped_values = numpy.zeros(iterations)
for bootstrap_iteration in range(iterations):
sample_observables: Dict[str, ObservableArray] = {
key: ObservableArray(
value=(numpy.zeros(observables[key].value.magnitude.shape))
* observables[key].value.units,
)
for key in observables
}
start_index = 0
for sub_count in sub_counts:
# Choose the sample size as a percentage of the full data set.
sample_size = min(math.floor(sub_count * relative_sample_size), sub_count)
sample_indices = numpy.random.choice(sub_count, sample_size)
for key in observables:
sub_data = observables[key].subset(
range(start_index, start_index + sub_count)
)
sample_observables[key].value[
start_index : start_index + sub_count
] = sub_data.value[sample_indices]
start_index += sub_count
bootstrapped_values[bootstrap_iteration] = (
bootstrap_function(**sample_observables)
.value.to(mean_observable.value.units)
.magnitude
)
std_error = bootstrapped_values.std() * mean_observable.value.units
return Observable(
value=mean_observable.value.plus_minus(std_error),
gradients=mean_observable.gradients,
)
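As a simplified, standalone sketch of the same resampling idea (plain NumPy only, without the ObservableArray/Observable types and unit handling used above, so the names here are illustrative rather than part of the library), bootstrapping a mean and its standard error looks like:

import numpy

def bootstrap_mean(values: numpy.ndarray, iterations: int = 200) -> tuple:
    # Illustrative sketch only; not part of the library code above.
    # The statistic evaluated on the full data set is the reported value.
    mean_value = values.mean()
    resampled = numpy.zeros(iterations)
    for i in range(iterations):
        # Resample with replacement, keeping the original sample size.
        indices = numpy.random.choice(len(values), len(values))
        resampled[i] = values[indices].mean()
    # The spread of the resampled statistic estimates the standard error.
    return mean_value, resampled.std()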
|
[
"def computeMeansErrors(*arrays):\n workMat = stack(arrays)\n return workMat.mean(axis=0), workMat.std(axis=0)",
"def _compute_bootstrapped_statistics(\n measured_values,\n measured_stds,\n estimated_values,\n estimated_stds,\n statistics=None,\n percentile=0.95,\n bootstrap_iterations=1000,\n):\n\n sample_count = len(measured_values)\n\n # Compute the mean of the statistics.\n mean_statistics, statistics_labels = _compute_statistics(\n measured_values, estimated_values, statistics\n )\n\n # Generate the bootstrapped statistics samples.\n sample_statistics = numpy.zeros((bootstrap_iterations, len(mean_statistics)))\n\n for sample_index in range(bootstrap_iterations):\n\n samples_indices = numpy.random.randint(\n low=0, high=sample_count, size=sample_count\n )\n\n sample_measured_values = measured_values[samples_indices]\n\n if measured_stds is not None:\n sample_measured_values += numpy.random.normal(0.0, measured_stds)\n\n sample_estimated_values = estimated_values[samples_indices]\n\n if estimated_stds is not None:\n sample_estimated_values += numpy.random.normal(0.0, estimated_stds)\n\n sample_statistics[sample_index], _ = _compute_statistics(\n sample_measured_values, sample_estimated_values, statistics\n )\n\n # Compute the SEM\n standard_errors_array = numpy.std(sample_statistics, axis=0)\n\n # Store the means and SEMs in dictionaries\n means = dict()\n standard_errors = dict()\n\n for statistic_index in range(len(mean_statistics)):\n statistic_label = statistics_labels[statistic_index]\n\n means[statistic_label] = mean_statistics[statistic_index]\n standard_errors[statistic_label] = standard_errors_array[statistic_index]\n\n # Compute the confidence intervals.\n lower_percentile_index = int(bootstrap_iterations * (1 - percentile) / 2)\n upper_percentile_index = int(bootstrap_iterations * (1 + percentile) / 2)\n\n confidence_intervals = dict()\n\n for statistic_index in range(len(mean_statistics)):\n statistic_label = statistics_labels[statistic_index]\n\n sorted_samples = numpy.sort(sample_statistics[:, statistic_index])\n\n confidence_intervals[statistic_label] = (\n sorted_samples[lower_percentile_index],\n sorted_samples[upper_percentile_index],\n )\n\n return means, standard_errors, confidence_intervals",
"def compute_averages(self):\n self.energy_average = self.cumulative_energy / self.N\n self.energy_squared_average = self.cumulative_squared_energy / self.N\n self.wave_function_derivative_average = self.cumulative_wave_function_derivative / self.N\n self.wave_function_energy_average = self.cumulative_wave_function_energy / self.N",
"def bootstrap(array, iterations, bound):\r\n initial = np.empty(shape=(array.size, iterations), dtype=float)\r\n for step in np.arange(0, iterations, 1):\r\n resample = np.random.choice(array, array.size, replace=True)\r\n initial[:, step] = resample\r\n samplemean = np.mean(array)\r\n bootstrapmean = np.mean(initial, axis=0)\r\n stackedmean = np.sort(bootstrapmean-samplemean)\r\n upperboundpercent = 1-(1-bound)/2\r\n lowerboundpercent = (1-bound)/2\r\n upperboundval = stackedmean[round(upperboundpercent*(stackedmean.size-1))]\r\n lowerboundval = stackedmean[round(lowerboundpercent*(stackedmean.size-1))]\r\n upperrange = samplemean - lowerboundval\r\n lowerrange = samplemean - upperboundval\r\n return [bootstrapmean, upperrange, lowerrange]",
"def bootstrap(data,objFunc,nBootSamp=128):\r\n N=len(data)\r\n objFuncVals = scipy.zeros(nBootSamp)\r\n for n in range(nBootSamp):\r\n resDat = data[scipy.random.randint(0,N,(N,))]\r\n objFuncVals[n]=objFunc(resDat)\r\n av = objFunc(data) \r\n # scipy.std is computed from the uncorrected variance of the \r\n # data. Apply correction factor to account for bias.\r\n sDev = scipy.sqrt(nBootSamp/(nBootSamp-1))*scipy.std(objFuncVals)\r\n return av,sDev",
"def avg(results):\n\n xwins = [zip(*result)[0] for result in results]\n owins = [zip(*result)[1] for result in results]\n\n xwins_avg = np.average(np.array(xwins), 0)\n owins_avg = np.average(np.array(owins), 0)\n\n return zip(xwins_avg, owins_avg)",
"def calc_mean_std(self):\n\n # get ob_next sets from memory\n memory_len = len(self._memory)\n all_obs_next = []\n col_len = len(self._memory[memory_len - 1].obs_nex)\n \n for i in range(memory_len):\n all_obs_next.append(self._memory[i].obs_nex)\n \n # cacualte average and standard diviation for each features \n return (np.mean(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1), \n np.std(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1))",
"def _compute_mean_and_std(self, patches):\n assert len(patches) > 0, 'Patches list is empty!'\n # compute the mean\n mean = np.mean(patches)\n # compute the standard deviation\n std = np.std(patches)\n return mean, std",
"def ensemble_averaging(self, setObservations, setStates, \n weighting_factor=\"unit\", maxiter=1000, impr=1):\n N = self.N\n W = 0\n hmmk = self.__class__(self.omega_X, self.omega_O)\n A_bar = zeros( (N, N))\n B_bar = zeros( (self.M, N))\n pi_bar = zeros(N)\n for k, obs in enumerate(setObservations):\n hmmk.A = self.A\n hmmk.B = self.B\n hmmk.pi = self.pi\n obsIndices = self._get_observationIndices(obs)\n state = setStates[k]\n hmmk._baum_welch(obsIndices, state, maxiter, impr)\n if weighting_factor == \"Pall\":\n Wk = hmmk._weighting_factor_Pall(setObservations)\n elif weighting_factor == \"Pk\":\n Wk = hmmk._weighting_factor_Pk(obs)\n else:\n Wk = 1\n A_bar = A_bar + Wk * hmmk.A\n B_bar = B_bar + Wk * hmmk.B\n pi_bar = pi_bar + Wk * hmmk.pi\n W = W + Wk\n if W == 0:\n W = 1\n print \"The ensemble averaging method did not converge\" \n else:\n self.A = A_bar / W\n self.B = B_bar / W\n self.pi = pi_bar / W\n self._mask()",
"def average_models(self, models):\n num_models = len(models)\n # Get all weights of the models.\n for i in range(0, num_models):\n weights = np.asarray(deserialize_keras_model(models[i]).get_weights())\n self.parameter_buffer += weights\n # Average the parameters.\n self.parameter_buffer /= num_models\n temp_model = deserialize_keras_model(self.master_model)\n temp_model.set_weights(self.parameter_buffer)\n self.master_model = serialize_keras_model(temp_model)",
"def bootstrap_sterr(x, B=100):\r\n N = len(x)\r\n samples = np.zeros((B, N))\r\n mus = np.zeros((B,))\r\n for b in range(B):\r\n samples[b,:] = np.random.choice(x, N, replace=True)\r\n mus[b] = np.mean(samples[b,:])\r\n return np.std(mus)",
"def average(source: Observable) -> Observable:\n\n if key_mapper:\n return source.pipe(\n operators.map(key_mapper),\n operators.average()\n )\n\n def accumulator(prev, cur):\n return AverageValue(sum=prev.sum+cur, count=prev.count+1)\n\n def mapper(s):\n if s.count == 0:\n raise Exception('The input sequence was empty')\n\n return s.sum / float(s.count)\n\n seed = AverageValue(sum=0, count=0)\n return source.pipe(\n operators.scan(accumulator, seed),\n operators.last(),\n operators.map(mapper)\n )",
"def _compute_avg(self, gen):\n tuple_map = map(lambda f: (1, f.delay), gen)\n sum_reduce = reduce(lambda x, y: (x[0]+y[0], x[1]+y[1]), tuple_map)\n avg = sum_reduce[1] / sum_reduce[0]\n return avg",
"def run_bootstrap(self, runs=10000):\n self.boot = self.bootstrap_sample(self.A, runs)\n self.bootstrap_mean = np.mean(self.boot)\n self.bootstrap_sdm = np.std(self.boot, ddof=1)\n self.bootstrap_runs = runs\n self.bootstrap_bias = self.meanA - np.mean(self.boot)\n self.__sample_is_bootstrapped = True",
"def compute_mean_std(self, verbose=False):\n sum_intensities = 0.0\n numel = 0\n\n with mt_datasets.DatasetManager(self,\n override_transform=mt_transforms.ToTensor()) as dset:\n pbar = tqdm(dset, desc=\"Mean calculation\", disable=not verbose)\n for sample in pbar:\n input_data = sample['input']\n sum_intensities += input_data.sum()\n numel += input_data.numel()\n pbar.set_postfix(mean=\"{:.2f}\".format(sum_intensities / numel),\n refresh=False)\n\n training_mean = sum_intensities / numel\n\n sum_var = 0.0\n numel = 0\n\n pbar = tqdm(dset, desc=\"Std Dev calculation\", disable=not verbose)\n for sample in pbar:\n input_data = sample['input']\n sum_var += (input_data - training_mean).pow(2).sum()\n numel += input_data.numel()\n pbar.set_postfix(std=\"{:.2f}\".format(np.sqrt(sum_var / numel)),\n refresh=False)\n\n training_std = np.sqrt(sum_var / numel)\n return training_mean.item(), training_std.item()",
"def test_recomputing_angular_average(self):\n self.dataset.compute_angular_averages(center=(34, 56))\n self.dataset.compute_baseline(first_stage=\"sym6\", wavelet=\"qshift1\")\n self.dataset.compute_angular_averages(center=(45, 45), normalized=False)\n self.dataset.compute_baseline(first_stage=\"sym5\", wavelet=\"qshift2\")\n self.dataset.compute_angular_averages(\n center=(34, 56), angular_bounds=(15.3, 187)\n )\n self.dataset.compute_baseline(first_stage=\"sym6\", wavelet=\"qshift1\")",
"def _compute_mean_std(self, sum_, ssum, size):\n assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'\n mean = sum_ / size\n sumvar = ssum - sum_ * mean\n unbias_var = sumvar / (size - 1)\n bias_var = sumvar / size\n\n if hasattr(torch, 'no_grad'):\n with torch.no_grad():\n self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data\n self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data\n else:\n self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data\n self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data\n\n return mean, bias_var.clamp(self.eps) ** -0.5",
"def bootstrap(data, num_samples, alpha, data_weights=None):\n n = len(data)\n stat=np.zeros(num_samples)\n for i in xrange(num_samples):\n idx = np.random.randint(0, n, n)\n samples = data[idx]\n\tif (weightsFlag):\n \tweights = data_weights[idx]\n \tstat[i]=np.average(samples, 0, weights) \n\telse:\n \tstat[i]=np.mean(samples, 0) \n stat = np.sort(stat)\n return (stat[int((alpha/2.0)*num_samples)],\n stat[int((1-alpha/2.0)*num_samples)])",
"def evaluate_models_2():\n df = prepare_individual_datasets()\n get_averaged_models()\n scores = []\n mean_plot = []\n print(\"Starting evaluation...\")\n for model in glob.glob('*_averaged.csv'):\n averaged_model = pd.read_csv(model)\n featuress = averaged_model['feature']\n\n # weights\n intercept = averaged_model['weight'].values[0]\n weights = averaged_model['weight'][1:]\n features_used = featuress.values[1:]\n # reindex to perform series multiplication\n weights.index = features_used\n\n temp_scores = []\n for station in df:\n X = station.loc[:, station.columns != 'bikes']\n Y = station['bikes']\n X = X.filter(items=features_used)\n predictions = X.apply(lambda row: intercept + row.dot(weights), axis=1).astype('int64')\n temp_scores.append(mean_absolute_error(predictions, Y))\n name = model.split('_averaged')[0]\n scores.append((name, temp_scores))\n mean_score = mean(temp_scores)\n print(f'Accuracy of model {name} is {mean_score}\\n')\n mean_plot.append(mean_score)\n plot_scores_2(scores, mean(mean_plot))\n print(mean(mean_plot))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test case for me_get
|
def test_me_get(self):
pass
|
[
"def test_hirststonge_using_get(self):\n pass",
"def test_chores_get(self):\n pass",
"def test_musicals_get(self):\n pass",
"def test_hirststonge_using_get1(self):\n pass",
"def test_hirststonge_using_get2(self):\n pass",
"def test_greenalgas_get(self):\n pass",
"def test_get_data(self):\n\n\t\t# Test to go here when best approach is decided for making requests.",
"def test_get(self):\n cell_admin = treadmill.context.AdminContext.cell.return_value\n self.cell.get('some-cell')\n cell_admin.get.assert_called_with('some-cell')",
"def test_staking_parameters_get(self):\n pass",
"def test_murderers_get(self):\n pass",
"def test_hirststonge_using_get3(self):\n pass",
"def test_get_node_using_get(self):\n pass",
"def test_me_get_list(self):\n pass",
"def test_api_v3_member_get(self):\n pass",
"def test_get_feature_details_using_get(self):\n pass",
"def test_get(self):\n model = self.model\n for field in self.exposed_fields_ans:\n ans = model._get(field)\n self.assertTrue(\n self.get_ans[field](ans),\n \"\"\"Get failed in field {}. Output was {}.\"\"\".format(field, ans),\n )",
"def test_list_using_get1(self):\n pass",
"def test_humangenes_get(self):\n pass",
"def test_get_document_using_get(self):\n pass",
"def test_get_account_type_using_get(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test case for me_get_list
|
def test_me_get_list(self):
pass
|
[
"def test_list_using_get1(self):\n pass",
"def test_get_list(self):\n\t\tinput = get_list('./tests/sample.json')\n\t\tassert isinstance(input, list)",
"def test_list_operations(self):\n pass",
"def test_get_items_in_list(self):\n\n list_name = 'travel'\n item1 = 'cake'\n item2 = 'soda'\n\n self.user.create_list('travel')\n self.user.add_item('travel', 'cake',4000 )\n self.user.add_item('travel', 'soda',3000)\n items = self.user.get_items('travel')\n self.assertIsInstance(items, list)\n self.assertEqual(len(items), 2)",
"def test_list_queries(self):\n pass",
"def test_get_list(self):\n #Get and verify the resp\n resp = self.client.get('/api/v1/acknowledgement/')\n self.assertEqual(resp.status_code, 200, msg=resp)\n\n #Verify the data sent\n resp_obj = resp.data\n self.assertIsNotNone(resp_obj['results'])\n self.assertEqual(len(resp_obj['results']), 1)\n self.assertEqual(len(resp_obj['results'][0]['items']), 2)",
"def test_enabled_feature_get_list(self):\n self._test_method('get', True)",
"def test_getall2(self):\n pass",
"def test_disabled_feature_get_list(self):\n self._test_method('get', False)",
"def test_list_get(self):\n with self.login(self.test_user):\n self.get_check_200(\n \"admin:organizer_newslink_changelist\"\n )",
"def test_list(self):\n self.__assert_empty_builder()\n self.__builder.list()\n self.assertEqual('path -list ', str(self.__builder))",
"def test_cms_block_repository_v1_get_list_get(self):\n pass",
"def test_get_note_from_list(self):\n notes = [\"a\" , \"b\" , \"c\"]\n id = 1\n expected_output = \"b\"\n self.assertEqual(expected_output, get(notes,id))",
"def test_list_cast(self):\n self.plist = PaginatedResourceList(int, self.endpoint)\n\n entire_list = list(self.plist)\n self.assertEqual(entire_list, list(range(self.total)))\n self.assertEqual(len(responses.calls), self.lazy_pages(self.total-1))",
"def getList(name):",
"def test_list(self):\n payloads = [\n b'payload A',\n b'second payload'\n b'payload 3+'\n ]\n res = []\n provider = payload_provider.List(payloads)\n for payload in provider:\n res.append(payload)\n for num, payload in enumerate(payloads):\n self.assertEqual(res[num], payload, 'Payload not expected in position {0}'.format(num))",
"def test_list_name_getter(self):\n self.assertTrue(self.parser.list_name == \"Test\")",
"def test_list(self):\n self.cell.list()\n cell_admin = treadmill.context.AdminContext.cell.return_value\n self.assertTrue(cell_admin.list.called)",
"def test_get_trade_list(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test case for me_get_pay_ins
|
def test_me_get_pay_ins(self):
pass
|
[
"def test_pay_ins_universal_pay_universal_pay_get_payment(self):\n pass",
"def test_payment_methods_get(self):\n pass",
"def test__account_instruments(self, mock_req):\n tid = \"_v3_account_by_accountID_instruments\"\n resp, data, params = fetchTestData(responses, tid)\n r = accounts.AccountInstruments(accountID=accountID, params=params)\n mock_req.register_uri('GET',\n \"{}/{}\".format(api.api_url, r),\n text=json.dumps(resp))\n result = api.request(r)\n self.assertTrue(result == resp)",
"def test_payment_methods_id_get(self):\n pass",
"def test_get_payment_modules(self):\n pass",
"def test_sitter_payment(self):\n self.assertTrue(self.sitter.calculate_payment() == 3 * 15)",
"def test_get_payees(self):\n expected_result = [{'payee_id': 1,\n 'payee_name': 'National Rail'},\n {'payee_id': 2,\n 'payee_name': \"Sainsbury's\"}]\n actual_result = self.ledger.get_payees()\n\n self.assertEqual(actual_result, expected_result)",
"def test_get_receipts_by_payment(self):\n pass",
"def test_getinvestment(self):\n pass",
"def test_list_transactions(self):\n get_trns_return = [{'date': datetime.date(2015, 1, 2),\n 'amount': Decimal('46.85'),\n 'description': 'Train ticket',\n 'transaction_id': 1,\n 'payee_id': 1},\n {'date': datetime.date(2015, 1, 3),\n 'amount': Decimal('5.42'),\n 'description': 'Shopping',\n 'transaction_id': 2,\n 'payee_id': 2}]\n self.ws.ledger.get_transactions = MagicMock(return_value =\n get_trns_return)\n\n self.ws.onecmd('list transactions')\n self.ws.ledger.get_transactions.assert_called_with()\n # [todo] - test display of transactions",
"def test_get_withdrawals(self):\n pass",
"def test_get_account_balances_using_get(self):\n pass",
"def test_list_payees(self):\n get_pye_rtn = [{'payee_id': 1,\n 'payee_name': 'National Rail'},\n {'payee_id': 2,\n 'payee_name': \"Sainsbury's\"}]\n self.ws.ledger.get_payees = MagicMock(return_value = get_pye_rtn)\n\n self.ws.onecmd('list payees')\n self.ws.ledger.get_payees.assert_called_with()\n # [todo] - test display of payees",
"def test_get_trade_list(self):\n pass",
"def test_payemnt_providers_get(self):\n response = self.client.open(\n '/v1/payemnt_providers',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_get_account_by_type_and_currency(self):\n pass",
"def test_get_ad_accounts(self):\n pass",
"def test_getinvestmentvalue(self):\n pass",
"def __get_instrument_details(self):\n instrument = self.get_random_instrument()\n instrument_id = instrument['instrument_id']\n isin = instrument['isin']\n return instrument_id, isin"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Here, we check that every endpoint documented in the OpenAPI specification actually exists in urls.py and thus in actual code.
|
def check_for_non_existent_openapi_endpoints(self) -> None:
openapi_paths = set(get_openapi_paths())
undocumented_paths = openapi_paths - self.checked_endpoints
undocumented_paths -= self.buggy_documentation_endpoints
undocumented_paths -= self.pending_endpoints
try:
self.assert_length(undocumented_paths, 0)
except AssertionError: # nocoverage
msg = "The following endpoints have been documented but can't be found in urls.py:"
for undocumented_path in undocumented_paths:
msg += f"\n + {undocumented_path}"
raise AssertionError(msg)
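At its core this check is plain set arithmetic over URL paths; a minimal sketch, with made-up paths standing in for get_openapi_paths() and the tracked endpoint sets, would be:

def undocumented_paths(openapi_paths: set, checked: set, exempt: set) -> set:
    # Illustrative helper, not part of the test suite above: documented paths
    # that were never matched against urls.py, minus the known-buggy and
    # pending exemptions, are what gets reported as a failure.
    return openapi_paths - checked - exempt

assert undocumented_paths({"/messages", "/users"}, {"/messages", "/users"}, set()) == set()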
|
[
"def test_openapi_arguments(self) -> None:\n\n from zproject import urls as urlconf\n\n # We loop through all the API patterns, looking in particular\n # for those using the rest_dispatch decorator; we then parse\n # its mapping of (HTTP_METHOD -> FUNCTION).\n for p in urlconf.v1_api_and_json_patterns + urlconf.v1_api_mobile_patterns:\n methods_endpoints: Dict[str, Any] = {}\n if p.callback is not rest_dispatch:\n # Endpoints not using rest_dispatch don't have extra data.\n if str(p.pattern) in self.documented_post_only_endpoints:\n methods_endpoints = dict(POST=p.callback)\n else:\n methods_endpoints = dict(GET=p.callback)\n else:\n methods_endpoints = assert_is_not_none(p.default_args)\n\n # since the module was already imported and is now residing in\n # memory, we won't actually face any performance penalties here.\n for method, value in methods_endpoints.items():\n if callable(value):\n function: Callable[..., HttpResponse] = value\n tags: Set[str] = set()\n else:\n function, tags = value\n\n if function is get_events:\n # Work around the fact that the registered\n # get_events view function isn't where we do\n # @has_request_variables.\n #\n # TODO: Make this configurable via an optional argument\n # to has_request_variables, e.g.\n # @has_request_variables(view_func_name=\"zerver.tornado.views.get_events\")\n function = get_events_backend\n\n function_name = f\"{function.__module__}.{function.__name__}\"\n\n with self.subTest(function_name):\n self.check_openapi_arguments_for_view(p, function_name, function, method, tags)\n\n self.check_for_non_existent_openapi_endpoints()",
"def _check_endpoint_path(endpoints: List[swagger_to.intermediate.Endpoint]) -> List[Complaint]:\n complaints = [] # type: List[Complaint]\n\n for endpoint in endpoints:\n if not endpoint.path.startswith(\"/\"):\n complaints.append(\n Complaint(\n message=\"Path doesn't begin with a slash\",\n what=endpoint.path,\n where=\"In endpoint {}\".format(endpoint.operation_id),\n line=endpoint.line))\n\n return complaints",
"def test_empty_endpoints(self):\n for endpoint in self.endpoints:\n url_endpoint = self.server+endpoint[1]+'/'\n\n if len(endpoint) > self.index_example:\n # Endpoint with parameter within the URL path\n if ('path' in endpoint[self.index_example]):\n ex = endpoint[self.index_example]['path'][0]\n\n # Endpoint with parameter as query\n elif ('query' in endpoint[self.index_example]):\n ex_full = endpoint[self.index_example]['query'][0]\n ex_content = ex_full.split('=')\n url_endpoint += '?'+ex_content[0]+'='\n ex = ex_content[1]\n\n if ex:\n if re.match(\"^\\d+$\",ex):\n url_endpoint += self.fake_examples['integer']\n elif re.match(\"^\\d{4}-\\d{2}-\\d{2}$\", ex):\n url_endpoint += self.fake_examples['date']\n else:\n url_endpoint += self.fake_examples['string']\n self.get_empty_response(url_endpoint, endpoint[self.index_result_mutliplicity])",
"def test_service_doc(self):\n response = self.app.get(\"api.html\", follow_redirects=True)\n self.assertEqual(200, response.status_code)",
"def isEndPointExistNotUsedButAGoodReference( self, epname):\n\t\t#######################################################\n\t\t#\tCheck to see if the given EndPoint exists.\n\t\t#######################################################\n\t\tmyargs\t\t\t= array( ['specialEndpoints'], java.lang.String )\n\t\tendpointAttrs\t= self.configService.getAttributes( self.configService.session, self.rootObjectName, myargs, False )\n\t\t#endpointAttrs\t= self.configService.getAttributes( self.configService.session, self.rootObjectName, None, False )\n\t\t#self.debug( __name__ + \".isEndPointExist(): endpointAttrs=\" + str( endpointAttrs ) + \"\\n\" )\n\t\tself.debug( __name__ + \".isEndPointExist(): endpointAttrs type=\" + str( type( endpointAttrs ) ) + \"\\n\" )\n\t\tfor endpointAttr in endpointAttrs:\n\t\t\t#self.debug( __name__ + \".isEndPointExist(): endpointAttr=\" + str( endpointAttr ) + \"\\n\" )\n\t\t\tself.debug( __name__ + \".isEndPointExist(): endpointAttr type=\" + str( type( endpointAttr ) ) + \"\\n\" )\n\t\t\tattrName = endpointAttr.getName()\n\t\t\tspecialEndPointAttrs= endpointAttr.getValue()\n\t\t\tself.debug( __name__ + \".isEndPointExist(): attrName=\" + str( attrName ) + \"\\n\" )\n\t\t\tself.debug( __name__ + \".isEndPointExist(): attrName type=\" + str( type( attrName ) ) + \"\\n\" )\n\t\t\t#self.debug( __name__ + \".isEndPointExist(): specialEndPointAttrs=\" + str( specialEndPointAttrs ) + \"\\n\" )\n\t\t\tself.debug( __name__ + \".isEndPointExist(): specialEndPointAttrs type=\" + str( type( specialEndPointAttrs ) ) + \"\\n\" )\n\t\t\tif isinstance( specialEndPointAttrs, java.util.ArrayList ):\n\t\t\t\tfor namedEndPoint in specialEndPointAttrs:\n\t\t\t\t\t#self.debug( __name__ + \".isEndPointExist(): namedEndPoint=\" + str( namedEndPoint ) + \"\\n\" )\n\t\t\t\t\tself.debug( __name__ + \".isEndPointExist(): namedEndPoint type=\" + str( type( namedEndPoint ) ) + \"\\n\" )\n\t\t\t\t\tepArgs = array( ['endPointName'], java.lang.String )\n\t\t\t\t\tnameAttrs\t= self.configService.getAttributes( self.configService.session, namedEndPoint, epArgs, False )\n\t\t\t\t\tself.debug( __name__ + \".isEndPointExist(): nameAttrs=\" + str( nameAttrs ) + \"\\n\" )\n\t\t\t\t\tself.debug( __name__ + \".isEndPointExist(): nameAttrs type=\" + str( type( nameAttrs ) ) + \"\\n\" )\n\t\t\t\t\tepName = self.configService.configServiceHelper.getAttributeValue( nameAttrs, 'endPointName' )\n\t\t\t\t\tif epName == epname:\n\t\t\t\t\t\treturn True\n\t\t\t\t#Endfor\n\t\t\t#Endif\n\t\t#Endfor\n\t\treturn False",
"def test_114_designate_api_endpoint(self):\n u.log.debug('Checking designate api endpoint data...')\n endpoints = self.keystone.endpoints.list()\n u.log.debug(endpoints)\n admin_port = internal_port = public_port = '9001'\n expected = {'id': u.not_null,\n 'region': 'RegionOne',\n 'adminurl': u.valid_url,\n 'internalurl': u.valid_url,\n 'publicurl': u.valid_url,\n 'service_id': u.not_null}\n\n ret = u.validate_endpoint_data(\n endpoints,\n admin_port,\n internal_port,\n public_port,\n expected,\n openstack_release=self._get_openstack_release())\n if ret:\n message = 'Designate endpoint: {}'.format(ret)\n amulet.raise_status(amulet.FAIL, msg=message)\n\n u.log.debug('OK')",
"def test_watchlist_endpoint_available(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_swagger_docs_url_resolves_to_correct_view(self):\n view = resolve('/docs/')\n SCHEMA_VIEW = get_swagger_view(\n title='Harvester Control Center API',\n url=os.environ.get('FORCE_SCRIPT_NAME', '')\n )\n self.assertEqual(view.func.__name__, SCHEMA_VIEW.__name__)",
"def is_endpoint_external(self):\n return self.endpoint in objects.EXTERNAL_END_POINTS",
"def test_service_desc(self):\n service_desc = self._request_valid(\"api\")\n self.assertIn(\"openapi\", service_desc.keys())\n self.assertIn(\"eodag\", service_desc[\"info\"][\"title\"].lower())\n self.assertGreater(len(service_desc[\"paths\"].keys()), 0)\n # test a 2nd call (ending slash must be ignored)\n self._request_valid(\"api/\")",
"def _check_endpoint_responses(endpoints: List[swagger_to.intermediate.Endpoint]) -> List[Complaint]:\n complaints = [] # type: List[Complaint]\n\n for endpoint in endpoints:\n if \"200\" not in endpoint.responses.keys():\n complaints.append(\n Complaint(\n message=\"Path doesn't include response 200\",\n what=endpoint.path,\n where=\"In endpoint {}\".format(endpoint.operation_id),\n line=endpoint.line))\n if \"default\" not in endpoint.responses.keys():\n complaints.append(\n Complaint(\n message=\"Path doesn't include default response\",\n what=endpoint.path,\n where=\"In endpoint {}\".format(endpoint.operation_id),\n line=endpoint.line))\n\n return complaints",
"def test_wrong_endpoint_url(self):\n # Try to get a book from wrong url\n response = self.client.get('/api/v2/booooks')\n self.assertIn(\"http://localhost/api/v2/booooks is not a valid url\",\n str(response.data), msg=\"Handles invalid url\")",
"def validate_api_auth(app: Flask):\n unmarked_endpoints = []\n for label, endpoint in app.view_functions.items():\n if not hasattr(endpoint, \"is_protected\"):\n unmarked_endpoints.append(label)\n\n assert len(unmarked_endpoints) == 0, (\n \"All endpoints must use either the `requires_auth` or `public` decorator \"\n \"to explicitly specify their auth configuration. Missing from the following \"\n \"endpoints: \" + \", \".join(unmarked_endpoints)\n )",
"def test_offer_endpoint_available(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def docs(endpoint):\n api = {\n 'endpoint': endpoint,\n 'methods': [],\n 'doc': '',\n 'url': '',\n 'name': ''\n }\n\n try:\n func = get_app().view_functions[endpoint]\n\n api['name'] = _get_api_name(func)\n api['doc'] = _get_api_doc(func)\n\n for rule in get_app().url_map.iter_rules():\n if rule.endpoint == endpoint:\n api['methods'] = ','.join(rule.methods)\n api['url'] = str(rule)\n\n except:\n api['doc'] = 'Invalid api endpoint: \"{}\"!'.format(endpoint)\n\n return render_template('api_docs.html', api=api)",
"def test_404(self):\r\n client = self.app.test_client() \r\n\r\n \"\"\" the endpoints and methods to test \"\"\"\r\n endpoints = { \r\n \"/bookings/\":[\"get\",\"put\",\"delete\"] \r\n } \r\n for k,v in endpoints.items(): \r\n for m in v: \r\n response = None \r\n if m == \"get\": \r\n response = client.get(k+'999') \r\n elif m == \"put\": \r\n response = client.put(k+'999',json={}) \r\n elif m == \"delete\": \r\n response = client.delete(k+'999') \r\n self.assertEqual(response.status_code, 404, msg=\"ENDPOINT: \"+k+\"\\nMETHOD: \"+m+\"\\n\"+response.get_data(as_text=True)) # not found\r",
"def test_item_endpoint_available(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_swagger_docs_reverses_to_correct_url(self):\n url = reverse('swagger-docs')\n self.assertEqual(url, '/docs/')",
"def supported(self) -> bool:\n return self.api_id.value in self.vapix.api_discovery"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print a VERY clear and verbose error message for when the types (between the OpenAPI documentation and the function declaration) don't match.
|
def render_openapi_type_exception(
self,
function: Callable[..., HttpResponse],
openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],
function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],
diff: Set[Tuple[str, Union[type, Tuple[type, object]]]],
) -> None: # nocoverage
msg = f"""
The types for the request parameters in zerver/openapi/zulip.yaml
do not match the types declared in the implementation of {function.__name__}.\n"""
msg += "=" * 65 + "\n"
msg += "{:<10}{:^30}{:>10}\n".format(
"parameter", "OpenAPI type", "function declaration type"
)
msg += "=" * 65 + "\n"
opvtype = None
fdvtype = None
for element in diff:
vname = element[0]
for element in openapi_params:
if element[0] == vname:
opvtype = element[1]
break
for element in function_params:
if element[0] == vname:
fdvtype = element[1]
break
msg += f"{vname:<10}{opvtype!s:^30}{fdvtype!s:>10}\n"
raise AssertionError(msg)
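The tabular layout above comes entirely from standard format specifications (`<`, `^`, and `>` with fixed field widths); a tiny standalone demo of the same header/row alignment, using an invented parameter name, is:

# Illustrative demo only; "topic" and the row values are made up.
header = "{:<10}{:^30}{:>10}".format("parameter", "OpenAPI type", "function declaration type")
row = "{:<10}{!s:^30}{!s:>10}".format("topic", str, list)
print(header)
print(row)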
|
[
"def _type_error_message(func: callable, expected: type,\n got: object) -> str:\n\n return ('{} should return a {}, but returned {}' +\n '.').format(func.__name__, expected.__name__, got)",
"def test_function_args(self):\n reporter = SimpleReporter(\n pkgs=[PackageAPI(BASE_PACKAGE), PackageAPI(PACKAGE_WITH_DIFFERENT_ARGS)],\n errors_allowed=100,\n )\n reporter._check_function_args()\n errors = reporter.errors\n self.assertTrue(len(errors) == 2)\n self.assertTrue(all([isinstance(x, DoppelTestError) for x in errors]))\n expected_message = (\n \"Function 'playback()' exists in all packages but some \"\n \"arguments are not shared in all implementations.\"\n )\n self.assertTrue(errors[0].msg == expected_message)",
"def check_argument_types(\n self, function: Callable[..., HttpResponse], openapi_parameters: List[Dict[str, Any]]\n ) -> None:\n openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]] = set()\n json_params: Dict[str, Union[type, Tuple[type, object]]] = {}\n for element in openapi_parameters:\n name: str = element[\"name\"]\n schema = {}\n if \"content\" in element:\n # The only content-type we use in our API is application/json.\n assert \"schema\" in element[\"content\"][\"application/json\"]\n # If content_type is application/json, then the\n # parameter needs to be handled specially, as REQ can\n # either return the application/json as a string or it\n # can either decode it and return the required\n # elements. For example `to` array in /messages: POST\n # is processed by REQ as a string and then its type is\n # checked in the view code.\n #\n # Meanwhile `profile_data` in /users/{user_id}: GET is\n # taken as array of objects. So treat them separately.\n schema = element[\"content\"][\"application/json\"][\"schema\"]\n json_params[name] = schema_type(schema)\n continue\n else:\n schema = element[\"schema\"]\n openapi_params.add((name, schema_type(schema)))\n\n function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]] = set()\n\n # Iterate through the decorators to find the original\n # function, wrapped by has_request_variables, so we can parse\n # its arguments.\n while (wrapped := getattr(function, \"__wrapped__\", None)) is not None:\n function = wrapped\n\n # Now, we do inference mapping each REQ parameter's\n # declaration details to the Python/mypy types for the\n # arguments passed to it.\n #\n # Because the mypy types are the types used inside the inner\n # function (after the original data is processed by any\n # validators, converters, etc.), they will not always match\n # the API-level argument types. The main case where this\n # happens is when a `converter` is used that changes the types\n # of its parameters.\n for pname, defval in inspect.signature(function).parameters.items():\n defval = defval.default\n if isinstance(defval, _REQ):\n # TODO: The below inference logic in cases where\n # there's a converter function declared is incorrect.\n # Theoretically, we could restructure the converter\n # function model so that we can check what type it\n # excepts to be passed to make validation here\n # possible.\n\n vtype = self.get_standardized_argument_type(function.__annotations__[pname])\n vname = defval.post_var_name\n assert vname is not None\n if vname in json_params:\n # Here we have two cases. If the the REQ type is\n # string then there is no point in comparing as\n # JSON can always be returned as string. Ideally,\n # we wouldn't use REQ for a JSON object without a\n # validator in these cases, but it does happen.\n #\n # If the REQ type is not string then, insert the\n # REQ and OpenAPI data types of the variable in\n # the respective sets so that they can be dealt\n # with later. In either case remove the variable\n # from `json_params`.\n if vtype == str:\n json_params.pop(vname, None)\n continue\n else:\n openapi_params.add((vname, json_params[vname]))\n json_params.pop(vname, None)\n function_params.add((vname, vtype))\n\n # After the above operations `json_params` should be empty.\n assert len(json_params) == 0\n diff = openapi_params - function_params\n if diff: # nocoverage\n self.render_openapi_type_exception(function, openapi_params, function_params, diff)",
"def test_review_func_docstrings(self):\n for func in self.review_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))",
"def test_any_type(self):\n\n @typecheck(int, None)\n def to_string(x, y):\n x = y\n return str(x)\n\n try:\n to_string(1, 9)\n except InvalidArgumentType:\n self.fail(\"Failed typecheck while it shouldn't have, given the first argument has the correct type and no type check should be performed on the second argument.\")",
"def wrong_args(func, signature, missing_args, arg_type, number_of_args=0):\n ordered_args = [a for a in signature.parameters if a in missing_args]\n ordered_args = ordered_args[number_of_args:]\n error_message = ['%s() missing %d required %s argument' % (func.__name__, len(ordered_args), arg_type)]\n if len(ordered_args) == 1:\n error_message.append(\": '%s'\" % ordered_args[0])\n else:\n error_message.extend(['s: ', ' '.join(\"'%s'\" % a for a in ordered_args[:-1]), \" and '%s'\" % ordered_args[-1]])\n raise TypeError(''.join(error_message))",
"def check_func_params(lineno, func, params, param_list, decl=True):\n if len(params) != len(func[\"parameters\"]):\n print_error(lineno, {}, 30, \"few\" if len(params) < len(func[\"parameters\"]) else \"many\", func[\"name\"])\n return False\n c2 = all(param[\"id_type\"] in param_list for param in params)\n if decl:\n c3 = all([ param.get(\"is_decl\", True) for param in params])\n else:\n c3 = all([ not param.get(\"is_decl\", False) for param in params])\n if not (c2 and c3):\n print_error(lineno, {}, 33)\n return False\n no_err = True\n for p1, p2 in zip(params, func[\"parameters\"]):\n if simple_type_specifier.get(' '.join(p1[\"type\"])) and simple_type_specifier.get(' '.join(p2[\"type\"])) :\n if simple_type_specifier[' '.join(p1[\"type\"])][\"equiv_type\"] != simple_type_specifier[' '.join(p2[\"type\"])][\"equiv_type\"] :\n no_err = False\n print_error(lineno, {}, 31, p1[\"name\"], p2[\"name\"])\n elif p1[\"id_type\"] in [pt for pt in param_list if pt not in [\"literal\"]] and set(p1[\"specifier\"]) != set(p2[\"specifier\"]):\n no_err = False\n print_error(lineno, {}, 34, p1[\"name\"], p2[\"name\"])\n elif p1.get(\"order\", []) != p2.get(\"order\", []):\n no_err = False\n print_error(lineno, {}, 35, p1[\"name\"], p2[\"name\"])\n elif p1.get(\"star\", 0) != p2.get(\"star\", 0):\n no_err = False\n print_error(lineno, {}, 31, p1[\"name\"], p2[\"name\"])\n else:\n no_err = False\n print_error(lineno,{}, 32, p1[\"name\"])\n return no_err",
"def test_builtin_errors():\r\n try: SampleAPI.execute('in.valid')\r\n except MethodNotFoundError, e:\r\n assert e.method == ['in', 'valid']",
"def test_identical_functions(self):\n reporter = SimpleReporter(\n pkgs=[PackageAPI(BASE_PACKAGE), PackageAPI(BASE_PACKAGE2)], errors_allowed=0\n )\n reporter._check_function_args()\n errors = reporter.errors\n self.assertTrue(len(errors) == 0)",
"def test_get_args_schema(self):\n try:\n Draft4Validator.check_schema(token.TokenView2.GET_SCHEMA)\n schema_valid = True\n except RuntimeError:\n schema_valid = False\n\n self.assertTrue(schema_valid)",
"def test_poorly_defined_functions(self):\r\n # test for bad field name\r\n search = {'functions': [{'name': 'sum', 'field': 'bogusfieldname'}]}\r\n resp = self.app.get('/api/eval/person?q={0}'.format(dumps(search)))\r\n assert resp.status_code == 400\r\n assert 'message' in loads(resp.data)\r\n assert 'bogusfieldname' in loads(resp.data)['message']\r\n\r\n # test for bad function name\r\n search = {'functions': [{'name': 'bogusfuncname', 'field': 'age'}]}\r\n resp = self.app.get('/api/eval/person?q={0}'.format(dumps(search)))\r\n assert resp.status_code == 400\r\n assert 'message' in loads(resp.data)\r\n assert 'bogusfuncname' in loads(resp.data)['message']",
"def test_function_definitions_should_break(Script):\n assert_signature(Script, 'abs(\\ndef x', 'abs', 0)\n assert not Script('abs(\\ndef x(): pass').get_signatures()",
"def check_types_docstrings(self):\n for astnode in self.astnode_types:\n with astnode.diagnostic_context:\n RstCommentChecker.check_doc(astnode._doc)\n\n for struct in self.struct_types:\n with struct.diagnostic_context:\n RstCommentChecker.check_doc(struct._doc)",
"def type_mismatch_error_message(\n first: 'Type',\n second: 'Type',\n relation: TypeRelation,\n second_is_expected: bool = False,\n) -> str:\n maybe_expected = 'expected ' if second_is_expected else ''\n first_str = first.compact_representation()\n second_str = second.compact_representation()\n diff = None\n if first_str == second_str:\n # The two only differ in container types or some other property not\n # visible via the compact representation, so show `repr` instead.\n # No diff is used because `repr` prints to a single line.\n first_str = repr(first)\n second_str = repr(second)\n diff = None\n elif len(first_str) > MAX_LINE_LEN or len(second_str) > MAX_LINE_LEN:\n # The types are large structures, and so the formatted representation is\n # used and a summary diff is added. The logic here is that large types\n # may be easier to diff visually with a more structured representation,\n # and logical line breaks are required to make diff output useful.\n first_str = first.formatted_representation()\n second_str = second.formatted_representation()\n split_first = first_str.split('\\n')\n split_second = second_str.split('\\n')\n diff = '\\n'.join(difflib.unified_diff(split_first, split_second))\n message = [\n 'Type',\n f'`{first_str}`',\n f'is not {relation.value} to {maybe_expected}type',\n f'`{second_str}`',\n ]\n if diff:\n message += [f'\\nDiff:\\n{diff}']\n single_line = ' '.join(message)\n if len(single_line) > MAX_LINE_LEN or '\\n' in single_line:\n return '\\n'.join(message)\n else:\n return single_line",
"def console_validate(\n # Source\n source: List[str] = common.source,\n name: str = common.resource_name,\n type: str = common.type,\n path: str = common.path,\n scheme: str = common.scheme,\n format: str = common.format,\n encoding: str = common.encoding,\n innerpath: str = common.innerpath,\n compression: str = common.compression,\n schema: str = common.schema,\n hash: str = common.hash,\n bytes: int = common.bytes,\n fields: int = common.fields,\n rows: int = common.rows,\n basepath: str = common.basepath,\n # Dialect\n dialect: str = common.dialect,\n header_rows: str = common.header_rows,\n header_join: str = common.header_join,\n comment_char: str = common.comment_char,\n comment_rows: str = common.comment_rows,\n sheet: str = common.sheet,\n table: str = common.table,\n keys: str = common.keys,\n keyed: bool = common.keyed,\n # Detector\n buffer_size: int = common.buffer_size,\n sample_size: int = common.sample_size,\n field_type: str = common.field_type,\n field_names: str = common.field_names,\n field_confidence: float = common.field_confidence,\n field_float_numbers: bool = common.field_float_numbers,\n field_missing_values: str = common.field_missing_values,\n schema_sync: bool = common.schema_sync,\n # Checklist\n checklist: str = common.checklist,\n checks: str = common.checks,\n pick_errors: str = common.pick_errors,\n skip_errors: str = common.skip_errors,\n # Command\n parallel: bool = common.parallel,\n limit_rows: int = common.limit_rows,\n limit_errors: int = common.limit_errors,\n yaml: bool = common.yaml,\n json: bool = common.json,\n debug: bool = common.debug,\n trusted: bool = common.trusted,\n standards: str = common.standards,\n # Deprecated\n resource_name: str = common.resource_name,\n):\n console = Console()\n name = name or resource_name\n\n # Setup system\n if trusted:\n system.trusted = trusted\n if standards:\n system.standards = standards # type: ignore\n\n # Create source\n source = helpers.create_source(source, path=path)\n if not source and not path:\n note = 'Providing \"source\" or \"path\" is required'\n helpers.print_error(console, note=note)\n raise typer.Exit(code=1)\n\n try:\n # Create dialect\n dialect_obj = helpers.create_dialect(\n descriptor=dialect,\n header_rows=header_rows,\n header_join=header_join,\n comment_char=comment_char,\n comment_rows=comment_rows,\n sheet=sheet,\n table=table,\n keys=keys,\n keyed=keyed,\n )\n\n # Create detector\n detector_obj = helpers.create_detector(\n buffer_size=buffer_size,\n sample_size=sample_size,\n field_type=field_type,\n field_names=field_names,\n field_confidence=field_confidence,\n field_float_numbers=field_float_numbers,\n field_missing_values=field_missing_values,\n schema_sync=schema_sync,\n )\n\n # Create checklist\n checklist_obj = helpers.create_checklist(\n descriptor=checklist,\n checks=checks,\n pick_errors=pick_errors,\n skip_errors=skip_errors,\n )\n\n # Create resource\n resource = Resource(\n source=helpers.create_source(source),\n name=name,\n path=path,\n scheme=scheme,\n format=format,\n datatype=type,\n compression=compression,\n innerpath=innerpath,\n encoding=encoding,\n hash=hash,\n bytes=bytes,\n fields=fields,\n rows=rows,\n schema=schema,\n basepath=basepath,\n detector=detector_obj,\n )\n\n # Add dialect\n if dialect_obj:\n resource.dialect = dialect_obj\n\n # Validate resource\n report = resource.validate(\n checklist_obj,\n name=name,\n parallel=parallel,\n limit_rows=limit_rows,\n limit_errors=limit_errors,\n )\n code = int(not report.valid)\n except Exception as 
exception:\n helpers.print_exception(console, debug=debug, exception=exception)\n raise typer.Exit(code=1)\n\n # Yaml mode\n if yaml:\n content = report.to_yaml().strip()\n print(content)\n raise typer.Exit(code=code)\n\n # Json mode\n if json:\n content = report.to_json()\n print(content)\n raise typer.Exit(code=code)\n\n # Default mode\n labels = [\"Row\", \"Field\", \"Type\", \"Message\"]\n props = [\"row_number\", \"field_number\", \"type\", \"message\"]\n names = [\"dataset\"] + [task.name for task in report.tasks]\n matrix = [report.errors] + [task.errors for task in report.tasks]\n\n # Status\n if report.tasks:\n console.rule(\"[bold]Dataset\")\n view = Table(title=\"dataset\")\n view.add_column(\"name\")\n view.add_column(\"type\")\n view.add_column(\"path\")\n view.add_column(\"status\")\n for task in report.tasks:\n status = \"VALID\" if task.valid else \"INVALID\"\n style = \"green\" if task.valid else \"bold red\"\n status_row = [task.name, task.type, task.place, status]\n view.add_row(*status_row, style=style)\n console.print(view)\n\n # Errors\n if not report.valid:\n console.rule(\"[bold]Tables\")\n for name, errors in zip(names, matrix):\n if errors:\n view = Table(title=name)\n for label in labels:\n view.add_column(label)\n for error in errors:\n error_row: List[str] = []\n for prop in props:\n error_row.append(str(getattr(error, prop, None)))\n view.add_row(*error_row)\n console.print(view)\n\n # Proper retcode\n raise typer.Exit(code=code)",
"def check_undefined_type_within_parameters(localization, call_description, *arg_types, **kwargs_types):\n arg_types_list = list(arg_types)\n\n # Process arguments\n for i in range(len(arg_types_list)):\n if isinstance(arg_types_list[i], union_type_copy.UnionType):\n # Is an undefined type inside this union type?\n exist_undefined = len(filter(lambda elem: isinstance(elem, UndefinedType), arg_types[i].types)) > 0\n if exist_undefined:\n # Compose a type warning with the full description of the problem.\n offset = print_utils_copy.get_param_position(\n module_line_numbering_copy.ModuleLineNumbering.get_line_from_module_code(\n localization.file_name, localization.line), i)\n if offset is not -1: # Sometimes offsets of the offending parameters cannot be obtained\n clone_loc = localization.clone()\n clone_loc.column = offset\n else:\n clone_loc = localization\n TypeWarning.instance(clone_loc, \"{0}: Argument {1} could be undefined\".format(call_description,\n i + 1))\n # Remove undefined type from the union type\n arg_types_list[i] = strip_undefined_type_from_union_type(arg_types[i])\n continue\n else:\n # Undefined types outside union types are treated as Type errors.\n if isinstance(arg_types[i], UndefinedType):\n offset = print_utils_copy.get_param_position(\n module_line_numbering_copy.ModuleLineNumbering.get_line_from_module_code(\n localization.file_name, localization.line), i)\n if offset is not -1: # Sometimes offsets of the offending parameters cannot be obtained\n clone_loc = localization.clone()\n clone_loc.column = offset\n else:\n clone_loc = localization\n\n arg_types_list[i] = TypeError(clone_loc, \"{0}: Argument {1} is not defined\".format(call_description,\n i + 1))\n continue\n\n arg_types_list[i] = arg_types[i]\n\n # Process keyword arguments (the same processing as argument lists)\n final_kwargs = {}\n for key, value in kwargs_types.items():\n if isinstance(value, union_type_copy.UnionType):\n exist_undefined = filter(lambda elem: isinstance(elem, UndefinedType), value.types)\n if exist_undefined:\n TypeWarning.instance(localization,\n \"{0}: Keyword argument {1} could be undefined\".format(call_description,\n key))\n final_kwargs[key] = strip_undefined_type_from_union_type(value)\n continue\n else:\n if isinstance(value, UndefinedType):\n final_kwargs[key] = TypeError(localization,\n \"{0}: Keyword argument {1} is not defined\".format(call_description,\n key))\n continue\n final_kwargs[key] = value\n\n return tuple(arg_types_list), final_kwargs",
"def test_typecheck_raises_on_failed_check(self):\n \n @typecheck(int, int, prompt=str)\n def sum_string(x, y, prompt='The sum of {} and {} is {}.'):\n return prompt.format(str(x), str(y), str(x+y))\n\n try:\n sum_string(1, 2, prompt='{} + {} = {}')\n except InvalidArgumentType:\n self.fail(\"Failed typecheck while it shouldn't have.\")\n with self.assertRaises(InvalidArgumentType):\n sum_string('hello', 'world')",
"def test_func_docstrings(self):\n for func in dir(BaseModel):\n with self.subTest(function=func):\n self.assertIsNot(\n func[1].__doc__,\n None,\n \"{:s} method needs a docstring\".format(func[0])\n )\n self.assertTrue(\n len(func[1].__doc__) > 1,\n \"{:s} method needs a docstring\".format(func[0])\n )",
"def test_openapi_arguments(self) -> None:\n\n from zproject import urls as urlconf\n\n # We loop through all the API patterns, looking in particular\n # for those using the rest_dispatch decorator; we then parse\n # its mapping of (HTTP_METHOD -> FUNCTION).\n for p in urlconf.v1_api_and_json_patterns + urlconf.v1_api_mobile_patterns:\n methods_endpoints: Dict[str, Any] = {}\n if p.callback is not rest_dispatch:\n # Endpoints not using rest_dispatch don't have extra data.\n if str(p.pattern) in self.documented_post_only_endpoints:\n methods_endpoints = dict(POST=p.callback)\n else:\n methods_endpoints = dict(GET=p.callback)\n else:\n methods_endpoints = assert_is_not_none(p.default_args)\n\n # since the module was already imported and is now residing in\n # memory, we won't actually face any performance penalties here.\n for method, value in methods_endpoints.items():\n if callable(value):\n function: Callable[..., HttpResponse] = value\n tags: Set[str] = set()\n else:\n function, tags = value\n\n if function is get_events:\n # Work around the fact that the registered\n # get_events view function isn't where we do\n # @has_request_variables.\n #\n # TODO: Make this configurable via an optional argument\n # to has_request_variables, e.g.\n # @has_request_variables(view_func_name=\"zerver.tornado.views.get_events\")\n function = get_events_backend\n\n function_name = f\"{function.__module__}.{function.__name__}\"\n\n with self.subTest(function_name):\n self.check_openapi_arguments_for_view(p, function_name, function, method, tags)\n\n self.check_for_non_existent_openapi_endpoints()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
We construct for both the OpenAPI data and the function's definition a set of tuples of the form (var_name, type) and then compare those sets to see if the OpenAPI data declares a different type than the one actually accepted by the function. If they do differ, we print out the exact differences for convenient debugging and raise an AssertionError.
|
def check_argument_types(
self, function: Callable[..., HttpResponse], openapi_parameters: List[Dict[str, Any]]
) -> None:
openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]] = set()
json_params: Dict[str, Union[type, Tuple[type, object]]] = {}
for element in openapi_parameters:
name: str = element["name"]
schema = {}
if "content" in element:
# The only content-type we use in our API is application/json.
assert "schema" in element["content"]["application/json"]
# If content_type is application/json, then the
# parameter needs to be handled specially, as REQ can
            # either return the application/json as a string, or it
            # can decode it and return the required elements. For
            # example, the `to` array in /messages: POST
# is processed by REQ as a string and then its type is
# checked in the view code.
#
# Meanwhile `profile_data` in /users/{user_id}: GET is
# taken as array of objects. So treat them separately.
schema = element["content"]["application/json"]["schema"]
json_params[name] = schema_type(schema)
continue
else:
schema = element["schema"]
openapi_params.add((name, schema_type(schema)))
function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]] = set()
# Iterate through the decorators to find the original
# function, wrapped by has_request_variables, so we can parse
# its arguments.
while (wrapped := getattr(function, "__wrapped__", None)) is not None:
function = wrapped
# Now, we do inference mapping each REQ parameter's
# declaration details to the Python/mypy types for the
# arguments passed to it.
#
# Because the mypy types are the types used inside the inner
# function (after the original data is processed by any
# validators, converters, etc.), they will not always match
# the API-level argument types. The main case where this
# happens is when a `converter` is used that changes the types
# of its parameters.
for pname, defval in inspect.signature(function).parameters.items():
defval = defval.default
if isinstance(defval, _REQ):
# TODO: The below inference logic in cases where
# there's a converter function declared is incorrect.
# Theoretically, we could restructure the converter
# function model so that we can check what type it
            # expects to be passed to make validation here
# possible.
vtype = self.get_standardized_argument_type(function.__annotations__[pname])
vname = defval.post_var_name
assert vname is not None
if vname in json_params:
                # Here we have two cases. If the REQ type is
# string then there is no point in comparing as
# JSON can always be returned as string. Ideally,
# we wouldn't use REQ for a JSON object without a
# validator in these cases, but it does happen.
#
                # If the REQ type is not string, then insert the
# REQ and OpenAPI data types of the variable in
# the respective sets so that they can be dealt
# with later. In either case remove the variable
# from `json_params`.
if vtype == str:
json_params.pop(vname, None)
continue
else:
openapi_params.add((vname, json_params[vname]))
json_params.pop(vname, None)
function_params.add((vname, vtype))
# After the above operations `json_params` should be empty.
assert len(json_params) == 0
diff = openapi_params - function_params
if diff: # nocoverage
self.render_openapi_type_exception(function, openapi_params, function_params, diff)
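Stripped of the REQ and OpenAPI plumbing, the comparison itself reduces to building two sets of (name, type) tuples and diffing them; a minimal sketch with invented parameter names is:

# Illustrative sketch only; the parameter names and types are made up.
openapi_params = {("stream_id", int), ("topic", str)}
function_params = {("stream_id", int), ("topic", list)}

# Tuples documented in OpenAPI that no function parameter matches exactly.
mismatched = openapi_params - function_params
assert mismatched == {("topic", str)}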
|
[
"def render_openapi_type_exception(\n self,\n function: Callable[..., HttpResponse],\n openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],\n function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],\n diff: Set[Tuple[str, Union[type, Tuple[type, object]]]],\n ) -> None: # nocoverage\n\n msg = f\"\"\"\nThe types for the request parameters in zerver/openapi/zulip.yaml\ndo not match the types declared in the implementation of {function.__name__}.\\n\"\"\"\n msg += \"=\" * 65 + \"\\n\"\n msg += \"{:<10}{:^30}{:>10}\\n\".format(\n \"parameter\", \"OpenAPI type\", \"function declaration type\"\n )\n msg += \"=\" * 65 + \"\\n\"\n opvtype = None\n fdvtype = None\n for element in diff:\n vname = element[0]\n for element in openapi_params:\n if element[0] == vname:\n opvtype = element[1]\n break\n for element in function_params:\n if element[0] == vname:\n fdvtype = element[1]\n break\n msg += f\"{vname:<10}{opvtype!s:^30}{fdvtype!s:>10}\\n\"\n raise AssertionError(msg)",
"def check_func_params(lineno, func, params, param_list, decl=True):\n if len(params) != len(func[\"parameters\"]):\n print_error(lineno, {}, 30, \"few\" if len(params) < len(func[\"parameters\"]) else \"many\", func[\"name\"])\n return False\n c2 = all(param[\"id_type\"] in param_list for param in params)\n if decl:\n c3 = all([ param.get(\"is_decl\", True) for param in params])\n else:\n c3 = all([ not param.get(\"is_decl\", False) for param in params])\n if not (c2 and c3):\n print_error(lineno, {}, 33)\n return False\n no_err = True\n for p1, p2 in zip(params, func[\"parameters\"]):\n if simple_type_specifier.get(' '.join(p1[\"type\"])) and simple_type_specifier.get(' '.join(p2[\"type\"])) :\n if simple_type_specifier[' '.join(p1[\"type\"])][\"equiv_type\"] != simple_type_specifier[' '.join(p2[\"type\"])][\"equiv_type\"] :\n no_err = False\n print_error(lineno, {}, 31, p1[\"name\"], p2[\"name\"])\n elif p1[\"id_type\"] in [pt for pt in param_list if pt not in [\"literal\"]] and set(p1[\"specifier\"]) != set(p2[\"specifier\"]):\n no_err = False\n print_error(lineno, {}, 34, p1[\"name\"], p2[\"name\"])\n elif p1.get(\"order\", []) != p2.get(\"order\", []):\n no_err = False\n print_error(lineno, {}, 35, p1[\"name\"], p2[\"name\"])\n elif p1.get(\"star\", 0) != p2.get(\"star\", 0):\n no_err = False\n print_error(lineno, {}, 31, p1[\"name\"], p2[\"name\"])\n else:\n no_err = False\n print_error(lineno,{}, 32, p1[\"name\"])\n return no_err",
"def test_equality(self, input_type, value):\n ds = DatasetList(input_type(value))\n\n assert ds == input_type(value)\n assert ds != value.append(\"additional\")\n for variable in [\"string\", 1, 1.0, {\"0\": 1}, True]:\n assert ds != variable",
"def test_compare_spec_type_inFOV(self):\n\n # download cached file\n compare_spec_type_fname = download_rename(\n f\"{self.basename}_compare_spec_type.asdf\"\n )\n with asdf.open(compare_spec_type_fname) as af:\n compare_spec_type_info = copy.deepcopy(af.tree)\n\n # run compare_spec_type\n spec_type = compare_spec_type(\n self.obs_fname_cache,\n self.stats_fname_cache,\n **compare_spec_type_info[\"input\"],\n )\n\n # expected output table\n expected_table = Table(compare_spec_type_info[\"output\"])\n\n # compare to new table\n compare_tables(expected_table, Table(spec_type), rtol=2e-3)",
"def test_equal_on_type_mismatch(self):\n a = payloads.DeriveKeyResponsePayload()\n b = \"invalid\"\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)",
"def assert_type_compatibility(defined_symbols: collections.OrderedDict, types: tuple):\n\n # Sanity check for any failed inference\n if None in types:\n raise IncompatibleTypeError('`None` was given', types)\n\n # Find all unique vector, pointer and scalar types\n # TODO: Better way to determine uniqueness\n vec_types = list(set([t for t in types if isinstance(t, dtypes.vector)]))\n ptr_types = list(set([t for t in types if isinstance(t, dtypes.pointer)]))\n scal_types = list(set([t for t in types if not isinstance(t, (dtypes.vector, dtypes.pointer))]))\n\n # Check if we can represent the types in SVE\n for t in types:\n if util.get_base_type(t).type not in util.TYPE_TO_SVE:\n raise IncompatibleTypeError('Not available in SVE', types)\n\n # Check if we have different vector types (would require casting, not implemented yet)\n if len(vec_types) > 1:\n raise IncompatibleTypeError('Vectors of different type', types)\n\n # Ensure no mixing of pointers and vectors/scalars ever occurs (totally incompatible)\n if (len(vec_types) != 0 or len(scal_types) != 0) and len(ptr_types) != 0:\n raise IncompatibleTypeError('Vectors/scalars are incompatible with pointers', types)",
"def test_identical_functions(self):\n reporter = SimpleReporter(\n pkgs=[PackageAPI(BASE_PACKAGE), PackageAPI(BASE_PACKAGE2)], errors_allowed=0\n )\n reporter._check_function_args()\n errors = reporter.errors\n self.assertTrue(len(errors) == 0)",
"def test3_equality(self):\n xml = self.data.xml()\n data = clam.common.data.OutputTemplate.fromxml(xml)\n self.assertTrue(data.formatclass == clam.common.formats.PlainTextFormat)\n self.assertTrue(isinstance(data.metafields[0], clam.common.data.SetMetaField))\n self.assertTrue(isinstance(data.metafields[1], clam.common.data.UnsetMetaField))\n self.assertTrue(isinstance(data.metafields[2], clam.common.data.ParameterMetaField))\n self.assertTrue(isinstance(data.metafields[3], clam.common.data.CopyMetaField))\n #self.assertTrue(data.filename == 'test') #always gives error, client unaware of server filename\n self.assertTrue(data.unique)\n #note: viewers and converters not supported client-side",
"def assert_is_similar(cls, expected, inferred):\n\n ERROR_URL_298 = \"https://github.com/opendp/opendp/discussions/298\"\n if isinstance(inferred, UnknownType):\n return\n if isinstance(expected, str) and isinstance(inferred, str):\n if inferred in ATOM_EQUIVALENCE_CLASSES:\n assert expected in ATOM_EQUIVALENCE_CLASSES[inferred], \\\n f\"inferred type is {inferred}, expected {expected}. See {ERROR_URL_298}\"\n else:\n assert expected == inferred, \\\n f\"inferred type is {inferred}, expected {expected}. See {ERROR_URL_298}\"\n\n elif isinstance(expected, RuntimeType) and isinstance(inferred, RuntimeType):\n # allow extra flexibility around options, as the inferred type of an Option::<T>::Some will just be T\n if expected.origin == \"Option\" and inferred.origin != \"Option\":\n expected = expected.args[0]\n\n assert expected.origin == inferred.origin, \\\n f\"inferred type is {inferred.origin}, expected {expected.origin}. See {ERROR_URL_298}\"\n\n assert len(expected.args) == len(inferred.args), \\\n f\"inferred type has {len(inferred.args)} arg(s), expected {len(expected.args)} arg(s). See {ERROR_URL_298}\"\n\n for (arg_par, arg_inf) in zip(expected.args, inferred.args):\n RuntimeType.assert_is_similar(arg_par, arg_inf)\n else:\n # inferred type differs in structure\n raise AssertionError(f\"inferred type is {inferred}, expected {expected}. See {ERROR_URL_298}\")",
"def _create_type_verifier(data_type: DataType, name: str = None):\n\n if name is None:\n new_msg = lambda msg: msg\n new_name = lambda n: \"field %s\" % n\n else:\n new_msg = lambda msg: \"%s: %s\" % (name, msg)\n new_name = lambda n: \"field %s in %s\" % (n, name)\n\n def verify_nullability(obj):\n if obj is None:\n if data_type._nullable:\n return True\n else:\n raise ValueError(new_msg(\"This field is not nullable, but got None\"))\n else:\n return False\n\n _type = type(data_type)\n\n assert _type in _acceptable_types or isinstance(data_type, UserDefinedType),\\\n new_msg(\"unknown datatype: %s\" % data_type)\n\n def verify_acceptable_types(obj):\n # subclass of them can not be from_sql_type in JVM\n if type(obj) not in _acceptable_types[_type]:\n raise TypeError(new_msg(\"%s can not accept object %r in type %s\"\n % (data_type, obj, type(obj))))\n\n if isinstance(data_type, CharType):\n def verify_char(obj):\n verify_acceptable_types(obj)\n if len(obj) != data_type.length:\n raise ValueError(new_msg(\n \"length of object (%s) of CharType is not: %d\" % (obj, data_type.length)))\n\n verify_value = verify_char\n\n elif isinstance(data_type, VarCharType):\n def verify_varchar(obj):\n verify_acceptable_types(obj)\n if len(obj) > data_type.length:\n raise ValueError(new_msg(\n \"length of object (%s) of VarCharType exceeds: %d\" % (obj, data_type.length)))\n\n verify_value = verify_varchar\n\n elif isinstance(data_type, BinaryType):\n def verify_binary(obj):\n verify_acceptable_types(obj)\n if len(obj) != data_type.length:\n raise ValueError(new_msg(\n \"length of object (%s) of BinaryType is not: %d\" % (obj, data_type.length)))\n\n verify_value = verify_binary\n\n elif isinstance(data_type, VarBinaryType):\n def verify_varbinary(obj):\n verify_acceptable_types(obj)\n if len(obj) > data_type.length:\n raise ValueError(new_msg(\n \"length of object (%s) of VarBinaryType exceeds: %d\"\n % (obj, data_type.length)))\n\n verify_value = verify_varbinary\n\n elif isinstance(data_type, UserDefinedType):\n sql_type = data_type.sql_type()\n verifier = _create_type_verifier(sql_type, name=name)\n\n def verify_udf(obj):\n if not (hasattr(obj, '__UDT__') and obj.__UDT__ == data_type):\n raise ValueError(new_msg(\"%r is not an instance of type %r\" % (obj, data_type)))\n data = data_type.to_sql_type(obj)\n if isinstance(sql_type, RowType):\n # remove the RowKind value in the first position.\n data = data[1:]\n verifier(data)\n\n verify_value = verify_udf\n\n elif isinstance(data_type, TinyIntType):\n def verify_tiny_int(obj):\n verify_acceptable_types(obj)\n if obj < -128 or obj > 127:\n raise ValueError(new_msg(\"object of TinyIntType out of range, got: %s\" % obj))\n\n verify_value = verify_tiny_int\n\n elif isinstance(data_type, SmallIntType):\n def verify_small_int(obj):\n verify_acceptable_types(obj)\n if obj < -32768 or obj > 32767:\n raise ValueError(new_msg(\"object of SmallIntType out of range, got: %s\" % obj))\n\n verify_value = verify_small_int\n\n elif isinstance(data_type, IntType):\n def verify_integer(obj):\n verify_acceptable_types(obj)\n if obj < -2147483648 or obj > 2147483647:\n raise ValueError(\n new_msg(\"object of IntType out of range, got: %s\" % obj))\n\n verify_value = verify_integer\n\n elif isinstance(data_type, ArrayType):\n element_verifier = _create_type_verifier(\n data_type.element_type, name=\"element in array %s\" % name)\n\n def verify_array(obj):\n verify_acceptable_types(obj)\n for i in obj:\n element_verifier(i)\n\n verify_value = verify_array\n\n elif 
isinstance(data_type, MapType):\n key_verifier = _create_type_verifier(data_type.key_type, name=\"key of map %s\" % name)\n value_verifier = _create_type_verifier(data_type.value_type, name=\"value of map %s\" % name)\n\n def verify_map(obj):\n verify_acceptable_types(obj)\n for k, v in obj.items():\n key_verifier(k)\n value_verifier(v)\n\n verify_value = verify_map\n\n elif isinstance(data_type, RowType):\n verifiers = []\n for f in data_type.fields:\n verifier = _create_type_verifier(f.data_type, name=new_name(f.name))\n verifiers.append((f.name, verifier))\n\n def verify_row_field(obj):\n if isinstance(obj, dict):\n for f, verifier in verifiers:\n verifier(obj.get(f))\n elif isinstance(obj, Row) and getattr(obj, \"_from_dict\", False):\n # the order in obj could be different than dataType.fields\n for f, verifier in verifiers:\n verifier(obj[f])\n elif isinstance(obj, (tuple, list)):\n if len(obj) != len(verifiers):\n raise ValueError(\n new_msg(\"Length of object (%d) does not match with \"\n \"length of fields (%d)\" % (len(obj), len(verifiers))))\n for v, (_, verifier) in zip(obj, verifiers):\n verifier(v)\n elif hasattr(obj, \"__dict__\"):\n d = obj.__dict__\n for f, verifier in verifiers:\n verifier(d.get(f))\n else:\n raise TypeError(new_msg(\"RowType can not accept object %r in type %s\"\n % (obj, type(obj))))\n\n verify_value = verify_row_field\n\n else:\n def verify_default(obj):\n verify_acceptable_types(obj)\n\n verify_value = verify_default\n\n def verify(obj):\n if not verify_nullability(obj):\n verify_value(obj)\n\n return verify",
"def test_equal_on_type_mismatch(self):\n a = payloads.DeriveKeyRequestPayload()\n b = \"invalid\"\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)",
"def test_functionset_accept_type():\r\n\r\n for fname in fst.functions:\r\n if fname.startswith(\"_\"):\r\n continue\r\n\r\n func = getattr(fst, fname)\r\n\r\n test_matrix0 = np.array([1, 2, 3, 4, 5, 6, 7])\r\n test_matrix1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\r\n test_matrix2 = np.array([[11, 22, 33, 44, 41], [55, 66, 77, 88, 85], [\r\n 99, 100, 111, 112, 199], [113, 114, 115, 116, 133]])\r\n test_matrix3 = np.array([[[2.42592027e-04, 1.92521620e-01, 8.78703764e-01],\r\n [2.16552246e-01, 2.73009222e-01, 8.73941110e-01]],\r\n\r\n [[1.00600025e-01, 3.28003203e-01, 9.42664245e-01],\r\n [3.43682259e-01, 8.87961491e-01, 7.08044033e-02]],\r\n\r\n [[4.62203008e-01, 2.24871123e-01, 7.33267856e-01],\r\n [5.85063713e-02, 8.36420046e-01, 2.23164288e-01]]])\r\n\r\n test_scalar = 42\r\n test_parameter = 0.5\r\n\r\n tt = [test_matrix0, test_matrix1,\r\n test_matrix2, test_matrix3, test_scalar, 0]\r\n\r\n tests = itertools.product(tt, tt)\r\n\r\n for t_inp1, t_inp2 in tests:\r\n try:\r\n res = func(t_inp1, t_inp2, test_parameter)\r\n\r\n if res is None:\r\n pytest.fail(\r\n f\"it seems that function '{fname}' does not return a value.\")\r\n\r\n if isinstance(res, np.ndarray) and len(res) == 0:\r\n pytest.fail(\r\n f\"it seems that function '{fname}' returns an empty array for inputs of types ({type(t_inp1)}, {type(t_inp2)}). \"\r\n f\"With the respective shapes {np.array(t_inp1).shape} and {np.array(t_inp2).shape}\")\r\n\r\n except Exception as err:\r\n pytest.fail(\r\n f\"Function '{fname}' seems to fail when the inputs are ({type(t_inp1)}, {type(t_inp2)}) \"\r\n f\"With the respective shapes {np.array(t_inp1).shape} and {np.array(t_inp2).shape}\"\r\n f\"\\nwith the following exception:\\n{err}\")",
"def test_validator_valid_stdtype_values_should_not_raise_exception(self):\n try:\n self.dummy.stdtype_bool = True\n self.dummy.stdtype_bytearray = bytearray(b'bytearray')\n self.dummy.stdtype_bytes = b'bytes'\n self.dummy.stdtype_complex = 1j\n self.dummy.stdtype_dict = {'Dictionary': True}\n self.dummy.stdtype_float = 1.1\n self.dummy.stdtype_frozenset = frozenset({1, 2, 3})\n self.dummy.stdtype_int = 666\n self.dummy.stdtype_list = ['List']\n self.dummy.stdtype_memoryview = memoryview(b'')\n self.dummy.stdtype_range = range(1, 10)\n self.dummy.stdtype_set = {1, 2, 3}\n self.dummy.stdtype_str = 'String'\n self.dummy.stdtype_tuple = ('Tuple',)\n self.dummy.stdtype_type = type\n except Exception as e:\n self.fail(e)",
"def test_function_args(self):\n reporter = SimpleReporter(\n pkgs=[PackageAPI(BASE_PACKAGE), PackageAPI(PACKAGE_WITH_DIFFERENT_ARGS)],\n errors_allowed=100,\n )\n reporter._check_function_args()\n errors = reporter.errors\n self.assertTrue(len(errors) == 2)\n self.assertTrue(all([isinstance(x, DoppelTestError) for x in errors]))\n expected_message = (\n \"Function 'playback()' exists in all packages but some \"\n \"arguments are not shared in all implementations.\"\n )\n self.assertTrue(errors[0].msg == expected_message)",
"def validate_output(self, value: Any, output_spec: \"Spec\",\n output_dict: JsonDict, input_spec: \"Spec\",\n dataset_spec: \"Spec\", input_example: Input):\n del output_spec, output_dict, dataset_spec\n # If not overwritten by a LitType, then validate it as an input to re-use\n # simple validation code.\n self.validate_input(value, input_spec, input_example)",
"def type_mismatch_error_message(\n first: 'Type',\n second: 'Type',\n relation: TypeRelation,\n second_is_expected: bool = False,\n) -> str:\n maybe_expected = 'expected ' if second_is_expected else ''\n first_str = first.compact_representation()\n second_str = second.compact_representation()\n diff = None\n if first_str == second_str:\n # The two only differ in container types or some other property not\n # visible via the compact representation, so show `repr` instead.\n # No diff is used because `repr` prints to a single line.\n first_str = repr(first)\n second_str = repr(second)\n diff = None\n elif len(first_str) > MAX_LINE_LEN or len(second_str) > MAX_LINE_LEN:\n # The types are large structures, and so the formatted representation is\n # used and a summary diff is added. The logic here is that large types\n # may be easier to diff visually with a more structured representation,\n # and logical line breaks are required to make diff output useful.\n first_str = first.formatted_representation()\n second_str = second.formatted_representation()\n split_first = first_str.split('\\n')\n split_second = second_str.split('\\n')\n diff = '\\n'.join(difflib.unified_diff(split_first, split_second))\n message = [\n 'Type',\n f'`{first_str}`',\n f'is not {relation.value} to {maybe_expected}type',\n f'`{second_str}`',\n ]\n if diff:\n message += [f'\\nDiff:\\n{diff}']\n single_line = ' '.join(message)\n if len(single_line) > MAX_LINE_LEN or '\\n' in single_line:\n return '\\n'.join(message)\n else:\n return single_line",
"def test_compare_spec_type_notFOV(self):\n # run compare_spec_type\n spec_type = compare_spec_type(\n self.obs_fname_cache,\n self.stats_fname_cache,\n [1.0], # RA\n [1.0], # Dec\n [\"B\"], # Spectral type\n [4], # Subtype\n [\"V\"], # Luminosity class\n match_radius=0.2, # Match radius (arcsec)\n )\n\n # expected output table\n expected_table = Table(\n {\n \"spec_ra\": [1.0],\n \"spec_dec\": [1.0],\n \"spec_type\": [\"B 4 V\"],\n \"spec_teff\": [np.nan],\n \"spec_logg\": [np.nan],\n \"phot_cat_ind\": [np.nan],\n \"stats_cat_ind\": [np.nan],\n \"beast_teff_p50\": [np.nan],\n \"beast_teff_p16\": [np.nan],\n \"beast_teff_p84\": [np.nan],\n \"beast_logg_p50\": [np.nan],\n \"beast_logg_p16\": [np.nan],\n \"beast_logg_p84\": [np.nan],\n \"teff_sigma\": [np.nan],\n \"logg_sigma\": [np.nan],\n }\n )\n\n # compare to new table\n compare_tables(expected_table, Table(spec_type))",
"def test_get_args_schema(self):\n try:\n Draft4Validator.check_schema(token.TokenView2.GET_SCHEMA)\n schema_valid = True\n except RuntimeError:\n schema_valid = False\n\n self.assertTrue(schema_valid)",
"def check_undefined_type_within_parameters(localization, call_description, *arg_types, **kwargs_types):\n arg_types_list = list(arg_types)\n\n # Process arguments\n for i in range(len(arg_types_list)):\n if isinstance(arg_types_list[i], union_type_copy.UnionType):\n # Is an undefined type inside this union type?\n exist_undefined = len(filter(lambda elem: isinstance(elem, UndefinedType), arg_types[i].types)) > 0\n if exist_undefined:\n # Compose a type warning with the full description of the problem.\n offset = print_utils_copy.get_param_position(\n module_line_numbering_copy.ModuleLineNumbering.get_line_from_module_code(\n localization.file_name, localization.line), i)\n if offset is not -1: # Sometimes offsets of the offending parameters cannot be obtained\n clone_loc = localization.clone()\n clone_loc.column = offset\n else:\n clone_loc = localization\n TypeWarning.instance(clone_loc, \"{0}: Argument {1} could be undefined\".format(call_description,\n i + 1))\n # Remove undefined type from the union type\n arg_types_list[i] = strip_undefined_type_from_union_type(arg_types[i])\n continue\n else:\n # Undefined types outside union types are treated as Type errors.\n if isinstance(arg_types[i], UndefinedType):\n offset = print_utils_copy.get_param_position(\n module_line_numbering_copy.ModuleLineNumbering.get_line_from_module_code(\n localization.file_name, localization.line), i)\n if offset is not -1: # Sometimes offsets of the offending parameters cannot be obtained\n clone_loc = localization.clone()\n clone_loc.column = offset\n else:\n clone_loc = localization\n\n arg_types_list[i] = TypeError(clone_loc, \"{0}: Argument {1} is not defined\".format(call_description,\n i + 1))\n continue\n\n arg_types_list[i] = arg_types[i]\n\n # Process keyword arguments (the same processing as argument lists)\n final_kwargs = {}\n for key, value in kwargs_types.items():\n if isinstance(value, union_type_copy.UnionType):\n exist_undefined = filter(lambda elem: isinstance(elem, UndefinedType), value.types)\n if exist_undefined:\n TypeWarning.instance(localization,\n \"{0}: Keyword argument {1} could be undefined\".format(call_description,\n key))\n final_kwargs[key] = strip_undefined_type_from_union_type(value)\n continue\n else:\n if isinstance(value, UndefinedType):\n final_kwargs[key] = TypeError(localization,\n \"{0}: Keyword argument {1} is not defined\".format(call_description,\n key))\n continue\n final_kwargs[key] = value\n\n return tuple(arg_types_list), final_kwargs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This end-to-end API documentation test compares the arguments defined in the actual code using @has_request_variables and REQ() with the arguments declared in our API documentation for every API endpoint in Zulip. First, we import the fancy-Django version of zproject/urls.py; by doing this, each has_request_variables wrapper around each imported view function gets called to generate the wrapped view function, thus filling the global arguments_map variable. Basically, we're exploiting code execution during import. Then we need to import some view modules not already imported in urls.py. We use this different syntax because the linters complain about an unused import (which is correct, but we do the import to trigger the has_request_variables decorator). At the end, we perform a reverse-mapping test that verifies that every URL pattern defined in the OpenAPI documentation actually exists in code.
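The import-time side effect is the key mechanism. A minimal sketch of the idea follows; REQ, has_request_variables, arguments_map, and the send_message view are simplified stand-ins, not Zulip's actual implementation:

# Illustrative sketch only: a decorator that records REQ() parameters in a
# global map as a side effect of decoration, i.e. at module import time.
import inspect
from collections import defaultdict
from functools import wraps

arguments_map = defaultdict(list)  # view name -> documented argument names

class REQ:
    def __init__(self, whence=None):
        self.post_var_name = whence

def has_request_variables(view_func):
    # Runs when the module defining view_func is imported.
    for name, param in inspect.signature(view_func).parameters.items():
        if isinstance(param.default, REQ):
            arguments_map[view_func.__name__].append(param.default.post_var_name or name)

    @wraps(view_func)
    def wrapper(request, **kwargs):
        return view_func(request, **kwargs)

    return wrapper

@has_request_variables
def send_message(request, topic=REQ(), content=REQ()):
    ...

# Merely importing this module has already populated the map:
assert arguments_map["send_message"] == ["topic", "content"]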
|
def test_openapi_arguments(self) -> None:
from zproject import urls as urlconf
# We loop through all the API patterns, looking in particular
# for those using the rest_dispatch decorator; we then parse
# its mapping of (HTTP_METHOD -> FUNCTION).
for p in urlconf.v1_api_and_json_patterns + urlconf.v1_api_mobile_patterns:
methods_endpoints: Dict[str, Any] = {}
if p.callback is not rest_dispatch:
# Endpoints not using rest_dispatch don't have extra data.
if str(p.pattern) in self.documented_post_only_endpoints:
methods_endpoints = dict(POST=p.callback)
else:
methods_endpoints = dict(GET=p.callback)
else:
methods_endpoints = assert_is_not_none(p.default_args)
# since the module was already imported and is now residing in
# memory, we won't actually face any performance penalties here.
for method, value in methods_endpoints.items():
if callable(value):
function: Callable[..., HttpResponse] = value
tags: Set[str] = set()
else:
function, tags = value
if function is get_events:
# Work around the fact that the registered
# get_events view function isn't where we do
# @has_request_variables.
#
# TODO: Make this configurable via an optional argument
# to has_request_variables, e.g.
# @has_request_variables(view_func_name="zerver.tornado.views.get_events")
function = get_events_backend
function_name = f"{function.__module__}.{function.__name__}"
with self.subTest(function_name):
self.check_openapi_arguments_for_view(p, function_name, function, method, tags)
self.check_for_non_existent_openapi_endpoints()
|
[
"def test_URL_kwargs(self):\n self.request_method_test('matchdict')",
"def test_iomanager_kwargs_collected(self):\n iomanager_kwargs = dict(\n required=object(),\n optional=object(),\n unlimited=object(),\n returns=object(),\n )\n view_kwargs = dict(\n predicate=object()\n )\n decorator_kwargs = iomanager_kwargs.copy()\n decorator_kwargs.update(view_kwargs)\n \n @api_view(**decorator_kwargs)\n def view_callable():\n pass\n \n assert view_callable.view_kwargs == view_kwargs",
"def test_definition_kwargs_not_unlimited(self):\n @api_view\n def view_callable(**kwargs):\n pass\n \n self.call_raises_test(view_callable, a=None)",
"def testF_view_request(self):\n _, _, requestIds = self._inject(15) # creates x docs/requests\n requestView = self._getViewResults(\"request\")\n self.assertEqual(len(requestView), 15)\n for reqView in requestView:\n self.failUnless(reqView[u\"key\"] in requestIds)\n self.failUnless(reqView[u\"value\"][u\"state\"] == u\"NewlyHeld\")",
"def test_dict_optional_args(self, request_args):\n args = request_args.dict_optional_args(\n autocast_arguments_to_string=False,\n )\n assert args['data'] == {'d1': 1, 'd2': 2}\n assert 'method' not in args\n assert 'url' not in args\n assert 'full_url' not in args",
"def _define_module_argument_spec():\n return dict(\n name=dict(\n required=True,\n choices=['GET', 'PUT', 'POST', 'DELETE', 'PATCH', 'HEAD', 'ANY', 'OPTIONS'],\n aliases=['method']\n ),\n rest_api_id=dict(required=True),\n resource_id=dict(required=True),\n authorization_type=dict(required=False, default='NONE'),\n authorizer_id=dict(required=False),\n api_key_required=dict(required=False, type='bool', default=False),\n request_params=dict(\n type='list',\n required=False,\n default=[],\n name=dict(required=True),\n location=dict(required=True, choices=['querystring', 'path', 'header']),\n param_required=dict(type='bool')\n ),\n method_integration=dict(\n type='dict',\n default={},\n integration_type=dict(\n required=False,\n default='AWS',\n choices=['AWS', 'MOCK', 'HTTP', 'HTTP_PROXY', 'AWS_PROXY']\n ),\n http_method=dict(required=False, default='POST', choices=['POST', 'GET', 'PUT']),\n uri=dict(required=False),\n credentials=dict(required=False),\n passthrough_behavior=dict(\n required=False,\n default='when_no_templates',\n choices=['when_no_templates', 'when_no_match', 'never']\n ),\n request_templates=dict(\n required=False,\n type='list',\n default=[],\n content_type=dict(required=True),\n template=dict(required=True)\n ),\n uses_caching=dict(required=False, default=False, type='bool'),\n cache_namespace=dict(required=False, default=''),\n cache_key_parameters=dict(required=False, type='list', default=[]),\n content_handling=dict(required=False, default='', choices=['convert_to_binary', 'convert_to_text', '']),\n integration_params=dict(\n type='list',\n required=False,\n default=[],\n name=dict(required=True),\n location=dict(required=True, choices=['querystring', 'path', 'header']),\n value=dict(required=True)\n )\n ),\n method_responses=dict(\n type='list',\n default=[],\n status_code=dict(required=True),\n response_params=dict(\n type='list',\n required=False,\n default=[],\n name=dict(required=True),\n is_required=dict(required=True, type='bool')\n ),\n response_models=dict(\n type='list',\n required=False,\n default=[],\n content_type=dict(required=True),\n model=dict(required=False, default='Empty', choices=['Empty', 'Error'])\n )\n ),\n integration_responses=dict(\n type='list',\n default=[],\n status_code=dict(required=True),\n is_default=dict(required=False, default=False, type='bool'),\n pattern=dict(required=False),\n response_params=dict(\n type='list',\n required=False,\n default=[],\n name=dict(required=True),\n location=dict(required=True, choices=['body', 'header']),\n value=dict(required=True)\n ),\n response_templates=dict(\n required=False,\n type='list',\n default=[],\n content_type=dict(required=True),\n template=dict(required=True)\n ),\n ),\n state=dict(default='present', choices=['present', 'absent'])\n )",
"def test_register_route_request(self):\n pass",
"def test_returns_true_if_request_has_get_parameter(self):\n self.request_mock.GET = {self.parameter_name: 'foobar'}\n self.assertTrue(self.has_parameter(self.get_response_mock, self.request_mock))",
"def test_default_view():",
"def w_positional_request(foo, bar, REQUEST):\n return 42",
"def test_returns_false_if_request_hasnt_get_parameter(self):\n self.request_mock.GET = dict()\n self.assertFalse(self.has_parameter(self.get_response_mock, self.request_mock))",
"def test_swagger_docs_url_resolves_to_correct_view(self):\n view = resolve('/docs/')\n SCHEMA_VIEW = get_swagger_view(\n title='Harvester Control Center API',\n url=os.environ.get('FORCE_SCRIPT_NAME', '')\n )\n self.assertEqual(view.func.__name__, SCHEMA_VIEW.__name__)",
"def test_request_parameter_validation():\n from django.core.exceptions import ValidationError\n\n schema = SchemaFactory(\n paths={\n '/get/{id}/': {\n 'parameters': [\n {\n 'name': 'id',\n 'in': PATH,\n 'description': 'id',\n 'required': True,\n 'type': STRING,\n 'format': 'uuid',\n },\n {\n 'name': 'page',\n 'in': QUERY,\n 'type': INTEGER,\n },\n ],\n 'get': {\n 'responses': {200: {'description': \"Success\"}},\n },\n },\n },\n )\n\n request = RequestFactory(url='http://www.example.com/get/32/?page=abcd')\n\n with pytest.raises(ValidationError) as err:\n validate_request(\n request,\n paths=schema['paths'],\n base_path=schema.get('base_path', ''),\n context=schema,\n inner=True,\n )\n\n assert 'method' in err.value.messages[0]\n assert 'parameters' in err.value.messages[0]['method'][0][0]\n assert 'path' in err.value.messages[0]['method'][0][0]['parameters'][0]\n assert 'id' in err.value.messages[0]['method'][0][0]['parameters'][0]['path'][0]\n assert 'format' in err.value.messages[0]['method'][0][0]['parameters'][0]['path'][0]['id'][0]\n assert_error_message_equal(\n err.value.messages[0]['method'][0][0]['parameters'][0]['path'][0]['id'][0]['format'][0],\n MESSAGES['format']['invalid_uuid'],\n )\n\n assert 'query' in err.value.messages[0]['method'][0][0]['parameters'][0]\n assert 'page' in err.value.messages[0]['method'][0][0]['parameters'][0]['query'][0]\n assert 'type' in err.value.messages[0]['method'][0][0]['parameters'][0]['query'][0]['page'][0]\n assert_error_message_equal(\n err.value.messages[0]['method'][0][0]['parameters'][0]['query'][0]['page'][0]['type'][0],\n MESSAGES['type']['invalid'],\n )",
"def test_routes_added_params(self):\n req, _ = self.ht.add_route('/blah/<param>')\n resp = requests.get(req.replace('<param>', '12345'))\n\n self.assertEqual(resp.status_code, 200, 'Route was not added')\n\n last_call = self.last_func()\n self.assertIn('param', last_call['args'].keys(),\n 'args should contain param as key')\n self.assertIn('12345', last_call['args'].values(),\n 'args should contain 12345 as val')",
"def test_call_makes_request_with_required_parameters(self):\n base.call(\"GET\", self.url, self.req_ctx)\n self.session.request.assert_called_once_with(\n \"GET\", self.url, auth=None, **self.OPTIONAL_REQUEST_ARGS)",
"def validate_args(request: LocalProxy) -> Dict:\n url_type = request.path.split('/')[-1]\n\n if url_type not in Arguments.members():\n logging.warning('Can not check requested arguments')\n return {}\n\n required_arguments = getattr(Arguments, url_type).value\n extra_keys = set(request.args.keys()) - required_arguments\n\n if extra_keys:\n logging.warning('Found extra arguments for {0}. Removed: {1}'.format(\n request.path,\n ', '.join(extra_keys),\n ))\n\n return {key: value for key, value in request.args.items() if key in required_arguments} # noqa: WPS110",
"def test_special_kwargs(self):\n expected_kwargs = {'a': object()}\n method_kwargs = expected_kwargs.copy()\n \n class SpecialKwargsFunctionViewCallable(FunctionViewCallable):\n def special_kwargs(self, request):\n return method_kwargs\n \n @SpecialKwargsFunctionViewCallable\n def view_callable(**kwargs):\n assert kwargs == expected_kwargs\n raise WrappedCallableSuccessError\n \n request = MockPyramidRequest()\n with pytest.raises(WrappedCallableSuccessError):\n view_callable(request)",
"def testG_view_request_id(self):\n self._inject(11) # creates x docs/requests\n viewResult = self._getViewResults(\"request_ids\")\n requestIds = [ x[u\"key\"] for x in viewResult ]\n self.assertEqual(len(requestIds), 11)",
"def test_request_init(self):\n\n\t\tself.assertEqual(self.request.path, '/index')\n\t\tself.assertEqual(self.request.method, 'GET')\n\t\tself.assertEqual(self.request._get_data, None)\n\t\tself.assertEqual(self.request._post_data, None)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test to make sure the request validator works properly. The tests cover both cases: catching valid requests that are wrongly marked as invalid, and making sure genuinely invalid requests are marked as such.
|
def test_validator(self) -> None:
# `/users/me/subscriptions` doesn't require any parameters
validate_request("/users/me/subscriptions", "get", {}, {}, False, "200")
with self.assertRaises(SchemaError):
# `/messages` POST does not work on an empty response
validate_request("/messages", "post", {}, {}, False, "200")
# 400 responses are allowed to fail validation.
validate_request("/messages", "post", {}, {}, False, "400")
# `intentionally_undocumented` allows validation errors on
# 200 responses.
validate_request(
"/dev_fetch_api_key", "post", {}, {}, False, "200", intentionally_undocumented=True
)
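The same valid/invalid split can be illustrated with a plain JSON Schema check. This is only a hedged, generic sketch using the jsonschema package, not how Zulip's validate_request is implemented:

# Generic illustration: a conforming payload passes silently, a
# non-conforming one raises ValidationError.
from jsonschema import validate
from jsonschema.exceptions import ValidationError

message_schema = {
    "type": "object",
    "properties": {"content": {"type": "string"}},
    "required": ["content"],
}

validate({"content": "hello"}, message_schema)  # valid request: no exception

try:
    validate({}, message_schema)  # invalid request: missing "content"
except ValidationError as err:
    print(err.message)  # "'content' is a required property"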
|
[
"def validate(self, request):\n\t\treturn True",
"def test_invalid_action_in_requests(action):\n req = {\n 'dataset': {'database': 'ABC', 'name': 'XYZ'},\n 'fetch': {},\n 'action': action\n }\n with pytest.raises(ValidationError):\n validator.validate(req)",
"def test_conformance(self):\n self._request_valid(\"conformance\")",
"def test_request_parameter_validation():\n from django.core.exceptions import ValidationError\n\n schema = SchemaFactory(\n paths={\n '/get/{id}/': {\n 'parameters': [\n {\n 'name': 'id',\n 'in': PATH,\n 'description': 'id',\n 'required': True,\n 'type': STRING,\n 'format': 'uuid',\n },\n {\n 'name': 'page',\n 'in': QUERY,\n 'type': INTEGER,\n },\n ],\n 'get': {\n 'responses': {200: {'description': \"Success\"}},\n },\n },\n },\n )\n\n request = RequestFactory(url='http://www.example.com/get/32/?page=abcd')\n\n with pytest.raises(ValidationError) as err:\n validate_request(\n request,\n paths=schema['paths'],\n base_path=schema.get('base_path', ''),\n context=schema,\n inner=True,\n )\n\n assert 'method' in err.value.messages[0]\n assert 'parameters' in err.value.messages[0]['method'][0][0]\n assert 'path' in err.value.messages[0]['method'][0][0]['parameters'][0]\n assert 'id' in err.value.messages[0]['method'][0][0]['parameters'][0]['path'][0]\n assert 'format' in err.value.messages[0]['method'][0][0]['parameters'][0]['path'][0]['id'][0]\n assert_error_message_equal(\n err.value.messages[0]['method'][0][0]['parameters'][0]['path'][0]['id'][0]['format'][0],\n MESSAGES['format']['invalid_uuid'],\n )\n\n assert 'query' in err.value.messages[0]['method'][0][0]['parameters'][0]\n assert 'page' in err.value.messages[0]['method'][0][0]['parameters'][0]['query'][0]\n assert 'type' in err.value.messages[0]['method'][0][0]['parameters'][0]['query'][0]['page'][0]\n assert_error_message_equal(\n err.value.messages[0]['method'][0][0]['parameters'][0]['query'][0]['page'][0]['type'][0],\n MESSAGES['type']['invalid'],\n )",
"def validate_request(req):\n # print out the request to the terminal window if in debug mode\n # this item is set in the settings, in the __init__.py file\n if settings.LTI_DEBUG:\n for item in req.POST:\n debug_printer('DEBUG - %s: %s \\r' % (item, req.POST[item]))\n \n # verifies that request contains the information needed\n if 'oauth_consumer_key' not in req.POST:\n debug_printer('DEBUG - Consumer Key was not present in request.')\n raise PermissionDenied()\n if 'user_id' not in req.POST:\n debug_printer('DEBUG - Anonymous ID was not present in request.')\n raise PermissionDenied()\n if 'lis_person_sourcedid' not in req.POST:\n debug_printer('DEBUG - Username was not present in request.')\n raise PermissionDenied()\n if 'lis_person_contact_email_primary' not in req.POST:\n debug_printer('DEBUG - User Email was not present in request.')\n raise PermissionDenied()",
"def test_bad_requests_give_400(self) -> None:\n self.assertEqual(self._request({}), 400)",
"def test_query_request_validator_valid_request():\n\n request = create_mock_request(master_password='abcd',\n master_key='1234',\n query_type=QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE,\n domain='some_domain')\n dbclient = create_mock_dbclient_with_master_collection(master_password='abcd',\n master_key='1234')\n valid, message = QueryRequestValidator(request, dbclient,\n acceptable_query_type=[\n QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE,\n QUERY_SEARCH_BY_DOMAIN_AND_USERNAME_TYPE\n ]).isValid()\n assert valid == True\n assert message == None",
"def test_presence_validations(self):\r\n # missing required name field\r\n person = dict(email='example@example.com')\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 400\r\n data = loads(response.data)\r\n assert 'validation_errors' in data\r\n errors = data['validation_errors']\r\n assert 'name' in errors\r\n assert 'enter a value' in errors['name'].lower()\r\n\r\n # missing required email field\r\n person = dict(name='Jeffrey')\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 400\r\n data = loads(response.data)\r\n assert 'validation_errors' in data\r\n errors = data['validation_errors']\r\n assert 'email' in errors\r\n assert 'enter a value' in errors['email'].lower()\r\n\r\n # everything required is now provided\r\n person = dict(name='Jeffrey', email='example@example.com', age=24)\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 201\r\n personid = loads(response.data)['id']\r\n\r\n # check that the provided field values are in there\r\n response = self.app.get('/api/test/' + str(personid))\r\n assert response.status_code == 200\r\n data = loads(response.data)\r\n assert data['name'] == 'Jeffrey'\r\n assert data['email'] == 'example@example.com'",
"def test_invalid_fetch_in_requests(fetch):\n req = {'dataset': {'database': 'ABC', 'name': 'XYZ'}, 'fetch': fetch}\n with pytest.raises(ValidationError):\n validator.validate(req)",
"def test_staking_validators_get(self):\n pass",
"def test_format_validations(self):\r\n # test posting a person with a badly formatted email field\r\n person = dict(name='Jeffrey', email='bogus!!!email', age=1)\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 400\r\n data = loads(response.data)\r\n assert 'validation_errors' in data\r\n errors = data['validation_errors']\r\n assert 'email' in errors\r\n assert 'email address' in errors['email'].lower()\r\n\r\n # posting a new person with valid email format should be fine\r\n person = dict(name='John', email='foo@example.com', age=1)\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 201\r\n personid = loads(response.data)['id']\r\n\r\n # test patching a person to with badly formatted data\r\n person = dict(name='Jeffrey', email='bogus!!!email', age=24)\r\n response = self.app.patch('/api/test/' + str(personid),\r\n data=dumps(person))\r\n assert 'validation_errors' in data\r\n errors = data['validation_errors']\r\n assert 'email' in errors\r\n assert 'email address' in errors['email'].lower()\r\n\r\n # patching a person with correctly formatted fields should be fine\r\n person = dict(email='foo@example.com')\r\n response = self.app.patch('/api/test/' + str(personid),\r\n data=dumps(person))\r\n data = loads(response.data)\r\n if 'validation_errors' in data and \\\r\n 'email' in data['validation_errors']:\r\n assert 'email address' not in errors['email'].lower()",
"def test_if_having_tag_without_correct_message_type_throws_validation_error():\n r = Request({\"messaging_type\": \"RESPONSE\", \"tag\": \"GAME_EVENT\"})\n with pytest.raises(DataError):\n r.validate()",
"def test_healthchecks_validations(self, mock_requests):\n response = self.client.post('/v2/apps')\n self.assertEqual(response.status_code, 201, response.data)\n app_id = response.data['id']\n\n # Set one of the values that require a numeric value to a string\n resp = self.client.post(\n '/v2/apps/{app_id}/config'.format(**locals()),\n {'values': json.dumps({'HEALTHCHECK_INITIAL_DELAY': 'horse'})}\n )\n self.assertEqual(resp.status_code, 400, response.data)\n\n # test URL - Path is the only allowed thing\n # Try setting various things such as query param\n\n # query param\n resp = self.client.post(\n '/v2/apps/{app_id}/config'.format(**locals()),\n {'values': json.dumps({'HEALTHCHECK_URL': '/health?testing=0'})}\n )\n self.assertEqual(resp.status_code, 400, response.data)\n\n # fragment\n resp = self.client.post(\n '/v2/apps/{app_id}/config'.format(**locals()),\n {'values': json.dumps({'HEALTHCHECK_URL': '/health#db'})}\n )\n self.assertEqual(resp.status_code, 400, response.data)\n\n # netloc\n resp = self.client.post(\n '/v2/apps/{app_id}/config'.format(**locals()),\n {'values': json.dumps({'HEALTHCHECK_URL': 'http://someurl.com/health/'})}\n )\n self.assertEqual(resp.status_code, 400, response.data)\n\n # no path\n resp = self.client.post(\n '/v2/apps/{app_id}/config'.format(**locals()),\n {'values': json.dumps({'HEALTHCHECK_URL': 'http://someurl.com'})}\n )\n self.assertEqual(resp.status_code, 400, response.data)",
"def validate_http_request(request):\n # TO DO: write function\n req = request.split()\n if len(req) == 0:\n return False, 'Invalid request'\n if not req[0] == 'GET':\n return False, '500 Internal Server Error'\n\n if not req[2] == 'HTTP/1.1':\n return False, '500 Internal Server Error'\n\n return True, req[1]",
"def test_post_edit_page_invalid(self):\n\n data = {\n 'first_name': 'test',\n 'last_name': '',\n 'biography': '',\n 'email': 'INVALID',\n 'skype': 'test',\n 'jabber': 'INVALID',\n 'other_contacts': '',\n 'birthday': 'INVALID'\n }\n self.client.login(username='ahalan', password='12345')\n response = self.client.post(\n self.edit_path, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n content = json.loads(response.content)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n\n self.assertFalse(content['success'])\n self.assertIn('errors', content['payload'])\n\n self.assertIn('birthday', content['payload']['errors'])\n self.assertEqual(\n content['payload']['errors']['birthday'],\n [u'Enter a valid date.']\n )\n self.assertIn('jabber', content['payload']['errors'])\n self.assertEqual(\n content['payload']['errors']['jabber'],\n [u'Enter a valid email address.']\n )\n self.assertIn('email', content['payload']['errors'])\n self.assertEqual(\n content['payload']['errors']['email'],\n [u'Enter a valid email address.']\n )\n self.assertIn('last_name', content['payload']['errors'])\n self.assertEqual(\n content['payload']['errors']['last_name'],\n [u'This field is required.']\n )",
"def valid(self):\n\n if self.recognition:\n # Verify that this is in the alphabet requried\n if True:\n logger.debug(\"RequestElement.valid(): GOOD - recognition %s is valid\" %\n self.recognition)\n recognition = True\n else:\n logger.debug(\"RequestElement.valid(): BAD - recognition %s is not valid\" %\n self.recognition)\n recognition = False\n else:\n logger.debug(\"RequestElement.valid(): BAD - no recognition\")\n recognition = False\n\n if self.request_id:\n logger.debug(\"RequestElement.valid(): GOOD - request_id %s found\" %\n self.request_id)\n recognition = True\n else:\n logger.debug(\"RequestElement.valid(): BAD - no request_id found\")\n recognition = False\n\n if self.command:\n if self.command in self.valid_commands:\n logger.debug(\"RequestElement.valid(): GOOD - command %s is valid\" %\n self.command)\n command = True\n else:\n logger.debug(\"RequestElement.valid(): BAD - command %s is not valid, not in %s\" %\n (self.command, self.valid_commands))\n command = False\n else:\n logger.debug(\"RequestElement.valid(): BAD - no command given\")\n command = False\n\n if (self.binding_ratio_lo and self.binding_ratio_hi):\n if (self.binding_ratio_lo < self.binding_ratio_hi):\n logger.debug(\"RequestElement.valid(): GOOD - %f < %f\" %\n (self.binding_ratio_lo, self.binding_ratio_hi))\n ratio = True\n else:\n logger.debug(\"RequestElement.valid(): BAD - %f !< %f\" %\n (self.binding_ratio_lo, self.binding_ratio_hi))\n ratio = False\n elif (not self.binding_ratio_lo) and (not self.binding_ratio_hi):\n logger.debug(\"RequestElement.valid(): GOOD - binding ratio not specificed\")\n ratio = True\n else:\n logger.debug(\"RequestElement.valid(): BAD - binding ratio lo: %s hi: %s\" %\n (self.binding_ratio_lo, self.binding_ratio_hi))\n ratio = False\n\n if self.output_dir:\n # This test should be a bit stronger, but cie la vie\n if os.path.isabs(self.output_dir) and os.path.isdir(self.output_dir):\n logger.debug(\"RequestElement.valid(): GOOD - dir %s looks find\" %\n self.output_dir)\n output_dir = True\n else:\n logger.debug(\"RequestElement.valid(): BAD - dir %s is not a valid directory\" %\n self.output_dir)\n output_dir = False\n else:\n logger.debug(\"RequestElement.valid(): BAD - no output directory speficied\")\n output_dir = False\n\n if self.maxenergy:\n if isinstance(self.maxenergy,float):\n logger.debug(\"RequestElement.valid(): GOOD - max energy %f specified and is float\" %\n self.maxenergy)\n maxenergy = True\n else:\n logger.debug(\"RequestElement.valid(): BAD - max energy %s specified and is something weird\" %\n self.maxenergy)\n\n maxenergy = False\n else:\n maxenergy = True\n\n if self.maxunknown_percent:\n if 0 <= self.maxunknown_percent <= 1:\n logger.debug(\"RequestElement.valid(): GOOD - max unknown 0 <= %f <= 1\" %\n self.maxunknown_percent)\n maxunknown = True\n else:\n logger.debug(\"RequestElement.valid(): BAD - max unknown 0 !<= %f !<= 1\" %\n self.maxunknown_percent)\n maxunknown = False\n else:\n logger.debug(\"RequestElement.valid(): GOOD - max unknown percent not specified\")\n maxunknown = True\n\n if command and output_dir and maxenergy and recognition and ratio and maxunknown:\n return True\n else:\n return False",
"def test_emailservice_validation():\n\n emailService = EmailService()\n _test(\"emailService.validate_email_address('marzi@dtu.dk')\",\n (bool(emailService.validate_email_address('marzi@dtu.dk'))))\n _test(\"emailService.validate_email_address('marzi@dtu.dk')\",\n (bool(emailService.validate_email_address('marzi@dtu.dk'))))\n _test(\"not emailService.validate_email_address('@gmail.com')\",\n (bool(not emailService.validate_email_address('@gmail.com'))))\n _test(\"not emailService.validate_email_address('test@gmail')\",\n (bool(not emailService.validate_email_address('test@gmail'))))",
"def test_query_request_validator_domain_missing():\n\n request = create_mock_request(master_password='abcd',\n master_key='1234',\n query_type=QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE)\n dbclient = create_mock_dbclient_with_master_collection(master_password='abcd',\n master_key='1234')\n valid, message = QueryRequestValidator(request, dbclient,\n acceptable_query_type=[\n QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE,\n QUERY_SEARCH_BY_DOMAIN_AND_USERNAME_TYPE\n ]).isValid()\n assert valid == False\n assert message == ERROR_DOMAIN_REQUIRED",
"def test_parse_request_value_error(request):\n from server import parse_request\n with pytest.raises(ValueError):\n parse_request(request)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets all the Bittrex markets and filters them based on the main market filter
|
def get_markets(self, main_market_filter=None):
markets = self.bittrex.get_markets()
if not markets["success"]:
        error_str = "Failed to retrieve Bittrex markets"
logger.error(error_str)
exit()
markets = markets["result"]
markets = list(map(lambda item: (item['MarketName']), markets))
if main_market_filter is not None:
market_check = main_market_filter + "-"
markets = list(filter(lambda market: market_check in market, markets))
return markets
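For reference, a small trace of how the map/filter steps behave on sample data; the market names below are made up for illustration:

# Illustration of the filtering logic above on hand-written sample data.
sample_result = [
    {"MarketName": "BTC-LTC"},
    {"MarketName": "BTC-ETH"},
    {"MarketName": "ETH-ADA"},
]

names = list(map(lambda item: item["MarketName"], sample_result))
market_check = "BTC" + "-"
filtered = list(filter(lambda market: market_check in market, names))
assert filtered == ["BTC-LTC", "BTC-ETH"]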
|
[
"def get_active_markets():\n b = Bittrex(None, None)\n response = b.get_markets()\n if response['success']:\n markets = response['result']\n active_markets = []\n for market in markets:\n if market['IsActive']:\n active_markets.append(Market(market['MarketName']))\n return active_markets\n else:\n raise Exception(response['message'])",
"def pull_markets(self):\n # update headers and update full endpoint\n api_endpoint = '/markets'\n self.update_headers(api_endpoint)\n r = requests.get(self.full_endpoint, headers=self.headers)\n results = r.json()\n if results['success'] == True:\n self.markets = results['result']\n self.symbols = [x['name'] for x in self.markets]\n return self.markets\n else:\n print('Error retrieving markets')",
"def get_all_markets(self):\n markets = [\n (i.primaryCurrency, i.secondaryCurrency,i.contractName, int(i.priceSource), i)\n for i in self.c.marketDataApi.get_all_price_markets().result\n ]\n df = pd.DataFrame(\n markets,\n columns=(\n [\"primarycurrency\", \"secondarycurrency\",\"contract\", \"pricesource\", \"marketobj\"]\n ),\n )\n df.drop_duplicates(inplace=True, ignore_index=True)\n df[\"Ticker\"] = df.primarycurrency.values + df.secondarycurrency.values\n return df",
"async def fetch_markets(self, params={}):\n response = await self.publicGetProducts(params)\n #\n # [\n # {\n # id: 'BTCAUCTION-USD',\n # base_currency: 'BTC',\n # quote_currency: 'USD',\n # base_min_size: '0.000016',\n # base_max_size: '1500',\n # quote_increment: '0.01',\n # base_increment: '0.00000001',\n # display_name: 'BTCAUCTION/USD',\n # min_market_funds: '1',\n # max_market_funds: '20000000',\n # margin_enabled: False,\n # fx_stablecoin: False,\n # max_slippage_percentage: '0.02000000',\n # post_only: False,\n # limit_only: False,\n # cancel_only: True,\n # trading_disabled: False,\n # status: 'online',\n # status_message: '',\n # auction_mode: False\n # },\n # {\n # id: 'BTC-USD',\n # base_currency: 'BTC',\n # quote_currency: 'USD',\n # base_min_size: '0.000016',\n # base_max_size: '1500',\n # quote_increment: '0.01',\n # base_increment: '0.00000001',\n # display_name: 'BTC/USD',\n # min_market_funds: '1',\n # max_market_funds: '20000000',\n # margin_enabled: False,\n # fx_stablecoin: False,\n # max_slippage_percentage: '0.02000000',\n # post_only: False,\n # limit_only: False,\n # cancel_only: False,\n # trading_disabled: False,\n # status: 'online',\n # status_message: '',\n # auction_mode: False\n # }\n # ]\n #\n result = []\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'id')\n baseId, quoteId = id.split('-')\n # BTCAUCTION-USD vs BTC-USD conflict workaround, see the output sample above\n # baseId = self.safe_string(market, 'base_currency')\n # quoteId = self.safe_string(market, 'quote_currency')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n status = self.safe_string(market, 'status')\n result.append(self.extend(self.fees['trading'], {\n 'id': id,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': self.safe_value(market, 'margin_enabled'),\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': (status == 'online'),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': self.safe_number(market, 'base_increment'),\n 'price': self.safe_number(market, 'quote_increment'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': None,\n 'max': None,\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': self.safe_number(market, 'min_market_funds'),\n 'max': None,\n },\n },\n 'info': market,\n }))\n return result",
"def get_inactive_markets():\n b = Bittrex(None, None)\n response = b.get_markets()\n if response['success']:\n markets = response['result']\n inactive_markets = []\n for market in markets:\n if not market['IsActive']:\n inactive_markets.append(Market(market['MarketName']))\n print \"{:s}\\t{:s}\".format(market['MarketName'], market['Notice'])\n return inactive_markets\n else:\n raise Exception(response['message'])",
"def search_market(self, search: str) -> List[Market]:\n url = \"{}/{}?searchTerm={}\".format(\n self.api_base_url, IG_API_URL.MARKETS.value, search\n )\n data = self._http_get(url)\n markets = []\n if data is not None and \"markets\" in data:\n markets = [self.get_market_info(m[\"epic\"]) for m in data[\"markets\"]]\n return markets",
"def acquire_market_data_all(self):\n return self.public_request('GET', '/market/ticker/all')['data']['ticker']",
"def filter(self, ticker: str):\n yfTicker = yf.Ticker(ticker)\n dataToReturn = {}\n\n if yfTicker is None:\n print(\"Could not find info on ticker \", ticker, \" from Yahoo!\")\n return False, None\n\n try:\n info = yfTicker.info\n except Exception as e:\n print(\"Filter got a yahoo finance error when obtaining data for \" + ticker + \":\")\n print(e)\n return False, None\n\n for tuple in self.filters:\n valuesToFilter = tuple[0]\n lambdaFilter = tuple[1]\n inputValues = []\n\n for value in valuesToFilter:\n if value in info:\n inputValues.append(info[value])\n else:\n inputValues.append(None)\n\n returnValue = False\n\n try:\n returnValue = lambdaFilter(inputValues)\n except:\n print(\"Got an error when filtering on\", ticker, \"with inputs\",\n str(inputValues) + \". Assuming the filter returned false.\")\n\n if not returnValue:\n return False, None\n\n for i in range(len(valuesToFilter)):\n dataToReturn[valuesToFilter[i]] = inputValues[i]\n\n return True, dataToReturn",
"def search_markets(self, q, limit=10, offset=0, type=\"track\", markets=None, total=None):\n warnings.warn(\n \"Searching multiple markets is an experimental feature. \"\n \"Please be aware that this method's inputs and outputs can change in the future.\",\n UserWarning,\n )\n if not markets:\n markets = self.country_codes\n\n if not (isinstance(markets, list) or isinstance(markets, tuple)):\n markets = []\n\n warnings.warn(\n \"Searching multiple markets is poorly performing.\",\n UserWarning,\n )\n return self._search_multiple_markets(q, limit, offset, type, markets, total)",
"def get_all_exchanges(self, fsym, tsym, base_url='https://www.cryptocompare.com/api/data/'):\n res = self.get_coin_snapshot(fsym, tsym, base_url=base_url)\n try:\n exchanges = res['Data']['Exchanges']\n markets = [x['MARKET'] for x in exchanges]\n return sorted(markets)\n except KeyError:\n return res",
"def filterList(self, tickers: List):\n returnList = []\n dataToReturn = []\n\n for ticker in tickers:\n print(\"Filtering\", ticker, \"...\")\n success, data = self.filter(ticker)\n\n if success:\n returnList.append(ticker)\n dataToReturn.append(data)\n\n return returnList, dataToReturn",
"def by_market(self, market_type:str):\n if market_type not in self.markets:\n logger.error(f\"Not the correct type: {market_type} must be {self.markets}\")\n return []\n\n _search = self.search\n _search[self.sc] = {\n \"market\": market_type\n }\n # print(_search.query_builder.build())\n return _search.find()",
"def search_market(self, search: str) -> List[Market]:\n return self.account_ifc.search_market(search)",
"def market_tickers(self):\n dic = {}\n requrl = '/api/swap/v2/market/tickers'\n url = self.base_url + requrl\n return request_nosign_get(url, dic)",
"def available_markets(self):\n return self._get(\"markets\")",
"def get_highest_volume_markets(number=10, base_volume=True, basis=None):\n b = Bittrex(None, None)\n response = b.get_market_summaries()\n\n if response['success']:\n volumes_markets = []\n if basis:\n for summary in response['result']:\n if summary[\"MarketName\"].split(\"-\")[0] == basis:\n volumes_markets.append(\n (summary[\"BaseVolume\"] if base_volume else summary['Volume'],\n summary['MarketName'])\n )\n else:\n for summary in response['result']:\n volumes_markets.append(\n (summary[\"BaseVolume\"] if base_volume else summary['Volume'],\n summary['MarketName'])\n )\n volumes_markets.sort(reverse=True)\n markets = []\n for volume_market in volumes_markets[:number]:\n volume, market = volume_market\n markets.append(Market(market))\n return markets\n else:\n raise Exception(response['message'])",
"def get_stocks_symbols(write_to_files=True):\n all_symbols = []\n log.info(\"Pulling markets symbols\")\n for market in markets:\n symbols = []\n request = urllib2.Request(market.soruce)\n try:\n result = urllib2.urlopen(request)\n except urllib2.URLError as e:\n log.error(\"url error #{}: {}\".format(e.errno, e.strerror))\n return\n\n data = result.readlines()\n\n # save all data to file\n if write_to_files:\n filepath = make_filepath(DATA_PATH+\"companies\", market.name)\n companies = open(filepath, 'w')\n for line in data:\n companies.write(str(line))\n\n # parse the data to get list of symbols\n for company in data:\n symbol = company.split(',')[0][1:-1]\n symbols.extend([symbol])\n\n symbols.pop(0) # exclude the first line - the description line (at the head of the table)\n all_symbols.extend(symbols)\n\n return all_symbols",
"def filter_candidates(self):\n\n try:\n if not os.path.exists(self.stock_ticker_file):\n RobinhoodInstance.get_all_instruments(self.stock_ticker_file)\n except Exception as e:\n print \"[Error]: %s\" % str(e)\n raise\n\n stock_file = open(self.stock_ticker_file, \"r\")\n filtered_stock_file = open(self.filtered_stock_ticker_file, \"w\")\n\n for stock_ticker in stock_file.readlines():\n print \"Testing: %s\" % stock_ticker\n stock_ticker = stock_ticker.strip()\n for special_char in SPECIAL_CHAR_LIST:\n stock_ticker = stock_ticker.replace(special_char, \"\")\n\n # Get the bollinger band history along with the 5 day moving average\n try:\n close, lower_band, five_day_ma = self.calculate_bands(stock_ticker)\n except Exception as e:\n print \"Could not test ticker: %s\" % stock_ticker\n print \"Error: %s\" % str(e)\n continue\n\n # If I get bad data, just continue to the next stock\n if len(close) < 5 or len(lower_band) < 5 or len(five_day_ma) < 5:\n print \"Could not test ticker: %s\" % stock_ticker\n continue\n\n print \"Adding: %s\" % stock_ticker\n filtered_stock_file.write(\"%s\\n\" % stock_ticker)",
"def get_all_filters(require=INewsItemFilter):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Apply patches to tweak SDK build system.
|
def apply_patches():
with open(os.path.join(os.getcwd(), 'utils', 'sdk.patch'), 'r') as fin:
subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR)
with open(os.path.join(SRCDIR, 's-video_sgx.patch'), 'r') as fin:
subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR)
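If the build step may be re-run on an already-patched tree, GNU patch's -N (forward-only) and --dry-run flags can make the application idempotent. A minimal sketch under that assumption; apply_patch_once is a hypothetical helper, not part of the original script:

# Hedged sketch: skip a patch that no longer applies cleanly (e.g. because
# it was applied on a previous run). Relies on GNU patch's -N and --dry-run.
import subprocess

def apply_patch_once(patch_path, destdir):
    with open(patch_path, "r") as fin:
        check = subprocess.call(["patch", "-p2", "-N", "--dry-run"], stdin=fin, cwd=destdir)
    if check != 0:
        return False  # already applied (or does not apply); leave the tree alone
    with open(patch_path, "r") as fin:
        subprocess.call(["patch", "-p2", "-N"], stdin=fin, cwd=destdir)
    return True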
|
[
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f\"patch -p1 < {self.project.patch}/{self.ver}/{patch}\")",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f'patch -p1 < {self.project.patch}/{self.ver}/{patch}')",
"def CmdPkgUpdatePatch(package, options):\n package.UpdatePatch()",
"def apply_to(self, tagdb):\n for pkg, patch in self.iteritems():\n patch.apply(pkg, tagdb)",
"def CmdPkgPatch(package, options):\n package.Patch()",
"def update():\n update_code()\n update_env()\n symlink()\n set_current()\n permissions()",
"def apply_patches_to_MANN_files() -> None:\n\n # Original MANN files\n filenames = [\"AdamW.py\", \"AdamWParameter.py\", \"ExpertWeights.py\", \"Gating.py\", \"MANN.py\"]\n\n # Path configuration\n MANN_folder = \"../src/adherent/MANN/\"\n patches_MANN_folder = \"../src/adherent/MANN/patches/\"\n\n # Apply the patches\n for filename in filenames:\n original_file = MANN_folder + filename\n patch_file = patches_MANN_folder + filename[:-2] + \"patch\"\n os.system(\"patch \" + original_file + \" \" + patch_file)",
"def patch_wpt(config, platform):\n patch_path = '%s/util/wpt.patch' % config['wptd_path']\n with open(patch_path) as f:\n patch = f.read()\n\n # The --sauce-platform command line arg doesn't\n # accept spaces, but Sauce requires them in the platform name.\n # https://github.com/w3c/web-platform-tests/issues/6852\n patch = patch.replace('__platform_hack__', '%s %s' % (\n platform['os_name'], platform['os_version'])\n )\n\n p = subprocess.Popen(\n ['git', 'apply', '-'], cwd=config['wpt_path'], stdin=subprocess.PIPE\n )\n p.communicate(input=patch)",
"def target_update(target,deps,cmd):\n\n if target_outdated(target,deps):\n xsys(cmd)",
"def apply_patch(cls, api):\r\n\r\n\r\n ##BRANCH_ADDR = {\r\n ## \"NTSC-U\" : 0x9ea5c,\r\n ##}[api.VERSION]\r\n\r\n CURRENT_AMMO_SET_A1 = {\r\n \"NTSC-U\" : 0x9eac0,\r\n }[api.VERSION]\r\n\r\n CURRENT_AMMO_DRAW_CALL = {\r\n \"NTSC-U\" : 0x9eadc\r\n }[api.VERSION]\r\n\r\n IF_ADDR = {\r\n \"NTSC-U\" : 0x9eaec,\r\n }[api.VERSION]\r\n\r\n EXTRA_AMMO_SET_A1 = {\r\n \"NTSC-U\" : 0x9eb68,\r\n }[api.VERSION]\r\n\r\n JMP_OVER_EXTRA_AMMO = {\r\n \"NTSC-U\" : 0x9eb90,\r\n }[api.VERSION]\r\n\r\n\r\n guardDelayMem = MemoryAddress(0x801DB33C + 0x008) # guard_0x15.frames_until_update in adjusted setup\r\n guardDelayLoad = \"lb\"\r\n\r\n api.MemConst.global_timer_delta.offset_term(\"reg\")\r\n\r\n\r\n # [2] Correct extra ammo first\r\n # Clear the if\r\n for i in range(7):\r\n api.nop_quietly(hex(IF_ADDR + i*4))\r\n \r\n # Use just the last to set up our a1..\r\n # It moves very fast so we may divide it.. we end up with 3 nops above this so we have room\r\n api.asm(hex(IF_ADDR + 0x6 * 0x4), guardDelayMem.lui_instr(\"a1\"))\r\n\r\n # .. then finish it off where they actually set a1\r\n api.asm(hex(EXTRA_AMMO_SET_A1), \"{} a1, {}\".format(guardDelayLoad, guardDelayMem.offset_term(\"a1\")))\r\n\r\n\r\n # [1]\r\n # Set up the a1 where they set it ..\r\n api.asm(hex(CURRENT_AMMO_SET_A1), api.MemConst.global_timer_delta.lui_instr(\"a1\"))\r\n\r\n \"\"\"\r\n End of CURRENT_AMMO section precedes the EXTRA_AMMO if statement\r\n We need to make a bit more space\r\n 7f069fac 0f c1 a7 23 jal FUN_7f069c8c undefined FUN_7f069c8c()\r\n 7f069fb0 00 00 38 25 _or a3,zero,zero\r\n 7f069fb4 af a2 00 68 sw v0,local_res0(sp)\r\n 7f069fb8 8f ad 00 50 lw t5,extraAmmo(sp)\r\n\r\n \"\"\"\r\n\r\n instrs = [\r\n # .. then make room to finish it off\r\n \"lw a1, {}\".format(api.MemConst.global_timer_delta.offset_term(\"a1\")),\r\n \"jal 0xf069c8c\",\r\n \"or a3,zero,zero\",\r\n \"sw v0, 0x68(sp)\",\r\n # lw t5 bumped off\r\n ]\r\n\r\n for i, instr in enumerate(instrs):\r\n api.asm(hex(CURRENT_AMMO_DRAW_CALL + i*0x4), instr)\r\n \r\n\r\n # Now tidy up the if statement - restore the extraAmmo > 0 test for safety\r\n instrs = [\r\n \"lw t5, 0x50(sp)\",\r\n \"lw a0, 0x60(sp)\",\r\n \"blez t5, 0x{:x}\".format(JMP_OVER_EXTRA_AMMO),\r\n \"nop\",\r\n \"nop\",\r\n \"nop\",\r\n # guardDelayMem.lui_instr(\"a1\")\r\n ]\r\n \r\n for i, instr in enumerate(instrs):\r\n api.asm(hex(IF_ADDR + i*0x4), instr)",
"def patch(ctx, tool, dir, remove):\n from nf_core.modules import ModulePatch\n\n try:\n module_patch = ModulePatch(\n dir,\n ctx.obj[\"modules_repo_url\"],\n ctx.obj[\"modules_repo_branch\"],\n ctx.obj[\"modules_repo_no_pull\"],\n )\n if remove:\n module_patch.remove(tool)\n else:\n module_patch.patch(tool)\n except (UserWarning, LookupError) as e:\n log.error(e)\n sys.exit(1)",
"def _patch_source_for_target(self, sysroot):\n\n # The only patching needed is for iOS.\n if sysroot.target_platform_name != 'ios':\n return\n\n patch = os.path.join('Modules', 'posixmodule.c')\n orig = patch + '.orig'\n\n sysroot.progress(\"Patching {0}\".format(patch))\n\n # iOS doesn't have system() and the POSIX module uses hard-coded\n # configurations rather than the normal configure by introspection\n # process.\n os.rename(patch, orig)\n\n orig_file = sysroot.open_file(orig)\n patch_file = sysroot.create_file(patch)\n\n for line in orig_file:\n # Just skip any line that sets HAVE_SYSTEM.\n minimal = line.strip().replace(' ', '')\n if minimal != '#defineHAVE_SYSTEM1':\n patch_file.write(line)\n\n orig_file.close()\n patch_file.close()",
"def update():\n deploy()\n update_virtualenv()\n update_staticfiles()\n restart_wsgi()",
"def _patch_source_for_target(self, sysroot):\n\n if sysroot.target_platform_name == 'ios':\n self._patch_source(sysroot,\n os.path.join('Modules', 'posixmodule.c'),\n self._patch_for_ios_system)\n\n elif sysroot.target_platform_name == 'win':\n self._patch_source(sysroot,\n os.path.join('Modules', '_io', '_iomodule.c'),\n self._patch_for_win_iomodule)\n\n self._patch_source(sysroot,\n os.path.join('Modules', 'expat', 'loadlibrary.c'),\n self._patch_for_win_loadlibrary)\n\n self._patch_source(sysroot,\n os.path.join('Modules', '_winapi.c'),\n self._patch_for_win_winapi)",
"def _patch_source_for_target(self):\n\n if self.target_platform_name == 'ios':\n self.patch_file(os.path.join('Modules', 'posixmodule.c'),\n self._patch_for_ios_system)\n\n elif self.target_platform_name == 'macos':\n # We use the macOS libffi from Python v3.8.10.\n if self.version >= (3, 8, 10):\n ctypes_dir = os.path.join('Modules', '_ctypes')\n\n for src in ('_ctypes.c', 'callbacks.c', 'callproc.c', 'stgdict.c', 'cfield.c', 'malloc_closure.c'):\n self.patch_file(os.path.join(ctypes_dir, src),\n self._patch_for_macos_ctypes)\n\n elif self.target_platform_name == 'win':\n # If we are supporting dynamic loading then we must be being built\n # as a DLL.\n if not self.dynamic_loading:\n self.patch_file(os.path.join('Lib', 'ctypes', '__init__.py'),\n self._patch_for_win_ctypes)\n\n self.patch_file(\n os.path.join('Modules', 'expat', 'winconfig.h'),\n self._patch_for_win_expat)\n\n if self.version <= (3, 7, 4):\n self.patch_file(\n os.path.join('Modules', 'expat', 'loadlibrary.c'),\n self._patch_for_win_expat)\n\n self.patch_file(os.path.join('Modules', '_winapi.c'),\n self._patch_for_win_winapi)",
"def install_patchs(self):\n\t\t\n\t\tthread.start_new = self._thread_patch(thread.start_new)\n\t\tthread.start_new_thread = self._thread_patch(thread.start_new_thread)\n\t\t\n\t\tthreading._start_new_thread = self._thread_patch(threading._start_new_thread)\n\t\t\n\t\tmultiprocessing.Process.start = self._multiprocessing_patch(\n\t\t\tmultiprocessing.Process.start)",
"def update_app():\n pull_project()\n restart_app()",
"def apply_hooks(before: Callable, after: Callable):\n logger.debug(\"Applying hooks\")\n\n # LightGBM\n try:\n import lightgbm\n\n patch(lightgbm, \"train\", before=before, after=after)\n patch(lightgbm.sklearn, \"train\", before=before, after=after)\n except Exception:\n logger.debug(\"Could not import and patch lightgbm\", exc_info=True)\n\n # XGBoost\n try:\n import xgboost\n\n patch(xgboost, \"train\", before=before, after=after)\n patch(xgboost.sklearn, \"train\", before=before, after=after)\n except Exception:\n logger.debug(\"Could not import and patch xgboost\", exc_info=True)\n\n # Tensorflow & Keras\n try:\n import tensorflow\n\n patch(tensorflow.keras.Model, \"fit\", before=before, after=after)\n patch(tensorflow.keras.Model, \"train_on_batch\", before=before, after=after)\n except Exception:\n logger.debug(\"Could not import and patch tensorflow.keras\", exc_info=True)\n # If tensorflow.keras patching doesn't work, we can try\n # patching keras as a standalone\n try:\n import keras\n\n patch(keras.Model, \"fit\", before=before, after=after)\n patch(keras.Model, \"train_on_batch\", before=before, after=after)\n except Exception:\n logger.debug(\"Could not import and patch keras\", exc_info=True)\n\n # SKLearn\n try:\n import sklearn.svm\n import sklearn.tree\n\n patch(sklearn.svm.SVC, \"fit\", before=before, after=after)\n patch(sklearn.svm.SVR, \"fit\", before=before, after=after)\n patch(sklearn.svm.OneClassSVM, \"fit\", before=before, after=after)\n patch(sklearn.svm.NuSVC, \"fit\", before=before, after=after)\n patch(sklearn.svm.NuSVR, \"fit\", before=before, after=after)\n patch(sklearn.svm.LinearSVR, \"fit\", before=before, after=after)\n patch(sklearn.svm.LinearSVC, \"fit\", before=before, after=after)\n patch(sklearn.tree.DecisionTreeClassifier, \"fit\", before=before, after=after)\n patch(sklearn.tree.DecisionTreeRegressor, \"fit\", before=before, after=after)\n except ImportError:\n logger.debug(\"Could not import and patch sklearn\", exc_info=True)\n\n # Catboost\n try:\n import catboost\n\n patch(catboost.CatBoostRegressor, \"fit\", before=before, after=after)\n patch(catboost.CatBoostClassifier, \"fit\", before=before, after=after)\n except ImportError:\n logger.debug(\"Could not import and patch catboost\", exc_info=True)",
"def update_versions(\n where: str = '.',\n *,\n add: Optional[VersionList] = None,\n drop: Optional[VersionList] = None,\n update: Optional[VersionList] = None,\n diff: bool = False,\n dry_run: bool = False,\n only: Optional[FilenameSet] = None,\n) -> ReplacementDict:\n\n sources: List[Tuple[str, ExtractorFn, UpdaterFn]] = [\n # filename, extractor, updater\n ('setup.py', get_supported_python_versions,\n update_supported_python_versions),\n ('setup.py', get_python_requires,\n update_python_requires),\n (TOX_INI, get_tox_ini_python_versions,\n update_tox_ini_python_versions),\n (TRAVIS_YML, get_travis_yml_python_versions,\n update_travis_yml_python_versions),\n (APPVEYOR_YML, get_appveyor_yml_python_versions,\n update_appveyor_yml_python_versions),\n (MANYLINUX_INSTALL_SH, get_manylinux_python_versions,\n update_manylinux_python_versions),\n # TODO: CHANGES.rst\n ]\n replacements: ReplacementDict = {}\n\n for (filename, extractor, updater) in sources:\n if only and filename not in only:\n continue\n pathname = os.path.join(where, filename)\n if not os.path.exists(pathname):\n continue\n versions = extractor(filename_or_replacement(pathname, replacements))\n if versions is None:\n continue\n\n versions = sorted(important(versions))\n new_versions = update_version_list(\n versions, add=add, drop=drop, update=update)\n if versions != new_versions:\n fp = filename_or_replacement(pathname, replacements)\n new_lines = updater(fp, new_versions)\n if new_lines is not None:\n # TODO: refactor this into two functions, one that produces a\n # replacement dict and does no user interaction, and another\n # that does user interaction based on the contents of the\n # replacement dict.\n if diff:\n fp = filename_or_replacement(pathname, replacements)\n show_diff(fp, new_lines)\n if dry_run:\n # XXX: why do this on dry-run only, why not always return a\n # replacement dict?\n replacements[pathname] = new_lines\n if not diff and not dry_run:\n confirm_and_update_file(pathname, new_lines)\n\n return replacements"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Quantum ESPRESSO Symmetry class =============================== This class contains all the information about Quantum ESPRESSO symmetry data. It is used to wrap symmetries into the Quantum ESPRESSO Fortran subroutines. Starting from a set of symmetry operations and the structure of the system, it builds all the QE symmetry operations.
|
def __init__(self, structure, threshold = 1e-5):
if not structure.has_unit_cell:
        raise ValueError("Error, symmetry operations can be initialized only if the structure has a unit cell")
self.structure = structure
self.threshold = np.float64(threshold)
# Setup the threshold
symph.symm_base.set_accep_threshold(self.threshold)
nat = structure.N_atoms
    # Define the Quantum ESPRESSO symmetry variables in an optimized way to work with Fortran90
self.QE_nat = np.intc( nat )
self.QE_s = np.zeros( (3, 3, 48) , dtype = np.intc, order = "F")
self.QE_irt = np.zeros( (48, nat), dtype = np.intc, order = "F")
self.QE_invs = np.zeros( (48), dtype = np.intc, order = "F")
self.QE_rtau = np.zeros( (3, 48, nat), dtype = np.float64, order = "F")
self.QE_ft = np.zeros( (3, 48), dtype = np.float64, order = "F")
self.QE_minus_q = False
self.QE_irotmq = np.intc(0)
self.QE_nsymq = np.intc( 0 )
self.QE_nsym = np.intc(0)
# Prepare the QE structure
self.QE_tau = np.zeros((3, nat), dtype = np.float64, order = "F")
self.QE_ityp = np.zeros(nat, dtype = np.intc)
symbs = {}
counter = 1
for i in range(nat):
        # Assign an integer type index to each atomic species
        atm = structure.atoms[i]
        if atm not in symbs:
symbs[atm] = counter
counter += 1
self.QE_ityp[i] = symbs[atm]
        # Convert to bohr
for j in range(3):
self.QE_tau[j, i] = structure.coords[i, j]
self.QE_at = np.zeros( (3,3), dtype = np.float64, order = "F")
self.QE_bg = np.zeros( (3,3), dtype = np.float64, order = "F")
bg = structure.get_reciprocal_vectors()
for i in range(3):
for j in range(3):
self.QE_at[i,j] = structure.unit_cell[j,i]
self.QE_bg[i,j] = bg[j,i] / (2* np.pi)
# Here we define the quantities required to symmetrize the supercells
self.QE_at_sc = self.QE_at.copy()
self.QE_bg_sc = self.QE_bg.copy()
self.QE_translation_nr = 1 # The supercell total dimension (Nx * Ny * Nz)
self.QE_translations = [] # The translations in crystal axes
    # After the translation, which vector is transformed into which one?
# This info is stored here as ndarray( size = (N_atoms, N_trans), dtype = np.intc, order = "F")
self.QE_translations_irt = []
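
As a quick illustration of the species-indexing loop above (each distinct atomic symbol is mapped to a consecutive integer type, matching the ityp array QE expects), here is a minimal self-contained sketch; the atom list is a hypothetical stand-in for structure.atoms:

import numpy as np

atoms = ["H", "O", "H"]                 # hypothetical stand-in for structure.atoms
ityp = np.zeros(len(atoms), dtype=np.intc)
symbs = {}
counter = 1
for i, atm in enumerate(atoms):
    if atm not in symbs:
        symbs[atm] = counter
        counter += 1
    ityp[i] = symbs[atm]
print(ityp)                             # -> [1 2 1]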
|
[
"def DialsSymmetry(DriverType=None):\n\n DriverInstance = DriverFactory.Driver(DriverType)\n\n class DialsSymmetryWrapper(DriverInstance.__class__):\n \"\"\"A wrapper for dials.symmetry\"\"\"\n\n def __init__(self):\n # generic things\n super().__init__()\n\n self.set_executable(\"dials.symmetry\")\n\n self._input_laue_group = None\n\n self._experiments_filenames = []\n self._reflections_filenames = []\n self._output_experiments_filename = None\n self._output_reflections_filename = None\n\n self._hklin = None\n self._hklout = None\n self._pointgroup = None\n self._spacegroup = None\n self._reindex_matrix = None\n self._reindex_operator = None\n self._spacegroup_reindex_matrix = None\n self._spacegroup_reindex_operator = None\n self._confidence = 0.0\n self._hklref = None\n self._xdsin = None\n self._probably_twinned = False\n self._allow_out_of_sequence_files = False\n\n self._relative_length_tolerance = 0.05\n self._absolute_angle_tolerance = 2\n self._laue_group = \"auto\"\n self._sys_abs_check = True\n\n # space to store all possible solutions, to allow discussion of\n # the correct lattice with the indexer... this should be a\n # list containing e.g. 'tP'\n self._possible_lattices = []\n\n self._lattice_to_laue = {}\n\n # all \"likely\" spacegroups...\n self._likely_spacegroups = []\n\n # and unit cell information\n self._cell_info = {}\n self._cell = None\n\n self._json = None\n\n def set_mode_absences_only(self):\n self._laue_group = None\n self._sys_abs_check = True\n\n def set_hklin(self, hklin):\n self._hklin = hklin\n\n def get_hklin(self):\n return self._hklin\n\n def add_experiments(self, experiments):\n self._experiments_filenames.append(experiments)\n\n def add_reflections(self, reflections):\n self._reflections_filenames.append(reflections)\n\n def set_experiments_filename(self, experiments_filename):\n self._experiments_filenames = [experiments_filename]\n\n def set_reflections_filename(self, reflections_filename):\n self._reflections_filenames = [reflections_filename]\n\n def set_output_experiments_filename(self, experiments_filename):\n self._output_experiments_filename = experiments_filename\n\n def set_output_reflections_filename(self, reflections_filename):\n self._output_reflections_filename = reflections_filename\n\n def get_output_reflections_filename(self):\n return self._output_reflections_filename\n\n def get_output_experiments_filename(self):\n return self._output_experiments_filename\n\n def set_json(self, json):\n self._json = json\n\n def set_allow_out_of_sequence_files(self, allow=True):\n self._allow_out_of_sequence_files = allow\n\n def set_tolerance(\n self, relative_length_tolerance=0.05, absolute_angle_tolerance=2\n ):\n self._relative_length_tolerance = relative_length_tolerance\n self._absolute_angle_tolerance = absolute_angle_tolerance\n\n def set_correct_lattice(self, lattice):\n \"\"\"In a rerunning situation, set the correct lattice, which will\n assert a correct lauegroup based on the previous run of the\n program...\"\"\"\n\n if self._lattice_to_laue == {}:\n raise RuntimeError(\"no lattice to lauegroup mapping\")\n\n if lattice not in self._lattice_to_laue:\n raise RuntimeError(\"lattice %s not possible\" % lattice)\n self._input_laue_group = self._lattice_to_laue[lattice]\n\n with open(self._json, \"rb\") as f:\n d = json.load(f)\n for soln in d[\"subgroup_scores\"]:\n patterson_group = sgtbx.space_group(str(soln[\"patterson_group\"]))\n if PhilIndex.params.xia2.settings.symmetry.chirality in (\n None,\n \"chiral\",\n ):\n patterson_group = 
patterson_group.build_derived_acentric_group()\n\n if patterson_group == self._input_laue_group:\n # set this as correct solution\n self.set_best_solution(d, soln)\n break\n # okay so now set pg and lattices, but need to update output file by reindexing\n\n def decide_pointgroup(self, ignore_errors=False, batches=None):\n \"\"\"Decide on the correct pointgroup/spacegroup for hklin.\"\"\"\n\n self.clear_command_line()\n\n if self._hklref:\n self.add_command_line(\"hklref\")\n self.add_command_line(self._hklref)\n\n if self._hklin is not None:\n assert os.path.isfile(self._hklin)\n self.add_command_line(self._hklin)\n else:\n assert self._experiments_filenames # is not None\n assert self._reflections_filenames # is not None\n for exp in self._experiments_filenames:\n self.add_command_line(exp)\n for refl in self._reflections_filenames:\n self.add_command_line(refl)\n\n if not self._output_experiments_filename:\n self._output_experiments_filename = os.path.join(\n self.get_working_directory(),\n \"%d_symmetrized.expt\" % self.get_xpid(),\n )\n if not self._output_reflections_filename:\n self._output_reflections_filename = os.path.join(\n self.get_working_directory(),\n \"%d_symmetrized.refl\" % self.get_xpid(),\n )\n\n self.add_command_line(\n \"output.experiments=%s\" % self._output_experiments_filename\n )\n self.add_command_line(\n \"output.reflections=%s\" % self._output_reflections_filename\n )\n if self._laue_group is None:\n self.add_command_line(\"laue_group=None\")\n if not self._sys_abs_check:\n self.add_command_line(\"systematic_absences.check=False\")\n self.add_command_line(\n \"relative_length_tolerance=%s\" % self._relative_length_tolerance\n )\n self.add_command_line(\n \"absolute_angle_tolerance=%s\" % self._absolute_angle_tolerance\n )\n self.add_command_line(\"best_monoclinic_beta=False\")\n if not self._json:\n self._json = os.path.join(\n self.get_working_directory(),\n \"%d_dials_symmetry.json\" % self.get_xpid(),\n )\n\n self.add_command_line(\"output.json=%s\" % self._json)\n\n if self._input_laue_group:\n self.add_command_line(\"lattice_group=%s\" % self._input_laue_group)\n\n self.start()\n\n self.close_wait()\n\n # check for errors\n self.check_for_errors()\n\n if self._laue_group is not None:\n with open(self._json) as fh:\n d = json.load(fh)\n best_solution = d[\"subgroup_scores\"][0]\n\n self.set_best_solution(d, best_solution)\n\n def set_best_solution(self, d, best_solution):\n patterson_group = sgtbx.space_group(str(best_solution[\"patterson_group\"]))\n if PhilIndex.params.xia2.settings.symmetry.chirality in (None, \"chiral\"):\n patterson_group = patterson_group.build_derived_acentric_group()\n cb_op_min_best = sgtbx.change_of_basis_op(str(best_solution[\"cb_op\"]))\n # This should only be called with multiple sweeps if they're already\n # consistently indexed, so assert that they all have the same\n # cb_op_inp_min\n assert len(set(d[\"cb_op_inp_min\"])) == 1\n cb_op_inp_min = sgtbx.change_of_basis_op(str(d[\"cb_op_inp_min\"][0]))\n\n min_cell = uctbx.unit_cell(d[\"min_cell_symmetry\"][\"unit_cell\"])\n best_cell = min_cell.change_basis(cb_op=cb_op_min_best)\n\n cs = crystal.symmetry(\n unit_cell=best_cell,\n space_group=patterson_group,\n assert_is_compatible_unit_cell=False,\n )\n self._pointgroup = cs.space_group().type().lookup_symbol()\n\n self._confidence = best_solution[\"confidence\"]\n self._totalprob = best_solution[\"likelihood\"]\n cb_op_inp_best = cb_op_min_best * cb_op_inp_min\n self._reindex_operator = cb_op_inp_best.as_xyz()\n 
self._reindex_matrix = cb_op_inp_best.c().r().as_double()\n\n if not self._input_laue_group and not self._hklref:\n for score in d[\"subgroup_scores\"]:\n patterson_group = sgtbx.space_group(str(score[\"patterson_group\"]))\n if PhilIndex.params.xia2.settings.symmetry.chirality in (\n None,\n \"chiral\",\n ):\n patterson_group = patterson_group.build_derived_acentric_group()\n\n cb_op_inp_this = sgtbx.change_of_basis_op(str(score[\"cb_op\"]))\n unit_cell = min_cell.change_basis(\n cb_op=sgtbx.change_of_basis_op(str(cb_op_inp_this))\n )\n cs = crystal.symmetry(\n unit_cell=unit_cell,\n space_group=patterson_group,\n assert_is_compatible_unit_cell=False,\n )\n patterson_group = cs.space_group()\n\n netzc = score[\"z_cc_net\"]\n # record this as a possible lattice if its Z score is positive\n lattice = str(bravais_types.bravais_lattice(group=patterson_group))\n if lattice not in self._possible_lattices:\n if netzc > 0.0:\n self._possible_lattices.append(lattice)\n self._lattice_to_laue[lattice] = patterson_group\n self._likely_spacegroups.append(\n patterson_group.type().lookup_symbol()\n )\n\n elif self._input_laue_group:\n self._possible_lattices = [\n str(bravais_types.bravais_lattice(group=patterson_group))\n ]\n self._likely_spacegroups = [patterson_group.type().lookup_symbol()]\n\n def get_reindex_matrix(self):\n return self._reindex_matrix\n\n def get_reindex_operator(self):\n return self._reindex_operator\n\n def get_pointgroup(self):\n return self._pointgroup\n\n def get_cell(self):\n return self._cell\n\n def get_probably_twinned(self):\n return self._probably_twinned\n\n # FIXME spacegroup != pointgroup\n decide_spacegroup = decide_pointgroup\n get_spacegroup = get_pointgroup\n get_spacegroup_reindex_operator = get_reindex_operator\n get_spacegroup_reindex_matrix = get_reindex_matrix\n\n def get_likely_spacegroups(self):\n return self._likely_spacegroups\n\n def get_confidence(self):\n return self._confidence\n\n def get_possible_lattices(self):\n return self._possible_lattices\n\n return DialsSymmetryWrapper()",
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def getRawSymmetryMatrix(*args, **kwargs):\n \n pass",
"def generate_symbols(self):\n\n logger.debug(f'- Generating symbols for {self.class_name}')\n\n # clear symbols storage\n self.f_list, self.g_list = list(), list()\n self.f_matrix, self.g_matrix = Matrix([]), Matrix([])\n\n # process tex_names defined in model\n # -----------------------------------------------------------\n for key in self.parent.tex_names.keys():\n self.tex_names[key] = Symbol(self.parent.tex_names[key])\n for instance in self.parent.discrete.values():\n for name, tex_name in zip(instance.get_names(), instance.get_tex_names()):\n self.tex_names[name] = tex_name\n # -----------------------------------------------------------\n\n for var in self.cache.all_params_names:\n self.inputs_dict[var] = Symbol(var)\n\n for var in self.cache.all_vars_names:\n tmp = Symbol(var)\n self.vars_dict[var] = tmp\n self.inputs_dict[var] = tmp\n if var in self.cache.vars_int:\n self.vars_int_dict[var] = tmp\n\n # store tex names defined in `self.config`\n for key in self.config.as_dict():\n tmp = Symbol(key)\n self.inputs_dict[key] = tmp\n if key in self.config.tex_names:\n self.tex_names[tmp] = Symbol(self.config.tex_names[key])\n\n # store tex names for pretty printing replacement later\n for var in self.inputs_dict:\n if var in self.parent.__dict__ and self.parent.__dict__[var].tex_name is not None:\n self.tex_names[Symbol(var)] = Symbol(self.parent.__dict__[var].tex_name)\n\n self.inputs_dict['dae_t'] = Symbol('dae_t')\n self.inputs_dict['sys_f'] = Symbol('sys_f')\n self.inputs_dict['sys_mva'] = Symbol('sys_mva')\n\n self.lambdify_func[0]['Indicator'] = lambda x: x\n self.lambdify_func[0]['imag'] = np.imag\n self.lambdify_func[0]['real'] = np.real\n self.lambdify_func[0]['im'] = np.imag\n self.lambdify_func[0]['re'] = np.real\n\n self.vars_list = list(self.vars_dict.values()) # useful for ``.jacobian()``",
"def writeSymmetryData(self,parameters): \n \n symdict = {'ncs':[],'C2':[],'C3':[],'C4':[],'C5':[],'C6':[]}; symw = False\n \n for symmetry in self.latestRun.sortedSymmetryRestraints():\n symdict[symmetry.symmetryCode].append((1,symmetry.segmentLength,'A'))\n \n for symmetry in symdict:\n if len(symdict[symmetry]) > 0:\n if symmetry == 'ncs': \n parameters['ncs'] = {}\n parameters['ncs']['on'] = True\n parameters['ncs']['constant'] = self.latestRun.get('ncsRestraintConstant')\n parameters['ncs']['segments'] = []\n for symrange in symdict[symmetry]: parameters['ncs']['segments'].append(symrange) \n elif symw == False: \n parameters['symmetry'] = {}\n parameters['symmetry']['on'] = True\n parameters['symmetry']['constant'] = self.latestRun.get('symmetryRestraintConstant')\n for symrange in symdict[symmetry]: parameters['ncs']['segments'].append(symrange)\n symw = True\n else: pass",
"def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):\n \n nsym = len(symmetries)\n \n self.QE_nsymq = np.intc(nsym)\n self.QE_nsym = self.QE_nsymq\n \n \n for i, sym in enumerate(symmetries):\n self.QE_s[:,:, i] = np.transpose(sym[:, :3])\n \n # Get the atoms correspondence\n eq_atoms = GetIRT(self.structure, sym)\n \n self.QE_irt[i, :] = eq_atoms + 1\n \n # Get the inverse symmetry\n inv_sym = np.linalg.inv(sym[:, :3])\n for k, other_sym in enumerate(symmetries):\n if np.sum( (inv_sym - other_sym[:, :3])**2) < __EPSILON__:\n break\n \n self.QE_invs[i] = k + 1\n \n # Setup the position after the symmetry application\n for k in range(self.QE_nat):\n self.QE_rtau[:, i, k] = self.structure.coords[eq_atoms[k], :].astype(np.float64)\n \n \n # Get the reciprocal lattice vectors\n b_vectors = self.structure.get_reciprocal_vectors()\n \n # Get the minus_q operation\n self.QE_minusq = False\n\n # NOTE: HERE THERE COULD BE A BUG\n \n # q != -q\n # Get the q vectors in crystal coordinates\n q = Methods.covariant_coordinates(b_vectors, q_point)\n for k, sym in enumerate(self.QE_s):\n new_q = self.QE_s[:,:, k].dot(q)\n if np.sum( (Methods.put_into_cell(b_vectors, -q_point) - new_q)**2) < __EPSILON__:\n self.QE_minus_q = True\n self.QE_irotmq = k + 1\n break",
"def _derive_layout_symmetry(self):\n self._sym_df = None # Default option\n if self.exploit_layout_symmetry:\n # Check symmetry of bounds & turbine_weights\n if np.unique(self.minimum_yaw_angle, axis=0).shape[0] > 1:\n print(\"minimum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.turbine_weights, axis=0).shape[0] > 1:\n print(\"turbine_weights is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n # Check if turbine_weights are consistently 1.0 everywhere\n if np.any(np.abs(self.turbine_weights - 1.0) > 0.001):\n print(\"turbine_weights are not uniformly 1.0.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n x = self.fi.layout_x\n y = self.fi.layout_y\n df = find_layout_symmetry(x=x, y=y)\n\n # If no axes of symmetry, exit function\n if df.shape[0] <= 0:\n print(\"Wind farm layout in floris is not symmetrical.\")\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n wd_array = self.fi.floris.flow_field.wind_directions\n sym_step = df.iloc[0][\"wd_range\"][1]\n if ((0.0 not in wd_array) or(sym_step not in wd_array)):\n print(\"Floris wind direction array does not \" +\n \"intersect {:.1f} and {:.1f}.\".format(0.0, sym_step))\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n ids_minimal = (wd_array >= 0.0) & (wd_array < sym_step)\n wd_array_min = wd_array[ids_minimal]\n wd_array_remn = np.remainder(wd_array, sym_step)\n\n if not np.all([(x in wd_array_min) for x in wd_array_remn]):\n print(\"Wind direction array appears irregular.\")\n print(\"Exploitation of symmetry has been disabled.\")\n\n self._sym_mapping_extrap = np.array(\n [np.where(np.abs(x - wd_array_min) < 0.0001)[0][0]\n for x in wd_array_remn], dtype=int)\n\n self._sym_mapping_reduce = copy.deepcopy(ids_minimal)\n self._sym_df = df\n\n return",
"def SetupFromSPGLIB(self):\n if not __SPGLIB__:\n raise ImportError(\"Error, this function works only if spglib is available\")\n\n # Get the symmetries\n spg_syms = spglib.get_symmetry(self.structure.get_ase_atoms(), symprec = self.threshold)\n symmetries = GetSymmetriesFromSPGLIB(spg_syms, regolarize= False)\n\n trans_irt = 0\n self.QE_s[:,:,:] = 0\n\n\n # Check how many point group symmetries do we have\n n_syms = 0\n for i, sym in enumerate(symmetries):\n # Extract the rotation and the fractional translation\n rot = sym[:,:3]\n\n # Check if the rotation is equal to the first one\n if np.sum( (rot - symmetries[0][:,:3])**2 ) < 0.1 and n_syms == 0 and i > 0:\n # We got all the rotations\n n_syms = i \n break\n \n # Extract the point group\n if n_syms == 0:\n self.QE_s[:,:, i] = rot.T\n\n # Get the IRT (Atoms mapping using symmetries)\n irt = GetIRT(self.structure, sym)\n self.QE_irt[i, :] = irt + 1 #Py to Fort\n\n \n if n_syms == 0:\n n_syms = len(symmetries)\n \n # From the point group symmetries, get the supercell\n n_supercell = len(symmetries) // n_syms\n self.QE_translation_nr = n_supercell\n self.QE_nsymq = n_syms\n self.QE_nsym = n_syms\n\n self.QE_translations_irt = np.zeros( (self.structure.N_atoms, n_supercell), dtype = np.intc, order = \"F\")\n self.QE_translations = np.zeros( (3, n_supercell), dtype = np.double, order = \"F\")\n\n # Now extract the translations\n for i in range(n_supercell):\n sym = symmetries[i * n_syms]\n # Check if the symmetries are correctly setup\n\n I = np.eye(3)\n ERROR_MSG=\"\"\"\n Error, symmetries are not correctly ordered.\n They must always start with the identity.\n\n N_syms = {}; N = {}; SYM = {}\n \"\"\".format(n_syms,i*n_syms, sym)\n assert np.sum( (I - sym[:,:3])**2) < 0.5, ERROR_MSG\n\n # Get the irt for the translation (and the translation)\n irt = GetIRT(self.structure, sym)\n self.QE_translations_irt[:, i] = irt + 1\n self.QE_translations[:, i] = sym[:,3]\n\n # For each symmetry operation, assign the inverse\n self.QE_invs[:] = get_invs(self.QE_s, self.QE_nsym)",
"def initialize_volume_symmetry_map(self):\n #@type pg PointGroup\n pg = self.crystal.get_point_group()\n if pg is None:\n print \"ERROR!\"\n return\n\n t1 = time.time()\n\n order = len(pg.table)\n #@type inst Instrument\n inst = self.inst\n\n #Initialize the symmetry map. Last dimension = the ORDER equivalent indices\n n = len(inst.qx_list)\n numpix = n**3\n symm = np.zeros( (numpix, order) , dtype=int)\n\n if self.verbose: print \"Starting volume symmetry calculation. Order is %d. Matrix is %d voxels (%d to a side).\" % (order, n**3, n)\n\n #--- From get_hkl_from_q functions: (moved here for speed) --\n #Get the inverse the B matrix to do the reverse conversion\n B = self.crystal.get_B_matrix()\n invB = np.linalg.inv(B)\n\n #Limit +- in q space\n qlim = inst.qlim\n \n if config.cfg.force_pure_python:\n #----------- Pure Python Version --------------\n\n #Go through each pixel\n q_arr = np.zeros( (3, numpix) )\n for (ix, qx) in enumerate(inst.qx_list):\n for (iy, qy) in enumerate(inst.qx_list):\n for (iz, qz) in enumerate(inst.qx_list):\n i = iz + iy*n + ix*n*n\n #Find the (float) HKL of this voxel at qx,qy,qz.\n q_arr[:, i] = (qx,qy,qz)\n\n #Matrix multiply invB.hkl to get all the HKLs as a column array\n hkl = np.dot(invB, q_arr)\n\n #Now get ORDER equivalent HKLs, as a long list.\n #(as equivalent q)\n q_equiv = np.zeros( (3, numpix, order) )\n for ord in xrange(order):\n #Ok, we go TABLE . hkl to get the equivalent hkl\n #Them, B . hkl gives you the Q vector\n q_equiv[:,:, ord] = np.dot(B, np.dot(pg.table[ord], hkl) )\n\n #Now we need to find the index into the array.\n #Start by finding the x,y,z, indices\n ix = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[0, :, ord])\n iy = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[1, :, ord])\n iz = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[2, :, ord])\n\n #Now put the index into the symmetry matrix\n index = iz + iy*n + ix*n*n\n index[np.isnan(index)] = -1 #Put -1 where a NAN was found\n symm[:, ord] = index\n\n\n else:\n #--------------- Inline C version (about 17x faster than Python) ---------------\n code = \"\"\"\n\n //-- Calculate the hkl array ---\n int ix, iy, iz;\n int eix, eiy, eiz, eindex;\n int index, ord;\n double qx, qy, qz;\n double eqx, eqy, eqz;\n double h, k, l;\n double eh, ek, el;\n for (ix=0; ix<n; ix++)\n {\n qx = ix*qres - qlim;\n for (iy=0; iy<n; iy++)\n {\n qy = iy*qres - qlim;\n for (iz=0; iz<n; iz++)\n {\n qz = iz*qres - qlim;\n index = iz + iy*n + ix*n*n;\n //Ok, now we matrix multiply invB.hkl to get all the HKLs as a column array\n h = qx * INVB2(0,0) + qy * INVB2(0,1) + qz * INVB2(0,2);\n k = qx * INVB2(1,0) + qy * INVB2(1,1) + qz * INVB2(1,2);\n l = qx * INVB2(2,0) + qy * INVB2(2,1) + qz * INVB2(2,2);\n\n //Now go through each equivalency table.\n for (ord=0; ord<order; ord++)\n {\n //Do TABLE.hkl to find a new equivalent hkl\n eh = h * TABLE3(ord, 0,0) + k * TABLE3(ord, 0,1) + l * TABLE3(ord, 0,2);\n ek = h * TABLE3(ord, 1,0) + k * TABLE3(ord, 1,1) + l * TABLE3(ord, 1,2);\n el = h * TABLE3(ord, 2,0) + k * TABLE3(ord, 2,1) + l * TABLE3(ord, 2,2);\n //Now, matrix mult B . 
equiv_hkl to get the other q vector\n eqx = eh * B2(0,0) + ek * B2(0,1) + el * B2(0,2);\n eqy = eh * B2(1,0) + ek * B2(1,1) + el * B2(1,2);\n eqz = eh * B2(2,0) + ek * B2(2,1) + el * B2(2,2);\n\n //Ok, now you have to find the index into QSPACE\n eix = round( (eqx+qlim)/qres ); if ((eix >= n) || (eix < 0)) eix = -1; \n eiy = round( (eqy+qlim)/qres ); if ((eiy >= n) || (eiy < 0)) eiy = -1;\n eiz = round( (eqz+qlim)/qres ); if ((eiz >= n) || (eiz < 0)) eiz = -1;\n\n if ((eix < 0) || (eiy < 0) || (eiz < 0))\n {\n //One of the indices was out of bounds.\n //Put this marker to mean NO EQUIVALENT\n SYMM2(index, ord) = -1;\n }\n else\n {\n //No problem!, Now I put it in there\n eindex = eiz + eiy*n + eix*n*n;\n //This pixel (index) has this equivalent pixel index (eindex) for this order transform ord.\n SYMM2(index, ord) = eindex;\n }\n\n }\n \n }\n }\n }\n \"\"\"\n qres = inst.q_resolution\n n = len(self.inst.qx_list)\n table = np.array(pg.table) #Turn the list of 3x3 arrays into a Nx3x3 array\n varlist = ['B', 'invB', 'symm', 'qres', 'qlim', 'n', 'order', 'table']\n weave.inline(code, varlist, compiler='gcc', support_code=\"\")\n\n #Done with either version\n self.volume_symmetry = symm\n\n if self.verbose: print \"Volume symmetry map done in %.3f sec.\" % (time.time()-t1)",
"def build_sym(cls, savepath='ethem_system_output.txt'):\n\n # printing the name of the method and pprint the symbolic expression.\n def pprint(method):\n print('\\n{} :\\n'.format(method.__name__))\n sy.pprint(method(), wrap_line=False, use_unicode=False)\n\n\n # saving the original printing backend.\n original_stdout = sys.stdout\n\n # printing into a txt file.\n try:\n\n# # creating save directory of the save file.\n# build_path(savepath)\n\n # redirecting the printing to the txt file.\n sys.stdout = open(savepath, 'w')\n\n # defining the attribute 'bath_list'\n System.bath_list = System.subclass_list(RealBath)\n\n # printing and defining all the symbolic attributes\n pprint(System.build_phi_vect)\n pprint(System.build_capacity_matrix)\n pprint(System.build_steady_state_eq)\n pprint(System.build_eletro_thermal_eq)\n pprint(System.build_coupling_matrix)\n pprint(System.build_admittance_mat)\n\n # String to conclude the txt file, and assure a good pprint.\n print('\\n END OF PPRINT.')\n\n # reverting to the original printing backend\n sys.stdout = original_stdout\n # for the user\n print('Building System Done (written in {})'.format(savepath))\n\n finally:\n # even if an error occur, the priting backend is reverted to\n # the original one.\n sys.stdout = original_stdout",
"def to_xml(self):\n\n base_elem = ET.Element(\"symmetry\")\n x_elem = ET.SubElement(base_elem, \"x\")\n x_elem.text = self._symmetry_type_to_text[self.x_symmetry]\n y_elem = ET.SubElement(base_elem, \"y\")\n y_elem.text = self._symmetry_type_to_text[self.y_symmetry]\n z_elem = ET.SubElement(base_elem, \"z\")\n z_elem.text = self._symmetry_type_to_text[self.z_symmetry]\n\n return base_elem",
"def retr_symmetry_operations(struct,ini):\n ini[\"symgen\"] = struct.get_symmetry_operations()\n return ini",
"def PrintSymmetries(self):\n\n print()\n print(\"Number of symmetries: {}\".format(self.QE_nsym))\n syms = self.GetSymmetries()\n for i in range(self.QE_nsym):\n print(\" Symmetry {}\".format(i+1))\n for j in range(3):\n print(\" {:3.0f}{:3.0f}{:3.0f} | {:6.3f}\".format(*syms[i][j,:]))\n print()",
"def __equiv__(self, miller=None, csym=None,\n cdim=[1.,1.,1.], cang=[90.,90.,90.]):\n start = time.time()\n from .sym import cv\n from .sym import cubic, hexag\n #from sym_cy import cubic, hexag\n from . import sym #python compiled\n #import sym_cy #cython compiled\n #from sym.py cvec, cubic, and hexgonal modules are brought in\n if miller==None:\n print(\"Miller index should be given\")\n raise IOError\n vect = np.array(miller)\n norm = 0.; sneq = []\n temp = vect.copy()\n #norm = vect[0]**2 + vect[1]**2 + vect[2]**2\n #norm = np.sqrt(norm)\n #vect = vect/ norm\n #print 'elapsed time before v calculation: %8.6f'%\n #(time.time()-start)\n\n ##---------------------------------\n ##---------------------------------\n #start = time.time()\n if csym=='cubic':\n #H = sym_cy.cubic(1) #cython compiled\n H = sym.cubic() #operators\n for i in range(len(H)):\n sneq.append(np.dot(H[i], vect))\n pass\n pass\n elif csym=='hexag':\n #H = sym_cy.hexag(1) #cython compiled\n H = sym.hexag() #operators\n v = cv(pole=vect, cdim=cdim, cang=cang)\n for i in range(len(H)):\n sneq.append(np.dot(H[i], v))\n pass\n pass\n elif csym=='None':\n #H = [np.identity(3)]\n sneq = [vect]\n else:\n print('Given symmetry, %s is not prepared'%csym)\n input('Enter to raise an error and quits the job');\n raise IOError\n\n #print 'elapsed time during v calculation: %8.6f'%\n #(time.time()-start)\n #####-------------------------------\n #####--------------------------------\n\n start = time.time()\n stacked = [] #empty unique vectors\n # is cH in the already existing stacked list?\n # yes: pass\n # no : add\n\n ## filtering the sneq under whether or not it is unique\n for i in range(len(sneq)):\n cH = sneq[i].copy() #current vector\n if __isunique__(a=cH, b=stacked):\n stacked.append(cH)\n else: pass\n pass\n\n ## if v[2] is minus, mutiply minus sign to the vector.\n for i in range(len(stacked)):\n if stacked[i][2]<0:\n stacked[i] = stacked[i]*-1\n pass\n #print 'elapsed time during the rest: %8.6f'%\n #(time.time()-start)\n return np.array(stacked)",
"def Generate(self, dyn, qe_sym = None):\n \n # Check if the symmetries must be initialize\n if qe_sym is None:\n qe_sym = CC.symmetries.QE_Symmetry(dyn.structure)\n \n \n # Get the number of irreducible q points from the matrix\n self.nq = dyn.nqirr\n self.nat = dyn.structure.N_atoms\n \n # Initialize the symmetries at q = 0\n qe_sym.SetupQPoint()\n \n # Prepare the wyckoff basis\n tmp_wyck_gen = np.zeros((3 * self.nat, self.nat, 3), dtype = np.float64)\n \n for i in range( 3 * self.nat):\n x = i % 3\n n = i / 3\n tmp_wyck_gen[i, n, x] = 1\n \n # Symmetrize the vector\n qe_sym.SymmetrizeVector(tmp_wyck_gen[i, :, :])\n \n # Apply the gram-schmidt\n new_gen = tmp_wyck_gen.reshape((3 * self.nat, 3 * self.nat)).transpose()\n new_gen = scipy.linalg.orth(new_gen).transpose()\n \n # Get the number of wyckoff coefficients\n self.wyck_ncoeff = new_gen.shape()[0]\n \n # Reshape the array and get the coefficients\n self.wyck_gen = new_gen.reshape((self.wyck_ncoeff, self.nat, 3))\n \n r = np.arange(3 * self.nat)\n \n self.dyn_ncoeff = np.zeros(self.nq, dtype = int)\n self.dyn_gen = []\n \n # Cycle for each irreducible q point of the matrix\n for iq in range(self.nq):\n q = dyn.q_stars[iq][0]\n # Setup the symmetries for this q point\n qe_sym.SetupQPoint(q)\n \n gh = []\n \n for i in range(self.nat * 3):\n for j in range(i, self.nat * 3):\n # Take the generator\n fc = np.zeros((3 * self.nat, 3 * self.nat), dtype = np.complex128)\n fc[i, j] = 1\n \n # Apply the symmetry\n qe_sym.SymmetrizeDynQ(q, fc)\n \n # Check if the generator has already be generated\n is_new = True\n for k in range(i+1):\n mask = fc[k, :] != 0\n first_value = r[mask]\n if len(first_value):\n if k == i:\n if first_value[0] < j:\n is_new = False\n break\n else:\n is_new = False\n break\n \n # If the generator is new\n if is_new:\n qe_sym.ImposeSumRule(fc, \"simple\")\n \n # Check if the sum rule makes this generator desappearing\n if np.sum ((fc != 0).as_type(int)) != 0:\n gh.append(fc / np.sqrt(np.trace(fc.dot(fc))))\n \n dim = len(gh)\n \n # Prepare the gram-shmidt\n gh = np.array(gh, dtype = np.complex128)\n \n gh_new = np.reshape((dim, 9 * self.nat**2)).transpose()\n gh_new = scipy.linalg.orth(gh_new).transpose()\n \n self.dyn_ncoeff = np.shape(gh_new)[0]\n \n self.dyn_gen.append(np.reshape(gh_new, (self.dyn_ncoeff, 3*self.nat, 3*self.nat)))",
"def getSymmetryPlane(*args, **kwargs):\n \n pass",
"def to_openqasm(self, wires=None, rotations=True):\n # We import decompose_queue here to avoid a circular import\n wires = wires or self.wires\n\n # add the QASM headers\n qasm_str = \"OPENQASM 2.0;\\n\"\n qasm_str += 'include \"qelib1.inc\";\\n'\n\n if self.num_wires == 0:\n # empty circuit\n return qasm_str\n\n # create the quantum and classical registers\n qasm_str += \"qreg q[{}];\\n\".format(len(wires))\n qasm_str += \"creg c[{}];\\n\".format(len(wires))\n\n # get the user applied circuit operations\n operations = self.operations\n\n if rotations:\n # if requested, append diagonalizing gates corresponding\n # to circuit observables\n operations += self.diagonalizing_gates\n\n with QuantumTape() as tape:\n for op in operations:\n op.queue()\n\n if op.inverse:\n op.inv()\n\n # decompose the queue\n operations = tape.expand(depth=2, stop_at=lambda obj: obj.name in OPENQASM_GATES).operations\n\n # create the QASM code representing the operations\n for op in operations:\n try:\n gate = OPENQASM_GATES[op.name]\n except KeyError as e:\n raise ValueError(f\"Operation {op.name} not supported by the QASM serializer\") from e\n\n wire_labels = \",\".join([f\"q[{wires.index(w)}]\" for w in op.wires.tolist()])\n params = \"\"\n\n if op.num_params > 0:\n # If the operation takes parameters, construct a string\n # with parameter values.\n params = \"(\" + \",\".join([str(p) for p in op.parameters]) + \")\"\n\n qasm_str += \"{name}{params} {wires};\\n\".format(\n name=gate, params=params, wires=wire_labels\n )\n\n # apply computational basis measurements to each quantum register\n # NOTE: This is not strictly necessary, we could inspect self.observables,\n # and then only measure wires which are requested by the user. However,\n # some devices which consume QASM require all registers be measured, so\n # measure all wires to be safe.\n for wire in range(len(wires)):\n qasm_str += \"measure q[{wire}] -> c[{wire}];\\n\".format(wire=wire)\n\n return qasm_str",
"def computeSymbolicModel(self):\n x = self._stateSymb[0]\n y = self._stateSymb[1]\n z = self._stateSymb[2]\n x_dot = self._stateSymb[3]\n y_dot = self._stateSymb[4]\n z_dot = self._stateSymb[5]\n\n r = sp.sqrt(x**2 + y**2 + z**2)\n\n CD_drag, A_drag, mass_sat, rho_0_drag, r0_drag, \\\n H_drag, theta_dot = sp.symbols('CD_drag A_drag mass_sat rho_0_drag r0_drag H_drag theta_dot')\n\n Va = sp.sqrt((x_dot + theta_dot * y)**2 + (y_dot - theta_dot * x)**2 + z_dot**2)\n\n rho_A_drag = rho_0_drag*sp.exp(-(r-r0_drag)/H_drag)\n aux = -sp.Rational(1,2) * CD_drag * A_drag/mass_sat * rho_A_drag * Va\n\n drag_acc1 = aux * (x_dot + theta_dot * y)\n drag_acc2 = aux * (y_dot - theta_dot * x)\n drag_acc3 = aux * (z_dot)\n\n nmbrOfStates = self.getNmbrOfStates()\n\n self._modelSymb = []\n self._modelSymb.append(x_dot)\n self._modelSymb.append(y_dot)\n self._modelSymb.append(z_dot)\n self._modelSymb.append(drag_acc1)\n self._modelSymb.append(drag_acc2)\n self._modelSymb.append(drag_acc3)\n\n self._modelLambda = [0 for i in range(0, nmbrOfStates)]\n\n if self._usingDMC:\n for i in range(6, nmbrOfStates-3): # for every other state\n self._modelSymb.append(0)\n w_x = self._stateSymb[-3]\n w_y = self._stateSymb[-2]\n w_z = self._stateSymb[-1]\n B = sp.symarray('B', 3)\n self._modelSymb[3] += w_x\n self._modelSymb[4] += w_y\n self._modelSymb[5] += w_z\n self._modelSymb.append(-B[0]*w_x)\n self._modelSymb.append(-B[1]*w_y)\n self._modelSymb.append(-B[2]*w_z)\n\n for i in range(0, nmbrOfStates):\n self._modelLambda[i] = sp.lambdify((x, y, z, x_dot, y_dot, z_dot, w_x, w_y, w_z, CD_drag, A_drag, mass_sat, rho_0_drag, r0_drag, H_drag, theta_dot, [B]), self._modelSymb[i], \"numpy\")\n else:\n for i in range(6, nmbrOfStates): # for every other state\n self._modelSymb.append(0)\n for i in range(0, nmbrOfStates):\n self._modelLambda[i] = sp.lambdify((x, y, z, x_dot, y_dot, z_dot, CD_drag, A_drag, mass_sat, rho_0_drag, r0_drag, H_drag, theta_dot), self._modelSymb[i], \"numpy\")\n\n return self._modelSymb",
"def to_openqasm(self, wires=None, rotations=True, measure_all=True, precision=None):\n # We import decompose_queue here to avoid a circular import\n wires = wires or self.wires\n\n # add the QASM headers\n qasm_str = \"OPENQASM 2.0;\\n\"\n qasm_str += 'include \"qelib1.inc\";\\n'\n\n if self.num_wires == 0:\n # empty circuit\n return qasm_str\n\n # create the quantum and classical registers\n qasm_str += f\"qreg q[{len(wires)}];\\n\"\n qasm_str += f\"creg c[{len(wires)}];\\n\"\n\n # get the user applied circuit operations\n operations = self.operations\n\n if rotations:\n # if requested, append diagonalizing gates corresponding\n # to circuit observables\n operations += self.diagonalizing_gates\n\n with QuantumTape() as tape:\n for op in operations:\n op.queue()\n\n # decompose the queue\n # pylint: disable=no-member\n operations = tape.expand(depth=2, stop_at=lambda obj: obj.name in OPENQASM_GATES).operations\n\n # create the QASM code representing the operations\n for op in operations:\n try:\n gate = OPENQASM_GATES[op.name]\n except KeyError as e:\n raise ValueError(f\"Operation {op.name} not supported by the QASM serializer\") from e\n\n wire_labels = \",\".join([f\"q[{wires.index(w)}]\" for w in op.wires.tolist()])\n params = \"\"\n\n if op.num_params > 0:\n # If the operation takes parameters, construct a string\n # with parameter values.\n if precision is not None:\n params = \"(\" + \",\".join([f\"{p:.{precision}}\" for p in op.parameters]) + \")\"\n else:\n # use default precision\n params = \"(\" + \",\".join([str(p) for p in op.parameters]) + \")\"\n\n qasm_str += f\"{gate}{params} {wire_labels};\\n\"\n\n # apply computational basis measurements to each quantum register\n # NOTE: This is not strictly necessary, we could inspect self.observables,\n # and then only measure wires which are requested by the user. However,\n # some devices which consume QASM require all registers be measured, so\n # measure all wires by default to be safe.\n if measure_all:\n for wire in range(len(wires)):\n qasm_str += f\"measure q[{wire}] -> c[{wire}];\\n\"\n else:\n measured_wires = qml.wires.Wires.all_wires([m.wires for m in self.measurements])\n\n for w in measured_wires:\n wire_indx = self.wires.index(w)\n qasm_str += f\"measure q[{wire_indx}] -> c[{wire_indx}];\\n\"\n\n return qasm_str"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method just prints the symmetries to stdout.
|
def PrintSymmetries(self):
print()
print("Number of symmetries: {}".format(self.QE_nsym))
syms = self.GetSymmetries()
for i in range(self.QE_nsym):
print(" Symmetry {}".format(i+1))
for j in range(3):
print(" {:3.0f}{:3.0f}{:3.0f} | {:6.3f}".format(*syms[i][j,:]))
print()
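
For reference, a minimal self-contained sketch of what that format string produces for one symmetry, assuming GetSymmetries returns 3x4 blocks of the form [rotation | fractional translation] (an assumption based on the QE_s and QE_ft arrays set up in the constructor); the sample matrix is hypothetical:

import numpy as np

sym = np.hstack([np.eye(3), np.zeros((3, 1))])   # hypothetical identity symmetry with zero translation
for j in range(3):
    print("  {:3.0f}{:3.0f}{:3.0f} | {:6.3f}".format(*sym[j, :]))
# prints three rows like: "    1  0  0 |  0.000"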
|
[
"def OutputAllSymbols():\n new_symbols_file = os.path.join(ROOT_DIR, MODULE + \"-symbols.txt\")\n with open(new_symbols_file, 'w', encoding='utf-8') as out:\n for symbol in sorted(AllSymbols.keys()):\n out.write(symbol + \"\\n\")",
"def sym_print_tree(symb):\n\n assert type(symb) == str, \"symb must be str type\"\n def print_parts(tree, part=[]):\n if is_leaf(tree):\n if label(tree):\n print(str(symb).join(part))\n else:\n m = str(label(tree))\n left, right = branches(tree)[0], branches(tree)[1]\n print_parts(left, part + [m])\n print_parts(right, part)\n return print_parts",
"def list_symbols(self) -> str:\n pass",
"def print_symbol_sizes(self):\n if len(self.top_symbols) == 0:\n return\n\n demangled_symbols = zip(demangle([symbol for symbol, _ in self.top_symbols]), [size for _, size in self.top_symbols])\n max_digits = len(str(self.top_symbols[0][1]))\n fmt_string = click.style(\"** \", fg=\"green\") + click.style(\"{: <\" + str(max_digits) + \"}\", fg=\"yellow\") + \\\n click.style(\" : \", fg=\"green\") + \"{}\"\n\n lexer = CppLexer()\n formatter = Terminal256Formatter()\n for symbol, size in demangled_symbols:\n print(fmt_string.format(sizeof_fmt(size), highlight(symbol, lexer, formatter).rstrip()))",
"def show_dictionary():",
"def PrintSymbolTable(c_auxScopes, identation): \r\n c_auxScopes.pop(0)\r\n values =[]\r\n types =[]\r\n for scope in c_auxScopes:\r\n for var in scope:\r\n values.append(scope[var].s_value)\r\n types.append(scope[var].s_type)\r\n \r\n sortpre =sorted(values, key=len)\r\n sorttyp = sorted(types, key=len)\r\n longest_val = len(sortpre[-1])\r\n longest_type = len(sorttyp[-1])\r\n margin_table = ' '*(((longest_val+longest_type)//2)+8)\r\n\r\n print(identation+color.BLUWHITE +margin_table+ \"SYMBOLS TABLE\"+margin_table+ color.END)\r\n scope = c_auxScopes[0]\r\n for i in scope:\r\n if len(scope[i].s_value) < longest_val:\r\n if len(scope[i].s_value) % 2 == 0:\r\n print(' '+identation+color.BLUE+'Variable '+color.END+' '*((longest_val-len(scope[i].s_value)))+scope[i].s_value+\\\r\n ' '+color.BLUE+'|'+color.END+ ' '+ color.BLUE+'Type '+color.END+scope[i].s_type)\r\n else:\r\n print(' '+identation+color.BLUE+'Variable '+color.END+' '*((longest_val-len(scope[i].s_value)))+scope[i].s_value+\\\r\n ' '+color.BLUE+'|'+color.END+ ' '+ color.BLUE+'Type '+color.END+scope[i].s_type)\r\n else:\r\n if (scope[i].is_array):\r\n print(' '+identation+color.BLUE+'Variable '+color.END+scope[i].s_value+' ' +color.BLUE+'|'+color.END+ ' '+ \\\r\n color.BLUE+'Type '+color.END+scope[i].s_type + '[' + str(scope[i].array_indexes[0]) + '..' + str(scope[i].array_indexes[1]) + ']')\r\n else:\r\n print(' '+identation+color.BLUE+'Variable '+color.END+scope[i].s_value+' ' +color.BLUE+'|'+color.END+ ' '+ \\\r\n color.BLUE+'Type '+color.END+scope[i].s_type)\r\n print('\\n')",
"def print_grammar(grammar):\r\n for key in grammar:\r\n print key, \"-->\", grammar[key]",
"def build_sym(cls, savepath='ethem_system_output.txt'):\n\n # printing the name of the method and pprint the symbolic expression.\n def pprint(method):\n print('\\n{} :\\n'.format(method.__name__))\n sy.pprint(method(), wrap_line=False, use_unicode=False)\n\n\n # saving the original printing backend.\n original_stdout = sys.stdout\n\n # printing into a txt file.\n try:\n\n# # creating save directory of the save file.\n# build_path(savepath)\n\n # redirecting the printing to the txt file.\n sys.stdout = open(savepath, 'w')\n\n # defining the attribute 'bath_list'\n System.bath_list = System.subclass_list(RealBath)\n\n # printing and defining all the symbolic attributes\n pprint(System.build_phi_vect)\n pprint(System.build_capacity_matrix)\n pprint(System.build_steady_state_eq)\n pprint(System.build_eletro_thermal_eq)\n pprint(System.build_coupling_matrix)\n pprint(System.build_admittance_mat)\n\n # String to conclude the txt file, and assure a good pprint.\n print('\\n END OF PPRINT.')\n\n # reverting to the original printing backend\n sys.stdout = original_stdout\n # for the user\n print('Building System Done (written in {})'.format(savepath))\n\n finally:\n # even if an error occur, the priting backend is reverted to\n # the original one.\n sys.stdout = original_stdout",
"def symmetrize(input, output, symmetries, full_group): # pylint: disable=redefined-builtin\n model = _read_input(input)\n click.echo(\"Reading symmetries from file '{}' ...\".format(symmetries))\n sym = sr.io.load(symmetries)\n model_sym = _symmetrize(sym, model, full_group) # pylint: disable=assignment-from-no-return\n _write_output(model_sym, output)",
"def display_symbol_tables(self):\n self._init_versioninfo()\n\n for section in self.elffile.iter_sections():\n if not isinstance(section, SymbolTableSection):\n continue\n\n if section['sh_entsize'] == 0:\n self._emitline(\"\\nSymbol table '%s' has a sh_entsize of zero!\" % (\n bytes2str(section.name)))\n continue\n\n self._emitline(\"\\nSymbol table '%s' contains %s entries:\" % (\n bytes2str(section.name), section.num_symbols()))\n\n if self.elffile.elfclass == 32:\n self._emitline(' Num: Value Size Type Bind Vis Ndx Name')\n else: # 64\n self._emitline(' Num: Value Size Type Bind Vis Ndx Name')\n\n for nsym, symbol in enumerate(section.iter_symbols()):\n\n version_info = ''\n # readelf doesn't display version info for Solaris versioning\n if (section['sh_type'] == 'SHT_DYNSYM' and\n self._versioninfo['type'] == 'GNU'):\n version = self._symbol_version(nsym)\n if (version['name'] != bytes2str(symbol.name) and\n version['index'] not in ('VER_NDX_LOCAL',\n 'VER_NDX_GLOBAL')):\n if version['filename']:\n # external symbol\n version_info = '@%(name)s (%(index)i)' % version\n else:\n # internal symbol\n if version['hidden']:\n version_info = '@%(name)s' % version\n else:\n version_info = '@@%(name)s' % version\n\n # symbol names are truncated to 25 chars, similarly to readelf\n self._emitline('%6d: %s %5d %-7s %-6s %-7s %4s %.25s%s' % (\n nsym,\n self._format_hex(\n symbol['st_value'], fullhex=True, lead0x=False),\n symbol['st_size'],\n describe_symbol_type(symbol['st_info']['type']),\n describe_symbol_bind(symbol['st_info']['bind']),\n describe_symbol_visibility(symbol['st_other']['visibility']),\n describe_symbol_shndx(symbol['st_shndx']),\n bytes2str(symbol.name),\n version_info))",
"def askPlayer() -> None:\r\n print(\"Which symbol do you take?\")\r\n for data in Symbol:\r\n print(\"{}. {}\".format(data.value, data.name))",
"def display_symbol_tables(self):\r\n self._init_versioninfo()\r\n\r\n for section in self.elffile.iter_sections():\r\n if not isinstance(section, SymbolTableSection):\r\n continue\r\n\r\n if section['sh_entsize'] == 0:\r\n self._emitline(\"\\nSymbol table '%s' has a sh_entsize of zero!\" % (\r\n bytes2str(section.name)))\r\n continue\r\n\r\n self._emitline(\"\\nSymbol table '%s' contains %s entries:\" % (\r\n bytes2str(section.name), section.num_symbols()))\r\n\r\n if self.elffile.elfclass == 32:\r\n self._emitline(' Num: Value Size Type Bind Vis Ndx Name')\r\n else: # 64\r\n self._emitline(' Num: Value Size Type Bind Vis Ndx Name')\r\n\r\n for nsym, symbol in enumerate(section.iter_symbols()):\r\n\r\n version_info = ''\r\n # readelf doesn't display version info for Solaris versioning\r\n if (section['sh_type'] == 'SHT_DYNSYM' and\r\n self._versioninfo['type'] == 'GNU'):\r\n version = self._symbol_version(nsym)\r\n if (version['name'] != bytes2str(symbol.name) and\r\n version['index'] not in ('VER_NDX_LOCAL',\r\n 'VER_NDX_GLOBAL')):\r\n if version['filename']:\r\n # external symbol\r\n version_info = '@%(name)s (%(index)i)' % version\r\n else:\r\n # internal symbol\r\n if version['hidden']:\r\n version_info = '@%(name)s' % version\r\n else:\r\n version_info = '@@%(name)s' % version\r\n\r\n # symbol names are truncated to 25 chars, similarly to readelf\r\n self._emitline('%6d: %s %5d %-7s %-6s %-7s %4s %.25s%s' % (\r\n nsym,\r\n self._format_hex(\r\n symbol['st_value'], fullhex=True, lead0x=False),\r\n symbol['st_size'],\r\n describe_symbol_type(symbol['st_info']['type']),\r\n describe_symbol_bind(symbol['st_info']['bind']),\r\n describe_symbol_visibility(symbol['st_other']['visibility']),\r\n describe_symbol_shndx(symbol['st_shndx']),\r\n bytes2str(symbol.name),\r\n version_info))",
"def show_all_keys():\n\n private_keys = keychain.get_all_private_keys()\n if len(private_keys) == 0:\n print(\"There are no saved private keys.\")\n return\n print(\"Showing all private keys:\")\n for sk, seed in private_keys:\n print(\"\")\n print(\"Fingerprint:\", sk.get_g1().get_fingerprint())\n print(\"Master public key (m):\", sk.get_g1())\n print(\"Master private key (m):\", bytes(sk).hex())\n print(\n \"Farmer public key (m/12381/8444/0/0)::\",\n master_sk_to_farmer_sk(sk).get_g1(),\n )\n print(\"Pool public key (m/12381/8444/1/0):\", master_sk_to_pool_sk(sk).get_g1())\n print(\n \"First wallet key (m/12381/8444/2/0):\",\n master_sk_to_wallet_sk(sk, uint32(0)).get_g1(),\n )\n print(\n \"First wallet address:\",\n encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(0)).get_g1())),\n )\n assert seed is not None\n mnemonic = bytes_to_mnemonic(seed)\n print(\" Mnemonic seed (24 secret words):\")\n print(mnemonic)",
"def __str__(self):\n \n print_str = ''\n for _key_ in sorted(self.grammar):\n productions = ''\n for production in self.grammar[_key_]:\n for symbol, terminal in production:\n if terminal:\n productions += ' <'+symbol+'>'\n else:\n productions += ' '+symbol\n productions += ' | '\n print_str += '<'+_key_+'> ::='+productions[:-3]+'\\n'\n\n return print_str",
"def get_symbol_map():\n functions = {}\n for ea in Segments():\n for funcea in Functions(SegStart(ea), SegEnd(ea)):\n size = FindFuncEnd(funcea) - funcea\n functions[funcea] = (GetFunctionName(funcea), size)\n # It may not be necessary to sort by ea, but be safe...\n output_lines = []\n for i, (ea, (name, size)) in enumerate(sorted(functions.items())):\n if len(name) > 255:\n print \"ClemSym: truncating name\", name\n name = name[:255]\n line = \"%d: %s @ %07x %d\" % (i, name, ea, size)\n output_lines.append(line)\n return '\\n'.join(output_lines)",
"def print_clause(self):\n all_symbols = sorted(self.positive + self.negative)\n\n print(\"[\", end='')\n if len(all_symbols) == 0:\n print(\"]=FALSE\", end=\"\")\n else:\n for symbol in all_symbols[:-1]:\n if symbol in self.negative:\n symbol = \"~\" + symbol\n\n print(symbol, end=',')\n\n symbol = all_symbols[-1]\n if symbol in self.negative:\n symbol = \"~\" + symbol\n\n print(symbol, end='')\n\n print(\"]\", end='')",
"def get_hardcoded_sym_table() -> dict:\n sym_table = {'aa': 0, 'ae': 1, 'ah': 2, 'ao': 3, 'aw': 4, 'ay': 5, 'b': 6,\n 'ch': 7, 'd': 8, 'dh': 9, 'eh': 10, 'er': 11, 'ey': 12,\n 'f': 13, 'g': 14, 'hh': 15, 'ih': 16, 'iy': 17, 'jh': 18,\n 'k': 19, 'l': 20, 'm': 21, 'n': 22, 'ng': 23, 'ow': 24,\n 'oy': 25, 'p': 26, 'r': 27, 's': 28, 'sh': 29, 't': 30,\n 'th': 31, 'uh': 32, 'uw': 33, 'v': 34, 'w': 35, 'y': 36,\n 'z': 37, 'zh': 38, 'sil': 39}\n return sym_table",
"def print_model_map() -> None:\n print(_MODEL_MAP)",
"def show(self, internal=False):\n if internal:\n info = ''\n for i in range(0, self.size):\n name = self.name_list[i]\n interface = ''\n iface = self.agents[name]\n for s in iface:\n interface += s + '{' + iface[s]['state'] + '}' + '[' + iface[s]['bond'] + '] '\n info += self.info[name]['sID'] + name + '(' + interface[:-1] + '), '\n print(info[:-2]) # remove last comma+blank\n else:\n print(self.kappa_expression())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DIVIDE THE Q POINTS IN STARS ============================ This method divides the given q-point list into stars of symmetry-equivalent points. Remember, you need to pass the whole list of q points.
|
def SetupQStar(self, q_tot, supergroup = False):
# Setup the symmetries
#self.SetupQPoint()
# Lets copy the q list (we are going to pop items from it)
q_list = q_tot[:]
q_stars = []
count_qstar = 0
count_q = 0
q_indices = np.zeros( len(q_tot), dtype = int)
while len(q_list) > 0:
q = q_list[0]
# Get the star of the current q point
_q_ = np.array(q, dtype = np.float64) # Fortran explicit conversion
nq_new, sxq, isq, imq = symph.star_q(_q_, self.QE_at, self.QE_bg,
self.QE_nsym, self.QE_s, self.QE_invs, 0)
# print ("START WITH Q:", q)
# print ("FOUND STAR:")
# for jq in range(nq_new):
# print (sxq[:, jq])
# print ()
# print ("TELL ME THE BG:")
# print (self.QE_bg.transpose())
# print("Manual star:")
# for k in range(self.QE_nsym):
# trial_q = q.dot(self.QE_s[:,:, k])
# distance_q = Methods.get_min_dist_into_cell(self.QE_bg.T, trial_q, q)
# distance_mq = Methods.get_min_dist_into_cell(self.QE_bg.T, trial_q, -q)
# print("trial_q : {} | DQ: {:.4f} | DMQ: {:.4f}".format(trial_q, distance_q, distance_mq ))
# Prepare the star
q_star = [sxq[:, k] for k in range(nq_new)]
            # If imq is zero (-q is not already in the star) then add -q for each point in the star
if imq == 0:
old_q_star = q_star[:]
min_dist = 1
for q in old_q_star:
q_star.append(-q)
q_stars.append(q_star)
# Pop out the q_star from the q_list
for jq, q_instar in enumerate(q_star):
# Look for the q point in the star and pop them
#print("q_instar:", q_instar)
q_dist = [Methods.get_min_dist_into_cell(self.QE_bg.transpose(),
np.array(q_instar), q_point) for q_point in q_list]
pop_index = np.argmin(q_dist)
q_list.pop(pop_index)
# Use the same trick to identify the q point
q_dist = [Methods.get_min_dist_into_cell(self.QE_bg.transpose(),
np.array(q_instar), q_point) for q_point in q_tot]
q_index = np.argmin(q_dist)
#print (q_indices, count_q, q_index)
q_indices[count_q] = q_index
count_q += 1
return q_stars, q_indices
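
A minimal usage sketch of SetupQStar, assuming an already-initialized symmetry object (here called syms, a hypothetical name) whose lattice vectors and symmetry operations have been set up elsewhere; the q-point list is purely illustrative and only shows how the two return values can be consumed.

import numpy as np

# 'syms' is an assumed, already-initialized symmetry object exposing SetupQStar.
q_tot = [np.array([0.0, 0.0, 0.0]),
         np.array([0.5, 0.0, 0.0]),
         np.array([-0.5, 0.0, 0.0])]

q_stars, q_indices = syms.SetupQStar(q_tot)

# q_stars groups the q points into stars of symmetry-equivalent vectors;
# q_indices maps each q point, in the order the stars were built,
# back to its position in the original q_tot list.
for i, star in enumerate(q_stars):
    print("Star %d contains %d q points" % (i, len(star)))
print("Index map into q_tot:", q_indices)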
|
[
"def ApplyQStar(self, fcq, q_point_group):\n \n nq = np.shape(q_point_group)[0]\n final_fc = np.zeros(np.shape(fcq), dtype = np.complex128)\n \n # Setup all the symmetries\n self.SetupQPoint()\n \n new_dyn = np.zeros( (3 * self.QE_nat, 3*self.QE_nat), dtype = np.complex128, order = \"F\")\n \n dyn_star = np.zeros( (nq, 3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = \"F\")\n \n for i in range(nq):\n # Get the q points order\n nq_new, sxq, isq, imq = symph.star_q(q_point_group[i,:], self.QE_at, self.QE_bg, \n self.QE_nsymq, self.QE_s, self.QE_invs, 0)\n \n\n #print \"Found nq:\", nq_new \n #print \"IMQ?\", imq\n\n # Check if the q star is correct\n if nq_new != nq and imq != 0:\n print (\"Reciprocal lattice vectors:\")\n print (self.QE_bg.transpose() )\n print (\"Passed q star:\")\n print (q_point_group)\n print (\"QE q star:\")\n print (sxq[:, :nq_new].transpose())\n raise ValueError(\"Error, the passed q star does not match the one computed by QE\")\n# \n# # Print the star \n# print \"q point:\", q_point_group[i,:]\n# print \"Point in the stars:\", nq_new\n# print \"Star of q:\"\n# print sxq[:, :nq_new].transpose()\n# \n# print \"NEW_DYN:\", np.shape(new_dyn)\n# print \"AT:\", np.shape(self.QE_at)\n# print \"BG:\", np.shape(self.QE_bg)\n# print \"N SYM:\", self.QE_nsymq\n# print \"S:\", np.shape(self.QE_s)\n# print \"QE_INVS:\", np.shape(self.QE_invs)\n# print \"IRT:\", np.shape(self.QE_irt)\n# print \"RTAU:\", np.shape(self.QE_rtau)\n# print \"NQ_NEW:\", nq_new\n# print \"SXQ:\", np.shape(sxq)\n# print \"ISQ:\", np.shape(isq)\n# print \"IMQ:\", imq\n# print \"NAT:\", self.QE_nat\n \n new_dyn[:,:] = fcq[i,:,:]\n #print \"new dyn ready\"\n \n # Get the new matrix\n dyn_star = symph.q2qstar_out(new_dyn, self.QE_at, self.QE_bg, self.QE_nsymq, \n self.QE_s, self.QE_invs, self.QE_irt, self.QE_rtau,\n nq_new, sxq, isq, imq, nq, self.QE_nat)\n #print \"Fake\"\n \n #print \"XQ:\", q_point_group[i, :], \"NQ_NEW:\", nq_new\n\n # Now to perform the match bring the star in the same BZ as the q point\n # This facilitate the comparison between q points\n current_q = q_point_group.copy()\n #print \"Fake2\"\n# for xq in range(nq):\n# tmp = Methods.put_into_cell(self.QE_bg, sxq[:, xq])\n# sxq[:, xq] = tmp\n# current_q[xq,:] = Methods.put_into_cell(self.QE_bg, current_q [xq,:])\n# \n # Print the order of the q star\n sorting_q = np.arange(nq)\n for xq in range(nq):\n count = 0 # Debug (avoid no or more than one identification)\n for yq in range(nq):\n real_y = yq\n dot_f = 1\n if imq == 0 and yq >= nq_new:\n real_y -= nq_new\n dot_f = -1\n if Methods.get_min_dist_into_cell(self.QE_bg.transpose(), dot_f* sxq[:, real_y], current_q[xq,:]) < __EPSILON__: \n sorting_q[xq] = yq\n count += 1\n \n if count != 1:\n print (\"Original star:\")\n print (q_point_group)\n print (\"Reshaped star:\")\n print (current_q)\n print (\"Reciprocal lattice vectors:\")\n print (self.QE_bg.transpose() )\n print (\"STAR:\")\n print (sxq[:, :nq_new].transpose() )\n pta = (current_q[xq,:])\n print (\"Distances of xq in the QE star:\")\n for yq in range(nq_new):\n print (\"%.4f %.4f %.4f => \" % (sxq[0, yq], sxq[1, yq], sxq[2, yq]), Methods.get_min_dist_into_cell(self.QE_bg.transpose(), sxq[:, yq], current_q[xq,:]))\n raise ValueError(\"Error, the vector (%.3f, %.3f, %.3f) has %d identification in the star\" % (pta[0], pta[1], pta[2],\n count))\n #print \"Sorting array:\"\n #print sorting_q\n \n \n # Copy the matrix in the new one\n for xq in range(nq):\n for xat in range(self.QE_nat):\n for yat in 
range(self.QE_nat):\n final_fc[xq, 3*xat: 3*xat + 3, 3*yat : 3*yat + 3] += dyn_star[sorting_q[xq], :,:, xat, yat] \n \n \n # Now divide the matrix per the xq value\n final_fc /= nq\n \n # Overwrite the matrix\n fcq[:,:,:] = final_fc",
"def makequads1(starlist, n=20, s=0, d=50.0, verbose=True):\n quadlist = []\n sortedstars = star.sortstarlistbyflux(starlist)\n\n for fourstars in itertools.combinations(sortedstars[s:s+n], 4):\n if mindist(fourstars) > d:\n quadlist.append(Quad(fourstars))\n\n if verbose:\n print(\"Made %4i quads from %4i stars (combi n=%i s=%i d=%.1f)\" % (len(quadlist), len(starlist), n, s, d))\n\n return quadlist",
"def GetQStar(self, q_vector):\n self.SetupQPoint()\n nq_new, sxq, isq, imq = symph.star_q(q_vector, self.QE_at, self.QE_bg,\n self.QE_nsymq, self.QE_s, self.QE_invs, 0)\n \n #print (\"STAR IMQ:\", imq)\n if imq != 0:\n total_star = np.zeros( (nq_new, 3), dtype = np.float64)\n else:\n total_star = np.zeros( (2*nq_new, 3), dtype = np.float64)\n\n total_star[:nq_new, :] = sxq[:, :nq_new].transpose()\n\n if imq == 0:\n total_star[nq_new:, :] = -sxq[:, :nq_new].transpose()\n\n return total_star",
"def divide(intList, num, step): #4\n newIntList = []\n thingsToAdd = []\n for index in range(0, len(intList), step):\n thingsToAdd.append(index)\n for index, item in enumerate(intList):\n if index in thingsToAdd:\n newIntList.append(item / float(num))\n else:\n newIntList.append(item)\n return newIntList",
"def divide(self, number_of_segments, over_space=False):\n points = []\n compas_rhino.rs.EnableRedraw(False)\n if over_space:\n space = self.space(number_of_segments + 1)\n if space:\n points = [list(compas_rhino.rs.EvaluateCurve(self.guid, param)) for param in space]\n else:\n points = compas_rhino.rs.DivideCurve(self.guid, number_of_segments, create_points=False, return_points=True)\n points[:] = map(list, points)\n compas_rhino.rs.EnableRedraw(True)\n return points",
"def __call__(self, starlist):\n return self.group_stars(starlist)",
"def initqp(self):\n\n self.qp = get_spherical_quad_points()\n sp = cartesian2spherical(self.qp.points)\n self.sqp = sp",
"def quat_mean_lsq(quats, qm):\n # import pdb; pdb.set_trace()\n qr = np.zeros_like(qm)\n for k in range(quats.shape[1]):\n qr[k, :] = quat_mean_lsq_one(quats[:, k, :], qm[k, :])\n\n return qr",
"def compute_partition(ks,q):\n return sum(falling_fac(q,j)*esp(ks,j) for j in range(q+1))",
"def get_q_glue(self) -> List[float]:\n # We take q above the glue\n flange_area = self.thickness*self.flange_sheets*self.flange_width * 2\n flange_d = self.web_height + (self.thickness*self.flange_sheets) / 2 - self.y_bar\n\n deck_area = self.thickness * self.deck_sheets * (self.width - 2*self.flange_width)\n deck_d = self.web_height + (self.thickness * self.deck_sheets) / 2 - self.y_bar\n\n return [flange_area*flange_d + deck_area*deck_d]",
"def hellinger(p, q):\n coef = 0\n for i in range(len(p)):\n coef = coef + math.sqrt(p[i] * q[i])\n argu = 1 - coef\n if argu <= 0: # can happen to be 0, because of Python's rounding\n argu = 0.000000000000001\n return math.sqrt(argu)",
"def split_array(self,q):\n if isinstance(q, np.ndarray):\n x = 1.0*q\n else:\n x = 1.0*q.vector().array()\n\n X = x[0:np.size(x)/2]\n Y = x[np.size(x)/2: np.size(x)]\n\n return X,Y",
"def rational(x, q):\n return 1 / np.polyval(q, x)",
"def points_on_line(q, r):\n if util.dist(q, r) < 1.0:\n return []\n else:\n m = (q[0]+r[0])/2, (q[1]+r[1])/2, (q[2]+r[2])/2\n return points_on_line(q, m) + [m] + points_on_line(m, r)",
"def divide(self, frac):\n # by default, the element is indivisible\n return [self]",
"def quat_mean_lsq_one(qs, qm, eps=1e-5):\n qs = quaternion.as_quat_array(qs)\n qm = np.quaternion(*qm)\n while True:\n es = qs * qm.inverse()\n es = quaternion.as_rotation_vector(es)\n\n em = es.mean(axis=0)\n qem = quaternion.from_rotation_vector(em)\n\n qm = qem * qm\n\n if np.linalg.norm(em) <= eps:\n break\n\n return quaternion.as_float_array(qm)[0]",
"def rational2(x, p, q):\n return np.polyval( [1] + p,x) / np.polyval(q, x)",
"def group_stars(self, starlist):\n raise NotImplementedError('Needs to be implemented in a subclass.')",
"def divide(base, array):\n return [base / item for item in array]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SYMMETRIZE A RANK3 TENSOR ========================== This subroutine uses the current symmetries to symmetrize a rank-3 tensor. The tensor must be in the supercell space. The v3 argument will be overwritten.
|
def ApplySymmetryToTensor3(self, v3, initialize_symmetries = True):
if initialize_symmetries:
self.SetupFromSPGLIB()
# Apply the permutation symmetry
symph.permute_v3(v3)
# Apply the translational symmetries
symph.trans_v3(v3, self.QE_translations_irt)
# Apply all the symmetries at gamma
symph.sym_v3(v3, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)
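
A minimal, self-contained numpy sketch of the permutation step only, assuming symph.permute_v3 enforces invariance of the rank-3 tensor under any exchange of its three supercell indices (each of size 3*nat); it is not the Fortran routine itself, and the translational and point-group parts are left to the method above.

import itertools
import numpy as np

def permute_symmetrize_v3(v3):
    # Average a (n, n, n) tensor over the 6 permutations of its indices,
    # so that v3[a, b, c] == v3[b, a, c] == v3[c, b, a] == ...
    result = np.zeros_like(v3)
    for perm in itertools.permutations(range(3)):
        result += np.transpose(v3, perm)
    return result / 6.0

nat = 2                                   # toy supercell with 2 atoms
v3 = np.random.rand(3 * nat, 3 * nat, 3 * nat)
v3_sym = permute_symmetrize_v3(v3)

# After symmetrization the tensor is invariant under any index exchange.
assert np.allclose(v3_sym, np.transpose(v3_sym, (1, 0, 2)))
assert np.allclose(v3_sym, np.transpose(v3_sym, (2, 1, 0)))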
|
[
"def _sym3x3(T):\n T[1,0], T[2,0], T[2,1] = T[0,1], T[0,2], T[1,2]",
"def TransformSymmetricSecondRankTensor(self, *args) -> \"itkVariableLengthVectorD\":\n return _itkCompositeTransformPython.itkCompositeTransformD3_TransformSymmetricSecondRankTensor(self, *args)",
"def symCrossMat3x3( v ):\n\n A = matrix(SR,3,3)\n A[0,1] = -1*v[2][0]\n A[0,2] = v[1][0]\n A[1,0] = v[2][0]\n A[1,2] = -1*v[0][0]\n A[2,0] = -1*v[1][0]\n A[2,1] = v[0][0]\n\n return A",
"def ApplySymmetryToTensor4(self, v4, initialize_symmetries = True):\n if initialize_symmetries:\n self.SetupFromSPGLIB()\n\n # Apply the permutation symmetry\n symph.permute_v4(v4)\n\n # Apply the translational symmetries\n symph.trans_v4(v4, self.QE_translations_irt)\n\n # Apply all the symmetries at gamma\n symph.sym_v4(v4, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)",
"def itkStochasticFractalDimensionImageFilterIUC3IUS3_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIUC3IUS3_cast(*args)",
"def itkStochasticFractalDimensionImageFilterIF3IUS3_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIF3IUS3_cast(*args)",
"def itkStochasticFractalDimensionImageFilterIUS3IUS3_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIUS3IUS3_cast(*args)",
"def itkStochasticFractalDimensionImageFilterIUS3IUC3_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIUS3IUC3_cast(*args)",
"def itkStochasticFractalDimensionImageFilterID3IUS3_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterID3IUS3_cast(*args)",
"def itkSimilarityIndexImageFilterIUS3IUS3_cast(*args):\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterIUS3IUS3_cast(*args)",
"def itkStochasticFractalDimensionImageFilterIUL3IUS3_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIUL3IUS3_cast(*args)",
"def TransformSymmetricSecondRankTensor(self, *args) -> \"itkVariableLengthVectorD\":\n return _itkCompositeTransformPython.itkCompositeTransformD2_TransformSymmetricSecondRankTensor(self, *args)",
"def itkStochasticFractalDimensionImageFilterIUS3IUL3_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIUS3IUL3_cast(*args)",
"def itkStochasticFractalDimensionImageFilterID3IUC3_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterID3IUC3_cast(*args)",
"def swap(self, v: 'vectoritkImageUC3') -> \"void\":\n return _itkImagePython.vectoritkImageUC3_swap(self, v)",
"def ApplySymmetriesToV2(self, v2, apply_translations = True):\n\n # Apply the Permutation symmetry\n v2[:,:] = 0.5 * (v2 + v2.T)\n\n # First lets recall that the fortran subroutines\n # Takes the input as (3,3,nat,nat)\n new_v2 = np.zeros( (3,3, self.QE_nat, self.QE_nat), dtype = np.double, order =\"F\")\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n new_v2[:, :, i, j] = v2[3*i : 3*(i+1), 3*j : 3*(j+1)]\n\n # Apply the translations\n if apply_translations:\n # Check that the translations have been setted up\n assert len(np.shape(self.QE_translations_irt)) == 2, \"Error, symmetries not setted up to work in the supercell\"\n symph.trans_v2(new_v2, self.QE_translations_irt)\n \n # Apply the symmetrization\n symph.sym_v2(new_v2, self.QE_at, self.QE_bg, self.QE_s, self.QE_irt, self.QE_nsym, self.QE_nat)\n\n # Return back\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n v2[3*i : 3*(i+1), 3*j : 3*(j+1)] = new_v2[:, :, i, j]",
"def rot3d(*args):\n return _seb.rot3d(*args)",
"def itkStochasticFractalDimensionImageFilterIF3IUC3_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIF3IUC3_cast(*args)",
"def itkSimilarityIndexImageFilterIF3IF3_cast(*args):\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterIF3IF3_cast(*args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SYMMETRIZE EFFECTIVE CHARGES ============================ This subroutine applies the symmetries to the effective charges. As always, the eff_charges will be modified by this subroutine.
|
def ApplySymmetryToEffCharge(self, eff_charges):
nat, cart1, cart2 = np.shape(eff_charges)
assert cart1 == cart2
assert cart1 == 3
assert nat == self.QE_nat, "Error, the structure and effective charges are not compatible"
# Apply the sum rule
tot_sum = np.sum(eff_charges, axis = 0)
eff_charges -= np.tile(tot_sum, (nat, 1)).reshape((nat, 3,3 )) / nat
new_eff_charges = np.zeros((nat, cart1, cart2), dtype = np.double)
# Get the effective charges in crystal components
for i in range(nat):
eff_charges[i, :, :] = Methods.convert_matrix_cart_cryst(eff_charges[i, :, :], self.QE_at.T)
# Apply translations
if self.QE_translation_nr > 1:
for i in range(self.QE_translation_nr):
irt = self.QE_translations_irt[:, i] - 1
for j in range(nat):
new_mat = eff_charges[irt[j], :, :]
new_eff_charges[j, :, :] += new_mat
eff_charges[:,:,:] = new_eff_charges / self.QE_translation_nr
new_eff_charges[:,:,:] = 0.
# Apply rotations
for i in range(self.QE_nsym):
irt = self.QE_irt[i, :] - 1
for j in range(nat):
new_mat = self.QE_s[:,:, i].dot( eff_charges[irt[j], :, :].dot(self.QE_s[:,:,i].T))
new_eff_charges[j, :, :] += new_mat
new_eff_charges /= self.QE_nsym
# Convert back into cartesian
for i in range(nat):
eff_charges[i, :, :] = Methods.convert_matrix_cart_cryst(new_eff_charges[i, :, :], self.QE_at.T, True)
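
A minimal, self-contained numpy sketch of the sum-rule block at the top of the routine: the average of the per-atom effective-charge tensors is subtracted from each one so that their total (charge neutrality) is exactly zero. Plain broadcasting replaces the tile/reshape construction used above; the translation and rotation averaging is not repeated here.

import numpy as np

nat = 4                                   # toy structure with 4 atoms
eff_charges = np.random.rand(nat, 3, 3)   # one 3x3 Born effective-charge tensor per atom

# Enforce the acoustic sum rule: the sum over atoms of Z* must vanish.
tot_sum = np.sum(eff_charges, axis=0)
eff_charges -= tot_sum[None, :, :] / nat

assert np.allclose(np.sum(eff_charges, axis=0), 0.0)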
|
[
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]",
"def _symmetrize_charges(\n cls, molecule: \"Molecule\", charges: numpy.ndarray\n ) -> numpy.ndarray:\n\n symmetry_groups = get_atom_symmetries(molecule)\n\n charges_by_group = {group: [] for group in symmetry_groups}\n\n for group, charge in zip(symmetry_groups, charges):\n charges_by_group[group].append(charge)\n\n average_charges = {\n group: float(numpy.mean(charges_by_group[group]))\n for group in charges_by_group\n }\n\n return numpy.array([[average_charges[group]] for group in symmetry_groups])",
"def test_symmetrization_parallel(self):\n before = np.array(self.dataset.diffraction_group[\"intensity\"])\n symmetrized = np.array(before, copy=True)\n for index, _ in enumerate(self.dataset.time_points):\n symmetrized[:, :, index] = nfold(\n before[:, :, index], mod=3, center=(63, 65)\n )\n\n self.dataset.symmetrize(mod=3, center=(63, 65), processes=2)\n after = np.array(self.dataset.diffraction_group[\"intensity\"])\n\n self.assertTrue(np.allclose(symmetrized, after))",
"def symmetry_stretch(self, bond_to_change, bonds_to_keep, lattice_constants, stretching_percent):\n\t\tpass",
"def _format_metabs_sym(expr, rxn, mets):\n # For boundary reactions, generate an \"boundary\" metabolite for boundary\n if rxn.boundary and not mets:\n expr = sym.Mul(expr, sym.Symbol(rxn.boundary_metabolite))\n # For all other reactions\n else:\n for met in mets:\n met_ode = _mk_met_func(met)\n coeff = abs(rxn.get_coefficient(met.id))\n if coeff == 1:\n expr = sym.Mul(expr, met_ode)\n else:\n expr = sym.Mul(expr, sym.Pow(met_ode, coeff))\n return expr",
"def symmetricModelling(reset=bool, symmetry=int, seamTolerance=float, preserveSeam=int, seamFalloffCurve=\"string\", about=\"string\", axis=\"string\", allowPartial=bool, tolerance=float, topoSymmetry=bool):\n pass",
"def compute_seismic_force(self):\r\n # Please note that the period for computing the required strength should be bounded by CuTa\r\n period_for_strength = min(self.elf_parameters['modal period'], self.elf_parameters['period'])\r\n # The period used for computing story drift is not required to be bounded by CuTa\r\n if PERIOD_FOR_DRIFT_LIMIT:\r\n period_for_drift = min(self.elf_parameters['modal period'], self.elf_parameters['period'])\r\n else:\r\n period_for_drift = self.elf_parameters['modal period']\r\n # Call function defined in \"help_functions.py\" to determine the seismic response coefficient\r\n Cs_for_strength = calculate_Cs_coefficient(self.elf_parameters['SDS'], self.elf_parameters['SD1'],\r\n self.elf_parameters['S1'], period_for_strength,\r\n self.elf_parameters['TL'], self.elf_parameters['R'],\r\n self.elf_parameters['Ie'])\r\n Cs_for_drift = calculate_Cs_coefficient(self.elf_parameters['SDS'], self.elf_parameters['SD1'],\r\n self.elf_parameters['S1'], period_for_drift,\r\n self.elf_parameters['TL'], self.elf_parameters['R'],\r\n self.elf_parameters['Ie'])\r\n # Calculate the base shear\r\n base_shear_for_strength = Cs_for_strength * np.sum(self.gravity_loads['floor weight'])\r\n base_shear_for_drift = Cs_for_drift * np.sum(self.gravity_loads['floor weight'])\r\n # Call function defined in \"help_functions.py\" to compute k coefficient\r\n k = determine_k_coeficient(self.elf_parameters['period'])\r\n # Call function defined in \"help_functions.py\" to determine the lateral force for each floor level\r\n lateral_story_force_for_strength, story_shear_for_strength \\\r\n = calculate_seismic_force(base_shear_for_strength, self.gravity_loads['floor weight'], \\\r\n self.geometry['floor height'], k)\r\n lateral_story_force_for_drift, story_shear_for_drift \\\r\n = calculate_seismic_force(base_shear_for_drift, self.gravity_loads['floor weight'], \\\r\n self.geometry['floor height'], k)\r\n # Store information into class attribute\r\n self.seismic_force_for_strength = {'lateral story force': lateral_story_force_for_strength, \\\r\n 'story shear': story_shear_for_strength, \\\r\n 'base shear': base_shear_for_strength, 'Cs': Cs_for_strength}\r\n self.seismic_force_for_drift = {'lateral story force': lateral_story_force_for_drift, \\\r\n 'story shear': story_shear_for_drift, \\\r\n 'base shear': base_shear_for_drift, 'Cs': Cs_for_drift}",
"def _remove_slow_reactions(changes, change_reacs, rate_threshold=0.99):\n totalDestruct = sum(changes[np.where(changes < 0)])\n totalProd = sum(changes[np.where(changes > 0)])\n\n key_reactions = []\n key_changes = []\n form = 0.0\n destruct = 0.0\n\n for i, reaction in enumerate(change_reacs):\n if (changes[i] > 0) and (form < rate_threshold * totalProd):\n form = form + changes[i]\n key_reactions.append(reaction)\n key_changes.append(changes[i])\n elif (changes[i] < 0) and (abs(destruct) < rate_threshold * abs(totalDestruct)):\n destruct = destruct + changes[i]\n key_reactions.append(reaction)\n key_changes.append(changes[i])\n\n return totalProd, totalDestruct, key_reactions, key_changes",
"def simulate(rmg):\n \n for index, reactionSystem in enumerate(rmg.reactionSystems):\n \n if reactionSystem.sensitiveSpecies:\n logging.info('Conducting sensitivity analysis of reaction system %s...' % (index+1))\n \n if rmg.saveSimulationProfiles:\n csvfile = file(os.path.join(rmg.outputDirectory, 'simulation_{0}.csv'.format(index+1)),'w')\n worksheet = csv.writer(csvfile)\n else:\n worksheet = None\n \n sensWorksheet = []\n for spec in reactionSystem.sensitiveSpecies:\n csvfile = file(os.path.join(rmg.outputDirectory, 'sensitivity_{0}_SPC_{1}.csv'.format(index+1, spec.index)),'w')\n sensWorksheet.append(csv.writer(csvfile))\n \n pdepNetworks = []\n for source, networks in rmg.reactionModel.networkDict.items():\n pdepNetworks.extend(networks)\n terminated, obj = reactionSystem.simulate(\n coreSpecies = rmg.reactionModel.core.species,\n coreReactions = rmg.reactionModel.core.reactions,\n edgeSpecies = rmg.reactionModel.edge.species,\n edgeReactions = rmg.reactionModel.edge.reactions,\n toleranceKeepInEdge = 0,\n toleranceMoveToCore = 1,\n toleranceInterruptSimulation = 1,\n pdepNetworks = pdepNetworks,\n absoluteTolerance = rmg.absoluteTolerance,\n relativeTolerance = rmg.relativeTolerance,\n sensitivity = True,\n sensitivityAbsoluteTolerance = rmg.sensitivityAbsoluteTolerance,\n sensitivityRelativeTolerance = rmg.sensitivityRelativeTolerance,\n sensWorksheet = sensWorksheet,\n )",
"def reapply(self, circuit):\n self._modifiers(circuit.y(self.qargs[0]))",
"def amt_fixup(request, reform, model):\n cap_gains_params = [\"CG_rt1\", \"CG_brk1_0\", \"CG_brk1_1\",\n \"CG_brk1_2\", \"CG_brk1_3\", \"CG_brk1_cpi\",\n \"CG_rt2\", \"CG_brk2_0\", \"CG_brk2_1\",\n \"CG_brk2_2\", \"CG_brk2_3\", \"CG_brk2_cpi\",\n \"CG_rt3\"]\n\n for cgparam in cap_gains_params:\n if cgparam in reform:\n reform['AMT_' + cgparam] = reform[cgparam]\n if cgparam.endswith(\"_cpi\"):\n setattr(model, 'AMT_' + cgparam, reform[cgparam])\n else:\n setattr(model, 'AMT_' + cgparam, reform[cgparam][0])",
"def GetSymmetrizedWavefunction(psi):\n\tAssertSingleProc()\n\n\tsym = GetSymmetrizationIndexPairs(psi)\n\texchgPsi = GetWavefunctionParticleExchange(psi, sym)\n\n\t#create symmetrized wavefunction\n\tsymPsi = psi.Copy()\n\tsymPsi.GetData()[:] += exchgPsi.GetData()\n\tsymPsi.GetData()[:] *= 0.5\n\t\n\tantiSymPsi = exchgPsi\n\tantiSymPsi.GetData()[:] -= psi.GetData()\n\tantiSymPsi.GetData()[:] *= 0.5\n\n\treturn symPsi, antiSymPsi",
"def generate_symbols(self):\n\n logger.debug(f'- Generating symbols for {self.class_name}')\n\n # clear symbols storage\n self.f_list, self.g_list = list(), list()\n self.f_matrix, self.g_matrix = Matrix([]), Matrix([])\n\n # process tex_names defined in model\n # -----------------------------------------------------------\n for key in self.parent.tex_names.keys():\n self.tex_names[key] = Symbol(self.parent.tex_names[key])\n for instance in self.parent.discrete.values():\n for name, tex_name in zip(instance.get_names(), instance.get_tex_names()):\n self.tex_names[name] = tex_name\n # -----------------------------------------------------------\n\n for var in self.cache.all_params_names:\n self.inputs_dict[var] = Symbol(var)\n\n for var in self.cache.all_vars_names:\n tmp = Symbol(var)\n self.vars_dict[var] = tmp\n self.inputs_dict[var] = tmp\n if var in self.cache.vars_int:\n self.vars_int_dict[var] = tmp\n\n # store tex names defined in `self.config`\n for key in self.config.as_dict():\n tmp = Symbol(key)\n self.inputs_dict[key] = tmp\n if key in self.config.tex_names:\n self.tex_names[tmp] = Symbol(self.config.tex_names[key])\n\n # store tex names for pretty printing replacement later\n for var in self.inputs_dict:\n if var in self.parent.__dict__ and self.parent.__dict__[var].tex_name is not None:\n self.tex_names[Symbol(var)] = Symbol(self.parent.__dict__[var].tex_name)\n\n self.inputs_dict['dae_t'] = Symbol('dae_t')\n self.inputs_dict['sys_f'] = Symbol('sys_f')\n self.inputs_dict['sys_mva'] = Symbol('sys_mva')\n\n self.lambdify_func[0]['Indicator'] = lambda x: x\n self.lambdify_func[0]['imag'] = np.imag\n self.lambdify_func[0]['real'] = np.real\n self.lambdify_func[0]['im'] = np.imag\n self.lambdify_func[0]['re'] = np.real\n\n self.vars_list = list(self.vars_dict.values()) # useful for ``.jacobian()``",
"def _assign_sym2(cmap_ops):\n cmap_ops.phase('assign sym2')\n keycap_chars = tool_utils.parse_int_ranges(\"\"\"\n 0023 # Number Sign\n 002A # Asterisk\n 0030-0039 # Digits\n 20E3 # Combining Enclosing Keycap\"\"\")\n cmap_ops.add_all(keycap_chars, 'SYM2')",
"def Generate(self, dyn, qe_sym = None):\n \n # Check if the symmetries must be initialize\n if qe_sym is None:\n qe_sym = CC.symmetries.QE_Symmetry(dyn.structure)\n \n \n # Get the number of irreducible q points from the matrix\n self.nq = dyn.nqirr\n self.nat = dyn.structure.N_atoms\n \n # Initialize the symmetries at q = 0\n qe_sym.SetupQPoint()\n \n # Prepare the wyckoff basis\n tmp_wyck_gen = np.zeros((3 * self.nat, self.nat, 3), dtype = np.float64)\n \n for i in range( 3 * self.nat):\n x = i % 3\n n = i / 3\n tmp_wyck_gen[i, n, x] = 1\n \n # Symmetrize the vector\n qe_sym.SymmetrizeVector(tmp_wyck_gen[i, :, :])\n \n # Apply the gram-schmidt\n new_gen = tmp_wyck_gen.reshape((3 * self.nat, 3 * self.nat)).transpose()\n new_gen = scipy.linalg.orth(new_gen).transpose()\n \n # Get the number of wyckoff coefficients\n self.wyck_ncoeff = new_gen.shape()[0]\n \n # Reshape the array and get the coefficients\n self.wyck_gen = new_gen.reshape((self.wyck_ncoeff, self.nat, 3))\n \n r = np.arange(3 * self.nat)\n \n self.dyn_ncoeff = np.zeros(self.nq, dtype = int)\n self.dyn_gen = []\n \n # Cycle for each irreducible q point of the matrix\n for iq in range(self.nq):\n q = dyn.q_stars[iq][0]\n # Setup the symmetries for this q point\n qe_sym.SetupQPoint(q)\n \n gh = []\n \n for i in range(self.nat * 3):\n for j in range(i, self.nat * 3):\n # Take the generator\n fc = np.zeros((3 * self.nat, 3 * self.nat), dtype = np.complex128)\n fc[i, j] = 1\n \n # Apply the symmetry\n qe_sym.SymmetrizeDynQ(q, fc)\n \n # Check if the generator has already be generated\n is_new = True\n for k in range(i+1):\n mask = fc[k, :] != 0\n first_value = r[mask]\n if len(first_value):\n if k == i:\n if first_value[0] < j:\n is_new = False\n break\n else:\n is_new = False\n break\n \n # If the generator is new\n if is_new:\n qe_sym.ImposeSumRule(fc, \"simple\")\n \n # Check if the sum rule makes this generator desappearing\n if np.sum ((fc != 0).as_type(int)) != 0:\n gh.append(fc / np.sqrt(np.trace(fc.dot(fc))))\n \n dim = len(gh)\n \n # Prepare the gram-shmidt\n gh = np.array(gh, dtype = np.complex128)\n \n gh_new = np.reshape((dim, 9 * self.nat**2)).transpose()\n gh_new = scipy.linalg.orth(gh_new).transpose()\n \n self.dyn_ncoeff = np.shape(gh_new)[0]\n \n self.dyn_gen.append(np.reshape(gh_new, (self.dyn_ncoeff, 3*self.nat, 3*self.nat)))",
"def secularize(self):\n \n if not self.is_secular:\n \n self.secular_basis_op = self._get_current_basis_op()\n \n if self.as_operators:\n self._set_population_rates_from_operators()\n self._set_dephasing_rates_from_operators() \n else:\n self._set_population_rates_from_tensor()\n self._set_dephasing_rates_from_tensor()\n \n self.is_secular = True",
"def addApproxDiam(dics, verbose=True):\n # surface brightness relations for dwarf stars\n # from Kervella et al. 2004\n k04 = {}\n # coef0 coef1 error\n k04['BV']=[.9095, .4889, .0918]\n k04['BJ']=[.3029, .5216, .0307]\n k04['BH']=[.2630, .5134, .0189]\n k04['BK']=[.2538, .5158, .0100]\n k04['VJ']=[.3547, .5310, .0475]\n k04['VH']=[.2893, .5148, .0185]\n k04['VK']=[.2753, .5175, .0101]\n k04['JK']=[.5256, .5097, .0575]\n\n for k, d in enumerate(dics): # for each star\n diams = []\n errs = []\n for coul in k04.keys(): # for each color\n # check magnitudes are valid, compute diameter and error\n if d.has_key(coul[0]+'MAG') and d[coul[0]+'MAG']>-90 and\\\n d.has_key(coul[1]+'MAG') and d[coul[1]+'MAG']>-90:\n diams.append(diamSurfBri(d[coul[0]+'MAG'], d[coul[1]+'MAG'],\n k04[coul]))\n errs.append(k04[coul][2]*diams[-1])\n if len(diams)>1:\n # weighted average\\\n dics[k]['DIAM'] = reduce(lambda x,y: x+y, [diams[i]*errs[i]\n for i in range(len(diams))])/\\\n reduce(lambda x,y: x+y, errs)\n dics[k]['DIAM'] = round(dics[k]['DIAM'],\n int(-math.log10(dics[k]['DIAM']) +3))\n elif len(diams)==1:\n dics[k]['DIAM'] = round(diams[0], int(-math.log10(diams[0])+3))\n else:\n dics[k]['DIAM'] = 0 \n if verbose: \n print dics[k]['NAME'], '|', dics[k]['DIAM']\n return dics",
"def effective_costs(self, reporting_period=None):\n agreements = Agreement.objects.filter(task=self)\n if reporting_period:\n all_work_in_task = Work.objects.filter(task=self.id,\n reporting_period=reporting_period)\n else:\n all_work_in_task = Work.objects.filter(task=self.id)\n sum_costs = 0\n work_with_agreement = list()\n work_without_agreement = list()\n work_calculated = list()\n human_resource_list = dict()\n for work_object in all_work_in_task:\n if work_object.human_resource not in human_resource_list:\n human_resource_list[work_object.human_resource] = dict()\n for agreement in agreements:\n agreement_matches = agreement.match_with_work(work_object)\n if agreement_matches:\n if work_object not in work_with_agreement:\n work_with_agreement.append(work_object)\n if agreement not in human_resource_list.get(work_object.human_resource):\n human_resource_list.get(work_object.human_resource)[agreement] = list()\n human_resource_list.get(work_object.human_resource).get(agreement).append(work_object)\n if work_object not in work_with_agreement:\n work_without_agreement.append(work_object)\n\n for human_resource_dict in human_resource_list:\n agreement_list = Agreement.objects.filter(task=self, resource=human_resource_dict).order_by('costs__price')\n for agreement in agreement_list:\n agreement_remaining_amount = agreement.amount\n if human_resource_list[human_resource_dict].get(agreement):\n for work in human_resource_list[human_resource_dict].get(agreement):\n if work in work_with_agreement:\n if (agreement_remaining_amount - work.worked_hours) > 0:\n agreement_remaining_amount -= work.worked_hours\n getcontext().prec = 5\n sum_costs += Decimal(work.effort_hours())*agreement.costs.price\n work_calculated.append(work)\n for work in all_work_in_task:\n if work not in work_calculated:\n if work not in work_without_agreement:\n work_without_agreement.append(work)\n for work in work_without_agreement:\n default_resource_prices = ResourcePrice.objects.filter(resource=work.human_resource.id).order_by('price')\n if default_resource_prices:\n default_resource_price = default_resource_prices[0]\n getcontext().prec = 5\n sum_costs += Decimal(work.effort_hours())*default_resource_price.price\n else:\n sum_costs = 0\n break\n sum_costs = self.project.default_currency.round(sum_costs)\n return sum_costs",
"def _set_fixed_metabolites_in_rate(reaction, rate):\n to_strip = [\n str(metabolite) for metabolite in list(reaction.metabolites) if metabolite.fixed\n ]\n\n if reaction.model is not None and reaction.model.boundary_conditions:\n to_strip += [\n met\n for met, value in iteritems(reaction.model.boundary_conditions)\n if not isinstance(value, sym.Basic)\n ]\n if to_strip:\n to_sub = {\n _mk_met_func(met): sym.Symbol(met)\n for met in to_strip\n if _mk_met_func(met) in list(rate.atoms(sym.Function))\n }\n rate = rate.subs(to_sub)\n\n return rate"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SYMMETRIZE RAMAN TENSOR ============================ This subroutine applies the symmetries to the Raman tensor. As always, the raman_tensor argument will be modified by this subroutine.
|
def ApplySymmetryToRamanTensor(self, raman_tensor):
pol1, pol2, at_cart = np.shape(raman_tensor)
assert pol1 == pol2
assert pol2 == 3
        assert at_cart == 3*self.QE_nat, "Error, the structure and the Raman tensor are not compatible"
# Apply the permutation on the electric fields
raman_tensor += np.einsum("abc->bac", raman_tensor)
raman_tensor /= 2
# Apply the sum rule
# The sum over all the atom for each cartesian coordinate should be zero.
rt_reshaped = raman_tensor.reshape((3,3,self.QE_nat, 3))
# Sum over all the atomic indices
tot_sum = np.sum(rt_reshaped, axis = 2)
# Rebuild the shift to the tensor of the correct shape
shift = np.tile(tot_sum, (self.QE_nat, 1, 1, 1))
# Place the number of atoms at the correct position
# From the first to the third
shift = np.einsum("abcd->bcad", shift)
# Now we apply the sum rule
rt_reshaped -= shift / self.QE_nat
new_tensor = np.zeros(np.shape(rt_reshaped), dtype = np.double)
# Get the raman tensor in crystal components
for i in range(self.QE_nat):
rt_reshaped[:,:, i, :] = Methods.convert_3tensor_to_cryst(rt_reshaped[:,:, i, :], self.QE_at.T)
# Apply translations
if self.QE_translation_nr > 1:
for i in range(self.QE_translation_nr):
irt = self.QE_translations_irt[:, i] - 1
for j in range(self.QE_nat):
new_mat = rt_reshaped[:,:, irt[j], :]
new_tensor += new_mat
rt_reshaped = new_tensor / self.QE_translation_nr
new_tensor[:,:,:,:] = 0.
# Apply rotations
for i in range(self.QE_nsym):
irt = self.QE_irt[i, :] - 1
for j in range(self.QE_nat):
# Apply the symmetry to the 3 order tensor
new_mat = np.einsum("ai, bj, ck, ijk", self.QE_s[:,:,i], self.QE_s[:,:,i], self.QE_s[:,:,i], rt_reshaped[:,:, irt[j], :])
#new_mat = self.QE_s[:,:, i].dot( eff_charges[irt[j], :, :].dot(self.QE_s[:,:,i].T))
new_tensor[:,:,j,:] += new_mat
new_tensor /= self.QE_nsym
# Convert back into cartesian
for i in range(self.QE_nat):
rt_reshaped[:, :, i, :] = Methods.convert_3tensor_to_cryst(new_tensor[:,:,i,:], self.QE_at.T, True)
# Compress again the notation
raman_tensor[:,:,:] = rt_reshaped.reshape((3,3, 3*self.QE_nat))
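
A minimal, self-contained numpy sketch of the first step only: the Raman tensor alpha_{ab,k} is symmetrized in its two electric-field (polarization) indices by averaging it with its a<->b transpose, which is what the einsum line at the top does. Shapes are illustrative; the sum rule, translations and rotations are handled by the routine above and not repeated here.

import numpy as np

nat = 2                                   # toy structure with 2 atoms
raman = np.random.rand(3, 3, 3 * nat)     # (pol1, pol2, atomic-cartesian) components

# Symmetrize over the two electric-field indices:
# alpha_{ab,k} -> (alpha_{ab,k} + alpha_{ba,k}) / 2
raman = 0.5 * (raman + np.einsum("abc->bac", raman))

assert np.allclose(raman, np.transpose(raman, (1, 0, 2)))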
|
[
"def to_symmetric(self,sym):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation to_symmetric')\n\n # Return a copy of self if already a symtensor\n if self.is_symmetric:\n return self.copy()\n\n # Convert the full dense (sparse in symtensor lang) to symmetric version\n else:\n # Create the new tensor\n newten = self.ten.copy()\n assert(len(sym[0]) == len(newten.shape))\n # Convert the shape\n newshape = []\n for i in range(len(newten.shape)):\n newshape.append(len(sym[1][i]))\n newshape.append(newten.shape[i]/len(sym[1][i]))\n newten = newten.reshape(newshape)\n # Do a transpose on the indices\n order = []\n for i in range(len(sym[1])):\n order.append(2*i)\n for i in range(len(sym[1])):\n order.append(2*i+1)\n newten = newten.transpose(order)\n # Create a random symtensor\n newsymten = rand(newten.shape[len(sym[1]):],\n sym=sym,\n backend=self.backend,\n dtype=self.dtype,\n legs=self.legs,\n in_mem=self.in_mem)\n # Contract with delta to get dense irrep\n delta = newsymten.ten.get_irrep_map()\n einstr = LETTERS[:len(sym[1])].upper() + \\\n LETTERS[:len(sym[1])] + ',' + \\\n LETTERS[:len(sym[1])].upper() + '->' + \\\n LETTERS[:len(sym[1])-1].upper() + \\\n LETTERS[:len(sym[1])]\n newten = newsymten.backend.einsum(einstr,newten,delta)\n # Put the result into a symtensor\n newsymten.ten.array = newten\n # Return result\n return newsymten",
"def preprocessing_symb(symbol):\n symbol = torch.Tensor(symbol)\n return symbol[None,None,:,:]",
"def test_symmetrization_parallel(self):\n before = np.array(self.dataset.diffraction_group[\"intensity\"])\n symmetrized = np.array(before, copy=True)\n for index, _ in enumerate(self.dataset.time_points):\n symmetrized[:, :, index] = nfold(\n before[:, :, index], mod=3, center=(63, 65)\n )\n\n self.dataset.symmetrize(mod=3, center=(63, 65), processes=2)\n after = np.array(self.dataset.diffraction_group[\"intensity\"])\n\n self.assertTrue(np.allclose(symmetrized, after))",
"def flip_signs(self):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation flip_signs')\n\n # Do the operation\n if self.sym is not None:\n self.sym[0] = ''.join(FLIP[i] for i in self.sym[0])\n self.ten.sym[0] = ''.join(FLIP[i] for i in self.ten.sym[0])",
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]",
"def symmetrize(input, output, symmetries, full_group): # pylint: disable=redefined-builtin\n model = _read_input(input)\n click.echo(\"Reading symmetries from file '{}' ...\".format(symmetries))\n sym = sr.io.load(symmetries)\n model_sym = _symmetrize(sym, model, full_group) # pylint: disable=assignment-from-no-return\n _write_output(model_sym, output)",
"def tanhshrink(a: TensorLikeType) -> TensorLikeType:\n if not isinstance(a, TensorLike):\n raise RuntimeError(\n \"Expected a tensor input for an elementwise unary operation!\"\n )\n return a - torch.tanh(a)",
"def compare_raman_response_functions():\n simparams = prepare_sim_params(0.0, \n [0 ,0,0,0],\n 800e-9,\n 0.0,\n 1.0,\n 11, # Npoints\n 1.0, #tempspread \n )\n r_bw = raman_blowwood(simparams['tvec'])\n r_la = raman_linagrawal(simparams['tvec'])\n r_hc = raman_hollenbeck(simparams['tvec'])\n\n ft_bw = np.fft.fftshift(np.fft.ifft(np.fft.fftshift( r_bw)))\n ft_la = np.fft.fftshift(np.fft.ifft(np.fft.fftshift(r_la)))\n ft_hc = np.fft.fftshift(np.fft.ifft(np.fft.fftshift(r_hc)))\n\n plt.figure(1)\n plt.title(\"temporal response function R(t)\")\n plt.plot( simparams['tvec']/1e-12, r_bw, color=\"#ff0000\")\n plt.plot( simparams['tvec']/1e-12, r_la, color=\"#000000\")\n plt.plot( simparams['tvec']/1e-12, r_hc, color=\"#0000ff\")\n plt.xlabel(\"time / ps\")\n plt.ylabel(\"response / a.u.\")\n plt.legend([\"Blow, Wood\",\"Lin, Agrawal\",\"Hollenbeck, Cantrell\"])\n plt.xlim([-.2, np.max(simparams['tvec']/1e-12)])\n\n plt.figure(2)\n plt.subplot(211)\n plt.title(\"fourier transform R(omega) IMAG part\")\n plt.plot( simparams['relomvec']/2e12/np.pi, np.imag(ft_bw), color=\"#ff0000\")\n plt.plot( simparams['relomvec']/2e12/np.pi, np.imag(ft_la), color=\"#000000\")\n plt.plot( simparams['relomvec']/2e12/np.pi, np.imag(ft_hc), color=\"#0000ff\")\n plt.axhline(y=0, color=\"#999999\")\n plt.legend([\"Blow, Wood\",\"Lin, Agrawal\",\"Hollenbeck, Cantrell\"])\n plt.xlim([-5,45])\n plt.xlabel(\"frequency / THz\")\n plt.ylabel(\"imag part of R(omega)\")\n\n plt.subplot(212)\n plt.title(\"fourier transform R(omega) REAL part\")\n plt.plot( simparams['relomvec']/2e12/np.pi, np.real(ft_bw), color=\"#ff0000\")\n plt.plot( simparams['relomvec']/2e12/np.pi, np.real(ft_la), color=\"#000000\")\n plt.plot( simparams['relomvec']/2e12/np.pi, np.real(ft_hc), color=\"#0000ff\")\n plt.legend([\"Blow, Wood\",\"Lin, Agrawal\",\"Hollenbeck, Cantrell\"])\n plt.axhline(y=0, color=\"#999999\")\n plt.xlabel(\"frequency / THz\")\n plt.ylabel(\"real part of R(omega)\")\n plt.xlim([-5,45])\n \n plt.show()",
"def force_symmetry(matrix, symmetry):\n symmetric_matrix = matrix.copy()\n\n if symmetry is None:\n return symmetric_matrix\n\n for index, x in np.ndenumerate(matrix):\n\n if symmetry == 'upper':\n if index[0] > index[1]:\n symmetric_matrix[index] = matrix[tuple(reversed(index))]\n\n if symmetry == 'lower':\n if index[0] < index[1]:\n symmetric_matrix[index] = matrix[tuple(reversed(index))]\n\n if symmetry == 'mean':\n if index[0] != index[1]:\n symmetric_matrix[index] = np.mean((matrix[index], matrix[tuple(reversed(index))]))\n\n return symmetric_matrix",
"def buildRamanMuellerMatrix(ramanTensor: np.ndarray):\n\n # Check type of input\n if not isinstance(ramanTensor, np.ndarray):\n raise TypeError(\"utilities.buildRamanMuellerMatrix expects a numpy.ndarray as input!\")\n if ramanTensor.shape != (3,3):\n raise TypeError(\"utilities.buildRamanMuellerMatrix expects a 3x3 numpy.ndarray as input!\")\n\n # Extract elements from raman tensor\n xx = ramanTensor[0,0]\n xy = ramanTensor[0,1]\n yx = ramanTensor[1,0]\n yy = ramanTensor[1,1]\n\n # Build new matrix\n # The conversion is described in ramanMuellerMatrix.pdf\n # This conversion does only work for fully polarised light with no circular polarised component\n muellerMatrix = np.array([ [ (xx**2 + yx**2 + xy**2 + yy**2)/2 , (xx**2 + yx**2 - xy**2 - yy**2)/2 , xy*xx + yx*yy , 0 ],\n [ (xx**2 - yx**2 + xy**2 - yy**2)/2 , (xx**2 - yx**2 - xy**2 + yy**2)/2 , xy*xx - yx*yy , 0 ],\n [ xx*yx + xy*yy , xx*yx - xy*yy , xx*yy + xy*yx , 0 ],\n [ 0 , 0 , 0 , 0 ] ])\n\n return muellerMatrix",
"def apply( self, digram ):\n\t\t# ensure rule utility\n\t\t#log.debug( \" replacing digram at %s with reference to rule %s\" % (digram.debugstr(), self.debugstr()) )\n\t\tnewsymbol = digram.replace_digram( Ruleref( self ) )\n\t\treturn newsymbol",
"def getRawSymmetryMatrix(*args, **kwargs):\n \n pass",
"def write_in_qp(\n self, tensor: Tensor, format_: str, name_format=None, set_symms=True\n ):\n\n terms = tensor.subst_all(self.f_in_qp).simplify().local_terms\n\n # Internal book keeping, maps the cr/an order to lhs and the rhs terms\n # of the definition of the new matrix element.\n transf = {}\n\n rewritten_terms = []\n\n for term in terms:\n cr_order = 0\n an_order = 0\n indices = []\n for i in term.vecs:\n if len(i.indices) != 2:\n raise ValueError(\n 'Invalid operator to rewrite, one index expected', i\n )\n char, index = i.indices\n if char == CR:\n assert an_order == 0\n cr_order += 1\n elif char == AN:\n an_order += 1\n else:\n assert False\n\n indices.append(index)\n continue\n\n norm = factorial(cr_order) * factorial(an_order)\n order = (cr_order, an_order)\n tot_order = cr_order + an_order\n\n base = IndexedBase(format_.format(*order))\n if name_format is not None:\n base_name = name_format.format(*order)\n self.set_name(**{base_name: base})\n\n indices[cr_order:tot_order] = reversed(indices[cr_order:tot_order])\n if tot_order > 0:\n new_amp = base[tuple(indices)]\n else:\n new_amp = base.label\n orig_amp = term.amp\n\n new_sums = []\n wrapped_sums = []\n for i in term.sums:\n if new_amp.has(i[0]):\n new_sums.append(i)\n else:\n wrapped_sums.append(i)\n continue\n\n def_term = Term(\n sums=tuple(wrapped_sums), amp=orig_amp * norm, vecs=()\n )\n\n if order in transf:\n entry = transf[order]\n assert entry[0] == new_amp\n entry[1].append(def_term)\n else:\n transf[order] = (new_amp, [def_term])\n rewritten_terms.append(Term(\n sums=tuple(new_sums), amp=new_amp / norm, vecs=term.vecs\n ))\n if set_symms and (cr_order > 1 or an_order > 1):\n self.set_dbbar_base(base, cr_order, an_order)\n\n continue\n\n defs = [\n self.define(lhs, self.create_tensor(rhs_terms))\n for lhs, rhs_terms in transf.values()\n ]\n\n return self.create_tensor(rewritten_terms), defs",
"def ApplySymmetryToTensor3(self, v3, initialize_symmetries = True):\n if initialize_symmetries:\n self.SetupFromSPGLIB()\n\n # Apply the permutation symmetry\n symph.permute_v3(v3)\n\n # Apply the translational symmetries\n symph.trans_v3(v3, self.QE_translations_irt)\n\n # Apply all the symmetries at gamma\n symph.sym_v3(v3, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)",
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def list2sym(lst):\n ...",
"def SymmetrizeVector(self, vector):\n\n # Apply Translations if any\n self.ApplyTranslationsToVector(vector)\n \n # Prepare the real vector\n tmp_vector = np.zeros( (3, self.QE_nat), dtype = np.float64, order = \"F\")\n \n for i in range(self.QE_nat):\n tmp_vector[0, i] = vector[i,0]\n tmp_vector[1, i] = vector[i,1]\n tmp_vector[2,i] = vector[i,2]\n \n symph.symvector(self.QE_nsymq, self.QE_irt, self.QE_s, self.QE_at, self.QE_bg,\n tmp_vector, self.QE_nat)\n \n \n for i in range(self.QE_nat):\n vector[i, :] = tmp_vector[:,i]",
"def symmetry_rotation(self, bond_to_rotate, normal_direction, angles):\n\t\tpass",
"def __rmatmul__(self, a):\n if isinstance(a, tm):\n return tm(a.TM @ self.TM)\n else:\n if isinstance(a, np.ndarray):\n return tm(a @ self.TM)\n return tm(a * self.TAA)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SYMMETRIZE A RANK4 TENSOR ========================== This subroutine uses the current symmetries to symmetrize a rank-4 tensor. The tensor must be in the supercell space. The v4 argument will be overwritten.
|
def ApplySymmetryToTensor4(self, v4, initialize_symmetries = True):
if initialize_symmetries:
self.SetupFromSPGLIB()
# Apply the permutation symmetry
symph.permute_v4(v4)
# Apply the translational symmetries
symph.trans_v4(v4, self.QE_translations_irt)
# Apply all the symmetries at gamma
symph.sym_v4(v4, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)
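
As in the rank-3 case, a minimal numpy sketch of the permutation step only, assuming symph.permute_v4 enforces invariance of the rank-4 tensor under the 4! = 24 permutations of its four supercell indices; the translational and point-group symmetrization is left to the method above.

import itertools
import numpy as np

def permute_symmetrize_v4(v4):
    # Average a (n, n, n, n) tensor over all 24 permutations of its indices.
    result = np.zeros_like(v4)
    perms = list(itertools.permutations(range(4)))
    for perm in perms:
        result += np.transpose(v4, perm)
    return result / len(perms)

nat = 1                                   # toy one-atom cell: indices of size 3
v4 = np.random.rand(3 * nat, 3 * nat, 3 * nat, 3 * nat)
v4_sym = permute_symmetrize_v4(v4)

assert np.allclose(v4_sym, np.transpose(v4_sym, (1, 0, 2, 3)))
assert np.allclose(v4_sym, np.transpose(v4_sym, (0, 1, 3, 2)))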
|
[
"def SymmetrizeVector(self, vector):\n\n # Apply Translations if any\n self.ApplyTranslationsToVector(vector)\n \n # Prepare the real vector\n tmp_vector = np.zeros( (3, self.QE_nat), dtype = np.float64, order = \"F\")\n \n for i in range(self.QE_nat):\n tmp_vector[0, i] = vector[i,0]\n tmp_vector[1, i] = vector[i,1]\n tmp_vector[2,i] = vector[i,2]\n \n symph.symvector(self.QE_nsymq, self.QE_irt, self.QE_s, self.QE_at, self.QE_bg,\n tmp_vector, self.QE_nat)\n \n \n for i in range(self.QE_nat):\n vector[i, :] = tmp_vector[:,i]",
"def symmetrize(dimTags, a, b, c, d):\n api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)\n ierr = c_int()\n lib.gmshModelGeoSymmetrize(\n api_dimTags_, api_dimTags_n_,\n c_double(a),\n c_double(b),\n c_double(c),\n c_double(d),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGeoSymmetrize returned non-zero error code: \",\n ierr.value)",
"def to_symmetric(self,sym):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation to_symmetric')\n\n # Return a copy of self if already a symtensor\n if self.is_symmetric:\n return self.copy()\n\n # Convert the full dense (sparse in symtensor lang) to symmetric version\n else:\n # Create the new tensor\n newten = self.ten.copy()\n assert(len(sym[0]) == len(newten.shape))\n # Convert the shape\n newshape = []\n for i in range(len(newten.shape)):\n newshape.append(len(sym[1][i]))\n newshape.append(newten.shape[i]/len(sym[1][i]))\n newten = newten.reshape(newshape)\n # Do a transpose on the indices\n order = []\n for i in range(len(sym[1])):\n order.append(2*i)\n for i in range(len(sym[1])):\n order.append(2*i+1)\n newten = newten.transpose(order)\n # Create a random symtensor\n newsymten = rand(newten.shape[len(sym[1]):],\n sym=sym,\n backend=self.backend,\n dtype=self.dtype,\n legs=self.legs,\n in_mem=self.in_mem)\n # Contract with delta to get dense irrep\n delta = newsymten.ten.get_irrep_map()\n einstr = LETTERS[:len(sym[1])].upper() + \\\n LETTERS[:len(sym[1])] + ',' + \\\n LETTERS[:len(sym[1])].upper() + '->' + \\\n LETTERS[:len(sym[1])-1].upper() + \\\n LETTERS[:len(sym[1])]\n newten = newsymten.backend.einsum(einstr,newten,delta)\n # Put the result into a symtensor\n newsymten.ten.array = newten\n # Return result\n return newsymten",
"def permute_into_4(origin, table):\n rowNum = ((origin & 0b100000) >> 4) + (origin & 0b000001)\n colNum = (origin & 0b011110) >> 1\n return table[rowNum][colNum]",
"def symmetrize(dimTags, a, b, c, d):\n api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)\n ierr = c_int()\n lib.gmshModelOccSymmetrize(\n api_dimTags_, api_dimTags_n_,\n c_double(a),\n c_double(b),\n c_double(c),\n c_double(d),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelOccSymmetrize returned non-zero error code: \",\n ierr.value)",
"def _sym3x3(T):\n T[1,0], T[2,0], T[2,1] = T[0,1], T[0,2], T[1,2]",
"def symmetrize(input, output, symmetries, full_group): # pylint: disable=redefined-builtin\n model = _read_input(input)\n click.echo(\"Reading symmetries from file '{}' ...\".format(symmetries))\n sym = sr.io.load(symmetries)\n model_sym = _symmetrize(sym, model, full_group) # pylint: disable=assignment-from-no-return\n _write_output(model_sym, output)",
"def isometrize(self):\n for idx,w0 in enumerate(self.W[0]):\n temp=np.reshape(w0,[self.d**2,self.Dbond])\n dmin=min(temp.shape)\n Q,R=np.linalg.qr(temp)\n self.W[0][idx]=np.reshape(Q,[self.d,self.d,dmin])\n\n for i in range(1,self.Nlayer):\n for idx,wj in enumerate(self.W[i]):\n temp=np.reshape(wj,[self.Dbond*self.Dbond,wj.shape[2]])\n Q,R=np.linalg.qr(temp)\n self.W[i][idx]=np.reshape(Q,[self.Dbond,self.Dbond,wj.shape[2]])",
"def ApplySymmetryToRamanTensor(self, raman_tensor):\n \n pol1, pol2, at_cart = np.shape(raman_tensor)\n\n assert pol1 == pol2 \n assert pol2 == 3\n assert at_cart == 3*self.QE_nat, \"Error, the structure and effective charges are not compatible\"\n\n # Apply the permutation on the electric fields\n raman_tensor += np.einsum(\"abc->bac\", raman_tensor)\n raman_tensor /= 2\n\n # Apply the sum rule\n # The sum over all the atom for each cartesian coordinate should be zero.\n rt_reshaped = raman_tensor.reshape((3,3,self.QE_nat, 3))\n\n # Sum over all the atomic indices\n tot_sum = np.sum(rt_reshaped, axis = 2)\n\n # Rebuild the shift to the tensor of the correct shape\n shift = np.tile(tot_sum, (self.QE_nat, 1, 1, 1))\n\n # Place the number of atoms at the correct position\n # From the first to the third\n shift = np.einsum(\"abcd->bcad\", shift)\n \n # Now we apply the sum rule\n rt_reshaped -= shift / self.QE_nat\n new_tensor = np.zeros(np.shape(rt_reshaped), dtype = np.double)\n\n # Get the raman tensor in crystal components\n for i in range(self.QE_nat):\n rt_reshaped[:,:, i, :] = Methods.convert_3tensor_to_cryst(rt_reshaped[:,:, i, :], self.QE_at.T)\n\n # Apply translations\n if self.QE_translation_nr > 1:\n for i in range(self.QE_translation_nr):\n irt = self.QE_translations_irt[:, i] - 1\n for j in range(self.QE_nat):\n new_mat = rt_reshaped[:,:, irt[j], :]\n new_tensor += new_mat\n\n rt_reshaped = new_tensor / self.QE_translation_nr\n new_tensor[:,:,:,:] = 0.\n\n # Apply rotations\n for i in range(self.QE_nsym):\n irt = self.QE_irt[i, :] - 1\n\n for j in range(self.QE_nat):\n # Apply the symmetry to the 3 order tensor\n new_mat = np.einsum(\"ai, bj, ck, ijk\", self.QE_s[:,:,i], self.QE_s[:,:,i], self.QE_s[:,:,i], rt_reshaped[:,:, irt[j], :])\n #new_mat = self.QE_s[:,:, i].dot( eff_charges[irt[j], :, :].dot(self.QE_s[:,:,i].T))\n new_tensor[:,:,j,:] += new_mat\n\n new_tensor /= self.QE_nsym\n\n # Convert back into cartesian\n for i in range(self.QE_nat):\n rt_reshaped[:, :, i, :] = Methods.convert_3tensor_to_cryst(new_tensor[:,:,i,:], self.QE_at.T, True)\n\n # Compress again the notation\n raman_tensor[:,:,:] = rt_reshaped.reshape((3,3, 3*self.QE_nat))",
"def TransformSymmetricSecondRankTensor(self, *args) -> \"itkVariableLengthVectorD\":\n return _itkCompositeTransformPython.itkCompositeTransformD3_TransformSymmetricSecondRankTensor(self, *args)",
"def ApplySymmetryToVector(symmetry, vector, unit_cell, irt):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n work = np.zeros( (nat, 3))\n sym = symmetry[:, :3]\n\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n w1 = sym.dot(v1.T).T\n\n # Return in cartesian coordinates\n work[irt[:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum(\"ab,a\", unit_cell, w1)\n\n return work",
"def setUpTripartiteSystem(d):\n rho = setUpNQudits(3, d)\n POVM = setUpPOVMElements(d)\n tau = TensorProduct(rho[0], rho[1], rho[2])\n M = TensorProduct(POVM[0], POVM[1], sympy.eye(d,d))\n return (M * tau).trace()",
"def inverse4X4(matrix):\n # check the shape\n if matrix.shape !=(4,4) and matrix.shape !=(16,) :\n raise ValueError(\"Argument must Numeric array of shape (4,4) or (16,)\")\n return None\n if matrix.shape ==(16,):\n matrix=numpy.array(matrix,'f')\n matrix=numpy.reshape(matrix,(4,4)) # force the matrix to be (4,4)\n t_1=numpy.identity(4,'f')\n t_1[:2,3]= - matrix[:2, 3]\n r_1=numpy.identity(4,'f')\n r_1[:3,:3] = numpy.transpose(matrix[:3,:3])\n mat_inverse=numpy.dot(r_1, t_1)\n #asert numpy.dot(matrix, mat_inverse) is numpy.identity(4,'f')\n return mat_inverse",
"def test_symmetrization_parallel(self):\n before = np.array(self.dataset.diffraction_group[\"intensity\"])\n symmetrized = np.array(before, copy=True)\n for index, _ in enumerate(self.dataset.time_points):\n symmetrized[:, :, index] = nfold(\n before[:, :, index], mod=3, center=(63, 65)\n )\n\n self.dataset.symmetrize(mod=3, center=(63, 65), processes=2)\n after = np.array(self.dataset.diffraction_group[\"intensity\"])\n\n self.assertTrue(np.allclose(symmetrized, after))",
"def TransformSymmetricSecondRankTensor(self, *args) -> \"itkVariableLengthVectorD\":\n return _itkCompositeTransformPython.itkCompositeTransformD2_TransformSymmetricSecondRankTensor(self, *args)",
"def ApplySymmetryToTensor3(self, v3, initialize_symmetries = True):\n if initialize_symmetries:\n self.SetupFromSPGLIB()\n\n # Apply the permutation symmetry\n symph.permute_v3(v3)\n\n # Apply the translational symmetries\n symph.trans_v3(v3, self.QE_translations_irt)\n\n # Apply all the symmetries at gamma\n symph.sym_v3(v3, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)",
"def test_reshape_4d_to_2d(self):\n example_0_slice_0 = numpy.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],\n dtype=numpy.float32)\n example_0_slice_1 = numpy.array([[-1., -2., -3.], [-4., -5., -6.], [-7., -8., -9.]],\n dtype=numpy.float32)\n tuple_slices_0 = (\n numpy.expand_dims(example_0_slice_0, axis=2),\n numpy.expand_dims(example_0_slice_1, axis=2)\n )\n example_0 = numpy.expand_dims(numpy.concatenate(tuple_slices_0, axis=2),\n axis=0)\n example_1_slice_0 = numpy.array([[9., 8., 7.], [6., 5., 4.], [3., 2., 1.]],\n dtype=numpy.float32)\n example_1_slice_1 = numpy.array([[-9., -8., -7.], [-6., -5., -4.], [-3., -2., -1.]],\n dtype=numpy.float32)\n tuple_slices_1 = (\n numpy.expand_dims(example_1_slice_0, axis=2),\n numpy.expand_dims(example_1_slice_1, axis=2)\n )\n example_1 = numpy.expand_dims(numpy.concatenate(tuple_slices_1, axis=2),\n axis=0)\n tensor_4d = numpy.concatenate((example_0, example_1), axis=0)\n print('1st slice of the 4D tensor:')\n print(tensor_4d[:, :, :, 0])\n print('2nd slice of the 4D tensor:')\n print(tensor_4d[:, :, :, 1])\n node_tensor_4d = tf.placeholder(tf.float32, shape=(2, 3, 3, 2))\n node_tensor_2d = tfuls.reshape_4d_to_2d(node_tensor_4d)\n with tf.Session() as sess:\n tensor_2d = sess.run(node_tensor_2d, feed_dict={node_tensor_4d:tensor_4d})\n print('2D tensor:')\n print(tensor_2d)",
"def tune_vrmpy_auto_tensorize(mod, params, hexagon_launcher):\n sch_rules = [\n schedule_rule.ApplyCustomRule(),\n schedule_rule.AutoInline(\n into_producer=False,\n into_consumer=True,\n inline_const_tensor=True,\n disallow_if_then_else=True,\n require_injective=True,\n require_ordered=True,\n disallow_op=[\"tir.exp\"],\n ),\n # VRMPY_u8i8i32_INTRIN is used for conv2d. See topi/hexagon/conv2d_alter_op.py\n # for why we use different intrins for conv2d and dense.\n schedule_rule.MultiLevelTilingWithIntrin(\n VRMPY_u8i8i32_INTRIN,\n structure=\"SRSRS\",\n tile_binds=None,\n max_innermost_factor=64,\n vector_load_lens=None,\n reuse_read=None,\n reuse_write=schedule_rule.ReuseType(\n req=\"may\",\n levels=[1, 2],\n scope=\"global\",\n ),\n ),\n # VRMPY_u8u8i32_INTRIN is used for dense\n schedule_rule.MultiLevelTilingWithIntrin(\n VRMPY_u8u8i32_INTRIN,\n structure=\"SRSRS\",\n tile_binds=None,\n max_innermost_factor=64,\n vector_load_lens=None,\n reuse_read=None,\n reuse_write=schedule_rule.ReuseType(\n req=\"may\",\n levels=[1, 2],\n scope=\"global\",\n ),\n ),\n schedule_rule.ParallelizeVectorizeUnroll(\n max_jobs_per_core=16,\n max_vectorize_extent=128,\n unroll_max_steps=[0, 16, 64, 512],\n unroll_explicit=True,\n ),\n ]\n\n postprocs = [\n postproc.RewriteParallelVectorizeUnroll(),\n postproc.RewriteReductionBlock(),\n postproc.RewriteTensorize(vectorize_init_loop=True),\n ]\n\n # This line is necessary for link-params to take effect during\n # task extraction and relay.build(...).\n mod = mod.with_attr(\"executor\", EXECUTOR)\n\n num_cores = cpu_count(logical=False)\n\n with tempfile.TemporaryDirectory() as work_dir:\n database = ms.relay_integration.tune_relay(\n mod=mod,\n target=TARGET_HEXAGON,\n params=params,\n work_dir=work_dir,\n # for faster tuning\n max_trials_global=20000,\n max_trials_per_task=8,\n num_trials_per_iter=8,\n strategy=\"replay-trace\",\n # max_trials_global=20000,\n # num_trials_per_iter=32,\n # max_trials_per_task=128,\n # strategy=\"evolutionary\",\n builder=get_hexagon_local_builder(max_workers=num_cores),\n runner=get_hexagon_rpc_runner(hexagon_launcher, number=20, max_workers=num_cores),\n space=ms.space_generator.PostOrderApply(\n sch_rules=sch_rules,\n postprocs=postprocs,\n mutator_probs={},\n ),\n # This enables anchor-block tuning, where different subgraphs\n # with the same anchor block workload will be identified as equal.\n # It reduces the number of conv2d tuning tasks in the int8 resnet50 model\n # from 36 to 23, with negligible performance difference.\n module_equality=\"anchor-block\",\n num_tuning_cores=num_cores,\n )\n return ms.relay_integration.compile_relay(\n database=database,\n mod=mod,\n target=TARGET_HEXAGON,\n params=params,\n )",
"def remap_vertex(vertex, symmetry):\n assert vertex >= 0 and vertex < 361\n x = vertex % 19\n y = vertex // 19\n if symmetry >= 4:\n x, y = y, x\n symmetry -= 4\n if symmetry == 1 or symmetry == 3:\n x = 19 - x - 1\n if symmetry == 2 or symmetry == 3:\n y = 19 - y - 1\n return y * 19 + x"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
APPLY THE Q STAR SYMMETRY: given the fc matrix at each q in the star, apply the symmetries between them.
|
def ApplyQStar(self, fcq, q_point_group):
    nq = np.shape(q_point_group)[0]
    final_fc = np.zeros(np.shape(fcq), dtype = np.complex128)

    # Setup all the symmetries
    self.SetupQPoint()

    new_dyn = np.zeros( (3 * self.QE_nat, 3*self.QE_nat), dtype = np.complex128, order = "F")
    dyn_star = np.zeros( (nq, 3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = "F")

    for i in range(nq):
        # Get the q points order
        nq_new, sxq, isq, imq = symph.star_q(q_point_group[i,:], self.QE_at, self.QE_bg,
                                             self.QE_nsymq, self.QE_s, self.QE_invs, 0)

        #print "Found nq:", nq_new
        #print "IMQ?", imq

        # Check if the q star is correct
        if nq_new != nq and imq != 0:
            print ("Reciprocal lattice vectors:")
            print (self.QE_bg.transpose() )
            print ("Passed q star:")
            print (q_point_group)
            print ("QE q star:")
            print (sxq[:, :nq_new].transpose())
            raise ValueError("Error, the passed q star does not match the one computed by QE")

        #
        # # Print the star
        # print "q point:", q_point_group[i,:]
        # print "Point in the stars:", nq_new
        # print "Star of q:"
        # print sxq[:, :nq_new].transpose()
        #
        # print "NEW_DYN:", np.shape(new_dyn)
        # print "AT:", np.shape(self.QE_at)
        # print "BG:", np.shape(self.QE_bg)
        # print "N SYM:", self.QE_nsymq
        # print "S:", np.shape(self.QE_s)
        # print "QE_INVS:", np.shape(self.QE_invs)
        # print "IRT:", np.shape(self.QE_irt)
        # print "RTAU:", np.shape(self.QE_rtau)
        # print "NQ_NEW:", nq_new
        # print "SXQ:", np.shape(sxq)
        # print "ISQ:", np.shape(isq)
        # print "IMQ:", imq
        # print "NAT:", self.QE_nat

        new_dyn[:,:] = fcq[i,:,:]
        #print "new dyn ready"

        # Get the new matrix
        dyn_star = symph.q2qstar_out(new_dyn, self.QE_at, self.QE_bg, self.QE_nsymq,
                                     self.QE_s, self.QE_invs, self.QE_irt, self.QE_rtau,
                                     nq_new, sxq, isq, imq, nq, self.QE_nat)
        #print "Fake"
        #print "XQ:", q_point_group[i, :], "NQ_NEW:", nq_new

        # Now, to perform the match, bring the star into the same BZ as the q point.
        # This facilitates the comparison between q points.
        current_q = q_point_group.copy()
        #print "Fake2"
        # for xq in range(nq):
        #     tmp = Methods.put_into_cell(self.QE_bg, sxq[:, xq])
        #     sxq[:, xq] = tmp
        #     current_q[xq,:] = Methods.put_into_cell(self.QE_bg, current_q [xq,:])
        #
        # Get the order of the q star
        sorting_q = np.arange(nq)
        for xq in range(nq):
            count = 0 # Debug (avoid no or more than one identification)
            for yq in range(nq):
                real_y = yq
                dot_f = 1
                if imq == 0 and yq >= nq_new:
                    real_y -= nq_new
                    dot_f = -1
                if Methods.get_min_dist_into_cell(self.QE_bg.transpose(), dot_f* sxq[:, real_y], current_q[xq,:]) < __EPSILON__:
                    sorting_q[xq] = yq
                    count += 1

            if count != 1:
                print ("Original star:")
                print (q_point_group)
                print ("Reshaped star:")
                print (current_q)
                print ("Reciprocal lattice vectors:")
                print (self.QE_bg.transpose() )
                print ("STAR:")
                print (sxq[:, :nq_new].transpose() )
                pta = (current_q[xq,:])
                print ("Distances of xq in the QE star:")
                for yq in range(nq_new):
                    print ("%.4f %.4f %.4f => " % (sxq[0, yq], sxq[1, yq], sxq[2, yq]), Methods.get_min_dist_into_cell(self.QE_bg.transpose(), sxq[:, yq], current_q[xq,:]))
                raise ValueError("Error, the vector (%.3f, %.3f, %.3f) has %d identification in the star" % (pta[0], pta[1], pta[2], count))

        #print "Sorting array:"
        #print sorting_q

        # Copy the matrix in the new one
        for xq in range(nq):
            for xat in range(self.QE_nat):
                for yat in range(self.QE_nat):
                    final_fc[xq, 3*xat: 3*xat + 3, 3*yat : 3*yat + 3] += dyn_star[sorting_q[xq], :,:, xat, yat]

    # Now divide the matrix by the number of q points in the star
    final_fc /= nq

    # Overwrite the matrix
    fcq[:,:,:] = final_fc
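
# --- Minimal self-contained sketch (illustration only; not part of the original routine) ---
# A small numpy example of the repacking step used above: reorder a
# (nq, 3, 3, nat, nat) star of matrices with a hypothetical `sorting_q`
# permutation and accumulate it into the (nq, 3*nat, 3*nat) layout used by
# `fcq`. All sizes and values here are dummies chosen only for the demo.
import numpy as np

nq, nat = 4, 2
dyn_star = np.random.rand(nq, 3, 3, nat, nat) + 1j * np.random.rand(nq, 3, 3, nat, nat)
sorting_q = np.arange(nq)  # identity ordering, standing in for the computed match
final_fc = np.zeros((nq, 3 * nat, 3 * nat), dtype=np.complex128)

for xq in range(nq):
    for xat in range(nat):
        for yat in range(nat):
            # Each (3, 3) atomic block lands in the corresponding 3x3 sub-matrix
            final_fc[xq, 3*xat:3*xat + 3, 3*yat:3*yat + 3] += dyn_star[sorting_q[xq], :, :, xat, yat]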
|
[
"def SymmetrizeFCQ(self, fcq, q_stars, verbose = False, asr = \"simple\"):\n nqirr = len(q_stars)\n nq = np.sum([len(x) for x in q_stars])\n \n # Get the q_points vector\n q_points = np.zeros( (nq, 3), dtype = np.float64)\n sigma = 0\n for i in range(nqirr):\n for q_vec in q_stars[i]:\n q_points[sigma, :] = q_vec\n sigma += 1\n \n if nq != np.shape(fcq)[0]:\n raise ValueError(\"Error, the force constant number of q point %d does not match with the %d given q_points\" % (np.shape(fcq)[0], nq))\n \n \n for iq in range(nq):\n # Prepare the symmetrization\n if verbose:\n print (\"Symmetries in q = \", q_points[iq, :])\n t1 = time.time()\n self.SetupQPoint(q_points[iq,:], verbose)\n t2 = time.time()\n if verbose:\n print (\" [SYMMETRIZEFCQ] Time to setup the q point %d\" % iq, t2-t1, \"s\")\n \n # Proceed with the sum rule if we are at Gamma\n \n if asr == \"simple\" or asr == \"custom\":\n if np.sqrt(np.sum(q_points[iq,:]**2)) < __EPSILON__:\n if verbose:\n print (\"q_point:\", q_points[iq,:])\n print (\"Applying sum rule\")\n self.ImposeSumRule(fcq[iq,:,:], asr)\n elif asr == \"crystal\":\n self.ImposeSumRule(fcq[iq, :,:], asr = asr)\n elif asr == \"no\":\n pass\n else:\n raise ValueError(\"Error, only 'simple', 'crystal', 'custom' or 'no' asr are supported, given %s\" % asr)\n \n t1 = time.time()\n if verbose:\n print (\" [SYMMETRIZEFCQ] Time to apply the sum rule:\", t1-t2, \"s\")\n \n # # Symmetrize the matrix\n if verbose:\n old_fcq = fcq[iq, :,:].copy()\n w_old = np.linalg.eigvals(fcq[iq, :, :])\n print (\"FREQ BEFORE SYM:\", w_old )\n self.SymmetrizeDynQ(fcq[iq, :,:], q_points[iq,:])\n t2 = time.time()\n if verbose:\n print (\" [SYMMETRIZEFCQ] Time to symmetrize the %d dynamical matrix:\" % iq, t2 -t1, \"s\" )\n print (\" [SYMMETRIZEFCQ] Difference before the symmetrization:\", np.sqrt(np.sum(np.abs(old_fcq - fcq[iq, :,:])**2)))\n w_new = np.linalg.eigvals(fcq[iq, :, :])\n print (\"FREQ AFTER SYM:\", w_new)\n\n # For each star perform the symmetrization over that star\n q0_index = 0\n for i in range(nqirr):\n q_len = len(q_stars[i])\n t1 = time.time()\n if verbose:\n print (\"Applying the q star symmetrization on:\")\n print (np.array(q_stars[i]))\n self.ApplyQStar(fcq[q0_index : q0_index + q_len, :,:], np.array(q_stars[i]))\n t2 = time.time()\n if verbose:\n print (\" [SYMMETRIZEFCQ] Time to apply the star q_irr = %d:\" % i, t2 - t1, \"s\")\n q0_index += q_len",
"def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):\n \n nsym = len(symmetries)\n \n self.QE_nsymq = np.intc(nsym)\n self.QE_nsym = self.QE_nsymq\n \n \n for i, sym in enumerate(symmetries):\n self.QE_s[:,:, i] = np.transpose(sym[:, :3])\n \n # Get the atoms correspondence\n eq_atoms = GetIRT(self.structure, sym)\n \n self.QE_irt[i, :] = eq_atoms + 1\n \n # Get the inverse symmetry\n inv_sym = np.linalg.inv(sym[:, :3])\n for k, other_sym in enumerate(symmetries):\n if np.sum( (inv_sym - other_sym[:, :3])**2) < __EPSILON__:\n break\n \n self.QE_invs[i] = k + 1\n \n # Setup the position after the symmetry application\n for k in range(self.QE_nat):\n self.QE_rtau[:, i, k] = self.structure.coords[eq_atoms[k], :].astype(np.float64)\n \n \n # Get the reciprocal lattice vectors\n b_vectors = self.structure.get_reciprocal_vectors()\n \n # Get the minus_q operation\n self.QE_minusq = False\n\n # NOTE: HERE THERE COULD BE A BUG\n \n # q != -q\n # Get the q vectors in crystal coordinates\n q = Methods.covariant_coordinates(b_vectors, q_point)\n for k, sym in enumerate(self.QE_s):\n new_q = self.QE_s[:,:, k].dot(q)\n if np.sum( (Methods.put_into_cell(b_vectors, -q_point) - new_q)**2) < __EPSILON__:\n self.QE_minus_q = True\n self.QE_irotmq = k + 1\n break",
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]",
"def apply_symplectic(self, S, qubits):\n # Approach 1: convert the 2m x 2m symplectic matrix S to a 2n x 2n\n # matrix that acts on the corresponding columns in qubits\n # M = decompose.symplectic_to_matrix(S, self.n, qubits)\n # self.state = (self.state @ M) % 2\n\n # Approach 2: decompose the 2m x 2m symplectic matrix into a\n # series of {C, H, P} gates, then apply those\n # NOTE: this is actually much faster in practice for large n\n m = len(qubits)\n gates = decompose.decompose_state(CHP_Simulation(m, S))\n gates = decompose.change_gates(gates, qubits)\n decompose.apply_gates(gates, self)",
"def SetupQStar(self, q_tot, supergroup = False):\n \n # Setup the symmetries\n #self.SetupQPoint()\n \n # Lets copy the q list (we are going to pop items from it)\n q_list = q_tot[:]\n q_stars = []\n \n count_qstar = 0\n count_q = 0\n q_indices = np.zeros( len(q_tot), dtype = int)\n while len(q_list) > 0:\n q = q_list[0]\n # Get the star of the current q point\n _q_ = np.array(q, dtype = np.float64) # Fortran explicit conversion\n \n nq_new, sxq, isq, imq = symph.star_q(_q_, self.QE_at, self.QE_bg, \n self.QE_nsym, self.QE_s, self.QE_invs, 0)\n \n # print (\"START WITH Q:\", q)\n # print (\"FOUND STAR:\")\n # for jq in range(nq_new):\n # print (sxq[:, jq])\n # print ()\n \n # print (\"TELL ME THE BG:\")\n # print (self.QE_bg.transpose())\n\n # print(\"Manual star:\")\n # for k in range(self.QE_nsym):\n # trial_q = q.dot(self.QE_s[:,:, k])\n # distance_q = Methods.get_min_dist_into_cell(self.QE_bg.T, trial_q, q)\n # distance_mq = Methods.get_min_dist_into_cell(self.QE_bg.T, trial_q, -q)\n # print(\"trial_q : {} | DQ: {:.4f} | DMQ: {:.4f}\".format(trial_q, distance_q, distance_mq ))\n \n # Prepare the star\n q_star = [sxq[:, k] for k in range(nq_new)]\n\n # If imq is not zero (we do not have -q in the star) then add the -q for each in the star\n if imq == 0:\n old_q_star = q_star[:]\n min_dist = 1\n \n for q in old_q_star:\n q_star.append(-q)\n\n \n\n q_stars.append(q_star)\n \n # Pop out the q_star from the q_list\n for jq, q_instar in enumerate(q_star):\n # Look for the q point in the star and pop them\n #print(\"q_instar:\", q_instar)\n q_dist = [Methods.get_min_dist_into_cell(self.QE_bg.transpose(), \n np.array(q_instar), q_point) for q_point in q_list]\n \n pop_index = np.argmin(q_dist) \n q_list.pop(pop_index)\n \n # Use the same trick to identify the q point\n q_dist = [Methods.get_min_dist_into_cell(self.QE_bg.transpose(), \n np.array(q_instar), q_point) for q_point in q_tot]\n \n q_index = np.argmin(q_dist)\n #print (q_indices, count_q, q_index)\n q_indices[count_q] = q_index\n \n count_q += 1\n \n \n return q_stars, q_indices",
"def SymmetrizeDynQ(self, dyn_matrix, q_point):\n \n # TODO: implement hermitianity to speedup the conversion\n \n #Prepare the array to be passed to the fortran code\n QE_dyn = np.zeros( (3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = \"F\")\n \n # Get the crystal coordinates for the matrix\n for na in range(self.QE_nat):\n for nb in range(self.QE_nat):\n fc = dyn_matrix[3 * na : 3* na + 3, 3*nb: 3 * nb + 3]\n QE_dyn[:, :, na, nb] = Methods.convert_matrix_cart_cryst(fc, self.structure.unit_cell, False)\n \n # Prepare the xq variable\n #xq = np.ones(3, dtype = np.float64)\n xq = np.array(q_point, dtype = np.float64)\n # print \"XQ:\", xq\n # print \"XQ_CRYST:\", Methods.covariant_coordinates(self.QE_bg.T, xq)\n # print \"NSYMQ:\", self.QE_nsymq, \"NSYM:\", self.QE_nsym\n # print \"QE SYM:\"\n # print np.einsum(\"abc->cba\", self.QE_s[:, :, :self.QE_nsymq])\n # print \"Other syms:\"\n # print np.einsum(\"abc->cba\", self.QE_s[:, :, self.QE_nsymq: self.QE_nsym])\n # print \"QE INVS:\"\n # print self.QE_invs[:self.QE_nsymq]\n # #print \"QE RTAU:\"\n # #print np.einsum(\"abc->bca\", self.QE_rtau[:, :self.QE_nsymq, :])\n # print \"IROTMQ:\", self.QE_irotmq\n # print \"MINUS Q:\", self.QE_minus_q\n # print \"IRT:\"\n # print self.QE_irt[:self.QE_nsymq, :]\n # print \"NAT:\", self.QE_nat\n\n # Inibhit minus q\n #self.QE_minus_q = 0\n \n \n # USE THE QE library to perform the symmetrization\n symph.symdynph_gq_new( xq, QE_dyn, self.QE_s, self.QE_invs, self.QE_rtau, \n self.QE_irt, self.QE_irotmq, self.QE_minus_q, self.QE_nsymq, self.QE_nat)\n \n # Return to cartesian coordinates\n for na in range(self.QE_nat):\n for nb in range(self.QE_nat):\n fc = QE_dyn[:, :, na, nb] \n dyn_matrix[3 * na : 3* na + 3, 3*nb: 3 * nb + 3] = Methods.convert_matrix_cart_cryst(fc, self.structure.unit_cell, True)",
"def write_in_qp(\n self, tensor: Tensor, format_: str, name_format=None, set_symms=True\n ):\n\n terms = tensor.subst_all(self.f_in_qp).simplify().local_terms\n\n # Internal book keeping, maps the cr/an order to lhs and the rhs terms\n # of the definition of the new matrix element.\n transf = {}\n\n rewritten_terms = []\n\n for term in terms:\n cr_order = 0\n an_order = 0\n indices = []\n for i in term.vecs:\n if len(i.indices) != 2:\n raise ValueError(\n 'Invalid operator to rewrite, one index expected', i\n )\n char, index = i.indices\n if char == CR:\n assert an_order == 0\n cr_order += 1\n elif char == AN:\n an_order += 1\n else:\n assert False\n\n indices.append(index)\n continue\n\n norm = factorial(cr_order) * factorial(an_order)\n order = (cr_order, an_order)\n tot_order = cr_order + an_order\n\n base = IndexedBase(format_.format(*order))\n if name_format is not None:\n base_name = name_format.format(*order)\n self.set_name(**{base_name: base})\n\n indices[cr_order:tot_order] = reversed(indices[cr_order:tot_order])\n if tot_order > 0:\n new_amp = base[tuple(indices)]\n else:\n new_amp = base.label\n orig_amp = term.amp\n\n new_sums = []\n wrapped_sums = []\n for i in term.sums:\n if new_amp.has(i[0]):\n new_sums.append(i)\n else:\n wrapped_sums.append(i)\n continue\n\n def_term = Term(\n sums=tuple(wrapped_sums), amp=orig_amp * norm, vecs=()\n )\n\n if order in transf:\n entry = transf[order]\n assert entry[0] == new_amp\n entry[1].append(def_term)\n else:\n transf[order] = (new_amp, [def_term])\n rewritten_terms.append(Term(\n sums=tuple(new_sums), amp=new_amp / norm, vecs=term.vecs\n ))\n if set_symms and (cr_order > 1 or an_order > 1):\n self.set_dbbar_base(base, cr_order, an_order)\n\n continue\n\n defs = [\n self.define(lhs, self.create_tensor(rhs_terms))\n for lhs, rhs_terms in transf.values()\n ]\n\n return self.create_tensor(rewritten_terms), defs",
"def calcDQ(gsEqCoords, gsFreqCoords, exEqCoords, exFreqCoords):\n\n # Make a list of lists [[x1_1, ...zn_1], ..., [x1_m, ..., zn_m]]\n gsListOfQCoords = [coords for (f, coords) in gsFreqCoords]\n exListOfQCoords = [coords for (f, coords) in exFreqCoords]\n\n # Convert lists into vectors\n gsEqCartCoords = np.array(gsEqCoords)\n exEqCartCoords = np.array(exEqCoords)\n\n #print \"gsEq\", gsEqCartCoords\n # print \"exEq\", exEqCartCoords\n\n gsQCoords = np.array(gsListOfQCoords)\n exQCoords = np.array(exListOfQCoords)\n\n # Change in equilibirum cartesian coordinates \n changeCartCoords = np.subtract(gsEqCartCoords, exEqCartCoords)\n #print \"exQCoords\", exQCoords\n #print \"change\", changeCartCoords\n\n # Convert Change to Q-coordinates\n #dQArray = np.dot(exQCoords, changeCartCoords)\n dQArray = np.dot(gsQCoords, changeCartCoords)\n dQList = list(dQArray)\n\n # Get frequencies\n gsFreqs = [f for (f,coords) in gsFreqCoords]\n exFreqs = [f for (f,coords) in exFreqCoords]\n\n # Return a list of tuples. Each tuple gives the ground state frequency,\n # the excited state frequency, and the dQ for one mode.\n return zip(gsFreqs,exFreqs, dQList)",
"def chisq(self, star, logger=None):\n\n # Start by getting all interpolation coefficients for all observed points\n data, weight, u, v = star.data.getDataVector()\n if not star.data.values_are_sb:\n # If the images are flux instead of surface brightness, convert\n # them into SB\n star_pix_area = star.data.pixel_area\n data /= star_pix_area\n weight *= star_pix_area*star_pix_area\n\n # Subtract star.fit.center from u, v:\n u -= star.fit.center[0]\n v -= star.fit.center[1]\n\n if self._force_model_center:\n coeffs, dcdu, dcdv, psfx, psfy = self.interp.derivatives(u/self.du, v/self.du)\n dcdu /= self.du\n dcdv /= self.du\n else:\n coeffs, psfx, psfy = self.interp(u/self.du, v/self.du)\n\n # Turn the (psfy,psfx) coordinates into an index into 1d parameter vector.\n index1d = self._indexFromPsfxy(psfx, psfy)\n # All invalid pixel references now have negative index; record and set to zero\n nopsf = index1d < 0\n index1d = np.where(nopsf, 0, index1d)\n # And null the coefficients for such pixels\n coeffs = np.where(nopsf, 0., coeffs)\n if self._force_model_center:\n dcdu = np.where(nopsf, 0., dcdu)\n dcdv = np.where(nopsf, 0., dcdv)\n\n # Multiply kernel (and derivs) by current PSF element values\n # to get current estimates\n pvals = self._fullPsf1d(star)[index1d]\n mod = np.sum(coeffs*pvals, axis=1)\n if self._force_model_center:\n dmdu = star.fit.flux * np.sum(dcdu*pvals, axis=1)\n dmdv = star.fit.flux * np.sum(dcdv*pvals, axis=1)\n resid = data - mod*star.fit.flux\n\n # Now begin construction of alpha/beta/chisq that give\n # chisq vs linearized model.\n rw = resid * weight\n chisq = np.sum(resid * rw)\n\n # To begin with, we build alpha and beta over all PSF points\n # within mask, *and* the flux (and center) shifts. Then\n # will eliminate the constrained PSF points, and then\n # marginalize over the flux (and center).\n\n # Augment the coeffs and index1d vectors with extra column(s)\n # for the shift in flux (and center), so it will be\n # the derivative of model w.r.t. augmented parameter set\n derivs = np.zeros( (coeffs.shape[0], coeffs.shape[1]+self._constraints),\n dtype=float)\n indices = np.zeros( (index1d.shape[0], index1d.shape[1]+self._constraints),\n dtype=int)\n derivs[:, :coeffs.shape[1]] = star.fit.flux * coeffs #derivs wrt PSF elements\n indices[:,:index1d.shape[1]] = index1d\n\n # Add derivs wrt flux\n derivs[:,coeffs.shape[1]] = mod\n dflux_index = self._nparams + self._constraints\n indices[:,coeffs.shape[1]] = dflux_index\n if self._force_model_center:\n # Derivs w.r.t. center shift:\n derivs[:,coeffs.shape[1]+1] = dmdu\n derivs[:,coeffs.shape[1]+2] = dmdv\n indices[:,coeffs.shape[1]+1] = dflux_index+1\n indices[:,coeffs.shape[1]+2] = dflux_index+2\n\n # Accumulate alpha and beta point by point. 
I don't\n # know how to do it purely with numpy calls instead of a loop over data points\n nderivs = self._nparams + 2*self._constraints\n beta = np.zeros(nderivs, dtype=float)\n alpha = np.zeros( (nderivs,nderivs), dtype=float)\n for i in range(len(data)):\n ii = indices[i,:]\n cc = derivs[i,:]\n # beta_j += resid_i * weight_i * coeff_{ij}\n beta[ii] += rw[i] * cc\n # alpha_jk += weight_i * coeff_ij * coeff_ik\n dalpha = cc[np.newaxis,:]*cc[:,np.newaxis] * weight[i]\n iouter = np.broadcast_to(ii, (len(ii),len(ii)))\n alpha[iouter.flatten(), iouter.T.flatten()] += dalpha.flatten()\n\n # Next we eliminate the first _constraints PSF values from the parameters\n # using the linear constraints that dp0 = - _a * dp1\n s0 = slice(None, self._constraints) # parameters to eliminate\n s1 = slice(self._constraints, None) # parameters to keep\n beta = beta[s1] - np.dot(beta[s0], self._a).T\n alpha = alpha[s1,s1] \\\n - np.dot( self._a.T, alpha[s0,s1]) \\\n - np.dot( alpha[s1,s0], self._a) \\\n + np.dot( self._a.T, np.dot(alpha[s0,s0],self._a))\n\n # Now we marginalize over the flux (and center). These shifts are at\n # the back end of the parameter array.\n # But first we need to apply a prior to the shift of flux (and center)\n # to avoid numerical instabilities when these are degenerate because of\n # missing pixel data or otherwise unspecified PSF\n # ??? make these properties of the Model???\n fractional_flux_prior = 0.5 # prior of 50% on pre-existing flux ???\n center_shift_prior = 0.5*self.du #prior of 0.5 uv-plane pixels ???\n alpha[self._nparams, self._nparams] += (fractional_flux_prior*star.fit.flux)**(-2.)\n if self._force_model_center:\n alpha[self._nparams+1, self._nparams+1] += (center_shift_prior)**(-2.)\n alpha[self._nparams+2, self._nparams+2] += (center_shift_prior)**(-2.)\n\n s0 = slice(None, self._nparams) # parameters to keep\n s1 = slice(self._nparams, None) # parameters to marginalize\n a11inv = np.linalg.inv(alpha[s1,s1])\n # Calculate shift in flux - ??? Note that this is the solution for shift\n # when PSF parameters do *not* move; so if we subsequently update\n # the PSF params, we miss shifts due to covariances between flux and PSF.\n\n df = np.dot(a11inv, beta[s1])\n outflux = star.fit.flux + df[0]\n if self._force_model_center:\n outcenter = (star.fit.center[0] + df[1],\n star.fit.center[1] + df[2])\n else:\n outcenter = star.fit.center\n\n # Now get the final alpha, beta, chisq for the remaining PSF params\n outchisq = chisq - np.dot(beta[s1].T,np.dot(a11inv, beta[s1]))\n tmp = np.dot(a11inv, alpha[s1,s0])\n outbeta = beta[s0] - np.dot(beta[s1].T,tmp)\n outalpha = alpha[s0,s0] - np.dot(alpha[s0,s1],tmp)\n\n outfit = StarFit(star.fit.params,\n flux = outflux,\n center = outcenter,\n chisq = outchisq,\n dof = np.count_nonzero(weight) - self._nparams,\n alpha = outalpha,\n beta = outbeta)\n\n return Star(star.data, outfit)",
"def apply_volume_symmetry(self, use_inline_c=True):\n t1 = time.time()\n\n #Get the # of pixels and the order from the symmetry map\n symm = self.volume_symmetry\n (numpix, order) = symm.shape\n\n if use_inline_c and not config.cfg.force_pure_python:\n #------ C version (about 400x faster than python) -------\n #Put some variables in the workspace\n old_q = self.qspace.flatten() * 1.0\n qspace_flat = old_q * 0.0\n\n support = \"\"\n code = \"\"\"\n int pix, ord, index;\n for (pix=0; pix<numpix; pix++)\n {\n //Go through each pixel\n for (ord=0; ord<order; ord++)\n {\n //Now go through each equivalent q.\n index = SYMM2(pix, ord);\n if (index >= 0)\n {\n //Valid index.\n QSPACE_FLAT1(pix) += OLD_Q1(index);\n //printf(\"%d\\\\n\", index);\n }\n }\n }\n \"\"\"\n varlist = ['old_q', 'qspace_flat', 'numpix', 'order', 'symm']\n weave.inline(code, varlist, compiler='gcc', support_code=support)\n #Reshape it back as a 3D array.\n n = len(self.inst.qx_list)\n self.qspace = qspace_flat.reshape( (n,n,n) )\n else:\n #---- Pure python version ----\n\n #Clear the starting space\n old_q = self.qspace\n new_q = self.qspace * 0\n for pix in xrange(numpix):\n for ord in xrange(order):\n eq_index = symm[pix, ord]\n if eq_index >= 0:\n #Add up to this pixel, the equivalent one.\n #The list includes this given voxel too.\n new_q.flat[pix] += old_q.flat[eq_index]\n self.qspace = new_q\n\n #Done.\n if self.verbose: print \"Volume symmetry computed in %.3f sec.\" % (time.time()-t1)",
"def ApplySymmetryToEffCharge(self, eff_charges):\n \n nat, cart1, cart2 = np.shape(eff_charges)\n\n assert cart1 == cart2 \n assert cart1 == 3\n assert nat == self.QE_nat, \"Error, the structure and effective charges are not compatible\"\n\n\n # Apply the sum rule\n tot_sum = np.sum(eff_charges, axis = 0)\n eff_charges -= np.tile(tot_sum, (nat, 1)).reshape((nat, 3,3 )) / nat\n\n new_eff_charges = np.zeros((nat, cart1, cart2), dtype = np.double)\n\n # Get the effective charges in crystal components\n for i in range(nat):\n eff_charges[i, :, :] = Methods.convert_matrix_cart_cryst(eff_charges[i, :, :], self.QE_at.T)\n\n # Apply translations\n if self.QE_translation_nr > 1:\n for i in range(self.QE_translation_nr):\n irt = self.QE_translations_irt[:, i] - 1\n for j in range(nat):\n new_mat = eff_charges[irt[j], :, :]\n new_eff_charges[j, :, :] += new_mat\n\n eff_charges[:,:,:] = new_eff_charges / self.QE_translation_nr\n new_eff_charges[:,:,:] = 0.\n\n # Apply rotations\n for i in range(self.QE_nsym):\n irt = self.QE_irt[i, :] - 1\n\n for j in range(nat):\n new_mat = self.QE_s[:,:, i].dot( eff_charges[irt[j], :, :].dot(self.QE_s[:,:,i].T))\n new_eff_charges[j, :, :] += new_mat\n new_eff_charges /= self.QE_nsym\n\n # Convert back into cartesian\n for i in range(nat):\n eff_charges[i, :, :] = Methods.convert_matrix_cart_cryst(new_eff_charges[i, :, :], self.QE_at.T, True)",
"def forward(q,T):\n qcounter = 0\n Tcounter = 0\n s1 = np.sin(q[qcounter])\n c1 = np.cos(q[qcounter])\n qcounter += 1\n\n q234 = q[qcounter]\n s2= np.sin(q[qcounter])\n c2= np.cos(q[qcounter])\n qcounter +=1\n\n s3 = np.sin(q[qcounter])\n c3= np.cos(q[qcounter])\n q234 += q[qcounter]\n qcounter +=1\n\n q234 += q[qcounter]\n qcounter +=1\n\n s5 = np.sin(q[qcounter])\n c5= np.cos(q[qcounter])\n qcounter +=1\n\n s6= np.sin(q[qcounter])\n c6= np.cos(q[qcounter])\n s234= np.sin(q234)\n c234=np.cos(q234)\n\n q= ((c1*c234-s1*s234)*s5)/2.0 - c5*s1 + ((c1*c234+s1*s234)*s5)/2\n\n T[Tcounter] = (c6*(s1*s5 + ((c1*c234-s1*s234)*c5)/2.0 + ((c1*c234+s1*s234)*c5)/2.0) -\\\n (s6*((s1*c234+c1*s234) - (s1*c234-c1*s234)))/2.0)\n Tcounter+=1 #//nx\n\n T[Tcounter] = (-(c6*((s1*c234+c1*s234) - (s1*c234-c1*s234)))/2.0 -\\\n s6*(s1*s5 + ((c1*c234-s1*s234)*c5)/2.0 + ((c1*c234+s1*s234)*c5)/2.0))\n Tcounter+=1 #//ox\n\n\n T[Tcounter]= c5*s1 - ((c1*c234-s1*s234)*s5)/2.0 -((c1*c234-s1*s234)*s5)/2.0\n Tcounter+=1 #//ax\n\n T[Tcounter] = -(d5*(s1*c234-c1*s234))/2.0 + (d5*(s1*c234+c1*s234))/2.0 +\\\n d4*s1 - (d6*(c1*c234-s1*s234)*s5)/2.0 - (d6*(c1*c234+s1*s234)*s5)/2.0 +\\\n a2*c1*c2 + d6*c5*s1 + a3*c1*c2*c3 - a3*c1*s2*s3\n Tcounter+=1 # //px\n\n T[Tcounter] = (c6*(((s1*c234+c1*s234)*c5)/2.0 - c1*s5 + ((s1*c234-c1*s234)*c5)/2.0) +\\\n s6*((c1*c234-s1*s234)/2.0 - (c1*c234+s1*s234)/2.0))\n Tcounter+=1 #//ny\n\n T[Tcounter] = (c6*((c1*c234-s1*s234)/2.0 - (c1*c234+s1*s234)/2.0) -\\\n s6*(((s1*c234+c1*s234)*c5)/2.0 - c1*s5 + ((s1*c234-c1*s234)*c5)/2.0))\n Tcounter+=1# //oy\n\n T[Tcounter] = -c1*c5 -((s1*c234+c1*s234)*s5)/2.0 + ((c1*s234-s1*c234)*s5)/2.0\n Tcounter+=1 #//ay\n\n T[Tcounter] = -(d5*(c1*c234-s1*s234))/2.0 + (d5*(c1*c234+s1*s234))/2.0 - d4*c1 -\\\n (d6*(s1*c234+c1*s234)*s5)/2.0 - (d6*(s1*c234-c1*s234)*s5)/2.0 - d6*c1*c5 +\\\n a2*c2*s1 + a3*c2*c3*s1 - a3*s1*s2*s3\n Tcounter+=1 # //py\n\n\n T[Tcounter] = ((s234*c6+c234*s6)/2.0 + s234*c5*c6-(s234*c6-c234*s6)/2.0)\n Tcounter+=1 #//nz\n\n T[Tcounter] = ((c234*c6+s234*s6)/2.0 + (c234*c6-s234*s6)/2.0 - s234*c5*s6 )\n Tcounter+=1 # oz\n\n T[Tcounter] = ((c234*c5-s234*s5)/2.0 - (c234*c5+s234*s5)/2.0)\n Tcounter+=1 # //az\n\n T[Tcounter] = (d1 + (d6*(c234*c5-s234*s5))/2.0 + a3*(s2*c3+c2*s3) + a2*s2 -\\\n (d6*(c234*c5+s234*s5))/2.0 - d5*c234)\n Tcounter+=1; #//pz\n T[Tcounter] = 0.0\n Tcounter+=1\n T[Tcounter] = 0.0\n Tcounter+=1\n T[Tcounter] = 0.0\n Tcounter+=1\n T[Tcounter] = 1.0",
"def _compute_Q_vector(self):\n\n self.QVector = list(it.product([fsc.Q for fsc in self.fscs]))",
"def __init__(self, structure, threshold = 1e-5):\n \n if not structure.has_unit_cell:\n raise ValueError(\"Error, symmetry operation can be initialize only if the structure has a unit cell\")\n \n self.structure = structure\n self.threshold = np.float64(threshold)\n \n # Setup the threshold \n symph.symm_base.set_accep_threshold(self.threshold)\n \n nat = structure.N_atoms\n \n # Define the quantum espresso symmetry variables in optimized way to work with Fortran90\n self.QE_nat = np.intc( nat )\n self.QE_s = np.zeros( (3, 3, 48) , dtype = np.intc, order = \"F\")\n self.QE_irt = np.zeros( (48, nat), dtype = np.intc, order = \"F\")\n self.QE_invs = np.zeros( (48), dtype = np.intc, order = \"F\")\n self.QE_rtau = np.zeros( (3, 48, nat), dtype = np.float64, order = \"F\")\n self.QE_ft = np.zeros( (3, 48), dtype = np.float64, order = \"F\")\n \n \n self.QE_minus_q = False\n self.QE_irotmq = np.intc(0)\n self.QE_nsymq = np.intc( 0 )\n self.QE_nsym = np.intc(0)\n \n # Prepare the QE structure\n self.QE_tau = np.zeros((3, nat), dtype = np.float64, order = \"F\")\n self.QE_ityp = np.zeros(nat, dtype = np.intc)\n \n symbs = {}\n counter = 1\n for i in range(nat):\n # Rank the atom number\n atm = structure.atoms[i]\n if not atm in symbs.keys():\n symbs[atm] = counter\n counter += 1\n \n self.QE_ityp[i] = symbs[atm]\n # Convert in bohr\n for j in range(3):\n self.QE_tau[j, i] = structure.coords[i, j]\n \n \n self.QE_at = np.zeros( (3,3), dtype = np.float64, order = \"F\")\n self.QE_bg = np.zeros( (3,3), dtype = np.float64, order = \"F\")\n \n bg = structure.get_reciprocal_vectors()\n for i in range(3):\n for j in range(3):\n self.QE_at[i,j] = structure.unit_cell[j,i]\n self.QE_bg[i,j] = bg[j,i] / (2* np.pi) \n\n # Here we define the quantities required to symmetrize the supercells\n self.QE_at_sc = self.QE_at.copy()\n self.QE_bg_sc = self.QE_bg.copy()\n self.QE_translation_nr = 1 # The supercell total dimension (Nx * Ny * Nz)\n self.QE_translations = [] # The translations in crystal axes\n\n # After the translation, which vector is transformed in which one?\n # This info is stored here as ndarray( size = (N_atoms, N_trans), dtype = np.intc, order = \"F\")\n self.QE_translations_irt = []",
"def Generate(self, dyn, qe_sym = None):\n \n # Check if the symmetries must be initialize\n if qe_sym is None:\n qe_sym = CC.symmetries.QE_Symmetry(dyn.structure)\n \n \n # Get the number of irreducible q points from the matrix\n self.nq = dyn.nqirr\n self.nat = dyn.structure.N_atoms\n \n # Initialize the symmetries at q = 0\n qe_sym.SetupQPoint()\n \n # Prepare the wyckoff basis\n tmp_wyck_gen = np.zeros((3 * self.nat, self.nat, 3), dtype = np.float64)\n \n for i in range( 3 * self.nat):\n x = i % 3\n n = i / 3\n tmp_wyck_gen[i, n, x] = 1\n \n # Symmetrize the vector\n qe_sym.SymmetrizeVector(tmp_wyck_gen[i, :, :])\n \n # Apply the gram-schmidt\n new_gen = tmp_wyck_gen.reshape((3 * self.nat, 3 * self.nat)).transpose()\n new_gen = scipy.linalg.orth(new_gen).transpose()\n \n # Get the number of wyckoff coefficients\n self.wyck_ncoeff = new_gen.shape()[0]\n \n # Reshape the array and get the coefficients\n self.wyck_gen = new_gen.reshape((self.wyck_ncoeff, self.nat, 3))\n \n r = np.arange(3 * self.nat)\n \n self.dyn_ncoeff = np.zeros(self.nq, dtype = int)\n self.dyn_gen = []\n \n # Cycle for each irreducible q point of the matrix\n for iq in range(self.nq):\n q = dyn.q_stars[iq][0]\n # Setup the symmetries for this q point\n qe_sym.SetupQPoint(q)\n \n gh = []\n \n for i in range(self.nat * 3):\n for j in range(i, self.nat * 3):\n # Take the generator\n fc = np.zeros((3 * self.nat, 3 * self.nat), dtype = np.complex128)\n fc[i, j] = 1\n \n # Apply the symmetry\n qe_sym.SymmetrizeDynQ(q, fc)\n \n # Check if the generator has already be generated\n is_new = True\n for k in range(i+1):\n mask = fc[k, :] != 0\n first_value = r[mask]\n if len(first_value):\n if k == i:\n if first_value[0] < j:\n is_new = False\n break\n else:\n is_new = False\n break\n \n # If the generator is new\n if is_new:\n qe_sym.ImposeSumRule(fc, \"simple\")\n \n # Check if the sum rule makes this generator desappearing\n if np.sum ((fc != 0).as_type(int)) != 0:\n gh.append(fc / np.sqrt(np.trace(fc.dot(fc))))\n \n dim = len(gh)\n \n # Prepare the gram-shmidt\n gh = np.array(gh, dtype = np.complex128)\n \n gh_new = np.reshape((dim, 9 * self.nat**2)).transpose()\n gh_new = scipy.linalg.orth(gh_new).transpose()\n \n self.dyn_ncoeff = np.shape(gh_new)[0]\n \n self.dyn_gen.append(np.reshape(gh_new, (self.dyn_ncoeff, 3*self.nat, 3*self.nat)))",
"def GetQStar(self, q_vector):\n self.SetupQPoint()\n nq_new, sxq, isq, imq = symph.star_q(q_vector, self.QE_at, self.QE_bg,\n self.QE_nsymq, self.QE_s, self.QE_invs, 0)\n \n #print (\"STAR IMQ:\", imq)\n if imq != 0:\n total_star = np.zeros( (nq_new, 3), dtype = np.float64)\n else:\n total_star = np.zeros( (2*nq_new, 3), dtype = np.float64)\n\n total_star[:nq_new, :] = sxq[:, :nq_new].transpose()\n\n if imq == 0:\n total_star[nq_new:, :] = -sxq[:, :nq_new].transpose()\n\n return total_star",
"def _setup_Q(self):\n self.Q_s = [None for _ in range(self.p+1)]\n self.Q_s[self.p] = np.eye(self.layers[self.p-1])\n for i in range(self.p-1, -1, -1):\n self.Q_s[i] = np.dot(self.U_s[i], self.Q_s[i+1])",
"def fit(self, star):\n star1 = self.chisq(star) # Get chisq Taylor expansion for linearized model\n ### Check for non-pos-def\n ###S = np.linalg.svd(star1.fit.alpha,compute_uv=False)\n ###print(\" .in fit(), min SV:\",np.min(S))###\n ###U,S,Vt = np.linalg.svd(star1.fit.alpha,compute_uv=True)\n ###print(\" ..in fit(), min SV:\",np.min(S))###\n\n # star1 has marginalized over flux (& center, if free), and updated these\n # for best linearized fit at the input parameter values.\n if self._degenerate:\n # Do SVD and retain\n # input values for degenerate parameter combinations\n # U,S,Vt = np.linalg.svd(star1.fit.alpha)\n S,U = np.linalg.eigh(star1.fit.alpha)\n # Invert, while zeroing small elements of S.\n # \"Small\" will be taken to be causing a small chisq change\n # when corresponding PSF component changes by the full flux of PSF\n small = 0.2 * self.pixel_area * self.pixel_area\n if np.any(S < -small):\n print(\"negative: \",np.min(S),\"small:\",small)###\n raise ValueError(\"Negative singular value in alpha matrix\")\n # Leave values that are close to zero equal to zero in inverse.\n nonzero = np.abs(S) > small\n invs = np.zeros_like(S)\n invs[nonzero] = 1./S[nonzero]\n\n ###print('S/zero:',S.shape,np.count_nonzero(np.abs(S)<=small),'small=',small) ###\n ###print(' ',np.max(S[np.abs(S)<=small]),np.min(S[np.abs(S)>small])) ##\n # answer = V * S^{-1} * U^T * beta\n # dparam = np.dot(Vt.T, invs * np.dot(U.T,star1.fit.beta))\n dparam = np.dot(U, invs * np.dot(U.T,star1.fit.beta))\n else:\n # If it is known there are no degeneracies, we can skip SVD\n dparam = np.linalg.solve(star1.fit.alpha, star1.fit.beta)\n # ??? dparam = scipy.linalg.solve(alpha, beta, sym_pos=True) would be faster\n # Create new StarFit, update the chisq value. Note no beta is returned as\n # the quadratic Taylor expansion was about the old parameters, not these.\n starfit2 = StarFit(star1.fit.params + dparam,\n flux = star1.fit.flux,\n center = star1.fit.center,\n alpha = star1.fit.alpha, # Inverse covariance matrix\n chisq = star1.fit.chisq \\\n + np.dot(dparam, np.dot(star1.fit.alpha, dparam)) \\\n - 2 * np.dot(star1.fit.beta, dparam))\n return Star(star1.data, starfit2)",
"def test_sym_m_product():\n amat = np.array([[1, 2, 3], [3, 4, 6]], float, order='F')\n out1 = amat.T.dot(amat)\n out2 = my_dsyrk(amat)\n idx = np.triu_indices(amat.shape[1])\n\n assert np.allclose(out1[idx], out2[idx])\n\n amat = np.array([[1, 2, 3], [3, 4, 6]], float)\n amat = np.asfortranarray(amat.dot(amat.T))\n\n out1 = amat.T.dot(amat)\n out2 = my_dsyrk(amat)\n idx = np.triu_indices(amat.shape[1])\n\n assert np.allclose(out1[idx], out2[idx])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Use the current structure to impose symmetries on a complete dynamical matrix in q space. The simple sum rule at Gamma is also imposed.
|
def SymmetrizeFCQ(self, fcq, q_stars, verbose = False, asr = "simple"):
    nqirr = len(q_stars)
    nq = np.sum([len(x) for x in q_stars])

    # Get the q_points vector
    q_points = np.zeros( (nq, 3), dtype = np.float64)
    sigma = 0
    for i in range(nqirr):
        for q_vec in q_stars[i]:
            q_points[sigma, :] = q_vec
            sigma += 1

    if nq != np.shape(fcq)[0]:
        raise ValueError("Error, the force constant number of q point %d does not match with the %d given q_points" % (np.shape(fcq)[0], nq))

    for iq in range(nq):
        # Prepare the symmetrization
        if verbose:
            print ("Symmetries in q = ", q_points[iq, :])
        t1 = time.time()
        self.SetupQPoint(q_points[iq,:], verbose)
        t2 = time.time()
        if verbose:
            print (" [SYMMETRIZEFCQ] Time to setup the q point %d" % iq, t2-t1, "s")

        # Proceed with the sum rule if we are at Gamma
        if asr == "simple" or asr == "custom":
            if np.sqrt(np.sum(q_points[iq,:]**2)) < __EPSILON__:
                if verbose:
                    print ("q_point:", q_points[iq,:])
                    print ("Applying sum rule")
                self.ImposeSumRule(fcq[iq,:,:], asr)
        elif asr == "crystal":
            self.ImposeSumRule(fcq[iq, :,:], asr = asr)
        elif asr == "no":
            pass
        else:
            raise ValueError("Error, only 'simple', 'crystal', 'custom' or 'no' asr are supported, given %s" % asr)

        t1 = time.time()
        if verbose:
            print (" [SYMMETRIZEFCQ] Time to apply the sum rule:", t1-t2, "s")

        # # Symmetrize the matrix
        if verbose:
            old_fcq = fcq[iq, :,:].copy()
            w_old = np.linalg.eigvals(fcq[iq, :, :])
            print ("FREQ BEFORE SYM:", w_old )
        self.SymmetrizeDynQ(fcq[iq, :,:], q_points[iq,:])
        t2 = time.time()
        if verbose:
            print (" [SYMMETRIZEFCQ] Time to symmetrize the %d dynamical matrix:" % iq, t2 -t1, "s" )
            print (" [SYMMETRIZEFCQ] Difference before the symmetrization:", np.sqrt(np.sum(np.abs(old_fcq - fcq[iq, :,:])**2)))
            w_new = np.linalg.eigvals(fcq[iq, :, :])
            print ("FREQ AFTER SYM:", w_new)

    # For each star perform the symmetrization over that star
    q0_index = 0
    for i in range(nqirr):
        q_len = len(q_stars[i])
        t1 = time.time()
        if verbose:
            print ("Applying the q star symmetrization on:")
            print (np.array(q_stars[i]))
        self.ApplyQStar(fcq[q0_index : q0_index + q_len, :,:], np.array(q_stars[i]))
        t2 = time.time()
        if verbose:
            print (" [SYMMETRIZEFCQ] Time to apply the star q_irr = %d:" % i, t2 - t1, "s")
        q0_index += q_len
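
# --- Minimal self-contained sketch (illustration only; not part of the original routine) ---
# It shows the bookkeeping the function above relies on: fcq is ordered star by
# star, so q_stars is flattened into q_points and each star is reached through a
# contiguous slice fcq[q0_index : q0_index + len(star)], the block that would be
# handed to ApplyQStar. The q vectors and the nat = 2 sizes are dummy values.
import numpy as np

q_stars = [[np.zeros(3)],                                            # Gamma: a one-point star
           [np.array([0.5, 0.0, 0.0]), np.array([-0.5, 0.0, 0.0])]]  # a two-point star
nq = sum(len(star) for star in q_stars)
q_points = np.array([q for star in q_stars for q in star])           # shape (nq, 3)

fcq = np.zeros((nq, 6, 6), dtype=np.complex128)                      # dummy (nq, 3*nat, 3*nat) array

q0_index = 0
for star in q_stars:
    star_block = fcq[q0_index : q0_index + len(star)]                # slice covering one star
    assert star_block.shape == (len(star), 6, 6)
    q0_index += len(star)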
|
[
"def SymmetrizeDynQ(self, dyn_matrix, q_point):\n \n # TODO: implement hermitianity to speedup the conversion\n \n #Prepare the array to be passed to the fortran code\n QE_dyn = np.zeros( (3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = \"F\")\n \n # Get the crystal coordinates for the matrix\n for na in range(self.QE_nat):\n for nb in range(self.QE_nat):\n fc = dyn_matrix[3 * na : 3* na + 3, 3*nb: 3 * nb + 3]\n QE_dyn[:, :, na, nb] = Methods.convert_matrix_cart_cryst(fc, self.structure.unit_cell, False)\n \n # Prepare the xq variable\n #xq = np.ones(3, dtype = np.float64)\n xq = np.array(q_point, dtype = np.float64)\n # print \"XQ:\", xq\n # print \"XQ_CRYST:\", Methods.covariant_coordinates(self.QE_bg.T, xq)\n # print \"NSYMQ:\", self.QE_nsymq, \"NSYM:\", self.QE_nsym\n # print \"QE SYM:\"\n # print np.einsum(\"abc->cba\", self.QE_s[:, :, :self.QE_nsymq])\n # print \"Other syms:\"\n # print np.einsum(\"abc->cba\", self.QE_s[:, :, self.QE_nsymq: self.QE_nsym])\n # print \"QE INVS:\"\n # print self.QE_invs[:self.QE_nsymq]\n # #print \"QE RTAU:\"\n # #print np.einsum(\"abc->bca\", self.QE_rtau[:, :self.QE_nsymq, :])\n # print \"IROTMQ:\", self.QE_irotmq\n # print \"MINUS Q:\", self.QE_minus_q\n # print \"IRT:\"\n # print self.QE_irt[:self.QE_nsymq, :]\n # print \"NAT:\", self.QE_nat\n\n # Inibhit minus q\n #self.QE_minus_q = 0\n \n \n # USE THE QE library to perform the symmetrization\n symph.symdynph_gq_new( xq, QE_dyn, self.QE_s, self.QE_invs, self.QE_rtau, \n self.QE_irt, self.QE_irotmq, self.QE_minus_q, self.QE_nsymq, self.QE_nat)\n \n # Return to cartesian coordinates\n for na in range(self.QE_nat):\n for nb in range(self.QE_nat):\n fc = QE_dyn[:, :, na, nb] \n dyn_matrix[3 * na : 3* na + 3, 3*nb: 3 * nb + 3] = Methods.convert_matrix_cart_cryst(fc, self.structure.unit_cell, True)",
"def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):\n \n nsym = len(symmetries)\n \n self.QE_nsymq = np.intc(nsym)\n self.QE_nsym = self.QE_nsymq\n \n \n for i, sym in enumerate(symmetries):\n self.QE_s[:,:, i] = np.transpose(sym[:, :3])\n \n # Get the atoms correspondence\n eq_atoms = GetIRT(self.structure, sym)\n \n self.QE_irt[i, :] = eq_atoms + 1\n \n # Get the inverse symmetry\n inv_sym = np.linalg.inv(sym[:, :3])\n for k, other_sym in enumerate(symmetries):\n if np.sum( (inv_sym - other_sym[:, :3])**2) < __EPSILON__:\n break\n \n self.QE_invs[i] = k + 1\n \n # Setup the position after the symmetry application\n for k in range(self.QE_nat):\n self.QE_rtau[:, i, k] = self.structure.coords[eq_atoms[k], :].astype(np.float64)\n \n \n # Get the reciprocal lattice vectors\n b_vectors = self.structure.get_reciprocal_vectors()\n \n # Get the minus_q operation\n self.QE_minusq = False\n\n # NOTE: HERE THERE COULD BE A BUG\n \n # q != -q\n # Get the q vectors in crystal coordinates\n q = Methods.covariant_coordinates(b_vectors, q_point)\n for k, sym in enumerate(self.QE_s):\n new_q = self.QE_s[:,:, k].dot(q)\n if np.sum( (Methods.put_into_cell(b_vectors, -q_point) - new_q)**2) < __EPSILON__:\n self.QE_minus_q = True\n self.QE_irotmq = k + 1\n break",
"def __init__(self, structure, threshold = 1e-5):\n \n if not structure.has_unit_cell:\n raise ValueError(\"Error, symmetry operation can be initialize only if the structure has a unit cell\")\n \n self.structure = structure\n self.threshold = np.float64(threshold)\n \n # Setup the threshold \n symph.symm_base.set_accep_threshold(self.threshold)\n \n nat = structure.N_atoms\n \n # Define the quantum espresso symmetry variables in optimized way to work with Fortran90\n self.QE_nat = np.intc( nat )\n self.QE_s = np.zeros( (3, 3, 48) , dtype = np.intc, order = \"F\")\n self.QE_irt = np.zeros( (48, nat), dtype = np.intc, order = \"F\")\n self.QE_invs = np.zeros( (48), dtype = np.intc, order = \"F\")\n self.QE_rtau = np.zeros( (3, 48, nat), dtype = np.float64, order = \"F\")\n self.QE_ft = np.zeros( (3, 48), dtype = np.float64, order = \"F\")\n \n \n self.QE_minus_q = False\n self.QE_irotmq = np.intc(0)\n self.QE_nsymq = np.intc( 0 )\n self.QE_nsym = np.intc(0)\n \n # Prepare the QE structure\n self.QE_tau = np.zeros((3, nat), dtype = np.float64, order = \"F\")\n self.QE_ityp = np.zeros(nat, dtype = np.intc)\n \n symbs = {}\n counter = 1\n for i in range(nat):\n # Rank the atom number\n atm = structure.atoms[i]\n if not atm in symbs.keys():\n symbs[atm] = counter\n counter += 1\n \n self.QE_ityp[i] = symbs[atm]\n # Convert in bohr\n for j in range(3):\n self.QE_tau[j, i] = structure.coords[i, j]\n \n \n self.QE_at = np.zeros( (3,3), dtype = np.float64, order = \"F\")\n self.QE_bg = np.zeros( (3,3), dtype = np.float64, order = \"F\")\n \n bg = structure.get_reciprocal_vectors()\n for i in range(3):\n for j in range(3):\n self.QE_at[i,j] = structure.unit_cell[j,i]\n self.QE_bg[i,j] = bg[j,i] / (2* np.pi) \n\n # Here we define the quantities required to symmetrize the supercells\n self.QE_at_sc = self.QE_at.copy()\n self.QE_bg_sc = self.QE_bg.copy()\n self.QE_translation_nr = 1 # The supercell total dimension (Nx * Ny * Nz)\n self.QE_translations = [] # The translations in crystal axes\n\n # After the translation, which vector is transformed in which one?\n # This info is stored here as ndarray( size = (N_atoms, N_trans), dtype = np.intc, order = \"F\")\n self.QE_translations_irt = []",
"def Generate(self, dyn, qe_sym = None):\n \n # Check if the symmetries must be initialize\n if qe_sym is None:\n qe_sym = CC.symmetries.QE_Symmetry(dyn.structure)\n \n \n # Get the number of irreducible q points from the matrix\n self.nq = dyn.nqirr\n self.nat = dyn.structure.N_atoms\n \n # Initialize the symmetries at q = 0\n qe_sym.SetupQPoint()\n \n # Prepare the wyckoff basis\n tmp_wyck_gen = np.zeros((3 * self.nat, self.nat, 3), dtype = np.float64)\n \n for i in range( 3 * self.nat):\n x = i % 3\n n = i / 3\n tmp_wyck_gen[i, n, x] = 1\n \n # Symmetrize the vector\n qe_sym.SymmetrizeVector(tmp_wyck_gen[i, :, :])\n \n # Apply the gram-schmidt\n new_gen = tmp_wyck_gen.reshape((3 * self.nat, 3 * self.nat)).transpose()\n new_gen = scipy.linalg.orth(new_gen).transpose()\n \n # Get the number of wyckoff coefficients\n self.wyck_ncoeff = new_gen.shape()[0]\n \n # Reshape the array and get the coefficients\n self.wyck_gen = new_gen.reshape((self.wyck_ncoeff, self.nat, 3))\n \n r = np.arange(3 * self.nat)\n \n self.dyn_ncoeff = np.zeros(self.nq, dtype = int)\n self.dyn_gen = []\n \n # Cycle for each irreducible q point of the matrix\n for iq in range(self.nq):\n q = dyn.q_stars[iq][0]\n # Setup the symmetries for this q point\n qe_sym.SetupQPoint(q)\n \n gh = []\n \n for i in range(self.nat * 3):\n for j in range(i, self.nat * 3):\n # Take the generator\n fc = np.zeros((3 * self.nat, 3 * self.nat), dtype = np.complex128)\n fc[i, j] = 1\n \n # Apply the symmetry\n qe_sym.SymmetrizeDynQ(q, fc)\n \n # Check if the generator has already be generated\n is_new = True\n for k in range(i+1):\n mask = fc[k, :] != 0\n first_value = r[mask]\n if len(first_value):\n if k == i:\n if first_value[0] < j:\n is_new = False\n break\n else:\n is_new = False\n break\n \n # If the generator is new\n if is_new:\n qe_sym.ImposeSumRule(fc, \"simple\")\n \n # Check if the sum rule makes this generator desappearing\n if np.sum ((fc != 0).as_type(int)) != 0:\n gh.append(fc / np.sqrt(np.trace(fc.dot(fc))))\n \n dim = len(gh)\n \n # Prepare the gram-shmidt\n gh = np.array(gh, dtype = np.complex128)\n \n gh_new = np.reshape((dim, 9 * self.nat**2)).transpose()\n gh_new = scipy.linalg.orth(gh_new).transpose()\n \n self.dyn_ncoeff = np.shape(gh_new)[0]\n \n self.dyn_gen.append(np.reshape(gh_new, (self.dyn_ncoeff, 3*self.nat, 3*self.nat)))",
"def ApplySymmetryToEffCharge(self, eff_charges):\n \n nat, cart1, cart2 = np.shape(eff_charges)\n\n assert cart1 == cart2 \n assert cart1 == 3\n assert nat == self.QE_nat, \"Error, the structure and effective charges are not compatible\"\n\n\n # Apply the sum rule\n tot_sum = np.sum(eff_charges, axis = 0)\n eff_charges -= np.tile(tot_sum, (nat, 1)).reshape((nat, 3,3 )) / nat\n\n new_eff_charges = np.zeros((nat, cart1, cart2), dtype = np.double)\n\n # Get the effective charges in crystal components\n for i in range(nat):\n eff_charges[i, :, :] = Methods.convert_matrix_cart_cryst(eff_charges[i, :, :], self.QE_at.T)\n\n # Apply translations\n if self.QE_translation_nr > 1:\n for i in range(self.QE_translation_nr):\n irt = self.QE_translations_irt[:, i] - 1\n for j in range(nat):\n new_mat = eff_charges[irt[j], :, :]\n new_eff_charges[j, :, :] += new_mat\n\n eff_charges[:,:,:] = new_eff_charges / self.QE_translation_nr\n new_eff_charges[:,:,:] = 0.\n\n # Apply rotations\n for i in range(self.QE_nsym):\n irt = self.QE_irt[i, :] - 1\n\n for j in range(nat):\n new_mat = self.QE_s[:,:, i].dot( eff_charges[irt[j], :, :].dot(self.QE_s[:,:,i].T))\n new_eff_charges[j, :, :] += new_mat\n new_eff_charges /= self.QE_nsym\n\n # Convert back into cartesian\n for i in range(nat):\n eff_charges[i, :, :] = Methods.convert_matrix_cart_cryst(new_eff_charges[i, :, :], self.QE_at.T, True)",
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]",
"def __init__(self, n, dim, element, mkbasis=True):\n\n self.TOL = 1e-14\n\n #print \"Working on Preliminaries:\\nInitializing...\"\n nb = numbase(n, dim)\n #PQ_j = dim\n\n #Initialize the modal basis\n #BUG: Neglecting int() causes crash in sympy 0.7.1\n # but works in sympy 0.6.7\n #if sympy.__version__ == '0.7.1':\n aij = [[Rational(int(i==j)) for i in range(nb)] for j in range(nb)]\n #aij = [[Rational(i==j) for i in range(nb)] for j in range(nb)]\n #aij = [[Rational(0)] * nb] * nb\n #for i in range(nb):\n # aij[i][i] = Rational(1)\n\n #Figure out the coefficients of all the monomials\n #print \"Creating coefficients.\"\n pqr = mk_pqr_coeff(n * 3, dim)\n #pqr_i = len(pqr)\n\n #Calculate all the integrals\n #print \"Calculating integrals.\"\n #int_pqr, int_pqr_ed, el_verts, ids_ed = int_el_pqr(pqr, element)\n int_pqr, el_verts, ids_ed = int_el_pqr(pqr, element)\n ed_verts = [el_verts[ids_ed[i]] for i in range(len(ids_ed))]\n\n\n if mkbasis:\n #print \"Preliminaries finished, starting basis creating:\"\n #Do Gram-Shmidt orthonormalization\n for j in range(0, nb):\n #print \"Creating basis\", j+1, \"of\", nb\n #Now orthogonalize wrt old basis\n for k in range(0, j):\n coeff = inprod(aij[j], aij[k], pqr, int_pqr)\n for ii in range(nb):\n aij[j][ii] = aij[j][ii] - aij[k][ii] * coeff\n\n #And Normalize\n coeff = inprod(aij[j], aij[j], pqr, int_pqr)\n for k in range(nb):\n aij[j][k] = aij[j][k] / sqrt(coeff)\n else:\n pass\n #print \"Preliminaries finished.\"\n #Assign created structures to public member variables\n #doxygen documentation is also created HERE TODO: detailed doc for these\n #variables should go here...\n\n ##Contains the coefficients of each of the bases. As and example,\n # aij[0] contains the coefficients for the first basis.\n self.coeffs = aij\n if dim == 0:\n self.coeffs = [[1.0]]\n\n ##Number of bases\n self.nb = nb\n\n ##Matrix defining what each monomial basis means -- that is, it\n # gives the degree of each monomial component x^p y^q z^r.\n # pqr[1] = [0 0 1] could give x^0 y^0 z^1, for example.\n self.monoms = pqr\n\n ##Contains the value of the integral of the monomial\n # over the element. volint_pqr[0] gives the volume of the\n # element, for example.\n self.elmint_monoms = int_pqr\n if dim==0:\n self.elmint_monoms = [1]\n\n ##Array containing the x,y,z coordinates of the element.\n # vol_verts[0] = [-1, -1, -1], for example. These are\n # labeled, in general counter-clockwise from bottom\n # (z smallest) to top (z largest).\n self.vol_verts = el_verts\n if dim==0:\n self.vol_verts = array([[0.0]])\n\n ##Array containing the x,y,z coordinates of the element\n # edges. ed_verts[0][0] = [-1, -1, -1], for example gives the\n # first vertex of the first edge. These are labeled, in\n # general counter-clockwise to give outward pointing normals\n # according to the right-hand rule\n self.ed_verts = ed_verts\n if dim==0:\n self.ed_verts = [array([0.0])]\n\n ##Array containing the ids of the vertices that make up the coordinates\n # of the element eges. vol_verts[ids_ed[0]] gives the\n # coordinates of the first edge, for example.\n # These are labeled, in general counter-clockwise to give outward\n # pointing normalsaccording to the right-hand rule\n self.ids_ed = ids_ed\n if dim==0:\n self.ids_ed = [[0]]\n\n ##Array containing the type of each edge. In 1D and 2D this is always\n # zeros everywhere. 
For 3D prisms, the element has both triangles (0's)\n # and squares (1's)\n self.ed_type = [0 for i in range(len(ids_ed))]\n for i in range(len(ids_ed)):\n if len(ids_ed[i]) == 4:\n self.ed_type[i] = 1\n if dim==0:\n self.ed_type = [0]\n\n ##Number of active monomials -- basically how many coefficients each\n #basis has.\n self.nm = nb\n\n ##Order of the created basis\n self.n = n\n\n ##Number of edges\n self.ne = len(self.ids_ed)\n\n ##The element type\n self.element = element\n\n ##Dimension of basis\n self.dim = dim",
"def write_in_qp(\n self, tensor: Tensor, format_: str, name_format=None, set_symms=True\n ):\n\n terms = tensor.subst_all(self.f_in_qp).simplify().local_terms\n\n # Internal book keeping, maps the cr/an order to lhs and the rhs terms\n # of the definition of the new matrix element.\n transf = {}\n\n rewritten_terms = []\n\n for term in terms:\n cr_order = 0\n an_order = 0\n indices = []\n for i in term.vecs:\n if len(i.indices) != 2:\n raise ValueError(\n 'Invalid operator to rewrite, one index expected', i\n )\n char, index = i.indices\n if char == CR:\n assert an_order == 0\n cr_order += 1\n elif char == AN:\n an_order += 1\n else:\n assert False\n\n indices.append(index)\n continue\n\n norm = factorial(cr_order) * factorial(an_order)\n order = (cr_order, an_order)\n tot_order = cr_order + an_order\n\n base = IndexedBase(format_.format(*order))\n if name_format is not None:\n base_name = name_format.format(*order)\n self.set_name(**{base_name: base})\n\n indices[cr_order:tot_order] = reversed(indices[cr_order:tot_order])\n if tot_order > 0:\n new_amp = base[tuple(indices)]\n else:\n new_amp = base.label\n orig_amp = term.amp\n\n new_sums = []\n wrapped_sums = []\n for i in term.sums:\n if new_amp.has(i[0]):\n new_sums.append(i)\n else:\n wrapped_sums.append(i)\n continue\n\n def_term = Term(\n sums=tuple(wrapped_sums), amp=orig_amp * norm, vecs=()\n )\n\n if order in transf:\n entry = transf[order]\n assert entry[0] == new_amp\n entry[1].append(def_term)\n else:\n transf[order] = (new_amp, [def_term])\n rewritten_terms.append(Term(\n sums=tuple(new_sums), amp=new_amp / norm, vecs=term.vecs\n ))\n if set_symms and (cr_order > 1 or an_order > 1):\n self.set_dbbar_base(base, cr_order, an_order)\n\n continue\n\n defs = [\n self.define(lhs, self.create_tensor(rhs_terms))\n for lhs, rhs_terms in transf.values()\n ]\n\n return self.create_tensor(rewritten_terms), defs",
"def _setup_Q(self):\n self.Q_s = [None for _ in range(self.p+1)]\n self.Q_s[self.p] = np.eye(self.layers[self.p-1])\n for i in range(self.p-1, -1, -1):\n self.Q_s[i] = np.dot(self.U_s[i], self.Q_s[i+1])",
"def initialize_volume_symmetry_map(self):\n #@type pg PointGroup\n pg = self.crystal.get_point_group()\n if pg is None:\n print \"ERROR!\"\n return\n\n t1 = time.time()\n\n order = len(pg.table)\n #@type inst Instrument\n inst = self.inst\n\n #Initialize the symmetry map. Last dimension = the ORDER equivalent indices\n n = len(inst.qx_list)\n numpix = n**3\n symm = np.zeros( (numpix, order) , dtype=int)\n\n if self.verbose: print \"Starting volume symmetry calculation. Order is %d. Matrix is %d voxels (%d to a side).\" % (order, n**3, n)\n\n #--- From get_hkl_from_q functions: (moved here for speed) --\n #Get the inverse the B matrix to do the reverse conversion\n B = self.crystal.get_B_matrix()\n invB = np.linalg.inv(B)\n\n #Limit +- in q space\n qlim = inst.qlim\n \n if config.cfg.force_pure_python:\n #----------- Pure Python Version --------------\n\n #Go through each pixel\n q_arr = np.zeros( (3, numpix) )\n for (ix, qx) in enumerate(inst.qx_list):\n for (iy, qy) in enumerate(inst.qx_list):\n for (iz, qz) in enumerate(inst.qx_list):\n i = iz + iy*n + ix*n*n\n #Find the (float) HKL of this voxel at qx,qy,qz.\n q_arr[:, i] = (qx,qy,qz)\n\n #Matrix multiply invB.hkl to get all the HKLs as a column array\n hkl = np.dot(invB, q_arr)\n\n #Now get ORDER equivalent HKLs, as a long list.\n #(as equivalent q)\n q_equiv = np.zeros( (3, numpix, order) )\n for ord in xrange(order):\n #Ok, we go TABLE . hkl to get the equivalent hkl\n #Them, B . hkl gives you the Q vector\n q_equiv[:,:, ord] = np.dot(B, np.dot(pg.table[ord], hkl) )\n\n #Now we need to find the index into the array.\n #Start by finding the x,y,z, indices\n ix = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[0, :, ord])\n iy = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[1, :, ord])\n iz = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[2, :, ord])\n\n #Now put the index into the symmetry matrix\n index = iz + iy*n + ix*n*n\n index[np.isnan(index)] = -1 #Put -1 where a NAN was found\n symm[:, ord] = index\n\n\n else:\n #--------------- Inline C version (about 17x faster than Python) ---------------\n code = \"\"\"\n\n //-- Calculate the hkl array ---\n int ix, iy, iz;\n int eix, eiy, eiz, eindex;\n int index, ord;\n double qx, qy, qz;\n double eqx, eqy, eqz;\n double h, k, l;\n double eh, ek, el;\n for (ix=0; ix<n; ix++)\n {\n qx = ix*qres - qlim;\n for (iy=0; iy<n; iy++)\n {\n qy = iy*qres - qlim;\n for (iz=0; iz<n; iz++)\n {\n qz = iz*qres - qlim;\n index = iz + iy*n + ix*n*n;\n //Ok, now we matrix multiply invB.hkl to get all the HKLs as a column array\n h = qx * INVB2(0,0) + qy * INVB2(0,1) + qz * INVB2(0,2);\n k = qx * INVB2(1,0) + qy * INVB2(1,1) + qz * INVB2(1,2);\n l = qx * INVB2(2,0) + qy * INVB2(2,1) + qz * INVB2(2,2);\n\n //Now go through each equivalency table.\n for (ord=0; ord<order; ord++)\n {\n //Do TABLE.hkl to find a new equivalent hkl\n eh = h * TABLE3(ord, 0,0) + k * TABLE3(ord, 0,1) + l * TABLE3(ord, 0,2);\n ek = h * TABLE3(ord, 1,0) + k * TABLE3(ord, 1,1) + l * TABLE3(ord, 1,2);\n el = h * TABLE3(ord, 2,0) + k * TABLE3(ord, 2,1) + l * TABLE3(ord, 2,2);\n //Now, matrix mult B . 
equiv_hkl to get the other q vector\n eqx = eh * B2(0,0) + ek * B2(0,1) + el * B2(0,2);\n eqy = eh * B2(1,0) + ek * B2(1,1) + el * B2(1,2);\n eqz = eh * B2(2,0) + ek * B2(2,1) + el * B2(2,2);\n\n //Ok, now you have to find the index into QSPACE\n eix = round( (eqx+qlim)/qres ); if ((eix >= n) || (eix < 0)) eix = -1; \n eiy = round( (eqy+qlim)/qres ); if ((eiy >= n) || (eiy < 0)) eiy = -1;\n eiz = round( (eqz+qlim)/qres ); if ((eiz >= n) || (eiz < 0)) eiz = -1;\n\n if ((eix < 0) || (eiy < 0) || (eiz < 0))\n {\n //One of the indices was out of bounds.\n //Put this marker to mean NO EQUIVALENT\n SYMM2(index, ord) = -1;\n }\n else\n {\n //No problem!, Now I put it in there\n eindex = eiz + eiy*n + eix*n*n;\n //This pixel (index) has this equivalent pixel index (eindex) for this order transform ord.\n SYMM2(index, ord) = eindex;\n }\n\n }\n \n }\n }\n }\n \"\"\"\n qres = inst.q_resolution\n n = len(self.inst.qx_list)\n table = np.array(pg.table) #Turn the list of 3x3 arrays into a Nx3x3 array\n varlist = ['B', 'invB', 'symm', 'qres', 'qlim', 'n', 'order', 'table']\n weave.inline(code, varlist, compiler='gcc', support_code=\"\")\n\n #Done with either version\n self.volume_symmetry = symm\n\n if self.verbose: print \"Volume symmetry map done in %.3f sec.\" % (time.time()-t1)",
"def Qlearning(mdp,gamma,lambd,iterations):\r\n\r\n # The Q-values are a real-valued dictionary Q[s,a] where s is a state and a is an action.\r\n state = 0 # Always start from state 0\r\n Q = dict()\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n return Q",
"def do_sym_z(self):\n \n nx = self.nx()\n ny = self.ny()\n nz = self.nz()\n \n scale = np.float32(0.5)\n data = np.empty((nx, ny, nz), dtype=np.float32)\n \n for iz in range(0, nz):\n for iy in range(0, ny):\n for ix in range(0, nx):\n dleft = self._data[ix, iy, iz]\n drght = self._data[ix, iy, nz-1-iz]\n data[ix,iy,iz] = (dleft + drght) * scale\n \n self._data = data\n self._sym_z = True",
"def FGW_matrix(exp,settings,a,atoms_name):\n n= exp[a].shape[0]\n alpha= settings['alpha']\n if atoms_name =='Cs':\n learnt_mat = [(gwu.np_sum_scaled_mat(exp['Cs'], exp[a][t]),gwu.np_sum_scaled_mat(exp['As'], exp[a][t])) for t in range(exp[a].shape[0]) ]\n elif atoms_name =='checkpoint_Cs':\n learnt_mat = [(gwu.np_sum_scaled_mat(exp['checkpoint_Cs'], exp[a][t]),gwu.np_sum_scaled_mat(exp['checkpoint_As'], exp[a][t])) for t in range(exp[a].shape[0]) ]\n \n D = np.zeros((n,n), dtype=np.float64)\n \n for i in tqdm(range(n-1)):\n for j in range (i+1, n):\n \n dist,T= gwu.numpy_FGW_loss(learnt_mat[i][0], learnt_mat[j][0], learnt_mat[i][1], learnt_mat[j][1], alpha=alpha)\n D[i,j]= dist\n D[j,i]= dist\n return D",
"def stability_matrix(self,z):\n s = self.Ahat.shape[1]\n if self.type == 'General':\n # J Y^n = K Y^{n-1}\n K1 = np.column_stack((z*self.Ahat,self.d,1-self.d))\n K2 = snp.zeros(s+2); K2[-1] = 1\n K3 = np.concatenate((z*self.bhat,np.array((self.theta,1-self.theta))))\n K = np.vstack((K1,K2,K3))\n\n J = snp.eye(s+2)\n J[:s,:s] = J[:s,:s] - z*self.A\n J[-1,:s] = z*self.b\n\n M = snp.solve(J.astype('complex64'),K.astype('complex64'))\n #M = snp.solve(J, K) # This version is slower\n\n else:\n D=np.hstack([1.-self.d,self.d])\n thet=np.hstack([1.-self.theta,self.theta])\n A,b=self.A,self.b\n if self.type=='Type II':\n ahat = np.zeros([self.s,1]); ahat[:,0] = self.Ahat[:,0]\n bh = np.zeros([1,1]); bh[0,0]=self.bhat[0]\n A = np.hstack([ahat,self.A])\n A = np.vstack([np.zeros([1,self.s+1]),A])\n b = np.vstack([bh,self.b])\n\n M1=np.linalg.solve(np.eye(self.s)-z*self.A,D)\n L1=thet+z*np.dot(self.b.T,M1)\n M=np.vstack([L1,[1.,0.]])\n return M",
"def apply_volume_symmetry(self, use_inline_c=True):\n t1 = time.time()\n\n #Get the # of pixels and the order from the symmetry map\n symm = self.volume_symmetry\n (numpix, order) = symm.shape\n\n if use_inline_c and not config.cfg.force_pure_python:\n #------ C version (about 400x faster than python) -------\n #Put some variables in the workspace\n old_q = self.qspace.flatten() * 1.0\n qspace_flat = old_q * 0.0\n\n support = \"\"\n code = \"\"\"\n int pix, ord, index;\n for (pix=0; pix<numpix; pix++)\n {\n //Go through each pixel\n for (ord=0; ord<order; ord++)\n {\n //Now go through each equivalent q.\n index = SYMM2(pix, ord);\n if (index >= 0)\n {\n //Valid index.\n QSPACE_FLAT1(pix) += OLD_Q1(index);\n //printf(\"%d\\\\n\", index);\n }\n }\n }\n \"\"\"\n varlist = ['old_q', 'qspace_flat', 'numpix', 'order', 'symm']\n weave.inline(code, varlist, compiler='gcc', support_code=support)\n #Reshape it back as a 3D array.\n n = len(self.inst.qx_list)\n self.qspace = qspace_flat.reshape( (n,n,n) )\n else:\n #---- Pure python version ----\n\n #Clear the starting space\n old_q = self.qspace\n new_q = self.qspace * 0\n for pix in xrange(numpix):\n for ord in xrange(order):\n eq_index = symm[pix, ord]\n if eq_index >= 0:\n #Add up to this pixel, the equivalent one.\n #The list includes this given voxel too.\n new_q.flat[pix] += old_q.flat[eq_index]\n self.qspace = new_q\n\n #Done.\n if self.verbose: print \"Volume symmetry computed in %.3f sec.\" % (time.time()-t1)",
"def apply_symplectic(self, S, qubits):\n # Approach 1: convert the 2m x 2m symplectic matrix S to a 2n x 2n\n # matrix that acts on the corresponding columns in qubits\n # M = decompose.symplectic_to_matrix(S, self.n, qubits)\n # self.state = (self.state @ M) % 2\n\n # Approach 2: decompose the 2m x 2m symplectic matrix into a\n # series of {C, H, P} gates, then apply those\n # NOTE: this is actually much faster in practice for large n\n m = len(qubits)\n gates = decompose.decompose_state(CHP_Simulation(m, S))\n gates = decompose.change_gates(gates, qubits)\n decompose.apply_gates(gates, self)",
"def ApplySymmetryToTensor3(self, v3, initialize_symmetries = True):\n if initialize_symmetries:\n self.SetupFromSPGLIB()\n\n # Apply the permutation symmetry\n symph.permute_v3(v3)\n\n # Apply the translational symmetries\n symph.trans_v3(v3, self.QE_translations_irt)\n\n # Apply all the symmetries at gamma\n symph.sym_v3(v3, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)",
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def stickel_method(U: Set[Equation], ac_symbol: Function) -> SubstituteTerm:\n # Gather all variables for fresh var calculation\n ALL_VARS = vars_from_equations(U)\n original_from_generalized : Dict[Variable, Term] = dict()\n\n def generalize_term(t: Term) -> Variable:\n \"\"\"\n Returns a generalized variable for every\n term that's not a variable.\n \"\"\"\n vt = t\n if isinstance(t, Variable):\n original_from_generalized[t] = t\n else:\n vt = None\n for gen_var, og_term in original_from_generalized.items():\n if t == og_term:\n vt = gen_var\n break\n if vt is None:\n vt = fresh_variable(ALL_VARS)\n ALL_VARS.add(vt)\n original_from_generalized[vt] = t\n return vt\n\n var_count = Counter()\n # Go through each equation\n for e in U:\n LS, RS = flatten_equation(e, ac_symbol)\n # print(\"LS\", LS)\n # print(\"RS\", RS)\n\n # Generalize left and right sides\n LS_VARS = [generalize_term(t) for t in LS]\n RS_VARS = [generalize_term(t) for t in RS]\n\n # Calculate multiplicity\n VARS_IN_EQ = set(LS_VARS).union(set(RS_VARS))\n for x in VARS_IN_EQ:\n num = LS_VARS.count(x) - RS_VARS.count(x)\n var_count[x] += num\n\n # Create the equation with variable coeficients\n # being the counts above\n sympy_expression = 0\n var_map: Dict[sympy.core.Symbol, Variable] = dict()\n for x, count in var_count.items():\n # Construct Z3 variable\n sympy_var = symbols(x.symbol + \"_0\", integer=True, positive=True)\n var_map[sympy_var] = x\n\n # Construct part of expression\n sympy_expression += count * sympy_var\n\n\n # Determine the ordering of the diophantine solver output\n sympy_ordering = list(sympy_expression.expand(force=True).free_symbols)\n sympy_ordering.sort(key=default_sort_key)\n\n # Solve diophantine equation\n # print(original_from_generalized)\n # print(sympy_expression)\n basis_vector = diop_linear(sympy_expression)\n basis_tables = generate_basis_table(basis_vector)\n\n sigma = False\n while not sigma:\n # Generate the basis table\n basis_table = next(basis_tables)\n # print(basis_table)\n\n # Create variables representing each row\n row_vars = n_fresh_variables(ALL_VARS, len(basis_table))\n ALL_VARS = ALL_VARS.union(set(row_vars))\n\n # Craft intermediate substitution from basis table\n sub_basis: Dict[Variable, Term] = dict()\n for column, sympy_var in enumerate(sympy_ordering):\n term = None\n for i, row in enumerate(basis_table):\n if row[column] == 0:\n continue\n row_var = row_vars[i]\n for _ in range(row[column]):\n if term is None:\n term = row_var\n else: # z_2 + z_4\n term = ac_symbol(term, row_var)\n sub_basis[var_map[sympy_var]] = term\n\n # [TODO] [IN PROGRESS] Unify variables in the generalized terms with\n # their counterparts in the original terms.\n # print(sub_basis)\n new_eqs = set()\n for gen_var, basis_var in sub_basis.items():\n rhs = original_from_generalized[gen_var]\n new_eqs.add(Equation(\n basis_var,\n rhs\n ))\n sigma = syntactic_unification(new_eqs)\n\n\n # Currently returning one posisble unifier but we can keep generating\n # using the basis vector\n return {sigma}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
QE SUM RULE =========== This subroutine imposes on the given force constant matrix the acoustic sum rule
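For reference (a standard statement of the rule, not part of this dataset row): translational invariance requires every block row of the real-space force-constant matrix to sum to zero,

    \sum_{n_b} \Phi_{\alpha\beta}(n_a, n_b) = 0 \quad \text{for all } n_a,\ \alpha,\ \beta,

which is what guarantees three zero-frequency acoustic modes at q = 0.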
|
def ImposeSumRule(self, force_constant, asr = "simple", axis = 1, zeu = None):
    QE_fc = np.zeros( (3, 3, self.QE_nat, self.QE_nat), order ="F", dtype = np.complex128)

    # Fill the effective charges if required
    if zeu is not None:
        # Convert in the correct indexing and use the fortran order
        f_zeu = np.einsum("ijk -> kji", zeu, order = "F", dtype = np.float64)
    else:
        f_zeu = np.zeros( (3, 3, self.QE_nat), order = "F", dtype = np.float64)

    # Prepare the force constant
    if asr != "custom":
        for na in range(self.QE_nat):
            for nb in range(self.QE_nat):
                QE_fc[:, :, na, nb] = force_constant[3 * na : 3* na + 3, 3*nb: 3 * nb + 3]

        #
        # print "ASR:", asr
        # print "AXIS:", axis
        # print "NAT:", self.QE_nat
        # print "TAU SHAPE:", np.shape(self.QE_tau)
        # print "QE_FC SHAPE:", np.shape(self.QE_fc)
        symph.set_asr(asr, axis, self.QE_tau, QE_fc, f_zeu)

        # Copy the new value on output
        for na in range(self.QE_nat):
            if zeu is not None:
                zeu[na, :,:] = f_zeu[:,:, na]

            for nb in range(self.QE_nat):
                force_constant[3 * na : 3* na + 3, 3*nb: 3 * nb + 3] = QE_fc[:,:, na, nb]
    else:
        CustomASR(force_constant)
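To make the effect concrete, here is a minimal self-contained NumPy sketch of the "simple" acoustic sum rule (illustrative only: the atom count and random matrix are made up, and the method above actually delegates the real work to the Fortran routine symph.set_asr):

# Shift the diagonal (self-interaction) blocks so every block row sums to zero.
import numpy as np

nat = 4
fc = np.random.rand(3 * nat, 3 * nat)
fc = 0.5 * (fc + fc.T)                          # symmetrize, like a real FC matrix

for a in range(nat):
    for i in range(3):
        for j in range(3):
            row = fc[3*a + i, j::3]             # couplings (a,i) -> (b,j) for all atoms b
            fc[3*a + i, 3*a + j] -= row.sum()   # absorb the excess into the self term

# Every 3x3 block row now sums to (numerically) zero
residual = max(abs(fc[3*a:3*a+3, :].reshape(3, nat, 3).sum(axis=1)).max() for a in range(nat))
print(residual)                                 # ~1e-16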
|
[
"def _reduceSum(self,tensor):\n if self._envType == \"DDP\":\n dist.reduce(tensor,0)\n return tensor",
"def test_large_sum(self):\r\n for n in [10, 20, 30, 40, 50]:\r\n A = matrix(range(n*n), (n,n))\r\n x = Variable(n,n)\r\n p = Problem(Minimize(at.sum_entries(x)), [x >= A])\r\n result = p.solve()\r\n answer = n*n*(n*n+1)/2 - n*n\r\n print result - answer\r\n self.assertAlmostEqual(result, answer)",
"def test_sum_with_scalar(self):\n sum_op = 5 + qml.PauliX(0) + 0\n final_op = qml.op_sum(qml.PauliX(0), qml.s_prod(5, qml.Identity(0)))\n # TODO: Use qml.equal when fixed.\n assert isinstance(sum_op, qml.ops.Sum)\n for s1, s2 in zip(sum_op.summands, final_op.summands):\n assert s1.name == s2.name\n assert s1.wires == s2.wires\n assert s1.data == s2.data\n assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)",
"def force_sum(forces, x, x1): \r\n \r\n # This variable stores the sum of all the force\r\n sum = 0\r\n\r\n # The following block checks whether the second input is a symbol or not----------------------------Check input\r\n if not isinstance(x, sp.Symbol):\r\n print(\"force_sum error: the second argument is not a symbol. Check\")\r\n return\r\n\r\n if not isinstance(x1,sp.Symbol):\r\n if x1 <= 0:\r\n print(\"force_sum error: the third input is not greater than 0, check\")\r\n return \r\n\r\n if isinstance(forces, list):\r\n\r\n for f in forces:\r\n if isinstance(f, list):\r\n print(\"force_sum error: the input is a list of list. Check the input\")\r\n return\r\n # this block checks if the input is a proper list of expression.\r\n # It returns\r\n # an error if the input is a list of list.\r\n #---------------------------------------------------------------------------------------------------\\Check input\r\n\r\n for f in forces:\r\n # as mentioned earlier, this function calculates the sum of all\r\n # forces from\r\n # -1 to x1.\r\n # Why -1? That is because the reaction force acts at x = 0 and it\r\n # is defined\r\n # by a dirac delta function and integral(diracDelta(x), from 0 to\r\n # inf) has a\r\n # definition which we cannot use meaningfully\r\n sum = sum + sp.integrate(f,[x, -1, x1])\r\n\r\n return sum\r\n\r\n # the following block is used when the input is just a single function\r\n sum = sum + sp.integrate(forces,[x, -1, x1])\r\n return sum",
"def sum_of_matr(matrix): \n total = sum([sum(x) for x in matrix])\n return total",
"def test_sum_multi_wire_operator_with_scalar(self):\n sum_op = 5 + qml.CNOT(wires=[0, 1])\n final_op = qml.op_sum(\n qml.CNOT(wires=[0, 1]),\n qml.s_prod(5, qml.prod(qml.Identity(0), qml.Identity(1))),\n )\n # TODO: Use qml.equal when fixed.\n assert isinstance(sum_op, qml.ops.Sum)\n for s1, s2 in zip(sum_op.summands, final_op.summands):\n assert s1.name == s2.name\n assert s1.wires == s2.wires\n assert s1.data == s2.data\n assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)",
"def axial_force(self):\n return sum([thick.effective_axial_force() for thick in self.thick])",
"def test_sum_hessian(problem):\n problem.set_up()\n skip_BCEWithLogitsLoss(problem) # TODO Implement _sum_hessian for BCEWithLogitsLoss\n\n backpack_res = BackpackDerivatives(problem).sum_hessian()\n autograd_res = AutogradDerivatives(problem).sum_hessian()\n\n check_sizes_and_values(autograd_res, backpack_res)\n problem.tear_down()",
"def __sum__(self):\n return sum(self.TAA)",
"def solve(es):\n es = es.copy()\n if len(es) == 1:\n return np.sum(es) * 2\n\n shared = np.min(es, axis=0, keepdims=True)\n extra_cost = np.sum(shared) * 2\n if extra_cost > 0:\n es -= shared\n\n return extra_cost + min([solve(es[:i])+solve(es[i:]) for i in range(1, len(es))])",
"def SolveTruss(self):\n #check if truss is statically indeterminate\n if (2*self.nSups)+self.nBeams != (2*self.nJoints):\n raise RuntimeError(\"Truss geometry not suitable for static equilibrium\\\n analysis\")\n \n #create angles_arr: row-joints, column-beams, values-angle of beam wrt +x axis\n self.angles_arr = np.zeros((self.nJoints,self.nBeams))\n for i in np.arange(self.nBeams):\n #find the two joints connected to each beam\n joints = np.where(self.beams_arr[:,i] == 1)[0]\n x_coord = self.joints_arr[joints,0]\n y_coord = self.joints_arr[joints,1]\n del_y,del_x = y_coord[1]-y_coord[0], x_coord[1]-x_coord[0]\n alpha = np.arctan2(del_y,del_x) #angle at first joint\n beta = np.pi + alpha #angle at second joint\n self.angles_arr[joints,i] = [alpha,beta]\n\n indR = self.nBeams #index of reaction force\n row,col,data = [],[],[] #store values that help to make csr matrix\n \n #horizontal force balance at each joint\n #for each joint, get the values of elements of the arr that are non-zero\n rhs_h = np.zeros((self.nJoints,1)) #right hand side of equation\n for i in np.arange(self.nJoints):\n beams = np.where(self.beams_arr[i,:] == 1)[0]\n beam_n = np.shape(beams)[0] #number of beams connected to joint\n row.extend([i]*beam_n)\n col.extend(beams)\n angle = self.angles_arr[i,beams]\n data.extend(np.cos(angle))\n if self.joints_arr[i,4] == 1: #for reaction forces at support\n row.append(i)\n col.append(indR)\n data.append(1)\n indR += 1\n rhs_h[i] = self.joints_arr[i,2] #for external forces\n\n #vertical force balance at each joint\n #for each joint, get the values of elements of the arr that are non-zero\n rhs_v = np.zeros((self.nJoints,1))\n for i in np.arange(self.nJoints):\n beams = np.where(self.beams_arr[i,:] == 1)[0]\n beam_n = np.shape(beams)[0]\n row.extend([self.nJoints+i]*beam_n)\n col.extend(beams)\n angle = self.angles_arr[i,beams]\n data.extend(np.sin(angle))\n if self.joints_arr[i,4]:\n row.append(self.nJoints+i)\n col.append(indR)\n data.append(1)\n indR += 1\n rhs_v[i] = self.joints_arr[i,3]\n rhs_arr = np.concatenate((rhs_h,rhs_v),axis = 0)\n \n #create sparse matrix\n sparseM = csr_matrix((data,(row,col)),shape = (self.n,self.n))\n \n try:\n self.solve_F = spsolve(sparseM,rhs_arr)\n except:\n raise RuntimeError(\"Cannot solve the linear system, unstable truss?\")",
"def sumMatrix(*args):\n return sum(args)",
"def apply_bc(self):\n nsize = len(self._nodes)\n ncount = 0\n for node in self._nodes:\n for dof in range(3):\n i = nsize*dof + ncount\n if not node._fixed[dof]:\n # not fixed: apply load to right hand side vector\n self._R[i] = node._r[dof]\n else:\n # is fixed: apply displacement and set corresponding equations to identity\n self._R[i] = node._u[dof]\n self._K[i].fill(0)\n self._K[i,i] = 1\n # TODO: apply suture constraints\n ncount = ncount + 1",
"def sum_sqr_vals(self):\n\treturn numpy.sum(numpy.square(self.data))",
"def sumw ( self ) :\n N = len ( self )\n if 0 == N : return 0 \n g = ( self.weight ( i ) for i in range ( N ) ) \n return sum ( g )",
"def atmost_one_VNF(self):\r\n self.sum_o = 0\r\n for f in self.VNFS:\r\n for u in G.nodes():\r\n name_o = self.o_template.format(f, u)\r\n RHS = self.model.getVarByName(name_o)\r\n self.sum_o += RHS\r\n self.model.addConstr(self.sum_o, GRB.LESS_EQUAL, 1)",
"def __vertical_sum_of_all(self, ws: openpyxl.worksheet, matrix_x_shift: int = 0, matrix_y_shift: int = 0):\n assert matrix_x_shift >= 0\n assert matrix_y_shift >= 0\n for row_number in range(self.array_length):\n formula = \"=sum(\" + get_column_letter(self.x_shift + 1 + matrix_x_shift) + str(\n 1 + row_number + self.y_shift + matrix_y_shift) + \":\" + \\\n get_column_letter(self.array_length + self.x_shift + matrix_x_shift) + \\\n str(row_number + 1 + self.y_shift + matrix_y_shift) + str(\")\")\n ws[get_column_letter(self.x_shift + 1 + matrix_x_shift + self.array_length) +\n str(row_number + 1 + self.y_shift + matrix_y_shift)].value = formula",
"def __horizontal_sum_of_all(self, ws: openpyxl.worksheet, matrix_x_shift: int = 0, matrix_y_shift: int = 0):\n for column_number in range(self.array_length):\n formula = \"=sum(\" + get_column_letter(column_number + 1 + self.x_shift + matrix_x_shift) + str(\n 1 + self.y_shift + matrix_y_shift) + \":\" + get_column_letter(\n column_number + 1 + self.x_shift + matrix_x_shift) + str(\n self.y_shift + matrix_y_shift + self.array_length) + str(\")\")\n ws[get_column_letter(column_number + 1 + self.x_shift + matrix_x_shift) + str(\n self.array_length + 1 + self.y_shift + matrix_y_shift)].value = formula",
"def sum(self, dims=None):\n return self.to_linexpr().sum(dims)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
USE SPGLIB TO SETUP THE SYMMETRIZATION ====================================== This function uses spglib to find symmetries, recognize the supercell, and set up all the variables needed to perform the symmetrization inside the supercell.
|
def SetupFromSPGLIB(self):
    if not __SPGLIB__:
        raise ImportError("Error, this function works only if spglib is available")

    # Get the symmetries
    spg_syms = spglib.get_symmetry(self.structure.get_ase_atoms(), symprec = self.threshold)
    symmetries = GetSymmetriesFromSPGLIB(spg_syms, regolarize= False)

    trans_irt = 0
    self.QE_s[:,:,:] = 0

    # Check how many point group symmetries do we have
    n_syms = 0
    for i, sym in enumerate(symmetries):
        # Extract the rotation and the fractional translation
        rot = sym[:,:3]

        # Check if the rotation is equal to the first one
        if np.sum( (rot - symmetries[0][:,:3])**2 ) < 0.1 and n_syms == 0 and i > 0:
            # We got all the rotations
            n_syms = i
            break

        # Extract the point group
        if n_syms == 0:
            self.QE_s[:,:, i] = rot.T

            # Get the IRT (Atoms mapping using symmetries)
            irt = GetIRT(self.structure, sym)
            self.QE_irt[i, :] = irt + 1  # Py to Fort

    if n_syms == 0:
        n_syms = len(symmetries)

    # From the point group symmetries, get the supercell
    n_supercell = len(symmetries) // n_syms
    self.QE_translation_nr = n_supercell
    self.QE_nsymq = n_syms
    self.QE_nsym = n_syms

    self.QE_translations_irt = np.zeros( (self.structure.N_atoms, n_supercell), dtype = np.intc, order = "F")
    self.QE_translations = np.zeros( (3, n_supercell), dtype = np.double, order = "F")

    # Now extract the translations
    for i in range(n_supercell):
        sym = symmetries[i * n_syms]

        # Check if the symmetries are correctly setup
        I = np.eye(3)
        ERROR_MSG = """
        Error, symmetries are not correctly ordered.
        They must always start with the identity.

        N_syms = {}; N = {}; SYM = {}
        """.format(n_syms, i*n_syms, sym)
        assert np.sum( (I - sym[:,:3])**2) < 0.5, ERROR_MSG

        # Get the irt for the translation (and the translation)
        irt = GetIRT(self.structure, sym)
        self.QE_translations_irt[:, i] = irt + 1
        self.QE_translations[:, i] = sym[:,3]

    # For each symmetry operation, assign the inverse
    self.QE_invs[:] = get_invs(self.QE_s, self.QE_nsym)
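For context, a standalone sketch of the spglib call the method above relies on (a toy two-atom cubic cell; the values are illustrative, and spglib also accepts the documented (lattice, fractional positions, atomic numbers) tuple instead of an ASE Atoms object):

# Get rotations and fractional translations for a toy CsCl-like cell.
import numpy as np
import spglib

lattice = 4.0 * np.eye(3)                        # cubic cell, 4 Angstrom
positions = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]   # fractional coordinates
numbers = [55, 17]                               # Cs, Cl

sym = spglib.get_symmetry((lattice, positions, numbers), symprec=1e-5)
print(sym["rotations"].shape)      # (n_ops, 3, 3) integer rotation matrices
print(sym["translations"].shape)   # (n_ops, 3) fractional translations
print(spglib.get_spacegroup((lattice, positions, numbers), symprec=1e-5))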
|
[
"def ApplyTranslationsToSupercell(fc_matrix, super_cell_structure, supercell):\n\n natsc = super_cell_structure.N_atoms\n\n # Check the consistency of the passed options\n natsc3, _ = np.shape(fc_matrix)\n assert natsc == int(natsc3 / 3), \"Error, wrong number of atoms in the supercell structure\"\n assert natsc3 == _, \"Error, the matrix passed has a wrong shape\"\n assert natsc % np.prod(supercell) == 0, \"Error, the given supercell is impossible with the number of atoms\"\n\n # Fill the auxiliary matrix\n new_v2 = np.zeros( (3,3, natsc, natsc), dtype = np.double, order =\"F\")\n for i in range(natsc):\n for j in range(natsc):\n new_v2[:, :, i, j] = fc_matrix[3*i : 3*(i+1), 3*j : 3*(j+1)]\n\n\n # The number of translations\n n_trans = np.prod(supercell)\n trans_irt = np.zeros((natsc, n_trans), dtype = np.double, order = \"F\")\n\n # Setup the translational symmetries\n for nx in range(supercell[0]):\n for ny in range(supercell[1]):\n for nz in range(supercell[2]):\n # Build the translational symmetry\n symmat = np.zeros((3,4))\n symmat[:3,:3] = np.eye(3)\n symmat[:, 3] = np.array([nx, ny, nz], dtype = float) / np.array(supercell)\n\n\n nindex = supercell[2] * supercell[1] *nx \n nindex += supercell[2] * ny \n nindex += nz \n\n # Get the IRT for this symmetry operation in the supercell\n trans_irt[:, nindex] = GetIRT(super_cell_structure, symmat) + 1 \n \n\n \n \n # Apply the translations\n symph.trans_v2(new_v2, trans_irt)\n\n # Return back to the fc_matrix\n for i in range(natsc):\n for j in range(natsc):\n fc_matrix[3*i : 3*(i+1), 3*j : 3*(j+1)] = new_v2[:, :, i, j]",
"def GetSymmetriesFromSPGLIB(spglib_sym, regolarize = False):\n \n # Check if the type is correct\n if not \"translations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'translations' key.\")\n \n if not \"rotations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'rotations' key.\")\n \n # Get the number of symmetries\n out_sym = []\n n_sym = np.shape(spglib_sym[\"translations\"])[0]\n \n translations = spglib_sym[\"translations\"]\n rotations = spglib_sym[\"rotations\"]\n \n for i in range(n_sym):\n # Create the symmetry\n sym = np.zeros((3,4))\n sym[:,:3] = rotations[i, :, :]\n sym[:, 3] = translations[i,:]\n \n # Edit the translation\n if regolarize:\n sym[:, 3] *= 2\n sym[:, 3] = np.floor(sym[:, 3] + .5)\n sym[:, 3] *= .5\n sym[:, 3] = sym[:,3] % 1\n \n out_sym.append(sym)\n \n return out_sym",
"def SSpcGroup(SGData,SSymbol):\n \n def fixMonoOrtho():\n mod = ''.join(modsym).replace('1/2','0').replace('1','0')\n if SGData['SGPtGrp'] in ['2','m']: #OK\n if mod in ['a00','0b0','00g']:\n result = [i*-1 for i in SGData['SSGKl']]\n else:\n result = SGData['SSGKl'][:]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n elif SGData['SGPtGrp'] == '2/m': #OK\n if mod in ['a00','0b0','00g']:\n result = SGData['SSGKl'][:]\n else:\n result = [i*-1 for i in SGData['SSGKl']]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n else: #orthorhombic\n return [-SSGKl[i] if mod[i] in ['a','b','g'] else SSGKl[i] for i in range(3)]\n \n def extendSSGOps(SSGOps):\n for OpA in SSGOps:\n OpAtxt = SSMT2text(OpA)\n if 't' not in OpAtxt:\n continue\n for OpB in SSGOps:\n OpBtxt = SSMT2text(OpB)\n if 't' not in OpBtxt:\n continue\n OpC = list(SGProd(OpB,OpA))\n OpC[1] %= 1.\n OpCtxt = SSMT2text(OpC)\n# print OpAtxt.replace(' ','')+' * '+OpBtxt.replace(' ','')+' = '+OpCtxt.replace(' ','')\n for k,OpD in enumerate(SSGOps):\n OpDtxt = SSMT2text(OpD)\n OpDtxt2 = ''\n if SGData['SGGray']: \n OpDtxt2 = SSMT2text([OpD[0],OpD[1]+np.array([0.,0.,0.,.5])])\n# print ' ('+OpCtxt.replace(' ','')+' = ? '+OpDtxt.replace(' ','')+')'\n if OpCtxt == OpDtxt:\n continue\n elif OpCtxt == OpDtxt2:\n continue\n elif OpCtxt.split(',')[:3] == OpDtxt.split(',')[:3]:\n if 't' not in OpDtxt:\n SSGOps[k] = OpC\n# print k,' new:',OpCtxt.replace(' ','')\n break\n else:\n OpCtxt = OpCtxt.replace(' ','')\n OpDtxt = OpDtxt.replace(' ','')\n Txt = OpCtxt+' conflicts with '+OpDtxt\n# print (Txt)\n return False,Txt\n return True,SSGOps\n \n def findMod(modSym):\n for a in ['a','b','g']:\n if a in modSym:\n return a\n \n def genSSGOps():\n SSGOps = SSGData['SSGOps'][:]\n iFrac = {}\n for i,frac in enumerate(SSGData['modSymb']):\n if frac in ['1/2','1/3','1/4','1/6','1']:\n iFrac[i] = frac+'.'\n# print SGData['SpGrp']+SSymbol\n# print 'SSGKl',SSGKl,'genQ',genQ,'iFrac',iFrac,'modSymb',SSGData['modSymb']\n# set identity & 1,-1; triclinic\n SSGOps[0][0][3,3] = 1.\n## expand if centrosymmetric\n# if SGData['SGInv']:\n# SSGOps += [[-1*M,V] for M,V in SSGOps[:]]\n# monoclinic - all done & all checked\n if SGData['SGPtGrp'] in ['2','m']: #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n for i in iFrac:\n SSGOps[1][0][3,i] = -SSGKl[0]\n elif SGData['SGPtGrp'] == '2/m': #OK\n SSGOps[1][0][3,3] = SSGKl[1]\n if 's' in gensym:\n SSGOps[1][1][3] = 0.5\n for i in iFrac:\n SSGOps[1][0][3,i] = SSGKl[0]\n \n# orthorhombic - all OK not fully checked\n elif SGData['SGPtGrp'] in ['222','mm2','m2m','2mm']: #OK\n if SGData['SGPtGrp'] == '222':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[1,2],2:[1,3]},'b':{2:[3,2],0:[1,2]}} #OK\n elif SGData['SGPtGrp'] == 'mm2':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} #OK\n elif SGData['SGPtGrp'] == 'm2m':\n OrOps = {'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]}} #OK\n elif SGData['SGPtGrp'] == '2mm':\n OrOps = {'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]}} #OK\n a = findMod(SSGData['modSymb'])\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSGKl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSGKl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] == 'mmm': #OK\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} \n a = findMod(SSGData['modSymb'])\n if 
a == 'g':\n SSkl = [1,1,1]\n elif a == 'a':\n SSkl = [-1,1,-1]\n else:\n SSkl = [1,-1,-1]\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSkl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSkl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps \n# tetragonal - all done & checked\n elif SGData['SGPtGrp'] == '4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n elif SGData['SGPtGrp'] == '-4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = 1\n elif SGData['SGPtGrp'] in ['4/m',]: #OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n for i,j in enumerate([1,3]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['422','4mm','-42m','-4m2',]: #OK\n iGens = [1,4,5]\n if SGData['SGPtGrp'] in ['4mm','-4m2',]:\n iGens = [1,6,7]\n for i,j in enumerate(iGens):\n if '1/2' in SSGData['modSymb'] and i < 2:\n SSGOps[j][0][3,1] = SSGKl[i]\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n if 's' in gensym and j == 6:\n SSGOps[j][1][3] = -genQ[i]\n else:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['4/mmm',]:#OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n SSGOps[6][0][3,1] = SSGKl[1]\n if modsym:\n SSGOps[1][1][3] = -genQ[3]\n for i,j in enumerate([1,2,6,7]):\n SSGOps[j][0][3,3] = 1\n SSGOps[j][1][3] = genQ[i]\n E,Result = extendSSGOps(SSGOps)\n if not E:\n return E,Result\n else:\n SSGOps = Result\n \n# trigonal - all done & checked\n elif SGData['SGPtGrp'] == '3': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-3': #OK\n SSGOps[1][0][3,3] = -SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] in ['312','3m','-3m','-3m1','3m1']: #OK\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n for i,j in enumerate([1,5]):\n if SGData['SGPtGrp'] in ['3m','-3m']:\n SSGOps[j][0][3,3] = 1\n else: \n SSGOps[j][0][3,3] = SSGKl[i+1]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['321','32']: #OK\n for i,j in enumerate([1,4]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['31m','-31m']: #OK\n ids = [1,3]\n if SGData['SGPtGrp'] == '-31m':\n ids = [1,3]\n if '1/3' in SSGData['modSymb']:\n SSGOps[ids[0]][0][3,1] = -SSGKl[0]\n for i,j in enumerate(ids):\n SSGOps[j][0][3,3] = 1\n if genQ[i+1]:\n SSGOps[j][1][3] = genQ[i+1]\n \n# hexagonal all done & checked\n elif SGData['SGPtGrp'] == '6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n elif SGData['SGPtGrp'] in ['6/m',]: #OK\n SSGOps[1][0][3,3] = -SSGKl[1]\n SSGOps[1][1][3] = genQ[0]\n SSGOps[2][1][3] = genQ[1]\n elif SGData['SGPtGrp'] in ['622',]: #OK\n for i,j in enumerate([1,9,8]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = -genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n \n elif SGData['SGPtGrp'] in ['6mm','-62m','-6m2',]: #OK\n for i,j in enumerate([1,6,7]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif 
SGData['SGPtGrp'] in ['6/mmm',]: # OK\n for i,j in enumerate([1,2,10,11]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['1','-1']: #triclinic - done\n return True,SSGOps\n E,SSGOps = extendSSGOps(SSGOps)\n return E,SSGOps\n \n def specialGen(gensym,modsym):\n sym = ''.join(gensym)\n if SGData['SGPtGrp'] in ['2/m',] and 'n' in SGData['SpGrp']:\n if 's' in sym:\n gensym = 'ss'\n if SGData['SGPtGrp'] in ['-62m',] and sym == '00s':\n gensym = '0ss'\n elif SGData['SGPtGrp'] in ['222',]:\n if sym == '00s':\n gensym = '0ss'\n elif sym == '0s0':\n gensym = 'ss0'\n elif sym == 's00':\n gensym = 's0s'\n elif SGData['SGPtGrp'] in ['mmm',]:\n if 'g' in modsym:\n if sym == 's00':\n gensym = 's0s'\n elif sym == '0s0':\n gensym = '0ss'\n elif 'a' in modsym:\n if sym == '0s0':\n gensym = 'ss0'\n elif sym == '00s':\n gensym = 's0s'\n elif 'b' in modsym:\n if sym == '00s':\n gensym = '0ss'\n elif sym == 's00':\n gensym = 'ss0'\n return gensym\n \n Fracs = {'1/2':0.5,'1/3':1./3,'1':1.0,'0':0.,'s':.5,'t':1./3,'q':.25,'h':-1./6,'a':0.,'b':0.,'g':0.}\n if SGData['SGLaue'] in ['m3','m3m']:\n return '(3+1) superlattices not defined for cubic space groups',None\n elif SGData['SGLaue'] in ['3R','3mR']:\n return '(3+1) superlattices not defined for rhombohedral settings - use hexagonal setting',None\n try:\n modsym,gensym = splitSSsym(SSymbol)\n except ValueError:\n return 'Error in superspace symbol '+SSymbol,None\n modQ = [Fracs[mod] for mod in modsym]\n SSGKl = SGData['SSGKl'][:]\n if SGData['SGLaue'] in ['2/m','mmm']:\n SSGKl = fixMonoOrtho()\n Ngen = len(gensym)\n if SGData.get('SGGray',False):\n Ngen -= 1\n if len(gensym) and Ngen != len(SSGKl):\n return 'Wrong number of items in generator symbol '+''.join(gensym),None\n gensym = specialGen(gensym[:Ngen],modsym)\n genQ = [Fracs[mod] for mod in gensym[:Ngen]]\n if not genQ:\n genQ = [0,0,0,0]\n SSgSpc = SGData['SpGrp']+SSymbol\n if SGData['SGGray']:\n SSgSpc = SSgSpc.replace('(',\" 1'(\")\n SSGData = {'SSpGrp':SSgSpc,'modQ':modQ,'modSymb':modsym,'SSGKl':SSGKl}\n SSCen = np.zeros((len(SGData['SGCen']),4))\n for icen,cen in enumerate(SGData['SGCen']):\n SSCen[icen,0:3] = cen\n if 'BNSlattsym' in SGData and '_' in SGData['BNSlattsym'][0]:\n Ncen = len(SGData['SGCen'])\n for icen in range(Ncen//2,Ncen):\n SSCen[icen,3] = 0.5\n SSGData['SSGCen'] = SSCen%1.\n SSGData['SSGOps'] = []\n for iop,op in enumerate(SGData['SGOps']):\n T = np.zeros(4)\n ssop = np.zeros((4,4))\n ssop[:3,:3] = op[0]\n T[:3] = op[1]\n SSGData['SSGOps'].append([ssop,T])\n E,Result = genSSGOps()\n if E:\n SSGData['SSGOps'] = Result\n if DEBUG:\n print ('Super spacegroup operators for '+SSGData['SSpGrp'])\n for Op in Result:\n print (SSMT2text(Op).replace(' ',''))\n if SGData['SGInv']: \n for Op in Result:\n Op = [-Op[0],-Op[1]%1.]\n print (SSMT2text(Op).replace(' ','')) \n return None,SSGData\n else:\n return Result+'\\nOperator conflict - incorrect superspace symbol',None",
"def setup_symbols_for_species_pKs(self, sid_list):\n new_variable_index = 0\n self.variable_vector_dict = {}\n for species_id in sid_list:\n pK_data_val = self.get_pK_val(species_id) \n self.variable_vector_dict[species_id] = [symbols('x[%d]'%new_variable_index), pK_data_val]\n new_variable_index += 1\n #for each species_id, set up the sequence of species that eventually lead to least protonated state, for binding constant calculation\n self.compounds_species_id_sequence = {}\n for species_id in self.compounds_data_dict.keys():\n self.compounds_species_id_sequence[species_id] = self.get_sequence_of_species_ids(species_id)",
"def SpcGroup(SGSymbol):\n LaueSym = ('-1','2/m','mmm','4/m','4/mmm','3R','3mR','3','3m1','31m','6/m','6/mmm','m3','m3m')\n LattSym = ('P','A','B','C','I','F','R')\n UniqSym = ('','','a','b','c','',)\n SysSym = ('triclinic','monoclinic','orthorhombic','tetragonal','rhombohedral','trigonal','hexagonal','cubic')\n SGData = {}\n if len(SGSymbol.split()) < 2:\n return SGErrors(0),SGData\n if ':R' in SGSymbol:\n SGSymbol = SGSymbol.replace(':',' ') #get rid of ':' in R space group symbols from some cif files\n SGData['SGGray'] = False\n if \"1'\" in SGSymbol: #set for incommensurate magnetic\n SGData['SGGray'] = True\n SGSymbol = SGSymbol.replace(\"1'\",'')\n SGSymbol = SGSymbol.split(':')[0] #remove :1/2 setting symbol from some cif files\n if '-2' in SGSymbol: #replace bad but legal symbols with correct equivalents\n SGSymbol = SGSymbol.replace('-2','m')\n if SGSymbol.split()[1] =='3/m':\n SGSymbol = SGSymbol.replace('3/m','-6')\n import pyspg\n SGInfo = pyspg.sgforpy(SGSymbol)\n SGData['SpGrp'] = SGSymbol.strip().lower().capitalize()\n SGData['SGLaue'] = LaueSym[SGInfo[0]-1]\n SGData['SGInv'] = bool(SGInfo[1])\n SGData['SGLatt'] = LattSym[SGInfo[2]-1]\n SGData['SGUniq'] = UniqSym[SGInfo[3]+1]\n SGData['SGFixed'] = False\n SGData['SGOps'] = []\n SGData['SGGen'] = []\n for i in range(SGInfo[5]):\n Mat = np.array(SGInfo[6][i])\n Trns = np.array(SGInfo[7][i])\n SGData['SGOps'].append([Mat,Trns])\n if 'array' in str(type(SGInfo[8])): #patch for old fortran bin?\n SGData['SGGen'].append(int(SGInfo[8][i]))\n SGData['BNSlattsym'] = [LattSym[SGInfo[2]-1],[0,0,0]]\n lattSpin = []\n if SGData['SGLatt'] == 'P':\n SGData['SGCen'] = np.array(([0,0,0],))\n elif SGData['SGLatt'] == 'A':\n SGData['SGCen'] = np.array(([0,0,0],[0,.5,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'B':\n SGData['SGCen'] = np.array(([0,0,0],[.5,0,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'C':\n SGData['SGCen'] = np.array(([0,0,0],[.5,.5,0,]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'I':\n SGData['SGCen'] = np.array(([0,0,0],[.5,.5,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'F':\n SGData['SGCen'] = np.array(([0,0,0],[0,.5,.5],[.5,0,.5],[.5,.5,0,]))\n lattSpin += [1,1,1,1]\n elif SGData['SGLatt'] == 'R':\n SGData['SGCen'] = np.array(([0,0,0],[2./3,1./3,1./3],[1./3,2./3,2./3]))\n\n if SGData['SGInv']:\n if SGData['SGLaue'] in ['-1','2/m','mmm']:\n Ibar = 7\n elif SGData['SGLaue'] in ['4/m','4/mmm']:\n Ibar = 1\n elif SGData['SGLaue'] in ['3R','3mR','3','3m1','31m','6/m','6/mmm']:\n Ibar = 15 #8+4+2+1\n else:\n Ibar = 4\n Ibarx = Ibar&14\n else:\n Ibarx = 8\n if SGData['SGLaue'] in ['-1','2/m','mmm','m3','m3m']:\n Ibarx = 0\n moregen = []\n for i,gen in enumerate(SGData['SGGen']):\n if SGData['SGLaue'] in ['m3','m3m']:\n if gen in [1,2,4]:\n SGData['SGGen'][i] = 4\n elif gen < 7:\n SGData['SGGen'][i] = 0\n elif SGData['SGLaue'] in ['4/m','4/mmm','3R','3mR','3','3m1','31m','6/m','6/mmm']:\n if gen == 2:\n SGData['SGGen'][i] = 4\n elif gen in [3,5]:\n SGData['SGGen'][i] = 3\n elif gen == 6:\n if SGData['SGLaue'] in ['4/m','4/mmm']:\n SGData['SGGen'][i] = 128\n else:\n SGData['SGGen'][i] = 16\n elif not SGData['SGInv'] and gen == 12:\n SGData['SGGen'][i] = 8\n elif (not SGData['SGInv']) and (SGData['SGLaue'] in ['3','3m1','31m','6/m','6/mmm']) and (gen == 1):\n SGData['SGGen'][i] = 24\n gen = SGData['SGGen'][i]\n if gen == 99:\n gen = 8\n if SGData['SGLaue'] in ['3m1','31m','6/m','6/mmm']:\n gen = 3\n elif SGData['SGLaue'] == 'm3m':\n gen = 12\n SGData['SGGen'][i] = gen\n elif gen == 98:\n gen = 8\n if 
SGData['SGLaue'] in ['3m1','31m','6/m','6/mmm']:\n gen = 4\n SGData['SGGen'][i] = gen\n elif not SGData['SGInv'] and gen in [23,] and SGData['SGLaue'] in ['m3','m3m']:\n SGData['SGGen'][i] = 24\n elif gen >= 16 and gen != 128:\n if not SGData['SGInv']:\n gen = 31\n else:\n gen ^= Ibarx \n SGData['SGGen'][i] = gen\n if SGData['SGInv']:\n if gen < 128:\n moregen.append(SGData['SGGen'][i]^Ibar)\n else:\n moregen.append(1)\n SGData['SGGen'] += moregen\n if SGData['SGLaue'] in '-1':\n SGData['SGSys'] = SysSym[0]\n elif SGData['SGLaue'] in '2/m':\n SGData['SGSys'] = SysSym[1]\n elif SGData['SGLaue'] in 'mmm':\n SGData['SGSys'] = SysSym[2]\n elif SGData['SGLaue'] in ['4/m','4/mmm']:\n SGData['SGSys'] = SysSym[3]\n elif SGData['SGLaue'] in ['3R','3mR']:\n SGData['SGSys'] = SysSym[4]\n elif SGData['SGLaue'] in ['3','3m1','31m']:\n SGData['SGSys'] = SysSym[5]\n elif SGData['SGLaue'] in ['6/m','6/mmm']:\n SGData['SGSys'] = SysSym[6]\n elif SGData['SGLaue'] in ['m3','m3m']:\n SGData['SGSys'] = SysSym[7]\n SGData['SGPolax'] = SGpolar(SGData)\n SGData['SGPtGrp'],SGData['SSGKl'] = SGPtGroup(SGData)\n\n if SGData['SGLatt'] == 'R':\n if SGData['SGPtGrp'] in ['3',]:\n SGData['SGSpin'] = 3*[1,]\n elif SGData['SGPtGrp'] in ['-3','32','3m']:\n SGData['SGSpin'] = 4*[1,]\n elif SGData['SGPtGrp'] in ['-3m',]:\n SGData['SGSpin'] = 5*[1,]\n \n else:\n if SGData['SGPtGrp'] in ['1','3','23',]:\n SGData['SGSpin'] = lattSpin+[1,]\n elif SGData['SGPtGrp'] in ['-1','2','m','4','-4','-3','312','321','3m1','31m','6','-6','432','-43m']:\n SGData['SGSpin'] = lattSpin+[1,1,]\n elif SGData['SGPtGrp'] in ['2/m','4/m','422','4mm','-42m','-4m2','-3m1','-31m',\n '6/m','622','6mm','-6m2','-62m','m3','m3m']:\n SGData['SGSpin'] = lattSpin+[1,1,1,]\n else: #'222'-'mmm','4/mmm','6/mmm'\n SGData['SGSpin'] = lattSpin+[1,1,1,1,]\n return SGInfo[-1],SGData",
"def retr_symmetry_generators(struct,ini):\n #hall = struct.spacegroup_hall()\n ini[\"symgen\"] = struct.get_symmetry_generators()\n return ini",
"def __init__(self, structure, threshold = 1e-5):\n \n if not structure.has_unit_cell:\n raise ValueError(\"Error, symmetry operation can be initialize only if the structure has a unit cell\")\n \n self.structure = structure\n self.threshold = np.float64(threshold)\n \n # Setup the threshold \n symph.symm_base.set_accep_threshold(self.threshold)\n \n nat = structure.N_atoms\n \n # Define the quantum espresso symmetry variables in optimized way to work with Fortran90\n self.QE_nat = np.intc( nat )\n self.QE_s = np.zeros( (3, 3, 48) , dtype = np.intc, order = \"F\")\n self.QE_irt = np.zeros( (48, nat), dtype = np.intc, order = \"F\")\n self.QE_invs = np.zeros( (48), dtype = np.intc, order = \"F\")\n self.QE_rtau = np.zeros( (3, 48, nat), dtype = np.float64, order = \"F\")\n self.QE_ft = np.zeros( (3, 48), dtype = np.float64, order = \"F\")\n \n \n self.QE_minus_q = False\n self.QE_irotmq = np.intc(0)\n self.QE_nsymq = np.intc( 0 )\n self.QE_nsym = np.intc(0)\n \n # Prepare the QE structure\n self.QE_tau = np.zeros((3, nat), dtype = np.float64, order = \"F\")\n self.QE_ityp = np.zeros(nat, dtype = np.intc)\n \n symbs = {}\n counter = 1\n for i in range(nat):\n # Rank the atom number\n atm = structure.atoms[i]\n if not atm in symbs.keys():\n symbs[atm] = counter\n counter += 1\n \n self.QE_ityp[i] = symbs[atm]\n # Convert in bohr\n for j in range(3):\n self.QE_tau[j, i] = structure.coords[i, j]\n \n \n self.QE_at = np.zeros( (3,3), dtype = np.float64, order = \"F\")\n self.QE_bg = np.zeros( (3,3), dtype = np.float64, order = \"F\")\n \n bg = structure.get_reciprocal_vectors()\n for i in range(3):\n for j in range(3):\n self.QE_at[i,j] = structure.unit_cell[j,i]\n self.QE_bg[i,j] = bg[j,i] / (2* np.pi) \n\n # Here we define the quantities required to symmetrize the supercells\n self.QE_at_sc = self.QE_at.copy()\n self.QE_bg_sc = self.QE_bg.copy()\n self.QE_translation_nr = 1 # The supercell total dimension (Nx * Ny * Nz)\n self.QE_translations = [] # The translations in crystal axes\n\n # After the translation, which vector is transformed in which one?\n # This info is stored here as ndarray( size = (N_atoms, N_trans), dtype = np.intc, order = \"F\")\n self.QE_translations_irt = []",
"def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...",
"def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):\n \n nsym = len(symmetries)\n \n self.QE_nsymq = np.intc(nsym)\n self.QE_nsym = self.QE_nsymq\n \n \n for i, sym in enumerate(symmetries):\n self.QE_s[:,:, i] = np.transpose(sym[:, :3])\n \n # Get the atoms correspondence\n eq_atoms = GetIRT(self.structure, sym)\n \n self.QE_irt[i, :] = eq_atoms + 1\n \n # Get the inverse symmetry\n inv_sym = np.linalg.inv(sym[:, :3])\n for k, other_sym in enumerate(symmetries):\n if np.sum( (inv_sym - other_sym[:, :3])**2) < __EPSILON__:\n break\n \n self.QE_invs[i] = k + 1\n \n # Setup the position after the symmetry application\n for k in range(self.QE_nat):\n self.QE_rtau[:, i, k] = self.structure.coords[eq_atoms[k], :].astype(np.float64)\n \n \n # Get the reciprocal lattice vectors\n b_vectors = self.structure.get_reciprocal_vectors()\n \n # Get the minus_q operation\n self.QE_minusq = False\n\n # NOTE: HERE THERE COULD BE A BUG\n \n # q != -q\n # Get the q vectors in crystal coordinates\n q = Methods.covariant_coordinates(b_vectors, q_point)\n for k, sym in enumerate(self.QE_s):\n new_q = self.QE_s[:,:, k].dot(q)\n if np.sum( (Methods.put_into_cell(b_vectors, -q_point) - new_q)**2) < __EPSILON__:\n self.QE_minus_q = True\n self.QE_irotmq = k + 1\n break",
"def generate_symbols(self):\n\n logger.debug(f'- Generating symbols for {self.class_name}')\n\n # clear symbols storage\n self.f_list, self.g_list = list(), list()\n self.f_matrix, self.g_matrix = Matrix([]), Matrix([])\n\n # process tex_names defined in model\n # -----------------------------------------------------------\n for key in self.parent.tex_names.keys():\n self.tex_names[key] = Symbol(self.parent.tex_names[key])\n for instance in self.parent.discrete.values():\n for name, tex_name in zip(instance.get_names(), instance.get_tex_names()):\n self.tex_names[name] = tex_name\n # -----------------------------------------------------------\n\n for var in self.cache.all_params_names:\n self.inputs_dict[var] = Symbol(var)\n\n for var in self.cache.all_vars_names:\n tmp = Symbol(var)\n self.vars_dict[var] = tmp\n self.inputs_dict[var] = tmp\n if var in self.cache.vars_int:\n self.vars_int_dict[var] = tmp\n\n # store tex names defined in `self.config`\n for key in self.config.as_dict():\n tmp = Symbol(key)\n self.inputs_dict[key] = tmp\n if key in self.config.tex_names:\n self.tex_names[tmp] = Symbol(self.config.tex_names[key])\n\n # store tex names for pretty printing replacement later\n for var in self.inputs_dict:\n if var in self.parent.__dict__ and self.parent.__dict__[var].tex_name is not None:\n self.tex_names[Symbol(var)] = Symbol(self.parent.__dict__[var].tex_name)\n\n self.inputs_dict['dae_t'] = Symbol('dae_t')\n self.inputs_dict['sys_f'] = Symbol('sys_f')\n self.inputs_dict['sys_mva'] = Symbol('sys_mva')\n\n self.lambdify_func[0]['Indicator'] = lambda x: x\n self.lambdify_func[0]['imag'] = np.imag\n self.lambdify_func[0]['real'] = np.real\n self.lambdify_func[0]['im'] = np.imag\n self.lambdify_func[0]['re'] = np.real\n\n self.vars_list = list(self.vars_dict.values()) # useful for ``.jacobian()``",
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]",
"def GetQForEachMode(pols_sc, unit_cell_structure, supercell_structure, \\\n supercell_size, crystal = True):\n\n # Check the supercell\n n_cell = np.prod(supercell_size)\n\n nat = unit_cell_structure.N_atoms\n nat_sc = np.shape(pols_sc)[0] / 3\n n_modes = np.shape(pols_sc)[1] \n\n ERR_MSG = \"\"\"\n Error, the supercell {} is not commensurate with the polarization vector given.\n nat = {}, nat_sc = {}\n \"\"\"\n assert n_cell * nat == nat_sc, ERR_MSG.format(supercell_size, nat, nat_sc)\n assert nat_sc == supercell_structure.N_atoms\n\n # Get the reciprocal lattice\n bg = Methods.get_reciprocal_vectors(unit_cell_structure.unit_cell) / (2 * np.pi)\n\n # Get the possible Q list\n q_grid = GetQGrid(unit_cell_structure.unit_cell, supercell_size)\n\n # Allocate the output variable\n q_list = np.zeros( (n_modes, 3), dtype = np.double, order = \"C\")\n\n # Get the correspondance between the unit cell and the super cell atoms\n itau = supercell_structure.get_itau(unit_cell_structure) - 1 #Fort2Py\n\n # Get the translational vectors\n R_vects = np.zeros( (nat_sc, 3), dtype = np.double)\n for i in range(nat_sc):\n R_vects[i, :] = unit_cell_structure.coords[itau[i],:] - supercell_structure.coords[i,:]\n \n R_vects = R_vects.ravel()\n __thr__ = 1e-6\n\n for imu in range(n_modes):\n pol_v = pols_sc[:, imu]\n\n nq = 0\n for q in q_grid:\n q_vec = np.tile(q, nat_sc)\n q_cos = np.cos(2*np.pi * q_vec * R_vects)\n q_cos /= np.sqrt(q_cos.dot(q_cos))\n q_sin = np.sin(2*np.pi * q_vec * R_vects)\n q_sin /= np.sqrt(q_cos.dot(q_cos))\n\n cos_proj = q_cos.dot(pol_v)\n sin_proj = q_sin.dot(pol_v)\n # Wrong, this select only a translational mode\n\n if np.abs(cos_proj**2 + sin_proj**2 -1) < __thr__:\n new_q = q\n if crystal:\n new_q = Methods.covariant_coordinates(bg, q)\n q_list[imu, :] = new_q\n break\n elif cos_proj**2 + sin_proj**2 > __thr__:\n print (q_cos)\n ERROR_MSG = \"\"\"\n Error, mixing between two |q|.\n Please provide polarization vectors that are well defined in |q|.\n This can be reached using the subroutine Phonons.Phonons.DiagonalizeSupercell.\n q = {}\n i_mode = {}\n\n cos_proj = {} | sin_proj = {}\n \"\"\"\n raise ValueError(ERROR_MSG.format(q, imu, cos_proj, sin_proj))\n else:\n nq += 1\n\n \n # If we are here not q has been found\n if nq == len(q_grid):\n ERROR_MSG = \"\"\"\n Error, the polarization vector {} cannot be identified!\n No q found in this supercell!\n \"\"\"\n raise ValueError(ERROR_MSG.format(imu))\n\n\n return q_list",
"def generate_gh(self):\n \n self.ghx_list = []\n self.ghy_list = []\n \n # get the general expression for h before plugging in g.\n self.hx = 0\n self.hy = 0\n \n for i in range(2,self.trunc_gh+1):\n # all x1,x2 are evaluated on limit cycle x=cos(t), y=sin(t)\n p = lib.kProd(i,self.dx)\n d1 = lib.vec(lib.df(self.NIC1,self.x,i))\n d2 = lib.vec(lib.df(self.NIC2,self.x,i))\n \n self.hx += (1/math.factorial(i)) * np.dot(p,d1)\n self.hy += (1/math.factorial(i)) * np.dot(p,d2)\n \n self.hx = sym.Matrix(self.hx)\n self.hy = sym.Matrix(self.hy)\n \n # expand all terms\n self.hx = sym.expand(self.hx.subs([(self.dx1,self.gx),(self.dx2,self.gy)]))\n self.hy = sym.expand(self.hy.subs([(self.dx1,self.gx),(self.dx2,self.gy)]))\n \n # collect all psi terms into list of some kind\n self.tempx = sym.collect(self.hx[0],self.psi,evaluate=False)\n self.tempy = sym.collect(self.hy[0],self.psi,evaluate=False)\n \n counter = 0\n while (counter <= self.trunc_g+1):\n # save current term\n self.ghx_list.append(self.tempx[self.psi**counter])\n self.ghy_list.append(self.tempy[self.psi**counter])\n \n counter += 1\n \n # substitute limit cycle. maybe move elsewhere.\n for i in range(len(self.ghx_list)):\n self.ghx_list[i] = self.ghx_list[i].subs({self.x1:sym.cos(2*sym.pi*self.t),\n self.x2:sym.sin(2*sym.pi*self.t),\n sym.Indexed('gx',0):s(0),\n sym.Indexed('gy',0):s(0)})\n self.ghy_list[i] = self.ghy_list[i].subs({self.x1:sym.cos(2*sym.pi*self.t),\n self.x2:sym.sin(2*sym.pi*self.t),\n sym.Indexed('gx',0):s(0),\n sym.Indexed('gy',0):s(0)})",
"def prepare_symbols(self):",
"def constructSymbolicHubbard2(hdim,vdim,t,U):\n hdim = hdim*2\n nqubits = vdim*hdim\n # first horizontal line of sites\n firstLine = range(1,hdim-1)\n spinDownList = [x for x in firstLine if x % 2 == 1]\n coefficients=[]\n operators=[]\n # Generating the horizontal contributions to the hamiltonian\n for j in range(0,vdim):\n offset = j*hdim\n for i in spinDownList:\n #print(i)\n operators.append([i+offset,-i-2-offset])\n coefficients.append(-t)\n operators.append([i+1+offset,-i-3-offset])\n coefficients.append(-t) \n # periodic boundary conditions\n if hdim > 2:\n operators.append([hdim-1+offset,-1-offset])\n coefficients.append(-t)\n operators.append([hdim+offset,-2-offset])\n coefficients.append(-t) \n #print(\"spinDownList\",spinDownList)\n #print(\"horizontal contributions:\",operators)\n\n # Generating the vertical contributions to the hamiltonian\n # open boundary conditions\n firstLine = range(1,hdim+1)\n spinDownList = [x for x in firstLine if x % 2 == 1]\n #print(\"spinDownList\",spinDownList)\n for j in range(1,vdim):\n offset1 = (j-1)*hdim\n offset2 = j*hdim\n for i in spinDownList:\n #print(i)\n operators.append([i+offset1,-i-offset2])\n coefficients.append(-t)\n operators.append([i+1+offset1,-i-1-offset2])\n coefficients.append(-t) \n #print(\"vertical contributions:\",operators)\n \n # repulsion terms\n allQubits = range(1,nqubits+1)\n spinDownListAll = [x for x in allQubits if x % 2 == 1]\n for i in spinDownListAll:\n operators.append([i,-i,i+1,-i-1])\n coefficients.append(U)\n \n #print(\"repulsion contributions:\",operators)\n return operators, coefficients, nqubits",
"def initialize_volume_symmetry_map(self):\n #@type pg PointGroup\n pg = self.crystal.get_point_group()\n if pg is None:\n print \"ERROR!\"\n return\n\n t1 = time.time()\n\n order = len(pg.table)\n #@type inst Instrument\n inst = self.inst\n\n #Initialize the symmetry map. Last dimension = the ORDER equivalent indices\n n = len(inst.qx_list)\n numpix = n**3\n symm = np.zeros( (numpix, order) , dtype=int)\n\n if self.verbose: print \"Starting volume symmetry calculation. Order is %d. Matrix is %d voxels (%d to a side).\" % (order, n**3, n)\n\n #--- From get_hkl_from_q functions: (moved here for speed) --\n #Get the inverse the B matrix to do the reverse conversion\n B = self.crystal.get_B_matrix()\n invB = np.linalg.inv(B)\n\n #Limit +- in q space\n qlim = inst.qlim\n \n if config.cfg.force_pure_python:\n #----------- Pure Python Version --------------\n\n #Go through each pixel\n q_arr = np.zeros( (3, numpix) )\n for (ix, qx) in enumerate(inst.qx_list):\n for (iy, qy) in enumerate(inst.qx_list):\n for (iz, qz) in enumerate(inst.qx_list):\n i = iz + iy*n + ix*n*n\n #Find the (float) HKL of this voxel at qx,qy,qz.\n q_arr[:, i] = (qx,qy,qz)\n\n #Matrix multiply invB.hkl to get all the HKLs as a column array\n hkl = np.dot(invB, q_arr)\n\n #Now get ORDER equivalent HKLs, as a long list.\n #(as equivalent q)\n q_equiv = np.zeros( (3, numpix, order) )\n for ord in xrange(order):\n #Ok, we go TABLE . hkl to get the equivalent hkl\n #Them, B . hkl gives you the Q vector\n q_equiv[:,:, ord] = np.dot(B, np.dot(pg.table[ord], hkl) )\n\n #Now we need to find the index into the array.\n #Start by finding the x,y,z, indices\n ix = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[0, :, ord])\n iy = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[1, :, ord])\n iz = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[2, :, ord])\n\n #Now put the index into the symmetry matrix\n index = iz + iy*n + ix*n*n\n index[np.isnan(index)] = -1 #Put -1 where a NAN was found\n symm[:, ord] = index\n\n\n else:\n #--------------- Inline C version (about 17x faster than Python) ---------------\n code = \"\"\"\n\n //-- Calculate the hkl array ---\n int ix, iy, iz;\n int eix, eiy, eiz, eindex;\n int index, ord;\n double qx, qy, qz;\n double eqx, eqy, eqz;\n double h, k, l;\n double eh, ek, el;\n for (ix=0; ix<n; ix++)\n {\n qx = ix*qres - qlim;\n for (iy=0; iy<n; iy++)\n {\n qy = iy*qres - qlim;\n for (iz=0; iz<n; iz++)\n {\n qz = iz*qres - qlim;\n index = iz + iy*n + ix*n*n;\n //Ok, now we matrix multiply invB.hkl to get all the HKLs as a column array\n h = qx * INVB2(0,0) + qy * INVB2(0,1) + qz * INVB2(0,2);\n k = qx * INVB2(1,0) + qy * INVB2(1,1) + qz * INVB2(1,2);\n l = qx * INVB2(2,0) + qy * INVB2(2,1) + qz * INVB2(2,2);\n\n //Now go through each equivalency table.\n for (ord=0; ord<order; ord++)\n {\n //Do TABLE.hkl to find a new equivalent hkl\n eh = h * TABLE3(ord, 0,0) + k * TABLE3(ord, 0,1) + l * TABLE3(ord, 0,2);\n ek = h * TABLE3(ord, 1,0) + k * TABLE3(ord, 1,1) + l * TABLE3(ord, 1,2);\n el = h * TABLE3(ord, 2,0) + k * TABLE3(ord, 2,1) + l * TABLE3(ord, 2,2);\n //Now, matrix mult B . 
equiv_hkl to get the other q vector\n eqx = eh * B2(0,0) + ek * B2(0,1) + el * B2(0,2);\n eqy = eh * B2(1,0) + ek * B2(1,1) + el * B2(1,2);\n eqz = eh * B2(2,0) + ek * B2(2,1) + el * B2(2,2);\n\n //Ok, now you have to find the index into QSPACE\n eix = round( (eqx+qlim)/qres ); if ((eix >= n) || (eix < 0)) eix = -1; \n eiy = round( (eqy+qlim)/qres ); if ((eiy >= n) || (eiy < 0)) eiy = -1;\n eiz = round( (eqz+qlim)/qres ); if ((eiz >= n) || (eiz < 0)) eiz = -1;\n\n if ((eix < 0) || (eiy < 0) || (eiz < 0))\n {\n //One of the indices was out of bounds.\n //Put this marker to mean NO EQUIVALENT\n SYMM2(index, ord) = -1;\n }\n else\n {\n //No problem!, Now I put it in there\n eindex = eiz + eiy*n + eix*n*n;\n //This pixel (index) has this equivalent pixel index (eindex) for this order transform ord.\n SYMM2(index, ord) = eindex;\n }\n\n }\n \n }\n }\n }\n \"\"\"\n qres = inst.q_resolution\n n = len(self.inst.qx_list)\n table = np.array(pg.table) #Turn the list of 3x3 arrays into a Nx3x3 array\n varlist = ['B', 'invB', 'symm', 'qres', 'qlim', 'n', 'order', 'table']\n weave.inline(code, varlist, compiler='gcc', support_code=\"\")\n\n #Done with either version\n self.volume_symmetry = symm\n\n if self.verbose: print \"Volume symmetry map done in %.3f sec.\" % (time.time()-t1)",
"def __init__(self, nbin, lower, upper, mass1, mass2):\n self._gr = root.TGraph()\n for ibin in range(nbin):\n m0 = lower + ibin*(upper - lower)/float(nbin)\n self._gr.SetPoint(ibin, lower + ibin*(upper - lower)/float(nbin), self.__getPHSPFactor(m0, mass1, mass2))",
"def make_super_cell(structure, sc):\n\n supercell = Structure()\n supercell.structure_comment = \"{}x{}x{}\".format(sc[0],sc[1],sc[2])\n\n # set lattice parameter\n supercell.lattice_parameter = structure.lattice_parameter \n\n # set h_matrix\n h = np.zeros(shape=[3,3])\n for i in range(3):\n h[i,:] = structure.h_matrix[i,:] * sc[i]\n supercell.h_matrix = h\n\n # add supercell atoms\n for i in range(sc[0]):\n for j in range(sc[1]):\n for k in range(sc[2]):\n for atom in structure.atoms:\n symbol = atom.symbol\n position = atom.position\n position = [(i+position[0])/sc[0],\\\n (j+position[1])/sc[1],\\\n (k+position[2])/sc[2]]\n supercell.add_atom(symbol,position)\n\n # return a copy of the supercell\n return copy.deepcopy(supercell)",
"def __init__(self,xsym,ax,Lmax,Mmax,lmax,parity='natural',ax2=None,psum=None,Lmin=None,Mmin=None,lsum=None): \n\n # set the defaults\n def default_if_None(val,deflt): \n if val is None: \n return deflt\n else:\n return val\n\n self.xsym=xsym\n self.ax =ax\n self.bx =default_if_None(ax2,ax)\n self.ax.show('radial axis 1')\n self.bx.show('radial axis 2')\n L0=default_if_None(Lmin,Lmax)\n M0=default_if_None(Mmin,Mmax)\n ls=default_if_None(lsum,2*lmax)+1\n ks=default_if_None(psum,self.ax.order()+self.bx.order())\n\n self.gaunt=GauntCoeffTable(2*lmax)\n self.bang=[]\n self.len=-1\n block_i0=0\n count=0\n for L in range(L0,Lmax+1):\n for M in range(M0,Mmax+1):\n for l1 in range(lmax+1):\n for l2 in range(lmax+1):\n if l1+l2>ls: continue\n if parity=='natural' and (L+l1+l2)%2==1: continue\n if parity=='unnatural' and (L+l1+l2)%2==0: continue\n if xsym!=0 and l1<l2: continue # skip exchange symmetric angular part\n self.bang.append(BasTwoAngle(L,M,l1,l2))\n ba=self.bang[-1]\n\n # generate product basis\n for e1 in self.ax.e:\n for e2 in self.bx.e:\n ba.brad.append(BasTwoRadial(e1.centrifugal(l1),e2.centrifugal(l2)))\n br=ba.brad[-1]\n for k1 in range(e1.n):\n for k2 in range(e2.n):\n count+=1\n br.k1.append(k1)\n br.k2.append(k2)\n itotal=block_i0+e1.i0+k1+self.ax.len()*(e2.i0+k2)\n# print 'block',L,M,l1,l2,itotal,block_i0\n self.len=max(self.len,itotal+1)\n br.i.append(itotal)\n block_i0=block_i0+self.ax.len()*self.bx.len() \n print 'total',self.len"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This subroutine applies the translational symmetries to the given vector, replacing it with its average over all translation-equivalent atoms. To be used only if the structure is a supercell and the symmetries have been initialized with SPGLIB.
|
def ApplyTranslationsToVector(self, vector):
nat = self.QE_nat
assert vector.shape[0] == nat
assert vector.shape[1] == 3
# Ignore if no translations are presents
if self.QE_translation_nr <= 1:
return
    sum_all = np.zeros((nat, 3), dtype = vector.dtype)
    for i in range(self.QE_translation_nr):
        sum_all += vector[self.QE_translations_irt[:, i] - 1, :]
sum_all /= self.QE_translation_nr
vector[:,:] = sum_all
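
Below is a minimal, self-contained sketch of the averaging this method performs, using a hypothetical 2-atom cell repeated twice; the array values and names are illustrative only and are not part of the class above.

import numpy as np

# Hypothetical map: column t gives, for each atom, the 1-based index of its
# image under translation t (identity translation in column 0).
translations_irt = np.array([[1, 3],
                             [2, 4],
                             [3, 1],
                             [4, 2]], dtype=int)

vector = np.random.rand(4, 3)            # per-atom displacements
n_trans = translations_irt.shape[1]

sum_all = np.zeros_like(vector)
for t in range(n_trans):
    # Gather the displacement of each atom's image under translation t
    sum_all += vector[translations_irt[:, t] - 1, :]
vector[:, :] = sum_all / n_trans         # in-place average, as in the method above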
|
[
"def translate(self, vector):\n for atom in self.atoms:\n atom.translate(vector)",
"def translate(self, vector):\n seg2 = [ x.translated(vector) for x in self.asSegments()]\n self.activeRepresentation = SegmentRepresentation(self, seg2)\n return self",
"def translate(self, vector):\n self.position = self.position + np.array(vector)",
"def ApplySymmetriesToVector(symmetries, vector, unit_cell, irts):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n n_sym = len(symmetries)\n\n assert n_sym == len(irts)\n\n work = np.zeros( (n_sym, nat, 3), dtype = np.double, order = \"C\")\n \n # Pass to crystalline coordinates\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n \n # Apply the symmetry\n for j, symmetry in enumerate(symmetries):\n sym = symmetry[:, :3]\n w1 = sym.dot(v1.T).T\n\n # Return in cartesian coordinates\n work[j, irts[j][:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum(\"ab,a\", unit_cell, w1)\n \n return work",
"def vector_trans(self, v, T, V0):\n v = np.array(v)\n return np.add(v[0:2].dot(T),V0)",
"def ApplySymmetryToVector(symmetry, vector, unit_cell, irt):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n work = np.zeros( (nat, 3))\n sym = symmetry[:, :3]\n\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n w1 = sym.dot(v1.T).T\n\n # Return in cartesian coordinates\n work[irt[:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum(\"ab,a\", unit_cell, w1)\n\n return work",
"def ApplyTranslationsToSupercell(fc_matrix, super_cell_structure, supercell):\n\n natsc = super_cell_structure.N_atoms\n\n # Check the consistency of the passed options\n natsc3, _ = np.shape(fc_matrix)\n assert natsc == int(natsc3 / 3), \"Error, wrong number of atoms in the supercell structure\"\n assert natsc3 == _, \"Error, the matrix passed has a wrong shape\"\n assert natsc % np.prod(supercell) == 0, \"Error, the given supercell is impossible with the number of atoms\"\n\n # Fill the auxiliary matrix\n new_v2 = np.zeros( (3,3, natsc, natsc), dtype = np.double, order =\"F\")\n for i in range(natsc):\n for j in range(natsc):\n new_v2[:, :, i, j] = fc_matrix[3*i : 3*(i+1), 3*j : 3*(j+1)]\n\n\n # The number of translations\n n_trans = np.prod(supercell)\n trans_irt = np.zeros((natsc, n_trans), dtype = np.double, order = \"F\")\n\n # Setup the translational symmetries\n for nx in range(supercell[0]):\n for ny in range(supercell[1]):\n for nz in range(supercell[2]):\n # Build the translational symmetry\n symmat = np.zeros((3,4))\n symmat[:3,:3] = np.eye(3)\n symmat[:, 3] = np.array([nx, ny, nz], dtype = float) / np.array(supercell)\n\n\n nindex = supercell[2] * supercell[1] *nx \n nindex += supercell[2] * ny \n nindex += nz \n\n # Get the IRT for this symmetry operation in the supercell\n trans_irt[:, nindex] = GetIRT(super_cell_structure, symmat) + 1 \n \n\n \n \n # Apply the translations\n symph.trans_v2(new_v2, trans_irt)\n\n # Return back to the fc_matrix\n for i in range(natsc):\n for j in range(natsc):\n fc_matrix[3*i : 3*(i+1), 3*j : 3*(j+1)] = new_v2[:, :, i, j]",
"def TranslateVtu(vtu, translation):\n \n # Translate the locations\n locations = vtu.GetLocations()\n newLocations = vtk.vtkPoints()\n for location in locations:\n newLocations.InsertNextPoint([comp + translation[i] for i, comp in enumerate(location)])\n vtu.ugrid.SetPoints(newLocations)\n \n return",
"def shift(self, translation_vector):\n self.domain.center += tuple(np.array(translation_vector))",
"def fractional_translation_vec(self, v: ArrayLike):\n v = np.array(v)\n if len(v) == 2:\n c = self.lattice.get_fractional_coords(self.translation_vec)[2]\n v = np.append(v, 0)\n else:\n c = v[2]\n v[2] = 0\n v = self.lattice.get_cartesian_coords(v)\n self.grain_1.translation_vec = v\n self._translation_vec = [*v[:2], c]",
"def v_translate(f, shift):\n\n return GenericTranslation(f, 'vel', shift)",
"def translate(coords_to_translate, translation_vector, active=False):\n translation_dims = np.shape(coords_to_translate)\n\n for _ in translation_dims[1::]:\n translation_vector = np.expand_dims(translation_vector, axis=-1)\n\n if active:\n translation_vector = -translation_vector\n\n return coords_to_translate - translation_vector",
"def translate(self, vec: \"Point\") -> \"Point\":\n return self + vec",
"def _transform(self, vector, word):\n for w in word:\n vector = np.dot(vector, self._reflections[w])\n return vector",
"def translate(self, vector):\n return Point.from_xyz(self.X() + vector.X(), self.Y() + vector.Y(), self.Z() + vector.Z())",
"def translateBy(state: 'SoState', translation: 'SbVec3f') -> \"void\":\n return _coin.SoLocalBBoxMatrixElement_translateBy(state, translation)",
"def SymmetrizeVector(self, vector):\n\n # Apply Translations if any\n self.ApplyTranslationsToVector(vector)\n \n # Prepare the real vector\n tmp_vector = np.zeros( (3, self.QE_nat), dtype = np.float64, order = \"F\")\n \n for i in range(self.QE_nat):\n tmp_vector[0, i] = vector[i,0]\n tmp_vector[1, i] = vector[i,1]\n tmp_vector[2,i] = vector[i,2]\n \n symph.symvector(self.QE_nsymq, self.QE_irt, self.QE_s, self.QE_at, self.QE_bg,\n tmp_vector, self.QE_nat)\n \n \n for i in range(self.QE_nat):\n vector[i, :] = tmp_vector[:,i]",
"def xv_translate(f, x_shift, v_shift):\n\n return translate(v_translate(f, v_shift),\n x_shift)",
"def point_translate(point_in, vector_in):\n try:\n if point_in is None or len(point_in) == 0 or vector_in is None or len(vector_in) == 0:\n raise ValueError(\"Input arguments cannot be empty\")\n except TypeError as e:\n print(\"An error occurred: {}\".format(e.args[-1]))\n raise TypeError(\"Input must be a list or tuple\")\n except Exception:\n raise\n\n # Translate the point using the input vector\n point_out = [coord + comp for coord, comp in zip(point_in, vector_in)]\n\n return point_out"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function initializes the QE symmetries from the symmetries expressed in the CellConstructor format, i.e. a list of 3x4 numpy arrays where the last column is the fractional translation.
|
def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):
nsym = len(symmetries)
self.QE_nsymq = np.intc(nsym)
self.QE_nsym = self.QE_nsymq
for i, sym in enumerate(symmetries):
self.QE_s[:,:, i] = np.transpose(sym[:, :3])
# Get the atoms correspondence
eq_atoms = GetIRT(self.structure, sym)
self.QE_irt[i, :] = eq_atoms + 1
# Get the inverse symmetry
inv_sym = np.linalg.inv(sym[:, :3])
for k, other_sym in enumerate(symmetries):
if np.sum( (inv_sym - other_sym[:, :3])**2) < __EPSILON__:
break
self.QE_invs[i] = k + 1
# Setup the position after the symmetry application
for k in range(self.QE_nat):
self.QE_rtau[:, i, k] = self.structure.coords[eq_atoms[k], :].astype(np.float64)
# Get the reciprocal lattice vectors
b_vectors = self.structure.get_reciprocal_vectors()
# Get the minus_q operation
    self.QE_minus_q = False
# NOTE: HERE THERE COULD BE A BUG
# q != -q
# Get the q vectors in crystal coordinates
q = Methods.covariant_coordinates(b_vectors, q_point)
    for k in range(self.QE_nsymq):
new_q = self.QE_s[:,:, k].dot(q)
if np.sum( (Methods.put_into_cell(b_vectors, -q_point) - new_q)**2) < __EPSILON__:
self.QE_minus_q = True
self.QE_irotmq = k + 1
break
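
A hedged usage sketch follows; the QE_Symmetry constructor and the structure object are assumed from the surrounding CellConstructor module and are not defined here. Each symmetry is passed as a 3x4 array with the rotation in the first three columns and the fractional translation in the last.

import numpy as np

# Assumed: `structure` is a CellConstructor structure with a unit cell defined.
sym_handler = QE_Symmetry(structure)          # assumed constructor from this module

identity = np.zeros((3, 4))
identity[:, :3] = np.eye(3)                   # rotation part
# identity[:, 3] stays zero: no fractional translation

sym_handler.InitFromSymmetries([identity], q_point=np.array([0., 0., 0.]))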
|
[
"def generate_init_eqn(self):\n\n self.init_asn = OrderedDict() # assignment-type initialization\n self.init_itn = OrderedDict() # iterative initialization\n self.init_itn_vars = OrderedDict() # variables corr. to iterative vars\n self.init_jac = OrderedDict()\n\n for item in self.init_seq:\n if isinstance(item, str):\n instance = self.parent.__dict__[item]\n if instance.v_str is not None:\n self.init_asn[item] = self.v_str_syms[item]\n if instance.v_iter is not None:\n self.init_itn[item] = self.v_iter_syms[item]\n\n elif isinstance(item, list):\n name_concat = '_'.join(item)\n eqn_set = Matrix([self.v_iter_syms[name] for name in item])\n self.init_itn[name_concat] = eqn_set\n self.init_itn_vars[name_concat] = item\n for vv in item:\n instance = self.parent.__dict__[vv]\n if instance.v_str is not None:\n self.init_asn[vv] = self.v_str_syms[vv]\n\n for name, expr in self.init_itn.items():\n vars_iter = OrderedDict()\n for item in self.init_itn_vars[name]:\n vars_iter[item] = self.vars_dict[item]\n\n self.init_jac[name] = expr.jacobian(list(vars_iter.values()))",
"def __init__(self, structure, threshold = 1e-5):\n \n if not structure.has_unit_cell:\n raise ValueError(\"Error, symmetry operation can be initialize only if the structure has a unit cell\")\n \n self.structure = structure\n self.threshold = np.float64(threshold)\n \n # Setup the threshold \n symph.symm_base.set_accep_threshold(self.threshold)\n \n nat = structure.N_atoms\n \n # Define the quantum espresso symmetry variables in optimized way to work with Fortran90\n self.QE_nat = np.intc( nat )\n self.QE_s = np.zeros( (3, 3, 48) , dtype = np.intc, order = \"F\")\n self.QE_irt = np.zeros( (48, nat), dtype = np.intc, order = \"F\")\n self.QE_invs = np.zeros( (48), dtype = np.intc, order = \"F\")\n self.QE_rtau = np.zeros( (3, 48, nat), dtype = np.float64, order = \"F\")\n self.QE_ft = np.zeros( (3, 48), dtype = np.float64, order = \"F\")\n \n \n self.QE_minus_q = False\n self.QE_irotmq = np.intc(0)\n self.QE_nsymq = np.intc( 0 )\n self.QE_nsym = np.intc(0)\n \n # Prepare the QE structure\n self.QE_tau = np.zeros((3, nat), dtype = np.float64, order = \"F\")\n self.QE_ityp = np.zeros(nat, dtype = np.intc)\n \n symbs = {}\n counter = 1\n for i in range(nat):\n # Rank the atom number\n atm = structure.atoms[i]\n if not atm in symbs.keys():\n symbs[atm] = counter\n counter += 1\n \n self.QE_ityp[i] = symbs[atm]\n # Convert in bohr\n for j in range(3):\n self.QE_tau[j, i] = structure.coords[i, j]\n \n \n self.QE_at = np.zeros( (3,3), dtype = np.float64, order = \"F\")\n self.QE_bg = np.zeros( (3,3), dtype = np.float64, order = \"F\")\n \n bg = structure.get_reciprocal_vectors()\n for i in range(3):\n for j in range(3):\n self.QE_at[i,j] = structure.unit_cell[j,i]\n self.QE_bg[i,j] = bg[j,i] / (2* np.pi) \n\n # Here we define the quantities required to symmetrize the supercells\n self.QE_at_sc = self.QE_at.copy()\n self.QE_bg_sc = self.QE_bg.copy()\n self.QE_translation_nr = 1 # The supercell total dimension (Nx * Ny * Nz)\n self.QE_translations = [] # The translations in crystal axes\n\n # After the translation, which vector is transformed in which one?\n # This info is stored here as ndarray( size = (N_atoms, N_trans), dtype = np.intc, order = \"F\")\n self.QE_translations_irt = []",
"def SetupFromSPGLIB(self):\n if not __SPGLIB__:\n raise ImportError(\"Error, this function works only if spglib is available\")\n\n # Get the symmetries\n spg_syms = spglib.get_symmetry(self.structure.get_ase_atoms(), symprec = self.threshold)\n symmetries = GetSymmetriesFromSPGLIB(spg_syms, regolarize= False)\n\n trans_irt = 0\n self.QE_s[:,:,:] = 0\n\n\n # Check how many point group symmetries do we have\n n_syms = 0\n for i, sym in enumerate(symmetries):\n # Extract the rotation and the fractional translation\n rot = sym[:,:3]\n\n # Check if the rotation is equal to the first one\n if np.sum( (rot - symmetries[0][:,:3])**2 ) < 0.1 and n_syms == 0 and i > 0:\n # We got all the rotations\n n_syms = i \n break\n \n # Extract the point group\n if n_syms == 0:\n self.QE_s[:,:, i] = rot.T\n\n # Get the IRT (Atoms mapping using symmetries)\n irt = GetIRT(self.structure, sym)\n self.QE_irt[i, :] = irt + 1 #Py to Fort\n\n \n if n_syms == 0:\n n_syms = len(symmetries)\n \n # From the point group symmetries, get the supercell\n n_supercell = len(symmetries) // n_syms\n self.QE_translation_nr = n_supercell\n self.QE_nsymq = n_syms\n self.QE_nsym = n_syms\n\n self.QE_translations_irt = np.zeros( (self.structure.N_atoms, n_supercell), dtype = np.intc, order = \"F\")\n self.QE_translations = np.zeros( (3, n_supercell), dtype = np.double, order = \"F\")\n\n # Now extract the translations\n for i in range(n_supercell):\n sym = symmetries[i * n_syms]\n # Check if the symmetries are correctly setup\n\n I = np.eye(3)\n ERROR_MSG=\"\"\"\n Error, symmetries are not correctly ordered.\n They must always start with the identity.\n\n N_syms = {}; N = {}; SYM = {}\n \"\"\".format(n_syms,i*n_syms, sym)\n assert np.sum( (I - sym[:,:3])**2) < 0.5, ERROR_MSG\n\n # Get the irt for the translation (and the translation)\n irt = GetIRT(self.structure, sym)\n self.QE_translations_irt[:, i] = irt + 1\n self.QE_translations[:, i] = sym[:,3]\n\n # For each symmetry operation, assign the inverse\n self.QE_invs[:] = get_invs(self.QE_s, self.QE_nsym)",
"def initialize_quaternions(self):\n # initialize as s = (0,0,0,0) and r = (0,0,0,1)\n N, M = self.M.num_reactants, self.M.num_products\n # only use three elements and enforce constraints in the fourth\n q = np.zeros((N-1, 2, 3))\n #q[:,1,3] = 1\n return q",
"def init_Q(self):\n self.Q = np.matrix(np.tril(self.A))",
"def _tushare_init_symbols(self):\r\n\t\t\"\"\"\r\n\t\troot_dir = 'config'\r\n\t\tif not os.path.exists(root_dir):\r\n\t\t\tos.makedirs(root_dir)\r\n\t\tPATH_ALLSYMBOLS_EQUD = self._PATH_ALLSYMBOLS_EQUD_TS\r\n\r\n\t\tif not os.path.isfile(PATH_ALLSYMBOLS_EQUD):\r\n\t\t\tdf = ts.get_today_all()\r\n\t\t\tdf[['code','name']].to_json(PATH_ALLSYMBOLS_EQUD)\r\n\t\telse:\r\n\t\t\tdf = pd.read_json(PATH_ALLSYMBOLS_EQUD)\r\n\r\n\t\tlist_symbols = list(df['code'])\r\n\t\tlist_symbols = [(6-len(str(s)))*'0'+str(s) for s in list_symbols]\r\n\t\tself._tushare_list_EquSymbols = list_symbols\r\n\t\t\"\"\"\r\n\t\tpass",
"def _setup_Q(self):\n self.Q_s = [None for _ in range(self.p+1)]\n self.Q_s[self.p] = np.eye(self.layers[self.p-1])\n for i in range(self.p-1, -1, -1):\n self.Q_s[i] = np.dot(self.U_s[i], self.Q_s[i+1])",
"def setup_symbols_for_species_pKs(self, sid_list):\n new_variable_index = 0\n self.variable_vector_dict = {}\n for species_id in sid_list:\n pK_data_val = self.get_pK_val(species_id) \n self.variable_vector_dict[species_id] = [symbols('x[%d]'%new_variable_index), pK_data_val]\n new_variable_index += 1\n #for each species_id, set up the sequence of species that eventually lead to least protonated state, for binding constant calculation\n self.compounds_species_id_sequence = {}\n for species_id in self.compounds_data_dict.keys():\n self.compounds_species_id_sequence[species_id] = self.get_sequence_of_species_ids(species_id)",
"def init_Q(self):\n self.Q = np.matrix(np.diagflat(np.diag(self.A)))",
"def __init__(self):\n self.alpha_list = []\n self.key_nums = ['11', '12', '13', '14', '15', '21', '22', '23', '24', '25', '31', '32', '33', '34', '35', '41', '42', '43', '44',\n '45', '51', '52', '53', '54', '55', '56']\n\n for a in self.ALPHA:\n self.alpha_list.append(a)\n\n self.enc_grid = {number: letter for letter, number in zip(self.alpha_list, self.key_nums)}\n self.dec_grid = {letter: number for number, letter, in zip(self.key_nums, self.alpha_list)}\n self.num_grid = ['0', '1', '2', '3', '4', '5', '6', '7','8', '9']",
"def setUpNQudits(n, d):\n rho = [] #List of density matrices\n for i in range(n):\n char_num = 97 + i \n density_matrix = sympy.Matrix(sympy.MatrixSymbol(chr(char_num), d, d))\n trace = density_matrix.trace()\n rho.append(density_matrix) \n return rho",
"def initialize_volume_symmetry_map(self):\n #@type pg PointGroup\n pg = self.crystal.get_point_group()\n if pg is None:\n print \"ERROR!\"\n return\n\n t1 = time.time()\n\n order = len(pg.table)\n #@type inst Instrument\n inst = self.inst\n\n #Initialize the symmetry map. Last dimension = the ORDER equivalent indices\n n = len(inst.qx_list)\n numpix = n**3\n symm = np.zeros( (numpix, order) , dtype=int)\n\n if self.verbose: print \"Starting volume symmetry calculation. Order is %d. Matrix is %d voxels (%d to a side).\" % (order, n**3, n)\n\n #--- From get_hkl_from_q functions: (moved here for speed) --\n #Get the inverse the B matrix to do the reverse conversion\n B = self.crystal.get_B_matrix()\n invB = np.linalg.inv(B)\n\n #Limit +- in q space\n qlim = inst.qlim\n \n if config.cfg.force_pure_python:\n #----------- Pure Python Version --------------\n\n #Go through each pixel\n q_arr = np.zeros( (3, numpix) )\n for (ix, qx) in enumerate(inst.qx_list):\n for (iy, qy) in enumerate(inst.qx_list):\n for (iz, qz) in enumerate(inst.qx_list):\n i = iz + iy*n + ix*n*n\n #Find the (float) HKL of this voxel at qx,qy,qz.\n q_arr[:, i] = (qx,qy,qz)\n\n #Matrix multiply invB.hkl to get all the HKLs as a column array\n hkl = np.dot(invB, q_arr)\n\n #Now get ORDER equivalent HKLs, as a long list.\n #(as equivalent q)\n q_equiv = np.zeros( (3, numpix, order) )\n for ord in xrange(order):\n #Ok, we go TABLE . hkl to get the equivalent hkl\n #Them, B . hkl gives you the Q vector\n q_equiv[:,:, ord] = np.dot(B, np.dot(pg.table[ord], hkl) )\n\n #Now we need to find the index into the array.\n #Start by finding the x,y,z, indices\n ix = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[0, :, ord])\n iy = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[1, :, ord])\n iz = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[2, :, ord])\n\n #Now put the index into the symmetry matrix\n index = iz + iy*n + ix*n*n\n index[np.isnan(index)] = -1 #Put -1 where a NAN was found\n symm[:, ord] = index\n\n\n else:\n #--------------- Inline C version (about 17x faster than Python) ---------------\n code = \"\"\"\n\n //-- Calculate the hkl array ---\n int ix, iy, iz;\n int eix, eiy, eiz, eindex;\n int index, ord;\n double qx, qy, qz;\n double eqx, eqy, eqz;\n double h, k, l;\n double eh, ek, el;\n for (ix=0; ix<n; ix++)\n {\n qx = ix*qres - qlim;\n for (iy=0; iy<n; iy++)\n {\n qy = iy*qres - qlim;\n for (iz=0; iz<n; iz++)\n {\n qz = iz*qres - qlim;\n index = iz + iy*n + ix*n*n;\n //Ok, now we matrix multiply invB.hkl to get all the HKLs as a column array\n h = qx * INVB2(0,0) + qy * INVB2(0,1) + qz * INVB2(0,2);\n k = qx * INVB2(1,0) + qy * INVB2(1,1) + qz * INVB2(1,2);\n l = qx * INVB2(2,0) + qy * INVB2(2,1) + qz * INVB2(2,2);\n\n //Now go through each equivalency table.\n for (ord=0; ord<order; ord++)\n {\n //Do TABLE.hkl to find a new equivalent hkl\n eh = h * TABLE3(ord, 0,0) + k * TABLE3(ord, 0,1) + l * TABLE3(ord, 0,2);\n ek = h * TABLE3(ord, 1,0) + k * TABLE3(ord, 1,1) + l * TABLE3(ord, 1,2);\n el = h * TABLE3(ord, 2,0) + k * TABLE3(ord, 2,1) + l * TABLE3(ord, 2,2);\n //Now, matrix mult B . 
equiv_hkl to get the other q vector\n eqx = eh * B2(0,0) + ek * B2(0,1) + el * B2(0,2);\n eqy = eh * B2(1,0) + ek * B2(1,1) + el * B2(1,2);\n eqz = eh * B2(2,0) + ek * B2(2,1) + el * B2(2,2);\n\n //Ok, now you have to find the index into QSPACE\n eix = round( (eqx+qlim)/qres ); if ((eix >= n) || (eix < 0)) eix = -1; \n eiy = round( (eqy+qlim)/qres ); if ((eiy >= n) || (eiy < 0)) eiy = -1;\n eiz = round( (eqz+qlim)/qres ); if ((eiz >= n) || (eiz < 0)) eiz = -1;\n\n if ((eix < 0) || (eiy < 0) || (eiz < 0))\n {\n //One of the indices was out of bounds.\n //Put this marker to mean NO EQUIVALENT\n SYMM2(index, ord) = -1;\n }\n else\n {\n //No problem!, Now I put it in there\n eindex = eiz + eiy*n + eix*n*n;\n //This pixel (index) has this equivalent pixel index (eindex) for this order transform ord.\n SYMM2(index, ord) = eindex;\n }\n\n }\n \n }\n }\n }\n \"\"\"\n qres = inst.q_resolution\n n = len(self.inst.qx_list)\n table = np.array(pg.table) #Turn the list of 3x3 arrays into a Nx3x3 array\n varlist = ['B', 'invB', 'symm', 'qres', 'qlim', 'n', 'order', 'table']\n weave.inline(code, varlist, compiler='gcc', support_code=\"\")\n\n #Done with either version\n self.volume_symmetry = symm\n\n if self.verbose: print \"Volume symmetry map done in %.3f sec.\" % (time.time()-t1)",
"def four_finite_jacobi_quartics(self):\n \"\"\" to this point\"\"\"\n X,Y,Z = self.X, self.Y, self.Z\n if X==0 or Y==0:\n yield JacobiQuartic(0,1,check=False)\n yield JacobiQuartic(1,2*magic*i, check=False)\n yield JacobiQuartic(-1,2*magic*i, check=False)\n return\n\n gamma = fe_inv(fe_sqrt( pow(Y,4,q) * pow(X,2,q) \\\n * (pow(Z,2,q)-pow(Y,2,q))))\n\n den = gamma*pow(Y,2,q)\n s_X_inv = ( den * (Z-Y) ) % q\n s = (s_X_inv * X) % q\n t = (2*magic*s_X_inv*Z) % q\n sp_Xp_inv = ( den * (Z+Y) ) % q\n sp = (- sp_Xp_inv * X) % q\n tp = (2*magic*sp_Xp_inv*Z) % q\n\n yield JacobiQuartic(s, t, check=False)\n yield JacobiQuartic(sp, tp, check=False)\n\n den = fe_inv(fe_sqrt(1+d)) * (pow(Y,2,q)-pow(Z,2,q)) * gamma\n X,Y,Z = Y,X,(i*Z)%q\n s_X_inv = ( den * (Z-Y) ) % q\n s = (s_X_inv * X) % q\n t = (2*magic*s_X_inv*Z) % q\n sp_Xp_inv = ( den * (Z+Y) ) % q\n sp = (- sp_Xp_inv * X) % q\n tp = (2*magic*sp_Xp_inv*Z) % q\n\n yield JacobiQuartic(s, t, check=False)\n yield JacobiQuartic(sp, tp, check=False)",
"def init_matrix(m, eq_n):\n eq_mtrx = []\n\n # Populate array with arrays of sparse matrices\n for row in range(eq_n):\n mtrx_row = []\n\n for col in range(eq_n):\n # Populate row of matrices\n mtrx_row.append(\n lil_matrix((m, m), dtype=np.cfloat)\n )\n\n # Add row to eq_mtrx\n eq_mtrx.append( mtrx_row )\n\n return eq_mtrx",
"def Generate(self, dyn, qe_sym = None):\n \n # Check if the symmetries must be initialize\n if qe_sym is None:\n qe_sym = CC.symmetries.QE_Symmetry(dyn.structure)\n \n \n # Get the number of irreducible q points from the matrix\n self.nq = dyn.nqirr\n self.nat = dyn.structure.N_atoms\n \n # Initialize the symmetries at q = 0\n qe_sym.SetupQPoint()\n \n # Prepare the wyckoff basis\n tmp_wyck_gen = np.zeros((3 * self.nat, self.nat, 3), dtype = np.float64)\n \n for i in range( 3 * self.nat):\n x = i % 3\n n = i / 3\n tmp_wyck_gen[i, n, x] = 1\n \n # Symmetrize the vector\n qe_sym.SymmetrizeVector(tmp_wyck_gen[i, :, :])\n \n # Apply the gram-schmidt\n new_gen = tmp_wyck_gen.reshape((3 * self.nat, 3 * self.nat)).transpose()\n new_gen = scipy.linalg.orth(new_gen).transpose()\n \n # Get the number of wyckoff coefficients\n self.wyck_ncoeff = new_gen.shape()[0]\n \n # Reshape the array and get the coefficients\n self.wyck_gen = new_gen.reshape((self.wyck_ncoeff, self.nat, 3))\n \n r = np.arange(3 * self.nat)\n \n self.dyn_ncoeff = np.zeros(self.nq, dtype = int)\n self.dyn_gen = []\n \n # Cycle for each irreducible q point of the matrix\n for iq in range(self.nq):\n q = dyn.q_stars[iq][0]\n # Setup the symmetries for this q point\n qe_sym.SetupQPoint(q)\n \n gh = []\n \n for i in range(self.nat * 3):\n for j in range(i, self.nat * 3):\n # Take the generator\n fc = np.zeros((3 * self.nat, 3 * self.nat), dtype = np.complex128)\n fc[i, j] = 1\n \n # Apply the symmetry\n qe_sym.SymmetrizeDynQ(q, fc)\n \n # Check if the generator has already be generated\n is_new = True\n for k in range(i+1):\n mask = fc[k, :] != 0\n first_value = r[mask]\n if len(first_value):\n if k == i:\n if first_value[0] < j:\n is_new = False\n break\n else:\n is_new = False\n break\n \n # If the generator is new\n if is_new:\n qe_sym.ImposeSumRule(fc, \"simple\")\n \n # Check if the sum rule makes this generator desappearing\n if np.sum ((fc != 0).as_type(int)) != 0:\n gh.append(fc / np.sqrt(np.trace(fc.dot(fc))))\n \n dim = len(gh)\n \n # Prepare the gram-shmidt\n gh = np.array(gh, dtype = np.complex128)\n \n gh_new = np.reshape((dim, 9 * self.nat**2)).transpose()\n gh_new = scipy.linalg.orth(gh_new).transpose()\n \n self.dyn_ncoeff = np.shape(gh_new)[0]\n \n self.dyn_gen.append(np.reshape(gh_new, (self.dyn_ncoeff, 3*self.nat, 3*self.nat)))",
"def __init__(self, num_qubits: int):\n self.__n = num_qubits\n self.__N = 2 ** self.n\n self._H = scp.sparse.csr_matrix((self.N, self.N))",
"def GetQForEachMode(pols_sc, unit_cell_structure, supercell_structure, \\\n supercell_size, crystal = True):\n\n # Check the supercell\n n_cell = np.prod(supercell_size)\n\n nat = unit_cell_structure.N_atoms\n nat_sc = np.shape(pols_sc)[0] / 3\n n_modes = np.shape(pols_sc)[1] \n\n ERR_MSG = \"\"\"\n Error, the supercell {} is not commensurate with the polarization vector given.\n nat = {}, nat_sc = {}\n \"\"\"\n assert n_cell * nat == nat_sc, ERR_MSG.format(supercell_size, nat, nat_sc)\n assert nat_sc == supercell_structure.N_atoms\n\n # Get the reciprocal lattice\n bg = Methods.get_reciprocal_vectors(unit_cell_structure.unit_cell) / (2 * np.pi)\n\n # Get the possible Q list\n q_grid = GetQGrid(unit_cell_structure.unit_cell, supercell_size)\n\n # Allocate the output variable\n q_list = np.zeros( (n_modes, 3), dtype = np.double, order = \"C\")\n\n # Get the correspondance between the unit cell and the super cell atoms\n itau = supercell_structure.get_itau(unit_cell_structure) - 1 #Fort2Py\n\n # Get the translational vectors\n R_vects = np.zeros( (nat_sc, 3), dtype = np.double)\n for i in range(nat_sc):\n R_vects[i, :] = unit_cell_structure.coords[itau[i],:] - supercell_structure.coords[i,:]\n \n R_vects = R_vects.ravel()\n __thr__ = 1e-6\n\n for imu in range(n_modes):\n pol_v = pols_sc[:, imu]\n\n nq = 0\n for q in q_grid:\n q_vec = np.tile(q, nat_sc)\n q_cos = np.cos(2*np.pi * q_vec * R_vects)\n q_cos /= np.sqrt(q_cos.dot(q_cos))\n q_sin = np.sin(2*np.pi * q_vec * R_vects)\n q_sin /= np.sqrt(q_cos.dot(q_cos))\n\n cos_proj = q_cos.dot(pol_v)\n sin_proj = q_sin.dot(pol_v)\n # Wrong, this select only a translational mode\n\n if np.abs(cos_proj**2 + sin_proj**2 -1) < __thr__:\n new_q = q\n if crystal:\n new_q = Methods.covariant_coordinates(bg, q)\n q_list[imu, :] = new_q\n break\n elif cos_proj**2 + sin_proj**2 > __thr__:\n print (q_cos)\n ERROR_MSG = \"\"\"\n Error, mixing between two |q|.\n Please provide polarization vectors that are well defined in |q|.\n This can be reached using the subroutine Phonons.Phonons.DiagonalizeSupercell.\n q = {}\n i_mode = {}\n\n cos_proj = {} | sin_proj = {}\n \"\"\"\n raise ValueError(ERROR_MSG.format(q, imu, cos_proj, sin_proj))\n else:\n nq += 1\n\n \n # If we are here not q has been found\n if nq == len(q_grid):\n ERROR_MSG = \"\"\"\n Error, the polarization vector {} cannot be identified!\n No q found in this supercell!\n \"\"\"\n raise ValueError(ERROR_MSG.format(imu))\n\n\n return q_list",
"def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...",
"def basis(self):\n d = self.get_dimension()\n basis = [LieAlgebra(self) for _ in range(d)]\n z = np.zeros(d)\n for ii in range(d):\n z[ii] = 1\n basis[ii].set_vector(z)\n z[ii] = 0\n return basis"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GET SYMMETRIES FROM QE ====================== This method returns the symmetries stored here in the Quantum ESPRESSO internal format, converted back to the CellConstructor format (a list of 3x4 arrays).
|
def GetSymmetries(self, get_irt=False):
syms = []
for i in range(self.QE_nsym):
s_rot = np.zeros( (3, 4))
s_rot[:, :3] = np.transpose(self.QE_s[:, :, i])
s_rot[:, 3] = self.QE_ft[:, i]
syms.append(s_rot)
if not get_irt:
return syms
return syms, self.QE_irt[:self.QE_nsym, :].copy() - 1
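
A short sketch of how the returned values might be consumed, assuming sym_handler is an already initialized handler like the one above; the shapes follow from the code.

# Each entry is a 3x4 array: rotation in columns 0-2, fractional translation in column 3.
syms = sym_handler.GetSymmetries()
print(len(syms), syms[0].shape)               # -> QE_nsym, (3, 4)

# Optionally also retrieve the atom mapping (0-based after the -1 shift above).
syms, irt = sym_handler.GetSymmetries(get_irt=True)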
|
[
"def symbols(self):\r\n return [symbolData.symbol for symbolData in self.symbolData]",
"def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):\n \n nsym = len(symmetries)\n \n self.QE_nsymq = np.intc(nsym)\n self.QE_nsym = self.QE_nsymq\n \n \n for i, sym in enumerate(symmetries):\n self.QE_s[:,:, i] = np.transpose(sym[:, :3])\n \n # Get the atoms correspondence\n eq_atoms = GetIRT(self.structure, sym)\n \n self.QE_irt[i, :] = eq_atoms + 1\n \n # Get the inverse symmetry\n inv_sym = np.linalg.inv(sym[:, :3])\n for k, other_sym in enumerate(symmetries):\n if np.sum( (inv_sym - other_sym[:, :3])**2) < __EPSILON__:\n break\n \n self.QE_invs[i] = k + 1\n \n # Setup the position after the symmetry application\n for k in range(self.QE_nat):\n self.QE_rtau[:, i, k] = self.structure.coords[eq_atoms[k], :].astype(np.float64)\n \n \n # Get the reciprocal lattice vectors\n b_vectors = self.structure.get_reciprocal_vectors()\n \n # Get the minus_q operation\n self.QE_minusq = False\n\n # NOTE: HERE THERE COULD BE A BUG\n \n # q != -q\n # Get the q vectors in crystal coordinates\n q = Methods.covariant_coordinates(b_vectors, q_point)\n for k, sym in enumerate(self.QE_s):\n new_q = self.QE_s[:,:, k].dot(q)\n if np.sum( (Methods.put_into_cell(b_vectors, -q_point) - new_q)**2) < __EPSILON__:\n self.QE_minus_q = True\n self.QE_irotmq = k + 1\n break",
"def symbols(self):\n def _iter_symbols(symbol_values):\n # The initial charset doesn't matter, as the start codes have the same symbol values in all charsets.\n charset = 'A'\n\n shift_charset = None\n for symbol_value in symbol_values:\n if shift_charset:\n symbol = self._val2sym[shift_charset][symbol_value]\n shift_charset = None\n else:\n symbol = self._val2sym[charset][symbol_value]\n\n if symbol in (self.Special.START_A, self.Special.CODE_A):\n charset = 'A'\n elif symbol in (self.Special.START_B, self.Special.CODE_B):\n charset = 'B'\n elif symbol in (self.Special.START_C, self.Special.CODE_C):\n charset = 'C'\n elif symbol in (self.Special.SHIFT_A,):\n shift_charset = 'A'\n elif symbol in (self.Special.SHIFT_B,):\n shift_charset = 'B'\n\n yield symbol\n\n return list(_iter_symbols(self.symbol_values))",
"def get_hardcoded_sym_table() -> dict:\n sym_table = {'aa': 0, 'ae': 1, 'ah': 2, 'ao': 3, 'aw': 4, 'ay': 5, 'b': 6,\n 'ch': 7, 'd': 8, 'dh': 9, 'eh': 10, 'er': 11, 'ey': 12,\n 'f': 13, 'g': 14, 'hh': 15, 'ih': 16, 'iy': 17, 'jh': 18,\n 'k': 19, 'l': 20, 'm': 21, 'n': 22, 'ng': 23, 'ow': 24,\n 'oy': 25, 'p': 26, 'r': 27, 's': 28, 'sh': 29, 't': 30,\n 'th': 31, 'uh': 32, 'uw': 33, 'v': 34, 'w': 35, 'y': 36,\n 'z': 37, 'zh': 38, 'sil': 39}\n return sym_table",
"def PrintSymmetries(self):\n\n print()\n print(\"Number of symmetries: {}\".format(self.QE_nsym))\n syms = self.GetSymmetries()\n for i in range(self.QE_nsym):\n print(\" Symmetry {}\".format(i+1))\n for j in range(3):\n print(\" {:3.0f}{:3.0f}{:3.0f} | {:6.3f}\".format(*syms[i][j,:]))\n print()",
"def prepare_symbols(self):",
"def symbols(self) -> Iterable[str]:\n\n symbols = set([context.symbol for context in self.map.keys()])\n yield from symbols",
"def bi_relation_symbols():\n return _bi_symbols",
"def get_el_sym(self) :\n with open(self.filename, 'r') as f :\n for line in f :\n self.el_sym.append(line.split()[1])\n return self.el_sym",
"def Generate(self, dyn, qe_sym = None):\n \n # Check if the symmetries must be initialize\n if qe_sym is None:\n qe_sym = CC.symmetries.QE_Symmetry(dyn.structure)\n \n \n # Get the number of irreducible q points from the matrix\n self.nq = dyn.nqirr\n self.nat = dyn.structure.N_atoms\n \n # Initialize the symmetries at q = 0\n qe_sym.SetupQPoint()\n \n # Prepare the wyckoff basis\n tmp_wyck_gen = np.zeros((3 * self.nat, self.nat, 3), dtype = np.float64)\n \n for i in range( 3 * self.nat):\n x = i % 3\n n = i / 3\n tmp_wyck_gen[i, n, x] = 1\n \n # Symmetrize the vector\n qe_sym.SymmetrizeVector(tmp_wyck_gen[i, :, :])\n \n # Apply the gram-schmidt\n new_gen = tmp_wyck_gen.reshape((3 * self.nat, 3 * self.nat)).transpose()\n new_gen = scipy.linalg.orth(new_gen).transpose()\n \n # Get the number of wyckoff coefficients\n self.wyck_ncoeff = new_gen.shape()[0]\n \n # Reshape the array and get the coefficients\n self.wyck_gen = new_gen.reshape((self.wyck_ncoeff, self.nat, 3))\n \n r = np.arange(3 * self.nat)\n \n self.dyn_ncoeff = np.zeros(self.nq, dtype = int)\n self.dyn_gen = []\n \n # Cycle for each irreducible q point of the matrix\n for iq in range(self.nq):\n q = dyn.q_stars[iq][0]\n # Setup the symmetries for this q point\n qe_sym.SetupQPoint(q)\n \n gh = []\n \n for i in range(self.nat * 3):\n for j in range(i, self.nat * 3):\n # Take the generator\n fc = np.zeros((3 * self.nat, 3 * self.nat), dtype = np.complex128)\n fc[i, j] = 1\n \n # Apply the symmetry\n qe_sym.SymmetrizeDynQ(q, fc)\n \n # Check if the generator has already be generated\n is_new = True\n for k in range(i+1):\n mask = fc[k, :] != 0\n first_value = r[mask]\n if len(first_value):\n if k == i:\n if first_value[0] < j:\n is_new = False\n break\n else:\n is_new = False\n break\n \n # If the generator is new\n if is_new:\n qe_sym.ImposeSumRule(fc, \"simple\")\n \n # Check if the sum rule makes this generator desappearing\n if np.sum ((fc != 0).as_type(int)) != 0:\n gh.append(fc / np.sqrt(np.trace(fc.dot(fc))))\n \n dim = len(gh)\n \n # Prepare the gram-shmidt\n gh = np.array(gh, dtype = np.complex128)\n \n gh_new = np.reshape((dim, 9 * self.nat**2)).transpose()\n gh_new = scipy.linalg.orth(gh_new).transpose()\n \n self.dyn_ncoeff = np.shape(gh_new)[0]\n \n self.dyn_gen.append(np.reshape(gh_new, (self.dyn_ncoeff, 3*self.nat, 3*self.nat)))",
"def list_symbols(self) -> str:\n pass",
"def symbols(self):\n url = MARKET_URL + '/v1/common/symbols'\n params = {}\n return self._get(url, params)",
"def iter_symbols(self):\r\n for i in range(self.num_symbols()):\r\n yield self.get_symbol(i)",
"def _tushare_init_symbols(self):\r\n\t\t\"\"\"\r\n\t\troot_dir = 'config'\r\n\t\tif not os.path.exists(root_dir):\r\n\t\t\tos.makedirs(root_dir)\r\n\t\tPATH_ALLSYMBOLS_EQUD = self._PATH_ALLSYMBOLS_EQUD_TS\r\n\r\n\t\tif not os.path.isfile(PATH_ALLSYMBOLS_EQUD):\r\n\t\t\tdf = ts.get_today_all()\r\n\t\t\tdf[['code','name']].to_json(PATH_ALLSYMBOLS_EQUD)\r\n\t\telse:\r\n\t\t\tdf = pd.read_json(PATH_ALLSYMBOLS_EQUD)\r\n\r\n\t\tlist_symbols = list(df['code'])\r\n\t\tlist_symbols = [(6-len(str(s)))*'0'+str(s) for s in list_symbols]\r\n\t\tself._tushare_list_EquSymbols = list_symbols\r\n\t\t\"\"\"\r\n\t\tpass",
"def uni_relation_symbols():\n return _uni_symbols",
"def ExtractSymbols(self, native_heaps, sym_paths):\n raise NotImplementedError()",
"def get_symbol_map():\n functions = {}\n for ea in Segments():\n for funcea in Functions(SegStart(ea), SegEnd(ea)):\n size = FindFuncEnd(funcea) - funcea\n functions[funcea] = (GetFunctionName(funcea), size)\n # It may not be necessary to sort by ea, but be safe...\n output_lines = []\n for i, (ea, (name, size)) in enumerate(sorted(functions.items())):\n if len(name) > 255:\n print \"ClemSym: truncating name\", name\n name = name[:255]\n line = \"%d: %s @ %07x %d\" % (i, name, ea, size)\n output_lines.append(line)\n return '\\n'.join(output_lines)",
"def generate_symbols(self):\n\n logger.debug(f'- Generating symbols for {self.class_name}')\n\n # clear symbols storage\n self.f_list, self.g_list = list(), list()\n self.f_matrix, self.g_matrix = Matrix([]), Matrix([])\n\n # process tex_names defined in model\n # -----------------------------------------------------------\n for key in self.parent.tex_names.keys():\n self.tex_names[key] = Symbol(self.parent.tex_names[key])\n for instance in self.parent.discrete.values():\n for name, tex_name in zip(instance.get_names(), instance.get_tex_names()):\n self.tex_names[name] = tex_name\n # -----------------------------------------------------------\n\n for var in self.cache.all_params_names:\n self.inputs_dict[var] = Symbol(var)\n\n for var in self.cache.all_vars_names:\n tmp = Symbol(var)\n self.vars_dict[var] = tmp\n self.inputs_dict[var] = tmp\n if var in self.cache.vars_int:\n self.vars_int_dict[var] = tmp\n\n # store tex names defined in `self.config`\n for key in self.config.as_dict():\n tmp = Symbol(key)\n self.inputs_dict[key] = tmp\n if key in self.config.tex_names:\n self.tex_names[tmp] = Symbol(self.config.tex_names[key])\n\n # store tex names for pretty printing replacement later\n for var in self.inputs_dict:\n if var in self.parent.__dict__ and self.parent.__dict__[var].tex_name is not None:\n self.tex_names[Symbol(var)] = Symbol(self.parent.__dict__[var].tex_name)\n\n self.inputs_dict['dae_t'] = Symbol('dae_t')\n self.inputs_dict['sys_f'] = Symbol('sys_f')\n self.inputs_dict['sys_mva'] = Symbol('sys_mva')\n\n self.lambdify_func[0]['Indicator'] = lambda x: x\n self.lambdify_func[0]['imag'] = np.imag\n self.lambdify_func[0]['real'] = np.real\n self.lambdify_func[0]['im'] = np.imag\n self.lambdify_func[0]['re'] = np.real\n\n self.vars_list = list(self.vars_dict.values()) # useful for ``.jacobian()``",
"def _symbol_set():\n ranges = unicode_data._parse_code_ranges(noto_data.SYMBOL_RANGES_TXT)\n return _code_range_to_set(ranges)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SYMMETRIZE A VECTOR =================== This is the simplest symmetrization of a generic vector. Note that fractional and generic translations are not imposed, because this symmetrization acts on displacements and forces.
|
def SymmetrizeVector(self, vector):
# Apply Translations if any
self.ApplyTranslationsToVector(vector)
# Prepare the real vector
tmp_vector = np.zeros( (3, self.QE_nat), dtype = np.float64, order = "F")
for i in range(self.QE_nat):
tmp_vector[0, i] = vector[i,0]
tmp_vector[1, i] = vector[i,1]
tmp_vector[2,i] = vector[i,2]
symph.symvector(self.QE_nsymq, self.QE_irt, self.QE_s, self.QE_at, self.QE_bg,
tmp_vector, self.QE_nat)
for i in range(self.QE_nat):
vector[i, :] = tmp_vector[:,i]
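
An illustrative call, assuming sym_handler has already been set up (for instance via an SPGLIB-based initialization as elsewhere in this module); the array is symmetrized in place.

import numpy as np

forces = np.random.rand(sym_handler.QE_nat, 3)   # per-atom forces or displacements
sym_handler.SymmetrizeVector(forces)             # symmetrized in place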
|
[
"def ApplySymmetryToVector(symmetry, vector, unit_cell, irt):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n work = np.zeros( (nat, 3))\n sym = symmetry[:, :3]\n\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n w1 = sym.dot(v1.T).T\n\n # Return in cartesian coordinates\n work[irt[:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum(\"ab,a\", unit_cell, w1)\n\n return work",
"def matrize_vector(self, vector):\n if isinstance(vector, np.ndarray):\n vector = COO.from_numpy(vector)\n return sparse.tensordot(vector, self.operators, ([-1], [0]))",
"def vector_trans(self, v, T, V0):\n v = np.array(v)\n return np.add(v[0:2].dot(T),V0)",
"def ApplySymmetriesToVector(symmetries, vector, unit_cell, irts):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n n_sym = len(symmetries)\n\n assert n_sym == len(irts)\n\n work = np.zeros( (n_sym, nat, 3), dtype = np.double, order = \"C\")\n \n # Pass to crystalline coordinates\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n \n # Apply the symmetry\n for j, symmetry in enumerate(symmetries):\n sym = symmetry[:, :3]\n w1 = sym.dot(v1.T).T\n\n # Return in cartesian coordinates\n work[j, irts[j][:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum(\"ab,a\", unit_cell, w1)\n \n return work",
"def test_vector_from_symmetric_matrix_and_symmetric_matrix_from_vector(\n self):\n sym_mat_1 = gs.array([[1., 0.6, -3.],\n [0.6, 7., 0.],\n [-3., 0., 8.]])\n vector_1 = self.space.to_vector(sym_mat_1)\n result_1 = self.space.from_vector(vector_1)\n expected_1 = sym_mat_1\n\n self.assertTrue(gs.allclose(result_1, expected_1))\n\n vector_2 = gs.array([1., 2., 3., 4., 5., 6.])\n sym_mat_2 = self.space.from_vector(vector_2)\n result_2 = self.space.to_vector(sym_mat_2)\n expected_2 = vector_2\n\n self.assertTrue(gs.allclose(result_2, expected_2))",
"def vectorScale(v,s):\n return [s*e for e in v]",
"def unit_vector(vector):\n return vector/mag(vector)",
"def to_symmetric(self,sym):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation to_symmetric')\n\n # Return a copy of self if already a symtensor\n if self.is_symmetric:\n return self.copy()\n\n # Convert the full dense (sparse in symtensor lang) to symmetric version\n else:\n # Create the new tensor\n newten = self.ten.copy()\n assert(len(sym[0]) == len(newten.shape))\n # Convert the shape\n newshape = []\n for i in range(len(newten.shape)):\n newshape.append(len(sym[1][i]))\n newshape.append(newten.shape[i]/len(sym[1][i]))\n newten = newten.reshape(newshape)\n # Do a transpose on the indices\n order = []\n for i in range(len(sym[1])):\n order.append(2*i)\n for i in range(len(sym[1])):\n order.append(2*i+1)\n newten = newten.transpose(order)\n # Create a random symtensor\n newsymten = rand(newten.shape[len(sym[1]):],\n sym=sym,\n backend=self.backend,\n dtype=self.dtype,\n legs=self.legs,\n in_mem=self.in_mem)\n # Contract with delta to get dense irrep\n delta = newsymten.ten.get_irrep_map()\n einstr = LETTERS[:len(sym[1])].upper() + \\\n LETTERS[:len(sym[1])] + ',' + \\\n LETTERS[:len(sym[1])].upper() + '->' + \\\n LETTERS[:len(sym[1])-1].upper() + \\\n LETTERS[:len(sym[1])]\n newten = newsymten.backend.einsum(einstr,newten,delta)\n # Put the result into a symtensor\n newsymten.ten.array = newten\n # Return result\n return newsymten",
"def _transformVector(self, vector):\n assert len(vector) == 2, 'must be 2D vector'\n if self.nd == 2:\n vector[0] *= self.direction[0]\n return np.array([vector[0], vector[1], 0.])\n else:\n vector3D = np.zeros(3)\n vector3D[0] = vector[0]*self.direction[0]\n vector3D[1] = vector[0]*self.direction[1]\n vector3D[2] = vector[1]\n return vector3D",
"def orthogonalise_sym(vectors):\n ang = vec_angle(vectors[0],vectors[1])\n remainder = 90 - ang\n disp = remainder/2\n perp_unnormal = np.cross(vectors[0],vectors[1])\n normal = perp_unnormal / np.linalg.norm(perp_unnormal)\n\n rot_1 = rotation_matrix(normal,-disp)\n rot_2 = rotation_matrix(normal,disp)\n\n ovec_1 = np.dot(rot_1,vectors[0])\n ovec_2 = np.dot(rot_2,vectors[1])\n\n o_vecs = np.array([ovec_1,ovec_2])\n return o_vecs",
"def thrust(s, obj):\n #return vector(0.0, 0.0, 0.0)\n return obj.n.scale(ft2WU(2000))",
"def unit_vector(u: Vec4) -> Vec4:\n _mag = Vec4.magnitude(u)\n linv = 1.0 / _mag\n return u * linv",
"def norm2(v):\n # return (v.T @ v) ** (0.5)\n return math.sqrt(sum(x*x for x in v))",
"def __mul__(self, *args):\n return _vnl_vectorPython.vnl_vectorSI___mul__(self, *args)",
"def translate(self, vector):\n for atom in self.atoms:\n atom.translate(vector)",
"def to_tangent(self, vector, base_point):\n sq_norm = gs.sum(base_point**2, axis=-1)\n inner_prod = self.embedding_space.metric.inner_product(base_point, vector)\n coef = inner_prod / sq_norm\n return vector - gs.einsum(\"...,...j->...j\", coef, base_point)",
"def ApplySymmetriesToV2(self, v2, apply_translations = True):\n\n # Apply the Permutation symmetry\n v2[:,:] = 0.5 * (v2 + v2.T)\n\n # First lets recall that the fortran subroutines\n # Takes the input as (3,3,nat,nat)\n new_v2 = np.zeros( (3,3, self.QE_nat, self.QE_nat), dtype = np.double, order =\"F\")\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n new_v2[:, :, i, j] = v2[3*i : 3*(i+1), 3*j : 3*(j+1)]\n\n # Apply the translations\n if apply_translations:\n # Check that the translations have been setted up\n assert len(np.shape(self.QE_translations_irt)) == 2, \"Error, symmetries not setted up to work in the supercell\"\n symph.trans_v2(new_v2, self.QE_translations_irt)\n \n # Apply the symmetrization\n symph.sym_v2(new_v2, self.QE_at, self.QE_bg, self.QE_s, self.QE_irt, self.QE_nsym, self.QE_nat)\n\n # Return back\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n v2[3*i : 3*(i+1), 3*j : 3*(j+1)] = new_v2[:, :, i, j]",
"def __truediv__(self, d: 'double const') -> \"SbVec2s\":\n return _coin.SbVec2s___truediv__(self, d)",
"def reflect(v, n):\n return v - 2 * np.dot(v,n) * n"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DYNAMICAL MATRIX SYMMETRIZATION =============================== Use the Quantum ESPRESSO Fortran code to symmetrize the dynamical matrix at the given q point.
|
def SymmetrizeDynQ(self, dyn_matrix, q_point):
# TODO: implement hermitianity to speedup the conversion
#Prepare the array to be passed to the fortran code
QE_dyn = np.zeros( (3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = "F")
# Get the crystal coordinates for the matrix
for na in range(self.QE_nat):
for nb in range(self.QE_nat):
fc = dyn_matrix[3 * na : 3* na + 3, 3*nb: 3 * nb + 3]
QE_dyn[:, :, na, nb] = Methods.convert_matrix_cart_cryst(fc, self.structure.unit_cell, False)
# Prepare the xq variable
#xq = np.ones(3, dtype = np.float64)
xq = np.array(q_point, dtype = np.float64)
# print "XQ:", xq
# print "XQ_CRYST:", Methods.covariant_coordinates(self.QE_bg.T, xq)
# print "NSYMQ:", self.QE_nsymq, "NSYM:", self.QE_nsym
# print "QE SYM:"
# print np.einsum("abc->cba", self.QE_s[:, :, :self.QE_nsymq])
# print "Other syms:"
# print np.einsum("abc->cba", self.QE_s[:, :, self.QE_nsymq: self.QE_nsym])
# print "QE INVS:"
# print self.QE_invs[:self.QE_nsymq]
# #print "QE RTAU:"
# #print np.einsum("abc->bca", self.QE_rtau[:, :self.QE_nsymq, :])
# print "IROTMQ:", self.QE_irotmq
# print "MINUS Q:", self.QE_minus_q
# print "IRT:"
# print self.QE_irt[:self.QE_nsymq, :]
# print "NAT:", self.QE_nat
    # Inhibit minus q
#self.QE_minus_q = 0
# USE THE QE library to perform the symmetrization
symph.symdynph_gq_new( xq, QE_dyn, self.QE_s, self.QE_invs, self.QE_rtau,
self.QE_irt, self.QE_irotmq, self.QE_minus_q, self.QE_nsymq, self.QE_nat)
# Return to cartesian coordinates
for na in range(self.QE_nat):
for nb in range(self.QE_nat):
fc = QE_dyn[:, :, na, nb]
dyn_matrix[3 * na : 3* na + 3, 3*nb: 3 * nb + 3] = Methods.convert_matrix_cart_cryst(fc, self.structure.unit_cell, True)
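
A hedged sketch of symmetrizing a dynamical matrix at Gamma, assuming the symmetries have been prepared for q = 0 beforehand (e.g. with a SetupQPoint-style call used elsewhere in this module); the matrix is modified in place.

import numpy as np

nat = sym_handler.QE_nat
dyn = np.random.rand(3 * nat, 3 * nat) + 1j * np.random.rand(3 * nat, 3 * nat)
dyn = 0.5 * (dyn + dyn.conj().T)                 # make it Hermitian first
sym_handler.SymmetrizeDynQ(dyn, np.array([0., 0., 0.]))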
|
[
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def SymmetrizeFCQ(self, fcq, q_stars, verbose = False, asr = \"simple\"):\n nqirr = len(q_stars)\n nq = np.sum([len(x) for x in q_stars])\n \n # Get the q_points vector\n q_points = np.zeros( (nq, 3), dtype = np.float64)\n sigma = 0\n for i in range(nqirr):\n for q_vec in q_stars[i]:\n q_points[sigma, :] = q_vec\n sigma += 1\n \n if nq != np.shape(fcq)[0]:\n raise ValueError(\"Error, the force constant number of q point %d does not match with the %d given q_points\" % (np.shape(fcq)[0], nq))\n \n \n for iq in range(nq):\n # Prepare the symmetrization\n if verbose:\n print (\"Symmetries in q = \", q_points[iq, :])\n t1 = time.time()\n self.SetupQPoint(q_points[iq,:], verbose)\n t2 = time.time()\n if verbose:\n print (\" [SYMMETRIZEFCQ] Time to setup the q point %d\" % iq, t2-t1, \"s\")\n \n # Proceed with the sum rule if we are at Gamma\n \n if asr == \"simple\" or asr == \"custom\":\n if np.sqrt(np.sum(q_points[iq,:]**2)) < __EPSILON__:\n if verbose:\n print (\"q_point:\", q_points[iq,:])\n print (\"Applying sum rule\")\n self.ImposeSumRule(fcq[iq,:,:], asr)\n elif asr == \"crystal\":\n self.ImposeSumRule(fcq[iq, :,:], asr = asr)\n elif asr == \"no\":\n pass\n else:\n raise ValueError(\"Error, only 'simple', 'crystal', 'custom' or 'no' asr are supported, given %s\" % asr)\n \n t1 = time.time()\n if verbose:\n print (\" [SYMMETRIZEFCQ] Time to apply the sum rule:\", t1-t2, \"s\")\n \n # # Symmetrize the matrix\n if verbose:\n old_fcq = fcq[iq, :,:].copy()\n w_old = np.linalg.eigvals(fcq[iq, :, :])\n print (\"FREQ BEFORE SYM:\", w_old )\n self.SymmetrizeDynQ(fcq[iq, :,:], q_points[iq,:])\n t2 = time.time()\n if verbose:\n print (\" [SYMMETRIZEFCQ] Time to symmetrize the %d dynamical matrix:\" % iq, t2 -t1, \"s\" )\n print (\" [SYMMETRIZEFCQ] Difference before the symmetrization:\", np.sqrt(np.sum(np.abs(old_fcq - fcq[iq, :,:])**2)))\n w_new = np.linalg.eigvals(fcq[iq, :, :])\n print (\"FREQ AFTER SYM:\", w_new)\n\n # For each star perform the symmetrization over that star\n q0_index = 0\n for i in range(nqirr):\n q_len = len(q_stars[i])\n t1 = time.time()\n if verbose:\n print (\"Applying the q star symmetrization on:\")\n print (np.array(q_stars[i]))\n self.ApplyQStar(fcq[q0_index : q0_index + q_len, :,:], np.array(q_stars[i]))\n t2 = time.time()\n if verbose:\n print (\" [SYMMETRIZEFCQ] Time to apply the star q_irr = %d:\" % i, t2 - t1, \"s\")\n q0_index += q_len",
"def Generate(self, dyn, qe_sym = None):\n \n # Check if the symmetries must be initialize\n if qe_sym is None:\n qe_sym = CC.symmetries.QE_Symmetry(dyn.structure)\n \n \n # Get the number of irreducible q points from the matrix\n self.nq = dyn.nqirr\n self.nat = dyn.structure.N_atoms\n \n # Initialize the symmetries at q = 0\n qe_sym.SetupQPoint()\n \n # Prepare the wyckoff basis\n tmp_wyck_gen = np.zeros((3 * self.nat, self.nat, 3), dtype = np.float64)\n \n for i in range( 3 * self.nat):\n x = i % 3\n n = i / 3\n tmp_wyck_gen[i, n, x] = 1\n \n # Symmetrize the vector\n qe_sym.SymmetrizeVector(tmp_wyck_gen[i, :, :])\n \n # Apply the gram-schmidt\n new_gen = tmp_wyck_gen.reshape((3 * self.nat, 3 * self.nat)).transpose()\n new_gen = scipy.linalg.orth(new_gen).transpose()\n \n # Get the number of wyckoff coefficients\n self.wyck_ncoeff = new_gen.shape()[0]\n \n # Reshape the array and get the coefficients\n self.wyck_gen = new_gen.reshape((self.wyck_ncoeff, self.nat, 3))\n \n r = np.arange(3 * self.nat)\n \n self.dyn_ncoeff = np.zeros(self.nq, dtype = int)\n self.dyn_gen = []\n \n # Cycle for each irreducible q point of the matrix\n for iq in range(self.nq):\n q = dyn.q_stars[iq][0]\n # Setup the symmetries for this q point\n qe_sym.SetupQPoint(q)\n \n gh = []\n \n for i in range(self.nat * 3):\n for j in range(i, self.nat * 3):\n # Take the generator\n fc = np.zeros((3 * self.nat, 3 * self.nat), dtype = np.complex128)\n fc[i, j] = 1\n \n # Apply the symmetry\n qe_sym.SymmetrizeDynQ(q, fc)\n \n # Check if the generator has already be generated\n is_new = True\n for k in range(i+1):\n mask = fc[k, :] != 0\n first_value = r[mask]\n if len(first_value):\n if k == i:\n if first_value[0] < j:\n is_new = False\n break\n else:\n is_new = False\n break\n \n # If the generator is new\n if is_new:\n qe_sym.ImposeSumRule(fc, \"simple\")\n \n # Check if the sum rule makes this generator desappearing\n if np.sum ((fc != 0).as_type(int)) != 0:\n gh.append(fc / np.sqrt(np.trace(fc.dot(fc))))\n \n dim = len(gh)\n \n # Prepare the gram-shmidt\n gh = np.array(gh, dtype = np.complex128)\n \n gh_new = np.reshape((dim, 9 * self.nat**2)).transpose()\n gh_new = scipy.linalg.orth(gh_new).transpose()\n \n self.dyn_ncoeff = np.shape(gh_new)[0]\n \n self.dyn_gen.append(np.reshape(gh_new, (self.dyn_ncoeff, 3*self.nat, 3*self.nat)))",
"def test_sym_m_product():\n amat = np.array([[1, 2, 3], [3, 4, 6]], float, order='F')\n out1 = amat.T.dot(amat)\n out2 = my_dsyrk(amat)\n idx = np.triu_indices(amat.shape[1])\n\n assert np.allclose(out1[idx], out2[idx])\n\n amat = np.array([[1, 2, 3], [3, 4, 6]], float)\n amat = np.asfortranarray(amat.dot(amat.T))\n\n out1 = amat.T.dot(amat)\n out2 = my_dsyrk(amat)\n idx = np.triu_indices(amat.shape[1])\n\n assert np.allclose(out1[idx], out2[idx])",
"def symmetrize(dimTags, a, b, c, d):\n api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)\n ierr = c_int()\n lib.gmshModelGeoSymmetrize(\n api_dimTags_, api_dimTags_n_,\n c_double(a),\n c_double(b),\n c_double(c),\n c_double(d),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGeoSymmetrize returned non-zero error code: \",\n ierr.value)",
"def sylvester(self, q):\n n = self.degree + q.degree\n matrix = [None] * n\n i = 0\n for j in range(q.degree):\n matrix[i] = [0] * j + list(self.coefficients[::-1]) + [0] * (n - self.degree - j - 1)\n i += 1\n for j in range(self.degree):\n matrix[i] = [0] * j + list(q.coefficients[::-1]) + [0] * (n - q.degree - j - 1)\n i += 1\n return Matrix(matrix)",
"def SymmetrizeVector(self, vector):\n\n # Apply Translations if any\n self.ApplyTranslationsToVector(vector)\n \n # Prepare the real vector\n tmp_vector = np.zeros( (3, self.QE_nat), dtype = np.float64, order = \"F\")\n \n for i in range(self.QE_nat):\n tmp_vector[0, i] = vector[i,0]\n tmp_vector[1, i] = vector[i,1]\n tmp_vector[2,i] = vector[i,2]\n \n symph.symvector(self.QE_nsymq, self.QE_irt, self.QE_s, self.QE_at, self.QE_bg,\n tmp_vector, self.QE_nat)\n \n \n for i in range(self.QE_nat):\n vector[i, :] = tmp_vector[:,i]",
"def getRawSymmetryMatrix(*args, **kwargs):\n \n pass",
"def stress(X,Q,D):\n Y = projected_positions(X,Q)\n s2 = mds.stress(Y,D)\n return s2",
"def initialize_volume_symmetry_map(self):\n #@type pg PointGroup\n pg = self.crystal.get_point_group()\n if pg is None:\n print \"ERROR!\"\n return\n\n t1 = time.time()\n\n order = len(pg.table)\n #@type inst Instrument\n inst = self.inst\n\n #Initialize the symmetry map. Last dimension = the ORDER equivalent indices\n n = len(inst.qx_list)\n numpix = n**3\n symm = np.zeros( (numpix, order) , dtype=int)\n\n if self.verbose: print \"Starting volume symmetry calculation. Order is %d. Matrix is %d voxels (%d to a side).\" % (order, n**3, n)\n\n #--- From get_hkl_from_q functions: (moved here for speed) --\n #Get the inverse the B matrix to do the reverse conversion\n B = self.crystal.get_B_matrix()\n invB = np.linalg.inv(B)\n\n #Limit +- in q space\n qlim = inst.qlim\n \n if config.cfg.force_pure_python:\n #----------- Pure Python Version --------------\n\n #Go through each pixel\n q_arr = np.zeros( (3, numpix) )\n for (ix, qx) in enumerate(inst.qx_list):\n for (iy, qy) in enumerate(inst.qx_list):\n for (iz, qz) in enumerate(inst.qx_list):\n i = iz + iy*n + ix*n*n\n #Find the (float) HKL of this voxel at qx,qy,qz.\n q_arr[:, i] = (qx,qy,qz)\n\n #Matrix multiply invB.hkl to get all the HKLs as a column array\n hkl = np.dot(invB, q_arr)\n\n #Now get ORDER equivalent HKLs, as a long list.\n #(as equivalent q)\n q_equiv = np.zeros( (3, numpix, order) )\n for ord in xrange(order):\n #Ok, we go TABLE . hkl to get the equivalent hkl\n #Them, B . hkl gives you the Q vector\n q_equiv[:,:, ord] = np.dot(B, np.dot(pg.table[ord], hkl) )\n\n #Now we need to find the index into the array.\n #Start by finding the x,y,z, indices\n ix = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[0, :, ord])\n iy = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[1, :, ord])\n iz = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[2, :, ord])\n\n #Now put the index into the symmetry matrix\n index = iz + iy*n + ix*n*n\n index[np.isnan(index)] = -1 #Put -1 where a NAN was found\n symm[:, ord] = index\n\n\n else:\n #--------------- Inline C version (about 17x faster than Python) ---------------\n code = \"\"\"\n\n //-- Calculate the hkl array ---\n int ix, iy, iz;\n int eix, eiy, eiz, eindex;\n int index, ord;\n double qx, qy, qz;\n double eqx, eqy, eqz;\n double h, k, l;\n double eh, ek, el;\n for (ix=0; ix<n; ix++)\n {\n qx = ix*qres - qlim;\n for (iy=0; iy<n; iy++)\n {\n qy = iy*qres - qlim;\n for (iz=0; iz<n; iz++)\n {\n qz = iz*qres - qlim;\n index = iz + iy*n + ix*n*n;\n //Ok, now we matrix multiply invB.hkl to get all the HKLs as a column array\n h = qx * INVB2(0,0) + qy * INVB2(0,1) + qz * INVB2(0,2);\n k = qx * INVB2(1,0) + qy * INVB2(1,1) + qz * INVB2(1,2);\n l = qx * INVB2(2,0) + qy * INVB2(2,1) + qz * INVB2(2,2);\n\n //Now go through each equivalency table.\n for (ord=0; ord<order; ord++)\n {\n //Do TABLE.hkl to find a new equivalent hkl\n eh = h * TABLE3(ord, 0,0) + k * TABLE3(ord, 0,1) + l * TABLE3(ord, 0,2);\n ek = h * TABLE3(ord, 1,0) + k * TABLE3(ord, 1,1) + l * TABLE3(ord, 1,2);\n el = h * TABLE3(ord, 2,0) + k * TABLE3(ord, 2,1) + l * TABLE3(ord, 2,2);\n //Now, matrix mult B . 
equiv_hkl to get the other q vector\n eqx = eh * B2(0,0) + ek * B2(0,1) + el * B2(0,2);\n eqy = eh * B2(1,0) + ek * B2(1,1) + el * B2(1,2);\n eqz = eh * B2(2,0) + ek * B2(2,1) + el * B2(2,2);\n\n //Ok, now you have to find the index into QSPACE\n eix = round( (eqx+qlim)/qres ); if ((eix >= n) || (eix < 0)) eix = -1; \n eiy = round( (eqy+qlim)/qres ); if ((eiy >= n) || (eiy < 0)) eiy = -1;\n eiz = round( (eqz+qlim)/qres ); if ((eiz >= n) || (eiz < 0)) eiz = -1;\n\n if ((eix < 0) || (eiy < 0) || (eiz < 0))\n {\n //One of the indices was out of bounds.\n //Put this marker to mean NO EQUIVALENT\n SYMM2(index, ord) = -1;\n }\n else\n {\n //No problem!, Now I put it in there\n eindex = eiz + eiy*n + eix*n*n;\n //This pixel (index) has this equivalent pixel index (eindex) for this order transform ord.\n SYMM2(index, ord) = eindex;\n }\n\n }\n \n }\n }\n }\n \"\"\"\n qres = inst.q_resolution\n n = len(self.inst.qx_list)\n table = np.array(pg.table) #Turn the list of 3x3 arrays into a Nx3x3 array\n varlist = ['B', 'invB', 'symm', 'qres', 'qlim', 'n', 'order', 'table']\n weave.inline(code, varlist, compiler='gcc', support_code=\"\")\n\n #Done with either version\n self.volume_symmetry = symm\n\n if self.verbose: print \"Volume symmetry map done in %.3f sec.\" % (time.time()-t1)",
"def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):\n \n nsym = len(symmetries)\n \n self.QE_nsymq = np.intc(nsym)\n self.QE_nsym = self.QE_nsymq\n \n \n for i, sym in enumerate(symmetries):\n self.QE_s[:,:, i] = np.transpose(sym[:, :3])\n \n # Get the atoms correspondence\n eq_atoms = GetIRT(self.structure, sym)\n \n self.QE_irt[i, :] = eq_atoms + 1\n \n # Get the inverse symmetry\n inv_sym = np.linalg.inv(sym[:, :3])\n for k, other_sym in enumerate(symmetries):\n if np.sum( (inv_sym - other_sym[:, :3])**2) < __EPSILON__:\n break\n \n self.QE_invs[i] = k + 1\n \n # Setup the position after the symmetry application\n for k in range(self.QE_nat):\n self.QE_rtau[:, i, k] = self.structure.coords[eq_atoms[k], :].astype(np.float64)\n \n \n # Get the reciprocal lattice vectors\n b_vectors = self.structure.get_reciprocal_vectors()\n \n # Get the minus_q operation\n self.QE_minusq = False\n\n # NOTE: HERE THERE COULD BE A BUG\n \n # q != -q\n # Get the q vectors in crystal coordinates\n q = Methods.covariant_coordinates(b_vectors, q_point)\n for k, sym in enumerate(self.QE_s):\n new_q = self.QE_s[:,:, k].dot(q)\n if np.sum( (Methods.put_into_cell(b_vectors, -q_point) - new_q)**2) < __EPSILON__:\n self.QE_minus_q = True\n self.QE_irotmq = k + 1\n break",
"def do_sym_y(self):\n \n nx = self.nx()\n ny = self.ny()\n nz = self.nz()\n \n scale = np.float32(0.5)\n data = np.empty((nx, ny, nz), dtype=np.float32)\n \n for iz in range(0, nz):\n for iy in range(0, ny):\n for ix in range(0, nx):\n dleft = self._data[ix, iy, iz]\n drght = self._data[ix, ny-1-iy, iz]\n data[ix,iy,iz] = (dleft + drght) * scale\n \n self._data = data\n self._sym_y = True",
"def to_symmetric(self,sym):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation to_symmetric')\n\n # Return a copy of self if already a symtensor\n if self.is_symmetric:\n return self.copy()\n\n # Convert the full dense (sparse in symtensor lang) to symmetric version\n else:\n # Create the new tensor\n newten = self.ten.copy()\n assert(len(sym[0]) == len(newten.shape))\n # Convert the shape\n newshape = []\n for i in range(len(newten.shape)):\n newshape.append(len(sym[1][i]))\n newshape.append(newten.shape[i]/len(sym[1][i]))\n newten = newten.reshape(newshape)\n # Do a transpose on the indices\n order = []\n for i in range(len(sym[1])):\n order.append(2*i)\n for i in range(len(sym[1])):\n order.append(2*i+1)\n newten = newten.transpose(order)\n # Create a random symtensor\n newsymten = rand(newten.shape[len(sym[1]):],\n sym=sym,\n backend=self.backend,\n dtype=self.dtype,\n legs=self.legs,\n in_mem=self.in_mem)\n # Contract with delta to get dense irrep\n delta = newsymten.ten.get_irrep_map()\n einstr = LETTERS[:len(sym[1])].upper() + \\\n LETTERS[:len(sym[1])] + ',' + \\\n LETTERS[:len(sym[1])].upper() + '->' + \\\n LETTERS[:len(sym[1])-1].upper() + \\\n LETTERS[:len(sym[1])]\n newten = newsymten.backend.einsum(einstr,newten,delta)\n # Put the result into a symtensor\n newsymten.ten.array = newten\n # Return result\n return newsymten",
"def do_sym_z(self):\n \n nx = self.nx()\n ny = self.ny()\n nz = self.nz()\n \n scale = np.float32(0.5)\n data = np.empty((nx, ny, nz), dtype=np.float32)\n \n for iz in range(0, nz):\n for iy in range(0, ny):\n for ix in range(0, nx):\n dleft = self._data[ix, iy, iz]\n drght = self._data[ix, iy, nz-1-iz]\n data[ix,iy,iz] = (dleft + drght) * scale\n \n self._data = data\n self._sym_z = True",
"def apply_volume_symmetry(self, use_inline_c=True):\n t1 = time.time()\n\n #Get the # of pixels and the order from the symmetry map\n symm = self.volume_symmetry\n (numpix, order) = symm.shape\n\n if use_inline_c and not config.cfg.force_pure_python:\n #------ C version (about 400x faster than python) -------\n #Put some variables in the workspace\n old_q = self.qspace.flatten() * 1.0\n qspace_flat = old_q * 0.0\n\n support = \"\"\n code = \"\"\"\n int pix, ord, index;\n for (pix=0; pix<numpix; pix++)\n {\n //Go through each pixel\n for (ord=0; ord<order; ord++)\n {\n //Now go through each equivalent q.\n index = SYMM2(pix, ord);\n if (index >= 0)\n {\n //Valid index.\n QSPACE_FLAT1(pix) += OLD_Q1(index);\n //printf(\"%d\\\\n\", index);\n }\n }\n }\n \"\"\"\n varlist = ['old_q', 'qspace_flat', 'numpix', 'order', 'symm']\n weave.inline(code, varlist, compiler='gcc', support_code=support)\n #Reshape it back as a 3D array.\n n = len(self.inst.qx_list)\n self.qspace = qspace_flat.reshape( (n,n,n) )\n else:\n #---- Pure python version ----\n\n #Clear the starting space\n old_q = self.qspace\n new_q = self.qspace * 0\n for pix in xrange(numpix):\n for ord in xrange(order):\n eq_index = symm[pix, ord]\n if eq_index >= 0:\n #Add up to this pixel, the equivalent one.\n #The list includes this given voxel too.\n new_q.flat[pix] += old_q.flat[eq_index]\n self.qspace = new_q\n\n #Done.\n if self.verbose: print \"Volume symmetry computed in %.3f sec.\" % (time.time()-t1)",
"def isometrize(self):\n for idx,w0 in enumerate(self.W[0]):\n temp=np.reshape(w0,[self.d**2,self.Dbond])\n dmin=min(temp.shape)\n Q,R=np.linalg.qr(temp)\n self.W[0][idx]=np.reshape(Q,[self.d,self.d,dmin])\n\n for i in range(1,self.Nlayer):\n for idx,wj in enumerate(self.W[i]):\n temp=np.reshape(wj,[self.Dbond*self.Dbond,wj.shape[2]])\n Q,R=np.linalg.qr(temp)\n self.W[i][idx]=np.reshape(Q,[self.Dbond,self.Dbond,wj.shape[2]])",
"def symmetrize(dimTags, a, b, c, d):\n api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)\n ierr = c_int()\n lib.gmshModelOccSymmetrize(\n api_dimTags_, api_dimTags_n_,\n c_double(a),\n c_double(b),\n c_double(c),\n c_double(d),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelOccSymmetrize returned non-zero error code: \",\n ierr.value)",
"def symMatrix( numRows , numCols , letter ):\n n = max(len(str(numRows)),len(str(numCols)))\n format = '%s%0'+str(n)+'d'+'%0'+str(n)+'d'\n A = matrix(SR,numRows,numCols)\n for i in range(0,numRows):\n for j in range(0,numCols):\n A[i,j] = var(format % (letter,i,j) )\n\n return A",
"def transposeMatrixExpr(self,m):\n assert False, 'abstract method called'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GET THE Q STAR ============== Given a vector in q space, get the whole star. We use the Quantum ESPRESSO subroutine.
|
def GetQStar(self, q_vector):
    self.SetupQPoint()
    nq_new, sxq, isq, imq = symph.star_q(q_vector, self.QE_at, self.QE_bg,
                                         self.QE_nsymq, self.QE_s, self.QE_invs, 0)
    #print ("STAR IMQ:", imq)

    # If imq != 0 the point -q is already contained in the star;
    # otherwise the star is doubled by adding -q explicitly.
    if imq != 0:
        total_star = np.zeros((nq_new, 3), dtype=np.float64)
    else:
        total_star = np.zeros((2 * nq_new, 3), dtype=np.float64)

    total_star[:nq_new, :] = sxq[:, :nq_new].transpose()

    if imq == 0:
        total_star[nq_new:, :] = -sxq[:, :nq_new].transpose()

    return total_star
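The snippet below is a minimal, self-contained sketch of what building a q star amounts to, using a toy point group (the four proper rotations about z) in place of the Quantum ESPRESSO star_q routine; the rotation matrices, the tolerance eps and the helper name toy_q_star are illustrative assumptions and not part of the code above, and no Brillouin-zone folding is performed.

import numpy as np

# Toy point group: the four proper rotations about the z axis (C4z), in Cartesian axes.
c4 = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], dtype=float)
rotations = [np.linalg.matrix_power(c4, k) for k in range(4)]

def toy_q_star(q, symmetries, eps=1e-8):
    """Collect the distinct images of q under the given rotations; add -q if it is missing."""
    star = []
    for s in symmetries:
        new_q = s.dot(q)
        if not any(np.linalg.norm(new_q - old) < eps for old in star):
            star.append(new_q)
    # Time reversal: if -q is not already in the star, append the opposite of every member.
    if not any(np.linalg.norm(q + old) < eps for old in star):
        star += [-old for old in star]
    return np.array(star)

print(toy_q_star(np.array([0.5, 0.0, 0.0]), rotations))   # 4 points for this toy group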
|
[
"def SetupQStar(self, q_tot, supergroup = False):\n \n # Setup the symmetries\n #self.SetupQPoint()\n \n # Lets copy the q list (we are going to pop items from it)\n q_list = q_tot[:]\n q_stars = []\n \n count_qstar = 0\n count_q = 0\n q_indices = np.zeros( len(q_tot), dtype = int)\n while len(q_list) > 0:\n q = q_list[0]\n # Get the star of the current q point\n _q_ = np.array(q, dtype = np.float64) # Fortran explicit conversion\n \n nq_new, sxq, isq, imq = symph.star_q(_q_, self.QE_at, self.QE_bg, \n self.QE_nsym, self.QE_s, self.QE_invs, 0)\n \n # print (\"START WITH Q:\", q)\n # print (\"FOUND STAR:\")\n # for jq in range(nq_new):\n # print (sxq[:, jq])\n # print ()\n \n # print (\"TELL ME THE BG:\")\n # print (self.QE_bg.transpose())\n\n # print(\"Manual star:\")\n # for k in range(self.QE_nsym):\n # trial_q = q.dot(self.QE_s[:,:, k])\n # distance_q = Methods.get_min_dist_into_cell(self.QE_bg.T, trial_q, q)\n # distance_mq = Methods.get_min_dist_into_cell(self.QE_bg.T, trial_q, -q)\n # print(\"trial_q : {} | DQ: {:.4f} | DMQ: {:.4f}\".format(trial_q, distance_q, distance_mq ))\n \n # Prepare the star\n q_star = [sxq[:, k] for k in range(nq_new)]\n\n # If imq is not zero (we do not have -q in the star) then add the -q for each in the star\n if imq == 0:\n old_q_star = q_star[:]\n min_dist = 1\n \n for q in old_q_star:\n q_star.append(-q)\n\n \n\n q_stars.append(q_star)\n \n # Pop out the q_star from the q_list\n for jq, q_instar in enumerate(q_star):\n # Look for the q point in the star and pop them\n #print(\"q_instar:\", q_instar)\n q_dist = [Methods.get_min_dist_into_cell(self.QE_bg.transpose(), \n np.array(q_instar), q_point) for q_point in q_list]\n \n pop_index = np.argmin(q_dist) \n q_list.pop(pop_index)\n \n # Use the same trick to identify the q point\n q_dist = [Methods.get_min_dist_into_cell(self.QE_bg.transpose(), \n np.array(q_instar), q_point) for q_point in q_tot]\n \n q_index = np.argmin(q_dist)\n #print (q_indices, count_q, q_index)\n q_indices[count_q] = q_index\n \n count_q += 1\n \n \n return q_stars, q_indices",
"def q(self):\n return self.coords.q",
"def ApplyQStar(self, fcq, q_point_group):\n \n nq = np.shape(q_point_group)[0]\n final_fc = np.zeros(np.shape(fcq), dtype = np.complex128)\n \n # Setup all the symmetries\n self.SetupQPoint()\n \n new_dyn = np.zeros( (3 * self.QE_nat, 3*self.QE_nat), dtype = np.complex128, order = \"F\")\n \n dyn_star = np.zeros( (nq, 3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = \"F\")\n \n for i in range(nq):\n # Get the q points order\n nq_new, sxq, isq, imq = symph.star_q(q_point_group[i,:], self.QE_at, self.QE_bg, \n self.QE_nsymq, self.QE_s, self.QE_invs, 0)\n \n\n #print \"Found nq:\", nq_new \n #print \"IMQ?\", imq\n\n # Check if the q star is correct\n if nq_new != nq and imq != 0:\n print (\"Reciprocal lattice vectors:\")\n print (self.QE_bg.transpose() )\n print (\"Passed q star:\")\n print (q_point_group)\n print (\"QE q star:\")\n print (sxq[:, :nq_new].transpose())\n raise ValueError(\"Error, the passed q star does not match the one computed by QE\")\n# \n# # Print the star \n# print \"q point:\", q_point_group[i,:]\n# print \"Point in the stars:\", nq_new\n# print \"Star of q:\"\n# print sxq[:, :nq_new].transpose()\n# \n# print \"NEW_DYN:\", np.shape(new_dyn)\n# print \"AT:\", np.shape(self.QE_at)\n# print \"BG:\", np.shape(self.QE_bg)\n# print \"N SYM:\", self.QE_nsymq\n# print \"S:\", np.shape(self.QE_s)\n# print \"QE_INVS:\", np.shape(self.QE_invs)\n# print \"IRT:\", np.shape(self.QE_irt)\n# print \"RTAU:\", np.shape(self.QE_rtau)\n# print \"NQ_NEW:\", nq_new\n# print \"SXQ:\", np.shape(sxq)\n# print \"ISQ:\", np.shape(isq)\n# print \"IMQ:\", imq\n# print \"NAT:\", self.QE_nat\n \n new_dyn[:,:] = fcq[i,:,:]\n #print \"new dyn ready\"\n \n # Get the new matrix\n dyn_star = symph.q2qstar_out(new_dyn, self.QE_at, self.QE_bg, self.QE_nsymq, \n self.QE_s, self.QE_invs, self.QE_irt, self.QE_rtau,\n nq_new, sxq, isq, imq, nq, self.QE_nat)\n #print \"Fake\"\n \n #print \"XQ:\", q_point_group[i, :], \"NQ_NEW:\", nq_new\n\n # Now to perform the match bring the star in the same BZ as the q point\n # This facilitate the comparison between q points\n current_q = q_point_group.copy()\n #print \"Fake2\"\n# for xq in range(nq):\n# tmp = Methods.put_into_cell(self.QE_bg, sxq[:, xq])\n# sxq[:, xq] = tmp\n# current_q[xq,:] = Methods.put_into_cell(self.QE_bg, current_q [xq,:])\n# \n # Print the order of the q star\n sorting_q = np.arange(nq)\n for xq in range(nq):\n count = 0 # Debug (avoid no or more than one identification)\n for yq in range(nq):\n real_y = yq\n dot_f = 1\n if imq == 0 and yq >= nq_new:\n real_y -= nq_new\n dot_f = -1\n if Methods.get_min_dist_into_cell(self.QE_bg.transpose(), dot_f* sxq[:, real_y], current_q[xq,:]) < __EPSILON__: \n sorting_q[xq] = yq\n count += 1\n \n if count != 1:\n print (\"Original star:\")\n print (q_point_group)\n print (\"Reshaped star:\")\n print (current_q)\n print (\"Reciprocal lattice vectors:\")\n print (self.QE_bg.transpose() )\n print (\"STAR:\")\n print (sxq[:, :nq_new].transpose() )\n pta = (current_q[xq,:])\n print (\"Distances of xq in the QE star:\")\n for yq in range(nq_new):\n print (\"%.4f %.4f %.4f => \" % (sxq[0, yq], sxq[1, yq], sxq[2, yq]), Methods.get_min_dist_into_cell(self.QE_bg.transpose(), sxq[:, yq], current_q[xq,:]))\n raise ValueError(\"Error, the vector (%.3f, %.3f, %.3f) has %d identification in the star\" % (pta[0], pta[1], pta[2],\n count))\n #print \"Sorting array:\"\n #print sorting_q\n \n \n # Copy the matrix in the new one\n for xq in range(nq):\n for xat in range(self.QE_nat):\n for yat in 
range(self.QE_nat):\n final_fc[xq, 3*xat: 3*xat + 3, 3*yat : 3*yat + 3] += dyn_star[sorting_q[xq], :,:, xat, yat] \n \n \n # Now divide the matrix per the xq value\n final_fc /= nq\n \n # Overwrite the matrix\n fcq[:,:,:] = final_fc",
"def SelectIrreducibleQ(self, q_vectors):\n\n qs = np.array(q_vectors)\n nq = np.shape(qs)[0]\n\n q_irr = [qs[x, :].copy() for x in range(nq)]\n for i in range(nq):\n if i >= len(q_irr):\n break\n \n q_stars = self.GetQStar(q_irr[i])\n n_star = np.shape(q_stars)[0]\n\n # Look if the list contains point in the star\n for j in range(n_star):\n q_in_star = q_stars[j,:]\n # Go reverse, in this way if we pop an element we do not have to worry about indices\n for k in range(len(q_irr)-1, i, -1):\n if Methods.get_min_dist_into_cell(self.QE_bg.transpose(), q_in_star, q_irr[k]) < __EPSILON__:\n q_irr.pop(k) # Delete the k element\n \n return q_irr",
"def q(z):\n if z>=1.0: return 0.5\n\n \"\"\"\n Grace Wahba, Spline interpolation and smoothing on the sphere,\n SIAM J. SCI. STAT. COMPUT., 2(1) 1981, pp. 5-16. [Equation (3.4)]\n http://www.stat.wisc.edu/%7Ewahba/ftp1/oldie/sphspl.pdf\n\n W = (1-z)/2.0\n C = 2*sqrt(W)\n A = log(1+1.0/sqrt(W))\n return 0.5*(A*(12*W*W - 4*W) -6*C*W + 6*W + 1)\n \"\"\"\n\n \"\"\"\n H. J. Taijeron, A. G. Gibson, C. Chandler,\n Spline interpolation and smoothing on hyperspheres,\n SIAM J. SCI. COMPUT., 15(5) 1994, pp. 1111-1125. [Table 1]\n \"\"\"\n try:\n S = sqrt(2-2*z)\n N = (1-z)*log(2-2*z)\n L = (1-z)*log(sqrt(2/(1-z))+1)\n return 0.5*(-L*(3*z-1)+3*S*(z-1)+4-3*z)\n except:\n return 0.5",
"def get_q(self,coord='rc',unit='au'):\n if(coord=='rc'):\n return self.param['q_rc'];\n if(coord=='cc' and unit=='au'):\n return self.param['q_cc'];\n if(coord=='cc' and unit=='si'):\n return self.param['q_cc']/0.529177249;",
"def getq_python(azimuth, elevation, wl_output, rot_matrix, wl_input=None):\n #The Ewald sphere has 1/wl radius\n inelastic = True\n if wl_input is None:\n inelastic = False\n wl_input = wl_output\n\n #The scattered beam emanates from the centre of this spher.\n #Find the intersection of the scattered beam and the sphere, in XYZ\n beam = column(az_elev_direction(azimuth, elevation)) / wl_output\n\n #And here is the incident beam direction: Along the z-axis, positive\n incident = np.array([0, 0, 1.0]).reshape(3,1) / wl_input\n\n #The wave vector difference between the two is the q vector\n q = 2*pi * (beam - incident)\n\n #Now we switch to the coordinate system of the crystal.\n #The scattered beam direction (the detector location) is rotated relative to the crystal\n # because the sample is rotated.\n #So is the incident beam direction.\n #Therefore, the q-vector measured is simply rotated by the supplied rotation matrix (which has reversed angles)\n\n if inelastic:\n q_unrotated = q\n q = np.dot(rot_matrix, q_unrotated)\n return (q, q_unrotated)\n else:\n q = np.dot(rot_matrix, q)\n return q",
"def RestartVector(v, Q):\n m, n = Q.shape\n q0 = numpy.zeros(m)\n for i in xrange(n):\n q0 = q0 + v[i]*Q[:,i]\n \n q0Norm = Vector2Norm(q0)\n q0 = q0/q0Norm\n return q0",
"def get_q_v(self,v=None):\r\n# if v is None:\r\n v = self.net.res_bus.at[self.bus, 'vm_pu']\r\n# p = self.net.res_sgen.at[self.gid, 'p_mw']\r\n if abs(v-1) <= self.deadband:\r\n return 0\r\n if v <= 1-self.deadband:\r\n return min(self.qmax, (v-(1-self.deadband)) * self.m_vmin)\r\n else:\r\n return max(self.qmin, (v-(1+self.deadband)) * self.m_vmax)",
"def GFexpansion(self):\n if self.Nvstars == 0:\n return None\n GFstarset = self.starset.copy(empty=True)\n GFstarset.diffgenerate(self.starset, self.starset)\n GFexpansion = np.zeros((self.Nvstars, self.Nvstars, GFstarset.Nstars))\n for i in range(self.Nvstars):\n for si, vi in zip(self.vecpos[i], self.vecvec[i]):\n for j in range(i, self.Nvstars):\n for sj, vj in zip(self.vecpos[j], self.vecvec[j]):\n try:\n ds = self.starset.states[sj] ^ self.starset.states[si]\n except:\n continue\n k = GFstarset.starindex(ds)\n if k is None: raise ArithmeticError('GF star not large enough to include {}?'.format(ds))\n GFexpansion[i, j, k] += np.dot(vi, vj)\n # symmetrize\n for i in range(self.Nvstars):\n for j in range(0, i):\n GFexpansion[i, j, :] = GFexpansion[j, i, :]\n # cleanup on return:\n return zeroclean(GFexpansion), GFstarset",
"def GetQGrid_old(unit_cell, supercell_size):\n \n q_list = []\n # Get the recirpocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Get the supercell\n supercell = np.tile(supercell_size, (3, 1)).transpose() * unit_cell\n \n # Get the lattice vectors of the supercell\n bg_s = Methods.get_reciprocal_vectors(supercell)\n \n #print \"SUPERCELL:\", supercell_size\n \n for ix in range(supercell_size[0]):\n for iy in range(supercell_size[1]):\n for iz in range(supercell_size[2]):\n n_s = np.array( [ix, iy, iz], dtype = np.float64)\n q_vect = n_s.dot(bg_s)\n #q_vect = Methods.get_closest_vector(bg, q_vect)\n\n # Check if q is in the listcount = 0\n count = 0\n for q in q_list:\n if Methods.get_min_dist_into_cell(bg, -q_vect, q) < __EPSILON__:\n count += 1\n break\n if count > 0:\n continue\n\n # Add the q point\n q_list.append(q_vect)\n \n # Check if -q and q are different\n if Methods.get_min_dist_into_cell(bg, -q_vect, q_vect) > __EPSILON__:\n q_list.append(-q_vect)\n \n\n \n return q_list",
"def gravity(self, star):\n dis = self.distance(star)\n fx = Constant.GRAVITY_CONST * self.mass * star.mass / dis ** 3 * (star.pos.x - self.pos.x)\n fy = Constant.GRAVITY_CONST * self.mass * star.mass / dis ** 3 * (star.pos.y - self.pos.y)\n return Vector(fx, fy)",
"def _compute_Q_vector(self):\n\n self.QVector = list(it.product([fsc.Q for fsc in self.fscs]))",
"def _get_initial_qpos(self):\n pos = self._convert_robosuite_to_toolbox_xpos(self.traj_pt)\n ori_euler = mat2euler(quat2mat(self.goal_quat))\n\n # desired pose\n T = SE3(pos) * SE3.RPY(ori_euler)\n\n # find initial joint positions\n if self.robots[0].name == \"UR5e\":\n robot = rtb.models.DH.UR5()\n sol = robot.ikine_min(T, q0=self.robots[0].init_qpos)\n\n # flip last joint around (pi)\n sol.q[-1] -= np.pi\n return sol.q\n\n elif self.robots[0].name == \"Panda\":\n robot = rtb.models.DH.Panda()\n sol = robot.ikine_min(T, q0=self.robots[0].init_qpos)\n return sol.q",
"def squadPt(q0, q1, q2):\r\n\r\n return MQuaternion(om.MQuaternion.squadPt(q0, q1, q2))",
"def RV_star(dp):\n from tayph.vartests import typetest\n import numpy as np\n dp=check_dp(dp)\n p=phase(dp)\n K=paramget('K',dp)\n typetest(K,float,'K in sp.RV_star()')\n rv=K*np.sin(2.0*np.pi*p) * (-1.0)\n return(rv)",
"def sphere(r):\n S = 4 * pi * (r**2)\n return S",
"def GetNewQFromUnitCell(old_cell, new_cell, old_qs):\n \n bg = Methods.get_reciprocal_vectors(old_cell) #/ (2 * np.pi)\n new_bg = Methods.get_reciprocal_vectors(new_cell)# / (2 * np.pi)\n \n new_qs = []\n for iq, q in enumerate(old_qs):\n # Get the q point in crystal coordinates\n new_qprime = Methods.covariant_coordinates(bg, q)\n \n # Convert the crystal coordinates in the new reciprocal lattice vectors\n new_q = np.einsum(\"ji, j\", new_bg, new_qprime)\n new_qs.append(new_q)\n \n return new_qs",
"def getQ(self):\n return self.qFactor.get()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GET ONLY THE IRREDUCIBLE Q POINTS ================================= This method selects only the irreducible q points given a list of total q points for the structure.
|
def SelectIrreducibleQ(self, q_vectors):
    qs = np.array(q_vectors)
    nq = np.shape(qs)[0]

    q_irr = [qs[x, :].copy() for x in range(nq)]
    for i in range(nq):
        if i >= len(q_irr):
            break

        q_stars = self.GetQStar(q_irr[i])
        n_star = np.shape(q_stars)[0]

        # Look if the list contains points in the star
        for j in range(n_star):
            q_in_star = q_stars[j, :]
            # Go in reverse, so popping an element does not invalidate the remaining indices
            for k in range(len(q_irr) - 1, i, -1):
                if Methods.get_min_dist_into_cell(self.QE_bg.transpose(), q_in_star, q_irr[k]) < __EPSILON__:
                    q_irr.pop(k)  # Delete the k-th element

    return q_irr
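The reverse iteration used above when popping equivalent points is worth isolating. The toy below keeps one representative per {q, -q} pair from a plain list using the same pattern; the function name select_irreducible, the tolerance eps and the inversion-only equivalence test are illustrative assumptions standing in for the real star and for the periodic minimum-distance check.

import numpy as np

def select_irreducible(q_list, eps=1e-8):
    """Keep one representative per {q, -q} pair, popping duplicates in reverse order."""
    q_irr = [np.array(q, dtype=float) for q in q_list]
    i = 0
    while i < len(q_irr):
        star = [q_irr[i], -q_irr[i]]          # toy "star": only inversion symmetry
        for q_in_star in star:
            # Iterate backwards so pop() never shifts an index we still have to visit.
            for k in range(len(q_irr) - 1, i, -1):
                if np.linalg.norm(q_in_star - q_irr[k]) < eps:
                    q_irr.pop(k)
        i += 1
    return q_irr

grid = [[0, 0, 0], [0.5, 0, 0], [-0.5, 0, 0], [0, 0.5, 0], [0, -0.5, 0]]
print(select_irreducible(grid))   # one point per +/- pair survives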
|
[
"def GetQIrr(self, supercell):\n\n # Get all the q points\n q_points = GetQGrid(self.QE_at.T, supercell)\n\n # Delete the irreducible ones\n q_irr = self.SelectIrreducibleQ(q_points)\n\n return q_irr",
"def liste_Qx (self):\n liste_matrices_Qx = [self.Qx(self.liste_J[self.liste_angles[pli]][1],\n self.liste_J[self.liste_angles[pli]][3],\n self.liste_Q0[pli]) \n for pli in range(len(self.liste_angles))]\n return liste_matrices_Qx",
"def make_all_q(data):\n if not data.has_no_finite_acceptance:\n return []\n elif data.has_yz_acceptance(data):\n # compute qx, qy\n Qx, Qy = np.meshgrid(qx, qy)\n return [Qx, Qy]\n else:\n # else only need q\n # data.has_z_acceptance\n return [q]",
"def qn(self) -> np.ndarray:\n return sum(self.qn_list)",
"def partial_qs(self, qb_id, iteration):\n return {qnr.instrument for qnr in self.qnrs if\n qnr.qb_id == qb_id\n and qnr.iteration == iteration\n and qnr.status == \"in-progress\"}",
"def point_quadric(self,i):\n Q = np.zeros((4,4))\n # iterate over all triangles that contain the point i\n for j in self.graph[i].keys():\n for k in (self.graph[i].keys() & self.graph[j].keys()):\n Q += triangle_quadric(self.points[i],self.points[j],self.points[k])\n return Q",
"def _compute_Q_vector(self):\n\n self.QVector = list(it.product([fsc.Q for fsc in self.fscs]))",
"def Q(self):\n return np.vstack((self.data[self.Q_keys[i]].flatten() for i in ['h', 'k', 'l', 'e', 'temp'])).T",
"def get_q_glue(self) -> List[float]:\n raise NotImplementedError(\"Sections must have a q_glue value\")",
"def get_q_glue(self) -> List[float]:\n # We take q above the glue\n flange_area = self.thickness*self.flange_sheets*self.flange_width * 2\n flange_d = self.web_height + (self.thickness*self.flange_sheets) / 2 - self.y_bar\n\n deck_area = self.thickness * self.deck_sheets * (self.width - 2*self.flange_width)\n deck_d = self.web_height + (self.thickness * self.deck_sheets) / 2 - self.y_bar\n\n return [flange_area*flange_d + deck_area*deck_d]",
"def trackQuadraturePoints(self,q):\n import pdb\n timeToTrackPoints = (self.transport.timeIntegration.t > self.transport.timeIntegration.tLast + 1.0e-8 or\n abs(self.tForLastTrackingStep-self.transport.timeIntegration.t) > 1.0e-8)\n\n #by default, tracking element quadrature points only (q array)\n x_depart = {}\n nPoints_track = {}\n for ci in range(self.transport.nc):\n x_depart[ci] = q['x']\n nPoints_track[ci] = self.transport.mesh.nElements_global*self.transport.nQuadraturePoints_element\n\n def setupInitialElementLocations(ci,q_e):\n for k in range(q_e[ci].shape[1]):\n q_e[ci][:,k] = numpy.arange(self.transport.mesh.nElements_global,dtype='i')\n #todo need to allow skipping nonzero points with q or gq\n\n #first generate SSIPs if needed\n #todo this could be turned into a data member\n #0 -- not backtracked at all\n #1 -- backtracked only nonzero solution points\n #2 -- backtracked everything\n #mwf debug\n #import pdb\n #pdb.set_trace()\n solutionBackTrackedFlag = 0\n if self.needToTrackPoints and timeToTrackPoints and self.SSIPflag > 0:\n self.trackSolutionBackwards(skipPointsWithZeroSolution=True)\n self.generateSSIPs()\n solutionBackTrackedFlag = 1\n self.trackSSIPs()\n if self.needToTrackPoints and timeToTrackPoints:\n #mwf debug\n #pdb.set_trace()\n #update velocity fields for particle tracking\n for ci in range(self.transport.nc):\n self.particle_tracker.setTrackingVelocity(self.transport.coefficients.adjoint_velocity_dofs_last[ci],ci,\n self.transport.coefficients.adjoint_velocity_times_last[ci],\n timeLevel=0,\n trackingVelocity_l2g=self.transport.coefficients.adjoint_velocity_l2g[ci])\n self.particle_tracker.setTrackingVelocity(self.transport.coefficients.adjoint_velocity_dofs[ci],ci,\n self.transport.coefficients.adjoint_velocity_times[ci],\n timeLevel=1)\n\n\n log(\" LADRellam tracking integration points backward ci=%s\" % ci,level=2)\n self.q_t_depart[ci].fill(self.transport.timeIntegration.t)\n #in desired output time, out actual time\n self.q_t_track[ci].fill(self.transport.timeIntegration.tLast)\n #try all points, now set to -1 to try, -3 to skip, 0 or greater if a node of the mesh\n self.q_flag_track[ci].fill(-1)\n #assign ownership of quadrature points to elements\n setupInitialElementLocations(ci,self.q_element_track)\n\n #todo make sure activeComponents set explicitly?\n #mwf debug just play with forwardTrack call, normally backward tracking\n self.particle_tracker.backwardTrack(self.q_t_depart,\n self.q_t_track,\n nPoints_track,\n x_depart,\n self.q_element_track,\n self.q_x_track,\n self.q_flag_track)\n\n\n #mwf debug\n #pdb.set_trace()\n for ci in range(self.transport.nc):\n self.q_dt_track[ci] = numpy.copy(self.q_t_depart[ci])\n self.q_dt_track[ci] -= self.q_t_track[ci]\n\n if not self.useBackwardTrackingForOldMass:\n for ci in range(self.transport.nc):\n log(\" LADRellam tracking integration points forward ci=%s \" % ci,level=2)\n #forward\n self.q_t_depart[ci].fill(self.transport.timeIntegration.tLast)\n self.q_t_track[ci].fill(self.transport.timeIntegration.t)\n #todo setup so can skip points with zero solution using q or gq, need to evaluate u at gq\n #try all points, now set to -1 to try, -3 to skip, 0 or greater if a node of the mesh\n self.q_flag_track[ci].fill(-1)\n #assign ownership of quadrature points to elements\n setupInitialElementLocations(ci,self.q_element_track)\n\n\n #todo make sure activeComponents set explicitly?\n self.particle_tracker.forwardTrack(self.q_t_depart,\n self.q_t_track,\n nPoints_track,\n x_depart,\n self.q_element_track,\n 
self.q_x_track,\n self.q_flag_track)\n\n\n if self.needToBackTrackSolution and solutionBackTrackedFlag < 1:\n self.trackSolutionBackwards(skipPointsWithZeroSolution=False)\n\n #end tracking interpolation points\n self.needToTrackPoints = False\n self.tForLastTrackingStep=self.transport.timeIntegration.t\n #mwf debug\n #pdb.set_trace()\n #end need to track integration points",
"def q_per_channel_zero_points(self): # real signature unknown; restored from __doc__\n pass",
"def computeQindices(self):\n\n self.surf_index_Q = PUBSlib.computesurfindices(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_m)\n self.edge_index_Q = PUBSlib.computeedgeindicesq(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_m, self.surf_c1)\n self.vert_index_Q = PUBSlib.computevertindicesq(self.nsurf, self.nedge, self.nvert, self.surf_vert, self.surf_edge, self.surf_c1, self.edge_c1)\n self.nQ = 0\n self.nQ += max(self.vert_index_Q)\n self.nQ += max(self.edge_index_Q[:,1])\n self.nQ += self.surf_index_Q[-1,1]\n\n self.Q = numpy.zeros((self.nQ,self.nvar),order='F') \n if self.printInfo:\n print '# Degrees of freedom =',self.nQ",
"def q(self):\n return self.coords.q",
"def active_cells_dominated_by_lplus(self, q):\n for l, u, i in self.active_cells_lplus:\n if np.all(q <= l):\n yield l, u, i",
"def get_all_available_quantities():\n\n def get_derived_quantities(quantity):\n subcls = quantity.__subclasses__()\n qset = set()\n for subq in subcls:\n qset |= set((subq,))\n qset |= get_derived_quantities(subq)\n return qset\n return get_derived_quantities(Quantity)",
"def quantileGet(self,q):\n chains,iter,nparam=self.stan_fit.shape\n param=self.stan_fit.reshape((chains*iter,nparam))\n #q is quantile\n #param is array (nsamples,nparameters)\n # make a list to store the quantiles\n quants = []\n \n # for every predicted value\n for i in range(param.shape[1]):\n # make a vector to store the predictions from each chain\n val = []\n \n # next go down the rows and store the values\n for j in range(param.shape[0]):\n val.append(param[j,i])\n \n # return the quantile for the predictions.\n quants.append(np.percentile(val, q))\n \n return quants",
"def quad_points(self, n=None):\n if n is None:\n return self.__quad_points\n else:\n return self.__quad_points[n,:]",
"def liste_Q0 (self):\n liste_matrices_Q0 = [self.Q0(self.liste_El[pli], self.liste_Et[pli],\n self.liste_Glt[pli], self.liste_Nult[pli]) \n for pli in range(len(self.liste_angles))]\n return liste_matrices_Q0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GET THE LIST OF IRREDUCIBLE Q POINTS ==================================== This method returns a list of irreducible q points given the supercell size.
|
def GetQIrr(self, supercell):
    # Get all the q points of the supercell grid
    q_points = GetQGrid(self.QE_at.T, supercell)

    # Keep only the irreducible ones
    q_irr = self.SelectIrreducibleQ(q_points)

    return q_irr
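To make the grid-then-reduce flow concrete, the sketch below builds the commensurate q grid of an N1 x N2 x N3 supercell in crystal (fractional) coordinates with plain numpy. The helper toy_q_grid is hypothetical and merely stands in for the GetQGrid call above; it ignores the conversion to Cartesian coordinates, which would require the reciprocal lattice vectors.

import numpy as np
from itertools import product

def toy_q_grid(supercell):
    """Fractional q points n_i / N_i commensurate with an (N1, N2, N3) supercell."""
    n1, n2, n3 = supercell
    return np.array([[i / n1, j / n2, k / n3]
                     for i, j, k in product(range(n1), range(n2), range(n3))])

q_grid = toy_q_grid((2, 2, 2))
print(len(q_grid), "q points")   # 8 points for a 2x2x2 supercell
print(q_grid)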
|
[
"def GetQGrid_old(unit_cell, supercell_size):\n \n q_list = []\n # Get the recirpocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Get the supercell\n supercell = np.tile(supercell_size, (3, 1)).transpose() * unit_cell\n \n # Get the lattice vectors of the supercell\n bg_s = Methods.get_reciprocal_vectors(supercell)\n \n #print \"SUPERCELL:\", supercell_size\n \n for ix in range(supercell_size[0]):\n for iy in range(supercell_size[1]):\n for iz in range(supercell_size[2]):\n n_s = np.array( [ix, iy, iz], dtype = np.float64)\n q_vect = n_s.dot(bg_s)\n #q_vect = Methods.get_closest_vector(bg, q_vect)\n\n # Check if q is in the listcount = 0\n count = 0\n for q in q_list:\n if Methods.get_min_dist_into_cell(bg, -q_vect, q) < __EPSILON__:\n count += 1\n break\n if count > 0:\n continue\n\n # Add the q point\n q_list.append(q_vect)\n \n # Check if -q and q are different\n if Methods.get_min_dist_into_cell(bg, -q_vect, q_vect) > __EPSILON__:\n q_list.append(-q_vect)\n \n\n \n return q_list",
"def GetQGrid(unit_cell, supercell_size, enforce_gamma_first = True):\n bg = Methods.get_reciprocal_vectors(unit_cell)\n\n n_vects = int(np.prod(supercell_size))\n q_final = np.zeros((3, n_vects), dtype = np.double, order = \"F\")\n q_final[:,:] = symph.get_q_grid(bg.T, supercell_size, n_vects)\n\n # Get the list of the closest vectors\n q_list = [Methods.get_closest_vector(bg, q_final[:, i]) for i in range(n_vects)]\n\n # Setup Gamma as the first vector\n if enforce_gamma_first:\n for i, q in enumerate(q_list):\n if np.abs(np.sum(q)) < __EPSILON__:\n tmp = q_list[0].copy()\n q_list[0] = q.copy()\n q_list[i] = tmp \n break \n\n\n return q_list",
"def GetSupercellFromQlist(q_list, unit_cell):\n\n # Get the bravais lattice\n bg = Methods.get_reciprocal_vectors(unit_cell) \n\n # Convert the q points in crystalline units\n supercell = [1,1,1]\n\n for q in q_list:\n qprime = Methods.covariant_coordinates(bg, q)\n qprime -= np.floor(qprime)\n qprime[np.abs(qprime) < __EPSILON__] = 1\n\n rmax = 1/np.abs(qprime)\n for j in range(3):\n if supercell[j] < int(rmax[j] + .5):\n supercell[j] = int(rmax[j] + .5)\n \n return supercell",
"def liste_Qx (self):\n liste_matrices_Qx = [self.Qx(self.liste_J[self.liste_angles[pli]][1],\n self.liste_J[self.liste_angles[pli]][3],\n self.liste_Q0[pli]) \n for pli in range(len(self.liste_angles))]\n return liste_matrices_Qx",
"def get_q_glue(self) -> List[float]:\n # We take q above the glue\n flange_area = self.thickness*self.flange_sheets*self.flange_width * 2\n flange_d = self.web_height + (self.thickness*self.flange_sheets) / 2 - self.y_bar\n\n deck_area = self.thickness * self.deck_sheets * (self.width - 2*self.flange_width)\n deck_d = self.web_height + (self.thickness * self.deck_sheets) / 2 - self.y_bar\n\n return [flange_area*flange_d + deck_area*deck_d]",
"def listof_positions(self):\n l = []\n for rnum in range(self.rnum_min, self.rnum_max+1):\n if rnum in self._pieces:\n l.append(rnum)\n return l",
"def get_qubit_neighbour_list(self, d):\n\n count = 0\n qubit_dict = {}\n qubit_neighbours = []\n for row in range(d):\n for col in range(d):\n qubit_dict[str(tuple([row,col]))] = count\n cells = starmap(lambda a,b: (row+a, col+b), product((0,-1,+1), (0,-1,+1)))\n qubit_neighbours.append(list(cells)[1:])\n count +=1\n \n neighbour_list = []\n for qubit in range(d**2):\n neighbours = []\n for neighbour in qubit_neighbours[qubit]:\n if str(neighbour) in qubit_dict.keys():\n neighbours.append(qubit_dict[str(neighbour)])\n neighbour_list.append(neighbours)\n\n return neighbour_list",
"def GetNewQFromUnitCell(old_cell, new_cell, old_qs):\n \n bg = Methods.get_reciprocal_vectors(old_cell) #/ (2 * np.pi)\n new_bg = Methods.get_reciprocal_vectors(new_cell)# / (2 * np.pi)\n \n new_qs = []\n for iq, q in enumerate(old_qs):\n # Get the q point in crystal coordinates\n new_qprime = Methods.covariant_coordinates(bg, q)\n \n # Convert the crystal coordinates in the new reciprocal lattice vectors\n new_q = np.einsum(\"ji, j\", new_bg, new_qprime)\n new_qs.append(new_q)\n \n return new_qs",
"def qn(self) -> np.ndarray:\n return sum(self.qn_list)",
"def CheckSupercellQ(unit_cell, supercell_size, q_list):\n # Get the q point list for the given supercell\n correct_q = GetQGrid(unit_cell, supercell_size)\n \n # Get the reciprocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Check if the vectors are equivalent or not\n for iq, q in enumerate(q_list):\n for jq, qnew in enumerate(correct_q):\n if Methods.get_min_dist_into_cell(bg, q, qnew) < __EPSILON__:\n correct_q.pop(jq)\n break\n \n if len(correct_q) > 0:\n print (\"[CHECK SUPERCELL]\")\n print (\" MISSING Q ARE \")\n print (\"\\n\".join([\" q =%16.8f%16.8f%16.8f \" % (q[0], q[1], q[2]) for q in correct_q]))\n return False\n return True",
"def liste_Q0 (self):\n liste_matrices_Q0 = [self.Q0(self.liste_El[pli], self.liste_Et[pli],\n self.liste_Glt[pli], self.liste_Nult[pli]) \n for pli in range(len(self.liste_angles))]\n return liste_matrices_Q0",
"def SelectIrreducibleQ(self, q_vectors):\n\n qs = np.array(q_vectors)\n nq = np.shape(qs)[0]\n\n q_irr = [qs[x, :].copy() for x in range(nq)]\n for i in range(nq):\n if i >= len(q_irr):\n break\n \n q_stars = self.GetQStar(q_irr[i])\n n_star = np.shape(q_stars)[0]\n\n # Look if the list contains point in the star\n for j in range(n_star):\n q_in_star = q_stars[j,:]\n # Go reverse, in this way if we pop an element we do not have to worry about indices\n for k in range(len(q_irr)-1, i, -1):\n if Methods.get_min_dist_into_cell(self.QE_bg.transpose(), q_in_star, q_irr[k]) < __EPSILON__:\n q_irr.pop(k) # Delete the k element\n \n return q_irr",
"def get_q_glue(self) -> List[float]:\n raise NotImplementedError(\"Sections must have a q_glue value\")",
"def get_quad_points(self):\n return self.get_abstract_item(\"General\", \"Drag quadrature Points\")",
"def getPlotQuantitiesList():\n return plot_quantities_list[:]",
"def GetQForEachMode(pols_sc, unit_cell_structure, supercell_structure, \\\n supercell_size, crystal = True):\n\n # Check the supercell\n n_cell = np.prod(supercell_size)\n\n nat = unit_cell_structure.N_atoms\n nat_sc = np.shape(pols_sc)[0] / 3\n n_modes = np.shape(pols_sc)[1] \n\n ERR_MSG = \"\"\"\n Error, the supercell {} is not commensurate with the polarization vector given.\n nat = {}, nat_sc = {}\n \"\"\"\n assert n_cell * nat == nat_sc, ERR_MSG.format(supercell_size, nat, nat_sc)\n assert nat_sc == supercell_structure.N_atoms\n\n # Get the reciprocal lattice\n bg = Methods.get_reciprocal_vectors(unit_cell_structure.unit_cell) / (2 * np.pi)\n\n # Get the possible Q list\n q_grid = GetQGrid(unit_cell_structure.unit_cell, supercell_size)\n\n # Allocate the output variable\n q_list = np.zeros( (n_modes, 3), dtype = np.double, order = \"C\")\n\n # Get the correspondance between the unit cell and the super cell atoms\n itau = supercell_structure.get_itau(unit_cell_structure) - 1 #Fort2Py\n\n # Get the translational vectors\n R_vects = np.zeros( (nat_sc, 3), dtype = np.double)\n for i in range(nat_sc):\n R_vects[i, :] = unit_cell_structure.coords[itau[i],:] - supercell_structure.coords[i,:]\n \n R_vects = R_vects.ravel()\n __thr__ = 1e-6\n\n for imu in range(n_modes):\n pol_v = pols_sc[:, imu]\n\n nq = 0\n for q in q_grid:\n q_vec = np.tile(q, nat_sc)\n q_cos = np.cos(2*np.pi * q_vec * R_vects)\n q_cos /= np.sqrt(q_cos.dot(q_cos))\n q_sin = np.sin(2*np.pi * q_vec * R_vects)\n q_sin /= np.sqrt(q_cos.dot(q_cos))\n\n cos_proj = q_cos.dot(pol_v)\n sin_proj = q_sin.dot(pol_v)\n # Wrong, this select only a translational mode\n\n if np.abs(cos_proj**2 + sin_proj**2 -1) < __thr__:\n new_q = q\n if crystal:\n new_q = Methods.covariant_coordinates(bg, q)\n q_list[imu, :] = new_q\n break\n elif cos_proj**2 + sin_proj**2 > __thr__:\n print (q_cos)\n ERROR_MSG = \"\"\"\n Error, mixing between two |q|.\n Please provide polarization vectors that are well defined in |q|.\n This can be reached using the subroutine Phonons.Phonons.DiagonalizeSupercell.\n q = {}\n i_mode = {}\n\n cos_proj = {} | sin_proj = {}\n \"\"\"\n raise ValueError(ERROR_MSG.format(q, imu, cos_proj, sin_proj))\n else:\n nq += 1\n\n \n # If we are here not q has been found\n if nq == len(q_grid):\n ERROR_MSG = \"\"\"\n Error, the polarization vector {} cannot be identified!\n No q found in this supercell!\n \"\"\"\n raise ValueError(ERROR_MSG.format(imu))\n\n\n return q_list",
"def _compute_Q_vector(self):\n\n self.QVector = list(it.product([fsc.Q for fsc in self.fscs]))",
"def q(self):\n return self.coords.q",
"def quad_points(self, n=None):\n if n is None:\n return self.__quad_points\n else:\n return self.__quad_points[n,:]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
APPLY THE SYMMETRIES TO A RANK-2 TENSOR ======================================= This subroutine applies the symmetries to a rank-2 tensor. Useful when working with supercells.
|
def ApplySymmetriesToV2(self, v2, apply_translations = True):
    # Apply the permutation symmetry
    v2[:, :] = 0.5 * (v2 + v2.T)

    # The Fortran subroutines take the input as a (3, 3, nat, nat) array,
    # so repack the (3*nat, 3*nat) matrix accordingly.
    new_v2 = np.zeros((3, 3, self.QE_nat, self.QE_nat), dtype=np.double, order="F")
    for i in range(self.QE_nat):
        for j in range(self.QE_nat):
            new_v2[:, :, i, j] = v2[3*i : 3*(i+1), 3*j : 3*(j+1)]

    # Apply the translations
    if apply_translations:
        # Check that the translations have been set up
        assert len(np.shape(self.QE_translations_irt)) == 2, "Error, symmetries not set up to work in the supercell"
        symph.trans_v2(new_v2, self.QE_translations_irt)

    # Apply the symmetrization
    symph.sym_v2(new_v2, self.QE_at, self.QE_bg, self.QE_s, self.QE_irt, self.QE_nsym, self.QE_nat)

    # Copy the result back into the (3*nat, 3*nat) layout
    for i in range(self.QE_nat):
        for j in range(self.QE_nat):
            v2[3*i : 3*(i+1), 3*j : 3*(j+1)] = new_v2[:, :, i, j]
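The only subtle bookkeeping above is the round trip between the (3*nat, 3*nat) matrix layout and the (3, 3, nat, nat) array expected by the Fortran routines. That reshaping can be checked in isolation with the self-contained sketch below; the number of atoms and the random symmetric matrix are arbitrary test inputs, and the loop indices mirror the ones in the method.

import numpy as np

nat = 3                                  # toy number of atoms
rng = np.random.default_rng(0)
v2 = rng.standard_normal((3 * nat, 3 * nat))
v2 = 0.5 * (v2 + v2.T)                   # permutation (transpose) symmetry

# Pack the (3*nat, 3*nat) matrix into the (3, 3, nat, nat) layout ...
new_v2 = np.zeros((3, 3, nat, nat), order="F")
for i in range(nat):
    for j in range(nat):
        new_v2[:, :, i, j] = v2[3*i:3*(i+1), 3*j:3*(j+1)]

# ... and unpack it again; the round trip must be exact.
back = np.zeros_like(v2)
for i in range(nat):
    for j in range(nat):
        back[3*i:3*(i+1), 3*j:3*(j+1)] = new_v2[:, :, i, j]

assert np.allclose(v2, back)
print("round trip ok")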
|
[
"def to_symmetric(self,sym):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation to_symmetric')\n\n # Return a copy of self if already a symtensor\n if self.is_symmetric:\n return self.copy()\n\n # Convert the full dense (sparse in symtensor lang) to symmetric version\n else:\n # Create the new tensor\n newten = self.ten.copy()\n assert(len(sym[0]) == len(newten.shape))\n # Convert the shape\n newshape = []\n for i in range(len(newten.shape)):\n newshape.append(len(sym[1][i]))\n newshape.append(newten.shape[i]/len(sym[1][i]))\n newten = newten.reshape(newshape)\n # Do a transpose on the indices\n order = []\n for i in range(len(sym[1])):\n order.append(2*i)\n for i in range(len(sym[1])):\n order.append(2*i+1)\n newten = newten.transpose(order)\n # Create a random symtensor\n newsymten = rand(newten.shape[len(sym[1]):],\n sym=sym,\n backend=self.backend,\n dtype=self.dtype,\n legs=self.legs,\n in_mem=self.in_mem)\n # Contract with delta to get dense irrep\n delta = newsymten.ten.get_irrep_map()\n einstr = LETTERS[:len(sym[1])].upper() + \\\n LETTERS[:len(sym[1])] + ',' + \\\n LETTERS[:len(sym[1])].upper() + '->' + \\\n LETTERS[:len(sym[1])-1].upper() + \\\n LETTERS[:len(sym[1])]\n newten = newsymten.backend.einsum(einstr,newten,delta)\n # Put the result into a symtensor\n newsymten.ten.array = newten\n # Return result\n return newsymten",
"def TransformSymmetricSecondRankTensor(self, *args) -> \"itkVariableLengthVectorD\":\n return _itkCompositeTransformPython.itkCompositeTransformD2_TransformSymmetricSecondRankTensor(self, *args)",
"def TransformSymmetricSecondRankTensor(self, *args) -> \"itkVariableLengthVectorD\":\n return _itkCompositeTransformPython.itkCompositeTransformD3_TransformSymmetricSecondRankTensor(self, *args)",
"def _assign_sym2(cmap_ops):\n cmap_ops.phase('assign sym2')\n keycap_chars = tool_utils.parse_int_ranges(\"\"\"\n 0023 # Number Sign\n 002A # Asterisk\n 0030-0039 # Digits\n 20E3 # Combining Enclosing Keycap\"\"\")\n cmap_ops.add_all(keycap_chars, 'SYM2')",
"def _sym3x3(T):\n T[1,0], T[2,0], T[2,1] = T[0,1], T[0,2], T[1,2]",
"def symmetrize(input, output, symmetries, full_group): # pylint: disable=redefined-builtin\n model = _read_input(input)\n click.echo(\"Reading symmetries from file '{}' ...\".format(symmetries))\n sym = sr.io.load(symmetries)\n model_sym = _symmetrize(sym, model, full_group) # pylint: disable=assignment-from-no-return\n _write_output(model_sym, output)",
"def preprocessing_symb(symbol):\n symbol = torch.Tensor(symbol)\n return symbol[None,None,:,:]",
"def board_symmetries(board):\n yield BoardWrapper(board)\n\n w, h = board.width, board.height\n\n symmetry_functions = [reflect_vertical, reflect_horizontal, rotate_180]\n board_is_square = (w == h)\n if board_is_square:\n symmetry_functions += [reflect_secondary_diagonal, reflect_primary_diagonal, rotate_90, rotate_270]\n\n for sf in symmetry_functions:\n new_board = board.copy()\n\n for player,move in board.__last_player_move__.items():\n if move is not board.NOT_MOVED:\n new_board.__last_player_move__[player] = sf(move, w, h)\n\n for row in range(h):\n for col in range(w):\n row2, col2 = sf((row, col), w, h)\n new_board.__board_state__[row2][col2] = board.__board_state__[row][col]\n\n yield BoardWrapper(new_board)",
"def test_symmetry(self):\n dims = [[2,3], [4,6]]\n for dim in dims:\n for times in range(5):\n code = mami.make_code(*dim)\n guess = mami.make_code(*dim)\n self.assertEqual(mami.calculate_key(code,guess),mami.calculate_key(guess,code))",
"def test_sym_m_product():\n amat = np.array([[1, 2, 3], [3, 4, 6]], float, order='F')\n out1 = amat.T.dot(amat)\n out2 = my_dsyrk(amat)\n idx = np.triu_indices(amat.shape[1])\n\n assert np.allclose(out1[idx], out2[idx])\n\n amat = np.array([[1, 2, 3], [3, 4, 6]], float)\n amat = np.asfortranarray(amat.dot(amat.T))\n\n out1 = amat.T.dot(amat)\n out2 = my_dsyrk(amat)\n idx = np.triu_indices(amat.shape[1])\n\n assert np.allclose(out1[idx], out2[idx])",
"def performSymbolicInstructionSubstitution (self, mne, op1, op2):\n\n return None",
"def test_minor_symmetry_tensor(self):\n if not available:\n self.skipTest(reason)\n e_tensor = PyEshelbyTensor(6.0, 5.0, 4.0, 0.3)\n\n for indx in product([0, 1, 2], repeat=4):\n val1 = e_tensor(indx[0], indx[1], indx[2], indx[3])\n\n val2 = e_tensor(indx[0], indx[1], indx[3], indx[2])\n self.assertAlmostEqual(val1, val2)\n\n val2 = e_tensor(indx[1], indx[0], indx[3], indx[2])\n self.assertAlmostEqual(val1, val2)\n\n val2 = e_tensor(indx[1], indx[0], indx[2], indx[3])\n self.assertAlmostEqual(val1, val2)",
"def test_symmetrization_parallel(self):\n before = np.array(self.dataset.diffraction_group[\"intensity\"])\n symmetrized = np.array(before, copy=True)\n for index, _ in enumerate(self.dataset.time_points):\n symmetrized[:, :, index] = nfold(\n before[:, :, index], mod=3, center=(63, 65)\n )\n\n self.dataset.symmetrize(mod=3, center=(63, 65), processes=2)\n after = np.array(self.dataset.diffraction_group[\"intensity\"])\n\n self.assertTrue(np.allclose(symmetrized, after))",
"def do_sym_y(self):\n \n nx = self.nx()\n ny = self.ny()\n nz = self.nz()\n \n scale = np.float32(0.5)\n data = np.empty((nx, ny, nz), dtype=np.float32)\n \n for iz in range(0, nz):\n for iy in range(0, ny):\n for ix in range(0, nx):\n dleft = self._data[ix, iy, iz]\n drght = self._data[ix, ny-1-iy, iz]\n data[ix,iy,iz] = (dleft + drght) * scale\n \n self._data = data\n self._sym_y = True",
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]",
"def ApplySymmetriesToVector(symmetries, vector, unit_cell, irts):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n n_sym = len(symmetries)\n\n assert n_sym == len(irts)\n\n work = np.zeros( (n_sym, nat, 3), dtype = np.double, order = \"C\")\n \n # Pass to crystalline coordinates\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n \n # Apply the symmetry\n for j, symmetry in enumerate(symmetries):\n sym = symmetry[:, :3]\n w1 = sym.dot(v1.T).T\n\n # Return in cartesian coordinates\n work[j, irts[j][:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum(\"ab,a\", unit_cell, w1)\n \n return work",
"def sympykern(input_dim, k):\n return kern(input_dim, [spkern(input_dim, k)])",
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def list2sym(lst):\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function returns the list of symmetry matrices for the given ITA code of the group. The corresponding ITA/group label can be found on the Bilbao Crystallographic Server.
|
def get_symmetries_from_ita(ita, red=False):
    if ita <= 0:
        raise ValueError("Error, ITA group %d is not valid." % ita)

    filename = "%s/SymData/%d.dat" % (CURRENT_DIR, ita)
    if red:
        filename = "%s/SymData/%d_red.dat" % (CURRENT_DIR, ita)

    if not os.path.exists(filename):
        print ("Error, ITA group not yet implemented.")
        print ("You can download the symmetries for this group from the Bilbao Crystallographic Server")
        print ("And just add the %d.dat file into the SymData folder of the current program." % ita)
        print ("It should take less than five minutes.")
        raise ValueError("Error, ITA group %d not yet implemented. Check stdout on how to solve this problem." % ita)

    fp = open(filename, "r")

    # Get the number of symmetries
    n_sym = int(fp.readline().strip())
    fp.close()

    symdata = np.loadtxt(filename, skiprows=1)

    # Each symmetry operation is stored as a block of 3 consecutive rows
    symmetries = []
    for i in range(n_sym):
        symmetries.append(symdata[3*i:3*(i+1), :])

    return symmetries
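As a usage sketch, the snippet below applies one such symmetry block to a fractional coordinate. It assumes each block is a 3x4 matrix whose left 3x3 part is the rotation and whose last column is a fractional translation; that layout is an inference from how other snippets in this file slice sym[:, :3], not something stated by the function itself, and the example operation and helper name apply_symmetry are illustrative.

import numpy as np

# Example operation in the assumed (rotation | fractional translation) layout:
# a two-fold rotation about z combined with a (0, 0, 1/2) translation (a 2_1 screw axis).
sym = np.array([[-1.0,  0.0, 0.0, 0.0],
                [ 0.0, -1.0, 0.0, 0.0],
                [ 0.0,  0.0, 1.0, 0.5]])

def apply_symmetry(sym, frac_coord):
    """Apply W.x + w to a fractional coordinate and wrap it back into [0, 1)."""
    new_coord = sym[:, :3].dot(frac_coord) + sym[:, 3]
    return new_coord % 1.0

print(apply_symmetry(sym, np.array([0.25, 0.10, 0.30])))   # -> [0.75, 0.90, 0.80]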
|
[
"def SSpcGroup(SGData,SSymbol):\n \n def fixMonoOrtho():\n mod = ''.join(modsym).replace('1/2','0').replace('1','0')\n if SGData['SGPtGrp'] in ['2','m']: #OK\n if mod in ['a00','0b0','00g']:\n result = [i*-1 for i in SGData['SSGKl']]\n else:\n result = SGData['SSGKl'][:]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n elif SGData['SGPtGrp'] == '2/m': #OK\n if mod in ['a00','0b0','00g']:\n result = SGData['SSGKl'][:]\n else:\n result = [i*-1 for i in SGData['SSGKl']]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n else: #orthorhombic\n return [-SSGKl[i] if mod[i] in ['a','b','g'] else SSGKl[i] for i in range(3)]\n \n def extendSSGOps(SSGOps):\n for OpA in SSGOps:\n OpAtxt = SSMT2text(OpA)\n if 't' not in OpAtxt:\n continue\n for OpB in SSGOps:\n OpBtxt = SSMT2text(OpB)\n if 't' not in OpBtxt:\n continue\n OpC = list(SGProd(OpB,OpA))\n OpC[1] %= 1.\n OpCtxt = SSMT2text(OpC)\n# print OpAtxt.replace(' ','')+' * '+OpBtxt.replace(' ','')+' = '+OpCtxt.replace(' ','')\n for k,OpD in enumerate(SSGOps):\n OpDtxt = SSMT2text(OpD)\n OpDtxt2 = ''\n if SGData['SGGray']: \n OpDtxt2 = SSMT2text([OpD[0],OpD[1]+np.array([0.,0.,0.,.5])])\n# print ' ('+OpCtxt.replace(' ','')+' = ? '+OpDtxt.replace(' ','')+')'\n if OpCtxt == OpDtxt:\n continue\n elif OpCtxt == OpDtxt2:\n continue\n elif OpCtxt.split(',')[:3] == OpDtxt.split(',')[:3]:\n if 't' not in OpDtxt:\n SSGOps[k] = OpC\n# print k,' new:',OpCtxt.replace(' ','')\n break\n else:\n OpCtxt = OpCtxt.replace(' ','')\n OpDtxt = OpDtxt.replace(' ','')\n Txt = OpCtxt+' conflicts with '+OpDtxt\n# print (Txt)\n return False,Txt\n return True,SSGOps\n \n def findMod(modSym):\n for a in ['a','b','g']:\n if a in modSym:\n return a\n \n def genSSGOps():\n SSGOps = SSGData['SSGOps'][:]\n iFrac = {}\n for i,frac in enumerate(SSGData['modSymb']):\n if frac in ['1/2','1/3','1/4','1/6','1']:\n iFrac[i] = frac+'.'\n# print SGData['SpGrp']+SSymbol\n# print 'SSGKl',SSGKl,'genQ',genQ,'iFrac',iFrac,'modSymb',SSGData['modSymb']\n# set identity & 1,-1; triclinic\n SSGOps[0][0][3,3] = 1.\n## expand if centrosymmetric\n# if SGData['SGInv']:\n# SSGOps += [[-1*M,V] for M,V in SSGOps[:]]\n# monoclinic - all done & all checked\n if SGData['SGPtGrp'] in ['2','m']: #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n for i in iFrac:\n SSGOps[1][0][3,i] = -SSGKl[0]\n elif SGData['SGPtGrp'] == '2/m': #OK\n SSGOps[1][0][3,3] = SSGKl[1]\n if 's' in gensym:\n SSGOps[1][1][3] = 0.5\n for i in iFrac:\n SSGOps[1][0][3,i] = SSGKl[0]\n \n# orthorhombic - all OK not fully checked\n elif SGData['SGPtGrp'] in ['222','mm2','m2m','2mm']: #OK\n if SGData['SGPtGrp'] == '222':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[1,2],2:[1,3]},'b':{2:[3,2],0:[1,2]}} #OK\n elif SGData['SGPtGrp'] == 'mm2':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} #OK\n elif SGData['SGPtGrp'] == 'm2m':\n OrOps = {'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]}} #OK\n elif SGData['SGPtGrp'] == '2mm':\n OrOps = {'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]}} #OK\n a = findMod(SSGData['modSymb'])\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSGKl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSGKl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] == 'mmm': #OK\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} \n a = findMod(SSGData['modSymb'])\n if 
a == 'g':\n SSkl = [1,1,1]\n elif a == 'a':\n SSkl = [-1,1,-1]\n else:\n SSkl = [1,-1,-1]\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSkl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSkl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps \n# tetragonal - all done & checked\n elif SGData['SGPtGrp'] == '4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n elif SGData['SGPtGrp'] == '-4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = 1\n elif SGData['SGPtGrp'] in ['4/m',]: #OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n for i,j in enumerate([1,3]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['422','4mm','-42m','-4m2',]: #OK\n iGens = [1,4,5]\n if SGData['SGPtGrp'] in ['4mm','-4m2',]:\n iGens = [1,6,7]\n for i,j in enumerate(iGens):\n if '1/2' in SSGData['modSymb'] and i < 2:\n SSGOps[j][0][3,1] = SSGKl[i]\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n if 's' in gensym and j == 6:\n SSGOps[j][1][3] = -genQ[i]\n else:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['4/mmm',]:#OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n SSGOps[6][0][3,1] = SSGKl[1]\n if modsym:\n SSGOps[1][1][3] = -genQ[3]\n for i,j in enumerate([1,2,6,7]):\n SSGOps[j][0][3,3] = 1\n SSGOps[j][1][3] = genQ[i]\n E,Result = extendSSGOps(SSGOps)\n if not E:\n return E,Result\n else:\n SSGOps = Result\n \n# trigonal - all done & checked\n elif SGData['SGPtGrp'] == '3': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-3': #OK\n SSGOps[1][0][3,3] = -SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] in ['312','3m','-3m','-3m1','3m1']: #OK\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n for i,j in enumerate([1,5]):\n if SGData['SGPtGrp'] in ['3m','-3m']:\n SSGOps[j][0][3,3] = 1\n else: \n SSGOps[j][0][3,3] = SSGKl[i+1]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['321','32']: #OK\n for i,j in enumerate([1,4]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['31m','-31m']: #OK\n ids = [1,3]\n if SGData['SGPtGrp'] == '-31m':\n ids = [1,3]\n if '1/3' in SSGData['modSymb']:\n SSGOps[ids[0]][0][3,1] = -SSGKl[0]\n for i,j in enumerate(ids):\n SSGOps[j][0][3,3] = 1\n if genQ[i+1]:\n SSGOps[j][1][3] = genQ[i+1]\n \n# hexagonal all done & checked\n elif SGData['SGPtGrp'] == '6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n elif SGData['SGPtGrp'] in ['6/m',]: #OK\n SSGOps[1][0][3,3] = -SSGKl[1]\n SSGOps[1][1][3] = genQ[0]\n SSGOps[2][1][3] = genQ[1]\n elif SGData['SGPtGrp'] in ['622',]: #OK\n for i,j in enumerate([1,9,8]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = -genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n \n elif SGData['SGPtGrp'] in ['6mm','-62m','-6m2',]: #OK\n for i,j in enumerate([1,6,7]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif 
SGData['SGPtGrp'] in ['6/mmm',]: # OK\n for i,j in enumerate([1,2,10,11]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['1','-1']: #triclinic - done\n return True,SSGOps\n E,SSGOps = extendSSGOps(SSGOps)\n return E,SSGOps\n \n def specialGen(gensym,modsym):\n sym = ''.join(gensym)\n if SGData['SGPtGrp'] in ['2/m',] and 'n' in SGData['SpGrp']:\n if 's' in sym:\n gensym = 'ss'\n if SGData['SGPtGrp'] in ['-62m',] and sym == '00s':\n gensym = '0ss'\n elif SGData['SGPtGrp'] in ['222',]:\n if sym == '00s':\n gensym = '0ss'\n elif sym == '0s0':\n gensym = 'ss0'\n elif sym == 's00':\n gensym = 's0s'\n elif SGData['SGPtGrp'] in ['mmm',]:\n if 'g' in modsym:\n if sym == 's00':\n gensym = 's0s'\n elif sym == '0s0':\n gensym = '0ss'\n elif 'a' in modsym:\n if sym == '0s0':\n gensym = 'ss0'\n elif sym == '00s':\n gensym = 's0s'\n elif 'b' in modsym:\n if sym == '00s':\n gensym = '0ss'\n elif sym == 's00':\n gensym = 'ss0'\n return gensym\n \n Fracs = {'1/2':0.5,'1/3':1./3,'1':1.0,'0':0.,'s':.5,'t':1./3,'q':.25,'h':-1./6,'a':0.,'b':0.,'g':0.}\n if SGData['SGLaue'] in ['m3','m3m']:\n return '(3+1) superlattices not defined for cubic space groups',None\n elif SGData['SGLaue'] in ['3R','3mR']:\n return '(3+1) superlattices not defined for rhombohedral settings - use hexagonal setting',None\n try:\n modsym,gensym = splitSSsym(SSymbol)\n except ValueError:\n return 'Error in superspace symbol '+SSymbol,None\n modQ = [Fracs[mod] for mod in modsym]\n SSGKl = SGData['SSGKl'][:]\n if SGData['SGLaue'] in ['2/m','mmm']:\n SSGKl = fixMonoOrtho()\n Ngen = len(gensym)\n if SGData.get('SGGray',False):\n Ngen -= 1\n if len(gensym) and Ngen != len(SSGKl):\n return 'Wrong number of items in generator symbol '+''.join(gensym),None\n gensym = specialGen(gensym[:Ngen],modsym)\n genQ = [Fracs[mod] for mod in gensym[:Ngen]]\n if not genQ:\n genQ = [0,0,0,0]\n SSgSpc = SGData['SpGrp']+SSymbol\n if SGData['SGGray']:\n SSgSpc = SSgSpc.replace('(',\" 1'(\")\n SSGData = {'SSpGrp':SSgSpc,'modQ':modQ,'modSymb':modsym,'SSGKl':SSGKl}\n SSCen = np.zeros((len(SGData['SGCen']),4))\n for icen,cen in enumerate(SGData['SGCen']):\n SSCen[icen,0:3] = cen\n if 'BNSlattsym' in SGData and '_' in SGData['BNSlattsym'][0]:\n Ncen = len(SGData['SGCen'])\n for icen in range(Ncen//2,Ncen):\n SSCen[icen,3] = 0.5\n SSGData['SSGCen'] = SSCen%1.\n SSGData['SSGOps'] = []\n for iop,op in enumerate(SGData['SGOps']):\n T = np.zeros(4)\n ssop = np.zeros((4,4))\n ssop[:3,:3] = op[0]\n T[:3] = op[1]\n SSGData['SSGOps'].append([ssop,T])\n E,Result = genSSGOps()\n if E:\n SSGData['SSGOps'] = Result\n if DEBUG:\n print ('Super spacegroup operators for '+SSGData['SSpGrp'])\n for Op in Result:\n print (SSMT2text(Op).replace(' ',''))\n if SGData['SGInv']: \n for Op in Result:\n Op = [-Op[0],-Op[1]%1.]\n print (SSMT2text(Op).replace(' ','')) \n return None,SSGData\n else:\n return Result+'\\nOperator conflict - incorrect superspace symbol',None",
"def SpcGroup(SGSymbol):\n LaueSym = ('-1','2/m','mmm','4/m','4/mmm','3R','3mR','3','3m1','31m','6/m','6/mmm','m3','m3m')\n LattSym = ('P','A','B','C','I','F','R')\n UniqSym = ('','','a','b','c','',)\n SysSym = ('triclinic','monoclinic','orthorhombic','tetragonal','rhombohedral','trigonal','hexagonal','cubic')\n SGData = {}\n if len(SGSymbol.split()) < 2:\n return SGErrors(0),SGData\n if ':R' in SGSymbol:\n SGSymbol = SGSymbol.replace(':',' ') #get rid of ':' in R space group symbols from some cif files\n SGData['SGGray'] = False\n if \"1'\" in SGSymbol: #set for incommensurate magnetic\n SGData['SGGray'] = True\n SGSymbol = SGSymbol.replace(\"1'\",'')\n SGSymbol = SGSymbol.split(':')[0] #remove :1/2 setting symbol from some cif files\n if '-2' in SGSymbol: #replace bad but legal symbols with correct equivalents\n SGSymbol = SGSymbol.replace('-2','m')\n if SGSymbol.split()[1] =='3/m':\n SGSymbol = SGSymbol.replace('3/m','-6')\n import pyspg\n SGInfo = pyspg.sgforpy(SGSymbol)\n SGData['SpGrp'] = SGSymbol.strip().lower().capitalize()\n SGData['SGLaue'] = LaueSym[SGInfo[0]-1]\n SGData['SGInv'] = bool(SGInfo[1])\n SGData['SGLatt'] = LattSym[SGInfo[2]-1]\n SGData['SGUniq'] = UniqSym[SGInfo[3]+1]\n SGData['SGFixed'] = False\n SGData['SGOps'] = []\n SGData['SGGen'] = []\n for i in range(SGInfo[5]):\n Mat = np.array(SGInfo[6][i])\n Trns = np.array(SGInfo[7][i])\n SGData['SGOps'].append([Mat,Trns])\n if 'array' in str(type(SGInfo[8])): #patch for old fortran bin?\n SGData['SGGen'].append(int(SGInfo[8][i]))\n SGData['BNSlattsym'] = [LattSym[SGInfo[2]-1],[0,0,0]]\n lattSpin = []\n if SGData['SGLatt'] == 'P':\n SGData['SGCen'] = np.array(([0,0,0],))\n elif SGData['SGLatt'] == 'A':\n SGData['SGCen'] = np.array(([0,0,0],[0,.5,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'B':\n SGData['SGCen'] = np.array(([0,0,0],[.5,0,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'C':\n SGData['SGCen'] = np.array(([0,0,0],[.5,.5,0,]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'I':\n SGData['SGCen'] = np.array(([0,0,0],[.5,.5,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'F':\n SGData['SGCen'] = np.array(([0,0,0],[0,.5,.5],[.5,0,.5],[.5,.5,0,]))\n lattSpin += [1,1,1,1]\n elif SGData['SGLatt'] == 'R':\n SGData['SGCen'] = np.array(([0,0,0],[2./3,1./3,1./3],[1./3,2./3,2./3]))\n\n if SGData['SGInv']:\n if SGData['SGLaue'] in ['-1','2/m','mmm']:\n Ibar = 7\n elif SGData['SGLaue'] in ['4/m','4/mmm']:\n Ibar = 1\n elif SGData['SGLaue'] in ['3R','3mR','3','3m1','31m','6/m','6/mmm']:\n Ibar = 15 #8+4+2+1\n else:\n Ibar = 4\n Ibarx = Ibar&14\n else:\n Ibarx = 8\n if SGData['SGLaue'] in ['-1','2/m','mmm','m3','m3m']:\n Ibarx = 0\n moregen = []\n for i,gen in enumerate(SGData['SGGen']):\n if SGData['SGLaue'] in ['m3','m3m']:\n if gen in [1,2,4]:\n SGData['SGGen'][i] = 4\n elif gen < 7:\n SGData['SGGen'][i] = 0\n elif SGData['SGLaue'] in ['4/m','4/mmm','3R','3mR','3','3m1','31m','6/m','6/mmm']:\n if gen == 2:\n SGData['SGGen'][i] = 4\n elif gen in [3,5]:\n SGData['SGGen'][i] = 3\n elif gen == 6:\n if SGData['SGLaue'] in ['4/m','4/mmm']:\n SGData['SGGen'][i] = 128\n else:\n SGData['SGGen'][i] = 16\n elif not SGData['SGInv'] and gen == 12:\n SGData['SGGen'][i] = 8\n elif (not SGData['SGInv']) and (SGData['SGLaue'] in ['3','3m1','31m','6/m','6/mmm']) and (gen == 1):\n SGData['SGGen'][i] = 24\n gen = SGData['SGGen'][i]\n if gen == 99:\n gen = 8\n if SGData['SGLaue'] in ['3m1','31m','6/m','6/mmm']:\n gen = 3\n elif SGData['SGLaue'] == 'm3m':\n gen = 12\n SGData['SGGen'][i] = gen\n elif gen == 98:\n gen = 8\n if 
SGData['SGLaue'] in ['3m1','31m','6/m','6/mmm']:\n gen = 4\n SGData['SGGen'][i] = gen\n elif not SGData['SGInv'] and gen in [23,] and SGData['SGLaue'] in ['m3','m3m']:\n SGData['SGGen'][i] = 24\n elif gen >= 16 and gen != 128:\n if not SGData['SGInv']:\n gen = 31\n else:\n gen ^= Ibarx \n SGData['SGGen'][i] = gen\n if SGData['SGInv']:\n if gen < 128:\n moregen.append(SGData['SGGen'][i]^Ibar)\n else:\n moregen.append(1)\n SGData['SGGen'] += moregen\n if SGData['SGLaue'] in '-1':\n SGData['SGSys'] = SysSym[0]\n elif SGData['SGLaue'] in '2/m':\n SGData['SGSys'] = SysSym[1]\n elif SGData['SGLaue'] in 'mmm':\n SGData['SGSys'] = SysSym[2]\n elif SGData['SGLaue'] in ['4/m','4/mmm']:\n SGData['SGSys'] = SysSym[3]\n elif SGData['SGLaue'] in ['3R','3mR']:\n SGData['SGSys'] = SysSym[4]\n elif SGData['SGLaue'] in ['3','3m1','31m']:\n SGData['SGSys'] = SysSym[5]\n elif SGData['SGLaue'] in ['6/m','6/mmm']:\n SGData['SGSys'] = SysSym[6]\n elif SGData['SGLaue'] in ['m3','m3m']:\n SGData['SGSys'] = SysSym[7]\n SGData['SGPolax'] = SGpolar(SGData)\n SGData['SGPtGrp'],SGData['SSGKl'] = SGPtGroup(SGData)\n\n if SGData['SGLatt'] == 'R':\n if SGData['SGPtGrp'] in ['3',]:\n SGData['SGSpin'] = 3*[1,]\n elif SGData['SGPtGrp'] in ['-3','32','3m']:\n SGData['SGSpin'] = 4*[1,]\n elif SGData['SGPtGrp'] in ['-3m',]:\n SGData['SGSpin'] = 5*[1,]\n \n else:\n if SGData['SGPtGrp'] in ['1','3','23',]:\n SGData['SGSpin'] = lattSpin+[1,]\n elif SGData['SGPtGrp'] in ['-1','2','m','4','-4','-3','312','321','3m1','31m','6','-6','432','-43m']:\n SGData['SGSpin'] = lattSpin+[1,1,]\n elif SGData['SGPtGrp'] in ['2/m','4/m','422','4mm','-42m','-4m2','-3m1','-31m',\n '6/m','622','6mm','-6m2','-62m','m3','m3m']:\n SGData['SGSpin'] = lattSpin+[1,1,1,]\n else: #'222'-'mmm','4/mmm','6/mmm'\n SGData['SGSpin'] = lattSpin+[1,1,1,1,]\n return SGInfo[-1],SGData",
"def test_get_isosteric_pairs(self):\n ima = IsostericityMatrices()\n result = ima.get_isosteric_pairs('AC', 'cWW')\n self.assertEqual(result, ('AC', 'GU'))",
"def symbology(self):\n\n\t\tif ARCMAP and self.layer_object.symbologyType == \"OTHER\":\n\t\t\traise NotSupportedError(\"Unsupported symbology type in ArcMap\")\n\n\t\treturn self.layer_object.symbology",
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def toMatrice(self):\n\t\ttxt = \" \"\n\t\tfor i in sorted(self.graphe.keys()):\n\t\t txt += str(i)+\"-\"\n\t\tprint(txt, file=sys.stderr)\n\t\t\n\t\ttxt=\"\"\n\t\tfor i in sorted(self.graphe.keys()):\n\t\t\ttxt += str(i)\n\t\t\tfor j in sorted(self.graphe.keys()):\n\t\t\t\tif i in self.graphe[j].keys():\n\t\t\t\t\ttxt += \" 1\"\n\t\t\t\telse:\n\t\t\t\t\ttxt += \" 0\"\n\t\t\tprint(txt, file=sys.stderr)\n\t\t\ttxt = \"\"",
"def get_hardcoded_sym_table() -> dict:\n sym_table = {'aa': 0, 'ae': 1, 'ah': 2, 'ao': 3, 'aw': 4, 'ay': 5, 'b': 6,\n 'ch': 7, 'd': 8, 'dh': 9, 'eh': 10, 'er': 11, 'ey': 12,\n 'f': 13, 'g': 14, 'hh': 15, 'ih': 16, 'iy': 17, 'jh': 18,\n 'k': 19, 'l': 20, 'm': 21, 'n': 22, 'ng': 23, 'ow': 24,\n 'oy': 25, 'p': 26, 'r': 27, 's': 28, 'sh': 29, 't': 30,\n 'th': 31, 'uh': 32, 'uw': 33, 'v': 34, 'w': 35, 'y': 36,\n 'z': 37, 'zh': 38, 'sil': 39}\n return sym_table",
"def find_symbols(self) -> None:\n if self.tiling.is_epsilon():\n self.img[0][0] = \"ε\"\n return\n blacklist = set(Labeller._INIT_CACHE.values())\n for cell, (obstructions, _) in sorted(self.tiling.cell_basis().items()):\n positive = cell in self.tiling.positive_cells\n label = self._get_label(sorted(obstructions), positive)\n self.img[self.rows - cell[1] - 1][cell[0]] = label\n if label not in blacklist:\n bases = \", \".join(\n \"\".join(str(val + 1) for val in perm) for perm in obstructions\n )\n if positive:\n self.labels_to_basis[label] = f\"Av+({bases})\"\n else:\n self.labels_to_basis[label] = f\"Av({bases})\"\n if Labeller._EMPTY_STR in self.labels_to_basis:\n del self.labels_to_basis[Labeller._EMPTY_STR]",
"def getRawSymmetryMatrix(*args, **kwargs):\n \n pass",
"def Ida2Code(sig) -> str:\n\n mask = ''\n patt = ''\n\n for entry in sig.split(' '):\n if entry == '?':\n patt = patt + '\\\\x00'\n mask = mask + '?'\n else:\n patt = patt + '\\\\x%s' % entry\n mask = mask + 'x'\n\n return patt, mask",
"def killing_form(self):\n from beluga.liepack import killing\n basis = self.basis()\n L = len(basis)\n mat = np.zeros((L, L))\n for ii in range(L):\n for jj in range(L):\n mat[ii, jj] = killing(basis[ii], basis[jj])\n\n return mat",
"def retr_symmetry_generators(struct,ini):\n #hall = struct.spacegroup_hall()\n ini[\"symgen\"] = struct.get_symmetry_generators()\n return ini",
"def make_octads_table():\n bl = [mat24.gcode_to_bit_list(mat24.octad_to_gcode(i))\n for i in range(759)] \n return np.array(sum(bl, []), dtype = np.uint8)",
"def GetSymmetries(self, get_irt=False):\n \n syms = []\n for i in range(self.QE_nsym):\n s_rot = np.zeros( (3, 4))\n s_rot[:, :3] = np.transpose(self.QE_s[:, :, i])\n s_rot[:, 3] = self.QE_ft[:, i]\n \n syms.append(s_rot)\n \n if not get_irt:\n return syms\n return syms, self.QE_irt[:self.QE_nsym, :].copy() - 1",
"def symMatrix( numRows , numCols , letter ):\n n = max(len(str(numRows)),len(str(numCols)))\n format = '%s%0'+str(n)+'d'+'%0'+str(n)+'d'\n A = matrix(SR,numRows,numCols)\n for i in range(0,numRows):\n for j in range(0,numCols):\n A[i,j] = var(format % (letter,i,j) )\n\n return A",
"def calculateAsymmetries(image: np.ndarray, segmap: np.ndarray) -> Tuple[float]:\n\n Rmax = calcRmax(image, segmap)\n aperturepixmap = aperpixmap(image.shape[0], Rmax, 9, 0.1)\n\n starmask = np.ones_like(image)\n apix = minapix(image, segmap, aperturepixmap, starmask)\n angle = 180.\n\n A = calcA(image, segmap, aperturepixmap, apix, angle, starmask, noisecorrect=True)\n\n As = calcA(segmap, segmap, aperturepixmap, apix, angle, starmask)\n\n angle = 90.\n As90 = calcA(segmap, segmap, aperturepixmap, apix, angle, starmask)\n\n return A[0], As[0], As90[0]",
"def LR1Table(self):\n\t\tC=self.Elementos()\n\t\tTable=[]\n\t\t\n\t\tfor StatesSet in C:\n\t\t\tRow={}\n\t\t\tfor symbol in self.VT+self.VN:\n\t\t\t\tRow[symbol]=''\n\t\t\tTable.append(Row)\n\t\t\n\n\t\tfor i in range(len(C)):\n\n\t\t\t\tfor j in range(len(C[i])):\t\n\n\n\t\t\t\t\t# CALCULO DE ACCIONES\t\t\n\t\t\t\t\ta=C[i][j][0].dotNextChar()\n\t\t\t\t\tAlpha=C[i][j][0].getDotAlpha()\n\t\t\t\t\tB=C[i][j][0].dotNextChar()\n\t\t\t\t\t\n\t\t\t\t\t# CONDICION A !CORRECTO!\t\t\t\t\t\n\t\t\t\t\tif self.isPureTerminal(a) and a!='':\n\t\t\t\t\t\tindex=Grammar.getSetIndex( C,self.ir_a(C[i],a,self))\n\t\t\t\t\t\tif index != -1:\n\t\t\t\t\t\t\tTable[i][a]='d'+str(index)\n\n\t\t\t\t\t# CONDICION B !CORRECTA!\n\t\t\t\t\tif B=='' :\n\n\t\t\t\t\t\tz=-1\n\t\t\t\t\t\tfor k in range(len(self.Productions)):\n\t\t\t\t\t\t\tprodAux=self.Productions[k]\n\n\t\t\t\t\t\t\tif C[i][j][0].Right[0:len(C[i][j][0].Right)-1]==prodAux.Right:\n\n\t\t\t\t\t\t\t\tprint(\"---------------------------------------------prodWithDotAtEnd\")\n\t\t\t\t\t\t\t\tprint(prodAux)\n\t\t\t\t\t\t\t\tprint(\"---------------------------------------------C[][][]\")\n\t\t\t\t\t\t\t\tprint(C[i][j][0])\n\t\t\t\t\t\t\t\tif k!=0:\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tTable[i][C[i][j][1]]=\"r\"+str(k)\n\t\t\t\t\t\t\t\t\tprint('r'+str(k)+' con:'+str(C[i][j][1]))\n\n\t\t\t\t\t#CONDICION C !CORRECTO!\n\t\t\t\t\tif Grammar._in(C[i],[self.Productions[0].dotInit().dotAdvance(),'$']):\n\t\t\t\t\t\tprint('jaja-----------------------------------------------------------------:')\n\t\t\t\t\t\tprint(self.Productions[0])\n\t\t\t\t\t\tTable[i]['$']='Acc'\n\n\t\t\t\t\t# TRANSICIONES IR_A !CORRECTO!\n\t\t\t\t\tfor NT in self.VN:\n\t\t\t\t\t\tindex=Grammar.getSetIndex(C,self.ir_a(C[i],NT,self))\n\t\t\t\t\t\tif index!= -1:\n\t\t\t\t\t\t\tTable[i][NT]=str(index)\n\n\t\treturn Table",
"def dfDict2dictOfBigramIcTimes(dfDict, listOfBigrams, ict_XY=None, label='call', \n ict_label='ict'):\n for thisdf in dfDict.values():\n ict_XY=dictOfBigramIcTimes(listOfBigrams, thisdf, ict_XY_l=ict_XY, label=label,\n ict_label=ict_label)\n return ict_XY",
"def get_labels():\n cannon_teff = data['cannon_teff_2']\n cannon_logg = data['cannon_logg_2']\n cannon_m_h = data['cannon_m_h']\n cannon_alpha_m = data['cannon_alpha_m']\n cannon_a_k = data['cannon_a_k']\n labels = np.vstack(\n (cannon_teff, cannon_logg, cannon_m_h, cannon_alpha_m, cannon_a_k))\n cannon_chisq = data['cannon_chisq']\n np.savez(DATA_DIR + \"chisq.npz\", labels)\n np.savez(DATA_DIR + \"labels.npz\", labels)\n snrg = data['cannon_snrg'] # snrg * 3\n np.savez(\"snr.npz\", snrg)\n return labels.T"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CONVERT THE SYMMETRIES ====================== This module converts the symmetry function from the spglib format.
|
def GetSymmetriesFromSPGLIB(spglib_sym, regolarize = False):
# Check if the type is correct
if not "translations" in spglib_sym:
raise ValueError("Error, your symmetry dict has no 'translations' key.")
if not "rotations" in spglib_sym:
raise ValueError("Error, your symmetry dict has no 'rotations' key.")
# Get the number of symmetries
out_sym = []
n_sym = np.shape(spglib_sym["translations"])[0]
translations = spglib_sym["translations"]
rotations = spglib_sym["rotations"]
for i in range(n_sym):
# Create the symmetry
sym = np.zeros((3,4))
sym[:,:3] = rotations[i, :, :]
sym[:, 3] = translations[i,:]
# Edit the translation
if regolarize:
sym[:, 3] *= 2
sym[:, 3] = np.floor(sym[:, 3] + .5)
sym[:, 3] *= .5
sym[:, 3] = sym[:,3] % 1
out_sym.append(sym)
return out_sym
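# Illustrative usage sketch (added for clarity; the toy simple-cubic cell below
# is a hypothetical example, not part of the original data). It shows how the
# "rotations"/"translations" dict returned by spglib.get_symmetry feeds the
# converter above and yields a list of 3x4 [R|t] matrices.
import numpy as np
import spglib

_cell = (4.0 * np.eye(3),          # lattice vectors as rows
         [[0.0, 0.0, 0.0]],        # fractional atomic positions
         [1])                      # atomic numbers
_spglib_sym = spglib.get_symmetry(_cell, symprec=1e-5)
_syms = GetSymmetriesFromSPGLIB(_spglib_sym, regolarize=True)
print(len(_syms), _syms[0].shape)  # number of operations; each one is a (3, 4) block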
|
[
"def list2sym(lst):\n ...",
"def retr_symmetry_operations(struct,ini):\n ini[\"symgen\"] = struct.get_symmetry_operations()\n return ini",
"def retr_symmetry_generators(struct,ini):\n #hall = struct.spacegroup_hall()\n ini[\"symgen\"] = struct.get_symmetry_generators()\n return ini",
"def _assign_sym2(cmap_ops):\n cmap_ops.phase('assign sym2')\n keycap_chars = tool_utils.parse_int_ranges(\"\"\"\n 0023 # Number Sign\n 002A # Asterisk\n 0030-0039 # Digits\n 20E3 # Combining Enclosing Keycap\"\"\")\n cmap_ops.add_all(keycap_chars, 'SYM2')",
"def bytes_to_syms():\n return _digital_swig.bytes_to_syms()",
"def Sym(s, symbol_table={}):\n if s not in symbol_table: symbol_table[s] = Symbol(s)\n return symbol_table[s]",
"def get_hardcoded_sym_table() -> dict:\n sym_table = {'aa': 0, 'ae': 1, 'ah': 2, 'ao': 3, 'aw': 4, 'ay': 5, 'b': 6,\n 'ch': 7, 'd': 8, 'dh': 9, 'eh': 10, 'er': 11, 'ey': 12,\n 'f': 13, 'g': 14, 'hh': 15, 'ih': 16, 'iy': 17, 'jh': 18,\n 'k': 19, 'l': 20, 'm': 21, 'n': 22, 'ng': 23, 'ow': 24,\n 'oy': 25, 'p': 26, 'r': 27, 's': 28, 'sh': 29, 't': 30,\n 'th': 31, 'uh': 32, 'uw': 33, 'v': 34, 'w': 35, 'y': 36,\n 'z': 37, 'zh': 38, 'sil': 39}\n return sym_table",
"def get_symbol_map():\n functions = {}\n for ea in Segments():\n for funcea in Functions(SegStart(ea), SegEnd(ea)):\n size = FindFuncEnd(funcea) - funcea\n functions[funcea] = (GetFunctionName(funcea), size)\n # It may not be necessary to sort by ea, but be safe...\n output_lines = []\n for i, (ea, (name, size)) in enumerate(sorted(functions.items())):\n if len(name) > 255:\n print \"ClemSym: truncating name\", name\n name = name[:255]\n line = \"%d: %s @ %07x %d\" % (i, name, ea, size)\n output_lines.append(line)\n return '\\n'.join(output_lines)",
"def _get_sym_map(self):\n with open(self._norm, 'r') as digits:\n sym_map = yaml.load(digits)\n for key, value in sym_map.iteritems():\n new_map = np.array([], dtype=int)\n for element in value:\n if element == 0:\n new_map = np.append(new_map, np.zeros(self._seg, dtype=int))\n if element == 1:\n new_map = np.append(new_map, np.ones(self._seg, dtype=int))\n sym_map[key] = new_map\n return sym_map",
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]",
"def to_symmetric_function(self):\n m = SymmetricFunctions(self.parent().base_ring()).monomial()\n if self.is_symmetric():\n return m._from_dict({_Partitions(list(I)): coeff\n for I, coeff in self\n if list(I) in _Partitions}, remove_zeros=False)\n else:\n raise ValueError(\"%s is not a symmetric function\"%self)",
"def createValidSymmetryStrings(cls):\n return [\n cls(domain, boundary, isThroughCenter)\n for domain, boundary, isThroughCenter in cls.VALID_SYMMETRY\n ]",
"def getRawSymmetryMatrix(*args, **kwargs):\n \n pass",
"def to_symmetric(self,sym):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation to_symmetric')\n\n # Return a copy of self if already a symtensor\n if self.is_symmetric:\n return self.copy()\n\n # Convert the full dense (sparse in symtensor lang) to symmetric version\n else:\n # Create the new tensor\n newten = self.ten.copy()\n assert(len(sym[0]) == len(newten.shape))\n # Convert the shape\n newshape = []\n for i in range(len(newten.shape)):\n newshape.append(len(sym[1][i]))\n newshape.append(newten.shape[i]/len(sym[1][i]))\n newten = newten.reshape(newshape)\n # Do a transpose on the indices\n order = []\n for i in range(len(sym[1])):\n order.append(2*i)\n for i in range(len(sym[1])):\n order.append(2*i+1)\n newten = newten.transpose(order)\n # Create a random symtensor\n newsymten = rand(newten.shape[len(sym[1]):],\n sym=sym,\n backend=self.backend,\n dtype=self.dtype,\n legs=self.legs,\n in_mem=self.in_mem)\n # Contract with delta to get dense irrep\n delta = newsymten.ten.get_irrep_map()\n einstr = LETTERS[:len(sym[1])].upper() + \\\n LETTERS[:len(sym[1])] + ',' + \\\n LETTERS[:len(sym[1])].upper() + '->' + \\\n LETTERS[:len(sym[1])-1].upper() + \\\n LETTERS[:len(sym[1])]\n newten = newsymten.backend.einsum(einstr,newten,delta)\n # Put the result into a symtensor\n newsymten.ten.array = newten\n # Return result\n return newsymten",
"def SSpcGroup(SGData,SSymbol):\n \n def fixMonoOrtho():\n mod = ''.join(modsym).replace('1/2','0').replace('1','0')\n if SGData['SGPtGrp'] in ['2','m']: #OK\n if mod in ['a00','0b0','00g']:\n result = [i*-1 for i in SGData['SSGKl']]\n else:\n result = SGData['SSGKl'][:]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n elif SGData['SGPtGrp'] == '2/m': #OK\n if mod in ['a00','0b0','00g']:\n result = SGData['SSGKl'][:]\n else:\n result = [i*-1 for i in SGData['SSGKl']]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n else: #orthorhombic\n return [-SSGKl[i] if mod[i] in ['a','b','g'] else SSGKl[i] for i in range(3)]\n \n def extendSSGOps(SSGOps):\n for OpA in SSGOps:\n OpAtxt = SSMT2text(OpA)\n if 't' not in OpAtxt:\n continue\n for OpB in SSGOps:\n OpBtxt = SSMT2text(OpB)\n if 't' not in OpBtxt:\n continue\n OpC = list(SGProd(OpB,OpA))\n OpC[1] %= 1.\n OpCtxt = SSMT2text(OpC)\n# print OpAtxt.replace(' ','')+' * '+OpBtxt.replace(' ','')+' = '+OpCtxt.replace(' ','')\n for k,OpD in enumerate(SSGOps):\n OpDtxt = SSMT2text(OpD)\n OpDtxt2 = ''\n if SGData['SGGray']: \n OpDtxt2 = SSMT2text([OpD[0],OpD[1]+np.array([0.,0.,0.,.5])])\n# print ' ('+OpCtxt.replace(' ','')+' = ? '+OpDtxt.replace(' ','')+')'\n if OpCtxt == OpDtxt:\n continue\n elif OpCtxt == OpDtxt2:\n continue\n elif OpCtxt.split(',')[:3] == OpDtxt.split(',')[:3]:\n if 't' not in OpDtxt:\n SSGOps[k] = OpC\n# print k,' new:',OpCtxt.replace(' ','')\n break\n else:\n OpCtxt = OpCtxt.replace(' ','')\n OpDtxt = OpDtxt.replace(' ','')\n Txt = OpCtxt+' conflicts with '+OpDtxt\n# print (Txt)\n return False,Txt\n return True,SSGOps\n \n def findMod(modSym):\n for a in ['a','b','g']:\n if a in modSym:\n return a\n \n def genSSGOps():\n SSGOps = SSGData['SSGOps'][:]\n iFrac = {}\n for i,frac in enumerate(SSGData['modSymb']):\n if frac in ['1/2','1/3','1/4','1/6','1']:\n iFrac[i] = frac+'.'\n# print SGData['SpGrp']+SSymbol\n# print 'SSGKl',SSGKl,'genQ',genQ,'iFrac',iFrac,'modSymb',SSGData['modSymb']\n# set identity & 1,-1; triclinic\n SSGOps[0][0][3,3] = 1.\n## expand if centrosymmetric\n# if SGData['SGInv']:\n# SSGOps += [[-1*M,V] for M,V in SSGOps[:]]\n# monoclinic - all done & all checked\n if SGData['SGPtGrp'] in ['2','m']: #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n for i in iFrac:\n SSGOps[1][0][3,i] = -SSGKl[0]\n elif SGData['SGPtGrp'] == '2/m': #OK\n SSGOps[1][0][3,3] = SSGKl[1]\n if 's' in gensym:\n SSGOps[1][1][3] = 0.5\n for i in iFrac:\n SSGOps[1][0][3,i] = SSGKl[0]\n \n# orthorhombic - all OK not fully checked\n elif SGData['SGPtGrp'] in ['222','mm2','m2m','2mm']: #OK\n if SGData['SGPtGrp'] == '222':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[1,2],2:[1,3]},'b':{2:[3,2],0:[1,2]}} #OK\n elif SGData['SGPtGrp'] == 'mm2':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} #OK\n elif SGData['SGPtGrp'] == 'm2m':\n OrOps = {'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]}} #OK\n elif SGData['SGPtGrp'] == '2mm':\n OrOps = {'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]}} #OK\n a = findMod(SSGData['modSymb'])\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSGKl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSGKl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] == 'mmm': #OK\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} \n a = findMod(SSGData['modSymb'])\n if 
a == 'g':\n SSkl = [1,1,1]\n elif a == 'a':\n SSkl = [-1,1,-1]\n else:\n SSkl = [1,-1,-1]\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSkl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSkl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps \n# tetragonal - all done & checked\n elif SGData['SGPtGrp'] == '4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n elif SGData['SGPtGrp'] == '-4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = 1\n elif SGData['SGPtGrp'] in ['4/m',]: #OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n for i,j in enumerate([1,3]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['422','4mm','-42m','-4m2',]: #OK\n iGens = [1,4,5]\n if SGData['SGPtGrp'] in ['4mm','-4m2',]:\n iGens = [1,6,7]\n for i,j in enumerate(iGens):\n if '1/2' in SSGData['modSymb'] and i < 2:\n SSGOps[j][0][3,1] = SSGKl[i]\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n if 's' in gensym and j == 6:\n SSGOps[j][1][3] = -genQ[i]\n else:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['4/mmm',]:#OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n SSGOps[6][0][3,1] = SSGKl[1]\n if modsym:\n SSGOps[1][1][3] = -genQ[3]\n for i,j in enumerate([1,2,6,7]):\n SSGOps[j][0][3,3] = 1\n SSGOps[j][1][3] = genQ[i]\n E,Result = extendSSGOps(SSGOps)\n if not E:\n return E,Result\n else:\n SSGOps = Result\n \n# trigonal - all done & checked\n elif SGData['SGPtGrp'] == '3': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-3': #OK\n SSGOps[1][0][3,3] = -SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] in ['312','3m','-3m','-3m1','3m1']: #OK\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n for i,j in enumerate([1,5]):\n if SGData['SGPtGrp'] in ['3m','-3m']:\n SSGOps[j][0][3,3] = 1\n else: \n SSGOps[j][0][3,3] = SSGKl[i+1]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['321','32']: #OK\n for i,j in enumerate([1,4]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['31m','-31m']: #OK\n ids = [1,3]\n if SGData['SGPtGrp'] == '-31m':\n ids = [1,3]\n if '1/3' in SSGData['modSymb']:\n SSGOps[ids[0]][0][3,1] = -SSGKl[0]\n for i,j in enumerate(ids):\n SSGOps[j][0][3,3] = 1\n if genQ[i+1]:\n SSGOps[j][1][3] = genQ[i+1]\n \n# hexagonal all done & checked\n elif SGData['SGPtGrp'] == '6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n elif SGData['SGPtGrp'] in ['6/m',]: #OK\n SSGOps[1][0][3,3] = -SSGKl[1]\n SSGOps[1][1][3] = genQ[0]\n SSGOps[2][1][3] = genQ[1]\n elif SGData['SGPtGrp'] in ['622',]: #OK\n for i,j in enumerate([1,9,8]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = -genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n \n elif SGData['SGPtGrp'] in ['6mm','-62m','-6m2',]: #OK\n for i,j in enumerate([1,6,7]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif 
SGData['SGPtGrp'] in ['6/mmm',]: # OK\n for i,j in enumerate([1,2,10,11]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['1','-1']: #triclinic - done\n return True,SSGOps\n E,SSGOps = extendSSGOps(SSGOps)\n return E,SSGOps\n \n def specialGen(gensym,modsym):\n sym = ''.join(gensym)\n if SGData['SGPtGrp'] in ['2/m',] and 'n' in SGData['SpGrp']:\n if 's' in sym:\n gensym = 'ss'\n if SGData['SGPtGrp'] in ['-62m',] and sym == '00s':\n gensym = '0ss'\n elif SGData['SGPtGrp'] in ['222',]:\n if sym == '00s':\n gensym = '0ss'\n elif sym == '0s0':\n gensym = 'ss0'\n elif sym == 's00':\n gensym = 's0s'\n elif SGData['SGPtGrp'] in ['mmm',]:\n if 'g' in modsym:\n if sym == 's00':\n gensym = 's0s'\n elif sym == '0s0':\n gensym = '0ss'\n elif 'a' in modsym:\n if sym == '0s0':\n gensym = 'ss0'\n elif sym == '00s':\n gensym = 's0s'\n elif 'b' in modsym:\n if sym == '00s':\n gensym = '0ss'\n elif sym == 's00':\n gensym = 'ss0'\n return gensym\n \n Fracs = {'1/2':0.5,'1/3':1./3,'1':1.0,'0':0.,'s':.5,'t':1./3,'q':.25,'h':-1./6,'a':0.,'b':0.,'g':0.}\n if SGData['SGLaue'] in ['m3','m3m']:\n return '(3+1) superlattices not defined for cubic space groups',None\n elif SGData['SGLaue'] in ['3R','3mR']:\n return '(3+1) superlattices not defined for rhombohedral settings - use hexagonal setting',None\n try:\n modsym,gensym = splitSSsym(SSymbol)\n except ValueError:\n return 'Error in superspace symbol '+SSymbol,None\n modQ = [Fracs[mod] for mod in modsym]\n SSGKl = SGData['SSGKl'][:]\n if SGData['SGLaue'] in ['2/m','mmm']:\n SSGKl = fixMonoOrtho()\n Ngen = len(gensym)\n if SGData.get('SGGray',False):\n Ngen -= 1\n if len(gensym) and Ngen != len(SSGKl):\n return 'Wrong number of items in generator symbol '+''.join(gensym),None\n gensym = specialGen(gensym[:Ngen],modsym)\n genQ = [Fracs[mod] for mod in gensym[:Ngen]]\n if not genQ:\n genQ = [0,0,0,0]\n SSgSpc = SGData['SpGrp']+SSymbol\n if SGData['SGGray']:\n SSgSpc = SSgSpc.replace('(',\" 1'(\")\n SSGData = {'SSpGrp':SSgSpc,'modQ':modQ,'modSymb':modsym,'SSGKl':SSGKl}\n SSCen = np.zeros((len(SGData['SGCen']),4))\n for icen,cen in enumerate(SGData['SGCen']):\n SSCen[icen,0:3] = cen\n if 'BNSlattsym' in SGData and '_' in SGData['BNSlattsym'][0]:\n Ncen = len(SGData['SGCen'])\n for icen in range(Ncen//2,Ncen):\n SSCen[icen,3] = 0.5\n SSGData['SSGCen'] = SSCen%1.\n SSGData['SSGOps'] = []\n for iop,op in enumerate(SGData['SGOps']):\n T = np.zeros(4)\n ssop = np.zeros((4,4))\n ssop[:3,:3] = op[0]\n T[:3] = op[1]\n SSGData['SSGOps'].append([ssop,T])\n E,Result = genSSGOps()\n if E:\n SSGData['SSGOps'] = Result\n if DEBUG:\n print ('Super spacegroup operators for '+SSGData['SSpGrp'])\n for Op in Result:\n print (SSMT2text(Op).replace(' ',''))\n if SGData['SGInv']: \n for Op in Result:\n Op = [-Op[0],-Op[1]%1.]\n print (SSMT2text(Op).replace(' ','')) \n return None,SSGData\n else:\n return Result+'\\nOperator conflict - incorrect superspace symbol',None",
"def prepare_symbols(self):",
"def writeSymmetryData(self,parameters): \n \n symdict = {'ncs':[],'C2':[],'C3':[],'C4':[],'C5':[],'C6':[]}; symw = False\n \n for symmetry in self.latestRun.sortedSymmetryRestraints():\n symdict[symmetry.symmetryCode].append((1,symmetry.segmentLength,'A'))\n \n for symmetry in symdict:\n if len(symdict[symmetry]) > 0:\n if symmetry == 'ncs': \n parameters['ncs'] = {}\n parameters['ncs']['on'] = True\n parameters['ncs']['constant'] = self.latestRun.get('ncsRestraintConstant')\n parameters['ncs']['segments'] = []\n for symrange in symdict[symmetry]: parameters['ncs']['segments'].append(symrange) \n elif symw == False: \n parameters['symmetry'] = {}\n parameters['symmetry']['on'] = True\n parameters['symmetry']['constant'] = self.latestRun.get('symmetryRestraintConstant')\n for symrange in symdict[symmetry]: parameters['ncs']['segments'].append(symrange)\n symw = True\n else: pass",
"def symmetrize(input, output, symmetries, full_group): # pylint: disable=redefined-builtin\n model = _read_input(input)\n click.echo(\"Reading symmetries from file '{}' ...\".format(symmetries))\n sym = sr.io.load(symmetries)\n model_sym = _symmetrize(sym, model, full_group) # pylint: disable=assignment-from-no-return\n _write_output(model_sym, output)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GET IRT ======= Get the irt array. It is the array of atom indices that the symmetry operation swaps: the y-th element (irt[y]) is the index of the atom in the original structure, while y is the index of the equivalent atom after the symmetry is applied.
|
def GetIRT(structure, symmetry, timer = Timer.Timer(), debug = False):
new_struct = structure.copy()
if timer is None:
new_struct.fix_coords_in_unit_cell(delete_copies = False, debug = debug)
else:
timer.execute_timed_function(new_struct.fix_coords_in_unit_cell, delete_copies = False, debug = debug)
n_struct_2 = new_struct.copy()
if timer is None:
new_struct.apply_symmetry(symmetry, True)
irt = np.array(new_struct.get_equivalent_atoms(n_struct_2), dtype =np.intc)
else:
timer.execute_timed_function(new_struct.apply_symmetry, symmetry, True)
irt = np.array( timer.execute_timed_function(new_struct.get_equivalent_atoms, n_struct_2), dtype =np.intc)
return irt
|
[
"def GetSymmetries(self, get_irt=False):\n \n syms = []\n for i in range(self.QE_nsym):\n s_rot = np.zeros( (3, 4))\n s_rot[:, :3] = np.transpose(self.QE_s[:, :, i])\n s_rot[:, 3] = self.QE_ft[:, i]\n \n syms.append(s_rot)\n \n if not get_irt:\n return syms\n return syms, self.QE_irt[:self.QE_nsym, :].copy() - 1",
"def trace(self):\r\n\r\n tr = np.array([np.trace(ii) for ii in self.z])\r\n\r\n return tr",
"def getRawSymmetryMatrix(*args, **kwargs):\n \n pass",
"def local_IEN(self,patch_num,patch_mimic=False): \r\n \"\"\"\r\n 1D case\r\n \"\"\"\r\n \r\n if self.dimension==1:\r\n num_bases=self.num_bases\r\n num_element_bases=self.order+1\r\n num_local_bases=num_bases\r\n basis_array=np.linspace(1,num_local_bases,num_local_bases)\r\n\r\n if patch_mimic:\r\n return basis_array.astype(int)-1 #MAY NEED TO REVERSE\r\n else: \r\n \r\n\r\n \"\"\"\r\n Total number of elements in patch\r\n \"\"\"\r\n num_elements=self.number_elements\r\n \r\n \"\"\"\r\n Initializing IEN array\r\n \"\"\"\r\n if num_elements==1:\r\n patch_local_IEN=np.zeros(num_element_bases)\r\n \r\n else:\r\n patch_local_IEN=np.zeros((num_elements,num_element_bases))\r\n \r\n \"\"\"\r\n counter for IEN row\r\n \"\"\"\r\n IEN_row=0\r\n \"\"\"\r\n for loops for finding entries for each row of IEN\r\n \"\"\"\r\n for col in range(self.number_elements):\r\n \r\n \"\"\"\r\n Bounds for rows and columns in basis_array for current element\r\n \"\"\"\r\n lowest_col_in_ele=col*self.mp\r\n highest_col_in_ele=col*self.mp+self.order+1 #formatted to be used as index\r\n\r\n \r\n \"\"\"\r\n Gathers entries for current element in local IEN\r\n \"\"\"\r\n row_local_IEN=basis_array[lowest_col_in_ele:highest_col_in_ele]\r\n \r\n if num_elements==1:\r\n patch_local_IEN=row_local_IEN[::-1]\r\n else:\r\n patch_local_IEN[IEN_row,:]=row_local_IEN[::-1]\r\n \r\n \r\n \"\"\"\r\n Counter for going to next row in IEN\r\n \"\"\"\r\n IEN_row+=1\r\n\r\n \r\n \"\"\"\r\n Ensuring that entry is a 2D array by using a dummy row for consistency\r\n \"\"\"\r\n if len(patch_local_IEN.shape)!=2 :\r\n patch_local_IEN=np.vstack((patch_local_IEN,np.zeros(len(patch_local_IEN))))\r\n \r\n return patch_local_IEN.astype(int)-1\r\n \"\"\"\r\n 2D case\r\n \"\"\"\r\n \"\"\"\r\n Number of bases in principle directions along patch\r\n \"\"\"\r\n \r\n num_basis_xi=self.num_bases[patch_num,0]\r\n num_basis_eta=self.num_bases[patch_num,1]\r\n \r\n \"\"\"\r\n Total number of bases functions over patch\r\n \"\"\"\r\n num_local_bases=num_basis_xi*num_basis_eta\r\n \r\n \"\"\"\r\n Number of supporting bases over an element\r\n \"\"\"\r\n dimensions=self.order[patch_num,:]+1 #Number of bases in xi and eta direction with support on each element\r\n num_element_bases=dimensions.prod() \r\n \r\n \"\"\"\r\n Creating 2d array in \"shape\" of elements in patch that contains basis function numbers\r\n \"\"\"\r\n basis_array=np.linspace(1,num_local_bases,num_local_bases)\r\n basis_array=basis_array.reshape(num_basis_eta,num_basis_xi)\r\n \r\n if patch_mimic:\r\n \r\n return basis_array.astype(int)-1 #MAY NEED TO REVERSE\r\n \r\n else: \r\n \r\n \"\"\"\r\n Total number of elements in patch\r\n \"\"\"\r\n num_elements=self.number_elements[patch_num,:].prod()\r\n \r\n \"\"\"\r\n Initializing IEN array\r\n \"\"\"\r\n if num_elements==1:\r\n patch_local_IEN=np.zeros(num_element_bases)\r\n else:\r\n patch_local_IEN=np.zeros((num_elements,num_element_bases))\r\n \r\n \"\"\"\r\n counter for IEN row\r\n \"\"\"\r\n IEN_row=0\r\n \r\n \"\"\"\r\n for loops for finding entries for each row of IEN\r\n \"\"\"\r\n for row in range(self.number_elements[patch_num,1]):\r\n for col in range(self.number_elements[patch_num,0]):\r\n \r\n #ASK about line 294 in IGA file to shorten this\r\n \r\n \"\"\"\r\n Bounds for rows and columns in basis_array for current element\r\n \"\"\"\r\n lowest_row_in_ele=row*self.mp[patch_num,1]\r\n highest_row_in_ele=row*self.mp[patch_num,1]+self.order[patch_num,1]+1 #formatted to be used as index\r\n lowest_col_in_ele=col*self.mp[patch_num,0]\r\n 
highest_col_in_ele=col*self.mp[patch_num,0]+self.order[patch_num,0]+1 #formatted to be used as index\r\n \r\n \"\"\"\r\n Gathers entries for current element in local IEN\r\n \"\"\"\r\n row_local_IEN=basis_array[lowest_row_in_ele:highest_row_in_ele,lowest_col_in_ele:highest_col_in_ele]\r\n \r\n if num_elements==1:\r\n patch_local_IEN=row_local_IEN.flatten()[::-1]\r\n else:\r\n patch_local_IEN[IEN_row,:]=row_local_IEN.flatten()[::-1]\r\n \r\n \r\n \"\"\"\r\n Counter for going to next row in IEN\r\n \"\"\"\r\n IEN_row+=1\r\n \r\n \"\"\"\r\n Ensuring that entry is a 2D array by using a dummy row for consistency\r\n \"\"\"\r\n if len(patch_local_IEN.shape)!=2 :\r\n patch_local_IEN=np.vstack((patch_local_IEN,np.zeros(len(patch_local_IEN))))\r\n \r\n return patch_local_IEN.astype(int)-1",
"def isi(self):\n isis = []\n for spk in self:\n isis.append(spk.isi())\n return numpy.array(isis)",
"def get_array(self): # real signature unknown; restored from __doc__\n pass",
"def Tz(self):\n return self.P[2, 3]",
"def ttISF_tvISF(ttISF):\n res = []\n for j in range(len(ttISF)):\n ttj = ttISF[j] #jth PO ISF\n if ttj[0] == m.const(1): #onset\n newres = [m.const(1),m.const(0)]\n elif ttj[0] == m.const(0):\n newres = [m.const(0),m.const(1)]\n else: \n newres = [[tt_tv(ttj[0]),tt_tv(ttj[1])]] \n res = res + newres\n return res",
"def getReactionRates(self, T, P, Ci):\n cython.declare(rxnRates=numpy.ndarray, rxn=Reaction, j=cython.int)\n rxnRates = numpy.zeros(len(self.reactions), numpy.float64)\n for rxn in self.reactions:\n j = rxn.index - 1\n rxnRates[j] = rxn.getRate(T, P, Ci)\n return rxnRates",
"def GetI(self, *args):\n return _snap.TFltV_GetI(self, *args)",
"def copy(self):\n new_tirp = TIRP()\n new_tirp._symbols = copy(self._symbols)\n new_tirp._label=self._label\n new_tirp._tirp_matrix = self._tirp_matrix.copy()\n for entity_id in self._supporting_sequences_by_entity.keys():\n new_tirp._supporting_sequences_by_entity[entity_id] = deepcopy(self._supporting_sequences_by_entity[entity_id])\n for entity_id in self._Artemis_by_entity.keys():\n new_tirp._Artemis_by_entity[entity_id] = deepcopy(self._Artemis_by_entity[entity_id])\n return new_tirp",
"def GetI(self, *args):\n return _snap.TIntTrV_GetI(self, *args)",
"def radii(self):\n return array([self.graph[u][v]['conductivity']\n for u, v in self.edgeset])",
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def rgetYrot(self):\n return _core.CGPkronSumCache_rgetYrot(self)",
"def gety(self):\n res = np.array([])\n for p in self.trajlist:\n res = np.concatenate( (res, p.gety()) )\n return res",
"def return_shifts(self):\r\n return [self.xc - self.xc0, self.yc - self.yc0]",
"def get_rmat_tvec(extrinsic_mat: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n\n try:\n rot_mat = extrinsic_mat[0:3, 0:3]\n tvec = np.array(\n extrinsic_mat[0:3, 3], dtype=np.float32).reshape(3, -1)\n except ValueError as ext_mat_err:\n raise ValueError('The extrinsic matrix entered: {} \\\n is not a 4x4 matrix or is zero.'.format(extrinsic_mat)) from ext_mat_err\n\n return rot_mat, tvec",
"def __getitem__(self, *args) -> \"tid_t\":\n return _ida_pro.tid_array___getitem__(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
APPLY SYMMETRY ============== Apply the symmetry to the given vector of displacements. Translations are neglected.
|
def ApplySymmetryToVector(symmetry, vector, unit_cell, irt):
# Get the vector in crystalline coordinate
nat, dumb = np.shape(vector)
work = np.zeros( (nat, 3))
sym = symmetry[:, :3]
v1 = Methods.covariant_coordinates(unit_cell, vector)
w1 = sym.dot(v1.T).T
# Return in cartesian coordinates
work[irt[:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum("ab,a", unit_cell, w1)
return work
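# Illustrative call sketch (hypothetical values added for clarity, not part of
# the original data): apply the identity operation to a random two-atom
# displacement field. With the identity rotation and the trivial atom mapping
# the displacements should come back unchanged, up to round-off.
import numpy as np

_nat = 2
_unit_cell = 5.0 * np.eye(3)              # rows are the lattice vectors
_disp = np.random.uniform(-0.1, 0.1, (_nat, 3))
_identity = np.zeros((3, 4))
_identity[:, :3] = np.eye(3)              # rotation block; the translation column is ignored
_irt = np.arange(_nat, dtype=np.intc)     # identity atom mapping
_new_disp = ApplySymmetryToVector(_identity, _disp, _unit_cell, _irt)
print(np.allclose(_new_disp, _disp))      # should print True for the identity operation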
|
[
"def ApplySymmetriesToVector(symmetries, vector, unit_cell, irts):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n n_sym = len(symmetries)\n\n assert n_sym == len(irts)\n\n work = np.zeros( (n_sym, nat, 3), dtype = np.double, order = \"C\")\n \n # Pass to crystalline coordinates\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n \n # Apply the symmetry\n for j, symmetry in enumerate(symmetries):\n sym = symmetry[:, :3]\n w1 = sym.dot(v1.T).T\n\n # Return in cartesian coordinates\n work[j, irts[j][:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum(\"ab,a\", unit_cell, w1)\n \n return work",
"def ApplySymmetryToTensor4(self, v4, initialize_symmetries = True):\n if initialize_symmetries:\n self.SetupFromSPGLIB()\n\n # Apply the permutation symmetry\n symph.permute_v4(v4)\n\n # Apply the translational symmetries\n symph.trans_v4(v4, self.QE_translations_irt)\n\n # Apply all the symmetries at gamma\n symph.sym_v4(v4, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)",
"def _GetSymmetriesOnModes(symmetries, structure, pol_vects):\n\n # Get the vector of the displacement in the polarization\n m = np.tile(structure.get_masses_array(), (3,1)).T.ravel()\n disp_v = np.einsum(\"im,i->mi\", pol_vects, 1 / np.sqrt(m))\n underdisp_v = np.einsum(\"im,i->mi\", pol_vects, np.sqrt(m))\n\n n_dim, n_modes = np.shape(pol_vects)\n\n n_sym = len(symmetries)\n nat = structure.N_atoms\n \n # For each symmetry operation apply the\n pol_symmetries = np.zeros((n_sym, n_modes, n_modes), dtype = np.float64)\n for i, sym_mat in enumerate(symmetries):\n irt = GetIRT(structure, sym_mat)\n \n for j in range(n_modes):\n # Apply the i-th symmetry to the j-th mode\n new_vector = ApplySymmetryToVector(sym_mat, disp_v[j, :].reshape((nat, 3)), structure.unit_cell, irt).ravel()\n pol_symmetries[i, :, j] = underdisp_v.dot(new_vector.ravel())\n\n return pol_symmetries",
"def SymmetrizeVector(self, vector):\n\n # Apply Translations if any\n self.ApplyTranslationsToVector(vector)\n \n # Prepare the real vector\n tmp_vector = np.zeros( (3, self.QE_nat), dtype = np.float64, order = \"F\")\n \n for i in range(self.QE_nat):\n tmp_vector[0, i] = vector[i,0]\n tmp_vector[1, i] = vector[i,1]\n tmp_vector[2,i] = vector[i,2]\n \n symph.symvector(self.QE_nsymq, self.QE_irt, self.QE_s, self.QE_at, self.QE_bg,\n tmp_vector, self.QE_nat)\n \n \n for i in range(self.QE_nat):\n vector[i, :] = tmp_vector[:,i]",
"def orthogonalise_sym(vectors):\n ang = vec_angle(vectors[0],vectors[1])\n remainder = 90 - ang\n disp = remainder/2\n perp_unnormal = np.cross(vectors[0],vectors[1])\n normal = perp_unnormal / np.linalg.norm(perp_unnormal)\n\n rot_1 = rotation_matrix(normal,-disp)\n rot_2 = rotation_matrix(normal,disp)\n\n ovec_1 = np.dot(rot_1,vectors[0])\n ovec_2 = np.dot(rot_2,vectors[1])\n\n o_vecs = np.array([ovec_1,ovec_2])\n return o_vecs",
"def ApplySymmetryToTensor3(self, v3, initialize_symmetries = True):\n if initialize_symmetries:\n self.SetupFromSPGLIB()\n\n # Apply the permutation symmetry\n symph.permute_v3(v3)\n\n # Apply the translational symmetries\n symph.trans_v3(v3, self.QE_translations_irt)\n\n # Apply all the symmetries at gamma\n symph.sym_v3(v3, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)",
"def getSymmetryPlane(*args, **kwargs):\n \n pass",
"def use_symmetry(self):\n symmetry = self.params[PARAM_SYMMETRY]\n if symmetry is None:\n #Default to false if no parameter.\n return False\n else:\n return symmetry.use_symmetry",
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]",
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def force_symmetry(matrix, symmetry):\n symmetric_matrix = matrix.copy()\n\n if symmetry is None:\n return symmetric_matrix\n\n for index, x in np.ndenumerate(matrix):\n\n if symmetry == 'upper':\n if index[0] > index[1]:\n symmetric_matrix[index] = matrix[tuple(reversed(index))]\n\n if symmetry == 'lower':\n if index[0] < index[1]:\n symmetric_matrix[index] = matrix[tuple(reversed(index))]\n\n if symmetry == 'mean':\n if index[0] != index[1]:\n symmetric_matrix[index] = np.mean((matrix[index], matrix[tuple(reversed(index))]))\n\n return symmetric_matrix",
"def get_diagonal_symmetry_polarization_vectors(pol_sc, w, pol_symmetries):\n raise NotImplementedError(\"Error, this subroutine has not been implemented.\")\n\n # First we must get the degeneracies\n deg_list = get_degeneracies(w) \n\n # Now perform the diagonalization on each degeneracies\n final_vectors = np.zeros( pol_sc.shape, dtype = np.complex128)\n final_vectors[:,:] = pol_sc.copy()\n\n n_modes = len(w)\n n_syms = pol_symmetries.shape[0]\n skip_list = []\n\n syms_values = np.zeros((n_modes, n_syms), dtype = np.complex128)\n\n print(\"All modes:\")\n for i in range(n_modes):\n print(\"Mode {} = {} cm-1 => \".format(i, w[i] * RY_TO_CM), deg_list[i])\n\n print()\n for i in range(n_modes):\n if i in skip_list:\n continue\n\n # If we have no degeneracies, we can ignore it\n if len(deg_list[i]) == 1:\n continue \n\n partial_modes = np.zeros((len(deg_list[i]), len(deg_list[i])), dtype = np.complex128)\n partial_modes[:,:] = np.eye(len(deg_list[i])) # identity matrix\n\n mask_final = np.array([x in deg_list[i] for x in range(n_modes)])\n\n # If we have degeneracies, lets diagonalize all the symmetries\n for i_sym in range(n_syms):\n skip_j = []\n diagonalized = False\n np.savetxt(\"sym_{}.dat\".format(i_sym), pol_symmetries[i_sym, :,:])\n\n \n # Get the symmetry matrix in the mode space (this could generate a problem with masses)\n ps = pol_symmetries[i_sym, :, :]\n sym_mat_origin = ps[np.outer(mask_final, mask_final)].reshape((len(deg_list[i]), len(deg_list[i]))) \n\n for j_mode in deg_list[i]:\n if j_mode in skip_j:\n continue \n\n # Get the modes that can be still degenerate by symmetries\n mode_dna = syms_values[j_mode, : i_sym]\n\n # Avoid a bad error if i_sym = 0\n if len(mode_dna) > 0:\n mode_space = [x for x in deg_list[i] if np.max(np.abs(syms_values[x, :i_sym] - mode_dna)) < 1e-3]\n else:\n mode_space = [x for x in deg_list[i]]\n\n # The mask for the whole symmetry and the partial_modes\n mask_all = np.array([x in mode_space for x in np.arange(n_modes)])\n mask_partial_mode = np.array([x in mode_space for x in deg_list[i]])\n n_deg_new = np.sum(mask_all.astype(int))\n\n if len(mode_space) == 1:\n continue\n\n p_modes_new = partial_modes[:, mask_partial_mode]\n\n \n print()\n print(\"SYMMETRY_INDEX:\", i_sym)\n print(\"SHAPE sym_mat_origin:\", sym_mat_origin.shape)\n print(\"MODES: {} | DEG: {}\".format(mode_space, deg_list[i]))\n print(\"SHAPE P_MODES_NEW:\", p_modes_new.shape)\n sym_mat = np.conj(p_modes_new.T).dot(sym_mat_origin.dot(p_modes_new))\n \n # Decompose in upper triangular (assures that eigenvectors are orthogonal)\n s_eigvals_mat, s_eigvects = scipy.linalg.schur(sym_mat, output = \"complex\")\n s_eigvals = np.diag(s_eigvals_mat)\n\n # Check if the s_eigvals confirm the unitary of sym_mat\n # TODO: Check if some mass must be accounted or not...\n print(\"SYM_MAT\")\n print(sym_mat)\n print(\"Eigvals:\")\n print(s_eigvals)\n print(\"Eigval_mat:\")\n print(s_eigvals_mat)\n print(\"Eigvects:\")\n print(s_eigvects)\n assert np.max(np.abs(np.abs(s_eigvals) - 1)) < 1e-5, \"Error, it seems that the {}-th matrix is not a rotation.\".format(i_sym).format(sym_mat)\n\n # Update the polarization vectors to account this diagonalization\n partial_modes[:, mask_partial_mode] = p_modes_new.dot(s_eigvects)\n\n # Add the symmetry character on the new eigen modes\n for k_i, k in enumerate(mode_space):\n syms_values[k, i_sym] = s_eigvals[k_i]\n\n # Now add the modes analyzed up to know to the skip\n for x in mode_space:\n skip_j.append(x)\n \n diagonalized = True\n\n\n # Now we 
diagonalized the space\n # Apply the symmetries if we did not perform the diagonalization\n if not diagonalized:\n # Get the symmetrized matrix in the partial mode list:\n sym_mat = np.conj(partial_modes.T).dot(sym_mat_origin.dot(partial_modes))\n\n # Check that it is diagonal\n s_eigvals = np.diag(sym_mat) \n disp = sym_mat - np.diag( s_eigvals)\n if np.max(np.abs(disp)) > 1e-4:\n print(\"Matrix {}:\".format(i_sym))\n print(sym_mat)\n raise ValueError(\"Error, I expect the symmetry {} to be diagonal\".format(i_sym))\n\n syms_values[k, i_sym] = s_eigvals[k_i]\n\n # Add the symmetry character on the new eigen modes\n for k_i, k in enumerate(deg_list[i]):\n syms_values[k, i_sym] = s_eigvals[k_i]\n \n\n # Now we solved our polarization vectors, add them to the final ones\n final_vectors[:, mask_final] = pol_sc[:, mask_final].dot(partial_modes) \n\n # Do not further process the modes we used in this iteration\n for mode in deg_list[i]:\n skip_list.append(mode)\n\n\n return final_vectors, syms_values",
"def symmetricModelling(reset=bool, symmetry=int, seamTolerance=float, preserveSeam=int, seamFalloffCurve=\"string\", about=\"string\", axis=\"string\", allowPartial=bool, tolerance=float, topoSymmetry=bool):\n pass",
"def translate(self, vector):\n for atom in self.atoms:\n atom.translate(vector)",
"def ApplySymmetriesToV2(self, v2, apply_translations = True):\n\n # Apply the Permutation symmetry\n v2[:,:] = 0.5 * (v2 + v2.T)\n\n # First lets recall that the fortran subroutines\n # Takes the input as (3,3,nat,nat)\n new_v2 = np.zeros( (3,3, self.QE_nat, self.QE_nat), dtype = np.double, order =\"F\")\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n new_v2[:, :, i, j] = v2[3*i : 3*(i+1), 3*j : 3*(j+1)]\n\n # Apply the translations\n if apply_translations:\n # Check that the translations have been setted up\n assert len(np.shape(self.QE_translations_irt)) == 2, \"Error, symmetries not setted up to work in the supercell\"\n symph.trans_v2(new_v2, self.QE_translations_irt)\n \n # Apply the symmetrization\n symph.sym_v2(new_v2, self.QE_at, self.QE_bg, self.QE_s, self.QE_irt, self.QE_nsym, self.QE_nat)\n\n # Return back\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n v2[3*i : 3*(i+1), 3*j : 3*(j+1)] = new_v2[:, :, i, j]",
"def symmetrize(dimTags, a, b, c, d):\n api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)\n ierr = c_int()\n lib.gmshModelGeoSymmetrize(\n api_dimTags_, api_dimTags_n_,\n c_double(a),\n c_double(b),\n c_double(c),\n c_double(d),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGeoSymmetrize returned non-zero error code: \",\n ierr.value)",
"def apply_volume_symmetry(self, use_inline_c=True):\n t1 = time.time()\n\n #Get the # of pixels and the order from the symmetry map\n symm = self.volume_symmetry\n (numpix, order) = symm.shape\n\n if use_inline_c and not config.cfg.force_pure_python:\n #------ C version (about 400x faster than python) -------\n #Put some variables in the workspace\n old_q = self.qspace.flatten() * 1.0\n qspace_flat = old_q * 0.0\n\n support = \"\"\n code = \"\"\"\n int pix, ord, index;\n for (pix=0; pix<numpix; pix++)\n {\n //Go through each pixel\n for (ord=0; ord<order; ord++)\n {\n //Now go through each equivalent q.\n index = SYMM2(pix, ord);\n if (index >= 0)\n {\n //Valid index.\n QSPACE_FLAT1(pix) += OLD_Q1(index);\n //printf(\"%d\\\\n\", index);\n }\n }\n }\n \"\"\"\n varlist = ['old_q', 'qspace_flat', 'numpix', 'order', 'symm']\n weave.inline(code, varlist, compiler='gcc', support_code=support)\n #Reshape it back as a 3D array.\n n = len(self.inst.qx_list)\n self.qspace = qspace_flat.reshape( (n,n,n) )\n else:\n #---- Pure python version ----\n\n #Clear the starting space\n old_q = self.qspace\n new_q = self.qspace * 0\n for pix in xrange(numpix):\n for ord in xrange(order):\n eq_index = symm[pix, ord]\n if eq_index >= 0:\n #Add up to this pixel, the equivalent one.\n #The list includes this given voxel too.\n new_q.flat[pix] += old_q.flat[eq_index]\n self.qspace = new_q\n\n #Done.\n if self.verbose: print \"Volume symmetry computed in %.3f sec.\" % (time.time()-t1)",
"def GetSymmetriesFromSPGLIB(spglib_sym, regolarize = False):\n \n # Check if the type is correct\n if not \"translations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'translations' key.\")\n \n if not \"rotations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'rotations' key.\")\n \n # Get the number of symmetries\n out_sym = []\n n_sym = np.shape(spglib_sym[\"translations\"])[0]\n \n translations = spglib_sym[\"translations\"]\n rotations = spglib_sym[\"rotations\"]\n \n for i in range(n_sym):\n # Create the symmetry\n sym = np.zeros((3,4))\n sym[:,:3] = rotations[i, :, :]\n sym[:, 3] = translations[i,:]\n \n # Edit the translation\n if regolarize:\n sym[:, 3] *= 2\n sym[:, 3] = np.floor(sym[:, 3] + .5)\n sym[:, 3] *= .5\n sym[:, 3] = sym[:,3] % 1\n \n out_sym.append(sym)\n \n return out_sym",
"def _derive_layout_symmetry(self):\n self._sym_df = None # Default option\n if self.exploit_layout_symmetry:\n # Check symmetry of bounds & turbine_weights\n if np.unique(self.minimum_yaw_angle, axis=0).shape[0] > 1:\n print(\"minimum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.turbine_weights, axis=0).shape[0] > 1:\n print(\"turbine_weights is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n # Check if turbine_weights are consistently 1.0 everywhere\n if np.any(np.abs(self.turbine_weights - 1.0) > 0.001):\n print(\"turbine_weights are not uniformly 1.0.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n x = self.fi.layout_x\n y = self.fi.layout_y\n df = find_layout_symmetry(x=x, y=y)\n\n # If no axes of symmetry, exit function\n if df.shape[0] <= 0:\n print(\"Wind farm layout in floris is not symmetrical.\")\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n wd_array = self.fi.floris.flow_field.wind_directions\n sym_step = df.iloc[0][\"wd_range\"][1]\n if ((0.0 not in wd_array) or(sym_step not in wd_array)):\n print(\"Floris wind direction array does not \" +\n \"intersect {:.1f} and {:.1f}.\".format(0.0, sym_step))\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n ids_minimal = (wd_array >= 0.0) & (wd_array < sym_step)\n wd_array_min = wd_array[ids_minimal]\n wd_array_remn = np.remainder(wd_array, sym_step)\n\n if not np.all([(x in wd_array_min) for x in wd_array_remn]):\n print(\"Wind direction array appears irregular.\")\n print(\"Exploitation of symmetry has been disabled.\")\n\n self._sym_mapping_extrap = np.array(\n [np.where(np.abs(x - wd_array_min) < 0.0001)[0][0]\n for x in wd_array_remn], dtype=int)\n\n self._sym_mapping_reduce = copy.deepcopy(ids_minimal)\n self._sym_df = df\n\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
APPLY SYMMETRY ============== Apply the symmetry to the given vector of displacements. Translations are neglected.
|
def ApplySymmetriesToVector(symmetries, vector, unit_cell, irts):
# Get the vector in crystalline coordinate
nat, dumb = np.shape(vector)
n_sym = len(symmetries)
assert n_sym == len(irts)
work = np.zeros( (n_sym, nat, 3), dtype = np.double, order = "C")
# Pass to crystalline coordinates
v1 = Methods.covariant_coordinates(unit_cell, vector)
# Apply the symmetry
for j, symmetry in enumerate(symmetries):
sym = symmetry[:, :3]
w1 = sym.dot(v1.T).T
# Return in cartesian coordinates
work[j, irts[j][:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum("ab,a", unit_cell, w1)
return work
|
[
"def ApplySymmetryToVector(symmetry, vector, unit_cell, irt):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n work = np.zeros( (nat, 3))\n sym = symmetry[:, :3]\n\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n w1 = sym.dot(v1.T).T\n\n # Return in cartesian coordinates\n work[irt[:], :] = w1.dot(unit_cell)# unit_cell.T.dot(w1) #np.einsum(\"ab,a\", unit_cell, w1)\n\n return work",
"def ApplySymmetryToTensor4(self, v4, initialize_symmetries = True):\n if initialize_symmetries:\n self.SetupFromSPGLIB()\n\n # Apply the permutation symmetry\n symph.permute_v4(v4)\n\n # Apply the translational symmetries\n symph.trans_v4(v4, self.QE_translations_irt)\n\n # Apply all the symmetries at gamma\n symph.sym_v4(v4, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)",
"def _GetSymmetriesOnModes(symmetries, structure, pol_vects):\n\n # Get the vector of the displacement in the polarization\n m = np.tile(structure.get_masses_array(), (3,1)).T.ravel()\n disp_v = np.einsum(\"im,i->mi\", pol_vects, 1 / np.sqrt(m))\n underdisp_v = np.einsum(\"im,i->mi\", pol_vects, np.sqrt(m))\n\n n_dim, n_modes = np.shape(pol_vects)\n\n n_sym = len(symmetries)\n nat = structure.N_atoms\n \n # For each symmetry operation apply the\n pol_symmetries = np.zeros((n_sym, n_modes, n_modes), dtype = np.float64)\n for i, sym_mat in enumerate(symmetries):\n irt = GetIRT(structure, sym_mat)\n \n for j in range(n_modes):\n # Apply the i-th symmetry to the j-th mode\n new_vector = ApplySymmetryToVector(sym_mat, disp_v[j, :].reshape((nat, 3)), structure.unit_cell, irt).ravel()\n pol_symmetries[i, :, j] = underdisp_v.dot(new_vector.ravel())\n\n return pol_symmetries",
"def SymmetrizeVector(self, vector):\n\n # Apply Translations if any\n self.ApplyTranslationsToVector(vector)\n \n # Prepare the real vector\n tmp_vector = np.zeros( (3, self.QE_nat), dtype = np.float64, order = \"F\")\n \n for i in range(self.QE_nat):\n tmp_vector[0, i] = vector[i,0]\n tmp_vector[1, i] = vector[i,1]\n tmp_vector[2,i] = vector[i,2]\n \n symph.symvector(self.QE_nsymq, self.QE_irt, self.QE_s, self.QE_at, self.QE_bg,\n tmp_vector, self.QE_nat)\n \n \n for i in range(self.QE_nat):\n vector[i, :] = tmp_vector[:,i]",
"def orthogonalise_sym(vectors):\n ang = vec_angle(vectors[0],vectors[1])\n remainder = 90 - ang\n disp = remainder/2\n perp_unnormal = np.cross(vectors[0],vectors[1])\n normal = perp_unnormal / np.linalg.norm(perp_unnormal)\n\n rot_1 = rotation_matrix(normal,-disp)\n rot_2 = rotation_matrix(normal,disp)\n\n ovec_1 = np.dot(rot_1,vectors[0])\n ovec_2 = np.dot(rot_2,vectors[1])\n\n o_vecs = np.array([ovec_1,ovec_2])\n return o_vecs",
"def ApplySymmetryToTensor3(self, v3, initialize_symmetries = True):\n if initialize_symmetries:\n self.SetupFromSPGLIB()\n\n # Apply the permutation symmetry\n symph.permute_v3(v3)\n\n # Apply the translational symmetries\n symph.trans_v3(v3, self.QE_translations_irt)\n\n # Apply all the symmetries at gamma\n symph.sym_v3(v3, self.QE_at, self.QE_s, self.QE_irt, self.QE_nsymq)",
"def getSymmetryPlane(*args, **kwargs):\n \n pass",
"def use_symmetry(self):\n symmetry = self.params[PARAM_SYMMETRY]\n if symmetry is None:\n #Default to false if no parameter.\n return False\n else:\n return symmetry.use_symmetry",
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]",
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def force_symmetry(matrix, symmetry):\n symmetric_matrix = matrix.copy()\n\n if symmetry is None:\n return symmetric_matrix\n\n for index, x in np.ndenumerate(matrix):\n\n if symmetry == 'upper':\n if index[0] > index[1]:\n symmetric_matrix[index] = matrix[tuple(reversed(index))]\n\n if symmetry == 'lower':\n if index[0] < index[1]:\n symmetric_matrix[index] = matrix[tuple(reversed(index))]\n\n if symmetry == 'mean':\n if index[0] != index[1]:\n symmetric_matrix[index] = np.mean((matrix[index], matrix[tuple(reversed(index))]))\n\n return symmetric_matrix",
"def get_diagonal_symmetry_polarization_vectors(pol_sc, w, pol_symmetries):\n raise NotImplementedError(\"Error, this subroutine has not been implemented.\")\n\n # First we must get the degeneracies\n deg_list = get_degeneracies(w) \n\n # Now perform the diagonalization on each degeneracies\n final_vectors = np.zeros( pol_sc.shape, dtype = np.complex128)\n final_vectors[:,:] = pol_sc.copy()\n\n n_modes = len(w)\n n_syms = pol_symmetries.shape[0]\n skip_list = []\n\n syms_values = np.zeros((n_modes, n_syms), dtype = np.complex128)\n\n print(\"All modes:\")\n for i in range(n_modes):\n print(\"Mode {} = {} cm-1 => \".format(i, w[i] * RY_TO_CM), deg_list[i])\n\n print()\n for i in range(n_modes):\n if i in skip_list:\n continue\n\n # If we have no degeneracies, we can ignore it\n if len(deg_list[i]) == 1:\n continue \n\n partial_modes = np.zeros((len(deg_list[i]), len(deg_list[i])), dtype = np.complex128)\n partial_modes[:,:] = np.eye(len(deg_list[i])) # identity matrix\n\n mask_final = np.array([x in deg_list[i] for x in range(n_modes)])\n\n # If we have degeneracies, lets diagonalize all the symmetries\n for i_sym in range(n_syms):\n skip_j = []\n diagonalized = False\n np.savetxt(\"sym_{}.dat\".format(i_sym), pol_symmetries[i_sym, :,:])\n\n \n # Get the symmetry matrix in the mode space (this could generate a problem with masses)\n ps = pol_symmetries[i_sym, :, :]\n sym_mat_origin = ps[np.outer(mask_final, mask_final)].reshape((len(deg_list[i]), len(deg_list[i]))) \n\n for j_mode in deg_list[i]:\n if j_mode in skip_j:\n continue \n\n # Get the modes that can be still degenerate by symmetries\n mode_dna = syms_values[j_mode, : i_sym]\n\n # Avoid a bad error if i_sym = 0\n if len(mode_dna) > 0:\n mode_space = [x for x in deg_list[i] if np.max(np.abs(syms_values[x, :i_sym] - mode_dna)) < 1e-3]\n else:\n mode_space = [x for x in deg_list[i]]\n\n # The mask for the whole symmetry and the partial_modes\n mask_all = np.array([x in mode_space for x in np.arange(n_modes)])\n mask_partial_mode = np.array([x in mode_space for x in deg_list[i]])\n n_deg_new = np.sum(mask_all.astype(int))\n\n if len(mode_space) == 1:\n continue\n\n p_modes_new = partial_modes[:, mask_partial_mode]\n\n \n print()\n print(\"SYMMETRY_INDEX:\", i_sym)\n print(\"SHAPE sym_mat_origin:\", sym_mat_origin.shape)\n print(\"MODES: {} | DEG: {}\".format(mode_space, deg_list[i]))\n print(\"SHAPE P_MODES_NEW:\", p_modes_new.shape)\n sym_mat = np.conj(p_modes_new.T).dot(sym_mat_origin.dot(p_modes_new))\n \n # Decompose in upper triangular (assures that eigenvectors are orthogonal)\n s_eigvals_mat, s_eigvects = scipy.linalg.schur(sym_mat, output = \"complex\")\n s_eigvals = np.diag(s_eigvals_mat)\n\n # Check if the s_eigvals confirm the unitary of sym_mat\n # TODO: Check if some mass must be accounted or not...\n print(\"SYM_MAT\")\n print(sym_mat)\n print(\"Eigvals:\")\n print(s_eigvals)\n print(\"Eigval_mat:\")\n print(s_eigvals_mat)\n print(\"Eigvects:\")\n print(s_eigvects)\n assert np.max(np.abs(np.abs(s_eigvals) - 1)) < 1e-5, \"Error, it seems that the {}-th matrix is not a rotation.\".format(i_sym).format(sym_mat)\n\n # Update the polarization vectors to account this diagonalization\n partial_modes[:, mask_partial_mode] = p_modes_new.dot(s_eigvects)\n\n # Add the symmetry character on the new eigen modes\n for k_i, k in enumerate(mode_space):\n syms_values[k, i_sym] = s_eigvals[k_i]\n\n # Now add the modes analyzed up to know to the skip\n for x in mode_space:\n skip_j.append(x)\n \n diagonalized = True\n\n\n # Now we 
diagonalized the space\n # Apply the symmetries if we did not perform the diagonalization\n if not diagonalized:\n # Get the symmetrized matrix in the partial mode list:\n sym_mat = np.conj(partial_modes.T).dot(sym_mat_origin.dot(partial_modes))\n\n # Check that it is diagonal\n s_eigvals = np.diag(sym_mat) \n disp = sym_mat - np.diag( s_eigvals)\n if np.max(np.abs(disp)) > 1e-4:\n print(\"Matrix {}:\".format(i_sym))\n print(sym_mat)\n raise ValueError(\"Error, I expect the symmetry {} to be diagonal\".format(i_sym))\n\n syms_values[k, i_sym] = s_eigvals[k_i]\n\n # Add the symmetry character on the new eigen modes\n for k_i, k in enumerate(deg_list[i]):\n syms_values[k, i_sym] = s_eigvals[k_i]\n \n\n # Now we solved our polarization vectors, add them to the final ones\n final_vectors[:, mask_final] = pol_sc[:, mask_final].dot(partial_modes) \n\n # Do not further process the modes we used in this iteration\n for mode in deg_list[i]:\n skip_list.append(mode)\n\n\n return final_vectors, syms_values",
"def symmetricModelling(reset=bool, symmetry=int, seamTolerance=float, preserveSeam=int, seamFalloffCurve=\"string\", about=\"string\", axis=\"string\", allowPartial=bool, tolerance=float, topoSymmetry=bool):\n pass",
"def translate(self, vector):\n for atom in self.atoms:\n atom.translate(vector)",
"def ApplySymmetriesToV2(self, v2, apply_translations = True):\n\n # Apply the Permutation symmetry\n v2[:,:] = 0.5 * (v2 + v2.T)\n\n # First lets recall that the fortran subroutines\n # Takes the input as (3,3,nat,nat)\n new_v2 = np.zeros( (3,3, self.QE_nat, self.QE_nat), dtype = np.double, order =\"F\")\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n new_v2[:, :, i, j] = v2[3*i : 3*(i+1), 3*j : 3*(j+1)]\n\n # Apply the translations\n if apply_translations:\n # Check that the translations have been setted up\n assert len(np.shape(self.QE_translations_irt)) == 2, \"Error, symmetries not setted up to work in the supercell\"\n symph.trans_v2(new_v2, self.QE_translations_irt)\n \n # Apply the symmetrization\n symph.sym_v2(new_v2, self.QE_at, self.QE_bg, self.QE_s, self.QE_irt, self.QE_nsym, self.QE_nat)\n\n # Return back\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n v2[3*i : 3*(i+1), 3*j : 3*(j+1)] = new_v2[:, :, i, j]",
"def symmetrize(dimTags, a, b, c, d):\n api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)\n ierr = c_int()\n lib.gmshModelGeoSymmetrize(\n api_dimTags_, api_dimTags_n_,\n c_double(a),\n c_double(b),\n c_double(c),\n c_double(d),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGeoSymmetrize returned non-zero error code: \",\n ierr.value)",
"def apply_volume_symmetry(self, use_inline_c=True):\n t1 = time.time()\n\n #Get the # of pixels and the order from the symmetry map\n symm = self.volume_symmetry\n (numpix, order) = symm.shape\n\n if use_inline_c and not config.cfg.force_pure_python:\n #------ C version (about 400x faster than python) -------\n #Put some variables in the workspace\n old_q = self.qspace.flatten() * 1.0\n qspace_flat = old_q * 0.0\n\n support = \"\"\n code = \"\"\"\n int pix, ord, index;\n for (pix=0; pix<numpix; pix++)\n {\n //Go through each pixel\n for (ord=0; ord<order; ord++)\n {\n //Now go through each equivalent q.\n index = SYMM2(pix, ord);\n if (index >= 0)\n {\n //Valid index.\n QSPACE_FLAT1(pix) += OLD_Q1(index);\n //printf(\"%d\\\\n\", index);\n }\n }\n }\n \"\"\"\n varlist = ['old_q', 'qspace_flat', 'numpix', 'order', 'symm']\n weave.inline(code, varlist, compiler='gcc', support_code=support)\n #Reshape it back as a 3D array.\n n = len(self.inst.qx_list)\n self.qspace = qspace_flat.reshape( (n,n,n) )\n else:\n #---- Pure python version ----\n\n #Clear the starting space\n old_q = self.qspace\n new_q = self.qspace * 0\n for pix in xrange(numpix):\n for ord in xrange(order):\n eq_index = symm[pix, ord]\n if eq_index >= 0:\n #Add up to this pixel, the equivalent one.\n #The list includes this given voxel too.\n new_q.flat[pix] += old_q.flat[eq_index]\n self.qspace = new_q\n\n #Done.\n if self.verbose: print \"Volume symmetry computed in %.3f sec.\" % (time.time()-t1)",
"def GetSymmetriesFromSPGLIB(spglib_sym, regolarize = False):\n \n # Check if the type is correct\n if not \"translations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'translations' key.\")\n \n if not \"rotations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'rotations' key.\")\n \n # Get the number of symmetries\n out_sym = []\n n_sym = np.shape(spglib_sym[\"translations\"])[0]\n \n translations = spglib_sym[\"translations\"]\n rotations = spglib_sym[\"rotations\"]\n \n for i in range(n_sym):\n # Create the symmetry\n sym = np.zeros((3,4))\n sym[:,:3] = rotations[i, :, :]\n sym[:, 3] = translations[i,:]\n \n # Edit the translation\n if regolarize:\n sym[:, 3] *= 2\n sym[:, 3] = np.floor(sym[:, 3] + .5)\n sym[:, 3] *= .5\n sym[:, 3] = sym[:,3] % 1\n \n out_sym.append(sym)\n \n return out_sym",
"def _derive_layout_symmetry(self):\n self._sym_df = None # Default option\n if self.exploit_layout_symmetry:\n # Check symmetry of bounds & turbine_weights\n if np.unique(self.minimum_yaw_angle, axis=0).shape[0] > 1:\n print(\"minimum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.turbine_weights, axis=0).shape[0] > 1:\n print(\"turbine_weights is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n # Check if turbine_weights are consistently 1.0 everywhere\n if np.any(np.abs(self.turbine_weights - 1.0) > 0.001):\n print(\"turbine_weights are not uniformly 1.0.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n x = self.fi.layout_x\n y = self.fi.layout_y\n df = find_layout_symmetry(x=x, y=y)\n\n # If no axes of symmetry, exit function\n if df.shape[0] <= 0:\n print(\"Wind farm layout in floris is not symmetrical.\")\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n wd_array = self.fi.floris.flow_field.wind_directions\n sym_step = df.iloc[0][\"wd_range\"][1]\n if ((0.0 not in wd_array) or(sym_step not in wd_array)):\n print(\"Floris wind direction array does not \" +\n \"intersect {:.1f} and {:.1f}.\".format(0.0, sym_step))\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n ids_minimal = (wd_array >= 0.0) & (wd_array < sym_step)\n wd_array_min = wd_array[ids_minimal]\n wd_array_remn = np.remainder(wd_array, sym_step)\n\n if not np.all([(x in wd_array_min) for x in wd_array_remn]):\n print(\"Wind direction array appears irregular.\")\n print(\"Exploitation of symmetry has been disabled.\")\n\n self._sym_mapping_extrap = np.array(\n [np.where(np.abs(x - wd_array_min) < 0.0001)[0][0]\n for x in wd_array_remn], dtype=int)\n\n self._sym_mapping_reduce = copy.deepcopy(ids_minimal)\n self._sym_df = df\n\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prepare a FIND SYM input file: This method can be used to prepare a suitable input file for the ISOTROPY findsym program.
|
def PrepareISOTROPYFindSymInput(structure, path_to_file = "findsym.in",
title = "Prepared with Cellconstructor",
latticeTolerance = 1e-5, atomicPositionTolerance = 0.001):
lines = GetISOTROPYFindSymInput(structure, title, latticeTolerance, atomicPositionTolerance)
fp = open(path_to_file, "w")
fp.writelines(lines)
fp.close()
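
A hypothetical call, assuming `struct` is a cellconstructor Structure with a valid unit cell already defined (building the structure itself is outside this sketch):

PrepareISOTROPYFindSymInput(struct, path_to_file="findsym.in",
                            title="bcc Fe test cell",
                            latticeTolerance=1e-5,
                            atomicPositionTolerance=1e-3)
# findsym.in can now be fed to the ISOTROPY findsym program.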
|
[
"def createNewInput(self,currentInputFiles,oriInputFiles,samplerType,**Kwargs): \n import DecayParser\n import FissionYieldParser\n import QValuesParser\n import MaterialParser\n import PathParser\n \n keyWordDict = {}\n \n directoryFiles = ['path','library_fiss','input_dpl']\n #print (currentInputFiles)\n driverXML = 'test_phisics_code_interface.xml'\n keyWordDict = self.mapFile(driverXML)\n #print (keyWordDict)\n tarName = self.tarFiles(directoryFiles)\n runInfoList = self.getDirInfo(driverXML)\n #print (int(runInfoList[1]))\n N = int(runInfoList[1])\n \n \n #print (Kwargs)\n #print (\"\\n\\n\\n\\n\\n\\n\")\n perturbedVars = Kwargs['SampledVars']\n distributedPerturbedVars = self.distributeVariablesToParsers(perturbedVars)\n #print (distributedPerturbedVars)\n #perturbedVars = {'DECAY|BETA|U235':1.0778}\n #perturbedVars = {'FUEL1|DENSITY|U234':1.2, 'FUEL1|DENSITY|U235':1.08E+02}\n #perturbedVars = {'FY|FAST|PU241|SE78':1.2, 'FY|THERMAL|U238|ZN68':1.08E+02, 'FY|THERMAL|U235|ZN66':5.777}\n #perturbedVars = {'QVALUES|U235':4.5963, 'QVALUES|U238':1.08E+02, 'QVALUES|CF252':7.846}\n #perturbedVars = {'BETADECAY|U235':4.5963, 'BETADECAY|U238':1.08E+02, 'BETADECAY|CF252':7.846}\n \n # NOTE: IF YOU DON'T LIKE OR CAN'T GET THE THE KEYWORDS WIT THE DICTIONARY KEYWORDdICT, I CAN USE GETBASE TO \n # OBRAIN THE KEYWORD CORRESPONDING TO THE PARSER OF INTEREST. EXAMPLE: AAA = currentInputFiles[0].getBase()print (AAA)\n for i in distributedPerturbedVars.iterkeys():\n if i == 'DECAY' : decayParser = DecayParser.DecayParser(currentInputFiles[keyWordDict['decay']].getAbsFile(), **distributedPerturbedVars[i])\n if i == 'DENSITY' : materialParser = MaterialParser.MaterialParser(currentInputFiles[keyWordDict['material']].getAbsFile(), **distributedPerturbedVars[i])\n if i == 'FY' : FissionYieldParser = FissionYieldParser.FissionYieldParser(currentInputFiles[keyWordDict['fissionyield']].getAbsFile(), **distributedPerturbedVars[i])\n if i == 'QVALUES' : QValuesParser = QValuesParser.QValuesParser(currentInputFiles[keyWordDict['fissqvalue']].getAbsFile(), **distributedPerturbedVars[i])\n if i == 'BETADECAY': BetaDecayParser = PathParser.PathParser(currentInputFiles[keyWordDict['betadecay']].getAbsFile(), **distributedPerturbedVars[i])\n \n tarFiles = currentInputFiles[keyWordDict['dirfiles']].getAbsFile()\n workDir = currentInputFiles[0].getPath()\n #print (workDir)\n self.untarFolders(tarFiles, workDir)\n self.copyIntoFolders(workDir)\n \n return currentInputFiles",
"def generateSymsFiles(self):\n\n self.isymsFileHandle = open(self.isymsFile, 'w')\n self.isymsFileHandle.write(\"- 0\")\n\n # FSAs have only one field, hence 2 by default is added\n self.fieldFileDict = {2: self.isymsFileHandle}\n\n # FSTs have two fields\n if self.fstype == \"fst\":\n self.osymsFileHandle = open(self.osymsFile, 'w')\n self.osymsFileHandle.write(\"- 0\")\n self.fieldFileDict[3] = self.osymsFileHandle\n\n # Read the raw text file\n with open(self.fpath, 'r') as fsfiletxt:\n lines = fsfiletxt.readlines()\n lines = [line.strip().split(' ') for line in lines]\n\n for index in self.fieldFileDict:\n fh = self.fieldFileDict[index]\n field_count = 1\n for line in lines:\n try:\n field = line[index]\n except:\n pass\n else:\n fh.write(\"{} {}\".format(field_count + 1, field))\n fh.close()",
"def prepare_symbols(self):",
"def StartingOpen():\n# arg is global, because it's re-used during code execution as parameters to\n# find the relativ-path. \n global arg2\n for arg in sys.argv[1:]:\n arg2 = os.path.abspath(arg)\n try:\n if os.path.isfile(arg2):\n with open(arg2) as input_file :\n global xdsinp \n xdsinp = input_file.readlines()\n for lines in xdsinp :\n if re.search(r\"XSCALE\", lines):\n create = True\n if create == True:\n print \"XSCALE.INP is take in count\"\n return create\n return xdsinp\n return arg2\n \n except:\n print (\"it's not XSCALE.INP or it's corrupted\")\n exit(0)",
"def _construct_input_spec(self):",
"def struct_import_xcrys(filname,ubin=\"\"):\n #formatting outputname\n outname=filname.split('.')\n outname=outname[0].split('/')\n outname=outname[-1:]\n\n print(\"NOTE: XCRYSDEN INSTALLATION REQUIRED\")\n print(\"# Reading atomic coordinates from: \", filname)\n # compatbility with QE input/output\n if filname.endswith(\".in\"):\n with open(\"out.xsf\",\"w\") as outfile:\n subprocess.check_call([\"pwi2xsf\",filname],stdout=outfile)\n elif filname.endswith(\".out\"):\n with open(\"out.xsf\",\"w\") as outfile:\n subprocess.check_call([\"pwo2xsf\",filname],stdout=outfile)\n\n # no check_call, since it returns a non-zero, which raises error...\n subprocess.call([str(ubin)+\"qe2pmg.sh\", \"out.xsf\"])\n subprocess.check_call([\"mv\",\"out-out.xsf\", \"out-pmg.xsf\"])\n \n # Read structure from PWscf file, xsf format\n structure = Structure.from_file(\"out-pmg.xsf\")\n \n subprocess.call([\"rm\", \"out.xsf\",\"out-pmg.xsf\"])\n subprocess.call([\"rm\",\"-r\",\"__pycache__\"])\n\n return structure",
"def generate_source_files(self, use_simplification=False, use_cse=False):\n assert self.__is_function_set, \"Symbolic functions are not set!. Before call this method, call set_functions()\"\n self.__make_model_dir()\n if use_simplification:\n symfunc.simplify(self.__f)\n symfunc.simplify(self.__F)\n symfunc.simplify(self.__FxVx)\n symfunc.simplify(self.__FuVx)\n symfunc.simplify(self.__FxVxxFx)\n symfunc.simplify(self.__FuVxxFx)\n symfunc.simplify(self.__FuVxxFu)\n symfunc.simplify(self.__VxFxx)\n symfunc.simplify(self.__VxFux)\n symfunc.simplify(self.__VxFuu)\n symfunc.simplify(self.__l)\n symfunc.simplify(self.__lx)\n symfunc.simplify(self.__lu)\n symfunc.simplify(self.__lxx)\n symfunc.simplify(self.__lux)\n symfunc.simplify(self.__luu)\n symfunc.simplify(self.__phi)\n symfunc.simplify(self.__phix)\n symfunc.simplify(self.__phixx)\n f_model_h = open('models/'+str(self.__model_name)+'/ocp_model.hpp', 'w')\n f_model_h.writelines([\n\"\"\" \n#ifndef CDDP_OCP_MODEL_H\n#define CDDP_OCP_MODEL_H\n\n#define _USE_MATH_DEFINES\n\n#include <cmath>\n\n\nnamespace cddp {\n\nclass OCPModel {\nprivate:\n\"\"\"\n ])\n f_model_h.write(\n ' static constexpr int dimx_ = '+str(self.__dimx)+';\\n'\n )\n f_model_h.write(\n ' static constexpr int dimu_ = '\n +str(self.__dimu)+';\\n'\n )\n f_model_h.write('\\n')\n f_model_h.writelines([\n ' static constexpr double '+scalar_var[1]+' = '\n +str(scalar_var[2])+';\\n' for scalar_var in self.__scalar_vars\n ])\n f_model_h.write('\\n')\n for array_var in self.__array_vars:\n f_model_h.write(\n ' double '+array_var[1]+'['+str(len(array_var[0]))+']'+' = {'\n )\n for i in range(len(array_var[0])-1):\n f_model_h.write(str(array_var[2][i])+', ')\n f_model_h.write(str(array_var[2][len(array_var[0])-1])+'};\\n')\n f_model_h.writelines([\n\"\"\"\n\npublic:\n\n // Computes the dynamics f(t, x, u).\n // t : time parameter\n // x : state vector\n // u : control input vector\n // dx : the value of f(t, x, u)\n void dynamics(const double t, const double dtau, const double* x, \n const double* u, double* dx) const;\n\n // Computes the state equation F(t, x, u).\n // t : time parameter\n // x : state vector\n // u : control input vector\n // dx : the value of f(t, x, u)\n void stateEquation(const double t, const double dtau, const double* x, \n const double* u, double* F) const;\n\n // Computes the partial derivative of terminal cost with respect to state, \n // i.e., dphi/dx(t, x).\n // t : time parameter\n // x : state vector\n // u : control input vector\n void stageCostDerivatives(const double t, const double dtau, const double* x, \n const double* u, double* lx, double* lu, \n double* lxx, double* lux, double* luu) const;\n\n // Computes the partial derivative of terminal cost with respect to state, \n // i.e., dphi/dx(t, x).\n // t : time parameter\n // x : state vector\n // phix : the value of dphi/dx(t, x)\n void terminalCostDerivatives(const double t, const double* x, double* phix, \n double* phixx) const;\n\n // Computes the partial derivative of terminal cost with respect to state, \n // i.e., dphi/dx(t, x).\n // t : time parameter\n // x : state vector\n // u : control input vector\n void dynamicsDerivatives(const double t, const double dtau, const double* x, \n const double* u, const double* Vx, const double* Vxx, \n double* fxVx, double* fuVx, double* fxVxxfx, \n double* fuVxxfx, double* fuVxxfu, double* Vxfxx, \n double* Vxfux, double* Vxfuu) const;\n\n // Returns the dimension of the state.\n int dimx() const;\n\n // Returns the dimension of the contorl input.\n 
int dimu() const;\n};\n\n} // namespace cddp\n\n\n#endif // CDDP_OCP_MODEL_H\n\"\"\" \n ])\n f_model_h.close()\n f_model_c = open('models/'+self.__model_name+'/ocp_model.cpp', 'w')\n f_model_c.writelines([\n\"\"\" \n#include \"ocp_model.hpp\"\n\n\nnamespace cddp {\n\nvoid OCPModel::dynamics(const double t, const double dtau, const double* x, \n const double* u, double* dx) const {\n\"\"\" \n ])\n self.__write_function(f_model_c, self.__f, 'dx', \"=\", use_cse)\n f_model_c.writelines([\n\"\"\" \n}\n\nvoid OCPModel::stateEquation(const double t, const double dtau, const double* x, \n const double* u, double* F) const {\n\"\"\" \n ])\n self.__write_function(f_model_c, self.__F, 'F', \"=\", use_cse)\n f_model_c.writelines([\n\"\"\" \n}\n\nvoid OCPModel::stageCostDerivatives(const double t, const double dtau, \n const double* x, const double* u, \n double* lx, double* lu, double* lxx, \n double* lux, double* luu) const {\n\"\"\"\n ])\n self.__write_multiple_functions(\n f_model_c, use_cse, \"+=\", [self.__lx, 'lx'], [self.__lu, 'lu'], \n [symfunc.matrix_to_array(self.__lxx), 'lxx'], \n [symfunc.matrix_to_array(self.__lux), 'lux'], \n [symfunc.matrix_to_array(self.__luu), 'luu']\n )\n f_model_c.writelines([\n\"\"\" \n}\n\n\nvoid OCPModel::terminalCostDerivatives(const double t, const double* x, \n double* phix, double* phixx) const {\n\"\"\"\n ])\n self.__write_multiple_functions(\n f_model_c, use_cse, \"=\", [self.__phix, 'phix'], \n [symfunc.matrix_to_array(self.__phixx), 'phixx']\n )\n f_model_c.writelines([\n\"\"\" \n}\n\nvoid OCPModel::dynamicsDerivatives(const double t, const double dtau, \n const double* x, const double* u, \n const double* Vx, const double* Vxx, \n double* fxVx, double* fuVx, double* fxVxxfx, \n double* fuVxxfx, double* fuVxxfu, \n double* Vxfxx, double* Vxfux, \n double* Vxfuu) const {\n\"\"\"\n ])\n self.__write_multiple_functions(\n f_model_c, use_cse, \"+=\", [self.__FxVx, 'fxVx'], \n [self.__FuVx, 'fuVx'],\n [symfunc.matrix_to_array(self.__FxVxxFx), 'fxVxxfx'],\n [symfunc.matrix_to_array(self.__FuVxxFx), 'fuVxxfx'],\n [symfunc.matrix_to_array(self.__FuVxxFu), 'fuVxxfu'],\n [symfunc.matrix_to_array(self.__VxFxx), 'Vxfxx'],\n [symfunc.matrix_to_array(self.__VxFux), 'Vxfux'],\n [symfunc.matrix_to_array(self.__VxFuu), 'Vxfuu']\n )\n f_model_c.writelines([\n\"\"\" \n}\n\nint OCPModel::dimx() const {\n return dimx_;\n}\n\nint OCPModel::dimu() const {\n return dimu_;\n}\n\n} // namespace cgmres\n\n\"\"\" \n ])\n f_model_c.close()",
"def set_pars(pardic, dsin='dsin.txt', copy_to=None, check_auxiliary=False):\r\n\r\n \r\n\r\n orig_file = open(dsin, 'r')\r\n lines = orig_file.readlines() # list of strings, each ending with '\\n'\r\n orig_file.close()\r\n\r\n # only search the relevant part of the file\r\n for ln, line in enumerate(lines):\r\n if line.find('double initialValue(') > -1:\r\n start=ln\r\n elif line.find('char initialDescription(') > -1:\r\n end=ln\r\n break\r\n \r\n pardic_to_search = pardic.copy()\r\n #pardic_remaining = pardic.copy()\r\n lines_ = lines[start:end]\r\n #foundpar = None\r\n \r\n for linenumber, s in enumerate(lines_):\r\n splitted = s.split()\r\n for par, val in pardic_to_search.iteritems():\r\n if par in splitted:\r\n #print '{} found'.format(par)\r\n # check structure of the file\r\n two_lines = len(splitted) != 8 #True if all in one line\r\n \r\n # first we check that the parameter is not an auxiliary\r\n # parameter (5th value of the 'array line' should be a 1)\r\n if two_lines: index=0\r\n else: index=4\r\n if check_auxiliary and not splitted[index] == '1':\r\n raise ValueError(\"The parameter %s is of type 'auxiliary'.\\n\\\r\n it cannot be set in the dymosim input file. \" % (par))# check if the value to write is in this line, or the previous one\r\n \r\n # now changing the value:\r\n if two_lines: \r\n #We have to change the \r\n # second value of the previous line\r\n prev_splitted = lines[start+linenumber-1].split() \r\n old_value = copy.copy(prev_splitted[1])\r\n prev_splitted[1] = str(val)\r\n prev_splitted.append('\\n')\r\n lines[start+linenumber-1] = ' '.join(prev_splitted)\r\n else:\r\n # all is nicely in one line\r\n old_value = copy.copy(splitted[1])\r\n splitted[1] = str(val)\r\n splitted.append('\\n') \r\n lines[start+linenumber] = ' '.join(splitted)\r\n print '%s found: %s is replaced by %s' % (par,old_value, val)\r\n pardic_to_search.pop(par)\r\n break\r\n \r\n \r\n # Write the file\r\n \r\n if copy_to is None:\r\n copy_to = dsin\r\n \r\n writefile = file(copy_to, 'w')\r\n writefile.writelines(lines)\r\n writefile.close()\r\n \r\n print \"These parameters were NOT found:\\n\"\r\n for i in sorted(pardic_to_search.keys()):\r\n print i",
"def initKfile(self):\r\n with open(self.Kfile,\"w\") as f:\r\n f.write(\"*KEYWORD\\n\")",
"def write_xdsconv_input(params):\n\n friedel = {True: 'FALSE', False: 'TRUE'}\n\n file_text = \"!-XDSCONV.INP--------File generated by auto.process \\n\"\n file_text += \"INPUT_FILE= %s XDS_ASCII\\n\" % params['input_file']\n file_text += \"OUTPUT_FILE=%s %s\\n\" % (params['output_file'], params['format'])\n file_text += \"FRIEDEL'S_LAW=%s\\n\" % (friedel[params['anomalous']])\n file_text += \"MERGE=FALSE\\n\"\n if params['freeR_fraction'] > 0.0:\n file_text += \"GENERATE_FRACTION_OF_TEST_REFLECTIONS=%0.2f\\n\" % params['freeR_fraction']\n file_text += \"!-------------------File generated by auto.process \\n\"\n outfile = open('XDSCONV.INP', 'w')\n outfile.write(file_text)\n outfile.close()",
"def open_TSW_bin():\r\n global TSW_file\r\n TSW_file = fd.askopenfilename(filetypes=((\"binary file\", \"*.bin\"), (\"All files\", \"*.*\")))",
"def source_input_prompt():\n # print(), \n # print(input_instruction_loc), \n # print(input_instruction)\n path = getcwd() + '/' + input()\n return path",
"def _recover_input(self):\n filename = self._dirname + \"/main_data\"\n key = \"*************** input file needed to reproduce this run ***************\\n\"\n try:\n fh = open(filename, 'r')\n lines = fh.readlines()\n fh.close()\n except IOError:\n lines = []\n\n try:\n dex = lines.index(key)\n except ValueError:\n msg = 'no main_data file!'\n raise BertiniError(msg)\n inlines = lines[dex+1:]\n while inlines[0] == '\\n':\n inlines = inlines[1:]\n\n dex = inlines.index('END;\\n')\n inlines = inlines[:dex+1]\n\n return inlines",
"def write_input(infile,tkin,nh2,cdmol=cdmol_default):\n infile.write(mole+'.dat\\n')\n infile.write('radex.out\\n')\n infile.write(str(flow*(1-bw))+' '+str(fupp/(1-bw))+'\\n')\n infile.write(str(tkin)+'\\n')\n infile.write('1\\n')\n infile.write('H2\\n')\n infile.write(str(nh2)+'\\n')\n infile.write(str(tbg)+'\\n')\n infile.write(str(cdmol)+'\\n')\n infile.write(str(dv)+'\\n')",
"def __setInputFilepath(self):\r\n\r\n\r\n input_filepath = tkFileDialog.askopenfilename(title=\"Select a file\")\r\n self.__input_filepath.setEntryText(input_filepath)\r\n self.__presetHeaderDefine()\r\n self.__presetArrayName()",
"def _create_outfilepath(self, inpath):\n return inpath + '.crypt'",
"def symbolize_file(oatfile, uncond):\n global orig_oat_size, symbolized_oat_size\n symfs = os.path.join(apo, \"symbols\")\n symoat = os.path.join(symfs, oatfile[1:])\n symoatdir = os.path.dirname(symoat)\n u.verbose(1, \"considering %s\" % symoat)\n if uncond or not os.path.exists(symoat):\n docmd(\"mkdir -p %s\" % symoatdir)\n docmd(\"adb pull %s %s\" % (oatfile, symoat))\n docmd(\"rm -f symbolized.oat\")\n origsize = collect_file_size(symoat)\n orig_oat_size += origsize\n docmd(\"oatdump --symbolize=%s\" % symoat)\n newsize = collect_file_size(\"symbolized.oat\")\n symbolized_oat_size += newsize\n docmd(\"mv -f symbolized.oat %s\" % symoat)\n delta = newsize - origsize\n if delta:\n frac = 100.0 * (1.0 * delta) / (1.0 * origsize)\n u.verbose(1, \"%s expanded %d bytes %f percent \"\n \"from symbolization\" % (symoat, delta, frac))",
"def read_everything():\n\n ### Paths to the fullsed, source and temperature files:\n fullsed_path = '../OldBeAtlas/fullsed_v2/'\n #fullsed_path = '../OldBeAtlas/fullsed/'\n source_path = '../OldBeAtlas/source/'\n temps_path = '../OldBeAtlas/temperatures/'\n\n ### assumed distance [parsecs] for the calculations\n dist_std = 10.\n\n\n ###########################\n \n ### The domain of the power-law grid:\n npar, sigpar, Mpar, obpar, cosipar = domain_PLgrid()\n filepars=[npar,sigpar,Mpar,obpar]\n\n print(\"Reading the OldBeAtlas files...\")\n print(\"\")\n\n files_fullsed=sorted(glob.glob(fullsed_path+'*'))\t\n files_source=sorted(glob.glob(source_path+'*'))\n files_temps=sorted(glob.glob(temps_path+'*'))\n\n files_fullsed_new=[] ### will receive the names of the fullsed\n ### files to be opened.\n\n ### It is assumed that the names of the fullsed files are of the form:\n ### fullsed_mod191_PLn4.0_sig0.05_h072_Rd050.0_Be_M04.80_ob1.10_H0.30_Z0.014_bE_Ell.sed2\n ### or\n ### fullsed_mod01_PLn3.5_sig0.00_h060_Rd050.0_Be_M03.80_ob1.20_H0.77_Z0.014_bE_Ell.sed2\n for i in range(0,len(npar)):\n for j in range(0,len(sigpar)):\n for k in range(0,len(Mpar)):\n for l in range(0,len(obpar)):\n ### Check if there is a fullsed file with some specific\n ### values of n, Sig, M and ob:\n for ifile in xrange(0,len(files_fullsed)):\n if ('PLn{0}_sig{1}_h072_Rd050.0_Be_'\\\n .format(filepars[0][i],filepars[1][j])+\\\n 'M{0}_ob{1}_H0.30_Z0.014_bE_Ell'\\\n .format(filepars[2][k],filepars[3][l]) in \\\n files_fullsed[ifile]) \\\n or ('PLn{0}_sig{1}_h060_Rd050.0_Be_'\\\n .format(filepars[0][i],filepars[1][j])+\\\n 'M{0}_ob{1}_H0.30_Z0.014_bE_Ell'\\\n .format(filepars[2][k],filepars[3][l]) in \\\n files_fullsed[ifile]):\n \n ### elements of 'files_fullsed_new' are = \n ### [ [n,sig,M,ob], \"fullsed file\" ]\n files_fullsed_new.append([[ filepars[0][i],\\\n filepars[1][j],\\\n filepars[2][k],\\\n filepars[3][l]],\\\n files_fullsed[ifile]]) \n\n ### Now that we have a 'files_fullsed_new' list complete, the idea is\n ### to create lists of source and temperature files in such a way that, \n ### for each fullsed file stored in a 'files_fullsed_new' line, \n ### there is a line with the correspondent source file in \n ### 'files_source_new' and a line with the correspondent temp file in \n ### 'files_temps_new'. \n\n ### It is assumed that the names of the source files are of the form:\n ### Be_M03.40_ob1.45_H0.54_Z0.014_bE_Ell.txt\n ### (Notice that the it is contained in the name of the fullsed file.)\n files_source_new=[] ### will receive the names of the source\n ### files to be opened.\n for iffn in xrange(0,len(files_fullsed_new)):\n ### Check if there is a source file whose name is contained in \n ### the name of the specific fullsed file:\n for ifs in xrange(0,len(files_source)):\n if files_source[ifs].replace(source_path,'').replace('.txt','')\\\n in files_fullsed_new[iffn][1]:\n files_source_new.append(files_source[ifs])\n ### (Notice that I have assumed that there is always a source file \n ### associated with a fullsed file. 
That is not the case with the \n ### temperature files below.)\n\n\n ### It is assumed that the names of the temperature files are of the form:\n ### mod126_PLn3.5_sig0.28_h072_Rd050.0_Be_M09.60_ob1.20_H0.30_Z0.014_bE_Ell30_avg.temp\n ### (Notice that the it is contained in the name of the fullsed file.)\n files_temps_new=[] ### will receive the names of the temperature\n ### files to be opened.\n for iffn in xrange(0,len(files_fullsed_new)):\n achei=0 ### Some fullsed files may not have correspondent temp files,\n ### like the ones of purely photospherical models.\n ### Check if there is a temperature file whose name is contained in\n ### the name of the specific fullsed file.\n ### If not, add \"EMPTY\" to the 'files_temps_new' list.\n for ifs in xrange(0,len(files_temps)):\n if files_temps[ifs].replace(temps_path,'').replace(\\\n '30_avg.temp','')\\\n in files_fullsed_new[iffn][1]:\n files_temps_new.append(files_temps[ifs])\n achei=1\n if achei == 0:\n files_temps_new.append('EMPTY')\n\n\n ### Now, building the 'fullsed_contents' list. It will contain the \n ### relevant contents of all available fullsed, source and temperature \n ### files of the grid.\n\n fullsed_contents=[] ### This list will receive the important contents\n ### of all the files\n for ifile in xrange(0,len(files_fullsed_new)):\n\n ### Reading the fullsed, source and temperature files:\n \n fullsedtest=files_fullsed_new[ifile][1]\n f0=open(fullsedtest,'r')\n f0linhas=f0.readlines()\n f0.close()\n\n sourcetest=files_source_new[ifile]\n f1=open(sourcetest,'r')\n f1linhas=f1.readlines()\n f1.close() \n\n tempstest=files_temps_new[ifile]\n if tempstest != 'EMPTY':\n ### OBS: This pyhdust procedure will print \n ### \"'FILE' completely read!\"\n ncr, ncmu, ncphi, nLTE, nNLTE, Rstarz, Raz, betaz, dataz, \\\n pcr, pcmu, pcphi = hdt.readtemp(tempstest)\n abttemp=[\n [dataz[0,i,ncmu/2,0]/Rstarz for i in \\\n xrange(0,len(dataz[0,:,ncmu/2,0]))],\n [dataz[3,i,ncmu/2,0] for i in \\\n xrange(0,len(dataz[3,:,ncmu/2,0]))]\n ]\n else:\n abttemp=[\n [np.nan,np.nan],\n [np.nan,np.nan]\n ]\n\n\n ### Obtaining each element of the 'fullsed_contents' list\n\n nobs=int(f0linhas[3].split()[1]) ### number of different cosi\n nlbd=int(f0linhas[3].split()[0]) ### number of lambdas for each cosi\n contents=[ \n fullsedtest, ### 0: Name of fullsed file\n np.zeros(nobs), ### 1: will receive the cosi's\n np.zeros((nobs,nlbd,3)), ### 2: will receive the SED\n sourcetest, ### 3: Name of source file\n np.zeros(5), ### 4: will receive the \n ### parameters of the star \n ### (source)\n tempstest, ### 5: Name of temperature file\n np.zeros((2,len(abttemp[0]))), ### 6: will receive the temp \n ### profile\n [[],[]]\n ]\n contents[1][:] = np.nan\n contents[2][:] = np.nan\n contents[4][:] = np.nan\n contents[6][:] = np.nan\n\n\n ### Receiving cosi and SED (\"1\" and \"2\")\n for iobs in xrange(0,nobs):\n mu = float(f0linhas[5+iobs*nlbd].split()[0])\n contents[1][iobs] = mu\n for ilbd in xrange(0, nlbd):\n auxi = f0linhas[5+iobs*nlbd+ilbd].split()\n contents[2][iobs, ilbd, 0] = float(auxi[2])\n contents[2][iobs, ilbd, 1] = float(auxi[3])\n contents[2][iobs, ilbd, 2] = float(auxi[7])\n\n\n ### Receiving parameters of the star (source) (\"4\")\n contents[4][0] = float(f1linhas[3].split()[2]) ### M\n contents[4][1] = float(f1linhas[4].split()[2]) ### R_pole\n contents[4][2] = float(f1linhas[5].split()[2]) ### W\n contents[4][3] = float(f1linhas[6].split()[2]) ### L\n contents[4][4] = float(f1linhas[7].split()[2]) ### Beta_GD\n \n ### Receiving the temperature 
profile (\"6\")\n for i in xrange(0,len(contents[6][0,:])):\n contents[6][0,i] = abttemp[0][i]\n contents[6][1,i] = abttemp[1][i]\n \n ### elements of 'fullsed_contents':\n fullsed_contents.append([files_fullsed_new[ifile][0],contents])\n\n print(\"\")\n\n return files_fullsed_new, files_source_new, files_temps_new, fullsed_contents, \\\n fullsed_path, source_path, temps_path, dist_std",
"def prepare_dcm2niix_input(infile, rec_infile, work_dir):\n log.info(\"Arrange dcm2niix input.\")\n\n if zipfile.is_zipfile(infile):\n\n try:\n\n with zipfile.ZipFile(infile, \"r\") as zip_obj:\n log.info(f\"Establishing input as zip file: {infile}\")\n exit_if_archive_empty(zip_obj)\n dcm2niix_input_dir = extract_archive_contents(zip_obj, work_dir)\n\n except zipfile.BadZipFile:\n log.exception(\n (\n \"Incorrect gear input. \"\n \"File is not a zip archive file (.zip). Exiting.\"\n )\n )\n os.sys.exit(1)\n\n elif tarfile.is_tarfile(infile):\n\n try:\n with tarfile.open(infile, \"r\") as tar_obj:\n log.info(f\"Establishing input as tar file: {infile}\")\n exit_if_archive_empty(tar_obj)\n dcm2niix_input_dir = extract_archive_contents(tar_obj, work_dir)\n\n except tarfile.ReadError:\n log.exception(\n (\n \"Incorrect gear input. \"\n \"File is not a compressed tar archive file (.tgz). Exiting.\"\n )\n )\n os.sys.exit(1)\n\n elif rec_infile:\n log.info(f\"Establishing input as par/rec file pair: {infile} & {rec_infile}\")\n\n # If a REC file input was provided, check infile for a valid PAR file\n if infile.lower().endswith(\"par\") and rec_infile.lower().endswith(\"rec\"):\n\n dcm2niix_input_dir, dirname = setup_dcm2niix_input_dir(infile, work_dir)\n shutil.copy2(rec_infile, dcm2niix_input_dir)\n shutil.copy2(infile, dcm2niix_input_dir)\n adjust_parrec_filenames(dcm2niix_input_dir, dirname)\n\n else:\n log.error(\n (\n \"Incorrect gear input. If rec_file_input provided, \"\n \"dcm2niix_input must be a valid PAR file. \"\n \"rec_infile must be a valid REC file. Exiting.\"\n )\n )\n os.sys.exit(1)\n\n else:\n # Assume all other inputs will function downstream\n dcm2niix_input_dir, dirname = setup_dcm2niix_input_dir(infile, work_dir)\n shutil.copy2(infile, dcm2niix_input_dir)\n\n log.info(\"Input for dcm2niix prepared successfully.\")\n\n return dcm2niix_input_dir"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Same as the method PrepareISOTROPYFindSymInput, but the input file content is returned as a list of strings (lines).
|
def GetISOTROPYFindSymInput(structure, title = "Prepared with Cellconstructor",
latticeTolerance = 1e-5, atomicPositionTolerance = 0.001):
# Check if the structure has a unit cell
if not structure.has_unit_cell:
raise ValueError("Error, the given structure has not a valid unit cell.")
# Prepare the standard input
lines = []
lines.append("!useKeyWords\n")
lines.append("!title\n")
lines.append(title + "\n")
lines.append("!latticeTolerance\n")
lines.append("%.8f\n" % latticeTolerance)
lines.append("!atomicPositionTolerance\n")
lines.append("%.8f\n" % atomicPositionTolerance)
lines.append("!latticeBasisVectors\n")
for i in range(3):
lines.append("%16.8f %16.8f %16.8f\n" % (structure.unit_cell[i, 0],
structure.unit_cell[i, 1],
structure.unit_cell[i, 2]))
lines.append("!atomCount\n")
lines.append("%d\n" % structure.N_atoms)
lines.append("!atomType\n")
lines.append(" ".join(structure.atoms) + "\n")
lines.append("!atomPosition\n")
for i in range(structure.N_atoms):
# Get the crystal coordinate
new_vect = Methods.covariant_coordinates(structure.unit_cell, structure.coords[i, :])
lines.append("%16.8f %16.8f %16.8f\n" % (new_vect[0],
new_vect[1],
new_vect[2]))
return lines
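
A short sketch of how this variant is used in practice: the lines can be inspected or post-processed before (or instead of) being written to disk. Again, `struct` is an assumed cellconstructor Structure with a unit cell.

lines = GetISOTROPYFindSymInput(struct, title="inspection only")
print("".join(lines))    # review the generated findsym input
# PrepareISOTROPYFindSymInput above is equivalent to this call followed by
# writing the lines to a file with writelines().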
|
[
"def PrepareISOTROPYFindSymInput(structure, path_to_file = \"findsym.in\",\n title = \"Prepared with Cellconstructor\",\n latticeTolerance = 1e-5, atomicPositionTolerance = 0.001):\n \n lines = GetISOTROPYFindSymInput(structure, title, latticeTolerance, atomicPositionTolerance)\n \n fp = open(path_to_file, \"w\")\n fp.writelines(lines)\n fp.close()",
"def process_input():\n n, symbols = input().split(None, 1)\n n = int(n)\n symbols = [Variable(s) for s in symbols.split()]\n\n read_line = lambda: input().replace(':', '').split()\n\n input_data = []\n for _ in range(n):\n input_data.append(process_input_row(read_line()))\n\n to_solve = process_input_row(read_line(), final_line=True)\n return symbols, input_data, to_solve",
"def input_stock_symbols():\n raw_input_str = input('Enter stock symbols to analyze (comma delimited): ')\n stock_symbols = [symbol.strip().upper() for symbol in raw_input_str.split(',')]\n return stock_symbols",
"def get_sierra_input(self, inpFileName=None):\n info_recs = self.__ex_get_info_recs()\n sierra_inp = []\n begin = False\n for rec in info_recs:\n vals = rec.split()\n if not begin: # have not reached Sierra block\n if len(vals) >= 2 and vals[0].lower() == \"begin\" and vals[1].lower() == \"sierra\":\n begin = True\n if begin: # inside Sierra block\n sierra_inp.append(rec)\n if len(rec) > MAX_LINE_LENGTH:\n print(\n \"WARNING: max line length reached for one or more input lines;\")\n print(\" input data might be incomplete for these lines\")\n break\n if len(vals) >= 2 and vals[0].lower() == \"end\" and vals[1].lower() == \"sierra\":\n break # end of Sierra block\n\n if inpFileName:\n fd = open(inpFileName.encode('ascii'), \"w\")\n for fileLine in sierra_inp:\n fd.write(fileLine+\"\\n\")\n fd.close()\n return []\n\n return sierra_inp",
"def _get_code_input_output(self, lines):\n if self._has_input_prompt(lines):\n input = [self._remove_prompt(line) for line in lines\n if self._has_input_prompt(line)]\n output = [line for line in lines\n if not self._has_input_prompt(line)]\n return '\\n'.join(input), '\\n'.join(output)\n else:\n return '\\n'.join(lines), ''",
"def ExtractSymbols(self, native_heaps, sym_paths):\n raise NotImplementedError()",
"def read_file_input(prompt):\n database = []\n try:\n filename = input(prompt)\n with open(filename, 'r') as file:\n content = file.readlines()\n for line in content:\n strings = line.rstrip().split(';')\n sub_poly = []\n for element in strings:\n poly = element.split(' ')\n operand = Operand(int(poly[0]), int(poly[1]))\n sub_poly.append(operand)\n database.append(sub_poly)\n\n # while True:\n # line = file.readline().rstrip()\n # if line == '':\n # break\n # strings = line.split(';')\n # sub_poly = []\n # for element in strings:\n # poly = element.split(' ')\n # operand = Operand(int(poly[0]), int(poly[1]))\n # sub_poly.append(operand)\n # database.append(sub_poly)\n return database\n except OSError:\n print(\"Error in reading the file.\")",
"def make_input_convenient(raw_input: Sequence[str]) -> List[str]:\r\n convenient = []\r\n\r\n for lst in raw_input:\r\n convenient.append(lst)\r\n\r\n return convenient",
"def _read_input(self, source):\n result = parse_lisp_iterator(source)\n return result",
"def _terminalSymbolsGenerator(self):\n if self.end_symbol_set == \"unicode\":\n symbol_set = UNICODE_SYMBOLS\n else:\n symbol_set = ASCII_SYMBOLS\n\n for c in symbol_set:\n yield(c)\n raise ValueError(\"To many input strings.\")",
"def get_puzzle_input(path: str) -> list:\n with open(path) as inp:\n return [list(line.strip(\"\\n\")) for line in inp]",
"def _run_fix_input_data(self, input_data):\n if input_data is not None:\n if len(input_data) > 0:\n if '\\\\n' in input_data:\n # Convert \\n in the input into new lines.\n lines = input_data.split('\\\\n')\n input_data = '\\n'.join(lines)\n return input_data.split('\\n')\n return []",
"def prepare_symbols(self):",
"def get_stocks_symbols(write_to_files=True):\n all_symbols = []\n log.info(\"Pulling markets symbols\")\n for market in markets:\n symbols = []\n request = urllib2.Request(market.soruce)\n try:\n result = urllib2.urlopen(request)\n except urllib2.URLError as e:\n log.error(\"url error #{}: {}\".format(e.errno, e.strerror))\n return\n\n data = result.readlines()\n\n # save all data to file\n if write_to_files:\n filepath = make_filepath(DATA_PATH+\"companies\", market.name)\n companies = open(filepath, 'w')\n for line in data:\n companies.write(str(line))\n\n # parse the data to get list of symbols\n for company in data:\n symbol = company.split(',')[0][1:-1]\n symbols.extend([symbol])\n\n symbols.pop(0) # exclude the first line - the description line (at the head of the table)\n all_symbols.extend(symbols)\n\n return all_symbols",
"def get_symbols_from_line(line):\n \n line = line.strip().split(\"\\t\")\n \n # only include autosomal genes, so as to not worry about different mutation\n # rates on chrX between males and females.\n chrom = line[1]\n if chrom in [\"X\", \"chrX\", \"23\", \"Y\", \"chrY\", \"24\", \"MT\"]:\n return []\n \n # ignore variants in CNVs, since those variants aren't accessible to our\n # de novo SNV mutation framework.\n alt = line[4]\n if alt in [\"<DEL>\", \"<DUP>\"]:\n return []\n \n info = parse_vcf_info(line[7])\n symbols = \"\"\n if \"HGNC\" in info:\n symbols = info[\"HGNC\"]\n elif \"HGNC_ALL\" in info:\n symbols = info[\"HGNC_ALL\"]\n \n symbols = re.split(',|&|\\\\|', symbols)\n \n return(symbols)",
"def getInput():\n L = []\n url = \"http://icarus.cs.weber.edu/~hvalle/cs3030/data/barCodeData.txt\"\n response = urllib.request.urlopen(url).read().decode(\"utf-8\")\n for word in response.splitlines():\n L.append(word)\n for i in L:\n print(\"Enter a zip code: \", i)\n andrew_price_task2_hw7.printZipCode(i)",
"def gen_valid_line_symb(cls, str_in, str_out):\n str_out.value = gxapi_cy.WrapDB._gen_valid_line_symb(GXContext._get_tls_geo(), str_in.encode(), str_out.value.encode())",
"def findSymbols(strng):\n\t\tsymbols=[]\n\t\tfor i in range(len(strng)):\t\t\t\n\t\t\tif strng[i]!= '`':\n\t\t\t\tif i+1< len(strng):\n\t\t\t\t\tif strng[i+1]=='`':\n\t\t\t\t\t\tsymbols.append(strng[i]+strng[i+1])\n\t\t\t\t\telse:\n\t\t\t\t\t\tsymbols.append(strng[i])\n\t\t\t\telse:\n\t\t\t\t\tsymbols.append(strng[i])\n\t\treturn symbols",
"def _interactive_input_fn(self,hparams, decode_hp):\n num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1\n decode_length = decode_hp.extra_length\n # input_type = \"text\"\n p_hparams = hparams.problem_hparams\n has_input = \"inputs\" in p_hparams.input_modality\n vocabulary = p_hparams.vocabulary[\"inputs\" if has_input else \"targets\"]\n # This should be longer than the longest input.\n const_array_size = 10000\n # Import readline if available for command line editing and recall.\n # try:\n # import readline # pylint: disable=g-import-not-at-top,unused-variable\n # except ImportError:\n # pass\n\n is_begin_loop=False\n while True:\n # prompt = (\"INTERACTIVE MODE num_samples=%d decode_length=%d \\n\"\n # \" it=<input_type> ('text' or 'image' or 'label', default: \"\n # \"text)\\n\"\n # \" ns=<num_samples> (changes number of samples, default: 1)\\n\"\n # \" dl=<decode_length> (changes decode length, default: 100)\\n\"\n # \" <%s> (decode)\\n\"\n # \" q (quit)\\n\"\n # \">\" % (num_samples, decode_length, \"source_string\"\n # if has_input else \"target_prefix\"))\n # input_string = input(prompt)\n # if input_string == \"q\":\n # return\n # elif input_string[:3] == \"ns=\":\n # num_samples = int(input_string[3:])\n # elif input_string[:3] == \"dl=\":\n # decode_length = int(input_string[3:])\n # elif input_string[:3] == \"it=\":\n # input_type = input_string[3:]\n # else:\n # if input_type == \"text\":\n\n if(self._task_queue.empty()):\n if not is_begin_loop:\n is_begin_loop = True\n print('begin to loop ~ ')\n continue\n uid, input_string = self._task_queue.get()\n print('task queue is coming , uid = %s , input-str = %s' % (uid,input_string))\n\n input_ids = vocabulary.encode(input_string)\n if has_input:\n input_ids.append(text_encoder.EOS_ID)\n x = [num_samples, decode_length, len(input_ids)] + input_ids\n assert len(x) < const_array_size\n x += [0] * (const_array_size - len(x))\n features = {\n \"inputs\": np.array(x).astype(np.int32),\n }\n # elif input_type == \"image\":\n # input_path = input_string\n # img = vocabulary.encode(input_path)\n # features = {\n # \"inputs\": img.astype(np.int32),\n # }\n # elif input_type == \"label\":\n # input_ids = [int(input_string)]\n # x = [num_samples, decode_length, len(input_ids)] + input_ids\n # features = {\n # \"inputs\": np.array(x).astype(np.int32),\n # }\n # else:\n # raise Exception(\"Unsupported input type.\")\n for k, v in six.iteritems(\n problem_lib.problem_hparams_to_features(p_hparams)):\n features[k] = np.array(v).astype(np.int32)\n yield features"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|